timers: fix stop/cont with -icount

Stop/cont commands are broken with -icount due to a deadlock.  The
real problem is that the computation of timers_state.cpu_ticks_offset
makes no sense with -icount enabled: we set it to an icount clock value
in cpu_disable_ticks, and subtract a TSC (or similar, whatever
cpu_get_real_ticks happens to return) value in cpu_enable_ticks.
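
For reference, the two halves of the pre-patch pairing reduce to
something like this (a simplified sketch: the cpu_disable_ticks line
is quoted from the hunk below, while the cpu_enable_ticks line is
reconstructed from the description above, not quoted from the tree):

    /* stop (cpu_disable_ticks, pre-patch): icount domain with -icount */
    timers_state.cpu_ticks_offset = cpu_get_ticks();
    /* cont (cpu_enable_ticks): TSC domain */
    timers_state.cpu_ticks_offset -= cpu_get_real_ticks();

With -icount the stored value and the subtracted value come from two
unrelated clocks, so the resulting offset is meaningless.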

The fix is simple.  timers_state.cpu_ticks_offset is only used
together with cpu_get_real_ticks, so we can use cpu_get_real_ticks
in cpu_disable_ticks.  There is no need to update cpu_ticks_prev
at the time cpu_disable_ticks is called; instead, we can do it
the next time cpu_get_ticks is called.
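
After the patch both halves stay in the cpu_get_real_ticks domain; a
sketch of the resulting invariant (the cpu_enable_ticks line is again
assumed from the description above, not quoted):

    /* While ticks are enabled:
     *   cpu_get_ticks() == timers_state.cpu_ticks_offset + cpu_get_real_ticks()
     * While ticks are disabled:
     *   cpu_get_ticks() == timers_state.cpu_ticks_offset
     */
    timers_state.cpu_ticks_offset += cpu_get_real_ticks();  /* stop */
    timers_state.cpu_ticks_offset -= cpu_get_real_ticks();  /* cont */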

The change to cpu_disable_ticks is the important part of the patch.
The rest modifies cpu_get_ticks to always check
timers_state.cpu_ticks_prev, even when the ticks are not advancing
(i.e. the VM is stopped), and restructures cpu_get_clock_locked in
the same way, so that it and cpu_get_ticks keep the same shape.
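
To illustrate why the cpu_ticks_prev check must run on every call,
here is a minimal standalone sketch of the clamping logic (not QEMU
code; get_ticks and raw stand in for cpu_get_ticks and the return
value of cpu_get_real_ticks):

    #include <stdint.h>
    #include <stdio.h>

    static int64_t ticks_prev;
    static int64_t ticks_offset;

    static int64_t get_ticks(int64_t raw)
    {
        int64_t ticks = ticks_offset + raw;
        if (ticks_prev > ticks) {
            /* Non-increasing raw ticks, e.g. after software suspend:
             * fold the difference into the offset so callers never
             * see the clock run backwards. */
            ticks_offset += ticks_prev - ticks;
            ticks = ticks_prev;
        }
        ticks_prev = ticks;
        return ticks;
    }

    int main(void)
    {
        printf("%lld\n", (long long)get_ticks(100)); /* 100 */
        printf("%lld\n", (long long)get_ticks(90));  /* still 100: clamped */
        printf("%lld\n", (long long)get_ticks(95));  /* 105: offset is now 10 */
        return 0;
    }

The second call sees the raw counter go backwards; the clamp folds
the difference into the offset, so the third call continues from the
highest value handed out so far.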

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 1382977938-13844-1-git-send-email-pbonzini@redhat.com
Signed-off-by: Anthony Liguori <aliguori@amazon.com>
diff --git a/cpus.c b/cpus.c
index 912938c..01d128d 100644
--- a/cpus.c
+++ b/cpus.c
@@ -165,36 +165,38 @@
 /* Caller must hold the BQL */
 int64_t cpu_get_ticks(void)
 {
+    int64_t ticks;
+
     if (use_icount) {
         return cpu_get_icount();
     }
-    if (!timers_state.cpu_ticks_enabled) {
-        return timers_state.cpu_ticks_offset;
-    } else {
-        int64_t ticks;
-        ticks = cpu_get_real_ticks();
-        if (timers_state.cpu_ticks_prev > ticks) {
-            /* Note: non increasing ticks may happen if the host uses
-               software suspend */
-            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
-        }
-        timers_state.cpu_ticks_prev = ticks;
-        return ticks + timers_state.cpu_ticks_offset;
+
+    ticks = timers_state.cpu_ticks_offset;
+    if (timers_state.cpu_ticks_enabled) {
+        ticks += cpu_get_real_ticks();
     }
+
+    if (timers_state.cpu_ticks_prev > ticks) {
+        /* Note: non increasing ticks may happen if the host uses
+           software suspend */
+        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
+        ticks = timers_state.cpu_ticks_prev;
+    }
+
+    timers_state.cpu_ticks_prev = ticks;
+    return ticks;
 }
 
 static int64_t cpu_get_clock_locked(void)
 {
-    int64_t ti;
+    int64_t ticks;
 
-    if (!timers_state.cpu_ticks_enabled) {
-        ti = timers_state.cpu_clock_offset;
-    } else {
-        ti = get_clock();
-        ti += timers_state.cpu_clock_offset;
+    ticks = timers_state.cpu_clock_offset;
+    if (timers_state.cpu_ticks_enabled) {
+        ticks += get_clock();
     }
 
-    return ti;
+    return ticks;
 }
 
 /* return the host CPU monotonic timer and handle stop/restart */
@@ -235,7 +237,7 @@
     /* Here, the real thing protected by seqlock is cpu_clock_offset. */
     seqlock_write_lock(&timers_state.vm_clock_seqlock);
     if (timers_state.cpu_ticks_enabled) {
-        timers_state.cpu_ticks_offset = cpu_get_ticks();
+        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
         timers_state.cpu_clock_offset = cpu_get_clock_locked();
         timers_state.cpu_ticks_enabled = 0;
     }