timers: fix stop/cont with -icount

Stop/cont commands are broken with -icount due to a deadlock.  The
real problem is that the computation of timers_state.cpu_ticks_offset
makes no sense with -icount enabled: we set it to an icount clock value
in cpu_disable_ticks, and subtract a TSC (or similar, whatever
cpu_get_real_ticks happens to return) value in cpu_enable_ticks.
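To make the offset bookkeeping concrete, here is a minimal, self-contained sketch of the
pattern that cpu_get_ticks/cpu_enable_ticks/cpu_disable_ticks rely on (fake_real_ticks and
the helper names are illustrative stand-ins, not QEMU code): while ticks run, the reported
value is offset + real ticks; stopping folds the current real ticks into the offset;
resuming subtracts them again.  The scheme only stays consistent if the fold and the
subtraction use the same clock, which is exactly what the old cpu_disable_ticks violated
under -icount.

#include <inttypes.h>
#include <stdio.h>

/* Illustrative stand-in for cpu_get_real_ticks() (e.g. the host TSC). */
static int64_t fake_real_ticks;

static struct {
    int64_t cpu_ticks_offset;
    int     cpu_ticks_enabled;
} timers_state;

/* Reported tick count: the offset, plus real ticks while running. */
static int64_t get_ticks(void)
{
    int64_t ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += fake_real_ticks;
    }
    return ticks;
}

/* "stop": fold the current real ticks into the offset. */
static void disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += fake_real_ticks;
        timers_state.cpu_ticks_enabled = 0;
    }
}

/* "cont": subtract the current real ticks so the count stays put. */
static void enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= fake_real_ticks;
        timers_state.cpu_ticks_enabled = 1;
    }
}

int main(void)
{
    enable_ticks();
    fake_real_ticks = 1000;                          /* host runs          */
    printf("running: %" PRId64 "\n", get_ticks());   /* 1000               */
    disable_ticks();                                 /* stop               */
    fake_real_ticks = 5000;                          /* host keeps ticking */
    printf("stopped: %" PRId64 "\n", get_ticks());   /* still 1000         */
    enable_ticks();                                  /* cont               */
    fake_real_ticks = 6000;
    printf("resumed: %" PRId64 "\n", get_ticks());   /* 2000               */
    return 0;
}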

The fix is simple.  timers_state.cpu_ticks_offset is only used
together with cpu_get_real_ticks, so we can use cpu_get_real_ticks
in cpu_disable_ticks.  There is no need to update cpu_ticks_prev
at the time cpu_disable_ticks is called; instead, we can do it
the next time cpu_get_ticks is called.
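The deferral is safe because the monotonicity clamp in cpu_get_ticks operates on the
offset-adjusted value on every read, so a stale cpu_ticks_prev is simply caught and
absorbed the next time the function runs.  A rough sketch of that clamp, reusing the
illustrative names from the sketch above (again, not the actual QEMU code):

static int64_t cpu_ticks_prev;

static int64_t get_ticks_clamped(void)
{
    int64_t ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += fake_real_ticks;
    }
    /* If the value went backwards (e.g. the host counter jumped back
     * after software suspend), absorb the difference into the offset
     * and report the previous value, so callers never see time move
     * backwards. */
    if (cpu_ticks_prev > ticks) {
        timers_state.cpu_ticks_offset += cpu_ticks_prev - ticks;
        ticks = cpu_ticks_prev;
    }
    cpu_ticks_prev = ticks;
    return ticks;
}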

The change to cpu_disable_ticks is the important part of the patch.
The rest modifies the code to always check timers_state.cpu_ticks_prev,
even when the ticks are not advancing (i.e. the VM is stopped).  It also
makes a similar change to cpu_get_clock_locked, so that the code remains
similar for cpu_get_ticks and cpu_get_clock_locked.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 1382977938-13844-1-git-send-email-pbonzini@redhat.com
Signed-off-by: Anthony Liguori <aliguori@amazon.com>
Author: Paolo Bonzini, 2013-10-28 17:32:18 +01:00 (committed by Anthony Liguori)
parent cd5be5829c
commit 5f3e31012e

cpus.c

@@ -165,36 +165,38 @@ int64_t cpu_get_icount(void)
 /* Caller must hold the BQL */
 int64_t cpu_get_ticks(void)
 {
+    int64_t ticks;
+
     if (use_icount) {
         return cpu_get_icount();
     }
-    if (!timers_state.cpu_ticks_enabled) {
-        return timers_state.cpu_ticks_offset;
-    } else {
-        int64_t ticks;
-        ticks = cpu_get_real_ticks();
-        if (timers_state.cpu_ticks_prev > ticks) {
-            /* Note: non increasing ticks may happen if the host uses
-               software suspend */
-            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
-        }
-        timers_state.cpu_ticks_prev = ticks;
-        return ticks + timers_state.cpu_ticks_offset;
+
+    ticks = timers_state.cpu_ticks_offset;
+    if (timers_state.cpu_ticks_enabled) {
+        ticks += cpu_get_real_ticks();
     }
+
+    if (timers_state.cpu_ticks_prev > ticks) {
+        /* Note: non increasing ticks may happen if the host uses
+           software suspend */
+        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
+        ticks = timers_state.cpu_ticks_prev;
+    }
+
+    timers_state.cpu_ticks_prev = ticks;
+    return ticks;
 }

 static int64_t cpu_get_clock_locked(void)
 {
-    int64_t ti;
+    int64_t ticks;

-    if (!timers_state.cpu_ticks_enabled) {
-        ti = timers_state.cpu_clock_offset;
-    } else {
-        ti = get_clock();
-        ti += timers_state.cpu_clock_offset;
+    ticks = timers_state.cpu_clock_offset;
+    if (timers_state.cpu_ticks_enabled) {
+        ticks += get_clock();
     }
-    return ti;
+
+    return ticks;
 }

 /* return the host CPU monotonic timer and handle stop/restart */
@@ -235,7 +237,7 @@ void cpu_disable_ticks(void)
     /* Here, the really thing protected by seqlock is cpu_clock_offset. */
     seqlock_write_lock(&timers_state.vm_clock_seqlock);
     if (timers_state.cpu_ticks_enabled) {
-        timers_state.cpu_ticks_offset = cpu_get_ticks();
+        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
         timers_state.cpu_clock_offset = cpu_get_clock_locked();
         timers_state.cpu_ticks_enabled = 0;
     }