#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H

#include "qemu/typedefs.h"
#include "qemu-common.h"
#include "qemu/notify.h"

/* timers */

#define SCALE_MS 1000000
#define SCALE_US 1000
#define SCALE_NS 1

/**
 * QEMUClockType:
 *
 * The following clock types are available:
 *
 * @QEMU_CLOCK_REALTIME: Real time clock
 *
 * The real time clock should be used only for stuff which does not
 * change the virtual machine state, as it is run even if the virtual
 * machine is stopped. The real time clock has a frequency of 1000
 * Hz.
 *
 * Formerly rt_clock
 *
 * @QEMU_CLOCK_VIRTUAL: virtual clock
 *
 * The virtual clock is only run during the emulation. It is stopped
 * when the virtual machine is stopped. Virtual timers use a high
 * precision clock, usually cpu cycles (use ticks_per_sec).
 *
 * Formerly vm_clock
 *
 * @QEMU_CLOCK_HOST: host clock
 *
 * The host clock should be used for device models that emulate accurate
 * real time sources. It will continue to run when the virtual machine
 * is suspended, and it will reflect system time changes the host may
 * undergo (e.g. due to NTP). The host clock has the same precision as
 * the virtual clock.
 *
 * Formerly host_clock
 */

typedef enum {
    QEMU_CLOCK_REALTIME = 0,
    QEMU_CLOCK_VIRTUAL = 1,
    QEMU_CLOCK_HOST = 2,
    QEMU_CLOCK_MAX
} QEMUClockType;

typedef struct QEMUClock QEMUClock;
typedef struct QEMUTimerList QEMUTimerList;

struct QEMUTimerListGroup {
    QEMUTimerList *tl[QEMU_CLOCK_MAX];
};

typedef void QEMUTimerCB(void *opaque);
typedef void QEMUTimerListNotifyCB(void *opaque);

struct QEMUTimer {
    int64_t expire_time; /* in nanoseconds */
    QEMUTimerList *timer_list;
    QEMUTimerCB *cb;
    void *opaque;
    QEMUTimer *next;
    int scale;
};

extern QEMUTimerListGroup main_loop_tlg;
extern QEMUClock *qemu_clocks[QEMU_CLOCK_MAX];

/**
 * qemu_clock_ptr:
 * @type: type of clock
 *
 * Translate a clock type into a pointer to QEMUClock object.
 *
 * Returns: a pointer to the QEMUClock object
 */
static inline QEMUClock *qemu_clock_ptr(QEMUClockType type)
{
    return qemu_clocks[type];
}

/* These three clocks are maintained here with separate variable
 * names for compatibility only.
 */
#define rt_clock (qemu_clock_ptr(QEMU_CLOCK_REALTIME))
#define vm_clock (qemu_clock_ptr(QEMU_CLOCK_VIRTUAL))
#define host_clock (qemu_clock_ptr(QEMU_CLOCK_HOST))

int64_t qemu_get_clock_ns(QEMUClock *clock);
bool qemu_clock_has_timers(QEMUClock *clock);
bool qemu_clock_expired(QEMUClock *clock);
int64_t qemu_clock_deadline(QEMUClock *clock);
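
/*
 * Example (illustrative sketch, not part of the API): read the current
 * virtual time and run any vm_clock timers that are already past their
 * deadline.  The variable name is hypothetical; qemu_run_timers() is
 * declared further down in this header.
 *
 *     int64_t now_ns = qemu_get_clock_ns(vm_clock);
 *
 *     if (qemu_clock_has_timers(vm_clock) && qemu_clock_expired(vm_clock)) {
 *         qemu_run_timers(vm_clock);
 *     }
 */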

/**
 * qemu_clock_deadline_ns:
 * @clock: the clock to operate on
 *
 * Calculate the timeout of the earliest expiring timer
 * in nanoseconds, or -1 if no timer is set to expire.
 *
 * Returns: time until expiry in nanoseconds or -1
 */
int64_t qemu_clock_deadline_ns(QEMUClock *clock);

/**
 * qemu_clock_use_for_deadline:
 * @clock: the clock to operate on
 *
 * Determine whether a clock should be used for deadline
 * calculations. Some clocks, for instance vm_clock with
 * use_icount set, do not count in nanoseconds. Such clocks
 * are not used for deadline calculations, and are presumed
 * to interrupt any poll using qemu_notify/aio_notify
 * etc.
 *
 * Returns: true if the clock runs in nanoseconds and
 * should be used for a deadline.
 */
bool qemu_clock_use_for_deadline(QEMUClock *clock);

/**
 * qemu_clock_get_main_loop_timerlist:
 * @clock: the clock to operate on
 *
 * Return the default timer list associated with a clock.
 *
 * Returns: the default timer list
 */
QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClock *clock);

/**
 * qemu_clock_notify:
 * @clock: the clock to operate on
 *
 * Call the notifier callback connected with the default timer
 * list linked to the clock, or qemu_notify() if none.
 */
void qemu_clock_notify(QEMUClock *clock);

/**
 * timerlist_new:
 * @type: the clock type to associate with the timerlist
 * @cb: the callback to call on notification
 * @opaque: the opaque pointer to pass to the callback
 *
 * Create a new timerlist associated with the clock of
 * type @type.
 *
 * Returns: a pointer to the QEMUTimerList created
 */
QEMUTimerList *timerlist_new(QEMUClockType type,
                             QEMUTimerListNotifyCB *cb, void *opaque);
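
/*
 * Example (illustrative sketch): create a private timer list driven by the
 * virtual clock.  The notify callback name is hypothetical, and the sketch
 * assumes qemu_notify_event() from main-loop.h is available to kick the
 * main loop when a timer on this list is modified.
 *
 *     static void my_tl_notify(void *opaque)
 *     {
 *         qemu_notify_event();
 *     }
 *
 *     QEMUTimerList *tl = timerlist_new(QEMU_CLOCK_VIRTUAL,
 *                                       my_tl_notify, NULL);
 */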

/**
 * timerlist_free:
 * @timer_list: the timer list to free
 *
 * Frees a timer_list. It must have no active timers.
 */
void timerlist_free(QEMUTimerList *timer_list);

/**
 * timerlist_has_timers:
 * @timer_list: the timer list to operate on
 *
 * Determine whether a timer list has active timers
 *
 * Returns: true if the timer list has timers.
 */
bool timerlist_has_timers(QEMUTimerList *timer_list);

/**
 * timerlist_expired:
 * @timer_list: the timer list to operate on
 *
 * Determine whether a timer list has any timers which
 * are expired.
 *
 * Returns: true if the timer list has timers which
 * have expired.
 */
bool timerlist_expired(QEMUTimerList *timer_list);

/**
 * timerlist_deadline:
 * @timer_list: the timer list to operate on
 *
 * Determine the deadline for a timer_list. This is
 * a legacy function which returns INT32_MAX if the
 * timer list has no timers or if the earliest timer
 * expires later than INT32_MAX nanoseconds away.
 *
 * Returns: the number of nanoseconds until the earliest
 * timer expires or INT32_MAX in the situations listed
 * above
 */
int64_t timerlist_deadline(QEMUTimerList *timer_list);

/**
 * timerlist_deadline_ns:
 * @timer_list: the timer list to operate on
 *
 * Determine the deadline for a timer_list, i.e.
 * the number of nanoseconds until the first timer
 * expires. Return -1 if there are no timers.
 *
 * Returns: the number of nanoseconds until the earliest
 * timer expires, or -1 if none
 */
int64_t timerlist_deadline_ns(QEMUTimerList *timer_list);

/**
 * timerlist_get_clock:
 * @timer_list: the timer list to operate on
 *
 * Determine the clock associated with a timer list.
 *
 * Returns: the clock associated with the timer list.
 */
QEMUClock *timerlist_get_clock(QEMUTimerList *timer_list);

/**
 * timerlist_run_timers:
 * @timer_list: the timer list to use
 *
 * Call all expired timers associated with the timer list.
 *
 * Returns: true if any timer expired
 */
bool timerlist_run_timers(QEMUTimerList *timer_list);
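
/*
 * Example (illustrative sketch): wait for the next deadline of a single
 * timer list and then run whatever has expired.  qemu_poll_ns() is declared
 * further down in this header; tl, fds and nfds are hypothetical.
 *
 *     int64_t timeout = timerlist_deadline_ns(tl);
 *
 *     qemu_poll_ns(fds, nfds, timeout);
 *     if (timerlist_expired(tl)) {
 *         timerlist_run_timers(tl);
 *     }
 */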

/**
 * timerlist_notify:
 * @timer_list: the timer list to use
 *
 * Call the notifier callback associated with the timer list.
 */
void timerlist_notify(QEMUTimerList *timer_list);

/**
 * timerlistgroup_init:
 * @tlg: the timer list group
 * @cb: the callback to call when a notify is required
 * @opaque: the opaque pointer to be passed to the callback.
 *
 * Initialise a timer list group. This must already be
 * allocated in memory and zeroed. The notifier callback is
 * called whenever a clock in the timer list group is
 * reenabled or whenever a timer associated with any timer
 * list is modified. If @cb is specified as null, qemu_notify()
 * is used instead.
 */
void timerlistgroup_init(QEMUTimerListGroup *tlg,
                         QEMUTimerListNotifyCB *cb, void *opaque);
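
/*
 * Example (illustrative sketch): set up a zero-initialised timer list group
 * with no private notifier, so notifications fall back to qemu_notify() as
 * described above.  The variable name is hypothetical.
 *
 *     static QEMUTimerListGroup my_tlg;
 *
 *     timerlistgroup_init(&my_tlg, NULL, NULL);
 */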

/**
 * timerlistgroup_deinit:
 * @tlg: the timer list group
 *
 * Deinitialise a timer list group. This must already be
 * initialised. Note the memory is not freed.
 */
void timerlistgroup_deinit(QEMUTimerListGroup *tlg);

/**
 * timerlistgroup_run_timers:
 * @tlg: the timer list group
 *
 * Run the timers associated with a timer list group.
 * This will run timers on multiple clocks.
 *
 * Returns: true if any timer callback ran
 */
bool timerlistgroup_run_timers(QEMUTimerListGroup *tlg);

/**
 * timerlistgroup_deadline_ns:
 * @tlg: the timer list group
 *
 * Determine the deadline of the soonest timer to
 * expire associated with any timer list linked to
 * the timer list group. Only clocks suitable for
 * deadline calculation are included.
 *
 * Returns: the deadline in nanoseconds or -1 if no
 * timers are to expire.
 */
int64_t timerlistgroup_deadline_ns(QEMUTimerListGroup *tlg);

/**
 * qemu_timeout_ns_to_ms:
 * @ns: nanosecond timeout value
 *
 * Convert a nanosecond timeout value (or -1) to
 * a millisecond value (or -1), always rounding up.
 *
 * Returns: millisecond timeout value
 */
int qemu_timeout_ns_to_ms(int64_t ns);

/**
 * qemu_poll_ns:
 * @fds: Array of file descriptors
 * @nfds: number of file descriptors
 * @timeout: timeout in nanoseconds
 *
 * Perform a poll like g_poll but with a timeout in nanoseconds.
 * See g_poll documentation for further details.
 *
 * Returns: number of fds ready
 */
int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout);
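
/*
 * Example (illustrative sketch): a main-loop style wait that sleeps until
 * the next timer deadline of the main loop's timer list group, then runs
 * any timers that have expired.  fds and nfds are hypothetical; with
 * g_poll() you would first convert the deadline using qemu_timeout_ns_to_ms().
 *
 *     int64_t timeout = timerlistgroup_deadline_ns(&main_loop_tlg);
 *
 *     qemu_poll_ns(fds, nfds, timeout);
 *     timerlistgroup_run_timers(&main_loop_tlg);
 */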

void qemu_clock_enable(QEMUClock *clock, bool enabled);

/**
 * qemu_clock_warp:
 * @clock: the clock to operate on
 *
 * Warp the clock forward in icount mode.  When the virtual CPUs are all
 * sleeping while a vm_clock timer is pending, time driven by the
 * instruction counter would otherwise never reach the timer and the
 * guest would deadlock; instead, time is warped towards the next
 * vm_clock event, optionally after a short sleep so that the warps are
 * less visible externally.  This is called whenever a vm_clock timer is
 * adjusted and at strategic points in the CPU thread, so the instruction
 * counter is synchronized before the CPU starts running.  The warp timer
 * is disabled while the CPU is running, because the instruction counter
 * then makes progress on its own.
 */
void qemu_clock_warp(QEMUClock *clock);

void qemu_register_clock_reset_notifier(QEMUClock *clock, Notifier *notifier);
void qemu_unregister_clock_reset_notifier(QEMUClock *clock,
                                          Notifier *notifier);

QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
                          QEMUTimerCB *cb, void *opaque);

/**
 * timer_init:
 * @ts: the timer to be initialised
 * @timer_list: the timer list to attach the timer to
 * @scale: the scale value for the timer
 * @cb: the callback to be called when the timer expires
 * @opaque: the opaque pointer to be passed to the callback
 *
 * Initialise a new timer and associate it with @timer_list.
 * The caller is responsible for allocating the memory.
 *
 * You need not call an explicit deinit; simply make sure the
 * timer is not on a list, using timer_del.
 */
void timer_init(QEMUTimer *ts,
                QEMUTimerList *timer_list, int scale,
                QEMUTimerCB *cb, void *opaque);
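
/*
 * Example (illustrative sketch): a timer embedded in a caller-allocated
 * structure and initialised against a timer list.  MyDevice, dev, tl and
 * my_tick_cb are hypothetical; my_tick_cb is assumed to be a previously
 * defined QEMUTimerCB and tl a previously created QEMUTimerList.
 *
 *     typedef struct MyDevice {
 *         QEMUTimer tick;
 *     } MyDevice;
 *
 *     timer_init(&dev->tick, tl, SCALE_NS, my_tick_cb, dev);
 */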

/**
 * timer_new_tl:
 * @timer_list: the timer list to attach the timer to
 * @scale: the scale value for the timer
 * @cb: the callback to be called when the timer expires
 * @opaque: the opaque pointer to be passed to the callback
 *
 * Create a new timer and associate it with @timer_list.
 * The memory is allocated by the function.
 *
 * This is not the preferred interface unless you know you
 * are going to call timer_free. Use timer_init instead.
 *
 * Returns: a pointer to the timer
 */
static inline QEMUTimer *timer_new_tl(QEMUTimerList *timer_list,
                                      int scale,
                                      QEMUTimerCB *cb,
                                      void *opaque)
{
    QEMUTimer *ts = g_malloc0(sizeof(QEMUTimer));
    timer_init(ts, timer_list, scale, cb, opaque);
    return ts;
}
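
/*
 * Example (illustrative sketch): allocate a nanosecond-scale timer on a
 * timer list, arm it 1 ms into the future, and tear it down again.  The
 * callback, opaque pointer and timer list are hypothetical, and the clock
 * read assumes the list was created for the virtual clock.
 *
 *     QEMUTimer *t = timer_new_tl(tl, SCALE_NS, my_cb, my_opaque);
 *
 *     timer_mod_ns(t, qemu_get_clock_ns(vm_clock) + 1000000);
 *
 *     timer_del(t);
 *     timer_free(t);
 */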

void qemu_free_timer(QEMUTimer *ts);
void qemu_del_timer(QEMUTimer *ts);
void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time);
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
bool timer_pending(QEMUTimer *ts);
bool timer_expired(QEMUTimer *timer_head, int64_t current_time);
uint64_t timer_expire_time_ns(QEMUTimer *ts);

/* New format calling conventions for timers */

/**
 * timer_free:
 * @ts: the timer
 *
 * Free a timer (it must not be on the active list)
 */
static inline void timer_free(QEMUTimer *ts)
{
    qemu_free_timer(ts);
}

/**
 * timer_del:
 * @ts: the timer
 *
 * Delete a timer from the active list.
 */
static inline void timer_del(QEMUTimer *ts)
{
    qemu_del_timer(ts);
}

/**
 * timer_mod_ns:
 * @ts: the timer
 * @expire_time: the expiry time in nanoseconds
 *
 * Modify a timer to expire at @expire_time
 */
static inline void timer_mod_ns(QEMUTimer *ts, int64_t expire_time)
{
    qemu_mod_timer_ns(ts, expire_time);
}

/**
 * timer_mod:
 * @ts: the timer
 * @expire_time: the expire time in the units associated with the timer
 *
 * Modify a timer to expire at @expire_time, taking into
 * account the scale associated with the timer.
 */
static inline void timer_mod(QEMUTimer *ts, int64_t expire_time)
{
    qemu_mod_timer(ts, expire_time);
}

/**
 * qemu_run_timers:
 * @clock: clock on which to operate
 *
 * Run all the timers associated with the default timer list
 * of a clock.
 *
 * Returns: true if any timer ran.
 */
bool qemu_run_timers(QEMUClock *clock);

/**
 * qemu_run_all_timers:
 *
 * Run all the timers associated with the default timer list
 * of every clock.
 *
 * Returns: true if any timer ran.
 */
bool qemu_run_all_timers(void);

void configure_alarms(char const *opt);
void init_clocks(void);
int init_timer_alarm(void);

int64_t cpu_get_ticks(void);
void cpu_enable_ticks(void);
void cpu_disable_ticks(void);

/**
 * qemu_soonest_timeout:
 * @timeout1: first timeout in nanoseconds (or -1 for infinite)
 * @timeout2: second timeout in nanoseconds (or -1 for infinite)
 *
 * Calculates the soonest of two timeout values. -1 means infinite, which
 * is later than any other value.
 *
 * Returns: soonest timeout value in nanoseconds (or -1 for infinite)
 */
static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2)
{
    /* we can abuse the fact that -1 (which means infinite) is a maximal
     * value when cast to unsigned. As this is disgusting, it's kept in
     * one inline function.
     */
    return ((uint64_t) timeout1 < (uint64_t) timeout2) ? timeout1 : timeout2;
}
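
/*
 * Example (illustrative sketch): combine the timer deadline with a caller
 * supplied timeout, so that whichever comes first bounds the wait.  The
 * caller_timeout_ns, fds and nfds variables are hypothetical; -1 stands
 * for "no limit".
 *
 *     int64_t deadline = timerlistgroup_deadline_ns(&main_loop_tlg);
 *     int64_t timeout = qemu_soonest_timeout(deadline, caller_timeout_ns);
 *
 *     qemu_poll_ns(fds, nfds, timeout);
 */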

/**
 * qemu_new_timer_ns:
 * @clock: the clock to associate with the timer
 * @cb: the callback to call when the timer expires
 * @opaque: the opaque pointer to pass to the callback
 *
 * Create a new timer with nanosecond scale on the default timer list
 * associated with the clock.
 *
 * Returns: a pointer to the newly created timer
 */
static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_NS, cb, opaque);
}

/**
 * qemu_new_timer_us:
 * @clock: the clock to associate with the timer
 * @cb: the callback to call when the timer expires
 * @opaque: the opaque pointer to pass to the callback
 *
 * Create a new timer with microsecond scale on the default timer list
 * associated with the clock.
 *
 * Returns: a pointer to the newly created timer
 */
static inline QEMUTimer *qemu_new_timer_us(QEMUClock *clock,
                                           QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_US, cb, opaque);
}

/**
 * qemu_new_timer_ms:
 * @clock: the clock to associate with the timer
 * @cb: the callback to call when the timer expires
 * @opaque: the opaque pointer to pass to the callback
 *
 * Create a new timer with millisecond scale on the default timer list
 * associated with the clock.
 *
 * Returns: a pointer to the newly created timer
 */
static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock,
                                           QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_MS, cb, opaque);
}

static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
{
    return qemu_get_clock_ns(clock) / SCALE_MS;
}
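
/*
 * Example (illustrative sketch): a periodic tick using the legacy
 * interface, re-armed from its own callback every 100 ms of virtual time.
 * The timer variable and callback name are hypothetical; the opaque
 * pointer is unused here.
 *
 *     static QEMUTimer *my_tick_timer;
 *
 *     static void my_100ms_cb(void *opaque)
 *     {
 *         qemu_mod_timer(my_tick_timer, qemu_get_clock_ms(vm_clock) + 100);
 *     }
 *
 *     my_tick_timer = qemu_new_timer_ms(vm_clock, my_100ms_cb, NULL);
 *     qemu_mod_timer(my_tick_timer, qemu_get_clock_ms(vm_clock) + 100);
 */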

static inline int64_t get_ticks_per_sec(void)
{
    return 1000000000LL;
}

/* real time host monotonic timer */
static inline int64_t get_clock_realtime(void)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
}

/* Warning: don't insert tracepoints into these functions, they are
   also used by simpletrace backend and tracepoints would cause
   an infinite recursion! */
#ifdef _WIN32
extern int64_t clock_freq;

static inline int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
}

#else

extern int use_rt_clock;

static inline int64_t get_clock(void)
{
#ifdef CLOCK_MONOTONIC
    if (use_rt_clock) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    } else
#endif
    {
        /* XXX: using gettimeofday leads to problems if the date
           changes, so it should be avoided. */
        return get_clock_realtime();
    }
}
#endif

void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);

/* icount */
int64_t cpu_get_icount(void);
int64_t cpu_get_clock(void);

/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
    */
    __asm__ __volatile__ ("mftb %0\n\t"
                          "cmpwi %0,0\n\t"
                          "beq- $-8"
                          : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ ("mfspr %1,269\n\t"  /* mftbu */
                          "mfspr %L0,268\n\t" /* mftb */
                          "mfspr %0,269\n\t"  /* mftbu */
                          "cmpw %0,%1\n\t"
                          "bne $-16"
                          : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    /* We need an %o or %g register for this. For recent enough gcc
       there is an "h" constraint for that. Don't bother with that. */
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%%g1; srlx %%g1,32,%0; mov %%g1,%1"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low) : : "g1");
    return rval.i64;
#endif
}

#elif defined(__mips__) && \
    ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
/*
 * binutils wants to use rdhwr only on mips32r2,
 * but as the Linux kernel emulates it, it's fine
 * to use it.
 */
#define MIPS_RDHWR(rd, value) {                 \
        __asm__ __volatile__ (".set push\n\t"   \
                              ".set mips32r2\n\t" \
                              "rdhwr %0, "rd"\n\t" \
                              ".set pop"        \
                              : "=r" (value));  \
    }

static inline int64_t cpu_get_real_ticks(void)
{
    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count) {
        MIPS_RDHWR("$3", cyc_per_count);
    }

    MIPS_RDHWR("$2", count);
    return (int64_t)(count * cyc_per_count);
}

#elif defined(__alpha__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint64_t cc;
    uint32_t cur, ofs;

    asm volatile("rpcc %0" : "=r"(cc));
    cur = cc;
    ofs = cc >> 32;
    return cur - ofs;
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value. This will be
   totally wrong, but hopefully better than nothing. */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t dev_time;
#endif

#endif