Replace get_ticks_per_sec() with NANOSECONDS_PER_SECOND

This patch replaces get_ticks_per_sec() calls with the macro
NANOSECONDS_PER_SECOND. As no callers remain, get_ticks_per_sec() itself
is removed. The replacement makes the code easier to read, because the
time unit is explicit at each call site.

For example,

    timer_mod(fdctrl->result_timer,
	      qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / 50));

NANOSECONDS_PER_SECOND makes it obvious that the value returned by
qemu_clock_get_ns() and the expression on the right-hand side of the
plus are expressed in the same unit, nanoseconds.
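
As a minimal sketch of the point (illustrative only: the helper below is
made up, while timer_mod(), qemu_clock_get_ns() and NANOSECONDS_PER_SECOND
are the QEMU interfaces this patch touches; the macro expands to the same
1000000000LL that get_ticks_per_sec() used to return, as the hunk removing
get_ticks_per_sec() near the end of this diff shows):

    #include "qemu/osdep.h"
    #include "qemu/timer.h"   /* NANOSECONDS_PER_SECOND, timer_mod(), ... */

    /*
     * Illustrative helper, not part of the patch: rearm a virtual-clock
     * timer to fire 1/50 s (20 ms) from now.  With the macro, both
     * operands of the '+' are visibly nanoseconds.
     */
    static void rearm_result_timer_example(QEMUTimer *result_timer)
    {
        timer_mod(result_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  NANOSECONDS_PER_SECOND / 50);
    }

The same reasoning applies to the muldiv64() conversions below, where
NANOSECONDS_PER_SECOND now names the nanosecond operand's unit explicitly.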

Signed-off-by: Rutuja Shah <rutu.shah.26@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Authored by Rutuja Shah, 2016-03-21 21:32:30 +05:30; committed by Paolo Bonzini
parent 4771d756f4
commit 73bcb24d93
53 changed files with 143 additions and 134 deletions


@ -1869,8 +1869,7 @@ static void audio_init (void)
}
conf.period.ticks = 1;
} else {
conf.period.ticks =
muldiv64 (1, get_ticks_per_sec (), conf.period.hertz);
conf.period.ticks = NANOSECONDS_PER_SECOND / conf.period.hertz;
}
e = qemu_add_vm_change_state_handler (audio_vm_change_state_handler, s);


@ -49,8 +49,8 @@ static int no_run_out (HWVoiceOut *hw, int live)
now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
ticks = now - no->old_ticks;
bytes = muldiv64 (ticks, hw->info.bytes_per_second, get_ticks_per_sec ());
bytes = audio_MIN (bytes, INT_MAX);
bytes = muldiv64(ticks, hw->info.bytes_per_second, NANOSECONDS_PER_SECOND);
bytes = audio_MIN(bytes, INT_MAX);
samples = bytes >> hw->info.shift;
no->old_ticks = now;
@ -61,7 +61,7 @@ static int no_run_out (HWVoiceOut *hw, int live)
static int no_write (SWVoiceOut *sw, void *buf, int len)
{
return audio_pcm_sw_write (sw, buf, len);
return audio_pcm_sw_write(sw, buf, len);
}
static int no_init_out(HWVoiceOut *hw, struct audsettings *as, void *drv_opaque)
@ -106,7 +106,7 @@ static int no_run_in (HWVoiceIn *hw)
int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
int64_t ticks = now - no->old_ticks;
int64_t bytes =
muldiv64 (ticks, hw->info.bytes_per_second, get_ticks_per_sec ());
muldiv64(ticks, hw->info.bytes_per_second, NANOSECONDS_PER_SECOND);
no->old_ticks = now;
bytes = audio_MIN (bytes, INT_MAX);


@ -104,11 +104,11 @@ static int rate_get_samples (struct audio_pcm_info *info, SpiceRateCtl *rate)
now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
ticks = now - rate->start_ticks;
bytes = muldiv64 (ticks, info->bytes_per_second, get_ticks_per_sec ());
bytes = muldiv64(ticks, info->bytes_per_second, NANOSECONDS_PER_SECOND);
samples = (bytes - rate->bytes_sent) >> info->shift;
if (samples < 0 || samples > 65536) {
error_report("Resetting rate control (%" PRId64 " samples)", samples);
rate_start (rate);
rate_start(rate);
samples = 0;
}
rate->bytes_sent += samples << info->shift;


@ -51,7 +51,7 @@ static int wav_run_out (HWVoiceOut *hw, int live)
int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
int64_t ticks = now - wav->old_ticks;
int64_t bytes =
muldiv64 (ticks, hw->info.bytes_per_second, get_ticks_per_sec ());
muldiv64(ticks, hw->info.bytes_per_second, NANOSECONDS_PER_SECOND);
if (bytes > INT_MAX) {
samples = INT_MAX >> hw->info.shift;


@ -337,7 +337,7 @@ static int baum_eat_packet(BaumDriverState *baum, const uint8_t *buf, int len)
/* Allow 100ms to complete the DisplayData packet */
timer_mod(baum->cellCount_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec() / 10);
NANOSECONDS_PER_SECOND / 10);
for (i = 0; i < baum->x * baum->y ; i++) {
EAT(c);
cells[i] = c;


@ -347,7 +347,7 @@ static void qed_start_need_check_timer(BDRVQEDState *s)
* migration.
*/
timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}
/* It's okay to call this multiple times or when no timer is started */

cpus.c

@ -276,7 +276,7 @@ void cpu_disable_ticks(void)
fairly approximate, so ignore small variation.
When the guest is idle real and virtual time will be aligned in
the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)
#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)
static void icount_adjust(void)
{
@ -327,7 +327,7 @@ static void icount_adjust_vm(void *opaque)
{
timer_mod(icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec() / 10);
NANOSECONDS_PER_SECOND / 10);
icount_adjust();
}
@ -674,7 +674,7 @@ void configure_icount(QemuOpts *opts, Error **errp)
icount_adjust_vm, NULL);
timer_mod(icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec() / 10);
NANOSECONDS_PER_SECOND / 10);
}
/***********************************************************/


@ -389,7 +389,7 @@ uint16_t acpi_pm1_evt_get_sts(ACPIREGS *ar)
acpi_pm_tmr_update function uses ns for setting the timer. */
int64_t d = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (d >= muldiv64(ar->tmr.overflow_time,
get_ticks_per_sec(), PM_TIMER_FREQUENCY)) {
NANOSECONDS_PER_SECOND, PM_TIMER_FREQUENCY)) {
ar->pm1.evt.sts |= ACPI_BITMASK_TIMER_STATUS;
}
return ar->pm1.evt.sts;
@ -483,7 +483,7 @@ void acpi_pm_tmr_update(ACPIREGS *ar, bool enable)
/* schedule a timer interruption if needed */
if (enable) {
expire_time = muldiv64(ar->tmr.overflow_time, get_ticks_per_sec(),
expire_time = muldiv64(ar->tmr.overflow_time, NANOSECONDS_PER_SECOND,
PM_TIMER_FREQUENCY);
timer_mod(ar->tmr.timer, expire_time);
} else {


@ -110,7 +110,7 @@ static inline uint32_t omap_timer_read(struct omap_mpu_timer_s *timer)
if (timer->st && timer->enable && timer->rate)
return timer->val - muldiv64(distance >> (timer->ptv + 1),
timer->rate, get_ticks_per_sec());
timer->rate, NANOSECONDS_PER_SECOND);
else
return timer->val;
}
@ -128,7 +128,7 @@ static inline void omap_timer_update(struct omap_mpu_timer_s *timer)
if (timer->enable && timer->st && timer->rate) {
timer->val = timer->reset_val; /* Should skip this on clk enable */
expires = muldiv64((uint64_t) timer->val << (timer->ptv + 1),
get_ticks_per_sec(), timer->rate);
NANOSECONDS_PER_SECOND, timer->rate);
/* If timer expiry would be sooner than in about 1 ms and
* auto-reload isn't set, then fire immediately. This is a hack
@ -136,10 +136,11 @@ static inline void omap_timer_update(struct omap_mpu_timer_s *timer)
* sets the interval to a very low value and polls the status bit
* in a busy loop when it wants to sleep just a couple of CPU
* ticks. */
if (expires > (get_ticks_per_sec() >> 10) || timer->ar)
if (expires > (NANOSECONDS_PER_SECOND >> 10) || timer->ar) {
timer_mod(timer->timer, timer->time + expires);
else
} else {
qemu_bh_schedule(timer->tick);
}
} else
timer_del(timer->timer);
}
@ -616,14 +617,14 @@ static void omap_ulpd_pm_write(void *opaque, hwaddr addr,
now -= s->ulpd_gauge_start;
/* 32-kHz ticks */
ticks = muldiv64(now, 32768, get_ticks_per_sec());
ticks = muldiv64(now, 32768, NANOSECONDS_PER_SECOND);
s->ulpd_pm_regs[0x00 >> 2] = (ticks >> 0) & 0xffff;
s->ulpd_pm_regs[0x04 >> 2] = (ticks >> 16) & 0xffff;
if (ticks >> 32) /* OVERFLOW_32K */
s->ulpd_pm_regs[0x14 >> 2] |= 1 << 2;
/* High frequency ticks */
ticks = muldiv64(now, 12000000, get_ticks_per_sec());
ticks = muldiv64(now, 12000000, NANOSECONDS_PER_SECOND);
s->ulpd_pm_regs[0x08 >> 2] = (ticks >> 0) & 0xffff;
s->ulpd_pm_regs[0x0c >> 2] = (ticks >> 16) & 0xffff;
if (ticks >> 32) /* OVERFLOW_HI_FREQ */
@ -3029,7 +3030,7 @@ static void omap_mcbsp_source_tick(void *opaque)
omap_mcbsp_rx_newdata(s);
timer_mod(s->source_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec());
NANOSECONDS_PER_SECOND);
}
static void omap_mcbsp_rx_start(struct omap_mcbsp_s *s)
@ -3075,7 +3076,7 @@ static void omap_mcbsp_sink_tick(void *opaque)
omap_mcbsp_tx_newdata(s);
timer_mod(s->sink_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec());
NANOSECONDS_PER_SECOND);
}
static void omap_mcbsp_tx_start(struct omap_mcbsp_s *s)


@ -405,7 +405,7 @@ static void spitz_keyboard_tick(void *opaque)
}
timer_mod(s->kbdtimer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec() / 32);
NANOSECONDS_PER_SECOND / 32);
}
static void spitz_keyboard_pre_map(SpitzKeyboardState *s)


@ -101,7 +101,7 @@ static void gptm_reload(gptm_state *s, int n, int reset)
tick += (int64_t)count * system_clock_scale;
} else if (s->config == 1) {
/* 32-bit RTC. 1Hz tick. */
tick += get_ticks_per_sec();
tick += NANOSECONDS_PER_SECOND;
} else if (s->mode[n] == 0xa) {
/* PWM mode. Not implemented. */
} else {


@ -1025,7 +1025,7 @@ static void strongarm_uart_update_parameters(StrongARMUARTState *s)
ssp.parity = parity;
ssp.data_bits = data_bits;
ssp.stop_bits = stop_bits;
s->char_transmit_time = (get_ticks_per_sec() / speed) * frame_size;
s->char_transmit_time = (NANOSECONDS_PER_SECOND / speed) * frame_size;
if (s->chr) {
qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp);
}


@ -170,7 +170,7 @@ static void timer_handler (int c, double interval_Sec)
s->ticking[n] = 1;
#ifdef DEBUG
interval = get_ticks_per_sec () * interval_Sec;
interval = NANOSECONDS_PER_SECOND * interval_Sec;
exp = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + interval;
s->exp[n] = exp;
#endif


@ -762,8 +762,8 @@ static void complete (SB16State *s)
freq = s->freq > 0 ? s->freq : 11025;
samples = dsp_get_lohi (s) + 1;
bytes = samples << s->fmt_stereo << (s->fmt_bits == 16);
ticks = muldiv64 (bytes, get_ticks_per_sec (), freq);
if (ticks < get_ticks_per_sec () / 1024) {
ticks = muldiv64(bytes, NANOSECONDS_PER_SECOND, freq);
if (ticks < NANOSECONDS_PER_SECOND / 1024) {
qemu_irq_raise (s->pic);
}
else {


@ -1939,8 +1939,8 @@ static void fdctrl_handle_readid(FDCtrl *fdctrl, int direction)
FDrive *cur_drv = get_cur_drv(fdctrl);
cur_drv->head = (fdctrl->fifo[1] >> 2) & 1;
timer_mod(fdctrl->result_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / 50));
timer_mod(fdctrl->result_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(NANOSECONDS_PER_SECOND / 50));
}
static void fdctrl_handle_format_track(FDCtrl *fdctrl, int direction)


@ -432,8 +432,8 @@ static void pflash_write (pflash_t *pfl, hwaddr offset,
}
pfl->status = 0x00;
/* Let's wait 5 seconds before chip erase is done */
timer_mod(pfl->timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() * 5));
timer_mod(pfl->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(NANOSECONDS_PER_SECOND * 5));
break;
case 0x30:
/* Sector erase */
@ -447,8 +447,8 @@ static void pflash_write (pflash_t *pfl, hwaddr offset,
}
pfl->status = 0x00;
/* Let's wait 1/2 second before sector erase is done */
timer_mod(pfl->timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / 2));
timer_mod(pfl->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(NANOSECONDS_PER_SECOND / 2));
break;
default:
DPRINTF("%s: invalid command %02x (wc 5)\n", __func__, cmd);


@ -363,7 +363,7 @@ static int csrhci_ioctl(struct CharDriverState *chr, int cmd, void *arg)
switch (cmd) {
case CHR_IOCTL_SERIAL_SET_PARAMS:
ssp = (QEMUSerialSetParams *) arg;
s->baud_delay = get_ticks_per_sec() / ssp->speed;
s->baud_delay = NANOSECONDS_PER_SECOND / ssp->speed;
/* Moments later... (but shorter than 100ms) */
s->modem_state |= CHR_TIOCM_CTS;
break;
@ -389,7 +389,7 @@ static void csrhci_reset(struct csrhci_s *s)
s->out_len = 0;
s->out_size = FIFO_LEN;
s->in_len = 0;
s->baud_delay = get_ticks_per_sec();
s->baud_delay = NANOSECONDS_PER_SECOND;
s->enable = 0;
s->in_hdr = INT_MAX;
s->in_data = INT_MAX;


@ -205,7 +205,7 @@ static void uart_parameters_setup(CadenceUARTState *s)
}
packet_size += ssp.data_bits + ssp.stop_bits;
s->char_tx_time = (get_ticks_per_sec() / ssp.speed) * packet_size;
s->char_tx_time = (NANOSECONDS_PER_SECOND / ssp.speed) * packet_size;
if (s->chr) {
qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp);
}
@ -479,7 +479,7 @@ static void cadence_uart_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
sysbus_init_irq(sbd, &s->irq);
s->char_tx_time = (get_ticks_per_sec() / 9600) * 10;
s->char_tx_time = (NANOSECONDS_PER_SECOND / 9600) * 10;
}
static int cadence_uart_post_load(void *opaque, int version_id)


@ -179,7 +179,7 @@ static void serial_update_parameters(SerialState *s)
ssp.parity = parity;
ssp.data_bits = data_bits;
ssp.stop_bits = stop_bits;
s->char_transmit_time = (get_ticks_per_sec() / speed) * frame_size;
s->char_transmit_time = (NANOSECONDS_PER_SECOND / speed) * frame_size;
qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp);
DPRINTF("speed=%d parity=%c data=%d stop=%d\n",
@ -217,8 +217,10 @@ static void serial_update_msl(SerialState *s)
/* The real 16550A apparently has a 250ns response latency to line status changes.
We'll be lazy and poll only every 10ms, and only poll it at all if MSI interrupts are turned on */
if (s->poll_msl)
timer_mod(s->modem_status_poll, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + get_ticks_per_sec() / 100);
if (s->poll_msl) {
timer_mod(s->modem_status_poll, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
NANOSECONDS_PER_SECOND / 100);
}
}
static gboolean serial_xmit(GIOChannel *chan, GIOCondition cond, void *opaque)
@ -824,7 +826,7 @@ static void serial_reset(void *opaque)
s->mcr = UART_MCR_OUT2;
s->scr = 0;
s->tsr_retry = 0;
s->char_transmit_time = (get_ticks_per_sec() / 9600) * 10;
s->char_transmit_time = (NANOSECONDS_PER_SECOND / 9600) * 10;
s->poll_msl = 0;
s->timeout_ipending = 0;


@ -236,9 +236,9 @@ static void vga_precise_update_retrace_info(VGACommonState *s)
r->total_chars = vtotal_lines * htotal_chars;
if (r->freq) {
r->ticks_per_char = get_ticks_per_sec() / (r->total_chars * r->freq);
r->ticks_per_char = NANOSECONDS_PER_SECOND / (r->total_chars * r->freq);
} else {
r->ticks_per_char = get_ticks_per_sec() / chars_per_sec;
r->ticks_per_char = NANOSECONDS_PER_SECOND / chars_per_sec;
}
r->vstart = vretr_start_line;
@ -266,7 +266,7 @@ static void vga_precise_update_retrace_info(VGACommonState *s)
"dots = %d\n"
"ticks/char = %" PRId64 "\n"
"\n",
(double) get_ticks_per_sec() / (r->ticks_per_char * r->total_chars),
(double) NANOSECONDS_PER_SECOND / (r->ticks_per_char * r->total_chars),
htotal_chars,
hretr_start_char,
hretr_skew_chars,


@ -112,7 +112,7 @@ static void set_next_tick(rc4030State *s)
tm_hz = 1000 / (s->itr + 1);
timer_mod(s->periodic_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec() / tm_hz);
NANOSECONDS_PER_SECOND / tm_hz);
}
/* called for accesses to rc4030 */


@ -975,8 +975,8 @@ static void ide_sector_write_cb(void *opaque, int ret)
that at the expense of slower write performances. Use this
option _only_ to install Windows 2000. You must disable it
for normal use. */
timer_mod(s->sector_write_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / 1000));
timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(NANOSECONDS_PER_SECOND / 1000));
} else {
ide_set_irq(s->bus);
}


@ -96,7 +96,7 @@ void hid_set_next_idle(HIDState *hs)
{
if (hs->idle) {
uint64_t expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec() * hs->idle * 4 / 1000;
NANOSECONDS_PER_SECOND * hs->idle * 4 / 1000;
if (!hs->idle_timer) {
hs->idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, hid_idle_timer, hs);
}


@ -291,7 +291,8 @@ static void tsc2005_pin_update(TSC2005State *s)
s->precision = s->nextprecision;
s->function = s->nextfunction;
s->pdst = !s->pnd0; /* Synchronised on internal clock */
expires = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() >> 7);
expires = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(NANOSECONDS_PER_SECOND >> 7);
timer_mod(s->timer, expires);
}


@ -835,7 +835,8 @@ static void tsc210x_pin_update(TSC210xState *s)
s->busy = 1;
s->precision = s->nextprecision;
s->function = s->nextfunction;
expires = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() >> 10);
expires = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(NANOSECONDS_PER_SECOND >> 10);
timer_mod(s->timer, expires);
}


@ -230,7 +230,7 @@ int pic_read_irq(DeviceState *d)
printf("IRQ%d latency=%0.3fus\n",
irq,
(double)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
irq_time[irq]) * 1000000.0 / get_ticks_per_sec());
irq_time[irq]) * 1000000.0 / NANOSECONDS_PER_SECOND);
#endif
DPRINTF("pic_interrupt: irq=%d\n", irq);
return intno;


@ -171,7 +171,8 @@ static uint64_t arm_sysctl_read(void *opaque, hwaddr offset,
case 0x58: /* BOOTCS */
return 0;
case 0x5c: /* 24MHz */
return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 24000000, get_ticks_per_sec());
return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 24000000,
NANOSECONDS_PER_SECOND);
case 0x60: /* MISC */
return 0;
case 0x84: /* PROCID0 */


@ -145,7 +145,7 @@ static void cuda_update_irq(CUDAState *s)
static uint64_t get_tb(uint64_t time, uint64_t freq)
{
return muldiv64(time, freq, get_ticks_per_sec());
return muldiv64(time, freq, NANOSECONDS_PER_SECOND);
}
static unsigned int get_counter(CUDATimer *ti)
@ -189,7 +189,7 @@ static int64_t get_next_irq_time(CUDATimer *s, int64_t current_time)
/* current counter value */
d = muldiv64(current_time - s->load_time,
CUDA_TIMER_FREQ, get_ticks_per_sec());
CUDA_TIMER_FREQ, NANOSECONDS_PER_SECOND);
/* the timer goes down from latch to -1 (period of latch + 2) */
if (d <= (s->counter_value + 1)) {
counter = (s->counter_value - d) & 0xffff;
@ -208,7 +208,7 @@ static int64_t get_next_irq_time(CUDATimer *s, int64_t current_time)
}
CUDA_DPRINTF("latch=%d counter=%" PRId64 " delta_next=%" PRId64 "\n",
s->latch, d, next_time - d);
next_time = muldiv64(next_time, get_ticks_per_sec(), CUDA_TIMER_FREQ) +
next_time = muldiv64(next_time, NANOSECONDS_PER_SECOND, CUDA_TIMER_FREQ) +
s->load_time;
if (next_time <= current_time)
next_time = current_time + 1;
@ -531,7 +531,7 @@ static void cuda_adb_poll(void *opaque)
}
timer_mod(s->adb_poll_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(get_ticks_per_sec() / (1000 / s->autopoll_rate_ms)));
(NANOSECONDS_PER_SECOND / (1000 / s->autopoll_rate_ms)));
}
/* description of commands */
@ -559,7 +559,7 @@ static bool cuda_cmd_autopoll(CUDAState *s,
if (autopoll) {
timer_mod(s->adb_poll_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(get_ticks_per_sec() / (1000 / s->autopoll_rate_ms)));
(NANOSECONDS_PER_SECOND / (1000 / s->autopoll_rate_ms)));
} else {
timer_del(s->adb_poll_timer);
}
@ -585,7 +585,7 @@ static bool cuda_cmd_set_autorate(CUDAState *s,
if (s->autopoll) {
timer_mod(s->adb_poll_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(get_ticks_per_sec() / (1000 / s->autopoll_rate_ms)));
(NANOSECONDS_PER_SECOND / (1000 / s->autopoll_rate_ms)));
}
return true;
}
@ -665,7 +665,7 @@ static bool cuda_cmd_get_time(CUDAState *s,
}
ti = s->tick_offset + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)
/ get_ticks_per_sec());
/ NANOSECONDS_PER_SECOND);
out_data[0] = ti >> 24;
out_data[1] = ti >> 16;
out_data[2] = ti >> 8;
@ -687,7 +687,7 @@ static bool cuda_cmd_set_time(CUDAState *s,
ti = (((uint32_t)in_data[1]) << 24) + (((uint32_t)in_data[2]) << 16)
+ (((uint32_t)in_data[3]) << 8) + in_data[4];
s->tick_offset = ti - (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)
/ get_ticks_per_sec());
/ NANOSECONDS_PER_SECOND);
return true;
}


@ -254,7 +254,7 @@ static uint64_t timer_read(void *opaque, hwaddr addr, unsigned size)
uint64_t systime = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
uint64_t kltime;
kltime = muldiv64(systime, 4194300, get_ticks_per_sec() * 4);
kltime = muldiv64(systime, 4194300, NANOSECONDS_PER_SECOND * 4);
kltime = muldiv64(kltime, 18432000, 1048575);
switch (addr) {


@ -294,7 +294,7 @@ static void dp8393x_set_next_tick(dp8393xState *s)
ticks = s->regs[SONIC_WT1] << 16 | s->regs[SONIC_WT0];
s->wt_last_update = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
delay = get_ticks_per_sec() * ticks / 5000000;
delay = NANOSECONDS_PER_SECOND * ticks / 5000000;
timer_mod(s->watchdog, s->wt_last_update + delay);
}


@ -465,7 +465,7 @@ void ppce500_set_mpic_proxy(bool enabled)
uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
{
/* TB time in tb periods */
return muldiv64(vmclk, tb_env->tb_freq, get_ticks_per_sec()) + tb_offset;
return muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND) + tb_offset;
}
uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
@ -506,7 +506,9 @@ uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
int64_t *tb_offsetp, uint64_t value)
{
*tb_offsetp = value - muldiv64(vmclk, tb_env->tb_freq, get_ticks_per_sec());
*tb_offsetp = value -
muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND);
LOG_TB("%s: tb %016" PRIx64 " offset %08" PRIx64 "\n",
__func__, value, *tb_offsetp);
}
@ -640,11 +642,11 @@ static inline uint32_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (diff >= 0) {
decr = muldiv64(diff, tb_env->decr_freq, get_ticks_per_sec());
decr = muldiv64(diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
} else if (tb_env->flags & PPC_TIMER_BOOKE) {
decr = 0;
} else {
decr = -muldiv64(-diff, tb_env->decr_freq, get_ticks_per_sec());
decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
}
LOG_TB("%s: %08" PRIx32 "\n", __func__, decr);
@ -676,7 +678,8 @@ uint64_t cpu_ppc_load_purr (CPUPPCState *env)
diff = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - tb_env->purr_start;
return tb_env->purr_load + muldiv64(diff, tb_env->tb_freq, get_ticks_per_sec());
return tb_env->purr_load +
muldiv64(diff, tb_env->tb_freq, NANOSECONDS_PER_SECOND);
}
/* When decrementer expires,
@ -752,7 +755,7 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
/* Calculate the next timer event */
now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
next = now + muldiv64(value, get_ticks_per_sec(), tb_env->decr_freq);
next = now + muldiv64(value, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
*nextp = next;
/* Adjust timer */
@ -1013,7 +1016,7 @@ static void cpu_4xx_fit_cb (void *opaque)
/* Cannot occur, but makes gcc happy */
return;
}
next = now + muldiv64(next, get_ticks_per_sec(), tb_env->tb_freq);
next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->tb_freq);
if (next == now)
next++;
timer_mod(ppc40x_timer->fit_timer, next);
@ -1044,7 +1047,7 @@ static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
__func__, ppc40x_timer->pit_reload);
now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
next = now + muldiv64(ppc40x_timer->pit_reload,
get_ticks_per_sec(), tb_env->decr_freq);
NANOSECONDS_PER_SECOND, tb_env->decr_freq);
if (is_excp)
next += tb_env->decr_next - now;
if (next == now)
@ -1109,7 +1112,7 @@ static void cpu_4xx_wdt_cb (void *opaque)
/* Cannot occur, but makes gcc happy */
return;
}
next = now + muldiv64(next, get_ticks_per_sec(), tb_env->decr_freq);
next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
if (next == now)
next++;
LOG_TB("%s: TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,


@ -1356,7 +1356,7 @@ static uint32_t ppc4xx_gpt_readl (void *opaque, hwaddr addr)
case 0x00:
/* Time base counter */
ret = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + gpt->tb_offset,
gpt->tb_freq, get_ticks_per_sec());
gpt->tb_freq, NANOSECONDS_PER_SECOND);
break;
case 0x10:
/* Output enable */
@ -1411,7 +1411,7 @@ static void ppc4xx_gpt_writel (void *opaque,
switch (addr) {
case 0x00:
/* Time base counter */
gpt->tb_offset = muldiv64(value, get_ticks_per_sec(), gpt->tb_freq)
gpt->tb_offset = muldiv64(value, NANOSECONDS_PER_SECOND, gpt->tb_freq)
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
ppc4xx_gpt_compute_timer(gpt);
break;


@ -165,7 +165,7 @@ static void booke_update_fixed_timer(CPUPPCState *env,
ticks += delta_tick;
}
*next = now + muldiv64(ticks, get_ticks_per_sec(), tb_env->tb_freq);
*next = now + muldiv64(ticks, NANOSECONDS_PER_SECOND, tb_env->tb_freq);
if ((*next < now) || (*next > INT64_MAX)) {
/* Overflow, so assume the biggest number the qemu timer supports. */
*next = INT64_MAX;


@ -216,7 +216,7 @@
#define SD_HOST_SPECv2_VERS 0x2401
#define SDHC_REGISTERS_MAP_SIZE 0x100
#define SDHC_INSERTION_DELAY (get_ticks_per_sec())
#define SDHC_INSERTION_DELAY (NANOSECONDS_PER_SECOND)
#define SDHC_TRANSFER_DELAY 100
#define SDHC_ADMA_DESCS_PER_DELAY 5
#define SDHC_CMD_RESPONSE (3 << 0)


@ -448,12 +448,12 @@ static void hstick_irq(void *opaque)
static int64_t cpu_to_timer_ticks(int64_t cpu_ticks, uint32_t frequency)
{
return muldiv64(cpu_ticks, get_ticks_per_sec(), frequency);
return muldiv64(cpu_ticks, NANOSECONDS_PER_SECOND, frequency);
}
static uint64_t timer_to_cpu_ticks(int64_t timer_ticks, uint32_t frequency)
{
return muldiv64(timer_ticks, frequency, get_ticks_per_sec());
return muldiv64(timer_ticks, frequency, NANOSECONDS_PER_SECOND);
}
void cpu_tick_set_count(CPUTimer *timer, uint64_t count)


@ -53,7 +53,7 @@ static int pit_get_count(PITChannelState *s)
int counter;
d = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->count_load_time, PIT_FREQ,
get_ticks_per_sec());
NANOSECONDS_PER_SECOND);
switch(s->mode) {
case 0:
case 1:
@ -263,7 +263,7 @@ static void pit_irq_timer_update(PITChannelState *s, int64_t current_time)
#ifdef DEBUG_PIT
printf("irq_level=%d next_delay=%f\n",
irq_level,
(double)(expire_time - current_time) / get_ticks_per_sec());
(double)(expire_time - current_time) / NANOSECONDS_PER_SECOND);
#endif
s->next_transition_time = expire_time;
if (expire_time != -1)


@ -47,7 +47,7 @@ int pit_get_out(PITChannelState *s, int64_t current_time)
int out;
d = muldiv64(current_time - s->count_load_time, PIT_FREQ,
get_ticks_per_sec());
NANOSECONDS_PER_SECOND);
switch (s->mode) {
default:
case 0:
@ -81,7 +81,7 @@ int64_t pit_get_next_transition_time(PITChannelState *s, int64_t current_time)
int period2;
d = muldiv64(current_time - s->count_load_time, PIT_FREQ,
get_ticks_per_sec());
NANOSECONDS_PER_SECOND);
switch (s->mode) {
default:
case 0:
@ -121,7 +121,7 @@ int64_t pit_get_next_transition_time(PITChannelState *s, int64_t current_time)
break;
}
/* convert to timer units */
next_time = s->count_load_time + muldiv64(next_time, get_ticks_per_sec(),
next_time = s->count_load_time + muldiv64(next_time, NANOSECONDS_PER_SECOND,
PIT_FREQ);
/* fix potential rounding problems */
/* XXX: better solution: use a clock at PIT_FREQ Hz */


@ -107,8 +107,8 @@ static uint64_t get_guest_rtc_ns(RTCState *s)
uint64_t guest_rtc;
uint64_t guest_clock = qemu_clock_get_ns(rtc_clock);
guest_rtc = s->base_rtc * NANOSECONDS_PER_SECOND
+ guest_clock - s->last_update + s->offset;
guest_rtc = s->base_rtc * NANOSECONDS_PER_SECOND +
guest_clock - s->last_update + s->offset;
return guest_rtc;
}
@ -121,7 +121,7 @@ static void rtc_coalesced_timer_update(RTCState *s)
/* divide each RTC interval to 2 - 8 smaller intervals */
int c = MIN(s->irq_coalesced, 7) + 1;
int64_t next_clock = qemu_clock_get_ns(rtc_clock) +
muldiv64(s->period / c, get_ticks_per_sec(), RTC_CLOCK_RATE);
muldiv64(s->period / c, NANOSECONDS_PER_SECOND, RTC_CLOCK_RATE);
timer_mod(s->coalesced_timer, next_clock);
}
}
@ -167,10 +167,12 @@ static void periodic_timer_update(RTCState *s, int64_t current_time)
s->period = period;
#endif
/* compute 32 khz clock */
cur_clock = muldiv64(current_time, RTC_CLOCK_RATE, get_ticks_per_sec());
cur_clock =
muldiv64(current_time, RTC_CLOCK_RATE, NANOSECONDS_PER_SECOND);
next_irq_clock = (cur_clock & ~(period - 1)) + period;
s->next_periodic_time =
muldiv64(next_irq_clock, get_ticks_per_sec(), RTC_CLOCK_RATE) + 1;
s->next_periodic_time = muldiv64(next_irq_clock, NANOSECONDS_PER_SECOND,
RTC_CLOCK_RATE) + 1;
timer_mod(s->periodic_timer, s->next_periodic_time);
} else {
#ifdef TARGET_I386


@ -402,7 +402,7 @@ static void omap_gp_timer_write(void *opaque, hwaddr addr,
if (s->trigger == gpt_trigger_none)
omap_gp_timer_out(s, s->scpwm);
/* TODO: make sure this doesn't overflow 32-bits */
s->ticks_per_sec = get_ticks_per_sec() << (s->pre ? s->ptv + 1 : 0);
s->ticks_per_sec = NANOSECONDS_PER_SECOND << (s->pre ? s->ptv + 1 : 0);
omap_gp_timer_update(s);
break;


@ -29,7 +29,8 @@ struct omap_synctimer_s {
/* 32-kHz Sync Timer of the OMAP2 */
static uint32_t omap_synctimer_read(struct omap_synctimer_s *s) {
return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 0x8000, get_ticks_per_sec());
return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 0x8000,
NANOSECONDS_PER_SECOND);
}
void omap_synctimer_reset(struct omap_synctimer_s *s)


@ -80,7 +80,7 @@ static void pl031_interrupt(void * opaque)
static uint32_t pl031_get_count(PL031State *s)
{
int64_t now = qemu_clock_get_ns(rtc_clock);
return s->tick_offset + now / get_ticks_per_sec();
return s->tick_offset + now / NANOSECONDS_PER_SECOND;
}
static void pl031_set_alarm(PL031State *s)
@ -96,7 +96,7 @@ static void pl031_set_alarm(PL031State *s)
pl031_interrupt(s);
} else {
int64_t now = qemu_clock_get_ns(rtc_clock);
timer_mod(s->timer, now + (int64_t)ticks * get_ticks_per_sec());
timer_mod(s->timer, now + (int64_t)ticks * NANOSECONDS_PER_SECOND);
}
}
@ -204,7 +204,7 @@ static void pl031_init(Object *obj)
sysbus_init_irq(dev, &s->irq);
qemu_get_timedate(&tm, 0);
s->tick_offset = mktimegm(&tm) -
qemu_clock_get_ns(rtc_clock) / get_ticks_per_sec();
qemu_clock_get_ns(rtc_clock) / NANOSECONDS_PER_SECOND;
s->timer = timer_new_ns(rtc_clock, pl031_interrupt, s);
}
@ -216,7 +216,7 @@ static void pl031_pre_save(void *opaque)
/* tick_offset is base_time - rtc_clock base time. Instead, we want to
* store the base time relative to the QEMU_CLOCK_VIRTUAL for backwards-compatibility. */
int64_t delta = qemu_clock_get_ns(rtc_clock) - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->tick_offset_vmstate = s->tick_offset + delta / get_ticks_per_sec();
s->tick_offset_vmstate = s->tick_offset + delta / NANOSECONDS_PER_SECOND;
}
static int pl031_post_load(void *opaque, int version_id)
@ -224,7 +224,7 @@ static int pl031_post_load(void *opaque, int version_id)
PL031State *s = opaque;
int64_t delta = qemu_clock_get_ns(rtc_clock) - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->tick_offset = s->tick_offset_vmstate - delta / get_ticks_per_sec();
s->tick_offset = s->tick_offset_vmstate - delta / NANOSECONDS_PER_SECOND;
pl031_set_alarm(s);
return 0;
}


@ -119,11 +119,11 @@ static void pxa2xx_timer_update(void *opaque, uint64_t now_qemu)
uint64_t new_qemu;
now_vm = s->clock +
muldiv64(now_qemu - s->lastload, s->freq, get_ticks_per_sec());
muldiv64(now_qemu - s->lastload, s->freq, NANOSECONDS_PER_SECOND);
for (i = 0; i < 4; i ++) {
new_qemu = now_qemu + muldiv64((uint32_t) (s->timer[i].value - now_vm),
get_ticks_per_sec(), s->freq);
NANOSECONDS_PER_SECOND, s->freq);
timer_mod(s->timer[i].qtimer, new_qemu);
}
}
@ -148,10 +148,10 @@ static void pxa2xx_timer_update4(void *opaque, uint64_t now_qemu, int n)
now_vm = s->tm4[counter].clock + muldiv64(now_qemu -
s->tm4[counter].lastload,
s->tm4[counter].freq, get_ticks_per_sec());
s->tm4[counter].freq, NANOSECONDS_PER_SECOND);
new_qemu = now_qemu + muldiv64((uint32_t) (s->tm4[n].tm.value - now_vm),
get_ticks_per_sec(), s->tm4[counter].freq);
NANOSECONDS_PER_SECOND, s->tm4[counter].freq);
timer_mod(s->tm4[n].tm.qtimer, new_qemu);
}
@ -190,7 +190,7 @@ static uint64_t pxa2xx_timer_read(void *opaque, hwaddr offset,
return s->tm4[tm].tm.value;
case OSCR:
return s->clock + muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
s->lastload, s->freq, get_ticks_per_sec());
s->lastload, s->freq, NANOSECONDS_PER_SECOND);
case OSCR11: tm ++;
/* fall through */
case OSCR10: tm ++;
@ -214,15 +214,17 @@ static uint64_t pxa2xx_timer_read(void *opaque, hwaddr offset,
s->snapshot = s->tm4[tm - 1].clock + muldiv64(
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
s->tm4[tm - 1].lastload,
s->tm4[tm - 1].freq, get_ticks_per_sec());
s->tm4[tm - 1].freq, NANOSECONDS_PER_SECOND);
else
s->snapshot = s->tm4[tm - 1].clock;
}
if (!s->tm4[tm].freq)
return s->tm4[tm].clock;
return s->tm4[tm].clock + muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
s->tm4[tm].lastload, s->tm4[tm].freq, get_ticks_per_sec());
return s->tm4[tm].clock +
muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
s->tm4[tm].lastload, s->tm4[tm].freq,
NANOSECONDS_PER_SECOND);
case OIER:
return s->irq_enabled;
case OSSR: /* Status register */


@ -2304,10 +2304,11 @@ static void ehci_frame_timer(void *opaque)
/* If we've raised int, we speed up the timer, so that we quickly
* notice any new packets queued up in response */
if (ehci->int_req_by_async && (ehci->usbsts & USBSTS_INT)) {
expire_time = t_now + get_ticks_per_sec() / (FRAME_TIMER_FREQ * 4);
expire_time = t_now +
NANOSECONDS_PER_SECOND / (FRAME_TIMER_FREQ * 4);
ehci->int_req_by_async = false;
} else {
expire_time = t_now + (get_ticks_per_sec()
expire_time = t_now + (NANOSECONDS_PER_SECOND
* (ehci->async_stepdown+1) / FRAME_TIMER_FREQ);
}
timer_mod(ehci->frame_timer, expire_time);


@ -564,7 +564,7 @@ static void musb_schedule_cb(USBPort *port, USBPacket *packey)
ep->intv_timer[dir] = timer_new_ns(QEMU_CLOCK_VIRTUAL, musb_cb_tick, ep);
timer_mod(ep->intv_timer[dir], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
muldiv64(timeout, get_ticks_per_sec(), 8000));
muldiv64(timeout, NANOSECONDS_PER_SECOND, 8000));
}
static int musb_timeout(int ttype, int speed, int val)


@ -1850,12 +1850,12 @@ static void usb_ohci_init(OHCIState *ohci, DeviceState *dev,
if (usb_frame_time == 0) {
#ifdef OHCI_TIME_WARP
usb_frame_time = get_ticks_per_sec();
usb_bit_time = muldiv64(1, get_ticks_per_sec(), USB_HZ/1000);
usb_frame_time = NANOSECONDS_PER_SECOND;
usb_bit_time = NANOSECONDS_PER_SECOND / (USB_HZ / 1000);
#else
usb_frame_time = muldiv64(1, get_ticks_per_sec(), 1000);
if (get_ticks_per_sec() >= USB_HZ) {
usb_bit_time = muldiv64(1, get_ticks_per_sec(), USB_HZ);
usb_frame_time = NANOSECONDS_PER_SECOND / 1000;
if (NANOSECONDS_PER_SECOND >= USB_HZ) {
usb_bit_time = NANOSECONDS_PER_SECOND / USB_HZ;
} else {
usb_bit_time = 1;
}


@ -403,7 +403,7 @@ static int uhci_post_load(void *opaque, int version_id)
if (version_id < 2) {
s->expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(get_ticks_per_sec() / FRAME_TIMER_FREQ);
(NANOSECONDS_PER_SECOND / FRAME_TIMER_FREQ);
}
return 0;
}
@ -445,7 +445,7 @@ static void uhci_port_write(void *opaque, hwaddr addr,
/* start frame processing */
trace_usb_uhci_schedule_start();
s->expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(get_ticks_per_sec() / FRAME_TIMER_FREQ);
(NANOSECONDS_PER_SECOND / FRAME_TIMER_FREQ);
timer_mod(s->frame_timer, s->expire_time);
s->status &= ~UHCI_STS_HCHALTED;
} else if (!(val & UHCI_CMD_RS)) {
@ -1131,7 +1131,7 @@ static void uhci_frame_timer(void *opaque)
UHCIState *s = opaque;
uint64_t t_now, t_last_run;
int i, frames;
const uint64_t frame_t = get_ticks_per_sec() / FRAME_TIMER_FREQ;
const uint64_t frame_t = NANOSECONDS_PER_SECOND / FRAME_TIMER_FREQ;
s->completions_only = false;
qemu_bh_cancel(s->bh);


@ -516,7 +516,7 @@ static void tusb_async_writew(void *opaque, hwaddr addr,
if (value & TUSB_DEV_OTG_TIMER_ENABLE)
timer_mod(s->otg_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
muldiv64(TUSB_DEV_OTG_TIMER_VAL(value),
get_ticks_per_sec(), TUSB_DEVCLOCK));
NANOSECONDS_PER_SECOND, TUSB_DEVCLOCK));
else
timer_del(s->otg_timer);
break;
@ -726,8 +726,8 @@ static void tusb6010_power(TUSBState *s, int on)
/* Pull the interrupt down after TUSB6010 comes up. */
s->intr_ok = 0;
tusb_intr_update(s);
timer_mod(s->pwr_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + get_ticks_per_sec() / 2);
timer_mod(s->pwr_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
NANOSECONDS_PER_SECOND / 2);
}
}


@ -79,7 +79,7 @@ static int wdt_diag288_handle_timer(DIAG288State *diag288,
}
timer_mod(diag288->timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
timeout * get_ticks_per_sec());
timeout * NANOSECONDS_PER_SECOND);
break;
case WDT_DIAG288_CANCEL:
if (!diag288->enabled) {


@ -64,7 +64,7 @@ static void ib700_write_enable_reg(void *vp, uint32_t addr, uint32_t data)
ib700_debug("addr = %x, data = %x\n", addr, data);
timeout = (int64_t) time_map[data & 0xF] * get_ticks_per_sec();
timeout = (int64_t) time_map[data & 0xF] * NANOSECONDS_PER_SECOND;
timer_mod(s->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + timeout);
}


@ -154,7 +154,7 @@ void acpi_pm_tmr_reset(ACPIREGS *ar);
static inline int64_t acpi_pm_tmr_get_clock(void)
{
return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), PM_TIMER_FREQUENCY,
get_ticks_per_sec());
NANOSECONDS_PER_SECOND);
}
/* PM1a_EVT: piix and ich9 don't implement PM1b. */


@ -783,18 +783,13 @@ void cpu_enable_ticks(void);
/* Caller must hold BQL */
void cpu_disable_ticks(void);
static inline int64_t get_ticks_per_sec(void)
{
return 1000000000LL;
}
static inline int64_t get_max_clock_jump(void)
{
/* This should be small enough to prevent excessive interrupts from being
* generated by the RTC on clock jumps, but large enough to avoid frequent
* unnecessary resets in idle VMs.
*/
return 60 * get_ticks_per_sec();
return 60 * NANOSECONDS_PER_SECOND;
}
/*
@ -820,7 +815,7 @@ static inline int64_t get_clock(void)
{
LARGE_INTEGER ti;
QueryPerformanceCounter(&ti);
return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
return muldiv64(ti.QuadPart, NANOSECONDS_PER_SECOND, clock_freq);
}
#else


@ -1523,9 +1523,9 @@ int64_t dev_time;
static void hmp_info_profile(Monitor *mon, const QDict *qdict)
{
monitor_printf(mon, "async time %" PRId64 " (%0.3f)\n",
dev_time, dev_time / (double)get_ticks_per_sec());
dev_time, dev_time / (double)NANOSECONDS_PER_SECOND);
monitor_printf(mon, "qemu time %" PRId64 " (%0.3f)\n",
tcg_time, tcg_time / (double)get_ticks_per_sec());
tcg_time, tcg_time / (double)NANOSECONDS_PER_SECOND);
tcg_time = 0;
dev_time = 0;
}


@ -1371,7 +1371,7 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
/* Always wake up soon in case the interrupt was level based */
timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(get_ticks_per_sec() / 50));
(NANOSECONDS_PER_SECOND / 50));
}
/* We don't know if there are more interrupts pending after this. However,
@ -1831,7 +1831,7 @@ uint32_t kvmppc_get_tbfreq(void)
{
char line[512];
char *ns;
uint32_t retval = get_ticks_per_sec();
uint32_t retval = NANOSECONDS_PER_SECOND;
if (read_cpuinfo("timebase", line, sizeof(line))) {
return retval;