timers/posix-timers: Convert internals to use nsecs

Use the new nsec-based cputime accessors as part of the whole cputime
conversion from cputime_t to nsecs.

Also convert posix-cpu-timers to use nsec-based internal counters, which
simplifies the code.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Link: http://lkml.kernel.org/r/1485832191-26889-19-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
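
The pattern the whole series follows can be compressed into a few lines:
cputime_t is an arch-opaque type hidden behind __force casts, while
nanoseconds are a plain u64 that can be compared and added directly. A
minimal stand-alone sketch of the before/after (the typedef and macros are
illustrative, not the kernel's exact definitions):

#include <stdint.h>

/* Before: an opaque arch-defined type; every use needs a helper. */
typedef uint64_t cputime_t;                      /* jiffies on most arches */
#define cputime_to_expires(ct) ((uint64_t)(ct))  /* __force casts in-tree  */
#define expires_to_cputime(ns) ((cputime_t)(ns))

/* After: expirations are u64 nanoseconds end to end, compared directly. */
static inline int expired(uint64_t expires_ns, uint64_t now_ns)
{
	return expires_ns != 0 && now_ns >= expires_ns;
}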

include/linux/posix-timers.h

@@ -8,19 +8,9 @@
#include <linux/alarmtimer.h>
static inline unsigned long long cputime_to_expires(cputime_t expires)
{
return (__force unsigned long long)expires;
}
static inline cputime_t expires_to_cputime(unsigned long long expires)
{
return (__force cputime_t)expires;
}
struct cpu_timer_list {
struct list_head entry;
unsigned long long expires, incr;
u64 expires, incr;
struct task_struct *task;
int firing;
};
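
With expires and incr now plain u64 nanoseconds, rearming a periodic timer
becomes direct integer arithmetic. A stand-alone analogue of what
bump_cpu_timer() (further down) computes; the in-tree version walks in
doubling steps so it can also count overruns:

#include <stdint.h>

struct cpu_timer { uint64_t expires, incr; };  /* mirrors the u64 fields */

/* Advance an expired periodic timer past 'now' by whole periods. */
static void bump(struct cpu_timer *t, uint64_t now)
{
	if (t->incr == 0 || now < t->expires)
		return;
	t->expires += ((now - t->expires) / t->incr + 1) * t->incr;
}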

include/linux/sched.h

@@ -755,7 +755,7 @@ struct signal_struct {
struct thread_group_cputimer cputimer;
/* Earliest-expiration cache. */
struct task_cputime_t cputime_expires;
struct task_cputime cputime_expires;
#ifdef CONFIG_NO_HZ_FULL
atomic_t tick_dep_mask;
@@ -1689,7 +1689,7 @@ struct task_struct {
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
unsigned long min_flt, maj_flt;
struct task_cputime_t cputime_expires;
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
/* process credentials */
@@ -3527,7 +3527,7 @@ static __always_inline bool need_resched(void)
* Thread group CPU time accounting.
*/
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
static inline void thread_group_cputime_t(struct task_struct *tsk,
struct task_cputime_t *cputime)

kernel/fork.c

@@ -1313,7 +1313,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
if (cpu_limit != RLIM_INFINITY) {
sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
sig->cputime_expires.prof_exp = cpu_limit * NSEC_PER_SEC;
sig->cputimer.running = true;
}

kernel/sched/cputime.c

@@ -122,7 +122,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
/* Add user time to process. */
p->utime += cputime_to_nsecs(cputime);
account_group_user_time(p, cputime);
account_group_user_time(p, cputime_to_nsecs(cputime));
index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
@@ -144,7 +144,7 @@ void account_guest_time(struct task_struct *p, cputime_t cputime)
/* Add guest time to process. */
p->utime += cputime_to_nsecs(cputime);
account_group_user_time(p, cputime);
account_group_user_time(p, cputime_to_nsecs(cputime));
p->gtime += cputime_to_nsecs(cputime);
/* Add guest time to cpustat. */
@@ -168,7 +168,7 @@ void account_system_index_time(struct task_struct *p,
{
/* Add system time to process. */
p->stime += cputime_to_nsecs(cputime);
account_group_system_time(p, cputime);
account_group_system_time(p, cputime_to_nsecs(cputime));
/* Add system time to cpustat. */
task_group_account_field(p, index, cputime_to_nsecs(cputime));

kernel/sched/stats.h

@@ -216,7 +216,7 @@ static inline bool cputimer_running(struct task_struct *tsk)
* running CPU and update the utime field there.
*/
static inline void account_group_user_time(struct task_struct *tsk,
cputime_t cputime)
u64 cputime)
{
struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
@@ -237,7 +237,7 @@ static inline void account_group_user_time(struct task_struct *tsk,
* running CPU and update the stime field there.
*/
static inline void account_group_system_time(struct task_struct *tsk,
cputime_t cputime)
u64 cputime)
{
struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
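
The group totals behind these helpers live in struct task_cputime_atomic as
atomic64_t counters, so once the argument is in nanoseconds the accounting
is a single atomic add with no conversion. A userspace analogue with C11
atomics, assuming a simplified struct:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct group_cputime {
	_Atomic uint64_t utime, stime, sum_exec_runtime;  /* nanoseconds */
	atomic_bool running;
};

static void account_user(struct group_cputime *g, uint64_t delta_ns)
{
	if (!atomic_load(&g->running))  /* cputimer_running() check */
		return;
	atomic_fetch_add(&g->utime, delta_ns);
}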

kernel/time/itimer.c

@@ -53,15 +53,15 @@ static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
cval = it->expires;
cinterval = it->incr;
if (cval) {
struct task_cputime_t cputime;
struct task_cputime cputime;
cputime_t t;
thread_group_cputimer(tsk, &cputime);
if (clock_id == CPUCLOCK_PROF)
t = cputime.utime + cputime.stime;
t = nsecs_to_cputime(cputime.utime + cputime.stime);
else
/* CPUCLOCK_VIRT */
t = cputime.utime;
t = nsecs_to_cputime(cputime.utime);
if (cval < t)
/* about to fire */

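itimer internals are converted later in the series, so the now-nsec group
times have to be translated back to cputime_t at this boundary. On
jiffies-based cputime the translation is a division by the nanoseconds per
tick; a stand-alone sketch, assuming HZ=100:

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 100                              /* assumed tick rate */

typedef uint64_t cputime_t;                 /* jiffies-based sketch */

static inline cputime_t nsecs_to_cputime_sketch(uint64_t nsecs)
{
	return nsecs / (NSEC_PER_SEC / HZ);  /* 10ms per tick at HZ=100 */
}
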
kernel/time/posix-cpu-timers.c

@@ -50,39 +50,14 @@ static int check_clock(const clockid_t which_clock)
return error;
}
static inline unsigned long long
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
unsigned long long ret;
ret = 0; /* high half always zero when .cpu used */
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
} else {
ret = cputime_to_expires(timespec_to_cputime(tp));
}
return ret;
}
static void sample_to_timespec(const clockid_t which_clock,
unsigned long long expires,
struct timespec *tp)
{
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
*tp = ns_to_timespec(expires);
else
cputime_to_timespec((__force cputime_t)expires, tp);
}
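
The removed timespec_to_sample()/sample_to_timespec() pair had to dispatch
on the clock type because CPUCLOCK_SCHED samples were nanoseconds while the
others were cputime_t. With every sample in nanoseconds, the generic
timespec_to_ns()/ns_to_timespec() helpers used below suffice. Their shape,
simplified into a stand-alone sketch (the in-tree versions also clamp
overflow and handle negative values):

#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000LL

static inline int64_t timespec_to_ns_sketch(const struct timespec *ts)
{
	return (int64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec;
}

static inline struct timespec ns_to_timespec_sketch(int64_t nsec)
{
	struct timespec ts = {
		.tv_sec  = nsec / NSEC_PER_SEC,
		.tv_nsec = nsec % NSEC_PER_SEC,
	};
	return ts;
}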
/*
* Update expiry time from increment, and increase overrun count,
* given the current clock sample.
*/
static void bump_cpu_timer(struct k_itimer *timer,
unsigned long long now)
static void bump_cpu_timer(struct k_itimer *timer, u64 now)
{
int i;
unsigned long long delta, incr;
u64 delta, incr;
if (timer->it.cpu.incr == 0)
return;
@@ -115,28 +90,28 @@ static void bump_cpu_timer(struct k_itimer *timer,
* Checks @cputime to see if all fields are zero. Returns true if all fields
* are zero, false if any field is nonzero.
*/
static inline int task_cputime_zero(const struct task_cputime_t *cputime)
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
return 1;
return 0;
}
static inline unsigned long long prof_ticks(struct task_struct *p)
static inline u64 prof_ticks(struct task_struct *p)
{
cputime_t utime, stime;
u64 utime, stime;
task_cputime_t(p, &utime, &stime);
task_cputime(p, &utime, &stime);
return cputime_to_expires(utime + stime);
return utime + stime;
}
static inline unsigned long long virt_ticks(struct task_struct *p)
static inline u64 virt_ticks(struct task_struct *p)
{
cputime_t utime, stime;
u64 utime, stime;
task_cputime_t(p, &utime, &stime);
task_cputime(p, &utime, &stime);
return cputime_to_expires(utime);
return utime;
}
static int
@@ -176,8 +151,8 @@ posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
/*
* Sample a per-thread clock for the given task.
*/
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
unsigned long long *sample)
static int cpu_clock_sample(const clockid_t which_clock,
struct task_struct *p, u64 *sample)
{
switch (CPUCLOCK_WHICH(which_clock)) {
default:
@@ -210,7 +185,7 @@ retry:
}
}
static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime_t *sum)
static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
{
__update_gt_cputime(&cputime_atomic->utime, sum->utime);
__update_gt_cputime(&cputime_atomic->stime, sum->stime);
@@ -218,7 +193,7 @@ static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct
}
/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
static inline void sample_cputime_atomic(struct task_cputime_t *times,
static inline void sample_cputime_atomic(struct task_cputime *times,
struct task_cputime_atomic *atomic_times)
{
times->utime = atomic64_read(&atomic_times->utime);
@@ -226,10 +201,10 @@ static inline void sample_cputime_atomic(struct task_cputime_t *times,
times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
}
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times)
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
struct task_cputime_t sum;
struct task_cputime sum;
/* Check if cputimer isn't running. This is accessed without locking. */
if (!READ_ONCE(cputimer->running)) {
@@ -238,7 +213,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times
* values through the TIMER_ABSTIME flag, therefore we have
* to synchronize the timer to the clock every time we start it.
*/
thread_group_cputime_t(tsk, &sum);
thread_group_cputime(tsk, &sum);
update_gt_cputime(&cputimer->cputime_atomic, &sum);
/*
@@ -260,23 +235,23 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times
*/
static int cpu_clock_sample_group(const clockid_t which_clock,
struct task_struct *p,
unsigned long long *sample)
u64 *sample)
{
struct task_cputime_t cputime;
struct task_cputime cputime;
switch (CPUCLOCK_WHICH(which_clock)) {
default:
return -EINVAL;
case CPUCLOCK_PROF:
thread_group_cputime_t(p, &cputime);
*sample = cputime_to_expires(cputime.utime + cputime.stime);
thread_group_cputime(p, &cputime);
*sample = cputime.utime + cputime.stime;
break;
case CPUCLOCK_VIRT:
thread_group_cputime_t(p, &cputime);
*sample = cputime_to_expires(cputime.utime);
thread_group_cputime(p, &cputime);
*sample = cputime.utime;
break;
case CPUCLOCK_SCHED:
thread_group_cputime_t(p, &cputime);
thread_group_cputime(p, &cputime);
*sample = cputime.sum_exec_runtime;
break;
}
@@ -288,7 +263,7 @@ static int posix_cpu_clock_get_task(struct task_struct *tsk,
struct timespec *tp)
{
int err = -EINVAL;
unsigned long long rtn;
u64 rtn;
if (CPUCLOCK_PERTHREAD(which_clock)) {
if (same_thread_group(tsk, current))
@@ -299,7 +274,7 @@ static int posix_cpu_clock_get_task(struct task_struct *tsk,
}
if (!err)
sample_to_timespec(which_clock, rtn, tp);
*tp = ns_to_timespec(rtn);
return err;
}
@@ -453,7 +428,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
cleanup_timers(tsk->signal->cpu_timers);
}
static inline int expires_gt(cputime_t expires, cputime_t new_exp)
static inline int expires_gt(u64 expires, u64 new_exp)
{
return expires == 0 || expires > new_exp;
}
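
expires_gt() keeps the existing convention that 0 means "no timer armed",
so an empty cache slot always loses the comparison and gets updated. A
quick self-contained check of the truth table:

#include <assert.h>
#include <stdint.h>

static int expires_gt(uint64_t expires, uint64_t new_exp)
{
	return expires == 0 || expires > new_exp;
}

int main(void)
{
	assert(expires_gt(0, 5));    /* unarmed slot: always update    */
	assert(expires_gt(10, 5));   /* new timer fires sooner: update */
	assert(!expires_gt(5, 10));  /* cached expiry is sooner: keep  */
	return 0;
}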
@@ -466,7 +441,7 @@ static void arm_timer(struct k_itimer *timer)
{
struct task_struct *p = timer->it.cpu.task;
struct list_head *head, *listpos;
struct task_cputime_t *cputime_expires;
struct task_cputime *cputime_expires;
struct cpu_timer_list *const nt = &timer->it.cpu;
struct cpu_timer_list *next;
@@ -488,7 +463,7 @@ static void arm_timer(struct k_itimer *timer)
list_add(&nt->entry, listpos);
if (listpos == head) {
unsigned long long exp = nt->expires;
u64 exp = nt->expires;
/*
* We are the new earliest-expiring POSIX 1.b timer, hence
@@ -499,16 +474,15 @@ static void arm_timer(struct k_itimer *timer)
switch (CPUCLOCK_WHICH(timer->it_clock)) {
case CPUCLOCK_PROF:
if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
cputime_expires->prof_exp = expires_to_cputime(exp);
if (expires_gt(cputime_expires->prof_exp, exp))
cputime_expires->prof_exp = exp;
break;
case CPUCLOCK_VIRT:
if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
cputime_expires->virt_exp = expires_to_cputime(exp);
if (expires_gt(cputime_expires->virt_exp, exp))
cputime_expires->virt_exp = exp;
break;
case CPUCLOCK_SCHED:
if (cputime_expires->sched_exp == 0 ||
cputime_expires->sched_exp > exp)
if (expires_gt(cputime_expires->sched_exp, exp))
cputime_expires->sched_exp = exp;
break;
}
@@ -559,20 +533,19 @@ static void cpu_timer_fire(struct k_itimer *timer)
* traversal.
*/
static int cpu_timer_sample_group(const clockid_t which_clock,
struct task_struct *p,
unsigned long long *sample)
struct task_struct *p, u64 *sample)
{
struct task_cputime_t cputime;
struct task_cputime cputime;
thread_group_cputimer(p, &cputime);
switch (CPUCLOCK_WHICH(which_clock)) {
default:
return -EINVAL;
case CPUCLOCK_PROF:
*sample = cputime_to_expires(cputime.utime + cputime.stime);
*sample = cputime.utime + cputime.stime;
break;
case CPUCLOCK_VIRT:
*sample = cputime_to_expires(cputime.utime);
*sample = cputime.utime;
break;
case CPUCLOCK_SCHED:
*sample = cputime.sum_exec_runtime;
@@ -593,12 +566,12 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
unsigned long flags;
struct sighand_struct *sighand;
struct task_struct *p = timer->it.cpu.task;
unsigned long long old_expires, new_expires, old_incr, val;
u64 old_expires, new_expires, old_incr, val;
int ret;
WARN_ON_ONCE(p == NULL);
new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
new_expires = timespec_to_ns(&new->it_value);
/*
* Protect against sighand release/switch in exit/exec and p->cpu_timers
@@ -659,9 +632,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
bump_cpu_timer(timer, val);
if (val < timer->it.cpu.expires) {
old_expires = timer->it.cpu.expires - val;
sample_to_timespec(timer->it_clock,
old_expires,
&old->it_value);
old->it_value = ns_to_timespec(old_expires);
} else {
old->it_value.tv_nsec = 1;
old->it_value.tv_sec = 0;
@@ -699,8 +670,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
* Install the new reload setting, and
* set up the signal and overrun bookkeeping.
*/
timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
&new->it_interval);
timer->it.cpu.incr = timespec_to_ns(&new->it_interval);
/*
* This acts as a modification timestamp for the timer,
@@ -723,17 +693,15 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
ret = 0;
out:
if (old) {
sample_to_timespec(timer->it_clock,
old_incr, &old->it_interval);
}
if (old)
old->it_interval = ns_to_timespec(old_incr);
return ret;
}
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
unsigned long long now;
u64 now;
struct task_struct *p = timer->it.cpu.task;
WARN_ON_ONCE(p == NULL);
@@ -741,8 +709,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
/*
* Easy part: convert the reload time.
*/
sample_to_timespec(timer->it_clock,
timer->it.cpu.incr, &itp->it_interval);
itp->it_interval = ns_to_timespec(timer->it.cpu.incr);
if (timer->it.cpu.expires == 0) { /* Timer not armed at all. */
itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
@@ -761,7 +728,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
/*
* Protect against sighand release/switch in exit/exec and
* also make timer sampling safe if it ends up calling
* thread_group_cputime_t().
* thread_group_cputime().
*/
sighand = lock_task_sighand(p, &flags);
if (unlikely(sighand == NULL)) {
@@ -771,8 +738,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
* Call the timer disarmed, nothing else to do.
*/
timer->it.cpu.expires = 0;
sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
&itp->it_value);
itp->it_value = ns_to_timespec(timer->it.cpu.expires);
return;
} else {
cpu_timer_sample_group(timer->it_clock, p, &now);
@@ -781,9 +747,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
}
if (now < timer->it.cpu.expires) {
sample_to_timespec(timer->it_clock,
timer->it.cpu.expires - now,
&itp->it_value);
itp->it_value = ns_to_timespec(timer->it.cpu.expires - now);
} else {
/*
* The timer should have expired already, but the firing
@@ -826,8 +790,8 @@ static void check_thread_timers(struct task_struct *tsk,
{
struct list_head *timers = tsk->cpu_timers;
struct signal_struct *const sig = tsk->signal;
struct task_cputime_t *tsk_expires = &tsk->cputime_expires;
unsigned long long expires;
struct task_cputime *tsk_expires = &tsk->cputime_expires;
u64 expires;
unsigned long soft;
/*
@@ -838,10 +802,10 @@ static void check_thread_timers(struct task_struct *tsk,
return;
expires = check_timers_list(timers, firing, prof_ticks(tsk));
tsk_expires->prof_exp = expires_to_cputime(expires);
tsk_expires->prof_exp = expires;
expires = check_timers_list(++timers, firing, virt_ticks(tsk));
tsk_expires->virt_exp = expires_to_cputime(expires);
tsk_expires->virt_exp = expires;
tsk_expires->sched_exp = check_timers_list(++timers, firing,
tsk->se.sum_exec_runtime);
@@ -891,13 +855,12 @@ static inline void stop_process_timers(struct signal_struct *sig)
}
static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
unsigned long long *expires,
unsigned long long cur_time, int signo)
u64 *expires, u64 cur_time, int signo)
{
if (!it->expires)
return;
if (cur_time >= it->expires) {
if (cur_time >= cputime_to_nsecs(it->expires)) {
if (it->incr) {
it->expires += it->incr;
it->error += it->incr_error;
@@ -915,8 +878,8 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
}
if (it->expires && (!*expires || it->expires < *expires)) {
*expires = it->expires;
if (it->expires && (!*expires || cputime_to_nsecs(it->expires) < *expires)) {
*expires = cputime_to_nsecs(it->expires);
}
}
@@ -929,10 +892,10 @@ static void check_process_timers(struct task_struct *tsk,
struct list_head *firing)
{
struct signal_struct *const sig = tsk->signal;
unsigned long long utime, ptime, virt_expires, prof_expires;
unsigned long long sum_sched_runtime, sched_expires;
u64 utime, ptime, virt_expires, prof_expires;
u64 sum_sched_runtime, sched_expires;
struct list_head *timers = sig->cpu_timers;
struct task_cputime_t cputime;
struct task_cputime cputime;
unsigned long soft;
/*
@@ -952,8 +915,8 @@ static void check_process_timers(struct task_struct *tsk,
* Collect the current process totals.
*/
thread_group_cputimer(tsk, &cputime);
utime = cputime_to_expires(cputime.utime);
ptime = utime + cputime_to_expires(cputime.stime);
utime = cputime.utime;
ptime = utime + cputime.stime;
sum_sched_runtime = cputime.sum_exec_runtime;
prof_expires = check_timers_list(timers, firing, ptime);
@@ -969,10 +932,10 @@ static void check_process_timers(struct task_struct *tsk,
SIGVTALRM);
soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
if (soft != RLIM_INFINITY) {
unsigned long psecs = cputime_to_secs(ptime);
unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
unsigned long hard =
READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
cputime_t x;
u64 x;
if (psecs >= hard) {
/*
* At the hard limit, we just die.
@@ -991,14 +954,13 @@ static void check_process_timers(struct task_struct *tsk,
sig->rlim[RLIMIT_CPU].rlim_cur = soft;
}
}
x = secs_to_cputime(soft);
if (!prof_expires || x < prof_expires) {
x = soft * NSEC_PER_SEC;
if (!prof_expires || x < prof_expires)
prof_expires = x;
}
}
sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
sig->cputime_expires.prof_exp = prof_expires;
sig->cputime_expires.virt_exp = virt_expires;
sig->cputime_expires.sched_exp = sched_expires;
if (task_cputime_zero(&sig->cputime_expires))
stop_process_timers(sig);
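
RLIMIT_CPU is specified in seconds, so each check scales the nsec-based
process total down with a 64-bit division (div_u64() in-tree) and scales
the soft limit up by NSEC_PER_SEC. A stand-alone illustration of the
arithmetic:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t ptime = 3500000000ULL;  /* 3.5s of prof time, in nsecs */
	unsigned long soft = 3;          /* RLIMIT_CPU soft limit, secs */

	unsigned long psecs = ptime / NSEC_PER_SEC;  /* div_u64() in-tree */
	uint64_t x = (uint64_t)soft * NSEC_PER_SEC;  /* limit as nsecs    */

	printf("psecs=%lu limit=%llu ns over=%d\n",
	       psecs, (unsigned long long)x, psecs >= soft);
	return 0;
}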
@@ -1015,7 +977,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
struct sighand_struct *sighand;
unsigned long flags;
struct task_struct *p = timer->it.cpu.task;
unsigned long long now;
u64 now;
WARN_ON_ONCE(p == NULL);
@@ -1035,7 +997,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
} else {
/*
* Protect arm_timer() and timer sampling in case of call to
* thread_group_cputime_t().
* thread_group_cputime().
*/
sighand = lock_task_sighand(p, &flags);
if (unlikely(sighand == NULL)) {
@@ -1078,8 +1040,8 @@ out:
* Returns true if any field of the former is greater than the corresponding
* field of the latter if the latter field is set. Otherwise returns false.
*/
static inline int task_cputime_expired(const struct task_cputime_t *sample,
const struct task_cputime_t *expires)
static inline int task_cputime_expired(const struct task_cputime *sample,
const struct task_cputime *expires)
{
if (expires->utime && sample->utime >= expires->utime)
return 1;
@@ -1106,9 +1068,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
struct signal_struct *sig;
if (!task_cputime_zero(&tsk->cputime_expires)) {
struct task_cputime_t task_sample;
struct task_cputime task_sample;
task_cputime_t(tsk, &task_sample.utime, &task_sample.stime);
task_cputime(tsk, &task_sample.utime, &task_sample.stime);
task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
return 1;
@@ -1131,7 +1093,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
*/
if (READ_ONCE(sig->cputimer.running) &&
!READ_ONCE(sig->cputimer.checking_timer)) {
struct task_cputime_t group_sample;
struct task_cputime group_sample;
sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
@@ -1214,7 +1176,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
cputime_t *newval, cputime_t *oldval)
{
unsigned long long now;
u64 now, new;
WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
cpu_timer_sample_group(clock_idx, tsk, &now);
@@ -1226,31 +1188,33 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
* it to be absolute.
*/
if (*oldval) {
if (*oldval <= now) {
if (cputime_to_nsecs(*oldval) <= now) {
/* Just about to fire. */
*oldval = cputime_one_jiffy;
} else {
*oldval -= now;
*oldval -= nsecs_to_cputime(now);
}
}
if (!*newval)
return;
*newval += now;
*newval += nsecs_to_cputime(now);
}
new = cputime_to_nsecs(*newval);
/*
* Update expiration cache if we are the earliest timer, or eventually
* RLIMIT_CPU limit is earlier than prof_exp cpu timer expire.
*/
switch (clock_idx) {
case CPUCLOCK_PROF:
if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
tsk->signal->cputime_expires.prof_exp = *newval;
if (expires_gt(tsk->signal->cputime_expires.prof_exp, new))
tsk->signal->cputime_expires.prof_exp = new;
break;
case CPUCLOCK_VIRT:
if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
tsk->signal->cputime_expires.virt_exp = *newval;
if (expires_gt(tsk->signal->cputime_expires.virt_exp, new))
tsk->signal->cputime_expires.virt_exp = new;
break;
}
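
set_process_cpu_timer() still takes cputime_t in/out arguments because its
itimer callers are not yet converted, so it samples the clock in
nanoseconds and converts at both edges: nsecs_to_cputime() when rebasing
*oldval and *newval, cputime_to_nsecs() when updating the nsec expiration
cache. A hypothetical stand-alone sketch of that flow, assuming
jiffies-based cputime:

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 100
#define TICK_NSEC (NSEC_PER_SEC / HZ)

typedef uint64_t cputime_t;                    /* jiffies-based sketch */
#define cputime_to_nsecs(ct) ((uint64_t)(ct) * TICK_NSEC)
#define nsecs_to_cputime(ns) ((cputime_t)((ns) / TICK_NSEC))

static void rebase(cputime_t *newval, uint64_t now_ns, uint64_t *cache_ns)
{
	*newval += nsecs_to_cputime(now_ns);          /* relative -> absolute */
	uint64_t new_ns = cputime_to_nsecs(*newval);  /* cache stores nsecs   */
	if (*cache_ns == 0 || *cache_ns > new_ns)     /* expires_gt() logic   */
		*cache_ns = new_ns;
}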
@@ -1308,7 +1272,7 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
/*
* We were interrupted by a signal.
*/
sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
*rqtp = ns_to_timespec(timer.it.cpu.expires);
error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
if (!error) {
/*