cpu-timers: Simplify RLIMIT_CPU handling
Always set the signal->cputime_expires expiration cache when setting a new
itimer, a POSIX 1.b timer, or RLIMIT_CPU. Since we initialize the prof_exp
expiration cache during fork(), this allows us to remove the
"RLIMIT_CPU != inf" check from fastpath_timer_check() and do some other
cleanups.

Checked against regression using test cases from:

  http://marc.info/?l=linux-kernel&m=123749066504641&w=4
  http://marc.info/?l=linux-kernel&m=123811277916642&w=2

Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit f55db60904
parent 522dba7134
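The fork()-time initialization the message refers to lives in posix_cpu_timers_init_group() in kernel/fork.c; a condensed sketch of that era's code (from memory, not verbatim from this tree):

/* Sketch of posix_cpu_timers_init_group(), condensed from kernel/fork.c */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	/* Thread group CPU-time counters. */
	thread_group_cputime_init(sig);

	/*
	 * Seed the profiling expiration cache from RLIMIT_CPU, so the
	 * tick path only ever needs to consult cputime_expires.
	 */
	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		sig->cputime_expires.prof_exp =
			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
		sig->cputimer.running = 1;
	}

	/* The per-clock timer lists. */
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
}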
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -11,19 +11,18 @@
 #include <trace/events/timer.h>
 
 /*
- * Called after updating RLIMIT_CPU to set timer expiration if necessary.
+ * Called after updating RLIMIT_CPU to run cpu timer and update
+ * tsk->signal->cputime_expires expiration cache if necessary. Needs
+ * siglock protection since other code may update expiration cache as
+ * well.
  */
 void update_rlimit_cpu(unsigned long rlim_new)
 {
 	cputime_t cputime = secs_to_cputime(rlim_new);
-	struct signal_struct *const sig = current->signal;
 
-	if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
-	    cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
-		spin_lock_irq(&current->sighand->siglock);
-		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
-		spin_unlock_irq(&current->sighand->siglock);
-	}
+	spin_lock_irq(&current->sighand->siglock);
+	set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
+	spin_unlock_irq(&current->sighand->siglock);
 }
 
 static int check_clock(const clockid_t which_clock)
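The lone caller of update_rlimit_cpu() is the setrlimit() path in kernel/sys.c; a hedged sketch of the call site (the exact guard may differ in this tree):

	/* In sys_setrlimit(), after the new limit is stored (sketch): */
	if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY)
		update_rlimit_cpu(new_rlim.rlim_cur);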
@@ -564,7 +563,6 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 	struct list_head *head, *listpos;
 	struct cpu_timer_list *const nt = &timer->it.cpu;
 	struct cpu_timer_list *next;
-	unsigned long i;
 
 	head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
 		p->cpu_timers : p->signal->cpu_timers);
@@ -630,20 +628,11 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 			default:
 				BUG();
 			case CPUCLOCK_VIRT:
-				if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
-					       exp->cpu))
-					break;
-				sig->cputime_expires.virt_exp = exp->cpu;
+				if (expires_gt(sig->cputime_expires.virt_exp, exp->cpu))
+					sig->cputime_expires.virt_exp = exp->cpu;
 				break;
 			case CPUCLOCK_PROF:
-				if (expires_le(sig->it[CPUCLOCK_PROF].expires,
-					       exp->cpu))
-					break;
-				i = sig->rlim[RLIMIT_CPU].rlim_cur;
-				if (i != RLIM_INFINITY &&
-				    i <= cputime_to_secs(exp->cpu))
-					break;
-				sig->cputime_expires.prof_exp = exp->cpu;
+				if (expires_gt(sig->cputime_expires.prof_exp, exp->cpu))
+					sig->cputime_expires.prof_exp = exp->cpu;
 				break;
 			case CPUCLOCK_SCHED:
 				sig->cputime_expires.sched_exp = exp->sched;
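expires_gt(), used above, treats a zero cache slot as "nothing armed"; it is presumably defined earlier in the file along these lines:

/* Sketch of the expires_gt() helper assumed by the hunk above. */
static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
	/* A zero cache value means "not armed", so any new value is earlier. */
	return cputime_eq(expires, cputime_zero) ||
	       cputime_gt(expires, new_exp);
}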
@@ -1386,7 +1375,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 			return 1;
 	}
 
-	return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
+	return 0;
 }
 
 /*
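Returning 0 here is safe because RLIMIT_CPU is now always folded into the cputime_expires cache, which the preceding block of fastpath_timer_check() already tests; roughly (a sketch from memory of the surrounding code):

	if (sig->cputimer.running) {
		struct task_cputime group_sample;

		spin_lock(&sig->cputimer.lock);
		group_sample = sig->cputimer.cputime;
		spin_unlock(&sig->cputimer.lock);

		/*
		 * prof_exp now carries RLIMIT_CPU as well, so this test
		 * covers the rlimit without a separate comparison.
		 */
		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}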
@@ -1452,21 +1441,23 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 }
 
 /*
- * Set one of the process-wide special case CPU timers.
+ * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
  * The tsk->sighand->siglock must be held by the caller.
- * The *newval argument is relative and we update it to be absolute, *oldval
- * is absolute and we update it to be relative.
  */
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 			   cputime_t *newval, cputime_t *oldval)
 {
 	union cpu_time_count now;
-	struct list_head *head;
 
 	BUG_ON(clock_idx == CPUCLOCK_SCHED);
 	cpu_timer_sample_group(clock_idx, tsk, &now);
 
 	if (oldval) {
+		/*
+		 * We are setting itimer. The *oldval is absolute and we update
+		 * it to be relative, *newval argument is relative and we update
+		 * it to be absolute.
+		 */
 		if (!cputime_eq(*oldval, cputime_zero)) {
 			if (cputime_le(*oldval, now.cpu)) {
 				/* Just about to fire. */
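For context on the new comment block: the itimer path reaches set_process_cpu_timer() from set_cpu_itimer() in kernel/itimer.c, passing the relative request as *newval and the currently armed absolute expiry as *oldval; a condensed sketch (details may differ):

	/* set_cpu_itimer(), condensed sketch: */
	nval = timeval_to_cputime(&value->it_value);

	spin_lock_irq(&tsk->sighand->siglock);
	cval = tsk->signal->it[clock_id].expires;	/* absolute */
	if (!cputime_eq(cval, cputime_zero) ||
	    !cputime_eq(nval, cputime_zero))
		set_process_cpu_timer(tsk, clock_id, &nval, &cval);
	tsk->signal->it[clock_id].expires = nval;	/* nval is absolute now */
	spin_unlock_irq(&tsk->sighand->siglock);
	/* cval has been made relative, ready to report as the old value. */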
@@ -1479,33 +1470,21 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 		if (cputime_eq(*newval, cputime_zero))
 			return;
 		*newval = cputime_add(*newval, now.cpu);
-
-		/*
-		 * If the RLIMIT_CPU timer will expire before the
-		 * ITIMER_PROF timer, we have nothing else to do.
-		 */
-		if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
-		    < cputime_to_secs(*newval))
-			return;
 	}
 
 	/*
-	 * Check whether there are any process timers already set to fire
-	 * before this one.  If so, we don't have anything more to do.
+	 * Update expiration cache if we are the earliest timer, or eventually
+	 * RLIMIT_CPU limit is earlier than prof_exp cpu timer expire.
 	 */
-	head = &tsk->signal->cpu_timers[clock_idx];
-	if (list_empty(head) ||
-	    cputime_ge(list_first_entry(head,
-				  struct cpu_timer_list, entry)->expires.cpu,
-		       *newval)) {
-		switch (clock_idx) {
-		case CPUCLOCK_PROF:
+	switch (clock_idx) {
+	case CPUCLOCK_PROF:
+		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
 			tsk->signal->cputime_expires.prof_exp = *newval;
-			break;
-		case CPUCLOCK_VIRT:
+		break;
+	case CPUCLOCK_VIRT:
+		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
 			tsk->signal->cputime_expires.virt_exp = *newval;
-			break;
-		}
+		break;
 	}
 }
 
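The regression tests behind the two marc.info links are not reproduced here; in their spirit, a minimal hypothetical userspace check that RLIMIT_CPU still delivers SIGXCPU at the soft limit:

/* Hypothetical check, not one of the linked test programs: set a 1s
 * RLIMIT_CPU soft limit, burn CPU, and expect SIGXCPU to arrive.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/resource.h>

static volatile sig_atomic_t got_xcpu;

static void on_xcpu(int sig)
{
	got_xcpu = 1;
}

int main(void)
{
	struct rlimit rl = { .rlim_cur = 1, .rlim_max = 2 };

	signal(SIGXCPU, on_xcpu);
	if (setrlimit(RLIMIT_CPU, &rl)) {
		perror("setrlimit");
		return 1;
	}
	while (!got_xcpu)
		;	/* burn CPU until the soft limit fires */

	puts("SIGXCPU delivered");
	return 0;
}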