Updates for posix timers and related signal code:

* Prepare posix timers selftests for upcoming changes:
 
 	- Check signal behaviour sanity against SIG_IGN
 
 	- Check signal behaviour sanity against timer
	  reprogram/deletion
 
 	- Check SIGEV_NONE pending expiry read
 
	- Check interval timer read on a pending signal
 
 	- Check correct overrun count after signal block/unblock
 
 * Various consolidations:
 
 	- timer get/set
 
 	- signal queue
 
 * Fixes:
 	- Correctly read SIGEV_NONE timers
 
 	- Forward expiry while reading expired interval timers
 	  with pending signal
 
 	- Don't arm SIGEV_NONE timers
 
 * Various cleanups all over the place
 -----BEGIN PGP SIGNATURE-----
 
 iQIyBAABCAAdFiEEd76+gtGM8MbftQlOhSRUR1COjHcFAmaoDw4ACgkQhSRUR1CO
 jHe2vw/2Mhl/VZepQjJXnE23a8JpUBrh9EVMgiiSEL9Ofyuh03XyfAL90GKDzGRu
 TtTurCc2yVZ6aBQonZIHrzyRKIf9YotZgpNu5wv3A4I++TbPiCWJFVP8DwC8Mb2X
 IcUAszDQLhx0pWyvteTY4ampO46EovgHGS/e2hjh+FiQcAvVaudoBd7JBHZ+7El3
 ATIOK5kEUpeYiF/Hjp9HAnB3SFAnwLd5d+3Fs8CHGqRpGoxBgKUf89KchUZ/QUrE
 cz8J96xeXlALzRjHhe424g+LOUlEWyTSAmoJF5+suwozH7N2D+iODKPkxvYEA9XV
 x2kKntBWw7JYd7otLWSeW3ICddnDVWuUo5Yfkf0xVy2LCptgQrqfcOqAlEPkN63i
 02N85K2kNYAiZPN+p62PvE8jAC6odqpJWZ8bY7xnXiV710+35Q6kaIsZ1yGQIsAo
 q/D2OzpHf8ZaePdS8qvNQYIYU1aeg9CXW9hBYkHh5KaXue6g4dKKjS3q4Lh2vYPd
 W1NBTpYC3ErKC2cFTuIjBQJSJALC59zwAWxwq/Z3qYFL+mOrnh2eYgZ7C5MsOc91
 xZtMCX8j3eBcXxxNIlZlPvtsWp0er40CMb+jjBSjji6SA5aMFt+3P8w3pZbcIJBe
 Mt1yes9hXpfH859AgpWUR6z3zKgXLAVK2gqtI5SO8r6naW6/Dw==
 =9GRs
 -----END PGP SIGNATURE-----

Merge tag 'posix-timers-2024-07-29' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/core

Pull updates for posix timers and related signal code from Frederic Weisbecker:

  * Prepare posix timers selftests for upcoming changes:

	- Check signal behaviour sanity against SIG_IGN

	- Check signal behaviour sanity against timer
	  reprogram/deletion

	- Check SIGEV_NONE pending expiry read

	- Check interval timer read on a pending signal

	- Check correct overrun count after signal block/unblock

  * Various consolidations:

	- timer get/set

	- signal queue

  * Fixes:
	- Correctly read SIGEV_NONE timers (see the sketch after this list)

	- Forward expiry while reading expired interval timers
	  with pending signal

	- Don't arm SIGEV_NONE timers

  * Various cleanups all over the place
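
The SIGEV_NONE read behaviour called out above is what the new check_sigev_none() selftest at the end of the diff exercises. A minimal standalone sketch of the same scenario, not part of the patch (plain POSIX timer API; link with -lrt on older glibc):

	#include <signal.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	int main(void)
	{
		/* SIGEV_NONE: the timer never queues a signal, it is only read back */
		struct sigevent sev = { .sigev_notify = SIGEV_NONE };
		struct itimerspec its = {
			.it_value    = { .tv_nsec = 100000000 },	/* first expiry after 100ms */
			.it_interval = { .tv_nsec = 100000000 },	/* then every 100ms */
		};
		timer_t id;

		if (timer_create(CLOCK_MONOTONIC, &sev, &id))
			return 1;
		if (timer_settime(id, 0, &its, NULL))
			return 1;

		/* Let the timer expire several times */
		sleep(1);

		/*
		 * With the "Correctly read SIGEV_NONE timers" fix the expiry is
		 * forwarded on read, so it_value reports the time to the next
		 * period (0 < value <= 100ms) rather than a stale or zero value.
		 */
		if (timer_gettime(id, &its))
			return 1;
		printf("remaining: %lld.%09ld s\n",
		       (long long)its.it_value.tv_sec, (long)its.it_value.tv_nsec);

		return timer_delete(id);
	}

The selftest below runs the same check for CLOCK_MONOTONIC and CLOCK_PROCESS_CPUTIME_ID and expects a non-zero it_value after the expirations.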
Thomas Gleixner 2024-07-30 18:53:38 +02:00
commit 9a7b0158ae
12 changed files with 610 additions and 289 deletions

fs/proc/base.c

@@ -2456,13 +2456,13 @@ static void *timers_start(struct seq_file *m, loff_t *pos)
if (!tp->sighand)
return ERR_PTR(-ESRCH);
return seq_list_start(&tp->task->signal->posix_timers, *pos);
return seq_hlist_start(&tp->task->signal->posix_timers, *pos);
}
static void *timers_next(struct seq_file *m, void *v, loff_t *pos)
{
struct timers_private *tp = m->private;
return seq_list_next(v, &tp->task->signal->posix_timers, pos);
return seq_hlist_next(v, &tp->task->signal->posix_timers, pos);
}
static void timers_stop(struct seq_file *m, void *v)
@@ -2491,7 +2491,7 @@ static int show_timer(struct seq_file *m, void *v)
[SIGEV_THREAD] = "thread",
};
timer = list_entry((struct list_head *)v, struct k_itimer, list);
timer = hlist_entry((struct hlist_node *)v, struct k_itimer, list);
notify = timer->it_sigev_notify;
seq_printf(m, "ID: %d\n", timer->it_id);

fs/signalfd.c

@@ -159,7 +159,7 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info
DECLARE_WAITQUEUE(wait, current);
spin_lock_irq(&current->sighand->siglock);
ret = dequeue_signal(current, &ctx->sigmask, info, &type);
ret = dequeue_signal(&ctx->sigmask, info, &type);
switch (ret) {
case 0:
if (!nonblock)
@@ -174,7 +174,7 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info
add_wait_queue(&current->sighand->signalfd_wqh, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
ret = dequeue_signal(current, &ctx->sigmask, info, &type);
ret = dequeue_signal(&ctx->sigmask, info, &type);
if (ret != 0)
break;
if (signal_pending(current)) {

include/linux/posix-timers.h

@@ -158,7 +158,7 @@ static inline void posix_cputimers_init_work(void) { }
* @rcu: RCU head for freeing the timer.
*/
struct k_itimer {
struct list_head list;
struct hlist_node list;
struct hlist_node t_hash;
spinlock_t it_lock;
const struct k_clock *kclock;

include/linux/sched/signal.h

@@ -137,7 +137,7 @@ struct signal_struct {
/* POSIX.1b Interval Timers */
unsigned int next_posix_timer_id;
struct list_head posix_timers;
struct hlist_head posix_timers;
/* ITIMER_REAL timer for the process */
struct hrtimer real_timer;
@@ -276,8 +276,7 @@ static inline void signal_set_stop_flags(struct signal_struct *sig,
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
kernel_siginfo_t *info, enum pid_type *type);
extern int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type);
static inline int kernel_dequeue_signal(void)
{
@@ -287,7 +286,7 @@ static inline int kernel_dequeue_signal(void)
int ret;
spin_lock_irq(&task->sighand->siglock);
ret = dequeue_signal(task, &task->blocked, &__info, &__type);
ret = dequeue_signal(&task->blocked, &__info, &__type);
spin_unlock_irq(&task->sighand->siglock);
return ret;

init/init_task.c

@@ -29,7 +29,7 @@ static struct signal_struct init_signals = {
.cred_guard_mutex = __MUTEX_INITIALIZER(init_signals.cred_guard_mutex),
.exec_update_lock = __RWSEM_INITIALIZER(init_signals.exec_update_lock),
#ifdef CONFIG_POSIX_TIMERS
.posix_timers = LIST_HEAD_INIT(init_signals.posix_timers),
.posix_timers = HLIST_HEAD_INIT,
.cputimer = {
.cputime_atomic = INIT_CPUTIME_ATOMIC,
},

kernel/fork.c

@@ -1861,7 +1861,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
prev_cputime_init(&sig->prev_cputime);
#ifdef CONFIG_POSIX_TIMERS
INIT_LIST_HEAD(&sig->posix_timers);
INIT_HLIST_HEAD(&sig->posix_timers);
hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
sig->real_timer.function = it_real_fn;
#endif

kernel/signal.c

@@ -618,20 +618,18 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
}
/*
* Dequeue a signal and return the element to the caller, which is
* expected to free it.
*
* All callers have to hold the siglock.
* Try to dequeue a signal. If a deliverable signal is found fill in the
* caller provided siginfo and return the signal number. Otherwise return
* 0.
*/
int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
kernel_siginfo_t *info, enum pid_type *type)
int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type)
{
struct task_struct *tsk = current;
bool resched_timer = false;
int signr;
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
lockdep_assert_held(&tsk->sighand->siglock);
*type = PIDTYPE_PID;
signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
if (!signr) {
@@ -1940,10 +1938,11 @@ struct sigqueue *sigqueue_alloc(void)
void sigqueue_free(struct sigqueue *q)
{
unsigned long flags;
spinlock_t *lock = &current->sighand->siglock;
unsigned long flags;
BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
if (WARN_ON_ONCE(!(q->flags & SIGQUEUE_PREALLOC)))
return;
/*
* We must hold ->siglock while testing q->list
* to serialize with collect_signal() or with
@@ -1971,7 +1970,10 @@ int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
unsigned long flags;
int ret, result;
BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
if (WARN_ON_ONCE(!(q->flags & SIGQUEUE_PREALLOC)))
return 0;
if (WARN_ON_ONCE(q->info.si_code != SI_TIMER))
return 0;
ret = -1;
rcu_read_lock();
@@ -2006,7 +2008,6 @@ int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
* If an SI_TIMER entry is already queue just increment
* the overrun count.
*/
BUG_ON(q->info.si_code != SI_TIMER);
q->info.si_overrun++;
result = TRACE_SIGNAL_ALREADY_PENDING;
goto out;
@@ -2793,8 +2794,7 @@ relock:
type = PIDTYPE_PID;
signr = dequeue_synchronous_signal(&ksig->info);
if (!signr)
signr = dequeue_signal(current, &current->blocked,
&ksig->info, &type);
signr = dequeue_signal(&current->blocked, &ksig->info, &type);
if (!signr)
break; /* will return 0 */
@@ -3648,7 +3648,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
signotset(&mask);
spin_lock_irq(&tsk->sighand->siglock);
sig = dequeue_signal(tsk, &mask, info, &type);
sig = dequeue_signal(&mask, info, &type);
if (!sig && timeout) {
/*
* None ready, temporarily unblock those we're interested
@@ -3667,7 +3667,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
spin_lock_irq(&tsk->sighand->siglock);
__set_task_blocked(tsk, &tsk->real_blocked);
sigemptyset(&tsk->real_blocked);
sig = dequeue_signal(tsk, &mask, info, &type);
sig = dequeue_signal(&mask, info, &type);
}
spin_unlock_irq(&tsk->sighand->siglock);

kernel/time/alarmtimer.c

@@ -574,15 +574,10 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
it.alarm.alarmtimer);
enum alarmtimer_restart result = ALARMTIMER_NORESTART;
unsigned long flags;
int si_private = 0;
spin_lock_irqsave(&ptr->it_lock, flags);
ptr->it_active = 0;
if (ptr->it_interval)
si_private = ++ptr->it_requeue_pending;
if (posix_timer_event(ptr, si_private) && ptr->it_interval) {
if (posix_timer_queue_signal(ptr) && ptr->it_interval) {
/*
* Handle ignored signals and rearm the timer. This will go
* away once we handle ignored signals proper. Ensure that

kernel/time/posix-cpu-timers.c

@@ -453,6 +453,7 @@ static void disarm_timer(struct k_itimer *timer, struct task_struct *p)
struct cpu_timer *ctmr = &timer->it.cpu;
struct posix_cputimer_base *base;
timer->it_active = 0;
if (!cpu_timer_dequeue(ctmr))
return;
@@ -559,6 +560,7 @@ static void arm_timer(struct k_itimer *timer, struct task_struct *p)
struct cpu_timer *ctmr = &timer->it.cpu;
u64 newexp = cpu_timer_getexpires(ctmr);
timer->it_active = 1;
if (!cpu_timer_enqueue(&base->tqhead, ctmr))
return;
@@ -584,12 +586,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
{
struct cpu_timer *ctmr = &timer->it.cpu;
if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
/*
* User don't want any signal.
*/
cpu_timer_setexpires(ctmr, 0);
} else if (unlikely(timer->sigq == NULL)) {
timer->it_active = 0;
if (unlikely(timer->sigq == NULL)) {
/*
* This a special case for clock_nanosleep,
* not a normal timer from sys_timer_create.
@@ -600,9 +598,9 @@ static void cpu_timer_fire(struct k_itimer *timer)
/*
* One-shot timer. Clear it as soon as it's fired.
*/
posix_timer_event(timer, 0);
posix_timer_queue_signal(timer);
cpu_timer_setexpires(ctmr, 0);
} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
} else if (posix_timer_queue_signal(timer)) {
/*
* The signal did not get queued because the signal
* was ignored, so we won't get any callback to
@@ -614,6 +612,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
}
}
static void __posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp, u64 now);
/*
* Guts of sys_timer_settime for CPU timers.
* This is called with the timer locked and interrupts disabled.
@@ -623,9 +623,10 @@ static void cpu_timer_fire(struct k_itimer *timer)
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
struct itimerspec64 *new, struct itimerspec64 *old)
{
bool sigev_none = timer->it_sigev_notify == SIGEV_NONE;
clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
u64 old_expires, new_expires, old_incr, val;
struct cpu_timer *ctmr = &timer->it.cpu;
u64 old_expires, new_expires, now;
struct sighand_struct *sighand;
struct task_struct *p;
unsigned long flags;
@@ -662,10 +663,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
return -ESRCH;
}
/*
* Disarm any old timer after extracting its expiry time.
*/
old_incr = timer->it_interval;
/* Retrieve the current expiry time before disarming the timer */
old_expires = cpu_timer_getexpires(ctmr);
if (unlikely(timer->it.cpu.firing)) {
@@ -673,157 +671,122 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
ret = TIMER_RETRY;
} else {
cpu_timer_dequeue(ctmr);
timer->it_active = 0;
}
/*
* We need to sample the current value to convert the new
* value from to relative and absolute, and to convert the
* old value from absolute to relative. To set a process
* timer, we need a sample to balance the thread expiry
* times (in arm_timer). With an absolute time, we must
* check if it's already passed. In short, we need a sample.
* Sample the current clock for saving the previous setting
* and for rearming the timer.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock))
val = cpu_clock_sample(clkid, p);
now = cpu_clock_sample(clkid, p);
else
val = cpu_clock_sample_group(clkid, p, true);
now = cpu_clock_sample_group(clkid, p, !sigev_none);
/* Retrieve the previous expiry value if requested. */
if (old) {
if (old_expires == 0) {
old->it_value.tv_sec = 0;
old->it_value.tv_nsec = 0;
} else {
/*
* Update the timer in case it has overrun already.
* If it has, we'll report it as having overrun and
* with the next reloaded timer already ticking,
* though we are swallowing that pending
* notification here to install the new setting.
*/
u64 exp = bump_cpu_timer(timer, val);
if (val < exp) {
old_expires = exp - val;
old->it_value = ns_to_timespec64(old_expires);
} else {
old->it_value.tv_nsec = 1;
old->it_value.tv_sec = 0;
}
}
old->it_value = (struct timespec64){ };
if (old_expires)
__posix_cpu_timer_get(timer, old, now);
}
/* Retry if the timer expiry is running concurrently */
if (unlikely(ret)) {
/*
* We are colliding with the timer actually firing.
* Punt after filling in the timer's old value, and
* disable this firing since we are already reporting
* it as an overrun (thanks to bump_cpu_timer above).
*/
unlock_task_sighand(p, &flags);
goto out;
}
if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
new_expires += val;
}
/* Convert relative expiry time to absolute */
if (new_expires && !(timer_flags & TIMER_ABSTIME))
new_expires += now;
/* Set the new expiry time (might be 0) */
cpu_timer_setexpires(ctmr, new_expires);
/*
* Install the new expiry time (or zero).
* For a timer with no notification action, we don't actually
* arm the timer (we'll just fake it for timer_gettime).
* Arm the timer if it is not disabled, the new expiry value has
* not yet expired and the timer requires signal delivery.
* SIGEV_NONE timers are never armed. In case the timer is not
* armed, enforce the reevaluation of the timer base so that the
* process wide cputime counter can be disabled eventually.
*/
cpu_timer_setexpires(ctmr, new_expires);
if (new_expires != 0 && val < new_expires) {
arm_timer(timer, p);
if (likely(!sigev_none)) {
if (new_expires && now < new_expires)
arm_timer(timer, p);
else
trigger_base_recalc_expires(timer, p);
}
unlock_task_sighand(p, &flags);
/*
* Install the new reload setting, and
* set up the signal and overrun bookkeeping.
*/
timer->it_interval = timespec64_to_ktime(new->it_interval);
posix_timer_set_common(timer, new);
/*
* This acts as a modification timestamp for the timer,
* so any automatic reload attempt will punt on seeing
* that we have reset the timer manually.
* If the new expiry time was already in the past the timer was not
* queued. Fire it immediately even if the thread never runs to
* accumulate more time on this clock.
*/
timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
~REQUEUE_PENDING;
timer->it_overrun_last = 0;
timer->it_overrun = -1;
if (val >= new_expires) {
if (new_expires != 0) {
/*
* The designated time already passed, so we notify
* immediately, even if the thread never runs to
* accumulate more time on this clock.
*/
cpu_timer_fire(timer);
}
/*
* Make sure we don't keep around the process wide cputime
* counter or the tick dependency if they are not necessary.
*/
sighand = lock_task_sighand(p, &flags);
if (!sighand)
goto out;
if (!cpu_timer_queued(ctmr))
trigger_base_recalc_expires(timer, p);
unlock_task_sighand(p, &flags);
}
out:
if (!sigev_none && new_expires && now >= new_expires)
cpu_timer_fire(timer);
out:
rcu_read_unlock();
if (old)
old->it_interval = ns_to_timespec64(old_incr);
return ret;
}
static void __posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp, u64 now)
{
bool sigev_none = timer->it_sigev_notify == SIGEV_NONE;
u64 expires, iv = timer->it_interval;
/*
* Make sure that interval timers are moved forward for the
* following cases:
* - SIGEV_NONE timers which are never armed
* - Timers which expired, but the signal has not yet been
* delivered
*/
if (iv && ((timer->it_requeue_pending & REQUEUE_PENDING) || sigev_none))
expires = bump_cpu_timer(timer, now);
else
expires = cpu_timer_getexpires(&timer->it.cpu);
/*
* Expired interval timers cannot have a remaining time <= 0.
* The kernel has to move them forward so that the next
* timer expiry is > @now.
*/
if (now < expires) {
itp->it_value = ns_to_timespec64(expires - now);
} else {
/*
* A single shot SIGEV_NONE timer must return 0, when it is
* expired! Timers which have a real signal delivery mode
* must return a remaining time greater than 0 because the
* signal has not yet been delivered.
*/
if (!sigev_none)
itp->it_value.tv_nsec = 1;
}
}
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
struct cpu_timer *ctmr = &timer->it.cpu;
u64 now, expires = cpu_timer_getexpires(ctmr);
struct task_struct *p;
u64 now;
rcu_read_lock();
p = cpu_timer_task_rcu(timer);
if (!p)
goto out;
if (p && cpu_timer_getexpires(&timer->it.cpu)) {
itp->it_interval = ktime_to_timespec64(timer->it_interval);
/*
* Easy part: convert the reload time.
*/
itp->it_interval = ktime_to_timespec64(timer->it_interval);
if (CPUCLOCK_PERTHREAD(timer->it_clock))
now = cpu_clock_sample(clkid, p);
else
now = cpu_clock_sample_group(clkid, p, false);
if (!expires)
goto out;
/*
* Sample the clock to take the difference with the expiry time.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock))
now = cpu_clock_sample(clkid, p);
else
now = cpu_clock_sample_group(clkid, p, false);
if (now < expires) {
itp->it_value = ns_to_timespec64(expires - now);
} else {
/*
* The timer should have expired already, but the firing
* hasn't taken place yet. Say it's just about to expire.
*/
itp->it_value.tv_nsec = 1;
itp->it_value.tv_sec = 0;
__posix_cpu_timer_get(timer, itp, now);
}
out:
rcu_read_unlock();
}

kernel/time/posix-timers.c

@@ -277,10 +277,17 @@ void posixtimer_rearm(struct kernel_siginfo *info)
unlock_timer(timr, flags);
}
int posix_timer_event(struct k_itimer *timr, int si_private)
int posix_timer_queue_signal(struct k_itimer *timr)
{
int ret, si_private = 0;
enum pid_type type;
int ret;
lockdep_assert_held(&timr->it_lock);
timr->it_active = 0;
if (timr->it_interval)
si_private = ++timr->it_requeue_pending;
/*
* FIXME: if ->sigq is queued we can race with
* dequeue_signal()->posixtimer_rearm().
@@ -309,19 +316,13 @@
*/
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
struct k_itimer *timr = container_of(timer, struct k_itimer, it.real.timer);
enum hrtimer_restart ret = HRTIMER_NORESTART;
struct k_itimer *timr;
unsigned long flags;
int si_private = 0;
timr = container_of(timer, struct k_itimer, it.real.timer);
spin_lock_irqsave(&timr->it_lock, flags);
timr->it_active = 0;
if (timr->it_interval != 0)
si_private = ++timr->it_requeue_pending;
if (posix_timer_event(timr, si_private)) {
if (posix_timer_queue_signal(timr)) {
/*
* The signal was not queued due to SIG_IGN. As a
* consequence the timer is not going to be rearmed from
@@ -515,7 +516,7 @@ static int do_timer_create(clockid_t which_clock, struct sigevent *event,
spin_lock_irq(&current->sighand->siglock);
/* This makes the timer valid in the hash table */
WRITE_ONCE(new_timer->it_signal, current->signal);
list_add(&new_timer->list, &current->signal->posix_timers);
hlist_add_head(&new_timer->list, &current->signal->posix_timers);
spin_unlock_irq(&current->sighand->siglock);
/*
* After unlocking sighand::siglock @new_timer is subject to
@@ -856,6 +857,23 @@ static struct k_itimer *timer_wait_running(struct k_itimer *timer,
return lock_timer(timer_id, flags);
}
/*
* Set up the new interval and reset the signal delivery data
*/
void posix_timer_set_common(struct k_itimer *timer, struct itimerspec64 *new_setting)
{
if (new_setting->it_value.tv_sec || new_setting->it_value.tv_nsec)
timer->it_interval = timespec64_to_ktime(new_setting->it_interval);
else
timer->it_interval = 0;
/* Prevent reloading in case there is a signal pending */
timer->it_requeue_pending = (timer->it_requeue_pending + 2) & ~REQUEUE_PENDING;
/* Reset overrun accounting */
timer->it_overrun_last = 0;
timer->it_overrun = -1LL;
}
/* Set a POSIX.1b interval timer. */
int common_timer_set(struct k_itimer *timr, int flags,
struct itimerspec64 *new_setting,
@@ -878,15 +896,12 @@ int common_timer_set(struct k_itimer *timr, int flags,
return TIMER_RETRY;
timr->it_active = 0;
timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
~REQUEUE_PENDING;
timr->it_overrun_last = 0;
posix_timer_set_common(timr, new_setting);
/* Switch off the timer when it_value is zero */
/* Keep timer disarmed when it_value is zero */
if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
return 0;
timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
expires = timespec64_to_ktime(new_setting->it_value);
if (flags & TIMER_ABSTIME)
expires = timens_ktime_to_host(timr->it_clock, expires);
@@ -904,7 +919,7 @@ static int do_timer_settime(timer_t timer_id, int tmr_flags,
const struct k_clock *kc;
struct k_itimer *timr;
unsigned long flags;
int error = 0;
int error;
if (!timespec64_valid(&new_spec64->it_interval) ||
!timespec64_valid(&new_spec64->it_value))
@@ -918,6 +933,9 @@ retry:
if (!timr)
return -EINVAL;
if (old_spec64)
old_spec64->it_interval = ktime_to_timespec64(timr->it_interval);
kc = timr->kclock;
if (WARN_ON_ONCE(!kc || !kc->timer_set))
error = -EINVAL;
@@ -1021,7 +1039,7 @@ retry_delete:
}
spin_lock(&current->sighand->siglock);
list_del(&timer->list);
hlist_del(&timer->list);
spin_unlock(&current->sighand->siglock);
/*
* A concurrent lookup could check timer::it_signal lockless. It
@@ -1071,7 +1089,7 @@ retry_delete:
goto retry_delete;
}
list_del(&timer->list);
hlist_del(&timer->list);
/*
* Setting timer::it_signal to NULL is technically not required
@@ -1092,22 +1110,19 @@
*/
void exit_itimers(struct task_struct *tsk)
{
struct list_head timers;
struct k_itimer *tmr;
struct hlist_head timers;
if (list_empty(&tsk->signal->posix_timers))
if (hlist_empty(&tsk->signal->posix_timers))
return;
/* Protect against concurrent read via /proc/$PID/timers */
spin_lock_irq(&tsk->sighand->siglock);
list_replace_init(&tsk->signal->posix_timers, &timers);
hlist_move_list(&tsk->signal->posix_timers, &timers);
spin_unlock_irq(&tsk->sighand->siglock);
/* The timers are not longer accessible via tsk::signal */
while (!list_empty(&timers)) {
tmr = list_first_entry(&timers, struct k_itimer, list);
itimer_delete(tmr);
}
while (!hlist_empty(&timers))
itimer_delete(hlist_entry(timers.first, struct k_itimer, list));
}
SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,

kernel/time/posix-timers.h

@@ -36,10 +36,11 @@ extern const struct k_clock clock_process
extern const struct k_clock clock_thread;
extern const struct k_clock alarm_clock;
int posix_timer_event(struct k_itimer *timr, int si_private);
int posix_timer_queue_signal(struct k_itimer *timr);
void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting);
int common_timer_set(struct k_itimer *timr, int flags,
struct itimerspec64 *new_setting,
struct itimerspec64 *old_setting);
void posix_timer_set_common(struct k_itimer *timer, struct itimerspec64 *new_setting);
int common_timer_del(struct k_itimer *timer);

tools/testing/selftests/timers/posix_timers.c

@@ -6,10 +6,13 @@
*
* Kernel loop code stolen from Steven Rostedt <srostedt@redhat.com>
*/
#define _GNU_SOURCE
#include <sys/time.h>
#include <sys/types.h>
#include <stdio.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include <pthread.h>
@@ -18,6 +21,21 @@
#define DELAY 2
#define USECS_PER_SEC 1000000
#define NSECS_PER_SEC 1000000000
static void __fatal_error(const char *test, const char *name, const char *what)
{
char buf[64];
strerror_r(errno, buf, sizeof(buf));
if (name && strlen(name))
ksft_exit_fail_msg("%s %s %s %s\n", test, name, what, buf);
else
ksft_exit_fail_msg("%s %s %s\n", test, what, buf);
}
#define fatal_error(name, what) __fatal_error(__func__, name, what)
static volatile int done;
@@ -74,24 +92,13 @@ static int check_diff(struct timeval start, struct timeval end)
return 0;
}
static int check_itimer(int which)
static void check_itimer(int which, const char *name)
{
const char *name;
int err;
struct timeval start, end;
struct itimerval val = {
.it_value.tv_sec = DELAY,
};
if (which == ITIMER_VIRTUAL)
name = "ITIMER_VIRTUAL";
else if (which == ITIMER_PROF)
name = "ITIMER_PROF";
else if (which == ITIMER_REAL)
name = "ITIMER_REAL";
else
return -1;
done = 0;
if (which == ITIMER_VIRTUAL)
@@ -101,17 +108,11 @@ static int check_itimer(int which)
else if (which == ITIMER_REAL)
signal(SIGALRM, sig_handler);
err = gettimeofday(&start, NULL);
if (err < 0) {
ksft_perror("Can't call gettimeofday()");
return -1;
}
if (gettimeofday(&start, NULL) < 0)
fatal_error(name, "gettimeofday()");
err = setitimer(which, &val, NULL);
if (err < 0) {
ksft_perror("Can't set timer");
return -1;
}
if (setitimer(which, &val, NULL) < 0)
fatal_error(name, "setitimer()");
if (which == ITIMER_VIRTUAL)
user_loop();
@@ -120,68 +121,41 @@ static int check_itimer(int which)
else if (which == ITIMER_REAL)
idle_loop();
err = gettimeofday(&end, NULL);
if (err < 0) {
ksft_perror("Can't call gettimeofday()");
return -1;
}
if (gettimeofday(&end, NULL) < 0)
fatal_error(name, "gettimeofday()");
ksft_test_result(check_diff(start, end) == 0, "%s\n", name);
return 0;
}
static int check_timer_create(int which)
static void check_timer_create(int which, const char *name)
{
const char *type;
int err;
timer_t id;
struct timeval start, end;
struct itimerspec val = {
.it_value.tv_sec = DELAY,
};
if (which == CLOCK_THREAD_CPUTIME_ID) {
type = "thread";
} else if (which == CLOCK_PROCESS_CPUTIME_ID) {
type = "process";
} else {
ksft_print_msg("Unknown timer_create() type %d\n", which);
return -1;
}
timer_t id;
done = 0;
err = timer_create(which, NULL, &id);
if (err < 0) {
ksft_perror("Can't create timer");
return -1;
}
signal(SIGALRM, sig_handler);
err = gettimeofday(&start, NULL);
if (err < 0) {
ksft_perror("Can't call gettimeofday()");
return -1;
}
if (timer_create(which, NULL, &id) < 0)
fatal_error(name, "timer_create()");
err = timer_settime(id, 0, &val, NULL);
if (err < 0) {
ksft_perror("Can't set timer");
return -1;
}
if (signal(SIGALRM, sig_handler) == SIG_ERR)
fatal_error(name, "signal()");
if (gettimeofday(&start, NULL) < 0)
fatal_error(name, "gettimeofday()");
if (timer_settime(id, 0, &val, NULL) < 0)
fatal_error(name, "timer_settime()");
user_loop();
err = gettimeofday(&end, NULL);
if (err < 0) {
ksft_perror("Can't call gettimeofday()");
return -1;
}
if (gettimeofday(&end, NULL) < 0)
fatal_error(name, "gettimeofday()");
ksft_test_result(check_diff(start, end) == 0,
"timer_create() per %s\n", type);
return 0;
"timer_create() per %s\n", name);
}
static pthread_t ctd_thread;
@@ -209,15 +183,14 @@ static void *ctd_thread_func(void *arg)
ctd_count = 100;
if (timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id))
return "Can't create timer\n";
fatal_error(NULL, "timer_create()");
if (timer_settime(id, 0, &val, NULL))
return "Can't set timer\n";
fatal_error(NULL, "timer_settime()");
while (ctd_count > 0 && !ctd_failed)
;
if (timer_delete(id))
return "Can't delete timer\n";
fatal_error(NULL, "timer_delete()");
return NULL;
}
@@ -225,19 +198,16 @@ static void *ctd_thread_func(void *arg)
/*
* Test that only the running thread receives the timer signal.
*/
static int check_timer_distribution(void)
static void check_timer_distribution(void)
{
const char *errmsg;
if (signal(SIGALRM, ctd_sighandler) == SIG_ERR)
fatal_error(NULL, "signal()");
signal(SIGALRM, ctd_sighandler);
errmsg = "Can't create thread\n";
if (pthread_create(&ctd_thread, NULL, ctd_thread_func, NULL))
goto err;
fatal_error(NULL, "pthread_create()");
errmsg = "Can't join thread\n";
if (pthread_join(ctd_thread, (void **)&errmsg) || errmsg)
goto err;
if (pthread_join(ctd_thread, NULL))
fatal_error(NULL, "pthread_join()");
if (!ctd_failed)
ksft_test_result_pass("check signal distribution\n");
@@ -245,31 +215,399 @@ static int check_timer_distribution(void)
ksft_test_result_fail("check signal distribution\n");
else
ksft_test_result_skip("check signal distribution (old kernel)\n");
return 0;
err:
ksft_print_msg("%s", errmsg);
return -1;
}
struct tmrsig {
int signals;
int overruns;
};
static void siginfo_handler(int sig, siginfo_t *si, void *uc)
{
struct tmrsig *tsig = si ? si->si_ptr : NULL;
if (tsig) {
tsig->signals++;
tsig->overruns += si->si_overrun;
}
}
static void *ignore_thread(void *arg)
{
unsigned int *tid = arg;
sigset_t set;
sigemptyset(&set);
sigaddset(&set, SIGUSR1);
if (sigprocmask(SIG_BLOCK, &set, NULL))
fatal_error(NULL, "sigprocmask(SIG_BLOCK)");
*tid = gettid();
sleep(100);
if (sigprocmask(SIG_UNBLOCK, &set, NULL))
fatal_error(NULL, "sigprocmask(SIG_UNBLOCK)");
return NULL;
}
static void check_sig_ign(int thread)
{
struct tmrsig tsig = { };
struct itimerspec its;
unsigned int tid = 0;
struct sigaction sa;
struct sigevent sev;
pthread_t pthread;
timer_t timerid;
sigset_t set;
if (thread) {
if (pthread_create(&pthread, NULL, ignore_thread, &tid))
fatal_error(NULL, "pthread_create()");
sleep(1);
}
sa.sa_flags = SA_SIGINFO;
sa.sa_sigaction = siginfo_handler;
sigemptyset(&sa.sa_mask);
if (sigaction(SIGUSR1, &sa, NULL))
fatal_error(NULL, "sigaction()");
/* Block the signal */
sigemptyset(&set);
sigaddset(&set, SIGUSR1);
if (sigprocmask(SIG_BLOCK, &set, NULL))
fatal_error(NULL, "sigprocmask(SIG_BLOCK)");
memset(&sev, 0, sizeof(sev));
sev.sigev_notify = SIGEV_SIGNAL;
sev.sigev_signo = SIGUSR1;
sev.sigev_value.sival_ptr = &tsig;
if (thread) {
sev.sigev_notify = SIGEV_THREAD_ID;
sev._sigev_un._tid = tid;
}
if (timer_create(CLOCK_MONOTONIC, &sev, &timerid))
fatal_error(NULL, "timer_create()");
/* Start the timer to expire in 100ms and 100ms intervals */
its.it_value.tv_sec = 0;
its.it_value.tv_nsec = 100000000;
its.it_interval.tv_sec = 0;
its.it_interval.tv_nsec = 100000000;
timer_settime(timerid, 0, &its, NULL);
sleep(1);
/* Set the signal to be ignored */
if (signal(SIGUSR1, SIG_IGN) == SIG_ERR)
fatal_error(NULL, "signal(SIG_IGN)");
sleep(1);
if (thread) {
/* Stop the thread first. No signal should be delivered to it */
if (pthread_cancel(pthread))
fatal_error(NULL, "pthread_cancel()");
if (pthread_join(pthread, NULL))
fatal_error(NULL, "pthread_join()");
}
/* Restore the handler */
if (sigaction(SIGUSR1, &sa, NULL))
fatal_error(NULL, "sigaction()");
sleep(1);
/* Unblock it, which should deliver the signal in the !thread case*/
if (sigprocmask(SIG_UNBLOCK, &set, NULL))
fatal_error(NULL, "sigprocmask(SIG_UNBLOCK)");
if (timer_delete(timerid))
fatal_error(NULL, "timer_delete()");
if (!thread) {
ksft_test_result(tsig.signals == 1 && tsig.overruns == 29,
"check_sig_ign SIGEV_SIGNAL\n");
} else {
ksft_test_result(tsig.signals == 0 && tsig.overruns == 0,
"check_sig_ign SIGEV_THREAD_ID\n");
}
}
static void check_rearm(void)
{
struct tmrsig tsig = { };
struct itimerspec its;
struct sigaction sa;
struct sigevent sev;
timer_t timerid;
sigset_t set;
sa.sa_flags = SA_SIGINFO;
sa.sa_sigaction = siginfo_handler;
sigemptyset(&sa.sa_mask);
if (sigaction(SIGUSR1, &sa, NULL))
fatal_error(NULL, "sigaction()");
/* Block the signal */
sigemptyset(&set);
sigaddset(&set, SIGUSR1);
if (sigprocmask(SIG_BLOCK, &set, NULL))
fatal_error(NULL, "sigprocmask(SIG_BLOCK)");
memset(&sev, 0, sizeof(sev));
sev.sigev_notify = SIGEV_SIGNAL;
sev.sigev_signo = SIGUSR1;
sev.sigev_value.sival_ptr = &tsig;
if (timer_create(CLOCK_MONOTONIC, &sev, &timerid))
fatal_error(NULL, "timer_create()");
/* Start the timer to expire in 100ms and 100ms intervals */
its.it_value.tv_sec = 0;
its.it_value.tv_nsec = 100000000;
its.it_interval.tv_sec = 0;
its.it_interval.tv_nsec = 100000000;
if (timer_settime(timerid, 0, &its, NULL))
fatal_error(NULL, "timer_settime()");
sleep(1);
/* Reprogram the timer to single shot */
its.it_value.tv_sec = 10;
its.it_value.tv_nsec = 0;
its.it_interval.tv_sec = 0;
its.it_interval.tv_nsec = 0;
if (timer_settime(timerid, 0, &its, NULL))
fatal_error(NULL, "timer_settime()");
/* Unblock it, which should not deliver a signal */
if (sigprocmask(SIG_UNBLOCK, &set, NULL))
fatal_error(NULL, "sigprocmask(SIG_UNBLOCK)");
if (timer_delete(timerid))
fatal_error(NULL, "timer_delete()");
ksft_test_result(!tsig.signals, "check_rearm\n");
}
static void check_delete(void)
{
struct tmrsig tsig = { };
struct itimerspec its;
struct sigaction sa;
struct sigevent sev;
timer_t timerid;
sigset_t set;
sa.sa_flags = SA_SIGINFO;
sa.sa_sigaction = siginfo_handler;
sigemptyset(&sa.sa_mask);
if (sigaction(SIGUSR1, &sa, NULL))
fatal_error(NULL, "sigaction()");
/* Block the signal */
sigemptyset(&set);
sigaddset(&set, SIGUSR1);
if (sigprocmask(SIG_BLOCK, &set, NULL))
fatal_error(NULL, "sigprocmask(SIG_BLOCK)");
memset(&sev, 0, sizeof(sev));
sev.sigev_notify = SIGEV_SIGNAL;
sev.sigev_signo = SIGUSR1;
sev.sigev_value.sival_ptr = &tsig;
if (timer_create(CLOCK_MONOTONIC, &sev, &timerid))
fatal_error(NULL, "timer_create()");
/* Start the timer to expire in 100ms and 100ms intervals */
its.it_value.tv_sec = 0;
its.it_value.tv_nsec = 100000000;
its.it_interval.tv_sec = 0;
its.it_interval.tv_nsec = 100000000;
if (timer_settime(timerid, 0, &its, NULL))
fatal_error(NULL, "timer_settime()");
sleep(1);
if (timer_delete(timerid))
fatal_error(NULL, "timer_delete()");
/* Unblock it, which should not deliver a signal */
if (sigprocmask(SIG_UNBLOCK, &set, NULL))
fatal_error(NULL, "sigprocmask(SIG_UNBLOCK)");
ksft_test_result(!tsig.signals, "check_delete\n");
}
static inline int64_t calcdiff_ns(struct timespec t1, struct timespec t2)
{
int64_t diff;
diff = NSECS_PER_SEC * (int64_t)((int) t1.tv_sec - (int) t2.tv_sec);
diff += ((int) t1.tv_nsec - (int) t2.tv_nsec);
return diff;
}
static void check_sigev_none(int which, const char *name)
{
struct timespec start, now;
struct itimerspec its;
struct sigevent sev;
timer_t timerid;
memset(&sev, 0, sizeof(sev));
sev.sigev_notify = SIGEV_NONE;
if (timer_create(which, &sev, &timerid))
fatal_error(name, "timer_create()");
/* Start the timer to expire in 100ms and 100ms intervals */
its.it_value.tv_sec = 0;
its.it_value.tv_nsec = 100000000;
its.it_interval.tv_sec = 0;
its.it_interval.tv_nsec = 100000000;
timer_settime(timerid, 0, &its, NULL);
if (clock_gettime(which, &start))
fatal_error(name, "clock_gettime()");
do {
if (clock_gettime(which, &now))
fatal_error(name, "clock_gettime()");
} while (calcdiff_ns(now, start) < NSECS_PER_SEC);
if (timer_gettime(timerid, &its))
fatal_error(name, "timer_gettime()");
if (timer_delete(timerid))
fatal_error(name, "timer_delete()");
ksft_test_result(its.it_value.tv_sec || its.it_value.tv_nsec,
"check_sigev_none %s\n", name);
}
static void check_gettime(int which, const char *name)
{
struct itimerspec its, prev;
struct timespec start, now;
struct sigevent sev;
timer_t timerid;
int wraps = 0;
sigset_t set;
/* Block the signal */
sigemptyset(&set);
sigaddset(&set, SIGUSR1);
if (sigprocmask(SIG_BLOCK, &set, NULL))
fatal_error(name, "sigprocmask(SIG_BLOCK)");
memset(&sev, 0, sizeof(sev));
sev.sigev_notify = SIGEV_SIGNAL;
sev.sigev_signo = SIGUSR1;
if (timer_create(which, &sev, &timerid))
fatal_error(name, "timer_create()");
/* Start the timer to expire in 100ms and 100ms intervals */
its.it_value.tv_sec = 0;
its.it_value.tv_nsec = 100000000;
its.it_interval.tv_sec = 0;
its.it_interval.tv_nsec = 100000000;
if (timer_settime(timerid, 0, &its, NULL))
fatal_error(name, "timer_settime()");
if (timer_gettime(timerid, &prev))
fatal_error(name, "timer_gettime()");
if (clock_gettime(which, &start))
fatal_error(name, "clock_gettime()");
do {
if (clock_gettime(which, &now))
fatal_error(name, "clock_gettime()");
if (timer_gettime(timerid, &its))
fatal_error(name, "timer_gettime()");
if (its.it_value.tv_nsec > prev.it_value.tv_nsec)
wraps++;
prev = its;
} while (calcdiff_ns(now, start) < NSECS_PER_SEC);
if (timer_delete(timerid))
fatal_error(name, "timer_delete()");
ksft_test_result(wraps > 1, "check_gettime %s\n", name);
}
static void check_overrun(int which, const char *name)
{
struct timespec start, now;
struct tmrsig tsig = { };
struct itimerspec its;
struct sigaction sa;
struct sigevent sev;
timer_t timerid;
sigset_t set;
sa.sa_flags = SA_SIGINFO;
sa.sa_sigaction = siginfo_handler;
sigemptyset(&sa.sa_mask);
if (sigaction(SIGUSR1, &sa, NULL))
fatal_error(name, "sigaction()");
/* Block the signal */
sigemptyset(&set);
sigaddset(&set, SIGUSR1);
if (sigprocmask(SIG_BLOCK, &set, NULL))
fatal_error(name, "sigprocmask(SIG_BLOCK)");
memset(&sev, 0, sizeof(sev));
sev.sigev_notify = SIGEV_SIGNAL;
sev.sigev_signo = SIGUSR1;
sev.sigev_value.sival_ptr = &tsig;
if (timer_create(which, &sev, &timerid))
fatal_error(name, "timer_create()");
/* Start the timer to expire in 100ms and 100ms intervals */
its.it_value.tv_sec = 0;
its.it_value.tv_nsec = 100000000;
its.it_interval.tv_sec = 0;
its.it_interval.tv_nsec = 100000000;
if (timer_settime(timerid, 0, &its, NULL))
fatal_error(name, "timer_settime()");
if (clock_gettime(which, &start))
fatal_error(name, "clock_gettime()");
do {
if (clock_gettime(which, &now))
fatal_error(name, "clock_gettime()");
} while (calcdiff_ns(now, start) < NSECS_PER_SEC);
/* Unblock it, which should deliver a signal */
if (sigprocmask(SIG_UNBLOCK, &set, NULL))
fatal_error(name, "sigprocmask(SIG_UNBLOCK)");
if (timer_delete(timerid))
fatal_error(name, "timer_delete()");
ksft_test_result(tsig.signals == 1 && tsig.overruns == 9,
"check_overrun %s\n", name);
}
int main(int argc, char **argv)
{
ksft_print_header();
ksft_set_plan(6);
ksft_set_plan(18);
ksft_print_msg("Testing posix timers. False negative may happen on CPU execution \n");
ksft_print_msg("based timers if other threads run on the CPU...\n");
if (check_itimer(ITIMER_VIRTUAL) < 0)
ksft_exit_fail();
if (check_itimer(ITIMER_PROF) < 0)
ksft_exit_fail();
if (check_itimer(ITIMER_REAL) < 0)
ksft_exit_fail();
if (check_timer_create(CLOCK_THREAD_CPUTIME_ID) < 0)
ksft_exit_fail();
check_itimer(ITIMER_VIRTUAL, "ITIMER_VIRTUAL");
check_itimer(ITIMER_PROF, "ITIMER_PROF");
check_itimer(ITIMER_REAL, "ITIMER_REAL");
check_timer_create(CLOCK_THREAD_CPUTIME_ID, "CLOCK_THREAD_CPUTIME_ID");
/*
* It's unfortunately hard to reliably test a timer expiration
@@ -280,11 +618,21 @@ int main(int argc, char **argv)
* to ensure true parallelism. So test only one thread until we
* find a better solution.
*/
if (check_timer_create(CLOCK_PROCESS_CPUTIME_ID) < 0)
ksft_exit_fail();
check_timer_create(CLOCK_PROCESS_CPUTIME_ID, "CLOCK_PROCESS_CPUTIME_ID");
check_timer_distribution();
if (check_timer_distribution() < 0)
ksft_exit_fail();
check_sig_ign(0);
check_sig_ign(1);
check_rearm();
check_delete();
check_sigev_none(CLOCK_MONOTONIC, "CLOCK_MONOTONIC");
check_sigev_none(CLOCK_PROCESS_CPUTIME_ID, "CLOCK_PROCESS_CPUTIME_ID");
check_gettime(CLOCK_MONOTONIC, "CLOCK_MONOTONIC");
check_gettime(CLOCK_PROCESS_CPUTIME_ID, "CLOCK_PROCESS_CPUTIME_ID");
check_gettime(CLOCK_THREAD_CPUTIME_ID, "CLOCK_THREAD_CPUTIME_ID");
check_overrun(CLOCK_MONOTONIC, "CLOCK_MONOTONIC");
check_overrun(CLOCK_PROCESS_CPUTIME_ID, "CLOCK_PROCESS_CPUTIME_ID");
check_overrun(CLOCK_THREAD_CPUTIME_ID, "CLOCK_THREAD_CPUTIME_ID");
ksft_finished();
}