From 298dbd1c5cd66f0ac85981b83b7d519a5d88d1b8 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 5 May 2015 09:44:24 +0200
Subject: [PATCH] tick: broadcast: Simplify oneshot logic and shorten lock
 region

Simplify the oneshot logic by avoiding the reprogramming loops. That
also allows to call the cpu local handler outside of the broadcast_lock
held region.

Tested-by: Borislav Petkov
Signed-off-by: Thomas Gleixner
---
 kernel/time/tick-broadcast.c | 42 +++++++++++++++---------------------
 1 file changed, 17 insertions(+), 25 deletions(-)

diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 5d9e4aab9797..12fcc55d607a 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -525,18 +525,14 @@ static void tick_broadcast_set_affinity(struct clock_event_device *bc,
 	irq_set_affinity(bc->irq, bc->cpumask);
 }
 
-static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
-				    ktime_t expires, int force)
+static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
+				     ktime_t expires)
 {
-	int ret;
-
 	if (bc->state != CLOCK_EVT_STATE_ONESHOT)
 		clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
 
-	ret = clockevents_program_event(bc, expires, force);
-	if (!ret)
-		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
-	return ret;
+	clockevents_program_event(bc, expires, 1);
+	tick_broadcast_set_affinity(bc, cpumask_of(cpu));
 }
 
 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
@@ -573,9 +569,9 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
 	struct tick_device *td;
 	ktime_t now, next_event;
 	int cpu, next_cpu = 0;
+	bool bc_local;
 
 	raw_spin_lock(&tick_broadcast_lock);
-again:
 	dev->next_event.tv64 = KTIME_MAX;
 	next_event.tv64 = KTIME_MAX;
 	cpumask_clear(tmpmask);
@@ -615,13 +611,9 @@ again:
 		cpumask_and(tmpmask, tmpmask, cpu_online_mask);
 
 	/*
-	 * Wakeup the cpus which have an expired event and handle the
-	 * broadcast event of the local cpu.
+	 * Wakeup the cpus which have an expired event.
 	 */
-	if (tick_do_broadcast(tmpmask)) {
-		td = this_cpu_ptr(&tick_cpu_device);
-		td->evtdev->event_handler(td->evtdev);
-	}
+	bc_local = tick_do_broadcast(tmpmask);
 
 	/*
 	 * Two reasons for reprogram:
@@ -633,15 +625,15 @@ again:
 	 * - There are pending events on sleeping CPUs which were not
 	 *   in the event mask
 	 */
-	if (next_event.tv64 != KTIME_MAX) {
-		/*
-		 * Rearm the broadcast device. If event expired,
-		 * repeat the above
-		 */
-		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
-			goto again;
-	}
+	if (next_event.tv64 != KTIME_MAX)
+		tick_broadcast_set_event(dev, next_cpu, next_event);
+
 	raw_spin_unlock(&tick_broadcast_lock);
+
+	if (bc_local) {
+		td = this_cpu_ptr(&tick_cpu_device);
+		td->evtdev->event_handler(td->evtdev);
+	}
 }
 
 static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
@@ -723,7 +715,7 @@ int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
 			 */
 			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
 			    dev->next_event.tv64 < bc->next_event.tv64)
-				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
+				tick_broadcast_set_event(bc, cpu, dev->next_event);
 		}
 		/*
 		 * If the current CPU owns the hrtimer broadcast
@@ -858,7 +850,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 			clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
 			tick_broadcast_init_next_event(tmpmask,
 						       tick_next_period);
-			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
+			tick_broadcast_set_event(bc, cpu, tick_next_period);
 		} else
 			bc->next_event.tv64 = KTIME_MAX;
 	} else {
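
Not part of the patch: a minimal, self-contained userspace sketch (pthreads, hypothetical names) of the lock-shortening pattern the change adopts in tick_handle_oneshot_broadcast() -- decide under the lock whether the local handler must run, record that in a flag, drop the lock, and only then invoke the handler outside the locked region.

/* Illustrative sketch only; names are hypothetical, not kernel APIs. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the cpu local event handler that used to run under the lock. */
static void local_handler(void)
{
	printf("local handler runs outside the lock\n");
}

/* Stand-in for tick_do_broadcast(): reports whether the local side
 * also has an expired event to handle. */
static bool broadcast_expired_events(void)
{
	return true;
}

static void handle_broadcast(void)
{
	bool bc_local;

	pthread_mutex_lock(&demo_lock);
	/* Do only the bookkeeping that genuinely needs the lock ... */
	bc_local = broadcast_expired_events();
	pthread_mutex_unlock(&demo_lock);

	/* ... and call the potentially long-running local handler
	 * after the lock has been dropped, shortening the lock region. */
	if (bc_local)
		local_handler();
}

int main(void)
{
	handle_broadcast();
	return 0;
}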