clockevents: Convert to raw_spinlock
Convert locks which cannot be sleeping locks in preempt-rt to raw_spinlocks. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Acked-by: Peter Zijlstra <peterz@infradead.org> Acked-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent d192c47f25
commit b5f91da0a6
| @@ -30,7 +30,7 @@ static LIST_HEAD(clockevents_released); | ||||
| static RAW_NOTIFIER_HEAD(clockevents_chain); | ||||
| 
 | ||||
| /* Protection for the above */ | ||||
| static DEFINE_SPINLOCK(clockevents_lock); | ||||
| static DEFINE_RAW_SPINLOCK(clockevents_lock); | ||||
| 
 | ||||
| /**
 | ||||
|  * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds | ||||
| @@ -141,9 +141,9 @@ int clockevents_register_notifier(struct notifier_block *nb) | ||||
| 	unsigned long flags; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&clockevents_lock, flags); | ||||
| 	raw_spin_lock_irqsave(&clockevents_lock, flags); | ||||
| 	ret = raw_notifier_chain_register(&clockevents_chain, nb); | ||||
| 	spin_unlock_irqrestore(&clockevents_lock, flags); | ||||
| 	raw_spin_unlock_irqrestore(&clockevents_lock, flags); | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| @@ -185,13 +185,13 @@ void clockevents_register_device(struct clock_event_device *dev) | ||||
| 	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); | ||||
| 	BUG_ON(!dev->cpumask); | ||||
| 
 | ||||
| 	spin_lock_irqsave(&clockevents_lock, flags); | ||||
| 	raw_spin_lock_irqsave(&clockevents_lock, flags); | ||||
| 
 | ||||
| 	list_add(&dev->list, &clockevent_devices); | ||||
| 	clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); | ||||
| 	clockevents_notify_released(); | ||||
| 
 | ||||
| 	spin_unlock_irqrestore(&clockevents_lock, flags); | ||||
| 	raw_spin_unlock_irqrestore(&clockevents_lock, flags); | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(clockevents_register_device); | ||||
| 
 | ||||
| @@ -241,7 +241,7 @@ void clockevents_notify(unsigned long reason, void *arg) | ||||
| 	struct list_head *node, *tmp; | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&clockevents_lock, flags); | ||||
| 	raw_spin_lock_irqsave(&clockevents_lock, flags); | ||||
| 	clockevents_do_notify(reason, arg); | ||||
| 
 | ||||
| 	switch (reason) { | ||||
| @@ -256,7 +256,7 @@ void clockevents_notify(unsigned long reason, void *arg) | ||||
| 	default: | ||||
| 		break; | ||||
| 	} | ||||
| 	spin_unlock_irqrestore(&clockevents_lock, flags); | ||||
| 	raw_spin_unlock_irqrestore(&clockevents_lock, flags); | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(clockevents_notify); | ||||
| #endif | ||||
|  | ||||
| @@ -31,7 +31,7 @@ static struct tick_device tick_broadcast_device; | ||||
| /* FIXME: Use cpumask_var_t. */ | ||||
| static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS); | ||||
| static DECLARE_BITMAP(tmpmask, NR_CPUS); | ||||
| static DEFINE_SPINLOCK(tick_broadcast_lock); | ||||
| static DEFINE_RAW_SPINLOCK(tick_broadcast_lock); | ||||
| static int tick_broadcast_force; | ||||
| 
 | ||||
| #ifdef CONFIG_TICK_ONESHOT | ||||
| @@ -96,7 +96,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) | ||||
| 	unsigned long flags; | ||||
| 	int ret = 0; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&tick_broadcast_lock, flags); | ||||
| 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Devices might be registered with both periodic and oneshot | ||||
| @@ -122,7 +122,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) | ||||
| 			tick_broadcast_clear_oneshot(cpu); | ||||
| 		} | ||||
| 	} | ||||
| 	spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||||
| 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| @@ -161,13 +161,13 @@ static void tick_do_broadcast(struct cpumask *mask) | ||||
|  */ | ||||
| static void tick_do_periodic_broadcast(void) | ||||
| { | ||||
| 	spin_lock(&tick_broadcast_lock); | ||||
| 	raw_spin_lock(&tick_broadcast_lock); | ||||
| 
 | ||||
| 	cpumask_and(to_cpumask(tmpmask), | ||||
| 		    cpu_online_mask, tick_get_broadcast_mask()); | ||||
| 	tick_do_broadcast(to_cpumask(tmpmask)); | ||||
| 
 | ||||
| 	spin_unlock(&tick_broadcast_lock); | ||||
| 	raw_spin_unlock(&tick_broadcast_lock); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
| @@ -212,7 +212,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason) | ||||
| 	unsigned long flags; | ||||
| 	int cpu, bc_stopped; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&tick_broadcast_lock, flags); | ||||
| 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags); | ||||
| 
 | ||||
| 	cpu = smp_processor_id(); | ||||
| 	td = &per_cpu(tick_cpu_device, cpu); | ||||
| @@ -263,7 +263,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason) | ||||
| 			tick_broadcast_setup_oneshot(bc); | ||||
| 	} | ||||
| out: | ||||
| 	spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||||
| 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
| @@ -299,7 +299,7 @@ void tick_shutdown_broadcast(unsigned int *cpup) | ||||
| 	unsigned long flags; | ||||
| 	unsigned int cpu = *cpup; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&tick_broadcast_lock, flags); | ||||
| 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags); | ||||
| 
 | ||||
| 	bc = tick_broadcast_device.evtdev; | ||||
| 	cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); | ||||
| @@ -309,7 +309,7 @@ void tick_shutdown_broadcast(unsigned int *cpup) | ||||
| 			clockevents_shutdown(bc); | ||||
| 	} | ||||
| 
 | ||||
| 	spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||||
| 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||||
| } | ||||
| 
 | ||||
| void tick_suspend_broadcast(void) | ||||
| @@ -317,13 +317,13 @@ void tick_suspend_broadcast(void) | ||||
| 	struct clock_event_device *bc; | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&tick_broadcast_lock, flags); | ||||
| 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags); | ||||
| 
 | ||||
| 	bc = tick_broadcast_device.evtdev; | ||||
| 	if (bc) | ||||
| 		clockevents_shutdown(bc); | ||||
| 
 | ||||
| 	spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||||
| 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||||
| } | ||||
| 
 | ||||
| int tick_resume_broadcast(void) | ||||
| @@ -332,7 +332,7 @@ int tick_resume_broadcast(void) | ||||
| 	unsigned long flags; | ||||
| 	int broadcast = 0; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&tick_broadcast_lock, flags); | ||||
| 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags); | ||||
| 
 | ||||
| 	bc = tick_broadcast_device.evtdev; | ||||
| 
 | ||||
| @@ -351,7 +351,7 @@ int tick_resume_broadcast(void) | ||||
| 			break; | ||||
| 		} | ||||
| 	} | ||||
| 	spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||||
| 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||||
| 
 | ||||
| 	return broadcast; | ||||
| } | ||||
| @@ -405,7 +405,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) | ||||
| 	ktime_t now, next_event; | ||||
| 	int cpu; | ||||
| 
 | ||||
| 	spin_lock(&tick_broadcast_lock); | ||||
| 	raw_spin_lock(&tick_broadcast_lock); | ||||
| again: | ||||
| 	dev->next_event.tv64 = KTIME_MAX; | ||||
| 	next_event.tv64 = KTIME_MAX; | ||||
| @@ -443,7 +443,7 @@ again: | ||||
| 		if (tick_broadcast_set_event(next_event, 0)) | ||||
| 			goto again; | ||||
| 	} | ||||
| 	spin_unlock(&tick_broadcast_lock); | ||||
| 	raw_spin_unlock(&tick_broadcast_lock); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
| @@ -457,7 +457,7 @@ void tick_broadcast_oneshot_control(unsigned long reason) | ||||
| 	unsigned long flags; | ||||
| 	int cpu; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&tick_broadcast_lock, flags); | ||||
| 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Periodic mode does not care about the enter/exit of power | ||||
| @@ -492,7 +492,7 @@ void tick_broadcast_oneshot_control(unsigned long reason) | ||||
| 	} | ||||
| 
 | ||||
| out: | ||||
| 	spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||||
| 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
| @@ -563,13 +563,13 @@ void tick_broadcast_switch_to_oneshot(void) | ||||
| 	struct clock_event_device *bc; | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&tick_broadcast_lock, flags); | ||||
| 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags); | ||||
| 
 | ||||
| 	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT; | ||||
| 	bc = tick_broadcast_device.evtdev; | ||||
| 	if (bc) | ||||
| 		tick_broadcast_setup_oneshot(bc); | ||||
| 	spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||||
| 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| @@ -581,7 +581,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup) | ||||
| 	unsigned long flags; | ||||
| 	unsigned int cpu = *cpup; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&tick_broadcast_lock, flags); | ||||
| 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Clear the broadcast mask flag for the dead cpu, but do not | ||||
| @@ -589,7 +589,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup) | ||||
| 	 */ | ||||
| 	cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask()); | ||||
| 
 | ||||
| 	spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||||
| 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  | ||||
| @@ -34,7 +34,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device); | ||||
| ktime_t tick_next_period; | ||||
| ktime_t tick_period; | ||||
| int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; | ||||
| static DEFINE_SPINLOCK(tick_device_lock); | ||||
| static DEFINE_RAW_SPINLOCK(tick_device_lock); | ||||
| 
 | ||||
| /*
 | ||||
|  * Debugging: see timer_list.c | ||||
| @@ -209,7 +209,7 @@ static int tick_check_new_device(struct clock_event_device *newdev) | ||||
| 	int cpu, ret = NOTIFY_OK; | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&tick_device_lock, flags); | ||||
| 	raw_spin_lock_irqsave(&tick_device_lock, flags); | ||||
| 
 | ||||
| 	cpu = smp_processor_id(); | ||||
| 	if (!cpumask_test_cpu(cpu, newdev->cpumask)) | ||||
| @@ -268,7 +268,7 @@ static int tick_check_new_device(struct clock_event_device *newdev) | ||||
| 	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) | ||||
| 		tick_oneshot_notify(); | ||||
| 
 | ||||
| 	spin_unlock_irqrestore(&tick_device_lock, flags); | ||||
| 	raw_spin_unlock_irqrestore(&tick_device_lock, flags); | ||||
| 	return NOTIFY_STOP; | ||||
| 
 | ||||
| out_bc: | ||||
| @@ -278,7 +278,7 @@ out_bc: | ||||
| 	if (tick_check_broadcast_device(newdev)) | ||||
| 		ret = NOTIFY_STOP; | ||||
| 
 | ||||
| 	spin_unlock_irqrestore(&tick_device_lock, flags); | ||||
| 	raw_spin_unlock_irqrestore(&tick_device_lock, flags); | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| @@ -311,7 +311,7 @@ static void tick_shutdown(unsigned int *cpup) | ||||
| 	struct clock_event_device *dev = td->evtdev; | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&tick_device_lock, flags); | ||||
| 	raw_spin_lock_irqsave(&tick_device_lock, flags); | ||||
| 	td->mode = TICKDEV_MODE_PERIODIC; | ||||
| 	if (dev) { | ||||
| 		/*
 | ||||
| @@ -322,7 +322,7 @@ static void tick_shutdown(unsigned int *cpup) | ||||
| 		clockevents_exchange_device(dev, NULL); | ||||
| 		td->evtdev = NULL; | ||||
| 	} | ||||
| 	spin_unlock_irqrestore(&tick_device_lock, flags); | ||||
| 	raw_spin_unlock_irqrestore(&tick_device_lock, flags); | ||||
| } | ||||
| 
 | ||||
| static void tick_suspend(void) | ||||
| @@ -330,9 +330,9 @@ static void tick_suspend(void) | ||||
| 	struct tick_device *td = &__get_cpu_var(tick_cpu_device); | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&tick_device_lock, flags); | ||||
| 	raw_spin_lock_irqsave(&tick_device_lock, flags); | ||||
| 	clockevents_shutdown(td->evtdev); | ||||
| 	spin_unlock_irqrestore(&tick_device_lock, flags); | ||||
| 	raw_spin_unlock_irqrestore(&tick_device_lock, flags); | ||||
| } | ||||
| 
 | ||||
| static void tick_resume(void) | ||||
| @@ -341,7 +341,7 @@ static void tick_resume(void) | ||||
| 	unsigned long flags; | ||||
| 	int broadcast = tick_resume_broadcast(); | ||||
| 
 | ||||
| 	spin_lock_irqsave(&tick_device_lock, flags); | ||||
| 	raw_spin_lock_irqsave(&tick_device_lock, flags); | ||||
| 	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME); | ||||
| 
 | ||||
| 	if (!broadcast) { | ||||
| @@ -350,7 +350,7 @@ static void tick_resume(void) | ||||
| 		else | ||||
| 			tick_resume_oneshot(); | ||||
| 	} | ||||
| 	spin_unlock_irqrestore(&tick_device_lock, flags); | ||||
| 	raw_spin_unlock_irqrestore(&tick_device_lock, flags); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  | ||||
		Loading…
	
		Reference in New Issue
	
	Block a user