Mirror of https://github.com/torvalds/linux.git
Merge tag 'trace-fixes-v3.18-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fix from Steven Rostedt:
 "Rabin Vincent found a way that tracing could cause an infinite loop
  in the kernel.  The splice logic wants a full page from the ring
  buffer, but ring_buffer_wait() returns as soon as there is any data
  in the ring buffer.  The splice code would then continue the loop,
  waiting for a full page.  If a full page never arrives, the splice
  code never sleeps and just keeps looping.

  There's another case that Rabin fixed that could loop if there's no
  memory and kmalloc() constantly returns NULL"

* tag 'trace-fixes-v3.18-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Do not risk busy looping in buffer splice
  tracing: Do not busy wait in buffer splice
commit 15e5cda9e6
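The loop described above is easiest to see outside the kernel. The sketch below is a userspace analogy using pthreads, not kernel code, and every name in it (struct ring, PAGE_SZ, ring_wait_any(), ring_wait(), splice_like_reader()) is invented for illustration. A waiter that wakes on any data lets a consumer that needs a full page spin without ever sleeping; a waiter that is told about the full-page requirement keeps sleeping until it is met, which is the shape of the fix in the diff below.

/*
 * Userspace sketch of the bug (illustrative only, not kernel code).
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define PAGE_SZ 4096

struct ring {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        size_t          bytes;          /* bytes currently buffered */
};

/* Old behaviour: return as soon as *any* data is present. */
static void ring_wait_any(struct ring *r)
{
        pthread_mutex_lock(&r->lock);
        while (r->bytes == 0)
                pthread_cond_wait(&r->cond, &r->lock);
        pthread_mutex_unlock(&r->lock);
}

/* A reader that needs a full page: once a single byte exists,
 * ring_wait_any() returns immediately on every iteration, so the
 * loop never sleeps -- the infinite loop described above. */
static void splice_like_reader(struct ring *r)
{
        for (;;) {
                bool full;

                ring_wait_any(r);
                pthread_mutex_lock(&r->lock);
                full = (r->bytes >= PAGE_SZ);
                pthread_mutex_unlock(&r->lock);
                if (full)
                        break;          /* else: busy loop */
        }
}

/* Shape of the fix: the waiter is told whether a full page is
 * required and keeps sleeping until that condition holds. */
static void ring_wait(struct ring *r, bool full)
{
        size_t need = full ? PAGE_SZ : 1;

        pthread_mutex_lock(&r->lock);
        while (r->bytes < need)
                pthread_cond_wait(&r->cond, &r->lock);
        pthread_mutex_unlock(&r->lock);
}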
include/linux/ring_buffer.h
@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
         __ring_buffer_alloc((size), (flags), &__key);  \
 })
 
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu);
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
 int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
                           struct file *filp, poll_table *poll_table);
 
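Call sites later in this patch pass the new argument through wait_on_pipe(); for orientation, the two modes look like this (illustrative snippet, not part of the diff):

/* Wake as soon as any data is present (the old behaviour, now explicit): */
ret = ring_buffer_wait(buffer, cpu, false);

/* Sleep until a full page is ready; only meaningful for a specific CPU,
 * i.e. when cpu != RING_BUFFER_ALL_CPUS: */
ret = ring_buffer_wait(buffer, cpu, true);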
kernel/trace/ring_buffer.c
@@ -538,16 +538,18 @@ static void rb_wake_up_waiters(struct irq_work *work)
  * ring_buffer_wait - wait for input to the ring buffer
  * @buffer: buffer to wait on
  * @cpu: the cpu buffer to wait on
+ * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
  *
  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 {
-        struct ring_buffer_per_cpu *cpu_buffer;
+        struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
         DEFINE_WAIT(wait);
         struct rb_irq_work *work;
+        int ret = 0;
 
         /*
          * Depending on what the caller is waiting for, either any
@@ -564,36 +566,61 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
         }
 
 
-        prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
-
-        /*
-         * The events can happen in critical sections where
-         * checking a work queue can cause deadlocks.
-         * After adding a task to the queue, this flag is set
-         * only to notify events to try to wake up the queue
-         * using irq_work.
-         *
-         * We don't clear it even if the buffer is no longer
-         * empty. The flag only causes the next event to run
-         * irq_work to do the work queue wake up. The worse
-         * that can happen if we race with !trace_empty() is that
-         * an event will cause an irq_work to try to wake up
-         * an empty queue.
-         *
-         * There's no reason to protect this flag either, as
-         * the work queue and irq_work logic will do the necessary
-         * synchronization for the wake ups. The only thing
-         * that is necessary is that the wake up happens after
-         * a task has been queued. It's OK for spurious wake ups.
-         */
-        work->waiters_pending = true;
+        while (true) {
+                prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+
+                /*
+                 * The events can happen in critical sections where
+                 * checking a work queue can cause deadlocks.
+                 * After adding a task to the queue, this flag is set
+                 * only to notify events to try to wake up the queue
+                 * using irq_work.
+                 *
+                 * We don't clear it even if the buffer is no longer
+                 * empty. The flag only causes the next event to run
+                 * irq_work to do the work queue wake up. The worse
+                 * that can happen if we race with !trace_empty() is that
+                 * an event will cause an irq_work to try to wake up
+                 * an empty queue.
+                 *
+                 * There's no reason to protect this flag either, as
+                 * the work queue and irq_work logic will do the necessary
+                 * synchronization for the wake ups. The only thing
+                 * that is necessary is that the wake up happens after
+                 * a task has been queued. It's OK for spurious wake ups.
+                 */
+                work->waiters_pending = true;
+
+                if (signal_pending(current)) {
+                        ret = -EINTR;
+                        break;
+                }
+
+                if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
+                        break;
+
+                if (cpu != RING_BUFFER_ALL_CPUS &&
+                    !ring_buffer_empty_cpu(buffer, cpu)) {
+                        unsigned long flags;
+                        bool pagebusy;
+
+                        if (!full)
+                                break;
+
+                        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+                        pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+                        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+                        if (!pagebusy)
+                                break;
+                }
 
-        if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
-            (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
                 schedule();
+        }
 
         finish_wait(&work->waiters, &wait);
-        return 0;
+
+        return ret;
 }
 
 /**
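For orientation, the exit conditions of the new loop can be read as a single predicate. The helper below is only a restatement of the checks in the hunk above; rb_wait_done() is a hypothetical name, not part of the patch, and it is not meant to compile outside kernel context.

static bool rb_wait_done(struct ring_buffer *buffer,
                         struct ring_buffer_per_cpu *cpu_buffer,
                         int cpu, bool full)
{
        unsigned long flags;
        bool pagebusy;

        if (signal_pending(current))            /* loop exits with -EINTR */
                return true;

        if (cpu == RING_BUFFER_ALL_CPUS)        /* any data on any CPU */
                return !ring_buffer_empty(buffer);

        if (ring_buffer_empty_cpu(buffer, cpu)) /* nothing yet: keep sleeping */
                return false;

        if (!full)                              /* caller accepts any data */
                return true;

        /* A full page is required: done only when the writer has moved
         * past the page the reader is holding. */
        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
        pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        return !pagebusy;
}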
kernel/trace/trace.c
@@ -1076,13 +1076,14 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
-static int wait_on_pipe(struct trace_iterator *iter)
+static int wait_on_pipe(struct trace_iterator *iter, bool full)
 {
         /* Iterators are static, they should be filled or empty */
         if (trace_buffer_iter(iter, iter->cpu_file))
                 return 0;
 
-        return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
+        return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
                                 full);
 }
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -4434,15 +4435,12 @@ static int tracing_wait_pipe(struct file *filp)
 
                 mutex_unlock(&iter->mutex);
 
-                ret = wait_on_pipe(iter);
+                ret = wait_on_pipe(iter, false);
 
                 mutex_lock(&iter->mutex);
 
                 if (ret)
                         return ret;
-
-                if (signal_pending(current))
-                        return -EINTR;
         }
 
         return 1;
@@ -5372,16 +5370,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
                         goto out_unlock;
                 }
                 mutex_unlock(&trace_types_lock);
-                ret = wait_on_pipe(iter);
+                ret = wait_on_pipe(iter, false);
                 mutex_lock(&trace_types_lock);
                 if (ret) {
                         size = ret;
                         goto out_unlock;
                 }
-                if (signal_pending(current)) {
-                        size = -EINTR;
-                        goto out_unlock;
-                }
                 goto again;
         }
         size = 0;
@@ -5500,7 +5494,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
         };
         struct buffer_ref *ref;
         int entries, size, i;
-        ssize_t ret;
+        ssize_t ret = 0;
 
         mutex_lock(&trace_types_lock);
 
@@ -5538,13 +5532,16 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                 int r;
 
                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
-                if (!ref)
+                if (!ref) {
+                        ret = -ENOMEM;
                         break;
+                }
 
                 ref->ref = 1;
                 ref->buffer = iter->trace_buffer->buffer;
                 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
                 if (!ref->page) {
+                        ret = -ENOMEM;
                         kfree(ref);
                         break;
                 }
@@ -5582,19 +5579,19 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 
         /* did we read anything? */
         if (!spd.nr_pages) {
+                if (ret)
+                        goto out;
+
                 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
                         ret = -EAGAIN;
                         goto out;
                 }
                 mutex_unlock(&trace_types_lock);
-                ret = wait_on_pipe(iter);
+                ret = wait_on_pipe(iter, true);
                 mutex_lock(&trace_types_lock);
                 if (ret)
                         goto out;
-                if (signal_pending(current)) {
-                        ret = -EINTR;
-                        goto out;
-                }
+
                 goto again;
         }
 
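Seen from userspace, the path changed by the splice hunks is the one used when a tool splices per-cpu pages out of trace_pipe_raw. Below is a hedged sketch of such a reader (it assumes the usual debugfs mount point and needs root with tracing enabled); after this fix the splice() call sleeps in the kernel until a full page is available, returns EAGAIN to non-blocking readers, and fails with ENOMEM instead of looping when the kernel cannot allocate its bookkeeping.

#define _GNU_SOURCE             /* for splice(2) */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path =
                "/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw";
        int fds[2];
        int in = open(path, O_RDONLY);

        if (in < 0 || pipe(fds) < 0) {
                perror("setup");
                return 1;
        }

        for (;;) {
                /* Ask for one page; blocks until a full page exists. */
                ssize_t n = splice(in, NULL, fds[1], NULL, 4096, 0);

                if (n < 0) {
                        if (errno == EINTR)
                                continue;       /* interrupted by a signal */
                        perror("splice");       /* e.g. ENOMEM after this fix */
                        break;
                }

                /* Drain the pipe; a real consumer would write to a file. */
                while (n > 0) {
                        char buf[4096];
                        ssize_t got = read(fds[0], buf, sizeof(buf));

                        if (got <= 0)
                                break;
                        n -= got;
                }
        }

        close(in);
        close(fds[0]);
        close(fds[1]);
        return 0;
}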