linux/arch/cris/include/asm/mmu_context.h
Rabin Vincent 7f0144e777 CRIS: fix switch_mm() lockdep splat
With lockdep support implemented on CRISv32, we get the following splat.
switch_mm() can be called both from the scheduler (with interrupts
disabled) and from flush_old_exec() via activate_mm() (with interrupts
enabled).  Fix it by disabling interrupts in activate_mm(), as powerpc
and hexagon do.
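
For reference, a minimal sketch of the pattern applied here; it mirrors
the activate_mm() in the header shown in full at the bottom of this page,
using the standard local_irq_save()/local_irq_restore() primitives:

static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	/*
	 * switch_mm() takes mmu_context_lock, which the scheduler also
	 * takes with interrupts disabled, so never enter it with
	 * interrupts enabled.
	 */
	local_irq_save(flags);
	switch_mm(prev, next, NULL);
	local_irq_restore(flags);
}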

 ======================================================
 [ INFO: HARDIRQ-safe -> HARDIRQ-unsafe lock order detected ]
 3.19.0-08802-g20bc9f1-dirty #323 Not tainted
 ------------------------------------------------------
 init/1 [HC0[0]:SC0[0]:HE0:SE1] is trying to acquire:
  (mmu_context_lock){+.+...}, at: [<c0009290>] switch_mm+0x22/0xc6

 and this task is already holding:
  (&rq->lock){-.-.-.}, at: [<c01a0756>] __schedule+0x5e/0x648
 which would create a new lock dependency:
  (&rq->lock){-.-.-.} -> (mmu_context_lock){+.+...}

 but this new dependency connects a HARDIRQ-irq-safe lock:
  (&rq->lock){-.-.-.}
 ... which became HARDIRQ-irq-safe at:
   [<c002b03c>] scheduler_tick+0x28/0x5e
   [<c0007c6c>] timer_interrupt+0x4e/0x6a
   [<c0043ac4>] handle_irq_event_percpu+0x54/0x13c
   [<c004343c>] generic_handle_irq+0x2a/0x36

 to a HARDIRQ-irq-unsafe lock:
  (mmu_context_lock){+.+...}
 ... which became HARDIRQ-irq-unsafe at:
 ...  [<c0039e60>] __lock_acquire+0x8f8/0x1d9c
   [<c0009290>] switch_mm+0x22/0xc6
   [<c009c260>] flush_old_exec+0x500/0x5d4
   [<c00da4c6>] load_elf_phdrs+0x7a/0x84
   [<c00dbdb0>] load_elf_binary+0x21c/0x13b4
   [<c009cdb6>] do_execve+0x22/0x2c
   [<c001dcf2>] ____call_usermodehelper+0x0/0x154
   [<c000581e>] ret_from_kernel_thread+0xe/0x14

 other info that might help us debug this:

  Possible interrupt unsafe locking scenario:

        CPU0                    CPU1
        ----                    ----
   lock(mmu_context_lock);
                                local_irq_disable();
                                lock(&rq->lock);
                                lock(mmu_context_lock);
   <Interrupt>
     lock(&rq->lock);

  *** DEADLOCK ***

 1 lock held by init/1:
  #0:  (&rq->lock){-.-.-.}, at: [<c01a0756>] __schedule+0x5e/0x648

 Call Trace:
 [<c019fe9e>] printk+0x0/0x4e
 [<c00368f8>] print_shortest_lock_dependencies+0x0/0x15c
 [<c0048628>] print_stack_trace+0x0/0x88
 [<c0038912>] __lock_is_held+0x3e/0x5e
 [<c003b894>] lock_acquire+0x8a/0xcc
 [<c01a50c4>] _raw_spin_lock+0x44/0x7a
 [<c0009290>] switch_mm+0x22/0xc6
 [<c01a06f8>] __schedule+0x0/0x648
 [<c01a0d76>] schedule+0x36/0x7c
 [<c0037d04>] trace_hardirqs_on+0x0/0x1e
 [<c0004e18>] do_work_pending+0x30/0xd4
 [<c000591a>] _work_pending+0xe/0x12

Signed-off-by: Rabin Vincent <rabin@rab.in>
Signed-off-by: Jesper Nilsson <jesper.nilsson@axis.com>
2015-09-05 00:56:50 +02:00


#ifndef __CRIS_MMU_CONTEXT_H
#define __CRIS_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>

extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void get_mmu_context(struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * activate_mm() can be called with interrupts enabled (e.g. from
 * flush_old_exec()), while switch_mm() takes mmu_context_lock, which
 * the scheduler also takes under rq->lock with interrupts disabled.
 * Disable interrupts here so that lock is never taken with interrupts
 * enabled, avoiding a lockdep HARDIRQ-safe -> HARDIRQ-unsafe splat.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm(prev, next, NULL);
	local_irq_restore(flags);
}

/* Current active pgd - this is similar to other processors' pgd
 * registers, like cr3 on the i386.
 * Defined in arch/cris/mm/fault.c.
 */
DECLARE_PER_CPU(pgd_t *, current_pgd);

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

#endif