[S390] pfault: cpu hotplug vs missing completion interrupts
On cpu hot remove, a PFAULT CANCEL command is sent to the hypervisor, which in turn cancels all outstanding pfault requests that were issued on that cpu (the same happens on a SIGP cpu reset). The result is uninterruptible processes for which the completion interrupt that would wake them never arrives.

To solve this, all processes waiting for a pfault completion interrupt are woken up after a cpu hot remove. The worst case is that they fault again and have to wait again.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
commit f2db2e6cb3
parent b456d94a97
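The patch spreads a small state machine over several files. Condensed, the protocol kept in tsk->thread.pfault_wait works as sketched below. This is a reading aid only — handle_initial/handle_completion are invented names; in the patch both paths live in pfault_interrupt() and run under the new pfault_lock:

/* States of tsk->thread.pfault_wait after this patch:
 *    0 : no pfault outstanding
 *    1 : initial interrupt seen, task parked on pfault_list
 *   -1 : completion interrupt overtook the initial interrupt
 */
static void handle_completion(struct task_struct *tsk)	/* subcode bit 0x0080 set */
{
	if (tsk->thread.pfault_wait == 1) {
		tsk->thread.pfault_wait = 0;	/* normal order: wake the sleeper */
		list_del(&tsk->thread.list);
		wake_up_process(tsk);
	} else {
		tsk->thread.pfault_wait = -1;	/* completion arrived first */
	}
}

static void handle_initial(struct task_struct *tsk)	/* subcode bit 0x0080 clear */
{
	if (tsk->thread.pfault_wait == -1) {
		tsk->thread.pfault_wait = 0;	/* already completed: don't sleep */
	} else {
		tsk->thread.pfault_wait = 1;	/* park until completion or CPU_DEAD */
		list_add(&tsk->thread.list, &pfault_list);
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		set_tsk_need_resched(tsk);
	}
}

The pfault_list introduced here is exactly what the new hotplug notifier drains on cpu hot remove.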
@@ -124,7 +124,7 @@ struct _lowcore {
 	/* Address space pointer. */
 	__u32	kernel_asce;			/* 0x02ac */
 	__u32	user_asce;			/* 0x02b0 */
-	__u8	pad_0x02b4[0x02b8-0x02b4];	/* 0x02b4 */
+	__u32	current_pid;			/* 0x02b4 */
 
 	/* SMP info area */
 	__u32	cpu_nr;				/* 0x02b8 */
@@ -255,7 +255,7 @@ struct _lowcore {
 	/* Address space pointer. */
 	__u64	kernel_asce;			/* 0x0310 */
 	__u64	user_asce;			/* 0x0318 */
-	__u8	pad_0x0320[0x0328-0x0320];	/* 0x0320 */
+	__u64	current_pid;			/* 0x0320 */
 
 	/* SMP info area */
 	__u32	cpu_nr;				/* 0x0328 */
@@ -84,6 +84,7 @@ struct thread_struct {
 	struct per_event per_event;	/* Cause of the last PER trap */
 	/* pfault_wait is used to block the process on a pfault event */
 	unsigned long pfault_wait;
+	struct list_head list;
 };
 
 typedef struct thread_struct thread_struct;
@@ -124,6 +124,7 @@ int main(void)
 	DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer));
 	DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock));
 	DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task));
+	DEFINE(__LC_CURRENT_PID, offsetof(struct _lowcore, current_pid));
 	DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info));
 	DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
 	DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
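asm-offsets.c exists to make C struct offsets visible to assembler code: each DEFINE() emits its offset as a build-time symbol, which is why the two __switch_to hunks below can reference __LC_CURRENT_PID from entry.S/entry64.S. Schematically (this is the generic DEFINE from include/linux/kbuild.h, quoted from memory):

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))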
@@ -212,6 +212,7 @@ __switch_to:
 	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
 	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	st	%r3,__LC_CURRENT		# store task struct of next
+	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3)	# store pid of next
 	st	%r5,__LC_THREAD_INFO		# store thread info of next
 	ahi	%r5,STACK_SIZE			# end of kernel stack of next
 	st	%r5,__LC_KERNEL_STACK		# store end of kernel stack
@@ -220,6 +220,7 @@ __switch_to:
 	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
 	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	stg	%r3,__LC_CURRENT		# store task struct of next
+	mvc	__LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
 	stg	%r5,__LC_THREAD_INFO		# store thread info of next
 	aghi	%r5,STACK_SIZE			# end of kernel stack of next
 	stg	%r5,__LC_KERNEL_STACK		# store end of kernel stack
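The mvc added to both versions of __switch_to amounts to roughly the following C, shown only as a reading aid (not code from the patch; current_pid is the lowcore field introduced above):

/* On every context switch, keep the lowcore pid in step with the
 * running task, so the token the hypervisor hands back always names
 * the task that raised the pfault. */
S390_lowcore.current_pid = next->pid;	/* 'next' = incoming task_struct */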
@@ -466,7 +466,7 @@ typedef struct {
 int pfault_init(void)
 {
 	pfault_refbk_t refbk =
-		{ 0x258, 0, 5, 2, __LC_CURRENT, 1ULL << 48, 1ULL << 48,
+		{ 0x258, 0, 5, 2, __LC_CURRENT_PID, 1ULL << 48, 1ULL << 48,
 		  __PF_RES_FIELD };
 	int rc;
 
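The fifth initializer of refbk is, per the s390 pfault interface, the guest address from which the hypervisor fetches the token that it later echoes back in the completion interrupt. Changing it from __LC_CURRENT (a task_struct pointer) to __LC_CURRENT_PID means the completion handler can resolve the token with an RCU-protected pid lookup instead of dereferencing a possibly stale pointer, as the large hunk below does:

/* Completion path from the hunk below, shown in isolation: */
pid = sizeof(void *) == 4 ? param32 : param64;
rcu_read_lock();
tsk = find_task_by_pid_ns(pid, &init_pid_ns);
if (tsk)
	get_task_struct(tsk);	/* pin the task before leaving RCU */
rcu_read_unlock();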
@@ -498,11 +498,15 @@ void pfault_fini(void)
 		: : "a" (&refbk), "m" (refbk) : "cc");
 }
 
+static DEFINE_SPINLOCK(pfault_lock);
+static LIST_HEAD(pfault_list);
+
 static void pfault_interrupt(unsigned int ext_int_code,
 			     unsigned int param32, unsigned long param64)
 {
 	struct task_struct *tsk;
 	__u16 subcode;
+	pid_t pid;
 
 	/*
 	 * Get the external interruption subcode & pfault
@@ -514,44 +518,79 @@ static void pfault_interrupt(unsigned int ext_int_code,
 	if ((subcode & 0xff00) != __SUBCODE_MASK)
 		return;
 	kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
-	/*
-	 * Get the token (= address of the task structure of the affected task).
-	 */
-#ifdef CONFIG_64BIT
-	tsk = (struct task_struct *) param64;
-#else
-	tsk = (struct task_struct *) param32;
-#endif
+	if (subcode & 0x0080) {
+		/* Get the token (= pid of the affected task). */
+		pid = sizeof(void *) == 4 ? param32 : param64;
+		rcu_read_lock();
+		tsk = find_task_by_pid_ns(pid, &init_pid_ns);
+		if (tsk)
+			get_task_struct(tsk);
+		rcu_read_unlock();
+		if (!tsk)
+			return;
+	} else {
+		tsk = current;
+	}
+	spin_lock(&pfault_lock);
 	if (subcode & 0x0080) {
 		/* signal bit is set -> a page has been swapped in by VM */
-		if (xchg(&tsk->thread.pfault_wait, -1) != 0) {
+		if (tsk->thread.pfault_wait == 1) {
 			/* Initial interrupt was faster than the completion
 			 * interrupt. pfault_wait is valid. Set pfault_wait
 			 * back to zero and wake up the process. This can
 			 * safely be done because the task is still sleeping
 			 * and can't produce new pfaults. */
 			tsk->thread.pfault_wait = 0;
+			list_del(&tsk->thread.list);
 			wake_up_process(tsk);
-			put_task_struct(tsk);
+		} else {
+			/* Completion interrupt was faster than initial
+			 * interrupt. Set pfault_wait to -1 so the initial
+			 * interrupt doesn't put the task to sleep. */
+			tsk->thread.pfault_wait = -1;
 		}
+		put_task_struct(tsk);
 	} else {
 		/* signal bit not set -> a real page is missing. */
-		get_task_struct(tsk);
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-		if (xchg(&tsk->thread.pfault_wait, 1) != 0) {
+		if (tsk->thread.pfault_wait == -1) {
 			/* Completion interrupt was faster than the initial
-			 * interrupt (swapped in a -1 for pfault_wait). Set
-			 * pfault_wait back to zero and exit. This can be
-			 * done safely because tsk is running in kernel
-			 * mode and can't produce new pfaults. */
+			 * interrupt (pfault_wait == -1). Set pfault_wait
+			 * back to zero and exit. */
 			tsk->thread.pfault_wait = 0;
-			set_task_state(tsk, TASK_RUNNING);
-			put_task_struct(tsk);
-		} else
+		} else {
+			/* Initial interrupt arrived before completion
+			 * interrupt. Let the task sleep. */
+			tsk->thread.pfault_wait = 1;
+			list_add(&tsk->thread.list, &pfault_list);
+			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 			set_tsk_need_resched(tsk);
+		}
 	}
+	spin_unlock(&pfault_lock);
+}
+
+static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
+				       unsigned long action, void *hcpu)
+{
+	struct thread_struct *thread, *next;
+	struct task_struct *tsk;
+
+	switch (action) {
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		spin_lock_irq(&pfault_lock);
+		list_for_each_entry_safe(thread, next, &pfault_list, list) {
+			thread->pfault_wait = 0;
+			list_del(&thread->list);
+			tsk = container_of(thread, struct task_struct, thread);
+			wake_up_process(tsk);
+		}
+		spin_unlock_irq(&pfault_lock);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
 }
 
 static int __init pfault_irq_init(void)
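Note that pfault_list links thread_structs via the new list member, so the notifier needs container_of() to get back to the owning task_struct before waking it. Condensed from the hunk above, the CPU_DEAD path is:

/* Force every waiter runnable; worst case it faults again and
 * simply waits again (see the commit message). */
spin_lock_irq(&pfault_lock);
list_for_each_entry_safe(thread, next, &pfault_list, list) {
	thread->pfault_wait = 0;
	list_del(&thread->list);
	wake_up_process(container_of(thread, struct task_struct, thread));
}
spin_unlock_irq(&pfault_lock);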
@@ -568,8 +607,10 @@ static int __init pfault_irq_init(void)
 		pfault_disable = 1;
 		return rc;
 	}
-	if (pfault_init() == 0)
+	if (pfault_init() == 0) {
+		hotcpu_notifier(pfault_cpu_notify, 0);
 		return 0;
+	}
+
 	/* Tough luck, no pfault. */
 	pfault_disable = 1;