[PATCH] uml: add arch_switch_to for newly forked thread

Newly forked threads have no arch_switch_to_skas() called before their first
run, because when schedule() switches to them they're resumed in the body of
thread_wait() inside fork_handler() rather than in switch_threads() in
switch_to_skas().  Compensate for this missing call.
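
As a rough illustration only (a simplified, self-contained userspace model, not the
actual UML code; the switch_to() dispatch and the has_run bookkeeping are invented
for the sketch), the two resume paths look like this: a thread that has already run
comes back through switch_to_skas() and gets arch_switch_to_skas() there, while a
newly forked thread wakes up in fork_handler() and only gets the arch hook if
fork_handler() calls it explicitly:

/* Simplified model: "has_run" stands in for the real scheduler state. */
#include <stdio.h>

struct task {
        const char *name;
        int has_run;
};

static void arch_switch_to_skas(struct task *from, struct task *to)
{
        printf("arch state switched: %s -> %s\n",
               from ? from->name : "(none)", to->name);
}

/* Resume path for a thread that has already run once. */
static void switch_to_skas(struct task *prev, struct task *next)
{
        /* switch_threads() would return here in the resumed thread. */
        arch_switch_to_skas(prev, next);
}

/* First-run path: a newly forked thread wakes up here instead, so it
 * must call the arch hook itself or the call is skipped entirely. */
static void fork_handler(struct task *prev, struct task *next)
{
        arch_switch_to_skas(prev, next);
        next->has_run = 1;
}

static void switch_to(struct task *prev, struct task *next)
{
        if (next->has_run)
                switch_to_skas(prev, next);
        else
                fork_handler(prev, next);
}

int main(void)
{
        struct task parent = { "parent", 1 }, child = { "child", 0 };

        switch_to(&parent, &child);     /* child's first run: fork_handler() */
        switch_to(&child, &parent);     /* parent resumes: switch_to_skas() */
        return 0;
}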

Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Acked-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 54d8d3b5a0
parent dd77aec07a
Author: Paolo 'Blaisorblade' Giarrusso
Date:   2006-03-31 02:30:24 -08:00
Committed by: Linus Torvalds
3 changed files with 25 additions and 4 deletions

@@ -91,10 +91,17 @@ void fork_handler(int sig)
                 panic("blech");
 
         schedule_tail(current->thread.prev_sched);
+
+        /* XXX: if interrupt_end() calls schedule, this call to
+         * arch_switch_to_skas isn't needed. We could want to apply this to
+         * improve performance. -bb */
+        arch_switch_to_skas(current->thread.prev_sched, current);
+
         current->thread.prev_sched = NULL;
 
         /* Handle any immediate reschedules or signals */
         interrupt_end();
 
         userspace(&current->thread.regs.regs);
 }

@@ -23,7 +23,14 @@ void arch_switch_to_tt(struct task_struct *from, struct task_struct *to)
 void arch_switch_to_skas(struct task_struct *from, struct task_struct *to)
 {
-        arch_switch_tls_skas(from, to);
+        int err = arch_switch_tls_skas(from, to);
+        if (!err)
+                return;
+
+        if (err != -EINVAL)
+                printk(KERN_WARNING "arch_switch_tls_skas failed, errno %d, not EINVAL\n", -err);
+        else
+                printk(KERN_WARNING "arch_switch_tls_skas failed, errno = EINVAL\n");
 }
 
 int is_syscall(unsigned long addr)

@@ -70,8 +70,6 @@ static int get_free_idx(struct task_struct* task)
         return -ESRCH;
 }
 
-#define O_FORCE 1
-
 static inline void clear_user_desc(struct user_desc* info)
 {
         /* Postcondition: LDT_empty(info) returns true. */
@@ -84,6 +82,8 @@ static inline void clear_user_desc(struct user_desc* info)
         info->seg_not_present = 1;
 }
 
+#define O_FORCE 1
+
 static int load_TLS(int flags, struct task_struct *to)
 {
         int ret = 0;
@@ -162,7 +162,13 @@ void clear_flushed_tls(struct task_struct *task)
  * SKAS patch. */
 int arch_switch_tls_skas(struct task_struct *from, struct task_struct *to)
 {
-        return load_TLS(O_FORCE, to);
+        /* We have no need whatsoever to switch TLS for kernel threads; beyond
+         * that, that would also result in us calling os_set_thread_area with
+         * userspace_pid[cpu] == 0, which gives an error. */
+        if (likely(to->mm))
+                return load_TLS(O_FORCE, to);
+
+        return 0;
 }
 
 int arch_switch_tls_tt(struct task_struct *from, struct task_struct *to)
@@ -324,3 +330,4 @@ int ptrace_get_thread_area(struct task_struct *child, int idx,
 out:
         return ret;
 }