[POWERPC] spusched: fix cpu/node binding

Add a cpus_allowed field to struct spu_context so that we always use the
cpu mask of the owning thread instead of that of whichever thread happens
to call into the scheduler. Also use this information in
grab_runnable_context to avoid spurious wakeups.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 2cf2b3b49f
commit ea1ae5949d
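To illustrate the idea behind the patch below — a context is only eligible for an SPU on a node whose CPUs intersect the owning thread's saved affinity mask — here is a minimal userspace sketch. It is not the kernel code: plain 64-bit masks stand in for cpumask_t, the node_cpus table stands in for node_to_cpumask(), and the struct is a hypothetical reduction of struct spu_context.

#include <stdint.h>
#include <stdio.h>

#define MAX_NODES 2

/* Toy stand-in for the kernel's node_to_cpumask(): which CPUs live on each node. */
static const uint64_t node_cpus[MAX_NODES] = {
	0x0f,	/* node 0: CPUs 0-3 */
	0xf0,	/* node 1: CPUs 4-7 */
};

/* Hypothetical reduction of struct spu_context: just the saved affinity mask. */
struct spu_context {
	uint64_t cpus_allowed;	/* captured from the owning thread */
};

/* Same idea as the patch's __node_allowed(): a context may take an SPU on
 * a node only if its owner's CPU mask intersects that node's CPUs. */
static int node_allowed(const struct spu_context *ctx, int node)
{
	return (ctx->cpus_allowed & node_cpus[node]) != 0;
}

int main(void)
{
	struct spu_context ctx = { .cpus_allowed = 0x03 };	/* owner bound to CPUs 0-1 */
	int node;

	for (node = 0; node < MAX_NODES; node++)
		printf("node %d: %s\n", node,
		       node_allowed(&ctx, node) ? "eligible" : "skipped");
	return 0;
}

In the actual patch this check runs under spu_prio->runq_lock, both when the owning thread updates ctx->cpus_allowed and when grab_runnable_context() walks the runqueue, so a context is never woken for a node it cannot run on.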
arch/powerpc/platforms/cell/spufs/context.c
@@ -53,7 +53,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	INIT_LIST_HEAD(&ctx->rq);
 	if (gang)
 		spu_gang_add_ctx(gang, ctx);
+	ctx->cpus_allowed = current->cpus_allowed;
 	spu_set_timeslice(ctx);
 	goto out;
 out_free:
arch/powerpc/platforms/cell/spufs/sched.c
@@ -112,6 +112,16 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	else
 		ctx->prio = current->static_prio;
 	ctx->policy = current->policy;
+
+	/*
+	 * A lot of places that don't hold active_mutex poke into
+	 * cpus_allowed, including grab_runnable_context which
+	 * already holds the runq_lock. So abuse runq_lock
+	 * to protect this field aswell.
+	 */
+	spin_lock(&spu_prio->runq_lock);
+	ctx->cpus_allowed = current->cpus_allowed;
+	spin_unlock(&spu_prio->runq_lock);
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
@@ -123,18 +133,29 @@ void spu_update_sched_info(struct spu_context *ctx)
 	mutex_unlock(&spu_prio->active_mutex[node]);
 }
 
-static inline int node_allowed(int node)
+static int __node_allowed(struct spu_context *ctx, int node)
 {
-	cpumask_t mask;
+	if (nr_cpus_node(node)) {
+		cpumask_t mask = node_to_cpumask(node);
 
-	if (!nr_cpus_node(node))
-		return 0;
-	mask = node_to_cpumask(node);
-	if (!cpus_intersects(mask, current->cpus_allowed))
-		return 0;
-	return 1;
+		if (cpus_intersects(mask, ctx->cpus_allowed))
+			return 1;
+	}
+
+	return 0;
+}
+
+static int node_allowed(struct spu_context *ctx, int node)
+{
+	int rval;
+
+	spin_lock(&spu_prio->runq_lock);
+	rval = __node_allowed(ctx, node);
+	spin_unlock(&spu_prio->runq_lock);
+
+	return rval;
 }
 
 /**
  * spu_add_to_active_list - add spu to active list
  * @spu: spu to add to the active list
@@ -289,7 +310,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
 
 	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 		node = (node < MAX_NUMNODES) ? node : 0;
-		if (!node_allowed(node))
+		if (!node_allowed(ctx, node))
 			continue;
 		spu = spu_alloc_node(node);
 		if (spu)
@@ -321,7 +342,7 @@ static struct spu *find_victim(struct spu_context *ctx)
 	node = cpu_to_node(raw_smp_processor_id());
 	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 		node = (node < MAX_NUMNODES) ? node : 0;
-		if (!node_allowed(node))
+		if (!node_allowed(ctx, node))
 			continue;
 
 		mutex_lock(&spu_prio->active_mutex[node]);
@@ -416,23 +437,28 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
  * Remove the highest priority context on the runqueue and return it
  * to the caller. Returns %NULL if no runnable context was found.
  */
-static struct spu_context *grab_runnable_context(int prio)
+static struct spu_context *grab_runnable_context(int prio, int node)
 {
-	struct spu_context *ctx = NULL;
+	struct spu_context *ctx;
 	int best;
 
 	spin_lock(&spu_prio->runq_lock);
 	best = sched_find_first_bit(spu_prio->bitmap);
-	if (best < prio) {
+	while (best < prio) {
 		struct list_head *rq = &spu_prio->runq[best];
 
-		BUG_ON(list_empty(rq));
-
-		ctx = list_entry(rq->next, struct spu_context, rq);
-		__spu_del_from_rq(ctx);
+		list_for_each_entry(ctx, rq, rq) {
+			/* XXX(hch): check for affinity here aswell */
+			if (__node_allowed(ctx, node)) {
+				__spu_del_from_rq(ctx);
+				goto found;
+			}
+		}
+		best++;
 	}
+	ctx = NULL;
+ found:
 	spin_unlock(&spu_prio->runq_lock);
 
 	return ctx;
 }
 
@@ -442,7 +468,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 	struct spu_context *new = NULL;
 
 	if (spu) {
-		new = grab_runnable_context(max_prio);
+		new = grab_runnable_context(max_prio, spu->node);
 		if (new || force) {
 			spu_remove_from_active_list(spu);
 			spu_unbind_context(spu, ctx);
@@ -496,9 +522,11 @@ static void spusched_tick(struct spu_context *ctx)
 	 * tick and try again.
 	 */
 	if (mutex_trylock(&ctx->state_mutex)) {
-		struct spu_context *new = grab_runnable_context(ctx->prio + 1);
-		if (new) {
-			struct spu *spu = ctx->spu;
+		struct spu *spu = ctx->spu;
+		struct spu_context *new;
+
+		new = grab_runnable_context(ctx->prio + 1, spu->node);
+		if (new) {
 
 			__spu_remove_from_active_list(spu);
 			spu_unbind_context(spu, ctx);
arch/powerpc/platforms/cell/spufs/spufs.h
@@ -26,6 +26,7 @@
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/fs.h>
+#include <linux/cpumask.h>
 
 #include <asm/spu.h>
 #include <asm/spu_csa.h>
@@ -80,6 +81,7 @@ struct spu_context {
 	struct list_head rq;
 	unsigned int time_slice;
 	unsigned long sched_flags;
+	cpumask_t cpus_allowed;
 	int policy;
 	int prio;
 };