Merge branch 'spufs' of master.kernel.org:/pub/scm/linux/kernel/git/arnd/cell-2.6 into for-2.6.22
commit 13177c8b7e
@@ -36,6 +36,8 @@
 #include <asm/xmon.h>
 
 const struct spu_management_ops *spu_management_ops;
+EXPORT_SYMBOL_GPL(spu_management_ops);
+
 const struct spu_priv1_ops *spu_priv1_ops;
 
 static struct list_head spu_list[MAX_NUMNODES];
@@ -290,7 +292,6 @@ spu_irq_class_1(int irq, void *data)
 
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
-EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
 
 static irqreturn_t
 spu_irq_class_2(int irq, void *data)
@@ -431,10 +432,11 @@ struct spu *spu_alloc_node(int node)
 		spu = list_entry(spu_list[node].next, struct spu, list);
 		list_del_init(&spu->list);
 		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
-		spu_init_channels(spu);
 	}
 	mutex_unlock(&spu_mutex);
 
+	if (spu)
+		spu_init_channels(spu);
 	return spu;
 }
 EXPORT_SYMBOL_GPL(spu_alloc_node);
@@ -461,108 +463,6 @@ void spu_free(struct spu *spu)
 }
 EXPORT_SYMBOL_GPL(spu_free);
 
-static int spu_handle_mm_fault(struct spu *spu)
-{
-	struct mm_struct *mm = spu->mm;
-	struct vm_area_struct *vma;
-	u64 ea, dsisr, is_write;
-	int ret;
-
-	ea = spu->dar;
-	dsisr = spu->dsisr;
-#if 0
-	if (!IS_VALID_EA(ea)) {
-		return -EFAULT;
-	}
-#endif /* XXX */
-	if (mm == NULL) {
-		return -EFAULT;
-	}
-	if (mm->pgd == NULL) {
-		return -EFAULT;
-	}
-
-	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, ea);
-	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= ea)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-#if 0
-	if (expand_stack(vma, ea))
-		goto bad_area;
-#endif /* XXX */
-good_area:
-	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
-	if (is_write) {
-		if (!(vma->vm_flags & VM_WRITE))
-			goto bad_area;
-	} else {
-		if (dsisr & MFC_DSISR_ACCESS_DENIED)
-			goto bad_area;
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-			goto bad_area;
-	}
-	ret = 0;
-	switch (handle_mm_fault(mm, vma, ea, is_write)) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		ret = -EFAULT;
-		goto bad_area;
-	case VM_FAULT_OOM:
-		ret = -ENOMEM;
-		goto bad_area;
-	default:
-		BUG();
-	}
-	up_read(&mm->mmap_sem);
-	return ret;
-
-bad_area:
-	up_read(&mm->mmap_sem);
-	return -EFAULT;
-}
-
-int spu_irq_class_1_bottom(struct spu *spu)
-{
-	u64 ea, dsisr, access, error = 0UL;
-	int ret = 0;
-
-	ea = spu->dar;
-	dsisr = spu->dsisr;
-	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
-		u64 flags;
-
-		access = (_PAGE_PRESENT | _PAGE_USER);
-		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
-		local_irq_save(flags);
-		if (hash_page(ea, access, 0x300) != 0)
-			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
-		local_irq_restore(flags);
-	}
-	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
-		if ((ret = spu_handle_mm_fault(spu)) != 0)
-			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
-		else
-			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
-	}
-	spu->dar = 0UL;
-	spu->dsisr = 0UL;
-	if (!error) {
-		spu_restart_dma(spu);
-	} else {
-		spu->dma_callback(spu, SPE_EVENT_SPE_DATA_STORAGE);
-	}
-	return ret;
-}
-
 struct sysdev_class spu_sysdev_class = {
 	set_kset_name("spu")
 };
@@ -636,12 +536,6 @@ static int spu_create_sysdev(struct spu *spu)
 	return 0;
 }
 
-static void spu_destroy_sysdev(struct spu *spu)
-{
-	sysfs_remove_device_from_node(&spu->sysdev, spu->node);
-	sysdev_unregister(&spu->sysdev);
-}
-
 static int __init create_spu(void *data)
 {
 	struct spu *spu;
@@ -693,58 +587,37 @@ out:
 	return ret;
 }
 
-static void destroy_spu(struct spu *spu)
-{
-	list_del_init(&spu->list);
-	list_del_init(&spu->full_list);
-
-	spu_destroy_sysdev(spu);
-	spu_free_irqs(spu);
-	spu_destroy_spu(spu);
-	kfree(spu);
-}
-
-static void cleanup_spu_base(void)
-{
-	struct spu *spu, *tmp;
-	int node;
-
-	mutex_lock(&spu_mutex);
-	for (node = 0; node < MAX_NUMNODES; node++) {
-		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
-			destroy_spu(spu);
-	}
-	mutex_unlock(&spu_mutex);
-	sysdev_class_unregister(&spu_sysdev_class);
-}
-module_exit(cleanup_spu_base);
-
 static int __init init_spu_base(void)
 {
-	int i, ret;
+	int i, ret = 0;
 
+	for (i = 0; i < MAX_NUMNODES; i++)
+		INIT_LIST_HEAD(&spu_list[i]);
+
 	if (!spu_management_ops)
-		return 0;
+		goto out;
 
 	/* create sysdev class for spus */
 	ret = sysdev_class_register(&spu_sysdev_class);
 	if (ret)
-		return ret;
-
-	for (i = 0; i < MAX_NUMNODES; i++)
-		INIT_LIST_HEAD(&spu_list[i]);
+		goto out;
 
 	ret = spu_enumerate_spus(create_spu);
 
 	if (ret) {
 		printk(KERN_WARNING "%s: Error initializing spus\n",
 			__FUNCTION__);
-		cleanup_spu_base();
-		return ret;
+		goto out_unregister_sysdev_class;
 	}
 
 	xmon_register_spus(&spu_full_list);
 
+	return 0;
+
+ out_unregister_sysdev_class:
+	sysdev_class_unregister(&spu_sysdev_class);
+ out:
+
 	return ret;
 }
 module_init(init_spu_base);
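Not part of the patch: the init_spu_base rework above drops the monolithic cleanup_spu_base() error path in favour of goto-based unwinding, where each failure label undoes only the steps that already succeeded, in reverse order. A minimal stand-alone C sketch of that shape, with invented resource names:

	#include <stdio.h>
	#include <stdlib.h>

	static int *buf_a, *buf_b;		/* hypothetical resources */

	static int example_init(void)
	{
		int ret = -1;

		buf_a = malloc(64);
		if (!buf_a)
			goto out;		/* nothing to undo yet */

		buf_b = malloc(64);
		if (!buf_b)
			goto out_free_a;	/* undo only the first step */

		return 0;

	out_free_a:
		free(buf_a);
	out:
		return ret;
	}

	int main(void)
	{
		printf("init: %d\n", example_init());
		return 0;
	}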
@@ -26,19 +26,18 @@
 
 #include <asm/spu.h>
 
-static struct spu_coredump_calls spu_coredump_calls;
+static struct spu_coredump_calls *spu_coredump_calls;
 static DEFINE_MUTEX(spu_coredump_mutex);
 
 int arch_notes_size(void)
 {
 	long ret;
-	struct module *owner = spu_coredump_calls.owner;
 
 	ret = -ENOSYS;
 	mutex_lock(&spu_coredump_mutex);
-	if (owner && try_module_get(owner)) {
-		ret = spu_coredump_calls.arch_notes_size();
-		module_put(owner);
+	if (spu_coredump_calls && try_module_get(spu_coredump_calls->owner)) {
+		ret = spu_coredump_calls->arch_notes_size();
+		module_put(spu_coredump_calls->owner);
 	}
 	mutex_unlock(&spu_coredump_mutex);
 	return ret;
@@ -46,36 +45,35 @@ int arch_notes_size(void)
 
 void arch_write_notes(struct file *file)
 {
-	struct module *owner = spu_coredump_calls.owner;
 
 	mutex_lock(&spu_coredump_mutex);
-	if (owner && try_module_get(owner)) {
-		spu_coredump_calls.arch_write_notes(file);
-		module_put(owner);
+	if (spu_coredump_calls && try_module_get(spu_coredump_calls->owner)) {
+		spu_coredump_calls->arch_write_notes(file);
+		module_put(spu_coredump_calls->owner);
 	}
 	mutex_unlock(&spu_coredump_mutex);
 }
 
 int register_arch_coredump_calls(struct spu_coredump_calls *calls)
 {
-	if (spu_coredump_calls.owner)
-		return -EBUSY;
+	int ret = 0;
 
 	mutex_lock(&spu_coredump_mutex);
-	spu_coredump_calls.arch_notes_size = calls->arch_notes_size;
-	spu_coredump_calls.arch_write_notes = calls->arch_write_notes;
-	spu_coredump_calls.owner = calls->owner;
+	if (spu_coredump_calls)
+		ret = -EBUSY;
+	else
+		spu_coredump_calls = calls;
 	mutex_unlock(&spu_coredump_mutex);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(register_arch_coredump_calls);
 
 void unregister_arch_coredump_calls(struct spu_coredump_calls *calls)
 {
-	BUG_ON(spu_coredump_calls.owner != calls->owner);
+	BUG_ON(spu_coredump_calls != calls);
 
 	mutex_lock(&spu_coredump_mutex);
-	spu_coredump_calls.owner = NULL;
+	spu_coredump_calls = NULL;
 	mutex_unlock(&spu_coredump_mutex);
 }
 EXPORT_SYMBOL_GPL(unregister_arch_coredump_calls);
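Not part of the patch: the spu_coredump change above replaces a copied callback struct with a single registered pointer, so that the check-and-register step happens atomically under the mutex and a second registration cleanly fails with -EBUSY. A stand-alone analogue of that pattern, using a pthread mutex and invented names:

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	struct coredump_calls {			/* stand-in for the real ops struct */
		int (*notes_size)(void);
	};

	static struct coredump_calls *registered;
	static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

	static int register_calls(struct coredump_calls *calls)
	{
		int ret = 0;

		pthread_mutex_lock(&reg_lock);
		if (registered)
			ret = -EBUSY;		/* slot already claimed */
		else
			registered = calls;
		pthread_mutex_unlock(&reg_lock);
		return ret;
	}

	static int my_notes_size(void) { return 42; }

	int main(void)
	{
		struct coredump_calls mine = { .notes_size = my_notes_size };

		printf("first register:  %d\n", register_calls(&mine));
		printf("second register: %d\n", register_calls(&mine));
		return 0;
	}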
@@ -1,4 +1,4 @@
-obj-y += switch.o
+obj-y += switch.o fault.o
 
 obj-$(CONFIG_SPU_FS) += spufs.o
 spufs-y += inode.o file.o context.o syscalls.o coredump.o
@@ -350,6 +350,11 @@ static int spu_backing_send_mfc_command(struct spu_context *ctx,
 	return ret;
 }
 
+static void spu_backing_restart_dma(struct spu_context *ctx)
+{
+	/* nothing to do here */
+}
+
 struct spu_context_ops spu_backing_ops = {
 	.mbox_read = spu_backing_mbox_read,
 	.mbox_stat_read = spu_backing_mbox_stat_read,
@@ -376,4 +381,5 @@ struct spu_context_ops spu_backing_ops = {
 	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
 	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
 	.send_mfc_command = spu_backing_send_mfc_command,
+	.restart_dma = spu_backing_restart_dma,
 };
@@ -41,9 +41,10 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 		goto out_free;
 	}
 	spin_lock_init(&ctx->mmio_lock);
+	spin_lock_init(&ctx->mapping_lock);
 	kref_init(&ctx->kref);
 	mutex_init(&ctx->state_mutex);
-	init_MUTEX(&ctx->run_sema);
+	mutex_init(&ctx->run_mutex);
 	init_waitqueue_head(&ctx->ibox_wq);
 	init_waitqueue_head(&ctx->wbox_wq);
 	init_waitqueue_head(&ctx->stop_wq);
@@ -51,6 +52,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	ctx->state = SPU_STATE_SAVED;
 	ctx->ops = &spu_backing_ops;
 	ctx->owner = get_task_mm(current);
+	INIT_LIST_HEAD(&ctx->rq);
 	if (gang)
 		spu_gang_add_ctx(gang, ctx);
 	ctx->rt_priority = current->rt_priority;
@@ -75,6 +77,7 @@ void destroy_spu_context(struct kref *kref)
 	spu_fini_csa(&ctx->csa);
 	if (ctx->gang)
 		spu_gang_remove_ctx(ctx->gang, ctx);
+	BUG_ON(!list_empty(&ctx->rq));
 	kfree(ctx);
 }
 
@@ -118,46 +121,6 @@ void spu_unmap_mappings(struct spu_context *ctx)
 		unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
 }
 
-/**
- * spu_acquire_exclusive - lock spu contex and protect against userspace access
- * @ctx: spu contex to lock
- *
- * Note:
- *	Returns 0 and with the context locked on success
- *	Returns negative error and with the context _unlocked_ on failure.
- */
-int spu_acquire_exclusive(struct spu_context *ctx)
-{
-	int ret = -EINVAL;
-
-	spu_acquire(ctx);
-	/*
-	 * Context is about to be freed, so we can't acquire it anymore.
-	 */
-	if (!ctx->owner)
-		goto out_unlock;
-
-	if (ctx->state == SPU_STATE_SAVED) {
-		ret = spu_activate(ctx, 0);
-		if (ret)
-			goto out_unlock;
-	} else {
-		/*
-		 * We need to exclude userspace access to the context.
-		 *
-		 * To protect against memory access we invalidate all ptes
-		 * and make sure the pagefault handlers block on the mutex.
-		 */
-		spu_unmap_mappings(ctx);
-	}
-
-	return 0;
-
- out_unlock:
-	spu_release(ctx);
-	return ret;
-}
-
 /**
  * spu_acquire_runnable - lock spu contex and make sure it is in runnable state
  * @ctx: spu contex to lock
@@ -169,12 +169,12 @@ static void spufs_arch_write_note(struct spufs_ctx_info *ctx_info, int i,
 	struct spu_context *ctx;
 	loff_t pos = 0;
 	int sz, dfd, rc, total = 0;
-	const int bufsz = 4096;
+	const int bufsz = PAGE_SIZE;
 	char *name;
 	char fullname[80], *buf;
 	struct elf_note en;
 
-	buf = kmalloc(bufsz, GFP_KERNEL);
+	buf = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!buf)
 		return;
 
@@ -187,9 +187,8 @@ static void spufs_arch_write_note(struct spufs_ctx_info *ctx_info, int i,
 	sz = spufs_coredump_read[i].size;
 
 	ctx = ctx_info->ctx;
-	if (!ctx) {
-		return;
-	}
+	if (!ctx)
+		goto out;
 
 	sprintf(fullname, "SPU/%d/%s", dfd, name);
 	en.n_namesz = strlen(fullname) + 1;
@@ -197,23 +196,25 @@ static void spufs_arch_write_note(struct spufs_ctx_info *ctx_info, int i,
 	en.n_type = NT_SPU;
 
 	if (!spufs_dump_write(file, &en, sizeof(en)))
-		return;
+		goto out;
 	if (!spufs_dump_write(file, fullname, en.n_namesz))
-		return;
+		goto out;
 	if (!spufs_dump_seek(file, roundup((unsigned long)file->f_pos, 4)))
-		return;
+		goto out;
 
 	do {
 		rc = do_coredump_read(i, ctx, buf, bufsz, &pos);
 		if (rc > 0) {
 			if (!spufs_dump_write(file, buf, rc))
-				return;
+				goto out;
 			total += rc;
 		}
 	} while (rc == bufsz && total < sz);
 
 	spufs_dump_seek(file, roundup((unsigned long)file->f_pos
 						- total + sz, 4));
+out:
+	free_page((unsigned long)buf);
 }
 
 static void spufs_arch_write_notes(struct file *file)

arch/powerpc/platforms/cell/spufs/fault.c (new file, 211 lines)
@@ -0,0 +1,211 @@
+/*
+ * Low-level SPU handling
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/spu.h>
+#include <asm/spu_csa.h>
+
+#include "spufs.h"
+
+/*
+ * This ought to be kept in sync with the powerpc specific do_page_fault
+ * function. Currently, there are a few corner cases that we haven't had
+ * to handle fortunately.
+ */
+static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea, unsigned long dsisr)
+{
+	struct vm_area_struct *vma;
+	unsigned long is_write;
+	int ret;
+
+#if 0
+	if (!IS_VALID_EA(ea)) {
+		return -EFAULT;
+	}
+#endif /* XXX */
+	if (mm == NULL) {
+		return -EFAULT;
+	}
+	if (mm->pgd == NULL) {
+		return -EFAULT;
+	}
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, ea);
+	if (!vma)
+		goto bad_area;
+	if (vma->vm_start <= ea)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+	if (expand_stack(vma, ea))
+		goto bad_area;
+good_area:
+	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
+	if (is_write) {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+	} else {
+		if (dsisr & MFC_DSISR_ACCESS_DENIED)
+			goto bad_area;
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			goto bad_area;
+	}
+	ret = 0;
+	switch (handle_mm_fault(mm, vma, ea, is_write)) {
+	case VM_FAULT_MINOR:
+		current->min_flt++;
+		break;
+	case VM_FAULT_MAJOR:
+		current->maj_flt++;
+		break;
+	case VM_FAULT_SIGBUS:
+		ret = -EFAULT;
+		goto bad_area;
+	case VM_FAULT_OOM:
+		ret = -ENOMEM;
+		goto bad_area;
+	default:
+		BUG();
+	}
+	up_read(&mm->mmap_sem);
+	return ret;
+
+bad_area:
+	up_read(&mm->mmap_sem);
+	return -EFAULT;
+}
+
+static void spufs_handle_dma_error(struct spu_context *ctx,
+				unsigned long ea, int type)
+{
+	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
+		ctx->event_return |= type;
+		wake_up_all(&ctx->stop_wq);
+	} else {
+		siginfo_t info;
+		memset(&info, 0, sizeof(info));
+
+		switch (type) {
+		case SPE_EVENT_INVALID_DMA:
+			info.si_signo = SIGBUS;
+			info.si_code = BUS_OBJERR;
+			break;
+		case SPE_EVENT_SPE_DATA_STORAGE:
+			info.si_signo = SIGBUS;
+			info.si_addr = (void __user *)ea;
+			info.si_code = BUS_ADRERR;
+			break;
+		case SPE_EVENT_DMA_ALIGNMENT:
+			info.si_signo = SIGBUS;
+			/* DAR isn't set for an alignment fault :( */
+			info.si_code = BUS_ADRALN;
+			break;
+		case SPE_EVENT_SPE_ERROR:
+			info.si_signo = SIGILL;
+			info.si_addr = (void __user *)(unsigned long)
+				ctx->ops->npc_read(ctx) - 4;
+			info.si_code = ILL_ILLOPC;
+			break;
+		}
+		if (info.si_signo)
+			force_sig_info(info.si_signo, &info, current);
+	}
+}
+
+void spufs_dma_callback(struct spu *spu, int type)
+{
+	spufs_handle_dma_error(spu->ctx, spu->dar, type);
+}
+EXPORT_SYMBOL_GPL(spufs_dma_callback);
+
+/*
+ * bottom half handler for page faults, we can't do this from
+ * interrupt context, since we might need to sleep.
+ * we also need to give up the mutex so we can get scheduled
+ * out while waiting for the backing store.
+ *
+ * TODO: try calling hash_page from the interrupt handler first
+ *       in order to speed up the easy case.
+ */
+int spufs_handle_class1(struct spu_context *ctx)
+{
+	u64 ea, dsisr, access;
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * dar and dsisr get passed from the registers
+	 * to the spu_context, to this function, but not
+	 * back to the spu if it gets scheduled again.
+	 *
+	 * if we don't handle the fault for a saved context
+	 * in time, we can still expect to get the same fault
+	 * the immediately after the context restore.
+	 */
+	if (ctx->state == SPU_STATE_RUNNABLE) {
+		ea = ctx->spu->dar;
+		dsisr = ctx->spu->dsisr;
+		ctx->spu->dar = ctx->spu->dsisr = 0;
+	} else {
+		ea = ctx->csa.priv1.mfc_dar_RW;
+		dsisr = ctx->csa.priv1.mfc_dsisr_RW;
+		ctx->csa.priv1.mfc_dar_RW = 0;
+		ctx->csa.priv1.mfc_dsisr_RW = 0;
+	}
+
+	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
+		return 0;
+
+	pr_debug("ctx %p: ea %016lx, dsisr %016lx state %d\n", ctx, ea,
+		dsisr, ctx->state);
+
+	/* we must not hold the lock when entering spu_handle_mm_fault */
+	spu_release(ctx);
+
+	access = (_PAGE_PRESENT | _PAGE_USER);
+	access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
+	local_irq_save(flags);
+	ret = hash_page(ea, access, 0x300);
+	local_irq_restore(flags);
+
+	/* hashing failed, so try the actual fault handler */
+	if (ret)
+		ret = spu_handle_mm_fault(current->mm, ea, dsisr);
+
+	spu_acquire(ctx);
+	/*
+	 * If we handled the fault successfully and are in runnable
+	 * state, restart the DMA.
+	 * In case of unhandled error report the problem to user space.
+	 */
+	if (!ret) {
+		if (ctx->spu)
+			ctx->ops->restart_dma(ctx);
+	} else
+		spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spufs_handle_class1);
@@ -44,9 +44,25 @@ spufs_mem_open(struct inode *inode, struct file *file)
 {
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
+	spin_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
-	ctx->local_store = inode->i_mapping;
-	smp_wmb();
+	if (!i->i_openers++)
+		ctx->local_store = inode->i_mapping;
+	spin_unlock(&ctx->mapping_lock);
+	return 0;
+}
+
+static int
+spufs_mem_release(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	struct spu_context *ctx = i->i_ctx;
+
+	spin_lock(&ctx->mapping_lock);
+	if (!--i->i_openers)
+		ctx->local_store = NULL;
+	spin_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -149,6 +165,7 @@ spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
 
 static const struct file_operations spufs_mem_fops = {
 	.open = spufs_mem_open,
+	.release = spufs_mem_release,
 	.read = spufs_mem_read,
 	.write = spufs_mem_write,
 	.llseek = generic_file_llseek,
@@ -238,16 +255,33 @@ static int spufs_cntl_open(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
+	spin_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
-	ctx->cntl = inode->i_mapping;
-	smp_wmb();
+	if (!i->i_openers++)
+		ctx->cntl = inode->i_mapping;
+	spin_unlock(&ctx->mapping_lock);
 	return simple_attr_open(inode, file, spufs_cntl_get,
 					spufs_cntl_set, "0x%08lx");
 }
 
+static int
+spufs_cntl_release(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	struct spu_context *ctx = i->i_ctx;
+
+	simple_attr_close(inode, file);
+
+	spin_lock(&ctx->mapping_lock);
+	if (!--i->i_openers)
+		ctx->cntl = NULL;
+	spin_unlock(&ctx->mapping_lock);
+	return 0;
+}
+
 static const struct file_operations spufs_cntl_fops = {
 	.open = spufs_cntl_open,
-	.release = simple_attr_close,
+	.release = spufs_cntl_release,
 	.read = simple_attr_read,
 	.write = simple_attr_write,
 	.mmap = spufs_cntl_mmap,
@@ -723,12 +757,28 @@ static int spufs_signal1_open(struct inode *inode, struct file *file)
 {
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
+	spin_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
-	ctx->signal1 = inode->i_mapping;
-	smp_wmb();
+	if (!i->i_openers++)
+		ctx->signal1 = inode->i_mapping;
+	spin_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
+static int
+spufs_signal1_release(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	struct spu_context *ctx = i->i_ctx;
+
+	spin_lock(&ctx->mapping_lock);
+	if (!--i->i_openers)
+		ctx->signal1 = NULL;
+	spin_unlock(&ctx->mapping_lock);
+	return 0;
+}
+
 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
 			size_t len, loff_t *pos)
 {
@@ -821,6 +871,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
 
 static const struct file_operations spufs_signal1_fops = {
 	.open = spufs_signal1_open,
+	.release = spufs_signal1_release,
 	.read = spufs_signal1_read,
 	.write = spufs_signal1_write,
 	.mmap = spufs_signal1_mmap,
@@ -830,12 +881,28 @@ static int spufs_signal2_open(struct inode *inode, struct file *file)
 {
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
+	spin_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
-	ctx->signal2 = inode->i_mapping;
-	smp_wmb();
+	if (!i->i_openers++)
+		ctx->signal2 = inode->i_mapping;
+	spin_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
+static int
+spufs_signal2_release(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	struct spu_context *ctx = i->i_ctx;
+
+	spin_lock(&ctx->mapping_lock);
+	if (!--i->i_openers)
+		ctx->signal2 = NULL;
+	spin_unlock(&ctx->mapping_lock);
+	return 0;
+}
+
 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
 			size_t len, loff_t *pos)
 {
@@ -932,6 +999,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
 
 static const struct file_operations spufs_signal2_fops = {
 	.open = spufs_signal2_open,
+	.release = spufs_signal2_release,
 	.read = spufs_signal2_read,
 	.write = spufs_signal2_write,
 	.mmap = spufs_signal2_mmap,
@@ -1031,13 +1099,30 @@ static int spufs_mss_open(struct inode *inode, struct file *file)
 	struct spu_context *ctx = i->i_ctx;
 
 	file->private_data = i->i_ctx;
-	ctx->mss = inode->i_mapping;
-	smp_wmb();
+
+	spin_lock(&ctx->mapping_lock);
+	if (!i->i_openers++)
+		ctx->mss = inode->i_mapping;
+	spin_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
+static int
+spufs_mss_release(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	struct spu_context *ctx = i->i_ctx;
+
+	spin_lock(&ctx->mapping_lock);
+	if (!--i->i_openers)
+		ctx->mss = NULL;
+	spin_unlock(&ctx->mapping_lock);
+	return 0;
+}
+
 static const struct file_operations spufs_mss_fops = {
 	.open = spufs_mss_open,
+	.release = spufs_mss_release,
 	.mmap = spufs_mss_mmap,
 };
 
@@ -1072,14 +1157,30 @@ static int spufs_psmap_open(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
+	spin_lock(&ctx->mapping_lock);
 	file->private_data = i->i_ctx;
-	ctx->psmap = inode->i_mapping;
-	smp_wmb();
+	if (!i->i_openers++)
+		ctx->psmap = inode->i_mapping;
+	spin_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
+static int
+spufs_psmap_release(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	struct spu_context *ctx = i->i_ctx;
+
+	spin_lock(&ctx->mapping_lock);
+	if (!--i->i_openers)
+		ctx->psmap = NULL;
+	spin_unlock(&ctx->mapping_lock);
+	return 0;
+}
+
 static const struct file_operations spufs_psmap_fops = {
 	.open = spufs_psmap_open,
+	.release = spufs_psmap_release,
 	.mmap = spufs_psmap_mmap,
 };
 
@@ -1126,12 +1227,27 @@ static int spufs_mfc_open(struct inode *inode, struct file *file)
 	if (atomic_read(&inode->i_count) != 1)
 		return -EBUSY;
 
+	spin_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
-	ctx->mfc = inode->i_mapping;
-	smp_wmb();
+	if (!i->i_openers++)
+		ctx->mfc = inode->i_mapping;
+	spin_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
+static int
+spufs_mfc_release(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	struct spu_context *ctx = i->i_ctx;
+
+	spin_lock(&ctx->mapping_lock);
+	if (!--i->i_openers)
+		ctx->mfc = NULL;
+	spin_unlock(&ctx->mapping_lock);
+	return 0;
+}
+
 /* interrupt-level mfc callback function. */
 void spufs_mfc_callback(struct spu *spu)
 {
@@ -1313,7 +1429,10 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
 	if (ret)
 		goto out;
 
-	spu_acquire_runnable(ctx, 0);
+	ret = spu_acquire_runnable(ctx, 0);
+	if (ret)
+		goto out;
+
 	if (file->f_flags & O_NONBLOCK) {
 		ret = ctx->ops->send_mfc_command(ctx, &cmd);
 	} else {
@@ -1399,6 +1518,7 @@ static int spufs_mfc_fasync(int fd, struct file *file, int on)
 
 static const struct file_operations spufs_mfc_fops = {
 	.open = spufs_mfc_open,
+	.release = spufs_mfc_release,
 	.read = spufs_mfc_read,
 	.write = spufs_mfc_write,
 	.poll = spufs_mfc_poll,
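Not part of the patch: every spufs file above gains the same open/release pairing, an i_openers count protected by mapping_lock that decides when the shared mapping pointer is published and when it is torn down. A stand-alone analogue of that refcounted publish/unpublish pattern, with invented names and a pthread mutex standing in for the spinlock:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t mapping_lock = PTHREAD_MUTEX_INITIALIZER;
	static int i_openers;			/* how many handles are open */
	static void *local_store;		/* published only while open */

	static void example_open(void *mapping)
	{
		pthread_mutex_lock(&mapping_lock);
		if (!i_openers++)		/* first opener publishes it */
			local_store = mapping;
		pthread_mutex_unlock(&mapping_lock);
	}

	static void example_release(void)
	{
		pthread_mutex_lock(&mapping_lock);
		if (!--i_openers)		/* last closer tears it down */
			local_store = NULL;
		pthread_mutex_unlock(&mapping_lock);
	}

	int main(void)
	{
		int mapping = 1;

		example_open(&mapping);
		example_open(&mapping);
		example_release();
		printf("still published: %s\n", local_store ? "yes" : "no");
		example_release();
		printf("still published: %s\n", local_store ? "yes" : "no");
		return 0;
	}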
@@ -296,6 +296,14 @@ static int spu_hw_send_mfc_command(struct spu_context *ctx,
 	}
 }
 
+static void spu_hw_restart_dma(struct spu_context *ctx)
+{
+	struct spu_priv2 __iomem *priv2 = ctx->spu->priv2;
+
+	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &ctx->spu->flags))
+		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
+}
+
 struct spu_context_ops spu_hw_ops = {
 	.mbox_read = spu_hw_mbox_read,
 	.mbox_stat_read = spu_hw_mbox_stat_read,
@@ -320,4 +328,5 @@ struct spu_context_ops spu_hw_ops = {
 	.read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
 	.get_mfc_free_elements = spu_hw_get_mfc_free_elements,
 	.send_mfc_command = spu_hw_send_mfc_command,
+	.restart_dma = spu_hw_restart_dma,
 };
@@ -36,6 +36,7 @@
 #include <asm/prom.h>
 #include <asm/semaphore.h>
 #include <asm/spu.h>
+#include <asm/spu_priv1.h>
 #include <asm/uaccess.h>
 
 #include "spufs.h"
@@ -54,6 +55,7 @@ spufs_alloc_inode(struct super_block *sb)
 
 	ei->i_gang = NULL;
 	ei->i_ctx = NULL;
+	ei->i_openers = 0;
 
 	return &ei->vfs_inode;
 }
@@ -520,13 +522,14 @@ out:
 
 /* File system initialization */
 enum {
-	Opt_uid, Opt_gid, Opt_err,
+	Opt_uid, Opt_gid, Opt_mode, Opt_err,
 };
 
 static match_table_t spufs_tokens = {
 	{ Opt_uid, "uid=%d" },
 	{ Opt_gid, "gid=%d" },
+	{ Opt_mode, "mode=%o" },
 	{ Opt_err, NULL },
 };
 
 static int
@@ -553,6 +556,11 @@ spufs_parse_options(char *options, struct inode *root)
 				return 0;
 			root->i_gid = option;
 			break;
+		case Opt_mode:
+			if (match_octal(&args[0], &option))
+				return 0;
+			root->i_mode = option | S_IFDIR;
+			break;
 		default:
 			return 0;
 		}
@@ -560,6 +568,11 @@ spufs_parse_options(char *options, struct inode *root)
 	return 1;
 }
 
+static void spufs_exit_isolated_loader(void)
+{
+	kfree(isolated_loader);
+}
+
 static void
 spufs_init_isolated_loader(void)
 {
@@ -653,6 +666,10 @@ static int __init spufs_init(void)
 {
 	int ret;
 
+	ret = -ENODEV;
+	if (!spu_management_ops)
+		goto out;
+
 	ret = -ENOMEM;
 	spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
 			sizeof(struct spufs_inode_info), 0,
@@ -660,25 +677,29 @@ static int __init spufs_init(void)
 
 	if (!spufs_inode_cache)
 		goto out;
-	if (spu_sched_init() != 0) {
-		kmem_cache_destroy(spufs_inode_cache);
-		goto out;
-	}
-	ret = register_filesystem(&spufs_type);
+	ret = spu_sched_init();
 	if (ret)
 		goto out_cache;
+	ret = register_filesystem(&spufs_type);
+	if (ret)
+		goto out_sched;
 	ret = register_spu_syscalls(&spufs_calls);
 	if (ret)
 		goto out_fs;
 	ret = register_arch_coredump_calls(&spufs_coredump_calls);
 	if (ret)
-		goto out_fs;
+		goto out_syscalls;
 
 	spufs_init_isolated_loader();
 
 	return 0;
 
+out_syscalls:
+	unregister_spu_syscalls(&spufs_calls);
 out_fs:
 	unregister_filesystem(&spufs_type);
+out_sched:
+	spu_sched_exit();
 out_cache:
 	kmem_cache_destroy(spufs_inode_cache);
 out:
@@ -689,6 +710,7 @@ module_init(spufs_init);
 static void __exit spufs_exit(void)
 {
 	spu_sched_exit();
+	spufs_exit_isolated_loader();
 	unregister_arch_coredump_calls(&spufs_coredump_calls);
 	unregister_spu_syscalls(&spufs_calls);
 	unregister_filesystem(&spufs_type);
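Not part of the patch: the new mode= mount option above is parsed as an octal number (match_octal), matching the way permission bits are conventionally written. A tiny stand-alone illustration of octal parsing with sscanf, using a made-up option string:

	#include <stdio.h>

	int main(void)
	{
		const char *opt = "mode=0775";	/* hypothetical mount option */
		unsigned int mode;

		if (sscanf(opt, "mode=%o", &mode) == 1)
			printf("parsed mode: %04o (decimal %u)\n", mode, mode);
		return 0;
	}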
@@ -18,27 +18,6 @@ void spufs_stop_callback(struct spu *spu)
 	wake_up_all(&ctx->stop_wq);
 }
 
-void spufs_dma_callback(struct spu *spu, int type)
-{
-	struct spu_context *ctx = spu->ctx;
-
-	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
-		ctx->event_return |= type;
-		wake_up_all(&ctx->stop_wq);
-	} else {
-		switch (type) {
-		case SPE_EVENT_DMA_ALIGNMENT:
-		case SPE_EVENT_SPE_DATA_STORAGE:
-		case SPE_EVENT_INVALID_DMA:
-			force_sig(SIGBUS, /* info, */ current);
-			break;
-		case SPE_EVENT_SPE_ERROR:
-			force_sig(SIGILL, /* info */ current);
-			break;
-		}
-	}
-}
-
 static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
 {
 	struct spu *spu;
@@ -63,13 +42,18 @@ static int spu_setup_isolated(struct spu_context *ctx)
 	const u32 status_loading = SPU_STATUS_RUNNING
 		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;
 
+	ret = -ENODEV;
 	if (!isolated_loader)
-		return -ENODEV;
-
-	ret = spu_acquire_exclusive(ctx);
-	if (ret)
 		goto out;
 
+	/*
+	 * We need to exclude userspace access to the context.
+	 *
+	 * To protect against memory access we invalidate all ptes
+	 * and make sure the pagefault handlers block on the mutex.
+	 */
+	spu_unmap_mappings(ctx);
+
 	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;
 
 	/* purge the MFC DMA queue to ensure no spurious accesses before we
@@ -82,7 +66,7 @@ static int spu_setup_isolated(struct spu_context *ctx)
 			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
 					__FUNCTION__);
 			ret = -EIO;
-			goto out_unlock;
+			goto out;
 		}
 		cond_resched();
 	}
@@ -119,12 +103,15 @@ static int spu_setup_isolated(struct spu_context *ctx)
 		pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
 		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
 		ret = -EACCES;
+		goto out_drop_priv;
+	}
 
-	} else if (!(status & SPU_STATUS_ISOLATED_STATE)) {
+	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
 		/* This isn't allowed by the CBEA, but check anyway */
 		pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
 		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
 		ret = -EINVAL;
+		goto out_drop_priv;
 	}
 
 out_drop_priv:
@@ -132,30 +119,19 @@ out_drop_priv:
 	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
 	spu_mfc_sr1_set(ctx->spu, sr1);
 
-out_unlock:
-	spu_release(ctx);
 out:
 	return ret;
 }
 
-static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
+static int spu_run_init(struct spu_context *ctx, u32 * npc)
 {
-	int ret;
-	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
-
-	ret = spu_acquire_runnable(ctx, 0);
-	if (ret)
-		return ret;
-
 	if (ctx->flags & SPU_CREATE_ISOLATE) {
+		unsigned long runcntl;
+
 		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
-			/* Need to release ctx, because spu_setup_isolated will
-			 * acquire it exclusively.
-			 */
-			spu_release(ctx);
-			ret = spu_setup_isolated(ctx);
-			if (!ret)
-				ret = spu_acquire_runnable(ctx, 0);
+			int ret = spu_setup_isolated(ctx);
+			if (ret)
+				return ret;
 		}
 
 		/* if userspace has set the runcntrl register (eg, to issue an
@@ -164,16 +140,17 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
 		if (runcntl == 0)
 			runcntl = SPU_RUNCNTL_RUNNABLE;
+		ctx->ops->runcntl_write(ctx, runcntl);
 	} else {
 		spu_start_tick(ctx);
 		ctx->ops->npc_write(ctx, *npc);
+		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
 	}
 
-	ctx->ops->runcntl_write(ctx, runcntl);
-	return ret;
+	return 0;
 }
 
-static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
+static int spu_run_fini(struct spu_context *ctx, u32 * npc,
 		u32 * status)
 {
 	int ret = 0;
@@ -189,19 +166,27 @@ static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
 	return ret;
 }
 
-static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
+static int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
 		u32 *status)
 {
 	int ret;
 
-	if ((ret = spu_run_fini(ctx, npc, status)) != 0)
+	ret = spu_run_fini(ctx, npc, status);
+	if (ret)
 		return ret;
-	if (*status & (SPU_STATUS_STOPPED_BY_STOP |
-	    SPU_STATUS_STOPPED_BY_HALT)) {
+
+	if (*status & (SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_STOPPED_BY_HALT))
 		return *status;
-	}
-	if ((ret = spu_run_init(ctx, npc)) != 0)
+
+	ret = spu_acquire_runnable(ctx, 0);
+	if (ret)
 		return ret;
+
+	ret = spu_run_init(ctx, npc);
+	if (ret) {
+		spu_release(ctx);
+		return ret;
+	}
 	return 0;
 }
 
@@ -253,17 +238,17 @@ int spu_process_callback(struct spu_context *ctx)
 {
 	struct spu_syscall_block s;
 	u32 ls_pointer, npc;
-	char *ls;
+	void __iomem *ls;
 	long spu_ret;
 	int ret;
 
 	/* get syscall block from local store */
-	npc = ctx->ops->npc_read(ctx);
-	ls = ctx->ops->get_ls(ctx);
-	ls_pointer = *(u32*)(ls + npc);
+	npc = ctx->ops->npc_read(ctx) & ~3;
+	ls = (void __iomem *)ctx->ops->get_ls(ctx);
+	ls_pointer = in_be32(ls + npc);
 	if (ls_pointer > (LS_SIZE - sizeof(s)))
 		return -EFAULT;
-	memcpy(&s, ls + ls_pointer, sizeof (s));
+	memcpy_fromio(&s, ls + ls_pointer, sizeof(s));
 
 	/* do actual syscall without pinning the spu */
 	ret = 0;
@@ -283,7 +268,7 @@ int spu_process_callback(struct spu_context *ctx)
 	}
 
 	/* write result, jump over indirect pointer */
-	memcpy(ls + ls_pointer, &spu_ret, sizeof (spu_ret));
+	memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
 	ctx->ops->npc_write(ctx, npc);
 	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
 	return ret;
@@ -292,11 +277,8 @@ int spu_process_callback(struct spu_context *ctx)
 static inline int spu_process_events(struct spu_context *ctx)
 {
 	struct spu *spu = ctx->spu;
-	u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
 	int ret = 0;
 
-	if (spu->dsisr & pte_fault)
-		ret = spu_irq_class_1_bottom(spu);
 	if (spu->class_0_pending)
 		ret = spu_irq_class_0_bottom(spu);
 	if (!ret && signal_pending(current))
@@ -310,14 +292,21 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 	int ret;
 	u32 status;
 
-	if (down_interruptible(&ctx->run_sema))
+	if (mutex_lock_interruptible(&ctx->run_mutex))
 		return -ERESTARTSYS;
 
 	ctx->ops->master_start(ctx);
 	ctx->event_return = 0;
-	ret = spu_run_init(ctx, npc);
+
+	ret = spu_acquire_runnable(ctx, 0);
 	if (ret)
+		return ret;
+
+	ret = spu_run_init(ctx, npc);
+	if (ret) {
+		spu_release(ctx);
 		goto out;
+	}
 
 	do {
 		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
@@ -330,6 +319,10 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 				break;
 			status &= ~SPU_STATUS_STOPPED_BY_STOP;
 		}
+		ret = spufs_handle_class1(ctx);
+		if (ret)
+			break;
+
 		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
 			ret = spu_reacquire_runnable(ctx, npc, &status);
 			if (ret) {
@@ -363,6 +356,6 @@ out2:
 
 out:
 	*event = ctx->event_return;
-	up(&ctx->run_sema);
+	mutex_unlock(&ctx->run_mutex);
 	return ret;
 }
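Not part of the patch: spu_process_callback above starts masking the low two bits of the program counter (npc & ~3) before using it as a local-store offset, since instructions are word aligned. A small stand-alone check of what that mask does, with example values only:

	#include <stdio.h>

	int main(void)
	{
		unsigned int npc = 0x1236;	/* example value, not word aligned */
		unsigned int aligned = npc & ~3u;	/* clear the low two bits */

		printf("npc 0x%x -> aligned 0x%x\n", npc, aligned);
		return 0;
	}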
@ -71,14 +71,27 @@ static inline int node_allowed(int node)
|
|||||||
|
|
||||||
void spu_start_tick(struct spu_context *ctx)
|
void spu_start_tick(struct spu_context *ctx)
|
||||||
{
|
{
|
||||||
if (ctx->policy == SCHED_RR)
|
if (ctx->policy == SCHED_RR) {
|
||||||
|
/*
|
||||||
|
* Make sure the exiting bit is cleared.
|
||||||
|
*/
|
||||||
|
clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
|
||||||
|
mb();
|
||||||
queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
|
queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void spu_stop_tick(struct spu_context *ctx)
|
void spu_stop_tick(struct spu_context *ctx)
|
||||||
{
|
{
|
||||||
if (ctx->policy == SCHED_RR)
|
if (ctx->policy == SCHED_RR) {
|
||||||
|
/*
|
||||||
|
* While the work can be rearming normally setting this flag
|
||||||
|
* makes sure it does not rearm itself anymore.
|
||||||
|
*/
|
||||||
|
set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
|
||||||
|
mb();
|
||||||
cancel_delayed_work(&ctx->sched_work);
|
cancel_delayed_work(&ctx->sched_work);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void spu_sched_tick(struct work_struct *work)
|
void spu_sched_tick(struct work_struct *work)
|
||||||
@ -86,7 +99,15 @@ void spu_sched_tick(struct work_struct *work)
|
|||||||
struct spu_context *ctx =
|
struct spu_context *ctx =
|
||||||
container_of(work, struct spu_context, sched_work.work);
|
container_of(work, struct spu_context, sched_work.work);
|
||||||
struct spu *spu;
|
struct spu *spu;
|
||||||
int rearm = 1;
|
int preempted = 0;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If this context is being stopped avoid rescheduling from the
|
||||||
|
* scheduler tick because we would block on the state_mutex.
|
||||||
|
* The caller will yield the spu later on anyway.
|
||||||
|
*/
|
||||||
|
if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
|
||||||
|
return;
|
||||||
|
|
||||||
mutex_lock(&ctx->state_mutex);
|
mutex_lock(&ctx->state_mutex);
|
||||||
spu = ctx->spu;
|
spu = ctx->spu;
|
||||||
@ -94,12 +115,19 @@ void spu_sched_tick(struct work_struct *work)
|
|||||||
int best = sched_find_first_bit(spu_prio->bitmap);
|
int best = sched_find_first_bit(spu_prio->bitmap);
|
||||||
if (best <= ctx->prio) {
|
if (best <= ctx->prio) {
|
||||||
spu_deactivate(ctx);
|
spu_deactivate(ctx);
|
||||||
rearm = 0;
|
preempted = 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
mutex_unlock(&ctx->state_mutex);
|
mutex_unlock(&ctx->state_mutex);
|
||||||
|
|
||||||
if (rearm)
|
if (preempted) {
|
||||||
|
/*
|
||||||
|
* We need to break out of the wait loop in spu_run manually
|
||||||
|
* to ensure this context gets put on the runqueue again
|
||||||
|
* ASAP.
|
||||||
|
*/
|
||||||
|
wake_up(&ctx->stop_wq);
|
||||||
|
} else
|
||||||
spu_start_tick(ctx);
|
spu_start_tick(ctx);
|
||||||
}
|
}
|
||||||
|
|
||||||
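The SPU_SCHED_EXITING bit added above addresses a classic problem with self-rearming delayed work: cancel_delayed_work() only removes a pending timer, so an instance that is already executing could still requeue itself. Setting a flag before the cancel, and checking it at the top of the work function, closes that window. A stripped-down sketch of the pattern with made-up names (not the spufs code itself):

/* Sketch of the stop-flag pattern; MY_TICK_EXITING mirrors SPU_SCHED_EXITING. */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#define MY_TICK_EXITING	0

struct my_ctx {
	unsigned long		flags;
	struct delayed_work	tick_work;
};

static void my_tick(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, tick_work.work);

	/* A stop is in progress: do not touch shared state and do not rearm. */
	if (test_bit(MY_TICK_EXITING, &ctx->flags))
		return;

	/* ... periodic work ... then rearm for the next tick: */
	schedule_delayed_work(&ctx->tick_work, HZ);
}

static void my_start_tick(struct my_ctx *ctx)
{
	clear_bit(MY_TICK_EXITING, &ctx->flags);
	mb();			/* flag must be visible before the work can run */
	schedule_delayed_work(&ctx->tick_work, HZ);
}

static void my_stop_tick(struct my_ctx *ctx)
{
	set_bit(MY_TICK_EXITING, &ctx->flags);
	mb();			/* make the flag visible to a running instance */
	cancel_delayed_work(&ctx->tick_work);
}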
@@ -208,58 +236,40 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
  * spu_add_to_rq - add a context to the runqueue
  * @ctx: context to add
  */
-static void spu_add_to_rq(struct spu_context *ctx)
+static void __spu_add_to_rq(struct spu_context *ctx)
 {
-	spin_lock(&spu_prio->runq_lock);
-	list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
-	set_bit(ctx->prio, spu_prio->bitmap);
-	spin_unlock(&spu_prio->runq_lock);
+	int prio = ctx->prio;
+
+	list_add_tail(&ctx->rq, &spu_prio->runq[prio]);
+	set_bit(prio, spu_prio->bitmap);
 }
 
-/**
- * spu_del_from_rq - remove a context from the runqueue
- * @ctx: context to remove
- */
-static void spu_del_from_rq(struct spu_context *ctx)
+static void __spu_del_from_rq(struct spu_context *ctx)
 {
-	spin_lock(&spu_prio->runq_lock);
-	list_del_init(&ctx->rq);
-	if (list_empty(&spu_prio->runq[ctx->prio]))
-		clear_bit(ctx->prio, spu_prio->bitmap);
-	spin_unlock(&spu_prio->runq_lock);
-}
-
-/**
- * spu_grab_context - remove one context from the runqueue
- * @prio: priority of the context to be removed
- *
- * This function removes one context from the runqueue for priority @prio.
- * If there is more than one context with the given priority the first
- * task on the runqueue will be taken.
- *
- * Returns the spu_context it just removed.
- *
- * Must be called with spu_prio->runq_lock held.
- */
-static struct spu_context *spu_grab_context(int prio)
-{
-	struct list_head *rq = &spu_prio->runq[prio];
-
-	if (list_empty(rq))
-		return NULL;
-	return list_entry(rq->next, struct spu_context, rq);
+	int prio = ctx->prio;
+
+	if (!list_empty(&ctx->rq))
+		list_del_init(&ctx->rq);
+	if (list_empty(&spu_prio->runq[prio]))
+		clear_bit(prio, spu_prio->bitmap);
 }
 
 static void spu_prio_wait(struct spu_context *ctx)
 {
 	DEFINE_WAIT(wait);
 
+	spin_lock(&spu_prio->runq_lock);
 	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
 	if (!signal_pending(current)) {
+		__spu_add_to_rq(ctx);
+		spin_unlock(&spu_prio->runq_lock);
 		mutex_unlock(&ctx->state_mutex);
 		schedule();
 		mutex_lock(&ctx->state_mutex);
+		spin_lock(&spu_prio->runq_lock);
+		__spu_del_from_rq(ctx);
 	}
+	spin_unlock(&spu_prio->runq_lock);
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&ctx->stop_wq, &wait);
 }
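For readers unfamiliar with the data structure being manipulated here: spu_prio keeps one list per priority level plus a bitmap with one bit per level, so the highest-priority waiter can be found with a single bit scan instead of a list walk. Roughly, under an assumed rq->lock (a sketch with hypothetical names; the real code uses spu_prio, MAX_PRIO and sched_find_first_bit):

/* Sketch of a priority-bitmap runqueue; callers hold rq->lock. */
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

#define NPRIO 140			/* stand-in for MAX_PRIO */

struct my_rq {
	DECLARE_BITMAP(bitmap, NPRIO);	/* bit n set <=> runq[n] is non-empty */
	struct list_head runq[NPRIO];
	spinlock_t lock;
};

struct my_ctx {
	int prio;			/* lower number = higher priority */
	struct list_head rq;
};

static void my_add_to_rq(struct my_rq *rq, struct my_ctx *ctx)
{
	list_add_tail(&ctx->rq, &rq->runq[ctx->prio]);
	set_bit(ctx->prio, rq->bitmap);
}

static void my_del_from_rq(struct my_rq *rq, struct my_ctx *ctx)
{
	if (!list_empty(&ctx->rq))
		list_del_init(&ctx->rq);
	if (list_empty(&rq->runq[ctx->prio]))
		clear_bit(ctx->prio, rq->bitmap);
}

static struct my_ctx *my_pick_best(struct my_rq *rq)
{
	int best = find_first_bit(rq->bitmap, NPRIO);

	if (best >= NPRIO)
		return NULL;
	return list_first_entry(&rq->runq[best], struct my_ctx, rq);
}

The spu_reschedule() hunk that follows does exactly this scan, then dequeues the winner and wakes it so spu_activate() can bind it to the freed SPU.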
@@ -280,9 +290,14 @@ static void spu_reschedule(struct spu *spu)
 	spin_lock(&spu_prio->runq_lock);
 	best = sched_find_first_bit(spu_prio->bitmap);
 	if (best < MAX_PRIO) {
-		struct spu_context *ctx = spu_grab_context(best);
-		if (ctx)
-			wake_up(&ctx->stop_wq);
+		struct list_head *rq = &spu_prio->runq[best];
+		struct spu_context *ctx;
+
+		BUG_ON(list_empty(rq));
+
+		ctx = list_entry(rq->next, struct spu_context, rq);
+		__spu_del_from_rq(ctx);
+		wake_up(&ctx->stop_wq);
 	}
 	spin_unlock(&spu_prio->runq_lock);
 }
@@ -365,6 +380,12 @@ static struct spu *find_victim(struct spu_context *ctx)
 			}
 			spu_unbind_context(spu, victim);
 			mutex_unlock(&victim->state_mutex);
+			/*
+			 * We need to break out of the wait loop in spu_run
+			 * manually to ensure this context gets put on the
+			 * runqueue again ASAP.
+			 */
+			wake_up(&victim->stop_wq);
 			return spu;
 		}
 	}
@@ -377,7 +398,7 @@ static struct spu *find_victim(struct spu_context *ctx)
  * @ctx: spu context to schedule
  * @flags: flags (currently ignored)
  *
- * Tries to find a free spu to run @ctx. If no free spu is availble
+ * Tries to find a free spu to run @ctx. If no free spu is available
  * add the context to the runqueue so it gets woken up once an spu
  * is available.
  */
@@ -402,9 +423,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
 			return 0;
 		}
 
-		spu_add_to_rq(ctx);
 		spu_prio_wait(ctx);
-		spu_del_from_rq(ctx);
 	} while (!signal_pending(current));
 
 	return -ERESTARTSYS;
@@ -41,7 +41,7 @@ struct spu_gang;
 
 /* ctx->sched_flags */
 enum {
-	SPU_SCHED_WAKE = 0, /* currently unused */
+	SPU_SCHED_EXITING = 0,
 };
 
 struct spu_context {
@@ -50,16 +50,17 @@ struct spu_context {
 	spinlock_t mmio_lock; /* protects mmio access */
 	struct address_space *local_store; /* local store mapping. */
 	struct address_space *mfc; /* 'mfc' area mappings. */
 	struct address_space *cntl; /* 'control' area mappings. */
 	struct address_space *signal1; /* 'signal1' area mappings. */
 	struct address_space *signal2; /* 'signal2' area mappings. */
 	struct address_space *mss; /* 'mss' area mappings. */
 	struct address_space *psmap; /* 'psmap' area mappings. */
+	spinlock_t mapping_lock;
 	u64 object_id; /* user space pointer for oprofile */
 
 	enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
 	struct mutex state_mutex;
-	struct semaphore run_sema;
+	struct mutex run_mutex;
 
 	struct mm_struct *owner;
 
@@ -140,6 +141,7 @@ struct spu_context_ops {
 					struct spu_dma_info * info);
 	void (*proxydma_info_read) (struct spu_context * ctx,
 					struct spu_proxydma_info * info);
+	void (*restart_dma)(struct spu_context *ctx);
 };
 
 extern struct spu_context_ops spu_hw_ops;
@@ -149,6 +151,7 @@ struct spufs_inode_info {
 	struct spu_context *i_ctx;
 	struct spu_gang *i_gang;
 	struct inode vfs_inode;
+	int i_openers;
 };
 #define SPUFS_I(inode) \
 	container_of(inode, struct spufs_inode_info, vfs_inode)
@@ -170,6 +173,9 @@ int put_spu_gang(struct spu_gang *gang);
 void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx);
 void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
 
+/* fault handling */
+int spufs_handle_class1(struct spu_context *ctx);
+
 /* context management */
 static inline void spu_acquire(struct spu_context *ctx)
 {
@@ -190,7 +196,6 @@ void spu_unmap_mappings(struct spu_context *ctx);
 void spu_forget(struct spu_context *ctx);
 int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags);
 void spu_acquire_saved(struct spu_context *ctx);
-int spu_acquire_exclusive(struct spu_context *ctx);
 
 int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);
@@ -218,14 +223,13 @@ extern char *isolated_loader;
 		prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE);	\
 		if (condition)						\
 			break;						\
-		if (!signal_pending(current)) {				\
-			spu_release(ctx);				\
-			schedule();					\
-			spu_acquire(ctx);				\
-			continue;					\
+		if (signal_pending(current)) {				\
+			__ret = -ERESTARTSYS;				\
+			break;						\
 		}							\
-		__ret = -ERESTARTSYS;					\
-		break;							\
+		spu_release(ctx);					\
+		schedule();						\
+		spu_acquire(ctx);					\
 	}								\
 	finish_wait(&(wq), &__wait);					\
 	__ret;								\
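Expanded into an ordinary function, the rewritten macro is the standard prepare_to_wait/finish_wait loop, with the twist that the context lock is dropped around schedule() and retaken afterwards. A sketch of the expanded shape (assuming the spu_acquire/spu_release helpers declared in this header; the real macro returns __ret through a statement expression and takes the condition as an arbitrary expression):

/* Expanded sketch of the wait loop implemented by the macro above. */
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

static int my_spufs_wait(wait_queue_head_t *wq, struct spu_context *ctx,
			 int (*condition)(struct spu_context *))
{
	DEFINE_WAIT(__wait);
	int __ret = 0;

	for (;;) {
		prepare_to_wait(wq, &__wait, TASK_INTERRUPTIBLE);
		if (condition(ctx))
			break;
		if (signal_pending(current)) {
			__ret = -ERESTARTSYS;
			break;
		}
		/* Sleep without holding the context lock, then retake it. */
		spu_release(ctx);
		schedule();
		spu_acquire(ctx);
	}
	finish_wait(wq, &__wait);
	return __ret;
}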
@@ -2084,6 +2084,10 @@ int spu_save(struct spu_state *prev, struct spu *spu)
 	int rc;
 
 	acquire_spu_lock(spu);	/* Step 1. */
+	prev->dar = spu->dar;
+	prev->dsisr = spu->dsisr;
+	spu->dar = 0;
+	spu->dsisr = 0;
 	rc = __do_spu_save(prev, spu);	/* Steps 2-53. */
 	release_spu_lock(spu);
 	if (rc != 0 && rc != 2 && rc != 6) {
@@ -2109,9 +2113,9 @@ int spu_restore(struct spu_state *new, struct spu *spu)
 
 	acquire_spu_lock(spu);
 	harvest(NULL, spu);
-	spu->dar = 0;
-	spu->dsisr = 0;
 	spu->slb_replace = 0;
+	new->dar = 0;
+	new->dsisr = 0;
 	spu->class_0_pending = 0;
 	rc = __do_spu_restore(new, spu);
 	release_spu_lock(spu);
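The spu_save/spu_restore changes above are a copy-and-clear of the fault registers: the faulting address (DAR) and status (DSISR) now travel with the saved context state instead of staying latched in the physical SPU, and a freshly restored context starts with no fault pending. Schematically (illustrative helpers, not functions from the patch):

/* Sketch of the copy-and-clear done inline in spu_save()/spu_restore(). */
static void save_fault_regs(struct spu_state *csa, struct spu *spu)
{
	csa->dar = spu->dar;		/* faulting effective address */
	csa->dsisr = spu->dsisr;	/* fault status/reason bits */
	spu->dar = 0;			/* hardware-side copy is now stale */
	spu->dsisr = 0;
}

static void clear_fault_regs(struct spu_state *csa)
{
	csa->dar = 0;			/* restored context: no fault pending */
	csa->dsisr = 0;
}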
@@ -234,6 +234,7 @@ extern int __hash_page_64K(unsigned long ea, unsigned long access,
 			   unsigned long vsid, pte_t *ptep, unsigned long trap,
 			   unsigned int local);
 struct mm_struct;
+extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
 extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
 			  unsigned long ea, unsigned long vsid, int local,
 			  unsigned long trap);
@@ -242,6 +242,7 @@ struct spu_state {
 	u64 spu_chnldata_RW[32];
 	u32 spu_mailbox_data[4];
 	u32 pu_mailbox_data[1];
+	u64 dar, dsisr;
 	unsigned long suspend_time;
 	spinlock_t register_lock;
 };