Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/fault.c')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/fault.c  |  51
1 file changed, 35 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index 0f75c07e29d8..917eab4be486 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -33,7 +33,8 @@
* function. Currently, there are a few corner cases that we haven't had
* to handle fortunately.
*/
-static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea, unsigned long dsisr)
+static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+ unsigned long dsisr, unsigned *flt)
{
struct vm_area_struct *vma;
unsigned long is_write;
@@ -73,22 +74,21 @@ good_area:
goto bad_area;
}
ret = 0;
- switch (handle_mm_fault(mm, vma, ea, is_write)) {
- case VM_FAULT_MINOR:
- current->min_flt++;
- break;
- case VM_FAULT_MAJOR:
- current->maj_flt++;
- break;
- case VM_FAULT_SIGBUS:
- ret = -EFAULT;
- goto bad_area;
- case VM_FAULT_OOM:
- ret = -ENOMEM;
- goto bad_area;
- default:
+ *flt = handle_mm_fault(mm, vma, ea, is_write);
+ if (unlikely(*flt & VM_FAULT_ERROR)) {
+ if (*flt & VM_FAULT_OOM) {
+ ret = -ENOMEM;
+ goto bad_area;
+ } else if (*flt & VM_FAULT_SIGBUS) {
+ ret = -EFAULT;
+ goto bad_area;
+ }
BUG();
}
+ if (*flt & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
up_read(&mm->mmap_sem);
return ret;
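
The hunk above tracks a kernel-wide change to handle_mm_fault(): instead of returning one of a fixed set of codes (VM_FAULT_MINOR, VM_FAULT_MAJOR, ...), it now returns a bitmask in which VM_FAULT_ERROR summarizes the failure bits. A minimal standalone sketch of the resulting caller pattern (the flag values are written out for illustration and assume the 2.6.23-era definitions in include/linux/mm.h; fault_to_errno() is a hypothetical helper, not part of the patch):

	#define VM_FAULT_OOM	0x0001	/* illustrative 2.6.23-era values */
	#define VM_FAULT_SIGBUS	0x0002
	#define VM_FAULT_MAJOR	0x0004
	#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS)

	/* Map a fault bitmask to an errno, or 0 on success. */
	static int fault_to_errno(unsigned flt)
	{
		if (!(flt & VM_FAULT_ERROR))
			return 0;		/* success; test VM_FAULT_MAJOR separately */
		if (flt & VM_FAULT_OOM)
			return -ENOMEM;		/* allocation failed during the fault */
		if (flt & VM_FAULT_SIGBUS)
			return -EFAULT;		/* bad address / SIGBUS condition */
		return -EINVAL;			/* unknown error bit; the patch BUG()s instead */
	}

Because the result is a bitmask, an informational bit such as VM_FAULT_MAJOR can ride along with the success path; the new *flt out-parameter exists to forward it to the caller for fault accounting.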
@@ -153,6 +153,7 @@ int spufs_handle_class1(struct spu_context *ctx)
{
u64 ea, dsisr, access;
unsigned long flags;
+ unsigned flt = 0;
int ret;
/*
@@ -178,9 +179,15 @@ int spufs_handle_class1(struct spu_context *ctx)
if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
return 0;
+ spuctx_switch_state(ctx, SPU_UTIL_IOWAIT);
+
pr_debug("ctx %p: ea %016lx, dsisr %016lx state %d\n", ctx, ea,
dsisr, ctx->state);
+ ctx->stats.hash_flt++;
+ if (ctx->state == SPU_STATE_RUNNABLE)
+ ctx->spu->stats.hash_flt++;
+
/* we must not hold the lock when entering spu_handle_mm_fault */
spu_release(ctx);
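
The comment above is the key constraint in this hunk: spu_handle_mm_fault() may sleep while taking mm->mmap_sem, so the context lock has to be dropped around the call. That opens a window in which the scheduler can unload the context, which is why the final hunk re-checks ctx->spu before restarting the DMA and only bumps the per-SPU counters when ctx->state is SPU_STATE_RUNNABLE. A condensed sketch of the window (the rationale is inferred from the code, not stated in the patch):

	spu_release(ctx);	/* ctx unlocked: the scheduler may deactivate it */
	ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);	/* may sleep */
	spu_acquire(ctx);	/* relocked: ctx->spu may now be NULL */
	if (!ret && ctx->spu)	/* hence the re-check before restart_dma() */
		ctx->ops->restart_dma(ctx);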
@@ -192,7 +199,7 @@ int spufs_handle_class1(struct spu_context *ctx)
/* hashing failed, so try the actual fault handler */
if (ret)
- ret = spu_handle_mm_fault(current->mm, ea, dsisr);
+ ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);
spu_acquire(ctx);
/*
@@ -201,11 +208,23 @@ int spufs_handle_class1(struct spu_context *ctx)
* In case of unhandled error report the problem to user space.
*/
if (!ret) {
+ if (flt & VM_FAULT_MAJOR)
+ ctx->stats.maj_flt++;
+ else
+ ctx->stats.min_flt++;
+ if (ctx->state == SPU_STATE_RUNNABLE) {
+ if (flt & VM_FAULT_MAJOR)
+ ctx->spu->stats.maj_flt++;
+ else
+ ctx->spu->stats.min_flt++;
+ }
+
if (ctx->spu)
ctx->ops->restart_dma(ctx);
} else
spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
+ spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
return ret;
}
EXPORT_SYMBOL_GPL(spufs_handle_class1);
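
Taken together, spufs_handle_class1() now brackets fault handling with utilization-state switches and fault counters. A condensed walk-through of the resulting flow, using the names introduced above (a readability summary under those assumptions, not the literal kernel code):

	spuctx_switch_state(ctx, SPU_UTIL_IOWAIT);	/* fault time is accounted as I/O wait */
	ctx->stats.hash_flt++;				/* every class 1 fault is a hash fault */
	if (ctx->state == SPU_STATE_RUNNABLE)
		ctx->spu->stats.hash_flt++;		/* mirrored per-SPU while loaded */

	/* ...drop the context lock, resolve the fault, reacquire (see above)... */

	if (!ret) {					/* classify major vs. minor on success */
		if (flt & VM_FAULT_MAJOR)
			ctx->stats.maj_flt++;
		else
			ctx->stats.min_flt++;
	}
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);	/* back to plain system time */

The SPU_UTIL_IOWAIT/SPU_UTIL_SYSTEM pair feeds the spufs utilization statistics, so time spent resolving page faults shows up as I/O wait rather than as useful SPU work.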