powerpc/cell: Move spu_handle_mm_fault() out of cell platform

Currently spu_handle_mm_fault() is in the cell platform.

This code is generically useful for other non-cell co-processors on powerpc.

This patch moves this function out of the cell platform into arch/powerpc/mm so
that others may use it.

Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
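
Illustration (not part of the patch): a minimal sketch of how a hypothetical non-cell coprocessor driver might use the relocated helper once this change is applied. The my_copro_resolve_fault() wrapper and its device plumbing are assumptions made for illustration; copro_handle_mm_fault() and VM_FAULT_MAJOR are the real interfaces touched by the diff below.

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/copro.h>

/* Hypothetical: resolve a translation fault reported by a non-cell coprocessor. */
static int my_copro_resolve_fault(struct mm_struct *mm, unsigned long ea,
				  unsigned long dsisr)
{
	unsigned flt = 0;
	int ret;

	/* No locks held here; the helper takes mm->mmap_sem itself. */
	ret = copro_handle_mm_fault(mm, ea, dsisr, &flt);
	if (ret)
		return ret;	/* no VMA covering ea, or disallowed access type */

	if (flt & VM_FAULT_MAJOR)
		pr_debug("major fault resolved at %#lx\n", ea);

	/* Tell the coprocessor to retry its translation (device specific). */
	return 0;
}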
Ian Munsie 2014-10-08 19:54:50 +11:00 committed by Michael Ellerman
parent 60666de2da
commit e83d016975
8 changed files with 33 additions and 14 deletions

View File

@@ -608,6 +608,10 @@ config PPC_SUBPAGE_PROT
 	  to set access permissions (read/write, readonly, or no access)
 	  on the 4k subpages of each 64k page.
 
+config PPC_COPRO_BASE
+	bool
+	default n
+
 config SCHED_SMT
 	bool "SMT (Hyperthreading) scheduler support"
 	depends on PPC64 && SMP

View File

@@ -0,0 +1,16 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_COPRO_H
+#define _ASM_POWERPC_COPRO_H
+
+int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+			  unsigned long dsisr, unsigned *flt);
+
+#endif /* _ASM_POWERPC_COPRO_H */

View File

@@ -27,6 +27,8 @@
 #include <linux/workqueue.h>
 #include <linux/device.h>
 #include <linux/mutex.h>
+#include <asm/reg.h>
+#include <asm/copro.h>
 
 #define LS_SIZE (256 * 1024)
 #define LS_ADDR_MASK (LS_SIZE - 1)
@@ -277,9 +279,6 @@ void spu_remove_dev_attr(struct device_attribute *attr);
 int spu_add_dev_attr_group(struct attribute_group *attrs);
 void spu_remove_dev_attr_group(struct attribute_group *attrs);
 
-int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
-		unsigned long dsisr, unsigned *flt);
-
 /*
  * Notifier blocks:
  *

View File

@@ -34,3 +34,4 @@ obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hugepage-hash64.o
 obj-$(CONFIG_PPC_SUBPAGE_PROT)	+= subpage-prot.o
 obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
 obj-$(CONFIG_HIGHMEM)		+= highmem.o
+obj-$(CONFIG_PPC_COPRO_BASE)	+= copro_fault.o

View File

@@ -1,5 +1,5 @@
 /*
- * SPU mm fault handler
+ * CoProcessor (SPU/AFU) mm fault handler
  *
  * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
  *
@@ -23,16 +23,14 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/export.h>
-
-#include <asm/spu.h>
-#include <asm/spu_csa.h>
+#include <asm/reg.h>
 
 /*
  * This ought to be kept in sync with the powerpc specific do_page_fault
  * function. Currently, there are a few corner cases that we haven't had
  * to handle fortunately.
  */
-int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 		unsigned long dsisr, unsigned *flt)
 {
 	struct vm_area_struct *vma;
@@ -58,12 +56,12 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 		goto out_unlock;
 	}
 
-	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
+	is_write = dsisr & DSISR_ISSTORE;
 	if (is_write) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto out_unlock;
 	} else {
-		if (dsisr & MFC_DSISR_ACCESS_DENIED)
+		if (dsisr & DSISR_PROTFAULT)
 			goto out_unlock;
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto out_unlock;
@@ -91,4 +89,4 @@ out_unlock:
 	up_read(&mm->mmap_sem);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(spu_handle_mm_fault);
+EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
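
Aside (illustration, not kernel code): the hunks above replace the SPU-specific MFC_DSISR_* flags with the generic DSISR_* bits from asm/reg.h. A condensed restatement of the permission check the function applies to the faulting VMA, assuming the same bit semantics, looks like this; the real function additionally takes mm->mmap_sem (visible in the last hunk), looks up the VMA, and resolves the fault through the core mm fault path.

#include <linux/mm.h>
#include <asm/reg.h>

/* Condensed form of the access check in copro_handle_mm_fault() above. */
static bool copro_access_allowed(struct vm_area_struct *vma, unsigned long dsisr)
{
	if (dsisr & DSISR_ISSTORE)		/* store: need a writable VMA */
		return vma->vm_flags & VM_WRITE;
	if (dsisr & DSISR_PROTFAULT)		/* protection fault on a load */
		return false;
	return vma->vm_flags & (VM_READ | VM_EXEC);	/* load or instruction fetch */
}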

View File

@@ -86,6 +86,7 @@ config SPU_FS_64K_LS
 config SPU_BASE
 	bool
 	default n
+	select PPC_COPRO_BASE
 
 config CBE_RAS
 	bool "RAS features for bare metal Cell BE"

View File

@@ -20,7 +20,7 @@ spu-manage-$(CONFIG_PPC_CELL_COMMON) += spu_manage.o
 
 obj-$(CONFIG_SPU_BASE)			+= spu_callbacks.o spu_base.o \
 					   spu_notify.o \
-					   spu_syscalls.o spu_fault.o \
+					   spu_syscalls.o \
 					   $(spu-priv1-y) \
 					   $(spu-manage-y) \
 					   spufs/

View File

@@ -138,7 +138,7 @@ int spufs_handle_class1(struct spu_context *ctx)
 	if (ctx->state == SPU_STATE_RUNNABLE)
 		ctx->spu->stats.hash_flt++;
 
-	/* we must not hold the lock when entering spu_handle_mm_fault */
+	/* we must not hold the lock when entering copro_handle_mm_fault */
 	spu_release(ctx);
 
 	access = (_PAGE_PRESENT | _PAGE_USER);
@@ -149,7 +149,7 @@ int spufs_handle_class1(struct spu_context *ctx)
 
 	/* hashing failed, so try the actual fault handler */
 	if (ret)
-		ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);
+		ret = copro_handle_mm_fault(current->mm, ea, dsisr, &flt);
 
 	/*
 	 * This is nasty: we need the state_mutex for all the bookkeeping even