xen: add pages parameter to xen_remap_domain_mfn_range

Also introduce xen_unmap_domain_mfn_range. These are the parts of
Mukesh's "xen/pvh: Implement MMU changes for PVH" which are also
needed as a baseline for ARM privcmd support.

The original patch was:

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

This derivative is also:

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
This commit is contained in:
Ian Campbell 2012-10-17 13:37:49 -07:00
parent b3e40b72bb
commit 9a032e393a
3 changed files with 21 additions and 4 deletions

View File

@ -2479,7 +2479,9 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
int xen_remap_domain_mfn_range(struct vm_area_struct *vma, int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long addr,
unsigned long mfn, int nr, unsigned long mfn, int nr,
pgprot_t prot, unsigned domid) pgprot_t prot, unsigned domid,
struct page **pages)
{ {
struct remap_data rmd; struct remap_data rmd;
struct mmu_update mmu_update[REMAP_BATCH_SIZE]; struct mmu_update mmu_update[REMAP_BATCH_SIZE];
@ -2523,3 +2525,14 @@ out:
return err; return err;
} }
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
/* Returns: 0 success */
/*
 * Tear down mappings previously established by xen_remap_domain_mfn_range().
 *
 * @vma:    the VMA the mappings were made into (unused in this stub).
 * @numpgs: number of pages to unmap (unused in this stub).
 * @pages:  page array handed to the remap call; NULL when the caller did
 *          not supply one (the non-auto-translated case).
 *
 * If no page array was given, or the domain is not auto-translated
 * (XENFEAT_auto_translated_physmap clear), there is nothing to undo and
 * 0 is returned. Otherwise -EINVAL is returned — per the commit message
 * this is a baseline stub; the real auto-translated (PVH/ARM) unmap
 * implementation is presumably supplied by a later patch (TODO confirm).
 */
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
			       int numpgs, struct page **pages)
{
	if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
		return 0;
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);

View File

@ -178,7 +178,7 @@ static int mmap_mfn_range(void *data, void *state)
msg->va & PAGE_MASK, msg->va & PAGE_MASK,
msg->mfn, msg->npages, msg->mfn, msg->npages,
vma->vm_page_prot, vma->vm_page_prot,
st->domain); st->domain, NULL);
if (rc < 0) if (rc < 0)
return rc; return rc;
@ -267,7 +267,8 @@ static int mmap_batch_fn(void *data, void *state)
int ret; int ret;
ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1, ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
st->vma->vm_page_prot, st->domain); st->vma->vm_page_prot, st->domain,
NULL);
/* Store error code for second pass. */ /* Store error code for second pass. */
*(st->err++) = ret; *(st->err++) = ret;

View File

@ -27,6 +27,9 @@ struct vm_area_struct;
int xen_remap_domain_mfn_range(struct vm_area_struct *vma, int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long addr,
unsigned long mfn, int nr, unsigned long mfn, int nr,
pgprot_t prot, unsigned domid); pgprot_t prot, unsigned domid,
struct page **pages);
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
int numpgs, struct page **pages);
#endif /* INCLUDE_XEN_OPS_H */ #endif /* INCLUDE_XEN_OPS_H */