exec: extract address_space_translate_iommu, fix page_mask corner case

This will be used to process IOMMUs in a MemoryRegionCache.  This
includes a small bugfix, in that the returned page_mask is now
correctly -1 if the IOMMU memory region maps the entire address
space directly.  Previously, address_space_get_iotlb_entry would
return ~TARGET_PAGE_MASK.

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Paolo Bonzini 2018-03-03 17:24:04 +01:00
parent ad2804d9e4
commit a411c84b56
1 changed file with 75 additions and 35 deletions

exec.c
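
The corner case described in the commit message can be seen in isolation with the following standalone sketch (not part of the patch; it assumes a 64-bit hwaddr and 4 KiB target pages): accumulating page_mask &= iotlb.addr_mask leaves the mask at -1 when the IOMMU maps the whole address space 1:1, whereas the old code replaced that value with ~TARGET_PAGE_MASK before returning it.

    /* Standalone illustration only -- not QEMU code. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t hwaddr;
    #define TARGET_PAGE_BITS 12                        /* assumed 4 KiB pages */
    #define TARGET_PAGE_MASK ((hwaddr)-1 << TARGET_PAGE_BITS)

    int main(void)
    {
        hwaddr page_mask = (hwaddr)-1;   /* no restriction yet                */
        hwaddr addr_mask = (hwaddr)-1;   /* IOMMU hop mapping the whole space */

        page_mask &= addr_mask;          /* still -1 after the hop            */
        printf("new: page_mask = 0x%" PRIx64 "\n", page_mask);

        /* Old behaviour: a page_mask that was still -1 was assumed to mean
         * "not behind an IOMMU" and forced to the target page size. */
        hwaddr old = (page_mask == (hwaddr)-1) ? ~TARGET_PAGE_MASK : page_mask;
        printf("old: page_mask = 0x%" PRIx64 "\n", old);
        return 0;
    }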

@@ -461,6 +461,70 @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x
     return section;
 }
 
+/**
+ * address_space_translate_iommu - translate an address through an IOMMU
+ * memory region and then through the target address space.
+ *
+ * @iommu_mr: the IOMMU memory region that we start the translation from
+ * @addr: the address to be translated through the MMU
+ * @xlat: the translated address offset within the destination memory region.
+ *        It cannot be %NULL.
+ * @plen_out: valid read/write length of the translated address. It
+ *            cannot be %NULL.
+ * @page_mask_out: page mask for the translated address. This
+ *                 should only be meaningful for IOMMU translated
+ *                 addresses, since there may be huge pages that this bit
+ *                 would tell. It can be %NULL if we don't care about it.
+ * @is_write: whether the translation operation is for write
+ * @is_mmio: whether this can be MMIO, set true if it can
+ * @target_as: the address space targeted by the IOMMU
+ *
+ * This function is called from RCU critical section. It is the common
+ * part of flatview_do_translate and address_space_translate_cached.
+ */
+static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr,
+                                                         hwaddr *xlat,
+                                                         hwaddr *plen_out,
+                                                         hwaddr *page_mask_out,
+                                                         bool is_write,
+                                                         bool is_mmio,
+                                                         AddressSpace **target_as)
+{
+    MemoryRegionSection *section;
+    hwaddr page_mask = (hwaddr)-1;
+
+    do {
+        hwaddr addr = *xlat;
+        IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
+        IOMMUTLBEntry iotlb = imrc->translate(iommu_mr, addr, is_write ?
+                                              IOMMU_WO : IOMMU_RO);
+
+        if (!(iotlb.perm & (1 << is_write))) {
+            goto unassigned;
+        }
+
+        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
+                | (addr & iotlb.addr_mask));
+        page_mask &= iotlb.addr_mask;
+        *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1);
+        *target_as = iotlb.target_as;
+
+        section = address_space_translate_internal(
+                address_space_to_dispatch(iotlb.target_as), addr, xlat,
+                plen_out, is_mmio);
+
+        iommu_mr = memory_region_get_iommu(section->mr);
+    } while (unlikely(iommu_mr));
+
+    if (page_mask_out) {
+        *page_mask_out = page_mask;
+    }
+
+    return *section;
+
+unassigned:
+    return (MemoryRegionSection) { .mr = &io_mem_unassigned };
+}
+
 /**
  * flatview_do_translate - translate an address in FlatView
  *
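
An aside on the *plen_out clamp in the new helper above: it limits the returned length to the bytes remaining in the current IOMMU page. A standalone sketch of the same expression (hypothetical 2 MiB mapping with addr_mask = 0x1fffff; not part of the patch):

    /* Standalone illustration of the plen clamp -- not QEMU code. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t hwaddr;
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        hwaddr addr = 0x201000;          /* 4 KiB into a 2 MiB IOMMU page     */
        hwaddr addr_mask = 0x1fffff;     /* hypothetical 2 MiB translation    */
        hwaddr plen = (hwaddr)-1;        /* caller imposed no length limit    */

        /* Same expression as in address_space_translate_iommu: bytes left
         * until the end of the page covered by this IOTLB entry. */
        plen = MIN(plen, (addr | addr_mask) - addr + 1);
        printf("plen = 0x%" PRIx64 "\n", plen);   /* 0x1ff000 = 2 MiB - 4 KiB */
        return 0;
    }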
@@ -489,55 +553,31 @@ static MemoryRegionSection flatview_do_translate(FlatView *fv,
                                                  bool is_mmio,
                                                  AddressSpace **target_as)
 {
-    IOMMUTLBEntry iotlb;
     MemoryRegionSection *section;
     IOMMUMemoryRegion *iommu_mr;
-    IOMMUMemoryRegionClass *imrc;
-    hwaddr page_mask = (hwaddr)(-1);
     hwaddr plen = (hwaddr)(-1);
 
     if (!plen_out) {
         plen_out = &plen;
     }
 
-    for (;;) {
-        section = address_space_translate_internal(
-                flatview_to_dispatch(fv), addr, xlat,
-                plen_out, is_mmio);
+    section = address_space_translate_internal(
+            flatview_to_dispatch(fv), addr, xlat,
+            plen_out, is_mmio);
 
-        iommu_mr = memory_region_get_iommu(section->mr);
-        if (!iommu_mr) {
-            break;
-        }
-
-        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
-        addr = *xlat;
-        iotlb = imrc->translate(iommu_mr, addr, is_write ?
-                                IOMMU_WO : IOMMU_RO);
-        if (!(iotlb.perm & (1 << is_write))) {
-            goto translate_fail;
-        }
-
-        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
-                | (addr & iotlb.addr_mask));
-        page_mask &= iotlb.addr_mask;
-        *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1);
-        fv = address_space_to_flatview(iotlb.target_as);
-        *target_as = iotlb.target_as;
-    }
-
+    iommu_mr = memory_region_get_iommu(section->mr);
+    if (unlikely(iommu_mr)) {
+        return address_space_translate_iommu(iommu_mr, xlat,
+                                             plen_out, page_mask_out,
+                                             is_write, is_mmio,
+                                             target_as);
+    }
     if (page_mask_out) {
-        if (page_mask == (hwaddr)(-1)) {
-            /* Not behind an IOMMU, use default page size. */
-            page_mask = ~TARGET_PAGE_MASK;
-        }
-        *page_mask_out = page_mask;
+        /* Not behind an IOMMU, use default page size. */
+        *page_mask_out = ~TARGET_PAGE_MASK;
     }
 
     return *section;
 
-translate_fail:
-    return (MemoryRegionSection) { .mr = &io_mem_unassigned };
 }
 
 /* Called from RCU critical section */
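
For context on why the returned value matters: the commit message mentions address_space_get_iotlb_entry, which uses page_mask to build the IOTLB entry it hands back to the caller. The sketch below is a simplified, self-contained approximation with stubbed types, not the actual QEMU code; it only shows how a mask of -1 versus ~TARGET_PAGE_MASK changes the entry's coverage.

    /* Simplified, stubbed illustration -- not the actual QEMU structures. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t hwaddr;

    typedef struct {
        hwaddr iova;             /* page-aligned input address       */
        hwaddr translated_addr;  /* page-aligned output address      */
        hwaddr addr_mask;        /* low bits covered by this mapping */
    } FakeIOTLBEntry;

    static FakeIOTLBEntry make_entry(hwaddr addr, hwaddr xlat, hwaddr page_mask)
    {
        /* page_mask has the low bits of the page set, like iotlb.addr_mask,
         * so ~page_mask aligns both addresses down to the page start. */
        return (FakeIOTLBEntry) {
            .iova            = addr & ~page_mask,
            .translated_addr = xlat & ~page_mask,
            .addr_mask       = page_mask,
        };
    }

    int main(void)
    {
        /* With this fix, an IOMMU that maps the whole address space 1:1
         * reports page_mask == -1, so the entry covers everything ...   */
        FakeIOTLBEntry whole = make_entry(0x12345678, 0x12345678, (hwaddr)-1);
        /* ... instead of being clamped to a single 4 KiB target page.   */
        FakeIOTLBEntry page  = make_entry(0x12345678, 0x12345678, 0xfff);

        printf("fixed: iova=0x%" PRIx64 " addr_mask=0x%" PRIx64 "\n",
               whole.iova, whole.addr_mask);
        printf("old:   iova=0x%" PRIx64 " addr_mask=0x%" PRIx64 "\n",
               page.iova, page.addr_mask);
        return 0;
    }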