KVM: PPC: iommu: Unify TCE checking

This reworks the helpers for checking TCE update parameters in a way
that allows them to be used in KVM.

This should cause no behavioral change.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Acked-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
This commit is contained in:
Alexey Kardashevskiy 2017-03-22 15:21:55 +11:00 committed by Paul Mackerras
parent da6f59e192
commit b1af23d836
4 changed files with 40 additions and 56 deletions

View File

@@ -296,11 +296,21 @@ static inline void iommu_restore(void)
#endif
/* The API to support IOMMU operations for VFIO */
extern int iommu_tce_clear_param_check(struct iommu_table *tbl,
unsigned long ioba, unsigned long tce_value,
unsigned long npages);
extern int iommu_tce_put_param_check(struct iommu_table *tbl,
unsigned long ioba, unsigned long tce);
extern int iommu_tce_check_ioba(unsigned long page_shift,
unsigned long offset, unsigned long size,
unsigned long ioba, unsigned long npages);
extern int iommu_tce_check_gpa(unsigned long page_shift,
unsigned long gpa);
#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
(iommu_tce_check_ioba((tbl)->it_page_shift, \
(tbl)->it_offset, (tbl)->it_size, \
(ioba), (npages)) || (tce_value))
#define iommu_tce_put_param_check(tbl, ioba, gpa) \
(iommu_tce_check_ioba((tbl)->it_page_shift, \
(tbl)->it_offset, (tbl)->it_size, \
(ioba), 1) || \
iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))
extern void iommu_flush_tce(struct iommu_table *tbl);
extern int iommu_take_ownership(struct iommu_table *tbl);

View File

@@ -178,8 +178,10 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
struct kvm *kvm, unsigned long liobn);
extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
unsigned long ioba, unsigned long npages);
#define kvmppc_ioba_validate(stt, ioba, npages) \
(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
(stt)->size, (ioba), (npages)) ? \
H_PARAMETER : H_SUCCESS)
extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
unsigned long tce);
extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,

View File

@@ -963,47 +963,36 @@ void iommu_flush_tce(struct iommu_table *tbl)
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);
int iommu_tce_clear_param_check(struct iommu_table *tbl,
unsigned long ioba, unsigned long tce_value,
unsigned long npages)
int iommu_tce_check_ioba(unsigned long page_shift,
unsigned long offset, unsigned long size,
unsigned long ioba, unsigned long npages)
{
/* tbl->it_ops->clear() does not support any value but 0 */
if (tce_value)
unsigned long mask = (1UL << page_shift) - 1;
if (ioba & mask)
return -EINVAL;
if (ioba & ~IOMMU_PAGE_MASK(tbl))
ioba >>= page_shift;
if (ioba < offset)
return -EINVAL;
ioba >>= tbl->it_page_shift;
if (ioba < tbl->it_offset)
return -EINVAL;
if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
if ((ioba + 1) > (offset + size))
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
int iommu_tce_put_param_check(struct iommu_table *tbl,
unsigned long ioba, unsigned long tce)
int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
if (tce & ~IOMMU_PAGE_MASK(tbl))
return -EINVAL;
unsigned long mask = (1UL << page_shift) - 1;
if (ioba & ~IOMMU_PAGE_MASK(tbl))
return -EINVAL;
ioba >>= tbl->it_page_shift;
if (ioba < tbl->it_offset)
return -EINVAL;
if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
if (gpa & mask)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
unsigned long *hpa, enum dma_data_direction *direction)

View File

@@ -61,27 +61,6 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);
/*
* Validates IO address.
*
* WARNING: This will be called in real-mode on HV KVM and virtual
* mode on PR KVM
*/
long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
unsigned long ioba, unsigned long npages)
{
unsigned long mask = (1ULL << stt->page_shift) - 1;
unsigned long idx = ioba >> stt->page_shift;
if ((ioba & mask) || (idx < stt->offset) ||
(idx - stt->offset + npages > stt->size) ||
(idx + npages < idx))
return H_PARAMETER;
return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
/*
* Validates TCE address.
* At the moment flags and page mask are validated.
@@ -95,10 +74,14 @@ EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
*/
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
unsigned long page_mask = ~((1ULL << stt->page_shift) - 1);
unsigned long mask = ~(page_mask | TCE_PCI_WRITE | TCE_PCI_READ);
unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
enum dma_data_direction dir = iommu_tce_direction(tce);
if (tce & mask)
/* Allow userspace to poison TCE table */
if (dir == DMA_NONE)
return H_SUCCESS;
if (iommu_tce_check_gpa(stt->page_shift, gpa))
return H_PARAMETER;
return H_SUCCESS;