spapr_iommu: Enable multiple TCE requests

Currently only a single TCE entry per request is supported (H_PUT_TCE).
However, the PAPR+ specification allows multiple-entry requests such as
H_PUT_TCE_INDIRECT and H_STUFF_TCE. Since they need fewer transitions to
the host kernel via ioctls, support for these calls can accelerate IOMMU
operations.

This implements H_STUFF_TCE and H_PUT_TCE_INDIRECT.

This advertises the "multi-tce" capability to the guest if the host kernel
supports it (KVM_CAP_SPAPR_MULTITCE) or if the guest is running in TCG
mode; the argument layout of the new hypercalls is sketched below, after
the commit metadata.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Alexander Graf <agraf@suse.de>
Author: Alexey Kardashevskiy, 2014-05-27 15:36:30 +10:00 (committed by Alexander Graf)
Commit: da95324ebe (parent a1d59c0ffa)
5 changed files with 96 additions and 0 deletions
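As an orientation aid, here is a minimal guest-side sketch of the argument layout the two new hypercalls expect, derived from the handlers in the hw/ppc/spapr_iommu.c hunk below. hcall4(), put_tce_indirect() and stuff_tce() are hypothetical names standing in for whatever hypercall entry path the guest actually uses; they are not part of this patch. The H_* opcodes are the PAPR-defined values.

/* Hypothetical guest-side wrappers; hcall4() stands in for the real
 * hypercall entry point and is not provided by this patch. */
extern long hcall4(unsigned long opcode, unsigned long a0, unsigned long a1,
                   unsigned long a2, unsigned long a3);

/* H_PUT_TCE_INDIRECT: map up to 512 consecutive IOMMU pages in one call.
 * tce_list is the guest-physical address of a page holding one 64-bit TCE
 * per page to be mapped; npages greater than 512 fails with H_PARAMETER. */
static long put_tce_indirect(unsigned long liobn, unsigned long ioba,
                             unsigned long tce_list, unsigned long npages)
{
    return hcall4(H_PUT_TCE_INDIRECT, liobn, ioba, tce_list, npages);
}

/* H_STUFF_TCE: write the same TCE value (e.g. 0 to unmap) into npages
 * consecutive entries starting at ioba. */
static long stuff_tce(unsigned long liobn, unsigned long ioba,
                      unsigned long tce_value, unsigned long npages)
{
    return hcall4(H_STUFF_TCE, liobn, ioba, tce_value, npages);
}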

hw/ppc/spapr.c

@@ -537,6 +537,9 @@ static void *spapr_create_fdt_skel(hwaddr initrd_base,
    /* RTAS */
    _FDT((fdt_begin_node(fdt, "rtas")));

    if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
        add_str(hypertas, "hcall-multi-tce");
    }
    _FDT((fdt_property(fdt, "ibm,hypertas-functions", hypertas->str,
                       hypertas->len)));
    g_string_free(hypertas, TRUE);

hw/ppc/spapr_iommu.c

@@ -226,6 +226,82 @@ static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
    return H_SUCCESS;
}

static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
                                       sPAPREnvironment *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong ioba1 = ioba;
    target_ulong tce_list = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    CPUState *cs = CPU(cpu);

    if (!tcet) {
        return H_PARAMETER;
    }

    if (npages > 512) {
        return H_PARAMETER;
    }

    ioba &= ~SPAPR_TCE_PAGE_MASK;
    tce_list &= ~SPAPR_TCE_PAGE_MASK;

    for (i = 0; i < npages; ++i, ioba += SPAPR_TCE_PAGE_SIZE) {
        target_ulong tce = ldq_phys(cs->as, tce_list +
                                    i * sizeof(target_ulong));
        ret = put_tce_emu(tcet, ioba, tce);
        if (ret) {
            break;
        }
    }

    /* Trace last successful or the first problematic entry */
    i = i ? (i - 1) : 0;
    trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i,
                               ldq_phys(cs->as,
                               tce_list + i * sizeof(target_ulong)),
                               ret);

    return ret;
}

static target_ulong h_stuff_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce_value = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (!tcet) {
        return H_PARAMETER;
    }

    if (npages > tcet->nb_table) {
        return H_PARAMETER;
    }

    ioba &= ~SPAPR_TCE_PAGE_MASK;

    for (i = 0; i < npages; ++i, ioba += SPAPR_TCE_PAGE_SIZE) {
        ret = put_tce_emu(tcet, ioba, tce_value);
        if (ret) {
            break;
        }
    }

    trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);

    return ret;
}

static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                              target_ulong opcode, target_ulong *args)
{
@@ -333,6 +409,8 @@ static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
    /* hcall-tce */
    spapr_register_hypercall(H_PUT_TCE, h_put_tce);
    spapr_register_hypercall(H_GET_TCE, h_get_tce);
    spapr_register_hypercall(H_PUT_TCE_INDIRECT, h_put_tce_indirect);
    spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce);
}

static TypeInfo spapr_tce_table_info = {
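The npages > 512 check in h_put_tce_indirect reflects that the TCE list must fit in a single 4 KiB page (512 eight-byte entries). A rough sketch of how a caller could batch a larger mapping in 512-entry chunks, assuming the hypothetical put_tce_indirect() wrapper from the sketch above plus an illustrative fill_tce_list() helper (neither is part of this patch):

#define TCES_PER_CALL   512u    /* 512 * 8 bytes == one 4 KiB TCE-list page */
#define TCE_PAGE_SHIFT  12      /* 4 KiB IOMMU pages, as with SPAPR_TCE_PAGE_SIZE */

/* Illustrative helper: writes 'n' 64-bit TCEs into the list page at
 * guest-physical address 'tce_list' (not part of this patch). */
extern void fill_tce_list(unsigned long tce_list, unsigned long n);

/* Map 'npages' pages starting at 'ioba', issuing at most 512 TCEs per call. */
static long map_range(unsigned long liobn, unsigned long ioba,
                      unsigned long tce_list, unsigned long npages)
{
    while (npages) {
        unsigned long n = npages < TCES_PER_CALL ? npages : TCES_PER_CALL;
        long ret;

        fill_tce_list(tce_list, n);
        ret = put_tce_indirect(liobn, ioba, tce_list, n);
        if (ret) {
            return ret;         /* e.g. H_PARAMETER from the handler */
        }
        ioba += n << TCE_PAGE_SHIFT;
        npages -= n;
    }
    return 0;                   /* H_SUCCESS */
}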

target-ppc/kvm.c

@@ -62,6 +62,7 @@ static int cap_booke_sregs;
static int cap_ppc_smt;
static int cap_ppc_rma;
static int cap_spapr_tce;
static int cap_spapr_multitce;
static int cap_hior;
static int cap_one_reg;
static int cap_epr;
@@ -98,6 +99,7 @@ int kvm_arch_init(KVMState *s)
    cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
    cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
    cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
    cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
    cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
    cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
@@ -1613,6 +1615,11 @@ uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
}
#endif

bool kvmppc_spapr_use_multitce(void)
{
    return cap_spapr_multitce;
}

void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd)
{
    struct kvm_create_spapr_tce args = {

target-ppc/kvm_ppc.h

@@ -32,6 +32,7 @@ int kvmppc_set_tcr(PowerPCCPU *cpu);
int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu);
#ifndef CONFIG_USER_ONLY
off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem);
bool kvmppc_spapr_use_multitce(void);
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd);
int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t window_size);
int kvmppc_reset_htab(int shift_hint);
@@ -136,6 +137,11 @@ static inline off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem)
    return 0;
}

static inline bool kvmppc_spapr_use_multitce(void)
{
    return false;
}

static inline void *kvmppc_create_spapr_tce(uint32_t liobn,
                                            uint32_t window_size, int *fd)
{

trace-events

@@ -1200,6 +1200,8 @@ spapr_cas_pvr(uint32_t cur_pvr, bool cpu_match, uint32_t new_pvr, uint64_t pcr)
# hw/ppc/spapr_iommu.c
spapr_iommu_put(uint64_t liobn, uint64_t ioba, uint64_t tce, uint64_t ret) "liobn=%"PRIx64" ioba=0x%"PRIx64" tce=0x%"PRIx64" ret=%"PRId64
spapr_iommu_get(uint64_t liobn, uint64_t ioba, uint64_t ret, uint64_t tce) "liobn=%"PRIx64" ioba=0x%"PRIx64" ret=%"PRId64" tce=0x%"PRIx64
spapr_iommu_indirect(uint64_t liobn, uint64_t ioba, uint64_t tce, uint64_t iobaN, uint64_t tceN, uint64_t ret) "liobn=%"PRIx64" ioba=0x%"PRIx64" tcelist=0x%"PRIx64" iobaN=0x%"PRIx64" tceN=0x%"PRIx64" ret=%"PRId64
spapr_iommu_stuff(uint64_t liobn, uint64_t ioba, uint64_t tce_value, uint64_t npages, uint64_t ret) "liobn=%"PRIx64" ioba=0x%"PRIx64" tcevalue=0x%"PRIx64" npages=%"PRId64" ret=%"PRId64
spapr_iommu_xlate(uint64_t liobn, uint64_t ioba, uint64_t tce, unsigned perm, unsigned pgsize) "liobn=%"PRIx64" 0x%"PRIx64" -> 0x%"PRIx64" perm=%u mask=%x"
spapr_iommu_new_table(uint64_t liobn, void *tcet, void *table, int fd) "liobn=%"PRIx64" tcet=%p table=%p fd=%d"