target-ppc: Update ppc_hash64_store_hpte to support updating in-kernel htab

This supports updating an htab managed by the hypervisor. Currently we
don't have any user for this feature, but it brings the store_hpte
interface in line with the load_hpte one. We may want to use this when
we emulate the H_ENTER hcall in QEMU for HV KVM.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
[ folded fix for the "warn_unused_result" build break in
  kvmppc_hash64_write_pte(), Greg Kurz <gkurz@linux.vnet.ibm.com> ]
Signed-off-by: Greg Kurz <gkurz@linux.vnet.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Aneesh Kumar K.V authored on 2014-02-20 18:52:38 +01:00, committed by Alexander Graf
parent 3f94170be3
commit c138593380
4 changed files with 68 additions and 15 deletions
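The H_ENTER use case mentioned above is easiest to see with a usage sketch. The snippet below is illustrative only and is not part of the patch: hypothetical_hpte_slot_is_free() is a made-up predicate standing in for the validity check an H_ENTER handler would do via the load_hpte path, and HPTES_PER_GROUP is assumed to be the usual eight-slot PTEG constant from mmu-hash64.h.

/* Illustration only -- not part of this commit. */
static int hypothetical_h_enter_insert(CPUPPCState *env,
                                       target_ulong pteg_index,
                                       target_ulong pteh, target_ulong ptel)
{
    int slot;

    for (slot = 0; slot < HPTES_PER_GROUP; slot++) {
        target_ulong ptex = pteg_index * HPTES_PER_GROUP + slot;

        /* Hypothetical helper: skip slots that already hold a valid HPTE. */
        if (!hypothetical_hpte_slot_is_free(env, ptex)) {
            continue;
        }

        /*
         * One call now covers all three backends: in-kernel htab
         * (kvmppc_kern_htab), QEMU's external htab, or an htab in
         * guest memory.
         */
        ppc_hash64_store_hpte(env, ptex, pteh, ptel);
        return slot;
    }

    return -1;    /* PTEG full */
}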

target-ppc/kvm.c

@@ -1992,3 +1992,39 @@ void kvmppc_hash64_free_pteg(uint64_t token)
    g_free(htab_buf);
    return;
}

void kvmppc_hash64_write_pte(CPUPPCState *env, target_ulong pte_index,
                             target_ulong pte0, target_ulong pte1)
{
    int htab_fd;
    struct kvm_get_htab_fd ghf;
    struct kvm_get_htab_buf hpte_buf;

    ghf.flags = 0;
    ghf.start_index = 0;     /* Ignored */
    htab_fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
    if (htab_fd < 0) {
        goto error_out;
    }

    hpte_buf.header.n_valid = 1;
    hpte_buf.header.n_invalid = 0;
    hpte_buf.header.index = pte_index;
    hpte_buf.hpte[0] = pte0;
    hpte_buf.hpte[1] = pte1;
    /*
     * Write the hpte entry.
     * CAUTION: write() has the warn_unused_result attribute. Hence we
     * need to check the return value, even though we do nothing.
     */
    if (write(htab_fd, &hpte_buf, sizeof(hpte_buf)) < 0) {
        goto out_close;
    }

 out_close:
    close(htab_fd);
    return;

 error_out:
    return;
}
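For context, writes to the fd returned by KVM_PPC_GET_HTAB_FD are consumed by the kernel as a stream of records: a small header giving the starting HPTE index and the number of valid/invalid entries, followed by the valid HPTEs as pairs of 64-bit words. A rough sketch of the single-entry record built above is shown below; the field names mirror kvm_get_htab_header from the powerpc KVM uapi, while the wrapper struct name is ours and only stands in for the local kvm_get_htab_buf type this file is assumed to define.

#include <stdint.h>

/* Sketch of the one-entry record kvmppc_hash64_write_pte() writes. */
struct sketch_htab_record {
    /* mirrors struct kvm_get_htab_header from the powerpc KVM uapi */
    uint32_t index;      /* HPTE index the record starts at (pte_index)  */
    uint16_t n_valid;    /* number of valid HPTEs that follow (here 1)   */
    uint16_t n_invalid;  /* number of HPTEs to invalidate (here 0)       */
    /* then n_valid entries of two 64-bit words each */
    uint64_t hpte[2];    /* pte0, pte1                                   */
};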

target-ppc/kvm_ppc.h

@@ -47,6 +47,9 @@ int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
uint64_t kvmppc_hash64_read_pteg(PowerPCCPU *cpu, target_ulong pte_index);
void kvmppc_hash64_free_pteg(uint64_t token);

void kvmppc_hash64_write_pte(CPUPPCState *env, target_ulong pte_index,
                             target_ulong pte0, target_ulong pte1);

#else

static inline uint32_t kvmppc_get_tbfreq(void)
@@ -207,6 +210,13 @@ static inline void kvmppc_hash64_free_pteg(uint64_t token)
    abort();
}

static inline void kvmppc_hash64_write_pte(CPUPPCState *env,
                                           target_ulong pte_index,
                                           target_ulong pte0, target_ulong pte1)
{
    abort();
}

#endif

#ifndef CONFIG_KVM
#ifndef CONFIG_KVM

target-ppc/mmu-hash64.c

@@ -603,3 +603,23 @@ hwaddr ppc_hash64_get_phys_page_debug(CPUPPCState *env, target_ulong addr)
    return ppc_hash64_pte_raddr(slb, pte, addr) & TARGET_PAGE_MASK;
}

void ppc_hash64_store_hpte(CPUPPCState *env,
                           target_ulong pte_index,
                           target_ulong pte0, target_ulong pte1)
{
    CPUState *cs = ENV_GET_CPU(env);

    if (kvmppc_kern_htab) {
        return kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
    }

    pte_index *= HASH_PTE_SIZE_64;
    if (env->external_htab) {
        stq_p(env->external_htab + pte_index, pte0);
        stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64/2, pte1);
    } else {
        stq_phys(cs->as, env->htab_base + pte_index, pte0);
        stq_phys(cs->as, env->htab_base + pte_index + HASH_PTE_SIZE_64/2, pte1);
    }
}

target-ppc/mmu-hash64.h

@@ -9,6 +9,8 @@ int ppc_store_slb (CPUPPCState *env, target_ulong rb, target_ulong rs);
hwaddr ppc_hash64_get_phys_page_debug(CPUPPCState *env, target_ulong addr);
int ppc_hash64_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw,
                                int mmu_idx);
void ppc_hash64_store_hpte(CPUPPCState *env, target_ulong index,
                           target_ulong pte0, target_ulong pte1);

#endif

/*
@@ -106,21 +108,6 @@ static inline target_ulong ppc_hash64_load_hpte1(CPUPPCState *env,
    }
}

static inline void ppc_hash64_store_hpte(CPUPPCState *env,
                                         target_ulong pte_index,
                                         target_ulong pte0, target_ulong pte1)
{
    CPUState *cs = ENV_GET_CPU(env);

    pte_index *= HASH_PTE_SIZE_64;
    if (env->external_htab) {
        stq_p(env->external_htab + pte_index, pte0);
        stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64/2, pte1);
    } else {
        stq_phys(cs->as, env->htab_base + pte_index, pte0);
        stq_phys(cs->as, env->htab_base + pte_index + HASH_PTE_SIZE_64/2, pte1);
    }
}

typedef struct {
    uint64_t pte0, pte1;
} ppc_hash_pte64_t;