KVM/arm fixes for 5.4, take #2
Special PMU edition:

- Fix cycle counter truncation
- Fix cycle counter overflow limit on pure 64bit system
- Allow chained events to be actually functional
- Correct sample period after overflow

-----BEGIN PGP SIGNATURE-----
iQJDBAABCgAtFiEEn9UcU+C1Yxj9lZw9I9DQutE9ekMFAl2sMDwPHG1hekBrZXJu
ZWwub3JnAAoJECPQ0LrRPXpDyWEP/iKeWKFPoFIV2o4buIBSLlNOwPDzEF8pEABx
Wq5dw3cPEQFx5/n5vABLvUC0SoU6rhEWXseNNlOoo1r0pQzS0GpZ5B6BCuuMtk9X
DSgBc3YqrRPFVdMSCUtTSiM2en9fuLPSalh819KWqWkeMQg+meRtvjkzoXMh3gYt
KBeeaJHuwHMNlqjKSKdq4XtdQQUBzN+MbtIGTQ83hYbkvep5Z3AVuvS4CapcpeJE
OVByj0qcyHY4MG+jcTWPYepRZhAQQj8Joj3Z6hEc0ZVpw11GwqG3PcIryxAlhJp3
ON5teMeV1PiumR1fA90A6Q3M3tSoyR+5oHjS2Y7Y/W5ao6BBrytBDNtPGLYFQkXh
DKhyIHxFTNPaziSn1jGuvmZUmK9iDD8qowNCHFspAwoqqajjmb5YyiS/FQvfq+Ga
Zm5JA+f7jheGJq3zmV8oVdLoLt1ldsJb5iWDFZ/oGxLBZbITKAk5diZx+Jvr7Sgm
CyC8uoEiaoiQdabUwWymrGfrU1JKjLyKejtp/q4lZGG3e5y3jUn1F7qh7Q+N9eSX
l2cPPcH2iAcMZdFwBedUNll3JZHm3aSVg03Ub6GoYppzxc+phmr7p+Lzyxtm9dYd
JUF49yDySaiWkWoMG0sMBVSDml8JyEEEAJ1ypwQdGxlizy5/WFy41a0sxjMnCHjP
ljAsx/3n
=ORrS
-----END PGP SIGNATURE-----

Merge tag 'kvmarm-fixes-5.4-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm fixes for 5.4, take #2

Special PMU edition:

- Fix cycle counter truncation
- Fix cycle counter overflow limit on pure 64bit system
- Allow chained events to be actually functional
- Correct sample period after overflow
commit 9800c24e2f
@@ -632,6 +632,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	 */
 	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
 	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
+	if (!system_supports_32bit_el0())
+		val |= ARMV8_PMU_PMCR_LC;
 	__vcpu_sys_reg(vcpu, r->reg) = val;
 }
 
@@ -682,6 +684,8 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
 		val &= ~ARMV8_PMU_PMCR_MASK;
 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
+		if (!system_supports_32bit_el0())
+			val |= ARMV8_PMU_PMCR_LC;
 		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
 		kvm_pmu_handle_pmcr(vcpu, val);
 		kvm_vcpu_pmu_restore_guest(vcpu);
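Note on the two PMCR_EL0 hunks above (reset_pmcr() and access_pmcr()): on a system that has no 32-bit EL0, AArch32 guests cannot exist and PMCR_EL0.LC ("long cycle counter") behaves as RES1, so KVM now forces the bit both when the vCPU's PMCR_EL0 is reset and whenever the guest writes the register. With LC set, the cycle counter's overflow limit is 64 bits rather than 32. The standalone sketch below only restates how the reset value is composed; the writable mask and bit positions are assumptions made for the sketch, not values taken from the kernel headers.

/*
 * Illustrative-only recomputation of the PMCR_EL0 reset value. The
 * writable mask and bit positions below are assumptions made for this
 * sketch, not copied from the kernel headers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PMCR_MASK	0xffULL		/* writable bits (assumed) */
#define PMCR_E		(1ULL << 0)	/* counter enable */
#define PMCR_LC		(1ULL << 6)	/* long cycle counter */

static uint64_t reset_pmcr(uint64_t hw_pmcr, bool have_32bit_el0)
{
	/* writable bits reset to an UNKNOWN value, PMCR.E cleared */
	uint64_t val = ((hw_pmcr & ~PMCR_MASK) |
			(PMCR_MASK & 0xdecafbadULL)) & ~PMCR_E;

	if (!have_32bit_el0)
		val |= PMCR_LC;		/* LC behaves as RES1 without AArch32 */
	return val;
}

int main(void)
{
	/* made-up host PMCR value, AArch64-only system */
	printf("reset PMCR_EL0: %#llx\n",
	       (unsigned long long)reset_pmcr(0x41023000ULL, false));
	return 0;
}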
@@ -8,6 +8,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/uaccess.h>
 #include <asm/kvm_emulate.h>
 #include <kvm/arm_pmu.h>
@@ -146,8 +147,7 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 	if (kvm_pmu_pmc_is_chained(pmc) &&
 	    kvm_pmu_idx_is_high_counter(select_idx))
 		counter = upper_32_bits(counter);
-
-	else if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
+	else if (select_idx != ARMV8_PMU_CYCLE_IDX)
 		counter = lower_32_bits(counter);
 
 	return counter;
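Note on the kvm_pmu_get_counter_value() hunk above, and on the kvm_pmu_stop_counter() changes that follow: PMCCNTR_EL0 is architecturally a 64-bit counter, and PMCR_EL0.LC only selects whether its overflow flag is raised when bit 31 or bit 63 wraps. Truncating the value based on kvm_pmu_idx_is_64bit(), which reflects LC, therefore dropped the top 32 bits of the cycle count; only the 32-bit event counters should be truncated. A minimal userspace sketch of the fixed selection follows; the cycle-counter index of 31 and the sample value are assumptions for illustration.

/*
 * Minimal userspace sketch of the fixed read path, not kernel code.
 * ARMV8_PMU_CYCLE_IDX and the sample value are assumptions for
 * illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define ARMV8_PMU_CYCLE_IDX	31

static uint64_t read_counter(uint64_t stored, unsigned int idx)
{
	/* the cycle counter is 64 bits wide; only the 32-bit event
	 * counters are truncated on read */
	if (idx != ARMV8_PMU_CYCLE_IDX)
		stored &= 0xffffffffULL;
	return stored;
}

int main(void)
{
	uint64_t saved = 0x100000123ULL;	/* value held in vcpu state */

	printf("event counter 2: %#llx\n",	/* prints 0x123 */
	       (unsigned long long)read_counter(saved, 2));
	printf("cycle counter:   %#llx\n",	/* prints 0x100000123 */
	       (unsigned long long)read_counter(saved, ARMV8_PMU_CYCLE_IDX));
	return 0;
}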
@@ -193,7 +193,7 @@ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
  */
 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 {
-	u64 counter, reg;
+	u64 counter, reg, val;
 
 	pmc = kvm_pmu_get_canonical_pmc(pmc);
 	if (!pmc->perf_event)
@@ -201,16 +201,19 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 
 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
 
-	if (kvm_pmu_pmc_is_chained(pmc)) {
-		reg = PMEVCNTR0_EL0 + pmc->idx;
-		__vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
-		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+	if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
+		reg = PMCCNTR_EL0;
+		val = counter;
 	} else {
-		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
-		__vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
+		reg = PMEVCNTR0_EL0 + pmc->idx;
+		val = lower_32_bits(counter);
 	}
 
+	__vcpu_sys_reg(vcpu, reg) = val;
+
+	if (kvm_pmu_pmc_is_chained(pmc))
+		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+
 	kvm_pmu_release_perf_event(pmc);
 }
 
@@ -440,8 +443,25 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 				  struct pt_regs *regs)
 {
 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 	int idx = pmc->idx;
+	u64 period;
+
+	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
+
+	/*
+	 * Reset the sample period to the architectural limit,
+	 * i.e. the point where the counter overflows.
+	 */
+	period = -(local64_read(&perf_event->count));
+
+	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+		period &= GENMASK(31, 0);
+
+	local64_set(&perf_event->hw.period_left, 0);
+	perf_event->attr.sample_period = period;
+	perf_event->hw.sample_period = period;
 
 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
 
@@ -449,6 +469,8 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 		kvm_vcpu_kick(vcpu);
 	}
+
+	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
 }
 
 /**
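Note on the kvm_pmu_perf_overflow() changes above: the backing perf event is created with a sample period derived from the guest's initial counter value, and perf keeps reusing that period after every overflow. Architecturally, though, the next overflow only happens once the counter wraps again, so the handler now stops the event, reprograms the sample period to the distance from the current count to the wrap point (the two's complement of the count, truncated to 32 bits for counters that overflow at bit 31), and restarts it. The arithmetic as a standalone sketch, with made-up counter values:

/*
 * Standalone sketch of the period arithmetic, not kernel code: after an
 * overflow, the next sample period is the distance to the counter's wrap
 * point, i.e. the two's complement of the current count, truncated to the
 * counter width. The counter value below is made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t next_period(uint64_t count, int is_64bit)
{
	uint64_t period = -count;		/* distance to 2^64 */

	if (!is_64bit)
		period &= 0xffffffffULL;	/* distance to 2^32 */
	return period;
}

int main(void)
{
	/* a 32-bit counter holding 0xfffffff0 wraps after 0x10 more ticks;
	 * a 64-bit counter only after 2^64 - 0xfffffff0 */
	printf("32-bit period: %#llx\n",
	       (unsigned long long)next_period(0xfffffff0ULL, 0));
	printf("64-bit period: %#llx\n",
	       (unsigned long long)next_period(0xfffffff0ULL, 1));
	return 0;
}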
@@ -567,12 +589,12 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 		 * high counter.
 		 */
 		attr.sample_period = (-counter) & GENMASK(63, 0);
+		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
+			attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
+
 		event = perf_event_create_kernel_counter(&attr, -1, current,
 							 kvm_pmu_perf_overflow,
 							 pmc + 1);
-
-		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
-			attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
 	} else {
 		/* The initial sample period (overflow count) of an event. */
 		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
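Note on the kvm_pmu_create_perf_event() hunk above: perf_event_create_kernel_counter() copies the perf_event_attr when the event is allocated, so setting PERF_ATTR_CFG1_KVM_PMU_CHAINED in attr.config1 after the call, as the old code did, never reached the event and chained counters were not functional. The flag has to be set before the event is created. Below is an illustrative-only sketch of that ordering pitfall; the structures and create_event() helper are hypothetical stand-ins, not the kernel's perf API.

/*
 * Illustrative-only sketch of why the ordering matters: the event
 * constructor snapshots the attribute structure, so flags set after the
 * call never reach the event. The types and create_event() below are
 * hypothetical stand-ins, not the kernel's perf API.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_attr { uint64_t config1; };
struct fake_event { struct fake_attr attr; };

/* stands in for perf_event_create_kernel_counter(): copies *attr */
static struct fake_event create_event(const struct fake_attr *attr)
{
	struct fake_event ev;

	memcpy(&ev.attr, attr, sizeof(ev.attr));
	return ev;
}

int main(void)
{
	struct fake_attr attr = { .config1 = 0 };
	struct fake_event ev;

	/* broken order: the CHAINED-style flag never reaches the event */
	ev = create_event(&attr);
	attr.config1 |= 1;
	printf("flag set after create:  config1 = %llu\n",
	       (unsigned long long)ev.attr.config1);

	/* fixed order: the flag is already set when the event is created */
	ev = create_event(&attr);
	printf("flag set before create: config1 = %llu\n",
	       (unsigned long long)ev.attr.config1);
	return 0;
}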