/*
 * PMU register read/write functions for TCG IBM POWER chips
 *
 * Copyright IBM Corp. 2021
 *
 * Authors:
 *  Daniel Henrique Barboza      <danielhb413@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

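/*
 * The callbacks below are meant to be wired up when the PMU SPRs are
 * registered during CPU init (target/ppc/cpu_init.c). As a rough,
 * hypothetical illustration only (the helper name, SPR constant and
 * argument order are assumptions, not a copy of the real registration
 * code), hooking up the userspace-visible MMCR0 could look like:
 *
 *   spr_register(env, SPR_POWER_UMMCR0, "UMMCR0",
 *                &spr_read_MMCR0_ureg, &spr_write_MMCR0_ureg,
 *                &spr_read_ureg, &spr_write_ureg,
 *                0x00000000);
 */
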
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
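
/*
 * Note on the PMCC checks below: ctx->mmcr0_pmcc0 and ctx->mmcr0_pmcc1
 * are assumed to hold the two bits of MMCR0[PMCC], with pmcc0 being the
 * most significant one. E.g. PMCC = 0b01 corresponds to
 * !mmcr0_pmcc0 && mmcr0_pmcc1.
 */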

/*
 * Checks whether the Group A SPR (MMCR0, MMCR2, MMCRA, and the
 * PMCs) has problem state read access.
 *
 * Read access is granted for all PMCC values but 0b01, where a
 * Facility Unavailable Interrupt will occur.
 */
static bool spr_groupA_read_allowed(DisasContext *ctx)
{
    if (!ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) {
        gen_hvpriv_exception(ctx, POWERPC_EXCP_FU);
        return false;
    }

    return true;
}

/*
 * Checks whether the Group A SPR (MMCR0, MMCR2, MMCRA, and the
 * PMCs) has problem state write access.
 *
 * Write access is granted for PMCC values 0b10 and 0b11. Userspace
 * writing with PMCC 0b00 will generate a Hypervisor Emulation
 * Assistance Interrupt. Userspace writing with PMCC 0b01 will
 * generate a Facility Unavailable Interrupt.
 */
static bool spr_groupA_write_allowed(DisasContext *ctx)
{
    if (ctx->mmcr0_pmcc0) {
        return true;
    }

    if (ctx->mmcr0_pmcc1) {
        /* PMCC = 0b01 */
        gen_hvpriv_exception(ctx, POWERPC_EXCP_FU);
    } else {
        /* PMCC = 0b00 */
        gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
    }

    return false;
}

/*
 * Helper function to avoid code repetition between MMCR0 and
 * MMCR2 problem state write functions.
 *
 * 'ret' must be tcg_temp_freed() by the caller.
 */
static TCGv masked_gprn_for_spr_write(int gprn, int sprn,
                                      uint64_t spr_mask)
{
    TCGv ret = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    /* 'ret' starts with all mask bits cleared */
    gen_load_spr(ret, sprn);
    tcg_gen_andi_tl(ret, ret, ~(spr_mask));

    /* Apply the mask into 'gprn' in a temp var */
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], spr_mask);

    /* Add the masked gprn bits into 'ret' */
    tcg_gen_or_tl(ret, ret, t0);

    tcg_temp_free(t0);

    return ret;
}
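
/*
 * In other words, masked_gprn_for_spr_write() computes
 *
 *   ret = (SPR[sprn] & ~spr_mask) | (GPR[gprn] & spr_mask)
 *
 * so bits outside 'spr_mask' keep their current SPR value and bits
 * inside it are taken from the guest GPR (summary derived from the
 * TCG ops above).
 */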

void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn)
{
    TCGv t0;

    if (!spr_groupA_read_allowed(ctx)) {
        return;
    }

    t0 = tcg_temp_new();

    /*
     * Filter out all bits but FC, PMAO, and PMAE, according
     * to ISA v3.1, in 10.4.4 Monitor Mode Control Register 0,
     * fourth paragraph.
     */
    gen_load_spr(t0, SPR_POWER_MMCR0);
    tcg_gen_andi_tl(t0, t0, MMCR0_UREG_MASK);
    tcg_gen_mov_tl(cpu_gpr[gprn], t0);

    tcg_temp_free(t0);
}
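
/*
 * Note: MMCR0_UREG_MASK is defined outside this file; judging by the
 * comment in spr_read_MMCR0_ureg() above, it is expected to cover only
 * the FC, PMAO and PMAE bits of MMCR0.
 */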

static void write_MMCR0_common(DisasContext *ctx, TCGv val)
{
    /*
     * helper_store_mmcr0 will make clock based operations that
     * will cause 'bad icount read' errors if we do not execute
     * gen_icount_io_start() beforehand.
     */
    gen_icount_io_start(ctx);
    gen_helper_store_mmcr0(cpu_env, val);

    /*
     * End the translation block because MMCR0 writes can change
     * ctx->pmu_insn_cnt.
     */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}

void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn)
{
    TCGv masked_gprn;

    if (!spr_groupA_write_allowed(ctx)) {
        return;
    }

    /*
     * Filter out all bits but FC, PMAO, and PMAE, according
     * to ISA v3.1, in 10.4.4 Monitor Mode Control Register 0,
     * fourth paragraph.
     */
    masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR0,
                                            MMCR0_UREG_MASK);
    write_MMCR0_common(ctx, masked_gprn);

    tcg_temp_free(masked_gprn);
}

void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn)
{
    TCGv t0;

    if (!spr_groupA_read_allowed(ctx)) {
        return;
    }

    t0 = tcg_temp_new();

    /*
     * On read, filter out all bits that are not FCnP0 bits.
     * When MMCR0[PMCC] is set to 0b10 or 0b11, providing
     * problem state programs read/write access to MMCR2,
     * only the FCnP0 bits can be accessed. All other bits are
     * not changed when mtspr is executed in problem state, and
     * all other bits return 0s when mfspr is executed in problem
     * state, according to ISA v3.1, section 10.4.6 Monitor Mode
     * Control Register 2, p. 1316, third paragraph.
     */
    gen_load_spr(t0, SPR_POWER_MMCR2);
    tcg_gen_andi_tl(t0, t0, MMCR2_UREG_MASK);
    tcg_gen_mov_tl(cpu_gpr[gprn], t0);

    tcg_temp_free(t0);
}

void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn)
{
    TCGv masked_gprn;

    if (!spr_groupA_write_allowed(ctx)) {
        return;
    }

    /*
     * Filter the bits that can be written using MMCR2_UREG_MASK,
     * similar to what is done in spr_write_MMCR0_ureg().
     */
    masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR2,
                                            MMCR2_UREG_MASK);
    gen_store_spr(SPR_POWER_MMCR2, masked_gprn);

    tcg_temp_free(masked_gprn);
}

void spr_read_PMC(DisasContext *ctx, int gprn, int sprn)
{
    TCGv_i32 t_sprn = tcg_const_i32(sprn);

    /*
     * gen_icount_io_start() is called for the same reason as in
     * write_MMCR0_common(): the PMC helper is expected to perform
     * clock based operations.
     */
    gen_icount_io_start(ctx);
    gen_helper_read_pmc(cpu_gpr[gprn], cpu_env, t_sprn);

    tcg_temp_free_i32(t_sprn);
}

void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn)
{
    if (!spr_groupA_read_allowed(ctx)) {
        return;
    }
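
    /*
     * The userspace PMC SPRs (UPMC1-UPMC4) sit 0x10 below their
     * privileged counterparts (PMC1-PMC4) in the SPR numbering, which
     * is presumably why 0x10 is added to sprn before dispatching to
     * the privileged read path below.
     */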
    spr_read_PMC(ctx, gprn, sprn + 0x10);
}

void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn)
{
    /*
     * If PMCC = 0b11, PMC5 and PMC6 aren't included in the Performance
     * Monitor, and a read attempt results in a Facility Unavailable
     * Interrupt.
     */
    if (ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) {
        gen_hvpriv_exception(ctx, POWERPC_EXCP_FU);
        return;
    }

    /* The remaining steps are similar to PMCs 1-4 userspace read */
    spr_read_PMC14_ureg(ctx, gprn, sprn);
}

void spr_write_PMC(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t_sprn = tcg_const_i32(sprn);

    gen_icount_io_start(ctx);
    gen_helper_store_pmc(cpu_env, t_sprn, cpu_gpr[gprn]);

    tcg_temp_free_i32(t_sprn);
}

void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn)
{
    if (!spr_groupA_write_allowed(ctx)) {
        return;
    }

    spr_write_PMC(ctx, sprn + 0x10, gprn);
}

void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn)
{
    /*
     * If PMCC = 0b11, PMC5 and PMC6 aren't included in the Performance
     * Monitor, and a write attempt results in a Facility Unavailable
     * Interrupt.
     */
    if (ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) {
        gen_hvpriv_exception(ctx, POWERPC_EXCP_FU);
        return;
    }

    /* The remaining steps are similar to PMCs 1-4 userspace write */
    spr_write_PMC14_ureg(ctx, sprn, gprn);
}

void spr_write_MMCR0(DisasContext *ctx, int sprn, int gprn)
{
    write_MMCR0_common(ctx, cpu_gpr[gprn]);
}

void spr_write_MMCR1(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_mmcr1(cpu_env, cpu_gpr[gprn]);
}

#else
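
/*
 * Without PMU emulation (user-only mode or non-PPC64 targets), the
 * ureg reads below fall back to the generic userspace SPR read and the
 * ureg writes are handled as no-access SPRs, while the privileged
 * MMCR0/MMCR1/PMC writes fall back to spr_write_generic().
 */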

void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn)
{
    spr_read_ureg(ctx, gprn, sprn);
}

void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn)
{
    spr_noaccess(ctx, gprn, sprn);
}

void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn)
{
    spr_read_ureg(ctx, gprn, sprn);
}

void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn)
{
    spr_noaccess(ctx, gprn, sprn);
}

void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn)
{
    spr_read_ureg(ctx, gprn, sprn);
}

void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn)
{
    spr_read_ureg(ctx, gprn, sprn);
}

void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn)
{
    spr_noaccess(ctx, gprn, sprn);
}

void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn)
{
    spr_noaccess(ctx, gprn, sprn);
}

void spr_write_MMCR0(DisasContext *ctx, int sprn, int gprn)
{
    spr_write_generic(ctx, sprn, gprn);
}

void spr_write_MMCR1(DisasContext *ctx, int sprn, int gprn)
{
    spr_write_generic(ctx, sprn, gprn);
}

void spr_write_PMC(DisasContext *ctx, int sprn, int gprn)
{
    spr_write_generic(ctx, sprn, gprn);
}

#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */