perf/imx_ddr: Add enhanced AXI ID filter support
[ Upstream commit 44f8bd014a ]
The DDR_CAP_AXI_ID_FILTER quirk indicates that the HW supports an AXI ID
filter which can only count bursts from DDR transactions, i.e. DDR
read/write requests.
This patch adds the DDR_CAP_AXI_ID_ENHANCED_FILTER quirk, indicating that
the HW supports an AXI ID filter which can count both bursts and bytes
from DDR transactions at the same time. We want the PMU to always return
bytes in the driver, since that is more meaningful for users.
Signed-off-by: Joakim Zhang <qiangqing.zhang@nxp.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
parent
b02b0a6bcc
commit
81909bd070
|
@ -45,7 +45,8 @@
|
|||
static DEFINE_IDA(ddr_ida);
|
||||
|
||||
/* DDR Perf hardware feature */
|
||||
#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */
|
||||
#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */
|
||||
#define DDR_CAP_AXI_ID_FILTER_ENHANCED 0x3 /* support enhanced AXI ID filter */
|
||||
|
||||
struct fsl_ddr_devtype_data {
|
||||
unsigned int quirks; /* quirks needed for different DDR Perf core */
|
||||
|
@ -178,6 +179,36 @@ static const struct attribute_group *attr_groups[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static bool ddr_perf_is_filtered(struct perf_event *event)
|
||||
{
|
||||
return event->attr.config == 0x41 || event->attr.config == 0x42;
|
||||
}
|
||||
|
||||
static u32 ddr_perf_filter_val(struct perf_event *event)
|
||||
{
|
||||
return event->attr.config1;
|
||||
}
|
||||
|
||||
static bool ddr_perf_filters_compatible(struct perf_event *a,
|
||||
struct perf_event *b)
|
||||
{
|
||||
if (!ddr_perf_is_filtered(a))
|
||||
return true;
|
||||
if (!ddr_perf_is_filtered(b))
|
||||
return true;
|
||||
return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
|
||||
}
|
||||
|
||||
static bool ddr_perf_is_enhanced_filtered(struct perf_event *event)
|
||||
{
|
||||
unsigned int filt;
|
||||
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
|
||||
|
||||
filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED;
|
||||
return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) &&
|
||||
ddr_perf_is_filtered(event);
|
||||
}
|
||||
|
||||
static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
|
||||
{
|
||||
int i;
|
||||
|
@ -209,27 +240,17 @@ static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
|
|||
|
||||
static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
|
||||
{
|
||||
return readl_relaxed(pmu->base + COUNTER_READ + counter * 4);
|
||||
}
|
||||
struct perf_event *event = pmu->events[counter];
|
||||
void __iomem *base = pmu->base;
|
||||
|
||||
static bool ddr_perf_is_filtered(struct perf_event *event)
|
||||
{
|
||||
return event->attr.config == 0x41 || event->attr.config == 0x42;
|
||||
}
|
||||
|
||||
static u32 ddr_perf_filter_val(struct perf_event *event)
|
||||
{
|
||||
return event->attr.config1;
|
||||
}
|
||||
|
||||
static bool ddr_perf_filters_compatible(struct perf_event *a,
|
||||
struct perf_event *b)
|
||||
{
|
||||
if (!ddr_perf_is_filtered(a))
|
||||
return true;
|
||||
if (!ddr_perf_is_filtered(b))
|
||||
return true;
|
||||
return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
|
||||
/*
|
||||
* return bytes instead of bursts from ddr transaction for
|
||||
* axid-read and axid-write event if PMU core supports enhanced
|
||||
* filter.
|
||||
*/
|
||||
base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 :
|
||||
COUNTER_READ;
|
||||
return readl_relaxed(base + counter * 4);
|
||||
}
|
||||
|
||||
static int ddr_perf_event_init(struct perf_event *event)
|
||||
|
|
Loading…
Reference in New Issue