s390/cpum_cf: cleanup event/counter validation

The validate_event() function only checked for reserved counters
in the individual CPU-MF counter sets.  Because the number of
counters in a counter set varies among hardware models, remove the
explicit check to tolerate new models.

Reserved counters are not accounted and, thus, will return zero.
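
For illustration (not part of this patch), a raw CPU-MF counter can be
read from user space with a plain perf_event_open(); the sysfs path
and the counter number below are assumptions for the example, since
which counter numbers exist is model-dependent:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	FILE *f;
	int fd, type;

	/* PMU type of the counter facility; the sysfs path is an assumption */
	f = fopen("/sys/bus/event_source/devices/cpum_cf/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 1;	/* example counter number (model-dependent) */

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;
	/* a counter the model reserves is not incremented and reads zero */
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("counter value: %llu\n", count);
	close(fd);
	return 0;
}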

Signed-off-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

@@ -1,7 +1,7 @@
 /*
  * Performance event support for s390x - CPU-measurement Counter Facility
  *
- * Copyright IBM Corp. 2012
+ * Copyright IBM Corp. 2012, 2017
  * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -102,26 +102,6 @@ static int get_counter_set(u64 event)
 	return set;
 }
 
-static int validate_event(const struct hw_perf_event *hwc)
-{
-	switch (hwc->config_base) {
-	case CPUMF_CTR_SET_BASIC:
-	case CPUMF_CTR_SET_USER:
-	case CPUMF_CTR_SET_CRYPTO:
-	case CPUMF_CTR_SET_EXT:
-		/* check for reserved counters */
-		if ((hwc->config >= 6 && hwc->config <= 31) ||
-		    (hwc->config >= 38 && hwc->config <= 63) ||
-		    (hwc->config >= 80 && hwc->config <= 127))
-			return -EOPNOTSUPP;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int validate_ctr_version(const struct hw_perf_event *hwc)
 {
 	struct cpu_hw_events *cpuhw;
@@ -381,15 +361,6 @@ static int __hw_perf_event_init(struct perf_event *event)
 	hwc->config = ev;
 	hwc->config_base = get_counter_set(ev);
 
-	/* Validate the counter that is assigned to this event.
-	 * Because the counter facility can use numerous counters at the
-	 * same time without constraints, it is not necessary to explicitly
-	 * validate event groups (event->group_leader != event).
-	 */
-	err = validate_event(hwc);
-	if (err)
-		return err;
-
 	/* Initialize for using the CPU-measurement counter facility */
	if (!atomic_inc_not_zero(&num_events)) {
 		mutex_lock(&pmc_reserve_mutex);
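
With validate_event() removed, a counter's availability is governed by
the counter facility version checks in validate_ctr_version() and by
the hardware itself: counters a model does not implement are not
incremented and simply read as zero.  A standalone sketch of this
style of version gating follows; the set names mirror the kernel's,
but the structure, fields, and thresholds are simplified assumptions,
not the kernel source:

#include <stdio.h>

/* Simplified model of version-gated counter-set validation. */
enum ctr_set { CTR_SET_BASIC, CTR_SET_USER, CTR_SET_CRYPTO, CTR_SET_EXT };

struct cf_info {		/* assumed subset of the CPU-MF info block */
	unsigned int cfvn;	/* counter first-version number */
	unsigned int csvn;	/* counter second-version number */
};

/* Return 0 if the set is usable, -1 if the facility version lacks it. */
static int check_ctr_set(const struct cf_info *info, enum ctr_set set)
{
	switch (set) {
	case CTR_SET_BASIC:
	case CTR_SET_USER:
		return info->cfvn >= 1 ? 0 : -1;
	case CTR_SET_CRYPTO:
	case CTR_SET_EXT:
		return info->csvn >= 1 ? 0 : -1;
	}
	return -1;
}

int main(void)
{
	struct cf_info info = { .cfvn = 1, .csvn = 2 };

	printf("crypto set usable: %s\n",
	       check_ctr_set(&info, CTR_SET_CRYPTO) ? "no" : "yes");
	return 0;
}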