e4f387d8db
Unpaired calling of probe_hcall_entry and probe_hcall_exit might happen
as follows, which could cause an incorrect preempt count:

    __trace_hcall_entry => trace_hcall_entry -> probe_hcall_entry => get_cpu_var => preempt_disable
    __trace_hcall_exit  => trace_hcall_exit  -> probe_hcall_exit  => put_cpu_var => preempt_enable

where both A => B and A -> B mean A calls B, but:

    =>  A calls B directly by function name, so B is definitely called.
    ->  A calls B through a function pointer, so B might not be called
        if the function pointer is not set.

So the error happens when only one of probe_hcall_entry and
probe_hcall_exit is called during an hcall.

This patch moves the preempt count operations from probe_hcall_entry
and probe_hcall_exit to their callers.

Reported-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Li Zhong <zhong@linux.vnet.ibm.com>
Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
CC: stable@kernel.org [v2.6.32+]
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
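As an illustration only, here is a minimal sketch of the caller-side pairing the
commit message describes. The function and tracepoint names come from the message
above; the signatures simply mirror the probe signatures in the file below and are
assumptions, and the real wrappers (which live outside this file) carry additional
logic. This is a sketch of the idea, not the exact patch:

/* Sketch: the wrappers, not the probes, own the preempt count, so the
 * disable/enable always pair even when only one (or neither) probe is
 * registered on the tracepoints.
 */
void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	preempt_disable();
	trace_hcall_entry(opcode, args);	/* may or may not call probe_hcall_entry */
}

void __trace_hcall_exit(unsigned long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	trace_hcall_exit(opcode, retval, retbuf); /* may or may not call probe_hcall_exit */
	preempt_enable();
}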
166 lines
3.9 KiB
C
/*
 * Copyright (C) 2006 Mike Kravetz IBM Corporation
 *
 * Hypervisor Call Instrumentation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/hvcall.h>
#include <asm/firmware.h>
#include <asm/cputable.h>
#include <asm/trace.h>

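/* Per-CPU hcall statistics, one slot per opcode (indexed by opcode / 4
 * in the probes below) and exposed per CPU through debugfs. */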
DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats);

/*
 * Routines for displaying the statistics in debugfs
 */
static void *hc_start(struct seq_file *m, loff_t *pos)
{
	if ((int)*pos < (HCALL_STAT_ARRAY_SIZE-1))
		return (void *)(unsigned long)(*pos + 1);

	return NULL;
}

static void *hc_next(struct seq_file *m, void *p, loff_t * pos)
{
	++*pos;

	return hc_start(m, pos);
}

static void hc_stop(struct seq_file *m, void *p)
{
}

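/* hcall opcodes are multiples of 4, so slot h_num is printed as opcode
 * h_num << 2 (the probes below index hcall_stats by opcode / 4). */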
static int hc_show(struct seq_file *m, void *p)
{
	unsigned long h_num = (unsigned long)p;
	struct hcall_stats *hs = m->private;

	if (hs[h_num].num_calls) {
		if (cpu_has_feature(CPU_FTR_PURR))
			seq_printf(m, "%lu %lu %lu %lu\n", h_num<<2,
				   hs[h_num].num_calls,
				   hs[h_num].tb_total,
				   hs[h_num].purr_total);
		else
			seq_printf(m, "%lu %lu %lu\n", h_num<<2,
				   hs[h_num].num_calls,
				   hs[h_num].tb_total);
	}

	return 0;
}

static const struct seq_operations hcall_inst_seq_ops = {
	.start = hc_start,
	.next  = hc_next,
	.stop  = hc_stop,
	.show  = hc_show
};

static int hcall_inst_seq_open(struct inode *inode, struct file *file)
{
	int rc;
	struct seq_file *seq;

	rc = seq_open(file, &hcall_inst_seq_ops);
	seq = file->private_data;
	seq->private = file->f_path.dentry->d_inode->i_private;

	return rc;
}

static const struct file_operations hcall_inst_seq_fops = {
	.open = hcall_inst_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#define HCALL_ROOT_DIR		"hcall_inst"
#define CPU_NAME_BUF_SIZE	32


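/*
 * Tracepoint probes.  After this change the preempt_disable()/preempt_enable()
 * pair is done by the callers (__trace_hcall_entry/__trace_hcall_exit), so the
 * probes always run with preemption disabled, __get_cpu_var() is safe here, and
 * the preempt count stays balanced even if only one of the probes is called.
 */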
static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long *args)
{
	struct hcall_stats *h;

	if (opcode > MAX_HCALL_OPCODE)
		return;

	h = &__get_cpu_var(hcall_stats)[opcode / 4];
	h->tb_start = mftb();
	h->purr_start = mfspr(SPRN_PURR);
}

static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long retval,
			     unsigned long *retbuf)
{
	struct hcall_stats *h;

	if (opcode > MAX_HCALL_OPCODE)
		return;

	h = &__get_cpu_var(hcall_stats)[opcode / 4];
	h->num_calls++;
	h->tb_total += mftb() - h->tb_start;
	h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
}

static int __init hcall_inst_init(void)
{
	struct dentry *hcall_root;
	struct dentry *hcall_file;
	char cpu_name_buf[CPU_NAME_BUF_SIZE];
	int cpu;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	if (register_trace_hcall_entry(probe_hcall_entry, NULL))
		return -EINVAL;

	if (register_trace_hcall_exit(probe_hcall_exit, NULL)) {
		unregister_trace_hcall_entry(probe_hcall_entry, NULL);
		return -EINVAL;
	}

	hcall_root = debugfs_create_dir(HCALL_ROOT_DIR, NULL);
	if (!hcall_root)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		snprintf(cpu_name_buf, CPU_NAME_BUF_SIZE, "cpu%d", cpu);
		hcall_file = debugfs_create_file(cpu_name_buf, S_IRUGO,
						 hcall_root,
						 per_cpu(hcall_stats, cpu),
						 &hcall_inst_seq_fops);
		if (!hcall_file)
			return -ENOMEM;
	}

	return 0;
}
__initcall(hcall_inst_init);