From 966dc11fcc48866a24d9b3fd16eeebb3f5e05931 Mon Sep 17 00:00:00 2001
From: David Mosberger-Tang
Date: Fri, 6 May 2005 05:25:00 -0700
Subject: [PATCH 01/12] [IA64] Fix stack placement when INIT hits in kernel mode.

Without this patch, the stack is placed _below_ the current task
structure, which is risky at best.

Tony, I think this patch needs to go into 2.6.12, since it fixes a
real bug.  Without it, INIT may cause secondary errors, which would be
most unpleasant.

Signed-off-by: David Mosberger-Tang
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/minstate.h | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index 1dbc7b2497c9..f6d8a010d99b 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -41,7 +41,7 @@
 (pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;;				\
 (pKStk) ld8 r3 = [r3];;							\
 (pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;;			\
-(pKStk) addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3;			\
+(pKStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3;			\
 (pUStk) mov ar.rsc=0;	/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
 (pUStk) addl r22=IA64_RBS_OFFSET,r1;	/* compute base of register backing store */	\
 	;;								\
@@ -50,7 +50,6 @@
 (pUStk) mov r23=ar.bspstore;		/* save ar.bspstore */		\
 (pUStk) dep r22=-1,r22,61,3;	/* compute kernel virtual addr of RBS */	\
 	;;								\
-(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;	/* if in kernel mode, use sp (r12) */	\
 (pUStk) mov ar.bspstore=r22;		/* switch to kernel RBS */	\
 	;;								\
 (pUStk) mov r18=ar.bsp;							\

From 66302f211a21bb9439a2ae7f7b6a4c386bb10ecd Mon Sep 17 00:00:00 2001
From: David Mosberger-Tang
Date: Tue, 12 Apr 2005 11:04:00 -0700
Subject: [PATCH 02/12] [IA64] fix "section mismatch" compile-time error

I noticed this typo when trying to compile a kernel which had
CONFIG_HOTPLUG turned off.  In that case, __devinit is no longer a
no-op and the compiler then detects a section conflict.  Fix by using
__devinitdata instead of __devinit.

Same patch also submitted by Darren Williams to fix compilation error
using sim_defconfig (which has CONFIG_HOTPLUG=n).

Signed-off-by: David Mosberger-Tang
Signed-off-by: Darren Williams
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/smpboot.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 0d5ee57c9865..3865f088ffa2 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -624,7 +624,7 @@ static struct {
 	__u16 thread_id;
 	__u16 proc_fixed_addr;
 	__u8 valid;
-}mt_info[NR_CPUS] __devinit;
+} mt_info[NR_CPUS] __devinitdata;
 
 #ifdef CONFIG_HOTPLUG_CPU
 static inline void
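For reference, and not part of the patch itself: __devinit annotates functions and
__devinitdata is the matching annotation for data.  With CONFIG_HOTPLUG=n both expand
to real section attributes (__init/__initdata), so tagging a data object with __devinit
asks the compiler to place data in an init *text* section, which is the section
conflict reported above.  A minimal sketch, with invented names:

/* code that only runs while a CPU/device is being brought up */
static int __devinit foo_setup(int cpu)
{
        return 0;
}

/* data referenced only from such code takes the *data* form */
static int foo_state[NR_CPUS] __devinitdata;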
From bfd68594082d8384781c242aa72a7950b5cf51aa Mon Sep 17 00:00:00 2001
From: David Mosberger-Tang
Date: Wed, 4 May 2005 06:42:00 -0700
Subject: [PATCH 03/12] [IA64] Avoid .spillpsp directive in handcoded assembly

Some time ago, GAS was fixed to bring the .spillpsp directive in line
with the Intel assembler manual (there was some disagreement as to
whether or not there is a built-in 16-byte offset).  Unfortunately,
there are two places in the kernel where this directive is used in
handwritten assembly files and those of course relied on the "buggy"
behavior.  As a result, when using a "fixed" assembler, the kernel
picks up the UNaT bits from the wrong place (off by 16) and randomly
sets NaT bits on the scratch registers.  This can be noticed easily by
looking at a coredump and finding various scratch registers with
unexpected NaT values.

The patch below fixes this by using the .spillsp directive instead,
which works correctly no matter what assembler is in use.

Signed-off-by: David Mosberger-Tang
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/entry.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 81c45d447394..d99316c9be28 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1182,7 +1182,7 @@ ENTRY(notify_resume_user)
 	;;
 (pNonSys) mov out2=0				// out2==0 => not a syscall
 	.fframe 16
-	.spillpsp ar.unat, 16			// (note that offset is relative to psp+0x10!)
+	.spillsp ar.unat, 16
 	st8 [sp]=r9,-16				// allocate space for ar.unat and save it
 	st8 [out1]=loc1,-8			// save ar.pfs, out1=&sigscratch
 	.body
@@ -1208,7 +1208,7 @@ GLOBAL_ENTRY(sys_rt_sigsuspend)
 	adds out2=8,sp				// out2=&sigscratch->ar_pfs
 	;;
 	.fframe 16
-	.spillpsp ar.unat, 16			// (note that offset is relative to psp+0x10!)
+	.spillsp ar.unat, 16
 	st8 [sp]=r9,-16				// allocate space for ar.unat and save it
 	st8 [out2]=loc1,-8			// save ar.pfs, out2=&sigscratch
 	.body

From 02a017a9f37ae6842e065da919b5952eb847e869 Mon Sep 17 00:00:00 2001
From: David Mosberger-Tang
Date: Tue, 10 May 2005 11:35:00 -0700
Subject: [PATCH 04/12] [IA64] Correct convert_to_non_syscall()

convert_to_non_syscall() has the same problem that unwind_to_user()
used to have.  Fix it likewise.

Signed-off-by: David Mosberger-Tang
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/ptrace.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 907464ee7273..08c8a5eb25ab 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -692,16 +692,30 @@ convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
 			unsigned long cfm)
 {
 	struct unw_frame_info info, prev_info;
-	unsigned long ip, pr;
+	unsigned long ip, sp, pr;
 
 	unw_init_from_blocked_task(&info, child);
 	while (1) {
 		prev_info = info;
 		if (unw_unwind(&info) < 0)
 			return;
-		if (unw_get_rp(&info, &ip) < 0)
+
+		unw_get_sp(&info, &sp);
+		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
+		    < IA64_PT_REGS_SIZE) {
+			dprintk("ptrace.%s: ran off the top of the kernel "
+				"stack\n", __FUNCTION__);
 			return;
-		if (ip < FIXADDR_USER_END)
+		}
+		if (unw_get_pr (&prev_info, &pr) < 0) {
+			unw_get_rp(&prev_info, &ip);
+			dprintk("ptrace.%s: failed to read "
+				"predicate register (ip=0x%lx)\n",
+				__FUNCTION__, ip);
+			return;
+		}
+		if (unw_is_intr_frame(&info)
+		    && (pr & (1UL << PRED_USER_STACK)))
 			break;
 	}
 
From 056b5033fb8d16095fb074fc910fad90186374f3 Mon Sep 17 00:00:00 2001
From: Jesse Barnes
Date: Mon, 9 May 2005 14:52:00 -0700
Subject: [PATCH 05/12] [PATCH] update sn2 maintainer

Now that I'm no longer at SGI and don't have access to Altix equipment,
it's time to pass on the job of patch monkey to someone else.  Greg
Edwards has foolishly^Wkindly volunteered for the job, so here's a
patch to update the MAINTAINERS file with the appropriate information.

Signed-off-by: Jesse Barnes
Signed-off-by: Tony Luck
---
 MAINTAINERS | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index 5b8483334de1..97bc927d2c55 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1023,8 +1023,8 @@ W:	http://www.ia64-linux.org/
 S:	Maintained
 
 SN-IA64 (Itanium) SUB-PLATFORM
-P:	Jesse Barnes
-M:	jbarnes@sgi.com
+P:	Greg Edwards
+M:	edwardsg@sgi.com
 L:	linux-altix@sgi.com
 L:	linux-ia64@vger.kernel.org
 W:	http://www.sgi.com/altix

From bb68c12b40c3c745381d7ce61d7b2e371f157505 Mon Sep 17 00:00:00 2001
From: Russ Anderson <(rja@sgi.com)>
Date: Mon, 9 May 2005 15:03:00 -0700
Subject: [PATCH 06/12] [IA64-SGI] cpe interrupts are not being enabled.

acpi_request_vector() is called in ia64_mca_init() to get the
cpe_vector.  The problem is that acpi_request_vector() looks in
platform_intr_list[] to get the vector, but platform_intr_list[] is not
initialized with a valid vector until later (in sn_setup()).  Without a
valid vector the code defaults to polling mode.

This patch moves the call to acpi_request_vector() from ia64_mca_init()
to ia64_mca_late_init(), which is after platform_intr_list[] is
initialized.

Signed-off-by: Russ Anderson (rja@sgi.com)
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/mca.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 4d6c7b8f667b..2c75741dcc66 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1390,8 +1390,7 @@ ia64_mca_init(void)
 	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
 
 #ifdef CONFIG_ACPI
-	/* Setup the CPEI/P vector and handler */
-	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
+	/* Setup the CPEI/P handler */
 	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
 #endif
 
@@ -1436,6 +1435,7 @@ ia64_mca_late_init(void)
 
 #ifdef CONFIG_ACPI
 	/* Setup the CPEI/P vector and handler */
+	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
 	init_timer(&cpe_poll_timer);
 	cpe_poll_timer.function = ia64_mca_cpe_poll;
 

From 8eac3757158ccd9c6b43f44f228a5762fec33781 Mon Sep 17 00:00:00 2001
From: Russ Anderson <(rja@sgi.com)>
Date: Mon, 16 May 2005 15:19:00 -0700
Subject: [PATCH 07/12] [IA64-SGI] Make Altix SAL call to POD reentrant

Change the SAL call for POD mode to be reentrant.  This change is
SN specific.

Signed-off-by: Russ Anderson (rja@sgi.com)
Signed-off-by: Tony Luck
---
 include/asm-ia64/sn/sn_sal.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h
index 56d74ca76b5d..94cc5392533a 100644
--- a/include/asm-ia64/sn/sn_sal.h
+++ b/include/asm-ia64/sn/sn_sal.h
@@ -472,7 +472,7 @@ static inline u64
 ia64_sn_pod_mode(void)
 {
 	struct ia64_sal_retval isrv;
-	SAL_CALL(isrv, SN_SAL_POD_MODE, 0, 0, 0, 0, 0, 0, 0);
+	SAL_CALL_REENTRANT(isrv, SN_SAL_POD_MODE, 0, 0, 0, 0, 0, 0, 0);
 	if (isrv.status)
 		return 0;
 	return isrv.v0;
From 6872ec548970e9fb3ccd61013f84f9bb8b30fa9a Mon Sep 17 00:00:00 2001
From: Russ Anderson <(rja@sgi.com)>
Date: Mon, 16 May 2005 15:30:00 -0700
Subject: [PATCH 08/12] [IA64-SGI] Set Altix error handling features

The 2.6 kernel has CPE error thresholding.  This patch lets SAL know
of this error handling feature.  The changes are SN specific.

Signed-off-by: Russ Anderson (rja@sgi.com)
Signed-off-by: Tony Luck
---
 arch/ia64/sn/kernel/setup.c  |  2 ++
 include/asm-ia64/sn/sn_sal.h | 26 ++++++++++++++++++++++++++
 2 files changed, 28 insertions(+)

diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 4fb44984afe6..e64cb8175f7a 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -271,6 +271,8 @@ void __init sn_setup(char **cmdline_p)
 	int major = sn_sal_rev_major(), minor = sn_sal_rev_minor();
 	extern void sn_cpu_init(void);
 
+	ia64_sn_plat_set_error_handling_features();
+
 	/*
 	 * If the generic code has enabled vga console support - lets
 	 * get rid of it again. This is a kludge for the fact that ACPI
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h
index 94cc5392533a..eb0395ad0d6a 100644
--- a/include/asm-ia64/sn/sn_sal.h
+++ b/include/asm-ia64/sn/sn_sal.h
@@ -115,6 +115,13 @@
 #define SAL_IROUTER_INTR_XMIT	SAL_CONSOLE_INTR_XMIT
 #define SAL_IROUTER_INTR_RECV	SAL_CONSOLE_INTR_RECV
 
+/*
+ * Error Handling Features
+ */
+#define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV	0x1
+#define SAL_ERR_FEAT_LOG_SBES			0x2
+#define SAL_ERR_FEAT_MFR_OVERRIDE		0x4
+#define SAL_ERR_FEAT_SBE_THRESHOLD		0xffff0000
 
 /*
  * SAL Error Codes
@@ -341,6 +348,25 @@ ia64_sn_plat_cpei_handler(void)
 	return ret_stuff.status;
 }
 
+/*
+ * Set Error Handling Features
+ */
+static inline u64
+ia64_sn_plat_set_error_handling_features(void)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL_REENTRANT(ret_stuff, SN_SAL_SET_ERROR_HANDLING_FEATURES,
+		(SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV | SAL_ERR_FEAT_LOG_SBES),
+		0, 0, 0, 0, 0, 0);
+
+	return ret_stuff.status;
+}
+
 /*
  * Checks for console input.
  */
From a1ecf7f6e65637ba4470405ad39794710dbf85d4 Mon Sep 17 00:00:00 2001
From: Tony Luck
Date: Wed, 18 May 2005 16:06:00 -0700
Subject: [PATCH 09/12] [IA64] alternate perfmon handler

Patch from Charles Spirakis

Some linux customers want to optimize their applications on the latest
hardware but are not yet willing to upgrade to the latest kernel.  This
patch provides a way to plug in an alternate, basic, and GPL'ed PMU
subsystem to help with their monitoring needs or for specialty work.
It can also be used in case of serious unexpected bugs in perfmon.
Mutual exclusion between the two subsystems is guaranteed, hence no
conflict can arise from both subsystems being present.

Acked-by: Stephane Eranian
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/perfmon.c | 175 +++++++++++++++++++++++++++++++++----
 include/asm-ia64/perfmon.h |   8 ++
 2 files changed, 168 insertions(+), 15 deletions(-)

diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 71c101601e3e..ab9682f8b044 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -11,7 +11,7 @@
  * Version Perfmon-2.x is a rewrite of perfmon-1.x
  * by Stephane Eranian, Hewlett Packard Co.
  *
- * Copyright (C) 1999-2003, 2005 Hewlett Packard Co
+ * Copyright (C) 1999-2005 Hewlett Packard Co
  *               Stephane Eranian
  *               David Mosberger-Tang
  *
@@ -497,6 +497,9 @@ typedef struct {
 static pfm_stats_t		pfm_stats[NR_CPUS];
 static pfm_session_t		pfm_sessions;	/* global sessions information */
 
+static spinlock_t pfm_alt_install_check;
+static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
+
 static struct proc_dir_entry	*perfmon_dir;
 static pfm_uuid_t		pfm_null_uuid = {0,};
 
@@ -606,6 +609,7 @@ DEFINE_PER_CPU(unsigned long, pfm_syst_info);
 DEFINE_PER_CPU(struct task_struct *, pmu_owner);
 DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
 DEFINE_PER_CPU(unsigned long, pmu_activation_number);
+EXPORT_SYMBOL_GPL(per_cpu__pfm_syst_info);
 
 
 /* forward declaration */
@@ -1325,7 +1329,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 error_conflict:
 	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
 		pfm_sessions.pfs_sys_session[cpu]->pid,
-		smp_processor_id()));
+		cpu));
 abort:
 	UNLOCK_PFS(flags);
 
@@ -5555,26 +5559,32 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
 	int ret;
 
 	this_cpu = get_cpu();
-	min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
-	max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
+	if (likely(!pfm_alt_intr_handler)) {
+		min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
+		max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
 
-	start_cycles = ia64_get_itc();
+		start_cycles = ia64_get_itc();
 
-	ret = pfm_do_interrupt_handler(irq, arg, regs);
+		ret = pfm_do_interrupt_handler(irq, arg, regs);
 
-	total_cycles = ia64_get_itc();
+		total_cycles = ia64_get_itc();
 
-	/*
-	 * don't measure spurious interrupts
-	 */
-	if (likely(ret == 0)) {
-		total_cycles -= start_cycles;
+		/*
+		 * don't measure spurious interrupts
+		 */
+		if (likely(ret == 0)) {
+			total_cycles -= start_cycles;
 
-		if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
-		if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
+			if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
+			if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
 
-		pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
+			pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
+		}
 	}
+	else {
+		(*pfm_alt_intr_handler->handler)(irq, arg, regs);
+	}
+
 	put_cpu_no_resched();
 	return IRQ_HANDLED;
 }
@@ -6425,6 +6435,141 @@ static struct irqaction perfmon_irqaction = {
 	.name    = "perfmon"
 };
 
+static void
+pfm_alt_save_pmu_state(void *data)
+{
+	struct pt_regs *regs;
+
+	regs = ia64_task_regs(current);
+
+	DPRINT(("called\n"));
+
+	/*
+	 * should not be necessary but
+	 * let's take no risk
+	 */
+	pfm_clear_psr_up();
+	pfm_clear_psr_pp();
+	ia64_psr(regs)->pp = 0;
+
+	/*
+	 * This call is required
+	 * May cause a spurious interrupt on some processors
+	 */
+	pfm_freeze_pmu();
+
+	ia64_srlz_d();
+}
+
+void
+pfm_alt_restore_pmu_state(void *data)
+{
+	struct pt_regs *regs;
+
+	regs = ia64_task_regs(current);
+
+	DPRINT(("called\n"));
+
+	/*
+	 * put PMU back in state expected
+	 * by perfmon
+	 */
+	pfm_clear_psr_up();
+	pfm_clear_psr_pp();
+	ia64_psr(regs)->pp = 0;
+
+	/*
+	 * perfmon runs with PMU unfrozen at all times
+	 */
+	pfm_unfreeze_pmu();
+
+	ia64_srlz_d();
+}
+
+int
+pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
+{
+	int ret, i;
+	int reserve_cpu;
+
+	/* some sanity checks */
+	if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
+
+	/* do the easy test first */
+	if (pfm_alt_intr_handler) return -EBUSY;
+
+	/* one at a time in the install or remove, just fail the others */
+	if (!spin_trylock(&pfm_alt_install_check)) {
+		return -EBUSY;
+	}
+
+	/* reserve our session */
+	for_each_online_cpu(reserve_cpu) {
+		ret = pfm_reserve_session(NULL, 1, reserve_cpu);
+		if (ret) goto cleanup_reserve;
+	}
+
+	/* save the current system wide pmu states */
+	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+	if (ret) {
+		DPRINT(("on_each_cpu() failed: %d\n", ret));
+		goto cleanup_reserve;
+	}
+
+	/* officially change to the alternate interrupt handler */
+	pfm_alt_intr_handler = hdl;
+
+	spin_unlock(&pfm_alt_install_check);
+
+	return 0;
+
+cleanup_reserve:
+	for_each_online_cpu(i) {
+		/* don't unreserve more than we reserved */
+		if (i >= reserve_cpu) break;
+
+		pfm_unreserve_session(NULL, 1, i);
+	}
+
+	spin_unlock(&pfm_alt_install_check);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
+
+int
+pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
+{
+	int i;
+	int ret;
+
+	if (hdl == NULL) return -EINVAL;
+
+	/* cannot remove someone else's handler! */
+	if (pfm_alt_intr_handler != hdl) return -EINVAL;
+
+	/* one at a time in the install or remove, just fail the others */
+	if (!spin_trylock(&pfm_alt_install_check)) {
+		return -EBUSY;
+	}
+
+	pfm_alt_intr_handler = NULL;
+
+	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+	if (ret) {
+		DPRINT(("on_each_cpu() failed: %d\n", ret));
+	}
+
+	for_each_online_cpu(i) {
+		pfm_unreserve_session(NULL, 1, i);
+	}
+
+	spin_unlock(&pfm_alt_install_check);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
+
 /*
  * perfmon initialization routine, called from the initcall() table
  */
diff --git a/include/asm-ia64/perfmon.h b/include/asm-ia64/perfmon.h
index ed5416c5b1ac..7f3333dd00e4 100644
--- a/include/asm-ia64/perfmon.h
+++ b/include/asm-ia64/perfmon.h
@@ -177,6 +177,10 @@ typedef union {
 
 extern long perfmonctl(int fd, int cmd, void *arg, int narg);
 
+typedef struct {
+	void (*handler)(int irq, void *arg, struct pt_regs *regs);
+} pfm_intr_handler_desc_t;
+
 extern void pfm_save_regs (struct task_struct *);
 extern void pfm_load_regs (struct task_struct *);
 
@@ -187,6 +191,10 @@ extern void pfm_syst_wide_update_task(struct task_struct *, unsigned long info,
 extern void pfm_inherit(struct task_struct *task, struct pt_regs *regs);
 extern void pfm_init_percpu(void);
 extern void pfm_handle_work(void);
+extern int pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *h);
+extern int pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *h);
+
+
 
 /*
  * Reset PMD register flags
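For reference, and not part of the series itself: a short sketch of how a client
module might hook into the interface exported by the patch above.  The module name
and the empty handler body are invented for illustration; the pfm_intr_handler_desc_t
layout and the install/remove entry points are the ones added by the patch.

#include <linux/module.h>
#include <asm/perfmon.h>

/* do-nothing PMU overflow handler, just to show the plumbing */
static void my_pmu_handler(int irq, void *arg, struct pt_regs *regs)
{
        /* read and clear the PMU overflow state here */
}

static pfm_intr_handler_desc_t my_pmu_desc = {
        .handler = my_pmu_handler,
};

static int __init my_pmu_init(void)
{
        /* fails if perfmon already owns the PMU (active sessions)
         * or another alternate handler is installed */
        return pfm_install_alt_pmu_interrupt(&my_pmu_desc);
}

static void __exit my_pmu_exit(void)
{
        pfm_remove_alt_pmu_interrupt(&my_pmu_desc);
}

module_init(my_pmu_init);
module_exit(my_pmu_exit);
MODULE_LICENSE("GPL");	/* both entry points are EXPORT_SYMBOL_GPL */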
From fe12e25ebdd195a57d3fd655061fd2525609b16b Mon Sep 17 00:00:00 2001
From: Tony Luck
Date: Wed, 18 May 2005 17:09:06 -0700
Subject: [PATCH 10/12] [IA64] initialize spinlock pfm_alt_install_check

I applied the penultimate version of the perfmon patch, which didn't
have the initialization of the new spinlock that was added.

Signed-off-by: Tony Luck
---
 arch/ia64/kernel/perfmon.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index ab9682f8b044..0e58ec7db128 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -497,7 +497,7 @@ typedef struct {
 static pfm_stats_t		pfm_stats[NR_CPUS];
 static pfm_session_t		pfm_sessions;	/* global sessions information */
 
-static spinlock_t pfm_alt_install_check;
+static spinlock_t pfm_alt_install_check = SPIN_LOCK_UNLOCKED;
 static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
 
 static struct proc_dir_entry	*perfmon_dir;

From d11cf326bd5e785cc5a3f5a3d3f4e3a5522f4fb7 Mon Sep 17 00:00:00 2001
From: Zhang Yanmin
Date: Sun, 22 May 2005 17:47:00 -0700
Subject: [PATCH 11/12] [IA64] sys_mmap doesn't follow posix.1 when parameter len=0

In the IA64 kernel, sys_mmap calls do_mmap2, and do_mmap2 returns addr
when len=0, which means the mmap system call succeeds.  POSIX.1 says:

	The mmap() function shall fail if:
	[EINVAL] The value of len is zero.

Here is a patch to fix it.

Signed-off-by: Zhang Yanmin
Acked-by: David Mosberger
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/sys_ia64.c | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index a8cf6d8a509c..770fab37928e 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -182,13 +182,6 @@ do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, un
 		}
 	}
 
-	/*
-	 * A zero mmap always succeeds in Linux, independent of whether or not the
-	 * remaining arguments are valid.
-	 */
-	if (len == 0)
-		goto out;
-
 	/* Careful about overflows.. */
 	len = PAGE_ALIGN(len);
 	if (!len || len > TASK_SIZE) {

From fffcc150a21853651ea890a605832c5cccbb6279 Mon Sep 17 00:00:00 2001
From: Tony Luck
Date: Tue, 31 May 2005 10:38:32 -0700
Subject: [PATCH 12/12] [IA64] Use "PER_CPU" form of EXPORT macro

I was gently reminded that there are per-cpu forms of the EXPORT_SYMBOL
macros.

Signed-off-by: Tony Luck
---
 arch/ia64/kernel/perfmon.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 0e58ec7db128..6407bff6bfd7 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -609,7 +609,7 @@ DEFINE_PER_CPU(unsigned long, pfm_syst_info);
 DEFINE_PER_CPU(struct task_struct *, pmu_owner);
 DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
 DEFINE_PER_CPU(unsigned long, pmu_activation_number);
-EXPORT_SYMBOL_GPL(per_cpu__pfm_syst_info);
+EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
 
 
 /* forward declaration */
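For reference, and not part of the patch: the per-CPU form of the export lets other
code refer to the variable by its plain name instead of the mangled per_cpu__ symbol
that the earlier EXPORT_SYMBOL_GPL line had to spell out.  A minimal sketch of a
consumer (a hypothetical module, assumed to run with preemption disabled):

#include <linux/percpu.h>

/* matches DEFINE_PER_CPU(unsigned long, pfm_syst_info) in perfmon.c */
DECLARE_PER_CPU(unsigned long, pfm_syst_info);

static unsigned long read_pfm_syst_info(void)
{
        /* resolvable from modules because of EXPORT_PER_CPU_SYMBOL_GPL() */
        return __get_cpu_var(pfm_syst_info);
}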