[PATCH] kdump: save registers early (inline functions)

- If the system panics, CPU register state is captured through the function
  crash_get_current_regs().  This is not an inline function, hence a stack
  frame is pushed onto the stack before the register state is captured.
  Later this frame is popped and new frames are pushed (machine_kexec).

- In theory this is not quite right, as we are capturing register state for
  a frame that is no longer valid.  This seems to have created backtrace
  problems for ppc64.

- This patch fixes it up.  The very first thing crash_kexec() now does is
  capture the register state; we don't want the backtrace beyond
  crash_kexec() anyway.  crash_get_current_regs() has been made inline.

- crash_setup_regs() is the top architecture-dependent function, responsible
  for capturing the register state as well as performing any
  architecture-dependent tricks, for example fixing up ss and esp on i386.
  crash_setup_regs() has also been made inline to ensure no new call frame
  is pushed onto the stack.  (The resulting capture path is sketched below.)
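
For illustration, the capture path after this patch looks roughly as follows
(a condensed sketch of the code in the diff below, not a verbatim copy; the
kexec locking and crash-image checks are elided):

	void crash_kexec(struct pt_regs *regs)
	{
		struct pt_regs fixed_regs;

		/* Snapshot registers while this frame is still live;
		 * regs may be NULL when coming from a direct panic().
		 */
		crash_setup_regs(&fixed_regs, regs);
		machine_crash_shutdown(&fixed_regs);
		machine_kexec(image);
	}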

Signed-off-by: Vivek Goyal <vgoyal@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
 3 files changed, 51 insertions(+), 45 deletions(-)

--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c

@@ -82,53 +82,12 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
 	final_note(buf);
 }
 
-static void crash_get_current_regs(struct pt_regs *regs)
+static void crash_save_self(struct pt_regs *regs)
 {
-	__asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
-	__asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
-	__asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
-	__asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
-	__asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
-	__asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
-	__asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
-	__asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
-	__asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss));
-	__asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs));
-	__asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds));
-	__asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes));
-	__asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags));
-	regs->eip = (unsigned long)current_text_addr();
-}
-
-/* CPU does not save ss and esp on stack if execution is already
- * running in kernel mode at the time of NMI occurrence. This code
- * fixes it.
- */
-static void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
-{
-	memcpy(newregs, oldregs, sizeof(*newregs));
-	newregs->esp = (unsigned long)&(oldregs->esp);
-	__asm__ __volatile__(
-		"xorl %%eax, %%eax\n\t"
-		"movw %%ss, %%ax\n\t"
-		:"=a"(newregs->xss));
-}
-
-/* We may have saved_regs from where the error came from
- * or it is NULL if via a direct panic().
- */
-static void crash_save_self(struct pt_regs *saved_regs)
-{
-	struct pt_regs regs;
 	int cpu;
 
 	cpu = smp_processor_id();
-
-	if (saved_regs)
-		crash_setup_regs(&regs, saved_regs);
-	else
-		crash_get_current_regs(&regs);
-	crash_save_this_cpu(&regs, cpu);
+	crash_save_this_cpu(regs, cpu);
 }
 
 #ifdef CONFIG_SMP
@@ -147,7 +106,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
 	local_irq_disable();
 
 	if (!user_mode(regs)) {
-		crash_setup_regs(&fixed_regs, regs);
+		crash_fixup_ss_esp(&fixed_regs, regs);
 		regs = &fixed_regs;
 	}
 	crash_save_this_cpu(regs, cpu);
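
For contrast, the two capture paths after this patch can be summarized as
follows (a sketch condensed from the hunk above and the kexec.h additions
below, not verbatim code):

	/* Secondary CPU, NMI path: a kernel-mode frame exists but lacks
	 * valid ss/esp, so only the fixup is needed.
	 */
	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}

	/* Crashing CPU, crash_kexec() path: the frame may be absent
	 * (direct panic), in which case registers are captured inline.
	 */
	crash_setup_regs(&fixed_regs, regs);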

--- a/include/asm-i386/kexec.h
+++ b/include/asm-i386/kexec.h

@@ -2,6 +2,7 @@
 #define _I386_KEXEC_H
 
 #include <asm/fixmap.h>
+#include <asm/ptrace.h>
 
 /*
  * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
@@ -27,4 +28,48 @@
 
 #define MAX_NOTE_BYTES 1024
 
+/* CPU does not save ss and esp on stack if execution is already
+ * running in kernel mode at the time of NMI occurrence. This code
+ * fixes it.
+ */
+static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
+				      struct pt_regs *oldregs)
+{
+	memcpy(newregs, oldregs, sizeof(*newregs));
+	newregs->esp = (unsigned long)&(oldregs->esp);
+	__asm__ __volatile__(
+		"xorl %%eax, %%eax\n\t"
+		"movw %%ss, %%ax\n\t"
+		:"=a"(newregs->xss));
+}
+
+/*
+ * This function is responsible for capturing register states if coming
+ * via panic otherwise just fix up the ss and esp if coming via kernel
+ * mode exception.
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+				    struct pt_regs *oldregs)
+{
+	if (oldregs)
+		crash_fixup_ss_esp(newregs, oldregs);
+	else {
+		__asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->ebx));
+		__asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->ecx));
+		__asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->edx));
+		__asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->esi));
+		__asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->edi));
+		__asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->ebp));
+		__asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->eax));
+		__asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->esp));
+		__asm__ __volatile__("movw %%ss, %%ax;" :"=a"(newregs->xss));
+		__asm__ __volatile__("movw %%cs, %%ax;" :"=a"(newregs->xcs));
+		__asm__ __volatile__("movw %%ds, %%ax;" :"=a"(newregs->xds));
+		__asm__ __volatile__("movw %%es, %%ax;" :"=a"(newregs->xes));
+		__asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->eflags));
+		newregs->eip = (unsigned long)current_text_addr();
+	}
+}
+
 #endif /* _I386_KEXEC_H */
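
A note on crash_fixup_ss_esp() above (my reading of the i386 entry
behaviour, not text from the patch): a trap or NMI raised in kernel mode
pushes eflags, cs and eip but no ss/esp, so the saved pt_regs frame ends
exactly at the interrupted stack top.  The pre-exception stack pointer is
therefore the address of the esp slot itself, which is what the assignment
exploits:

	/* The esp/xss members were never pushed by hardware; their
	 * storage sits where the old stack pointer pointed.
	 */
	newregs->esp = (unsigned long)&(oldregs->esp);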

--- a/kernel/kexec.c
+++ b/kernel/kexec.c

@@ -1057,7 +1057,9 @@ void crash_kexec(struct pt_regs *regs)
 	if (!locked) {
 		image = xchg(&kexec_crash_image, NULL);
 		if (image) {
-			machine_crash_shutdown(regs);
+			struct pt_regs fixed_regs;
+			crash_setup_regs(&fixed_regs, regs);
+			machine_crash_shutdown(&fixed_regs);
 			machine_kexec(image);
 		}
 		xchg(&kexec_lock, 0);
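
For context (unchanged by this patch), the direct-panic entry passes NULL,
so crash_setup_regs() takes the inline-capture branch on the panicking CPU;
illustrative call, not part of this diff:

	/* kernel/panic.c */
	crash_kexec(NULL);	/* no exception frame -> capture inline */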