parisc: Fix and improve kernel stack unwinding

This patchset fixes and substantially improves kernel stack unwinding:
1. Show backtraces with up to 30 call sites
2. Add callinfo to ENTRY_CFI() so that every assembler function gets an
   entry in the unwind table
3. Use constants instead of magic numbers in call_on_stack()
4. Do not depend on CONFIG_KALLSYMS to generate backtraces
5. Speed up backtrace generation

Make sure you have this patch for GNU as installed:
https://sourceware.org/ml/binutils/2018-07/msg00474.html
Without it, the unwind info the assembler emits is often wrong for various
kernel functions.

Signed-off-by: Helge Deller <deller@gmx.de>
Author: Helge Deller <deller@gmx.de>
Date:   2018-08-05 00:03:29 +02:00
commit c8921d72e3 (parent 3b885ac1dc)
10 changed files with 88 additions and 232 deletions

arch/parisc/include/asm/assembly.h

@@ -36,6 +36,7 @@
#define RP_OFFSET 16
#define FRAME_SIZE 128
#define CALLEE_REG_FRAME_SIZE 144
#define REG_SZ 8
#define ASM_ULONG_INSN .dword
#else /* CONFIG_64BIT */
#define LDREG ldw
@@ -50,6 +51,7 @@
#define RP_OFFSET 20
#define FRAME_SIZE 64
#define CALLEE_REG_FRAME_SIZE 128
#define REG_SZ 4
#define ASM_ULONG_INSN .word
#endif
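(REG_SZ is the width of one general register save slot: 8 bytes on 64-bit, 4
bytes on 32-bit. Having it in assembly.h lets call_on_stack() in entry.S
compute its frame-marker offsets symbolically; the private copy in real2.S is
dropped further below.)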

arch/parisc/include/asm/linkage.h

@@ -18,9 +18,9 @@
#ifdef __ASSEMBLY__
#define ENTRY(name) \
.export name !\
ALIGN !\
name:
ALIGN !\
name: ASM_NL\
.export name
#ifdef CONFIG_64BIT
#define ENDPROC(name) \
@@ -31,13 +31,18 @@ name:
END(name)
#endif
#define ENTRY_CFI(name) \
#define ENTRY_CFI(name, ...) \
ENTRY(name) ASM_NL\
.proc ASM_NL\
.callinfo __VA_ARGS__ ASM_NL\
.entry ASM_NL\
CFI_STARTPROC
#define ENDPROC_CFI(name) \
ENDPROC(name) ASM_NL\
CFI_ENDPROC
CFI_ENDPROC ASM_NL\
.exit ASM_NL\
.procend ASM_NL\
ENDPROC(name)
#endif /* __ASSEMBLY__ */
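With the variadic macro the callinfo operands are passed straight through to
the assembler. Hand-expanded for illustration (not part of the patch),
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE) from entry.S below now
emits roughly:

	ALIGN
return_to_handler:
	.export return_to_handler
	.proc
	.callinfo caller,frame=FRAME_SIZE
	.entry
	CFI_STARTPROC

and a bare ENTRY_CFI(name) degenerates to a plain ".callinfo", which is
already enough for GNU as to create an unwind table entry for the function.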

arch/parisc/include/asm/unwind.h

@@ -4,6 +4,9 @@
#include <linux/list.h>
/* Max number of levels to backtrace */
#define MAX_UNWIND_ENTRIES 30
/* From ABI specifications */
struct unwind_table_entry {
unsigned int region_start;

arch/parisc/kernel/entry.S

@@ -766,7 +766,6 @@ END(fault_vector_11)
#endif
/* Fault vector is separately protected and *must* be on its own page */
.align PAGE_SIZE
ENTRY(end_fault_vector)
.import handle_interruption,code
.import do_cpu_irq_mask,code
@@ -778,7 +777,6 @@ ENTRY(end_fault_vector)
*/
ENTRY_CFI(ret_from_kernel_thread)
/* Call schedule_tail first though */
BL schedule_tail, %r2
nop
@@ -817,8 +815,9 @@ ENTRY_CFI(_switch_to)
LDREG TASK_THREAD_INFO(%r25), %r25
bv %r0(%r2)
mtctl %r25,%cr30
ENDPROC_CFI(_switch_to)
_switch_to_ret:
ENTRY_CFI(_switch_to_ret)
mtctl %r0, %cr0 /* Needed for single stepping */
callee_rest
callee_rest_float
@@ -826,7 +825,7 @@ _switch_to_ret:
LDREG -RP_OFFSET(%r30), %r2
bv %r0(%r2)
copy %r26, %r28
ENDPROC_CFI(_switch_to)
ENDPROC_CFI(_switch_to_ret)
/*
* Common rfi return path for interruptions, kernel execve, and
@@ -887,12 +886,14 @@ ENTRY_CFI(syscall_exit_rfi)
STREG %r19,PT_SR5(%r16)
STREG %r19,PT_SR6(%r16)
STREG %r19,PT_SR7(%r16)
ENDPROC_CFI(syscall_exit_rfi)
intr_return:
ENTRY_CFI(intr_return)
/* check for reschedule */
mfctl %cr30,%r1
LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
ENDPROC_CFI(intr_return)
.import do_notify_resume,code
intr_check_sig:
@@ -1048,7 +1049,6 @@ intr_extint:
b do_cpu_irq_mask
ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)
/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
@@ -1999,12 +1999,9 @@ ENDPROC_CFI(syscall_exit)
.align L1_CACHE_BYTES
.globl mcount
.type mcount, @function
ENTRY(mcount)
ENTRY_CFI(mcount, caller)
_mcount:
.export _mcount,data
.proc
.callinfo caller,frame=0
.entry
/*
* The 64bit mcount() function pointer needs 4 dwords, of which the
* first two are free. We optimize it here and put 2 instructions for
@@ -2026,18 +2023,13 @@ ftrace_stub:
.dword mcount
.dword 0 /* code in head.S puts value of global gp here */
#endif
.exit
.procend
ENDPROC(mcount)
ENDPROC_CFI(mcount)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.align 8
.globl return_to_handler
.type return_to_handler, @function
ENTRY_CFI(return_to_handler)
.proc
.callinfo caller,frame=FRAME_SIZE
.entry
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
.export parisc_return_to_handler,data
parisc_return_to_handler:
copy %r3,%r1
@@ -2076,8 +2068,6 @@ parisc_return_to_handler:
bv %r0(%rp)
#endif
LDREGM -FRAME_SIZE(%sp),%r3
.exit
.procend
ENDPROC_CFI(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -2087,31 +2077,30 @@ ENDPROC_CFI(return_to_handler)
#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
unsigned long new_stack) */
ENTRY_CFI(call_on_stack)
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
copy %sp, %r1
/* Regarding the HPPA calling conventions for function pointers,
we assume the PIC register is not changed across call. For
CONFIG_64BIT, the argument pointer is left to point at the
argument region allocated for the call to call_on_stack. */
/* Switch to new stack. We allocate two frames. */
ldo 2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
/* Switch to new stack. We allocate two 128 byte frames. */
ldo 256(%arg2), %sp
/* Save previous stack pointer and return pointer in frame marker */
STREG %rp, -144(%sp)
STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
/* Calls always use function descriptor */
LDREG 16(%arg1), %arg1
bve,l (%arg1), %rp
STREG %r1, -136(%sp)
LDREG -144(%sp), %rp
STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
bve (%rp)
LDREG -136(%sp), %sp
LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
# else
/* Switch to new stack. We allocate two 64 byte frames. */
ldo 128(%arg2), %sp
/* Save previous stack pointer and return pointer in frame marker */
STREG %r1, -68(%sp)
STREG %rp, -84(%sp)
STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
/* Calls use function descriptor if PLABEL bit is set */
bb,>=,n %arg1, 30, 1f
depwi 0,31,2, %arg1
@@ -2119,9 +2108,9 @@ ENTRY_CFI(call_on_stack)
1:
be,l 0(%sr4,%arg1), %sr0, %r31
copy %r31, %rp
LDREG -84(%sp), %rp
LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
bv (%rp)
LDREG -68(%sp), %sp
LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */
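The symbolic offsets select exactly the slots the old magic numbers did: on
64-bit (FRAME_SIZE=128, RP_OFFSET=16, REG_SZ=8) 2*FRAME_SIZE=256,
FRAME_SIZE+RP_OFFSET=144 and FRAME_SIZE+REG_SZ=136; on 32-bit (FRAME_SIZE=64,
RP_OFFSET=20, REG_SZ=4) they yield 128, 84 and 68 -- the same -144/-136 and
-84/-68 slots that were hard-coded above.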

arch/parisc/kernel/pacache.S

@@ -44,10 +44,6 @@
.align 16
ENTRY_CFI(flush_tlb_all_local)
.proc
.callinfo NO_CALLS
.entry
/*
* The pitlbe and pdtlbe instructions should only be used to
* flush the entire tlb. Also, there needs to be no intervening
@@ -189,18 +185,11 @@ fdtdone:
2: bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_tlb_all_local)
.import cache_info,data
ENTRY_CFI(flush_instruction_cache_local)
.proc
.callinfo NO_CALLS
.entry
load32 cache_info, %r1
/* Flush Instruction Cache */
@@ -256,18 +245,11 @@ fisync:
mtsm %r22 /* restore I-bit */
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_instruction_cache_local)
.import cache_info, data
ENTRY_CFI(flush_data_cache_local)
.proc
.callinfo NO_CALLS
.entry
load32 cache_info, %r1
/* Flush Data Cache */
@@ -324,9 +306,6 @@ fdsync:
mtsm %r22 /* restore I-bit */
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_data_cache_local)
/* Macros to serialize TLB purge operations on SMP. */
@@ -362,10 +341,6 @@ ENDPROC_CFI(flush_data_cache_local)
/* Clear page using kernel mapping. */
ENTRY_CFI(clear_page_asm)
.proc
.callinfo NO_CALLS
.entry
#ifdef CONFIG_64BIT
/* Unroll the loop. */
@@ -424,18 +399,11 @@ ENTRY_CFI(clear_page_asm)
#endif
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(clear_page_asm)
/* Copy page using kernel mapping. */
ENTRY_CFI(copy_page_asm)
.proc
.callinfo NO_CALLS
.entry
#ifdef CONFIG_64BIT
/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
* Unroll the loop by hand and arrange insn appropriately.
@@ -542,9 +510,6 @@ ENTRY_CFI(copy_page_asm)
#endif
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(copy_page_asm)
/*
@@ -598,10 +563,6 @@ ENDPROC_CFI(copy_page_asm)
*/
ENTRY_CFI(copy_user_page_asm)
.proc
.callinfo NO_CALLS
.entry
/* Convert virtual `to' and `from' addresses to physical addresses.
Move `from' physical address to non shadowed register. */
ldil L%(__PAGE_OFFSET), %r1
@@ -750,16 +711,9 @@ ENTRY_CFI(copy_user_page_asm)
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(copy_user_page_asm)
ENTRY_CFI(clear_user_page_asm)
.proc
.callinfo NO_CALLS
.entry
tophys_r1 %r26
ldil L%(TMPALIAS_MAP_START), %r28
@@ -836,16 +790,9 @@ ENTRY_CFI(clear_user_page_asm)
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(clear_user_page_asm)
ENTRY_CFI(flush_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
@@ -903,16 +850,9 @@ ENTRY_CFI(flush_dcache_page_asm)
sync
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_dcache_page_asm)
ENTRY_CFI(flush_icache_page_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
@@ -977,16 +917,9 @@ ENTRY_CFI(flush_icache_page_asm)
sync
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_icache_page_asm)
ENTRY_CFI(flush_kernel_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
@@ -1020,16 +953,9 @@ ENTRY_CFI(flush_kernel_dcache_page_asm)
sync
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_kernel_dcache_page_asm)
ENTRY_CFI(purge_kernel_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
@@ -1062,16 +988,9 @@ ENTRY_CFI(purge_kernel_dcache_page_asm)
sync
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(purge_kernel_dcache_page_asm)
ENTRY_CFI(flush_user_dcache_range_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1083,16 +1002,9 @@ ENTRY_CFI(flush_user_dcache_range_asm)
sync
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_user_dcache_range_asm)
ENTRY_CFI(flush_kernel_dcache_range_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1105,16 +1017,9 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
syncdma
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_kernel_dcache_range_asm)
ENTRY_CFI(purge_kernel_dcache_range_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1127,16 +1032,9 @@ ENTRY_CFI(purge_kernel_dcache_range_asm)
syncdma
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(purge_kernel_dcache_range_asm)
ENTRY_CFI(flush_user_icache_range_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1148,16 +1046,9 @@ ENTRY_CFI(flush_user_icache_range_asm)
sync
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_user_icache_range_asm)
ENTRY_CFI(flush_kernel_icache_page)
.proc
.callinfo NO_CALLS
.entry
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
@@ -1191,16 +1082,9 @@ ENTRY_CFI(flush_kernel_icache_page)
sync
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_kernel_icache_page)
ENTRY_CFI(flush_kernel_icache_range_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1212,8 +1096,6 @@ ENTRY_CFI(flush_kernel_icache_range_asm)
sync
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_kernel_icache_range_asm)
__INIT
@@ -1223,10 +1105,6 @@ ENDPROC_CFI(flush_kernel_icache_range_asm)
*/
.align 256
ENTRY_CFI(disable_sr_hashing_asm)
.proc
.callinfo NO_CALLS
.entry
/*
* Switch to real mode
*/
@@ -1308,9 +1186,6 @@ srdis_done:
2: bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(disable_sr_hashing_asm)
.end

arch/parisc/kernel/process.c

@@ -302,7 +302,7 @@ get_wchan(struct task_struct *p)
ip = info.ip;
if (!in_sched_functions(ip))
return ip;
} while (count++ < 16);
} while (count++ < MAX_UNWIND_ENTRIES);
return 0;
}

arch/parisc/kernel/real2.S

@@ -35,12 +35,6 @@ real32_stack:
real64_stack:
.block 8192
#ifdef CONFIG_64BIT
# define REG_SZ 8
#else
# define REG_SZ 4
#endif
#define N_SAVED_REGS 9
save_cr_space:

arch/parisc/kernel/traps.c

@@ -172,7 +172,7 @@ static void do_show_stack(struct unwind_frame_info *info)
int i = 1;
printk(KERN_CRIT "Backtrace:\n");
while (i <= 16) {
while (i <= MAX_UNWIND_ENTRIES) {
if (unwind_once(info) < 0 || info->ip == 0)
break;

arch/parisc/kernel/unwind.c

@@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
@@ -117,7 +116,8 @@ unwind_table_init(struct unwind_table *table, const char *name,
for (; start <= end; start++) {
if (start < end &&
start->region_end > (start+1)->region_start) {
printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
pr_warn("Out of order unwind entry! %px and %px\n",
start, start+1);
}
start->region_start += base_addr;
@@ -203,26 +203,61 @@ int __init unwind_init(void)
return 0;
}
#ifdef CONFIG_64BIT
#define get_func_addr(fptr) fptr[2]
#else
#define get_func_addr(fptr) fptr[0]
#endif
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
extern void handle_interruption(int, struct pt_regs *);
static unsigned long *hi = (unsigned long *)&handle_interruption;
/*
* We have to use void * instead of a function pointer, because
* function pointers aren't a pointer to the function on 64-bit.
* Make them const so the compiler knows they live in .text
*/
extern void * const handle_interruption;
extern void * const ret_from_kernel_thread;
extern void * const syscall_exit;
extern void * const intr_return;
extern void * const _switch_to_ret;
#ifdef CONFIG_IRQSTACKS
extern void * const call_on_stack;
#endif /* CONFIG_IRQSTACKS */
if (pc == get_func_addr(hi)) {
if (pc == (unsigned long) &handle_interruption) {
struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
dbg("Unwinding through handle_interruption()\n");
info->prev_sp = regs->gr[30];
info->prev_ip = regs->iaoq[0];
return 1;
}
if (pc == (unsigned long) &ret_from_kernel_thread ||
pc == (unsigned long) &syscall_exit) {
info->prev_sp = info->prev_ip = 0;
return 1;
}
if (pc == (unsigned long) &intr_return) {
struct pt_regs *regs;
dbg("Found intr_return()\n");
regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN);
info->prev_sp = regs->gr[30];
info->prev_ip = regs->iaoq[0];
info->rp = regs->gr[2];
return 1;
}
if (pc == (unsigned long) &_switch_to_ret) {
info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
return 1;
}
#ifdef CONFIG_IRQSTACKS
if (pc == (unsigned long) &call_on_stack) {
info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
return 1;
}
#endif
return 0;
}
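The removed get_func_addr() existed because a C function pointer on parisc
does not point at the code: on 64-bit it refers to a function descriptor (cf.
the mcount comment above: 4 dwords, code address in dword 2, gp in dword 3).
A hand-written sketch of such a descriptor (illustrative only; some_func is a
made-up name):

	.dword 0		/* dwords 0-1 are free (mcount stores code here) */
	.dword 0
	.dword some_func	/* dword 2: code address -- the old fptr[2] */
	.dword 0		/* dword 3: global pointer, filled in at boot */

Declaring the entry points as "extern void * const" data objects instead makes
&handle_interruption evaluate to the .text address directly. The same layout
explains "LDREG 16(%arg1), %arg1" in call_on_stack(): offset 16 is dword 2.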
@@ -238,34 +273,8 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
if (e == NULL) {
unsigned long sp;
dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
#ifdef CONFIG_KALLSYMS
/* Handle some frequent special cases.... */
{
char symname[KSYM_NAME_LEN];
char *modname;
kallsyms_lookup(info->ip, NULL, NULL, &modname,
symname);
dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);
if (strcmp(symname, "_switch_to_ret") == 0) {
info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
dbg("_switch_to_ret @ %lx - setting "
"prev_sp=%lx prev_ip=%lx\n",
info->ip, info->prev_sp,
info->prev_ip);
return;
} else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
strcmp(symname, "syscall_exit") == 0) {
info->prev_ip = info->prev_sp = 0;
return;
}
}
#endif
dbg("Cannot find unwind entry for %pS; forced unwinding\n",
(void *) info->ip);
/* Since we are doing the unwinding blind, we don't know if
we are adjusting the stack correctly or extracting the rp

arch/parisc/lib/lusercopy.S

@@ -64,9 +64,6 @@
*/
ENTRY_CFI(lclear_user)
.proc
.callinfo NO_CALLS
.entry
comib,=,n 0,%r25,$lclu_done
get_sr
$lclu_loop:
@@ -81,13 +78,9 @@ $lclu_done:
ldo 1(%r25),%r25
ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
.exit
ENDPROC_CFI(lclear_user)
.procend
/*
* long lstrnlen_user(char *s, long n)
*
@@ -97,9 +90,6 @@ ENDPROC_CFI(lclear_user)
*/
ENTRY_CFI(lstrnlen_user)
.proc
.callinfo NO_CALLS
.entry
comib,= 0,%r25,$lslen_nzero
copy %r26,%r24
get_sr
@@ -111,7 +101,6 @@ $lslen_loop:
$lslen_done:
bv %r0(%r2)
sub %r26,%r24,%r28
.exit
$lslen_nzero:
b $lslen_done
@@ -125,9 +114,6 @@ $lslen_nzero:
ENDPROC_CFI(lstrnlen_user)
.procend
/*
* unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
@@ -186,10 +172,6 @@ ENDPROC_CFI(lstrnlen_user)
save_len = r31
ENTRY_CFI(pa_memcpy)
.proc
.callinfo NO_CALLS
.entry
/* Last destination address */
add dst,len,end
@@ -439,9 +421,6 @@ ENTRY_CFI(pa_memcpy)
b .Lcopy_done
10: stw,ma t1,4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
.exit
ENDPROC_CFI(pa_memcpy)
.procend
.end