diff --git a/Documentation/arm/Booting b/Documentation/arm/Booting
index 4e686a2ed91e..a341d87d276e 100644
--- a/Documentation/arm/Booting
+++ b/Documentation/arm/Booting
@@ -164,3 +164,8 @@ In either case, the following conditions must be met:
 - The boot loader is expected to call the kernel image by jumping
   directly to the first instruction of the kernel image.
+  On CPUs supporting the ARM instruction set, the entry must be
+  made in ARM state, even for a Thumb-2 kernel.
+
+  On CPUs supporting only the Thumb instruction set, such as
+  Cortex-M class CPUs, the entry must be made in Thumb state.
diff --git a/Documentation/arm/SH-Mobile/zboot-rom-sdhi.txt b/Documentation/arm/SH-Mobile/zboot-rom-sdhi.txt
new file mode 100644
index 000000000000..441959846e1a
--- /dev/null
+++ b/Documentation/arm/SH-Mobile/zboot-rom-sdhi.txt
@@ -0,0 +1,42 @@
+ROM-able zImage boot from eSD
+-----------------------------
+
+A ROM-able zImage compiled with ZBOOT_ROM_SDHI may be written to eSD, and
+SuperH Mobile ARM will boot directly from the SDHI hardware block.
+
+This is achieved by the mask ROM loading the first portion of the image into
+MERAM and then jumping to it.  This portion contains loader code which
+copies the entire image to SDRAM and jumps to it.  From there the zImage
+boot code proceeds as normal, uncompressing the image into its final
+location and then jumping to it.
+
+This code has been tested on a Mackerel board using the developer 1A eSD
+boot mode, which is configured using the following jumper settings.
+
+   8 7 6 5 4 3 2 1
+   x|x|x|x| |x|x|
+S4 -+-+-+-+-+-+-+-
+    | | | |x| | |x on
+
+The eSD card needs to be present in SDHI slot 1 (CN7).
+As such, S1 and S33 also need to be configured as per
+the notes in arch/arm/mach-shmobile/board-mackerel.c.
+
+A partial zImage must be written to physical partition #1 (boot)
+of the eSD at sector 0 in vrl4 format.  A utility, vrl4, is supplied to
+accomplish this.
+
+e.g.
+	vrl4 < zImage | dd of=/dev/sdX bs=512 count=17
+
+A full copy of _the same_ zImage should be written to physical partition #1
+(boot) of the eSD at sector 1.  This should _not_ be in vrl4 format.
+
+	dd if=zImage of=/dev/sdX bs=512 seek=1
+
+Note: The commands above assume that the physical partition has been
+switched.  No such facility currently exists in the Linux kernel.
+
+Physical partitions are described in the eSD specification.  At the time of
+writing they are not the same as the partitions that are typically configured
+using fdisk and visible through /proc/partitions.
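+
+Note: In the first example above, "bs=512 count=17" writes only the
+first 17 512-byte sectors (8704 bytes) of the vrl4-formatted image;
+presumably this covers the loader portion that the mask ROM copies
+into MERAM before control passes to it.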
diff --git a/Documentation/arm/kernel_user_helpers.txt b/Documentation/arm/kernel_user_helpers.txt
new file mode 100644
index 000000000000..a17df9f91d16
--- /dev/null
+++ b/Documentation/arm/kernel_user_helpers.txt
@@ -0,0 +1,267 @@
+Kernel-provided User Helpers
+============================
+
+These are segments of kernel-provided user code reachable from user space
+at a fixed address in kernel memory.  They are used to provide user space
+with some operations which require kernel help because of unimplemented
+native features and/or instructions in many ARM CPUs.  The idea is for this
+code to be executed directly in user mode for best efficiency, but it is
+too intimate with the kernel counterpart to be left to user libraries.
+In fact this code might even differ from one CPU to another depending on
+the available instruction set, or whether it is an SMP system.  In other
+words, the kernel reserves the right to change this code as needed without
+warning.  Only the entry points and their results as documented here are
+guaranteed to be stable.
+
+This is different from (but doesn't preclude) a full-blown VDSO
+implementation; however, a VDSO would prevent some assembly tricks with
+constants that allow for efficient branching to those code segments.  And
+since those code segments only use a few cycles before returning to user
+code, the indirection of a VDSO far call would add a measurable overhead
+to such minimalistic operations.
+
+User space is expected to bypass those helpers and implement those things
+inline (either in the code emitted directly by the compiler, or part of
+the implementation of a library call) when optimizing for a recent enough
+processor that has the necessary native support, but only if the resulting
+binaries are already going to be incompatible with earlier ARM processors
+due to usage of similar native instructions for other things.  In other
+words, don't make binaries unable to run on earlier processors just for
+the sake of not using these kernel helpers if your compiled code is not
+going to use new instructions for other purposes.
+
+New helpers may be added over time, so an older kernel may be missing some
+helpers present in a newer kernel.  For this reason, programs must check
+the value of __kuser_helper_version (see below) before assuming that it is
+safe to call any particular helper.  This check should ideally be
+performed only once at process startup time, and execution aborted early
+if the required helpers are not provided by the kernel version the
+process is running on.
+
+kuser_helper_version
+--------------------
+
+Location:	0xffff0ffc
+
+Reference declaration:
+
+  extern int32_t __kuser_helper_version;
+
+Definition:
+
+  This field contains the number of helpers being implemented by the
+  running kernel.  User space may read this to determine the availability
+  of a particular helper.
+
+Usage example:
+
+#define __kuser_helper_version (*(int32_t *)0xffff0ffc)
+
+void check_kuser_version(void)
+{
+	if (__kuser_helper_version < 2) {
+		fprintf(stderr, "can't do atomic operations, kernel too old\n");
+		abort();
+	}
+}
+
+Notes:
+
+  User space may assume that the value of this field never changes
+  during the lifetime of any single process.  This means that this
+  field can be read once during the initialisation of a library or
+  startup phase of a program.
+
+kuser_get_tls
+-------------
+
+Location:	0xffff0fe0
+
+Reference prototype:
+
+  void * __kuser_get_tls(void);
+
+Input:
+
+  lr = return address
+
+Output:
+
+  r0 = TLS value
+
+Clobbered registers:
+
+  none
+
+Definition:
+
+  Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
+
+Usage example:
+
+typedef void * (__kuser_get_tls_t)(void);
+#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
+
+void foo()
+{
+	void *tls = __kuser_get_tls();
+	printf("TLS = %p\n", tls);
+}
+
+Notes:
+
+  - Valid only if __kuser_helper_version >= 1 (from kernel version 2.6.12).
+
+kuser_cmpxchg
+-------------
+
+Location:	0xffff0fc0
+
+Reference prototype:
+
+  int __kuser_cmpxchg(int32_t oldval, int32_t newval, volatile int32_t *ptr);
+
+Input:
+
+  r0 = oldval
+  r1 = newval
+  r2 = ptr
+  lr = return address
+
+Output:
+
+  r0 = success code (zero or non-zero)
+  C flag = set if r0 == 0, clear if r0 != 0
+
+Clobbered registers:
+
+  r3, ip, flags
+
+Definition:
+
+  Atomically store newval in *ptr only if *ptr is equal to oldval.
+  Return zero if *ptr was changed or non-zero if no exchange happened.
+  The C flag is also set if *ptr was changed to allow for assembly
+  optimization in the calling code.
+
+Usage example:
+
+typedef int (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
+#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
+
+int atomic_add(volatile int *ptr, int val)
+{
+	int old, new;
+
+	do {
+		old = *ptr;
+		new = old + val;
+	} while(__kuser_cmpxchg(old, new, ptr));
+
+	return new;
+}
+
+Notes:
+
+  - This routine already includes memory barriers as needed.
+
+  - Valid only if __kuser_helper_version >= 2 (from kernel version 2.6.12).
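+
+The same helper can also serve as the basis for simple locking
+primitives.  The following is only an illustrative sketch (the names
+and the locking convention are arbitrary, not part of the interface):
+
+/* Returns 0 if the lock was acquired, non-zero if it is already held.
+ * 0 = unlocked, 1 = locked.  Relies on the memory barriers that
+ * __kuser_cmpxchg already includes. */
+int try_lock(volatile int *lock)
+{
+	return __kuser_cmpxchg(0, 1, lock);
+}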
+
+kuser_memory_barrier
+--------------------
+
+Location:	0xffff0fa0
+
+Reference prototype:
+
+  void __kuser_memory_barrier(void);
+
+Input:
+
+  lr = return address
+
+Output:
+
+  none
+
+Clobbered registers:
+
+  none
+
+Definition:
+
+  Apply any needed memory barrier to preserve consistency with data modified
+  manually and __kuser_cmpxchg usage.
+
+Usage example:
+
+typedef void (__kuser_dmb_t)(void);
+#define __kuser_dmb (*(__kuser_dmb_t *)0xffff0fa0)
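+
+As a further illustration, a sketch of one possible use: publishing
+data to another thread (variable and function names are arbitrary):
+
+int data;
+volatile int ready;
+
+void publish(int value)
+{
+	data = value;
+	__kuser_dmb();		/* make the store to data visible ... */
+	ready = 1;		/* ... before the flag is raised */
+}
+
+/* A consumer polling "ready" needs a matching barrier before it
+ * reads "data". */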
+
+Notes:
+
+  - Valid only if __kuser_helper_version >= 3 (from kernel version 2.6.15).
+
+kuser_cmpxchg64
+---------------
+
+Location:	0xffff0f60
+
+Reference prototype:
+
+  int __kuser_cmpxchg64(const int64_t *oldval,
+                        const int64_t *newval,
+                        volatile int64_t *ptr);
+
+Input:
+
+  r0 = pointer to oldval
+  r1 = pointer to newval
+  r2 = pointer to target value
+  lr = return address
+
+Output:
+
+  r0 = success code (zero or non-zero)
+  C flag = set if r0 == 0, clear if r0 != 0
+
+Clobbered registers:
+
+  r3, lr, flags
+
+Definition:
+
+  Atomically store the 64-bit value pointed to by newval in *ptr only if *ptr
+  is equal to the 64-bit value pointed to by oldval.  Return zero if *ptr was
+  changed or non-zero if no exchange happened.
+
+  The C flag is also set if *ptr was changed to allow for assembly
+  optimization in the calling code.
+
+Usage example:
+
+typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
+                                  const int64_t *newval,
+                                  volatile int64_t *ptr);
+#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
+
+int64_t atomic_add64(volatile int64_t *ptr, int64_t val)
+{
+	int64_t old, new;
+
+	do {
+		old = *ptr;
+		new = old + val;
+	} while(__kuser_cmpxchg64(&old, &new, ptr));
+
+	return new;
+}
+
+Notes:
+
+  - This routine already includes memory barriers as needed.
+
+  - Due to the length of this sequence, this spans 2 conventional kuser
+    "slots", therefore 0xffff0f80 is not used as a valid entry point.
+
+  - Valid only if __kuser_helper_version >= 5 (from kernel version 3.1).
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
new file mode 100644
index 000000000000..1c044eb320cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -0,0 +1,21 @@
+* ARM Performance Monitor Units
+
+ARM cores often have a PMU for counting CPU and cache events like cache misses
+and hits.  The interface to the PMU is part of the ARM ARM.  The ARM PMU
+representation in the device tree should be done as follows:
+
+Required properties:
+
+- compatible : should be one of
+	"arm,cortex-a9-pmu"
+	"arm,cortex-a8-pmu"
+	"arm,arm1176-pmu"
+	"arm,arm1136-pmu"
+- interrupts : 1 combined interrupt or 1 per core.
+
+Example:
+
+pmu {
+        compatible = "arm,cortex-a9-pmu";
+        interrupts = <100 101>;
+};
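+
+In the example above the two cells of "interrupts" presumably describe
+a dual-core system, giving one PMU interrupt per core; a PMU routed to
+a single combined interrupt would use one cell instead.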
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e04fa9d7637c..1478c6171b00 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -10,7 +10,7 @@ config ARM
	select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
	select HAVE_ARCH_KGDB
-	select HAVE_KPROBES if (!XIP_KERNEL && !THUMB2_KERNEL)
+	select HAVE_KPROBES if !XIP_KERNEL
	select HAVE_KRETPROBES if (HAVE_KPROBES)
	select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
	select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
@@ -37,6 +37,9 @@ config ARM
	  Europe.  There is an ARM Linux project with a web page at
	  .

+config ARM_HAS_SG_CHAIN
+	bool
+
 config HAVE_PWM
	bool

@@ -1347,7 +1350,6 @@ config SMP_ON_UP

 config HAVE_ARM_SCU
	bool
-	depends on SMP
	help
	  This option enables support for the ARM system coherency unit

@@ -1716,17 +1718,34 @@ config ZBOOT_ROM
	  Say Y here if you intend to execute your compressed kernel image
	  (zImage) directly from ROM or flash.  If unsure, say N.

+choice
+	prompt "Include SD/MMC loader in zImage (EXPERIMENTAL)"
+	depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
+	default ZBOOT_ROM_NONE
+	help
+	  Include experimental SD/MMC loading code in the ROM-able zImage.
+	  With this enabled it is possible to write the ROM-able zImage
+	  kernel image to an MMC or SD card and boot the kernel straight
+	  from the reset vector.  At reset the processor Mask ROM will load
+	  the first part of the ROM-able zImage which in turn loads the
+	  rest of the kernel image to RAM.
+
+config ZBOOT_ROM_NONE
+	bool "No SD/MMC loader in zImage (EXPERIMENTAL)"
+	help
+	  Do not load image from SD or MMC.
+
 config ZBOOT_ROM_MMCIF
	bool "Include MMCIF loader in zImage (EXPERIMENTAL)"
-	depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
	help
-	  Say Y here to include experimental MMCIF loading code in the
-	  ROM-able zImage. With this enabled it is possible to write the
-	  the ROM-able zImage kernel image to an MMC card and boot the
-	  kernel straight from the reset vector. At reset the processor
-	  Mask ROM will load the first part of the the ROM-able zImage
-	  which in turn loads the rest the kernel image to RAM using the
-	  MMCIF hardware block.
+	  Load image from MMCIF hardware block.
+
+config ZBOOT_ROM_SH_MOBILE_SDHI
+	bool "Include SuperH Mobile SDHI loader in zImage (EXPERIMENTAL)"
+	help
+	  Load image from SDHI hardware block.
+
+endchoice

 config CMDLINE
	string "Default kernel command string"
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 23aad0722303..0c74a6fab952 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -6,13 +6,19 @@

 OBJS		=

-# Ensure that mmcif loader code appears early in the image
+# Ensure that MMCIF loader code appears early in the image
 # to minimise that number of bocks that have to be read in
 # order to load it.
 ifeq ($(CONFIG_ZBOOT_ROM_MMCIF),y)
-ifeq ($(CONFIG_ARCH_SH7372),y)
 OBJS		+= mmcif-sh7372.o
 endif
+
+# Ensure that SDHI loader code appears early in the image
+# to minimise the number of blocks that have to be read in
+# order to load it.
+ifeq ($(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI),y)
+OBJS		+= sdhi-shmobile.o
+OBJS		+= sdhi-sh7372.o
 endif

 AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET)
diff --git a/arch/arm/boot/compressed/head-shmobile.S b/arch/arm/boot/compressed/head-shmobile.S
index c943d2e7da9d..fe3719b516fd 100644
--- a/arch/arm/boot/compressed/head-shmobile.S
+++ b/arch/arm/boot/compressed/head-shmobile.S
@@ -25,14 +25,14 @@

	/* load board-specific initialization code */
 #include 

-#ifdef CONFIG_ZBOOT_ROM_MMCIF
-	/* Load image from MMC */
-	adr	sp, __tmp_stack + 128
+#if defined(CONFIG_ZBOOT_ROM_MMCIF) || defined(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI)
+	/* Load image from MMC/SD */
+	adr	sp, __tmp_stack + 256
	ldr	r0, __image_start
	ldr	r1, __image_end
	subs	r1, r1, r0
	ldr	r0, __load_base
-	bl	mmcif_loader
+	bl	mmc_loader

	/* Jump to loaded code */
	ldr	r0, __loaded
@@ -51,9 +51,9 @@ __loaded:
	.long	__continue
	.align
 __tmp_stack:
-	.space	128
+	.space	256
 __continue:
-#endif /* CONFIG_ZBOOT_ROM_MMCIF */
+#endif /* CONFIG_ZBOOT_ROM_MMCIF || CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI */

	b	1f
 __atags:@ tag #1
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 940b20178107..e95a5989602a 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -353,7 +353,8 @@ not_relocated:	mov	r0, #0
		mov	r0, #0			@ must be zero
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer
-		mov	pc, r4			@ call kernel
+ ARM(		mov	pc, r4	)		@ call kernel
+ THUMB(		bx	r4	)		@ entry point is always ARM
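+		@ (the entry point in r4 has bit 0 clear, so for a Thumb-2
+		@ kernel the bx above switches the CPU back to ARM state,
+		@ as Documentation/arm/Booting requires)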
		.align	2
		.type	LC0, #object
diff --git a/arch/arm/boot/compressed/mmcif-sh7372.c b/arch/arm/boot/compressed/mmcif-sh7372.c
index 7453c8337b83..b6f61d9a5a1b 100644
--- a/arch/arm/boot/compressed/mmcif-sh7372.c
+++ b/arch/arm/boot/compressed/mmcif-sh7372.c
@@ -40,7 +40,7 @@
  * to an MMC card
  * # dd if=vrl4.out of=/dev/sdx bs=512 seek=1
  */
-asmlinkage void mmcif_loader(unsigned char *buf, unsigned long len)
+asmlinkage void mmc_loader(unsigned char *buf, unsigned long len)
 {
	mmc_init_progress();
	mmc_update_progress(MMC_PROGRESS_ENTER);
diff --git a/arch/arm/boot/compressed/sdhi-sh7372.c b/arch/arm/boot/compressed/sdhi-sh7372.c
new file mode 100644
index 000000000000..d403a8b24d7f
--- /dev/null
+++ b/arch/arm/boot/compressed/sdhi-sh7372.c
@@ -0,0 +1,95 @@
+/*
+ * SuperH Mobile SDHI
+ *
+ * Copyright (C) 2010 Magnus Damm
+ * Copyright (C) 2010 Kuninori Morimoto
+ * Copyright (C) 2010 Simon Horman
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Parts inspired by u-boot
+ */
+
+#include 
+#include 
+#include 
+#include 
+
+#include "sdhi-shmobile.h"
+
+#define PORT179CR       0xe60520b3
+#define PORT180CR       0xe60520b4
+#define PORT181CR       0xe60520b5
+#define PORT182CR       0xe60520b6
+#define PORT183CR       0xe60520b7
+#define PORT184CR       0xe60520b8
+
+#define SMSTPCR3        0xe615013c
+
+#define CR_INPUT_ENABLE 0x10
+#define CR_FUNCTION1    0x01
+
+#define SDHI1_BASE	(void __iomem *)0xe6860000
+#define SDHI_BASE	SDHI1_BASE
+
+/* SuperH Mobile SDHI loader
+ *
+ * loads the zImage from an SD card starting from block 0
+ * on physical partition 1
+ *
+ * The image must start with a vrl4 header and
+ * the zImage must start at offset 512 of the image.  That is,
+ * at block 1 (=byte 512) of physical partition 1
+ *
+ * Use the following line to write the vrl4 formatted zImage
+ * to an SD card
+ * # dd if=vrl4.out of=/dev/sdx bs=512
+ */
+asmlinkage void mmc_loader(unsigned short *buf, unsigned long len)
+{
+	int high_capacity;
+
+	mmc_init_progress();
+
+	mmc_update_progress(MMC_PROGRESS_ENTER);
+	/* Initialise SDHI1 */
+	/* PORT184CR: GPIO_FN_SDHICMD1 Control */
+	__raw_writeb(CR_FUNCTION1, PORT184CR);
+	/* PORT179CR: GPIO_FN_SDHICLK1 Control */
+	__raw_writeb(CR_INPUT_ENABLE|CR_FUNCTION1, PORT179CR);
+	/* PORT183CR: GPIO_FN_SDHID1_3 Control */
+	__raw_writeb(CR_FUNCTION1, PORT183CR);
+	/* PORT182CR: GPIO_FN_SDHID1_2 Control */
+	__raw_writeb(CR_FUNCTION1, PORT182CR);
+	/* PORT181CR: GPIO_FN_SDHID1_1 Control */
+	__raw_writeb(CR_FUNCTION1, PORT181CR);
+	/* PORT180CR: GPIO_FN_SDHID1_0 Control */
+	__raw_writeb(CR_FUNCTION1, PORT180CR);
+
+	/* Enable clock to SDHI1 hardware block (clear the module-stop bit) */
+	__raw_writel(__raw_readl(SMSTPCR3) & ~(1 << 13), SMSTPCR3);
+
+	/* setup SDHI hardware */
+	mmc_update_progress(MMC_PROGRESS_INIT);
+	high_capacity = sdhi_boot_init(SDHI_BASE);
+	if (high_capacity < 0)
+		goto err;
+
+	mmc_update_progress(MMC_PROGRESS_LOAD);
+	/* load kernel */
+	if (sdhi_boot_do_read(SDHI_BASE, high_capacity,
+			      0, /* Kernel is at block 1 */
+			      (len + TMIO_BBS - 1) / TMIO_BBS, buf))
+		goto err;
+
+	/* Disable clock to SDHI1 hardware block (set the module-stop bit) */
+	__raw_writel(__raw_readl(SMSTPCR3) | (1 << 13), SMSTPCR3);
+
+	mmc_update_progress(MMC_PROGRESS_DONE);
+
+	return;
+err:
+	for(;;);
+}
diff --git a/arch/arm/boot/compressed/sdhi-shmobile.c b/arch/arm/boot/compressed/sdhi-shmobile.c
new file mode 100644
index 000000000000..bd3d46980955
--- /dev/null
+++ b/arch/arm/boot/compressed/sdhi-shmobile.c
@@ -0,0 +1,449 @@
+/*
+ * SuperH Mobile SDHI
+ *
+ * Copyright (C) 2010 Magnus Damm
+ * Copyright (C) 2010 Kuninori Morimoto
+ * Copyright (C) 2010 Simon Horman
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ * + * Parts inspired by u-boot + */ + +#include +#include +#include +#include +#include +#include +#include + +#define OCR_FASTBOOT (1<<29) +#define OCR_HCS (1<<30) +#define OCR_BUSY (1<<31) + +#define RESP_CMD12 0x00000030 + +static inline u16 sd_ctrl_read16(void __iomem *base, int addr) +{ + return __raw_readw(base + addr); +} + +static inline u32 sd_ctrl_read32(void __iomem *base, int addr) +{ + return __raw_readw(base + addr) | + __raw_readw(base + addr + 2) << 16; +} + +static inline void sd_ctrl_write16(void __iomem *base, int addr, u16 val) +{ + __raw_writew(val, base + addr); +} + +static inline void sd_ctrl_write32(void __iomem *base, int addr, u32 val) +{ + __raw_writew(val, base + addr); + __raw_writew(val >> 16, base + addr + 2); +} + +#define ALL_ERROR (TMIO_STAT_CMD_IDX_ERR | TMIO_STAT_CRCFAIL | \ + TMIO_STAT_STOPBIT_ERR | TMIO_STAT_DATATIMEOUT | \ + TMIO_STAT_RXOVERFLOW | TMIO_STAT_TXUNDERRUN | \ + TMIO_STAT_CMDTIMEOUT | TMIO_STAT_ILL_ACCESS | \ + TMIO_STAT_ILL_FUNC) + +static int sdhi_intr(void __iomem *base) +{ + unsigned long state = sd_ctrl_read32(base, CTL_STATUS); + + if (state & ALL_ERROR) { + sd_ctrl_write32(base, CTL_STATUS, ~ALL_ERROR); + sd_ctrl_write32(base, CTL_IRQ_MASK, + ALL_ERROR | + sd_ctrl_read32(base, CTL_IRQ_MASK)); + return -EINVAL; + } + if (state & TMIO_STAT_CMDRESPEND) { + sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND); + sd_ctrl_write32(base, CTL_IRQ_MASK, + TMIO_STAT_CMDRESPEND | + sd_ctrl_read32(base, CTL_IRQ_MASK)); + return 0; + } + if (state & TMIO_STAT_RXRDY) { + sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_RXRDY); + sd_ctrl_write32(base, CTL_IRQ_MASK, + TMIO_STAT_RXRDY | TMIO_STAT_TXUNDERRUN | + sd_ctrl_read32(base, CTL_IRQ_MASK)); + return 0; + } + if (state & TMIO_STAT_DATAEND) { + sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_DATAEND); + sd_ctrl_write32(base, CTL_IRQ_MASK, + TMIO_STAT_DATAEND | + sd_ctrl_read32(base, CTL_IRQ_MASK)); + return 0; + } + + return -EAGAIN; +} + +static int sdhi_boot_wait_resp_end(void __iomem *base) +{ + int err = -EAGAIN, timeout = 10000000; + + while (timeout--) { + err = sdhi_intr(base); + if (err != -EAGAIN) + break; + udelay(1); + } + + return err; +} + +/* SDHI_CLK_CTRL */ +#define CLK_MMC_ENABLE (1 << 8) +#define CLK_MMC_INIT (1 << 6) /* clk / 256 */ + +static void sdhi_boot_mmc_clk_stop(void __iomem *base) +{ + sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, 0x0000); + msleep(10); + sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, ~CLK_MMC_ENABLE & + sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL)); + msleep(10); +} + +static void sdhi_boot_mmc_clk_start(void __iomem *base) +{ + sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, CLK_MMC_ENABLE | + sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL)); + msleep(10); + sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, CLK_MMC_ENABLE); + msleep(10); +} + +static void sdhi_boot_reset(void __iomem *base) +{ + sd_ctrl_write16(base, CTL_RESET_SD, 0x0000); + msleep(10); + sd_ctrl_write16(base, CTL_RESET_SD, 0x0001); + msleep(10); +} + +/* Set MMC clock / power. + * Note: This controller uses a simple divider scheme therefore it cannot + * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as + * MMC wont run that fast, it has to be clocked at 12MHz which is the next + * slowest setting. 
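+ * During card identification sdhi_boot_init() instead selects the
+ * CLK_MMC_INIT divider (clk / 256), which is presumably what produces
+ * the roughly 400kHz identification clock noted in the comment there.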
+ */ +static int sdhi_boot_mmc_set_ios(void __iomem *base, struct mmc_ios *ios) +{ + if (sd_ctrl_read32(base, CTL_STATUS) & TMIO_STAT_CMD_BUSY) + return -EBUSY; + + if (ios->clock) + sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, + ios->clock | CLK_MMC_ENABLE); + + /* Power sequence - OFF -> ON -> UP */ + switch (ios->power_mode) { + case MMC_POWER_OFF: /* power down SD bus */ + sdhi_boot_mmc_clk_stop(base); + break; + case MMC_POWER_ON: /* power up SD bus */ + break; + case MMC_POWER_UP: /* start bus clock */ + sdhi_boot_mmc_clk_start(base); + break; + } + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x80e0); + break; + case MMC_BUS_WIDTH_4: + sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x00e0); + break; + } + + /* Let things settle. delay taken from winCE driver */ + udelay(140); + + return 0; +} + +/* These are the bitmasks the tmio chip requires to implement the MMC response + * types. Note that R1 and R6 are the same in this scheme. */ +#define RESP_NONE 0x0300 +#define RESP_R1 0x0400 +#define RESP_R1B 0x0500 +#define RESP_R2 0x0600 +#define RESP_R3 0x0700 +#define DATA_PRESENT 0x0800 +#define TRANSFER_READ 0x1000 + +static int sdhi_boot_request(void __iomem *base, struct mmc_command *cmd) +{ + int err, c = cmd->opcode; + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: c |= RESP_NONE; break; + case MMC_RSP_R1: c |= RESP_R1; break; + case MMC_RSP_R1B: c |= RESP_R1B; break; + case MMC_RSP_R2: c |= RESP_R2; break; + case MMC_RSP_R3: c |= RESP_R3; break; + default: + return -EINVAL; + } + + /* No interrupts so this may not be cleared */ + sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND); + + sd_ctrl_write32(base, CTL_IRQ_MASK, TMIO_STAT_CMDRESPEND | + sd_ctrl_read32(base, CTL_IRQ_MASK)); + sd_ctrl_write32(base, CTL_ARG_REG, cmd->arg); + sd_ctrl_write16(base, CTL_SD_CMD, c); + + + sd_ctrl_write32(base, CTL_IRQ_MASK, + ~(TMIO_STAT_CMDRESPEND | ALL_ERROR) & + sd_ctrl_read32(base, CTL_IRQ_MASK)); + + err = sdhi_boot_wait_resp_end(base); + if (err) + return err; + + cmd->resp[0] = sd_ctrl_read32(base, CTL_RESPONSE); + + return 0; +} + +static int sdhi_boot_do_read_single(void __iomem *base, int high_capacity, + unsigned long block, unsigned short *buf) +{ + int err, i; + + /* CMD17 - Read */ + { + struct mmc_command cmd; + + cmd.opcode = MMC_READ_SINGLE_BLOCK | \ + TRANSFER_READ | DATA_PRESENT; + if (high_capacity) + cmd.arg = block; + else + cmd.arg = block * TMIO_BBS; + cmd.flags = MMC_RSP_R1; + err = sdhi_boot_request(base, &cmd); + if (err) + return err; + } + + sd_ctrl_write32(base, CTL_IRQ_MASK, + ~(TMIO_STAT_DATAEND | TMIO_STAT_RXRDY | + TMIO_STAT_TXUNDERRUN) & + sd_ctrl_read32(base, CTL_IRQ_MASK)); + err = sdhi_boot_wait_resp_end(base); + if (err) + return err; + + sd_ctrl_write16(base, CTL_SD_XFER_LEN, TMIO_BBS); + for (i = 0; i < TMIO_BBS / sizeof(*buf); i++) + *buf++ = sd_ctrl_read16(base, RESP_CMD12); + + err = sdhi_boot_wait_resp_end(base); + if (err) + return err; + + return 0; +} + +int sdhi_boot_do_read(void __iomem *base, int high_capacity, + unsigned long offset, unsigned short count, + unsigned short *buf) +{ + unsigned long i; + int err = 0; + + for (i = 0; i < count; i++) { + err = sdhi_boot_do_read_single(base, high_capacity, offset + i, + buf + (i * TMIO_BBS / + sizeof(*buf))); + if (err) + return err; + } + + return 0; +} + +#define VOLTAGES (MMC_VDD_32_33 | MMC_VDD_33_34) + +int sdhi_boot_init(void __iomem *base) +{ + bool sd_v2 = false, sd_v1_0 = false; + unsigned short cid; + int err, high_capacity = 0; + + 
sdhi_boot_mmc_clk_stop(base); + sdhi_boot_reset(base); + + /* mmc0: clock 400000Hz busmode 1 powermode 2 cs 0 Vdd 21 width 0 timing 0 */ + { + struct mmc_ios ios; + ios.power_mode = MMC_POWER_ON; + ios.bus_width = MMC_BUS_WIDTH_1; + ios.clock = CLK_MMC_INIT; + err = sdhi_boot_mmc_set_ios(base, &ios); + if (err) + return err; + } + + /* CMD0 */ + { + struct mmc_command cmd; + msleep(1); + cmd.opcode = MMC_GO_IDLE_STATE; + cmd.arg = 0; + cmd.flags = MMC_RSP_NONE; + err = sdhi_boot_request(base, &cmd); + if (err) + return err; + msleep(2); + } + + /* CMD8 - Test for SD version 2 */ + { + struct mmc_command cmd; + cmd.opcode = SD_SEND_IF_COND; + cmd.arg = (VOLTAGES != 0) << 8 | 0xaa; + cmd.flags = MMC_RSP_R1; + err = sdhi_boot_request(base, &cmd); /* Ignore error */ + if ((cmd.resp[0] & 0xff) == 0xaa) + sd_v2 = true; + } + + /* CMD55 - Get OCR (SD) */ + { + int timeout = 1000; + struct mmc_command cmd; + + cmd.arg = 0; + + do { + cmd.opcode = MMC_APP_CMD; + cmd.flags = MMC_RSP_R1; + cmd.arg = 0; + err = sdhi_boot_request(base, &cmd); + if (err) + break; + + cmd.opcode = SD_APP_OP_COND; + cmd.flags = MMC_RSP_R3; + cmd.arg = (VOLTAGES & 0xff8000); + if (sd_v2) + cmd.arg |= OCR_HCS; + cmd.arg |= OCR_FASTBOOT; + err = sdhi_boot_request(base, &cmd); + if (err) + break; + + msleep(1); + } while((!(cmd.resp[0] & OCR_BUSY)) && --timeout); + + if (!err && timeout) { + if (!sd_v2) + sd_v1_0 = true; + high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS; + } + } + + /* CMD1 - Get OCR (MMC) */ + if (!sd_v2 && !sd_v1_0) { + int timeout = 1000; + struct mmc_command cmd; + + do { + cmd.opcode = MMC_SEND_OP_COND; + cmd.arg = VOLTAGES | OCR_HCS; + cmd.flags = MMC_RSP_R3; + err = sdhi_boot_request(base, &cmd); + if (err) + return err; + + msleep(1); + } while((!(cmd.resp[0] & OCR_BUSY)) && --timeout); + + if (!timeout) + return -EAGAIN; + + high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS; + } + + /* CMD2 - Get CID */ + { + struct mmc_command cmd; + cmd.opcode = MMC_ALL_SEND_CID; + cmd.arg = 0; + cmd.flags = MMC_RSP_R2; + err = sdhi_boot_request(base, &cmd); + if (err) + return err; + } + + /* CMD3 + * MMC: Set the relative address + * SD: Get the relative address + * Also puts the card into the standby state + */ + { + struct mmc_command cmd; + cmd.opcode = MMC_SET_RELATIVE_ADDR; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1; + err = sdhi_boot_request(base, &cmd); + if (err) + return err; + cid = cmd.resp[0] >> 16; + } + + /* CMD9 - Get CSD */ + { + struct mmc_command cmd; + cmd.opcode = MMC_SEND_CSD; + cmd.arg = cid << 16; + cmd.flags = MMC_RSP_R2; + err = sdhi_boot_request(base, &cmd); + if (err) + return err; + } + + /* CMD7 - Select the card */ + { + struct mmc_command cmd; + cmd.opcode = MMC_SELECT_CARD; + //cmd.arg = rca << 16; + cmd.arg = cid << 16; + //cmd.flags = MMC_RSP_R1B; + cmd.flags = MMC_RSP_R1; + err = sdhi_boot_request(base, &cmd); + if (err) + return err; + } + + /* CMD16 - Set the block size */ + { + struct mmc_command cmd; + cmd.opcode = MMC_SET_BLOCKLEN; + cmd.arg = TMIO_BBS; + cmd.flags = MMC_RSP_R1; + err = sdhi_boot_request(base, &cmd); + if (err) + return err; + } + + return high_capacity; +} diff --git a/arch/arm/boot/compressed/sdhi-shmobile.h b/arch/arm/boot/compressed/sdhi-shmobile.h new file mode 100644 index 000000000000..92eaa09f985e --- /dev/null +++ b/arch/arm/boot/compressed/sdhi-shmobile.h @@ -0,0 +1,11 @@ +#ifndef SDHI_MOBILE_H +#define SDHI_MOBILE_H + +#include + +int sdhi_boot_do_read(void __iomem *base, int high_capacity, + unsigned long offset, unsigned short count, + 
unsigned short *buf); +int sdhi_boot_init(void __iomem *base); + +#endif diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in index ea80abe78844..4e728834a1b9 100644 --- a/arch/arm/boot/compressed/vmlinux.lds.in +++ b/arch/arm/boot/compressed/vmlinux.lds.in @@ -33,20 +33,24 @@ SECTIONS *(.text.*) *(.fixup) *(.gnu.warning) + *(.glue_7t) + *(.glue_7) + } + .rodata : { *(.rodata) *(.rodata.*) - *(.glue_7) - *(.glue_7t) + } + .piggydata : { *(.piggydata) - . = ALIGN(4); } + . = ALIGN(4); _etext = .; + .got.plt : { *(.got.plt) } _got_start = .; .got : { *(.got) } _got_end = .; - .got.plt : { *(.got.plt) } _edata = .; . = BSS_START; diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 841df7d21c2f..595ecd290ebf 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -79,6 +79,8 @@ struct dmabounce_device_info { struct dmabounce_pool large; rwlock_t lock; + + int (*needs_bounce)(struct device *, dma_addr_t, size_t); }; #ifdef STATS @@ -210,114 +212,91 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev, if (!dev || !dev->archdata.dmabounce) return NULL; if (dma_mapping_error(dev, dma_addr)) { - if (dev) - dev_err(dev, "Trying to %s invalid mapping\n", where); - else - pr_err("unknown device: Trying to %s invalid mapping\n", where); + dev_err(dev, "Trying to %s invalid mapping\n", where); return NULL; } return find_safe_buffer(dev->archdata.dmabounce, dma_addr); } +static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) +{ + if (!dev || !dev->archdata.dmabounce) + return 0; + + if (dev->dma_mask) { + unsigned long limit, mask = *dev->dma_mask; + + limit = (mask + 1) & ~mask; + if (limit && size > limit) { + dev_err(dev, "DMA mapping too big (requested %#x " + "mask %#Lx)\n", size, *dev->dma_mask); + return -E2BIG; + } + + /* Figure out if we need to bounce from the DMA mask. */ + if ((dma_addr | (dma_addr + size - 1)) & ~mask) + return 1; + } + + return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size); +} + static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir) { struct dmabounce_device_info *device_info = dev->archdata.dmabounce; - dma_addr_t dma_addr; - int needs_bounce = 0; + struct safe_buffer *buf; if (device_info) DO_STATS ( device_info->map_op_count++ ); - dma_addr = virt_to_dma(dev, ptr); - - if (dev->dma_mask) { - unsigned long mask = *dev->dma_mask; - unsigned long limit; - - limit = (mask + 1) & ~mask; - if (limit && size > limit) { - dev_err(dev, "DMA mapping too big (requested %#x " - "mask %#Lx)\n", size, *dev->dma_mask); - return ~0; - } - - /* - * Figure out if we need to bounce from the DMA mask. 
- */ - needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask; + buf = alloc_safe_buffer(device_info, ptr, size, dir); + if (buf == NULL) { + dev_err(dev, "%s: unable to map unsafe buffer %p!\n", + __func__, ptr); + return ~0; } - if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) { - struct safe_buffer *buf; + dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", + __func__, buf->ptr, virt_to_dma(dev, buf->ptr), + buf->safe, buf->safe_dma_addr); - buf = alloc_safe_buffer(device_info, ptr, size, dir); - if (buf == 0) { - dev_err(dev, "%s: unable to map unsafe buffer %p!\n", - __func__, ptr); - return ~0; - } - - dev_dbg(dev, - "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", - __func__, buf->ptr, virt_to_dma(dev, buf->ptr), - buf->safe, buf->safe_dma_addr); - - if ((dir == DMA_TO_DEVICE) || - (dir == DMA_BIDIRECTIONAL)) { - dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n", - __func__, ptr, buf->safe, size); - memcpy(buf->safe, ptr, size); - } - ptr = buf->safe; - - dma_addr = buf->safe_dma_addr; - } else { - /* - * We don't need to sync the DMA buffer since - * it was allocated via the coherent allocators. - */ - __dma_single_cpu_to_dev(ptr, size, dir); + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) { + dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n", + __func__, ptr, buf->safe, size); + memcpy(buf->safe, ptr, size); } - return dma_addr; + return buf->safe_dma_addr; } -static inline void unmap_single(struct device *dev, dma_addr_t dma_addr, +static inline void unmap_single(struct device *dev, struct safe_buffer *buf, size_t size, enum dma_data_direction dir) { - struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap"); + BUG_ON(buf->size != size); + BUG_ON(buf->direction != dir); - if (buf) { - BUG_ON(buf->size != size); - BUG_ON(buf->direction != dir); + dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", + __func__, buf->ptr, virt_to_dma(dev, buf->ptr), + buf->safe, buf->safe_dma_addr); - dev_dbg(dev, - "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", - __func__, buf->ptr, virt_to_dma(dev, buf->ptr), - buf->safe, buf->safe_dma_addr); + DO_STATS(dev->archdata.dmabounce->bounce_count++); - DO_STATS(dev->archdata.dmabounce->bounce_count++); + if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { + void *ptr = buf->ptr; - if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { - void *ptr = buf->ptr; + dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n", + __func__, buf->safe, ptr, size); + memcpy(ptr, buf->safe, size); - dev_dbg(dev, - "%s: copy back safe %p to unsafe %p size %d\n", - __func__, buf->safe, ptr, size); - memcpy(ptr, buf->safe, size); - - /* - * Since we may have written to a page cache page, - * we need to ensure that the data will be coherent - * with user mappings. - */ - __cpuc_flush_dcache_area(ptr, size); - } - free_safe_buffer(dev->archdata.dmabounce, buf); - } else { - __dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir); + /* + * Since we may have written to a page cache page, + * we need to ensure that the data will be coherent + * with user mappings. + */ + __cpuc_flush_dcache_area(ptr, size); } + free_safe_buffer(dev->archdata.dmabounce, buf); } /* ************************************************** */ @@ -328,45 +307,28 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr, * substitute the safe buffer for the unsafe one. 
* (basically move the buffer from an unsafe area to a safe one) */ -dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size, - enum dma_data_direction dir) -{ - dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", - __func__, ptr, size, dir); - - BUG_ON(!valid_dma_direction(dir)); - - return map_single(dev, ptr, size, dir); -} -EXPORT_SYMBOL(__dma_map_single); - -/* - * see if a mapped address was really a "safe" buffer and if so, copy - * the data from the safe buffer back to the unsafe buffer and free up - * the safe buffer. (basically return things back to the way they - * should be) - */ -void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction dir) -{ - dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", - __func__, (void *) dma_addr, size, dir); - - unmap_single(dev, dma_addr, size, dir); -} -EXPORT_SYMBOL(__dma_unmap_single); - dma_addr_t __dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir) { + dma_addr_t dma_addr; + int ret; + dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n", __func__, page, offset, size, dir); - BUG_ON(!valid_dma_direction(dir)); + dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset; + + ret = needs_bounce(dev, dma_addr, size); + if (ret < 0) + return ~0; + + if (ret == 0) { + __dma_page_cpu_to_dev(page, offset, size, dir); + return dma_addr; + } if (PageHighMem(page)) { - dev_err(dev, "DMA buffer bouncing of HIGHMEM pages " - "is not supported\n"); + dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n"); return ~0; } @@ -383,10 +345,19 @@ EXPORT_SYMBOL(__dma_map_page); void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir) { - dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", - __func__, (void *) dma_addr, size, dir); + struct safe_buffer *buf; - unmap_single(dev, dma_addr, size, dir); + dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n", + __func__, dma_addr, size, dir); + + buf = find_safe_buffer_dev(dev, dma_addr, __func__); + if (!buf) { + __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)), + dma_addr & ~PAGE_MASK, size, dir); + return; + } + + unmap_single(dev, buf, size, dir); } EXPORT_SYMBOL(__dma_unmap_page); @@ -461,7 +432,8 @@ static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, } int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, - unsigned long large_buffer_size) + unsigned long large_buffer_size, + int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t)) { struct dmabounce_device_info *device_info; int ret; @@ -497,6 +469,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, device_info->dev = dev; INIT_LIST_HEAD(&device_info->safe_buffers); rwlock_init(&device_info->lock); + device_info->needs_bounce = needs_bounce_fn; #ifdef STATS device_info->total_allocs = 0; diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index 4ddd0a6ac7ff..7bdd91766d65 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c @@ -179,22 +179,21 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, { void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); unsigned int shift = (d->irq % 4) * 8; - unsigned int cpu = cpumask_first(mask_val); + unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); u32 val, mask, bit; - if (cpu >= 8) + if (cpu >= 8 || cpu >= nr_cpu_ids) return -EINVAL; mask = 0xff << shift; bit = 1 << (cpu + shift); 
	spin_lock(&irq_controller_lock);
-	d->node = cpu;
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	spin_unlock(&irq_controller_lock);

-	return 0;
+	return IRQ_SET_MASK_OK;
 }
 #endif

diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 7a21927c52e1..14ad62e16dd1 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -243,6 +243,12 @@ static struct resource it8152_mem = {
  * ITE8152 chip can address up to 64MByte, so all the devices
  * connected to ITE8152 (PCI and USB) should have limited DMA window
  */
+static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+	dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+		__func__, dma_addr, size);
+	return (dma_addr + size - PHYS_OFFSET) >= SZ_64M;
+}

 /*
  * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all
@@ -254,7 +260,7 @@ static int it8152_pci_platform_notify(struct device *dev)
		if (dev->dma_mask)
			*dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
		dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
-		dmabounce_register_dev(dev, 2048, 4096);
+		dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce);
	}
	return 0;
 }
@@ -267,14 +273,6 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
	return 0;
 }

-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
-	dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
-		__func__, dma_addr, size);
-	return (dev->bus == &pci_bus_type) &&
-		((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
-}
-
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
	if (mask >= PHYS_OFFSET + SZ_64M - 1)
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 9c49a46a2b7a..0569de6acfba 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -579,7 +579,36 @@ sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac,

	sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2];
 }
+#endif
+#ifdef CONFIG_DMABOUNCE
+/*
+ * According to the "Intel StrongARM SA-1111 Microprocessor Companion
+ * Chip Specification Update" (June 2000), erratum #7, there is a
+ * significant bug in the SA1111 SDRAM shared memory controller.  If
+ * an access is made to a region of memory above 1MB relative to the
+ * bank base, it is important that address bit 10 _NOT_ be asserted.
+ * Depending on the configuration of the RAM, bit 10 may correspond
+ * to one of several different (processor-relative) address bits.
+ *
+ * This routine only identifies whether or not a given DMA address
+ * is susceptible to the bug.
+ *
+ * This should only get called for sa1111_device types due to the
+ * way we configure our device dma_masks.
+ */
+static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
+{
+	/*
+	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
+	 * User's Guide" mentions that jumpers R51 and R52 control the
+	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
+	 * SDRAM bank 1 on Neponset).  The default configuration selects
+	 * Assabet, so any address in bank 1 is necessarily invalid.
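+	 *
+	 * (On these SA-1100 based boards SDRAM bank 0 is decoded at
+	 * physical address 0xc0000000 and bank 1 at 0xc8000000, which
+	 * is why the test below checks against 0xc8000000.)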
+ */ + return (machine_is_assabet() || machine_is_pfs168()) && + (addr >= 0xc8000000 || (addr + size) >= 0xc8000000); +} #endif static void sa1111_dev_release(struct device *_dev) @@ -644,7 +673,8 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent, dev->dev.dma_mask = &dev->dma_mask; if (dev->dma_mask != 0xffffffffUL) { - ret = dmabounce_register_dev(&dev->dev, 1024, 4096); + ret = dmabounce_register_dev(&dev->dev, 1024, 4096, + sa1111_needs_bounce); if (ret) { dev_err(&dev->dev, "SA1111: Failed to register" " with dmabounce\n"); @@ -818,34 +848,6 @@ static void __sa1111_remove(struct sa1111 *sachip) kfree(sachip); } -/* - * According to the "Intel StrongARM SA-1111 Microprocessor Companion - * Chip Specification Update" (June 2000), erratum #7, there is a - * significant bug in the SA1111 SDRAM shared memory controller. If - * an access to a region of memory above 1MB relative to the bank base, - * it is important that address bit 10 _NOT_ be asserted. Depending - * on the configuration of the RAM, bit 10 may correspond to one - * of several different (processor-relative) address bits. - * - * This routine only identifies whether or not a given DMA address - * is susceptible to the bug. - * - * This should only get called for sa1111_device types due to the - * way we configure our device dma_masks. - */ -int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size) -{ - /* - * Section 4.6 of the "Intel StrongARM SA-1111 Development Module - * User's Guide" mentions that jumpers R51 and R52 control the - * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or - * SDRAM bank 1 on Neponset). The default configuration selects - * Assabet, so any address in bank 1 is necessarily invalid. - */ - return ((machine_is_assabet() || machine_is_pfs168()) && - (addr >= 0xc8000000 || (addr + size) >= 0xc8000000)); -} - struct sa1111_save_data { unsigned int skcr; unsigned int skpcr; diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 65c3f2474f5e..29035e86a59d 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -293,4 +293,13 @@ .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort .endm + +/* Utility macro for declaring string literals */ + .macro string name:req, string + .type \name , #object +\name: + .asciz "\string" + .size \name , . - \name + .endm + #endif /* __ASM_ASSEMBLER_H__ */ diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index b4892a06442c..f4280593dfa3 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -26,8 +26,8 @@ #include #include -#define smp_mb__before_clear_bit() mb() -#define smp_mb__after_clear_bit() mb() +#define smp_mb__before_clear_bit() smp_mb() +#define smp_mb__after_clear_bit() smp_mb() /* * These functions are the basis of our bit ops. diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 4fff837363ed..7a21d0bf7134 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -115,39 +115,8 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, ___dma_page_dev_to_cpu(page, off, size, dir); } -/* - * Return whether the given device DMA address mask can be supported - * properly. For example, if your device can only drive the low 24-bits - * during bus mastering, then you would pass 0x00ffffff as the mask - * to this function. 
- * - * FIXME: This should really be a platform specific issue - we should - * return false if GFP_DMA allocations may not satisfy the supplied 'mask'. - */ -static inline int dma_supported(struct device *dev, u64 mask) -{ - if (mask < ISA_DMA_THRESHOLD) - return 0; - return 1; -} - -static inline int dma_set_mask(struct device *dev, u64 dma_mask) -{ -#ifdef CONFIG_DMABOUNCE - if (dev->archdata.dmabounce) { - if (dma_mask >= ISA_DMA_THRESHOLD) - return 0; - else - return -EIO; - } -#endif - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) - return -EIO; - - *dev->dma_mask = dma_mask; - - return 0; -} +extern int dma_supported(struct device *, u64); +extern int dma_set_mask(struct device *, u64); /* * DMA errors are defined by all-bits-set in the DMA address. @@ -256,14 +225,14 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *, * @dev: valid struct device pointer * @small_buf_size: size of buffers to use with small buffer pool * @large_buf_size: size of buffers to use with large buffer pool (can be 0) + * @needs_bounce_fn: called to determine whether buffer needs bouncing * * This function should be called by low-level platform code to register * a device as requireing DMA buffer bouncing. The function will allocate * appropriate DMA pools for the device. - * */ extern int dmabounce_register_dev(struct device *, unsigned long, - unsigned long); + unsigned long, int (*)(struct device *, dma_addr_t, size_t)); /** * dmabounce_unregister_dev @@ -277,31 +246,9 @@ extern int dmabounce_register_dev(struct device *, unsigned long, */ extern void dmabounce_unregister_dev(struct device *); -/** - * dma_needs_bounce - * - * @dev: valid struct device pointer - * @dma_handle: dma_handle of unbounced buffer - * @size: size of region being mapped - * - * Platforms that utilize the dmabounce mechanism must implement - * this function. - * - * The dmabounce routines call this function whenever a dma-mapping - * is requested to determine whether a given buffer needs to be bounced - * or not. The function must return 0 if the buffer is OK for - * DMA access and 1 if the buffer needs to be bounced. - * - */ -extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); - /* * The DMA API, implemented by dmabounce.c. See below for descriptions. 
*/ -extern dma_addr_t __dma_map_single(struct device *, void *, size_t, - enum dma_data_direction); -extern void __dma_unmap_single(struct device *, dma_addr_t, size_t, - enum dma_data_direction); extern dma_addr_t __dma_map_page(struct device *, struct page *, unsigned long, size_t, enum dma_data_direction); extern void __dma_unmap_page(struct device *, dma_addr_t, size_t, @@ -328,13 +275,6 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr, } -static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr, - size_t size, enum dma_data_direction dir) -{ - __dma_single_cpu_to_dev(cpu_addr, size, dir); - return virt_to_dma(dev, cpu_addr); -} - static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir) { @@ -342,12 +282,6 @@ static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page, return pfn_to_dma(dev, page_to_pfn(page)) + offset; } -static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir) -{ - __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir); -} - static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { @@ -373,14 +307,18 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle, static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir) { + unsigned long offset; + struct page *page; dma_addr_t addr; + BUG_ON(!virt_addr_valid(cpu_addr)); + BUG_ON(!virt_addr_valid(cpu_addr + size - 1)); BUG_ON(!valid_dma_direction(dir)); - addr = __dma_map_single(dev, cpu_addr, size, dir); - debug_dma_map_page(dev, virt_to_page(cpu_addr), - (unsigned long)cpu_addr & ~PAGE_MASK, size, - dir, addr, true); + page = virt_to_page(cpu_addr); + offset = (unsigned long)cpu_addr & ~PAGE_MASK; + addr = __dma_map_page(dev, page, offset, size, dir); + debug_dma_map_page(dev, page, offset, size, dir, addr, true); return addr; } @@ -430,7 +368,7 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { debug_dma_unmap_page(dev, handle, size, dir, true); - __dma_unmap_single(dev, handle, size, dir); + __dma_unmap_page(dev, handle, size, dir); } /** diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index 42005542932b..628670e9d7c9 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h @@ -1,15 +1,16 @@ #ifndef __ASM_ARM_DMA_H #define __ASM_ARM_DMA_H -#include - /* * This is the maximum virtual address which can be DMA'd from. */ -#ifndef ARM_DMA_ZONE_SIZE -#define MAX_DMA_ADDRESS 0xffffffff +#ifndef CONFIG_ZONE_DMA +#define MAX_DMA_ADDRESS 0xffffffffUL #else -#define MAX_DMA_ADDRESS (PAGE_OFFSET + ARM_DMA_ZONE_SIZE) +#define MAX_DMA_ADDRESS ({ \ + extern unsigned long arm_dma_zone_size; \ + arm_dma_zone_size ? \ + (PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; }) #endif #ifdef CONFIG_ISA_DMA_API diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S index 2da8547de6d6..2f1e2098dfe7 100644 --- a/arch/arm/include/asm/entry-macro-multi.S +++ b/arch/arm/include/asm/entry-macro-multi.S @@ -4,8 +4,8 @@ * Interrupt handling. 
Preserves r7, r8, r9 */ .macro arch_irq_handler_default - get_irqnr_preamble r5, lr -1: get_irqnr_and_base r0, r6, r5, lr + get_irqnr_preamble r6, lr +1: get_irqnr_and_base r0, r2, r6, lr movne r1, sp @ @ routine called with r0 = irq number, r1 = struct pt_regs * @@ -17,17 +17,17 @@ /* * XXX * - * this macro assumes that irqstat (r6) and base (r5) are + * this macro assumes that irqstat (r2) and base (r6) are * preserved from get_irqnr_and_base above */ - ALT_SMP(test_for_ipi r0, r6, r5, lr) + ALT_SMP(test_for_ipi r0, r2, r6, lr) ALT_UP_B(9997f) movne r1, sp adrne lr, BSYM(1b) bne do_IPI #ifdef CONFIG_LOCAL_TIMERS - test_for_ltirq r0, r6, r5, lr + test_for_ltirq r0, r2, r6, lr movne r0, sp adrne lr, BSYM(1b) bne do_local_timer @@ -40,7 +40,7 @@ .align 5 .global \symbol_name \symbol_name: - mov r4, lr + mov r8, lr arch_irq_handler_default - mov pc, r4 + mov pc, r8 .endm diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h index c1062c317103..c93a22a8b924 100644 --- a/arch/arm/include/asm/hwcap.h +++ b/arch/arm/include/asm/hwcap.h @@ -4,22 +4,26 @@ /* * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP */ -#define HWCAP_SWP 1 -#define HWCAP_HALF 2 -#define HWCAP_THUMB 4 -#define HWCAP_26BIT 8 /* Play it safe */ -#define HWCAP_FAST_MULT 16 -#define HWCAP_FPA 32 -#define HWCAP_VFP 64 -#define HWCAP_EDSP 128 -#define HWCAP_JAVA 256 -#define HWCAP_IWMMXT 512 -#define HWCAP_CRUNCH 1024 -#define HWCAP_THUMBEE 2048 -#define HWCAP_NEON 4096 -#define HWCAP_VFPv3 8192 -#define HWCAP_VFPv3D16 16384 -#define HWCAP_TLS 32768 +#define HWCAP_SWP (1 << 0) +#define HWCAP_HALF (1 << 1) +#define HWCAP_THUMB (1 << 2) +#define HWCAP_26BIT (1 << 3) /* Play it safe */ +#define HWCAP_FAST_MULT (1 << 4) +#define HWCAP_FPA (1 << 5) +#define HWCAP_VFP (1 << 6) +#define HWCAP_EDSP (1 << 7) +#define HWCAP_JAVA (1 << 8) +#define HWCAP_IWMMXT (1 << 9) +#define HWCAP_CRUNCH (1 << 10) +#define HWCAP_THUMBEE (1 << 11) +#define HWCAP_NEON (1 << 12) +#define HWCAP_VFPv3 (1 << 13) +#define HWCAP_VFPv3D16 (1 << 14) +#define HWCAP_TLS (1 << 15) +#define HWCAP_VFPv4 (1 << 16) +#define HWCAP_IDIVA (1 << 17) +#define HWCAP_IDIVT (1 << 18) +#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) #if defined(__KERNEL__) && !defined(__ASSEMBLY__) /* diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h index e46bdd0097eb..feec86768f9c 100644 --- a/arch/arm/include/asm/kprobes.h +++ b/arch/arm/include/asm/kprobes.h @@ -24,12 +24,6 @@ #define MAX_INSN_SIZE 2 #define MAX_STACK_SIZE 64 /* 32 would probably be OK */ -/* - * This undefined instruction must be unique and - * reserved solely for kprobes' use. - */ -#define KPROBE_BREAKPOINT_INSTRUCTION 0xe7f001f8 - #define regs_return_value(regs) ((regs)->ARM_r0) #define flush_insn_slot(p) do { } while (0) #define kretprobe_blacklist_size 0 @@ -38,14 +32,17 @@ typedef u32 kprobe_opcode_t; struct kprobe; typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *); - typedef unsigned long (kprobe_check_cc)(unsigned long); +typedef void (kprobe_insn_singlestep_t)(struct kprobe *, struct pt_regs *); +typedef void (kprobe_insn_fn_t)(void); /* Architecture specific copy of original instruction. 
*/ struct arch_specific_insn { - kprobe_opcode_t *insn; - kprobe_insn_handler_t *insn_handler; - kprobe_check_cc *insn_check_cc; + kprobe_opcode_t *insn; + kprobe_insn_handler_t *insn_handler; + kprobe_check_cc *insn_check_cc; + kprobe_insn_singlestep_t *insn_singlestep; + kprobe_insn_fn_t *insn_fn; }; struct prev_kprobe { @@ -62,20 +59,9 @@ struct kprobe_ctlblk { }; void arch_remove_kprobe(struct kprobe *); -void kretprobe_trampoline(void); - int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr); int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); -enum kprobe_insn { - INSN_REJECTED, - INSN_GOOD, - INSN_GOOD_NO_SLOT -}; - -enum kprobe_insn arm_kprobe_decode_insn(kprobe_opcode_t, - struct arch_specific_insn *); -void __init arm_kprobe_decode_init(void); #endif /* _ARM_KPROBES_H */ diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 946f4d778f71..3281fb4b12e3 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h @@ -23,6 +23,10 @@ struct machine_desc { unsigned int nr_irqs; /* number of IRQs */ +#ifdef CONFIG_ZONE_DMA + unsigned long dma_zone_size; /* size of DMA-able area */ +#endif + unsigned int video_start; /* start of video RAM */ unsigned int video_end; /* end of video RAM */ diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index af44a8fb3480..b8de516e600e 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -203,18 +203,6 @@ static inline unsigned long __phys_to_virt(unsigned long x) #define PHYS_OFFSET PLAT_PHYS_OFFSET #endif -/* - * The DMA mask corresponding to the maximum bus address allocatable - * using GFP_DMA. The default here places no restriction on DMA - * allocations. This must be the smallest DMA mask in the system, - * so a successful GFP_DMA allocation will always satisfy this. - */ -#ifndef ARM_DMA_ZONE_SIZE -#define ISA_DMA_THRESHOLD (0xffffffffULL) -#else -#define ISA_DMA_THRESHOLD (PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1) -#endif - /* * PFNs are used to describe any physical page; this means * PFN 0 == physical address 0. diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index c4aa4e8c6af9..0f8e3827a89b 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -24,6 +24,8 @@ enum arm_perf_pmu_ids { ARM_PERF_PMU_ID_V6MP, ARM_PERF_PMU_ID_CA8, ARM_PERF_PMU_ID_CA9, + ARM_PERF_PMU_ID_CA5, + ARM_PERF_PMU_ID_CA15, ARM_NUM_PMU_IDS, }; diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index 7544ce6b481a..67c70a31a1be 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -52,7 +52,7 @@ reserve_pmu(enum arm_pmu_type device); * a cookie. */ extern int -release_pmu(struct platform_device *pdev); +release_pmu(enum arm_pmu_type type); /** * init_pmu() - Initialise the PMU. 
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 8ec535e11fd7..633d1cb84d87 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -82,13 +82,13 @@ extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); #else -#define cpu_proc_init() processor._proc_init() -#define cpu_proc_fin() processor._proc_fin() -#define cpu_reset(addr) processor.reset(addr) -#define cpu_do_idle() processor._do_idle() -#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz) -#define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext) -#define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm) +#define cpu_proc_init processor._proc_init +#define cpu_proc_fin processor._proc_fin +#define cpu_reset processor.reset +#define cpu_do_idle processor._do_idle +#define cpu_dcache_clean_area processor.dcache_clean_area +#define cpu_set_pte_ext processor.set_pte_ext +#define cpu_do_switch_mm processor.switch_mm #endif extern void cpu_resume(void); diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 312d10877bd7..96187ff58c24 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -69,8 +69,9 @@ #define PSR_c 0x000000ff /* Control */ /* - * ARMv7 groups of APSR bits + * ARMv7 groups of PSR bits */ +#define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */ #define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */ #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */ @@ -199,6 +200,14 @@ extern unsigned long profile_pc(struct pt_regs *regs); #define predicate(x) ((x) & 0xf0000000) #define PREDICATE_ALWAYS 0xe0000000 +/* + * True if instr is a 32-bit thumb instruction. This works if instr + * is the first or only half-word of a thumb instruction. 
It also works + * when instr holds all 32-bits of a wide thumb instruction if stored + * in the form (first_half<<16)|(second_half) + */ +#define is_wide_instruction(instr) ((unsigned)(instr) >= 0xe800) + /* * kprobe-based event tracer support */ diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h index 2f87870d9347..cefdb8f898a1 100644 --- a/arch/arm/include/asm/scatterlist.h +++ b/arch/arm/include/asm/scatterlist.h @@ -1,6 +1,10 @@ #ifndef _ASMARM_SCATTERLIST_H #define _ASMARM_SCATTERLIST_H +#ifdef CONFIG_ARM_HAS_SG_CHAIN +#define ARCH_HAS_SG_CHAIN +#endif + #include #include #include diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index ee2ad8ae07af..915696dd9c7c 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -187,12 +187,16 @@ struct tagtable { #define __tag __used __attribute__((__section__(".taglist.init"))) #define __tagtable(tag, fn) \ -static struct tagtable __tagtable_##fn __tag = { tag, fn } +static const struct tagtable __tagtable_##fn __tag = { tag, fn } /* * Memory map description */ -#define NR_BANKS 8 +#ifdef CONFIG_ARCH_EP93XX +# define NR_BANKS 16 +#else +# define NR_BANKS 8 +#endif struct membank { phys_addr_t start; diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h new file mode 100644 index 000000000000..b0e4e1a02318 --- /dev/null +++ b/arch/arm/include/asm/suspend.h @@ -0,0 +1,22 @@ +#ifndef __ASM_ARM_SUSPEND_H +#define __ASM_ARM_SUSPEND_H + +#include +#include + +extern void cpu_resume(void); + +/* + * Hide the first two arguments to __cpu_suspend - these are an implementation + * detail which platform code shouldn't have to know about. + */ +static inline int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) +{ + extern int __cpu_suspend(int, long, unsigned long, + int (*)(unsigned long)); + int ret = __cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, arg, fn); + flush_tlb_all(); + return ret; +} + +#endif diff --git a/arch/arm/include/asm/tcm.h b/arch/arm/include/asm/tcm.h index 5929ef5d927a..8578d726ad78 100644 --- a/arch/arm/include/asm/tcm.h +++ b/arch/arm/include/asm/tcm.h @@ -27,5 +27,7 @@ void *tcm_alloc(size_t len); void tcm_free(void *addr, size_t len); +bool tcm_dtcm_present(void); +bool tcm_itcm_present(void); #endif diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index d2005de383b8..8077145698ff 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -34,16 +34,12 @@ #define TLB_V6_D_ASID (1 << 17) #define TLB_V6_I_ASID (1 << 18) -#define TLB_BTB (1 << 28) - /* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */ #define TLB_V7_UIS_PAGE (1 << 19) #define TLB_V7_UIS_FULL (1 << 20) #define TLB_V7_UIS_ASID (1 << 21) -/* Inner Shareable BTB operation (ARMv7 MP extensions) */ -#define TLB_V7_IS_BTB (1 << 22) - +#define TLB_BARRIER (1 << 28) #define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */ #define TLB_DCLEAN (1 << 30) #define TLB_WB (1 << 31) @@ -58,7 +54,7 @@ * v4wb - ARMv4 with write buffer without I TLB flush entry instruction * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction * fr - Feroceon (v4wbi with non-outer-cacheable page table walks) - * fa - Faraday (v4 with write buffer with UTLB and branch target buffer (BTB)) + * fa - Faraday (v4 with write buffer with UTLB) * v6wbi - ARMv6 with write buffer with I TLB flush entry instruction * v7wbi - identical to v6wbi */ @@ -99,7 +95,7 @@ # define v4_always_flags (-1UL) #endif -#define fa_tlb_flags (TLB_WB 
| TLB_BTB | TLB_DCLEAN | \ +#define fa_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ TLB_V4_U_FULL | TLB_V4_U_PAGE) #ifdef CONFIG_CPU_TLB_FA @@ -166,7 +162,7 @@ # define v4wb_always_flags (-1UL) #endif -#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \ +#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ TLB_V6_I_FULL | TLB_V6_D_FULL | \ TLB_V6_I_PAGE | TLB_V6_D_PAGE | \ TLB_V6_I_ASID | TLB_V6_D_ASID) @@ -184,9 +180,9 @@ # define v6wbi_always_flags (-1UL) #endif -#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \ +#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID) -#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BTB | \ +#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID) #ifdef CONFIG_CPU_TLB_V7 @@ -341,15 +337,7 @@ static inline void local_flush_tlb_all(void) if (tlb_flag(TLB_V7_UIS_FULL)) asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc"); - if (tlb_flag(TLB_BTB)) { - /* flush the branch target cache */ - asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); - dsb(); - isb(); - } - if (tlb_flag(TLB_V7_IS_BTB)) { - /* flush the branch target cache */ - asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + if (tlb_flag(TLB_BARRIER)) { dsb(); isb(); } @@ -389,17 +377,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc"); #endif - if (tlb_flag(TLB_BTB)) { - /* flush the branch target cache */ - asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); + if (tlb_flag(TLB_BARRIER)) dsb(); - } - if (tlb_flag(TLB_V7_IS_BTB)) { - /* flush the branch target cache */ - asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); - dsb(); - isb(); - } } static inline void @@ -439,17 +418,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc"); #endif - if (tlb_flag(TLB_BTB)) { - /* flush the branch target cache */ - asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); + if (tlb_flag(TLB_BARRIER)) dsb(); - } - if (tlb_flag(TLB_V7_IS_BTB)) { - /* flush the branch target cache */ - asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); - dsb(); - isb(); - } } static inline void local_flush_tlb_kernel_page(unsigned long kaddr) @@ -482,15 +452,7 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) if (tlb_flag(TLB_V7_UIS_PAGE)) asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc"); - if (tlb_flag(TLB_BTB)) { - /* flush the branch target cache */ - asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); - dsb(); - isb(); - } - if (tlb_flag(TLB_V7_IS_BTB)) { - /* flush the branch target cache */ - asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + if (tlb_flag(TLB_BARRIER)) { dsb(); isb(); } diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h index f90756dc16dc..5b29a6673625 100644 --- a/arch/arm/include/asm/traps.h +++ b/arch/arm/include/asm/traps.h @@ -3,6 +3,9 @@ #include +struct pt_regs; +struct task_struct; + struct undef_hook { struct list_head node; u32 instr_mask; diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index a5b31af5c2b8..f7887dc53c1f 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -37,7 +37,12 @@ obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o 
-obj-$(CONFIG_KPROBES) += kprobes.o kprobes-decode.o +obj-$(CONFIG_KPROBES) += kprobes.o kprobes-common.o +ifdef CONFIG_THUMB2_KERNEL +obj-$(CONFIG_KPROBES) += kprobes-thumb.o +else +obj-$(CONFIG_KPROBES) += kprobes-arm.o +endif obj-$(CONFIG_ATAGS_PROC) += atags.o obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o obj-$(CONFIG_ARM_THUMBEE) += thumbee.o diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 927522cfc12e..16baba2e4369 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -59,6 +59,9 @@ int main(void) DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value)); DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate)); DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate)); +#ifdef CONFIG_SMP + DEFINE(VFP_CPU, offsetof(union vfp_state, hard.cpu)); +#endif #ifdef CONFIG_ARM_THUMBEE DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state)); #endif diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 90c62cd51ca9..a87cbf889ff4 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -29,21 +29,53 @@ #include /* - * Interrupt handling. Preserves r7, r8, r9 + * Interrupt handling. */ .macro irq_handler #ifdef CONFIG_MULTI_IRQ_HANDLER - ldr r5, =handle_arch_irq + ldr r1, =handle_arch_irq mov r0, sp - ldr r5, [r5] + ldr r1, [r1] adr lr, BSYM(9997f) - teq r5, #0 - movne pc, r5 + teq r1, #0 + movne pc, r1 #endif arch_irq_handler_default 9997: .endm + .macro pabt_helper + @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5 +#ifdef MULTI_PABORT + ldr ip, .LCprocfns + mov lr, pc + ldr pc, [ip, #PROCESSOR_PABT_FUNC] +#else + bl CPU_PABORT_HANDLER +#endif + .endm + + .macro dabt_helper + + @ + @ Call the processor-specific abort handler: + @ + @ r2 - pt_regs + @ r4 - aborted context pc + @ r5 - aborted context psr + @ + @ The abort handler must return the aborted address in r0, and + @ the fault status register in r1. r9 must be preserved. + @ +#ifdef MULTI_DABORT + ldr ip, .LCprocfns + mov lr, pc + ldr pc, [ip, #PROCESSOR_DABT_FUNC] +#else + bl CPU_DABORT_HANDLER +#endif + .endm + #ifdef CONFIG_KPROBES .section .kprobes.text,"ax",%progbits #else @@ -126,106 +158,74 @@ ENDPROC(__und_invalid) SPFIX( subeq sp, sp, #4 ) stmia sp, {r1 - r12} - ldmia r0, {r1 - r3} - add r5, sp, #S_SP - 4 @ here for interlock avoidance - mov r4, #-1 @ "" "" "" "" - add r0, sp, #(S_FRAME_SIZE + \stack_hole - 4) - SPFIX( addeq r0, r0, #4 ) - str r1, [sp, #-4]! @ save the "real" r0 copied + ldmia r0, {r3 - r5} + add r7, sp, #S_SP - 4 @ here for interlock avoidance + mov r6, #-1 @ "" "" "" "" + add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4) + SPFIX( addeq r2, r2, #4 ) + str r3, [sp, #-4]! 
@ save the "real" r0 copied @ from the exception stack - mov r1, lr + mov r3, lr @ @ We are now ready to fill in the remaining blanks on the stack: @ - @ r0 - sp_svc - @ r1 - lr_svc - @ r2 - lr_, already fixed up for correct return/restart - @ r3 - spsr_ - @ r4 - orig_r0 (see pt_regs definition in ptrace.h) + @ r2 - sp_svc + @ r3 - lr_svc + @ r4 - lr_, already fixed up for correct return/restart + @ r5 - spsr_ + @ r6 - orig_r0 (see pt_regs definition in ptrace.h) @ - stmia r5, {r0 - r4} + stmia r7, {r2 - r6} + +#ifdef CONFIG_TRACE_IRQFLAGS + bl trace_hardirqs_off +#endif .endm .align 5 __dabt_svc: svc_entry - - @ - @ get ready to re-enable interrupts if appropriate - @ - mrs r9, cpsr - tst r3, #PSR_I_BIT - biceq r9, r9, #PSR_I_BIT - - @ - @ Call the processor-specific abort handler: - @ - @ r2 - aborted context pc - @ r3 - aborted context cpsr - @ - @ The abort handler must return the aborted address in r0, and - @ the fault status register in r1. r9 must be preserved. - @ -#ifdef MULTI_DABORT - ldr r4, .LCprocfns - mov lr, pc - ldr pc, [r4, #PROCESSOR_DABT_FUNC] -#else - bl CPU_DABORT_HANDLER -#endif - - @ - @ set desired IRQ state, then call main handler - @ - debug_entry r1 - msr cpsr_c, r9 mov r2, sp - bl do_DataAbort + dabt_helper @ @ IRQs off again before pulling preserved data off the stack @ disable_irq_notrace - @ - @ restore SPSR and restart the instruction - @ - ldr r2, [sp, #S_PSR] - svc_exit r2 @ return from exception +#ifdef CONFIG_TRACE_IRQFLAGS + tst r5, #PSR_I_BIT + bleq trace_hardirqs_on + tst r5, #PSR_I_BIT + blne trace_hardirqs_off +#endif + svc_exit r5 @ return from exception UNWIND(.fnend ) ENDPROC(__dabt_svc) .align 5 __irq_svc: svc_entry + irq_handler -#ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_off -#endif #ifdef CONFIG_PREEMPT get_thread_info tsk ldr r8, [tsk, #TI_PREEMPT] @ get preempt count - add r7, r8, #1 @ increment it - str r7, [tsk, #TI_PREEMPT] -#endif - - irq_handler -#ifdef CONFIG_PREEMPT - str r8, [tsk, #TI_PREEMPT] @ restore preempt count ldr r0, [tsk, #TI_FLAGS] @ get flags teq r8, #0 @ if preempt count != 0 movne r0, #0 @ force flags to 0 tst r0, #_TIF_NEED_RESCHED blne svc_preempt #endif - ldr r4, [sp, #S_PSR] @ irqs are already disabled + #ifdef CONFIG_TRACE_IRQFLAGS - tst r4, #PSR_I_BIT - bleq trace_hardirqs_on + @ The parent context IRQs must have been enabled to get here in + @ the first place, so there's no point checking the PSR I bit. 
+ bl trace_hardirqs_on #endif - svc_exit r4 @ return from exception + svc_exit r5 @ return from exception UNWIND(.fnend ) ENDPROC(__irq_svc) @@ -251,7 +251,6 @@ __und_svc: #else svc_entry #endif - @ @ call emulation code, which returns using r9 if it has emulated @ the instruction, or the more conventional lr if we are to treat @@ -260,15 +259,16 @@ __und_svc: @ r0 - instruction @ #ifndef CONFIG_THUMB2_KERNEL - ldr r0, [r2, #-4] + ldr r0, [r4, #-4] #else - ldrh r0, [r2, #-2] @ Thumb instruction at LR - 2 + ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2 and r9, r0, #0xf800 cmp r9, #0xe800 @ 32-bit instruction if xx >= 0 - ldrhhs r9, [r2] @ bottom 16 bits + ldrhhs r9, [r4] @ bottom 16 bits orrhs r0, r9, r0, lsl #16 #endif adr r9, BSYM(1f) + mov r2, r4 bl call_fpe mov r0, sp @ struct pt_regs *regs @@ -282,45 +282,35 @@ __und_svc: @ @ restore SPSR and restart the instruction @ - ldr r2, [sp, #S_PSR] @ Get SVC cpsr - svc_exit r2 @ return from exception + ldr r5, [sp, #S_PSR] @ Get SVC cpsr +#ifdef CONFIG_TRACE_IRQFLAGS + tst r5, #PSR_I_BIT + bleq trace_hardirqs_on + tst r5, #PSR_I_BIT + blne trace_hardirqs_off +#endif + svc_exit r5 @ return from exception UNWIND(.fnend ) ENDPROC(__und_svc) .align 5 __pabt_svc: svc_entry - - @ - @ re-enable interrupts if appropriate - @ - mrs r9, cpsr - tst r3, #PSR_I_BIT - biceq r9, r9, #PSR_I_BIT - - mov r0, r2 @ pass address of aborted instruction. -#ifdef MULTI_PABORT - ldr r4, .LCprocfns - mov lr, pc - ldr pc, [r4, #PROCESSOR_PABT_FUNC] -#else - bl CPU_PABORT_HANDLER -#endif - debug_entry r1 - msr cpsr_c, r9 @ Maybe enable interrupts mov r2, sp @ regs - bl do_PrefetchAbort @ call abort handler + pabt_helper @ @ IRQs off again before pulling preserved data off the stack @ disable_irq_notrace - @ - @ restore SPSR and restart the instruction - @ - ldr r2, [sp, #S_PSR] - svc_exit r2 @ return from exception +#ifdef CONFIG_TRACE_IRQFLAGS + tst r5, #PSR_I_BIT + bleq trace_hardirqs_on + tst r5, #PSR_I_BIT + blne trace_hardirqs_off +#endif + svc_exit r5 @ return from exception UNWIND(.fnend ) ENDPROC(__pabt_svc) @@ -351,23 +341,23 @@ ENDPROC(__pabt_svc) ARM( stmib sp, {r1 - r12} ) THUMB( stmia sp, {r0 - r12} ) - ldmia r0, {r1 - r3} + ldmia r0, {r3 - r5} add r0, sp, #S_PC @ here for interlock avoidance - mov r4, #-1 @ "" "" "" "" + mov r6, #-1 @ "" "" "" "" - str r1, [sp] @ save the "real" r0 copied + str r3, [sp] @ save the "real" r0 copied @ from the exception stack @ @ We are now ready to fill in the remaining blanks on the stack: @ - @ r2 - lr_, already fixed up for correct return/restart - @ r3 - spsr_ - @ r4 - orig_r0 (see pt_regs definition in ptrace.h) + @ r4 - lr_, already fixed up for correct return/restart + @ r5 - spsr_ + @ r6 - orig_r0 (see pt_regs definition in ptrace.h) @ @ Also, separately save sp_usr and lr_usr @ - stmia r0, {r2 - r4} + stmia r0, {r4 - r6} ARM( stmdb r0, {sp, lr}^ ) THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) @@ -380,10 +370,14 @@ ENDPROC(__pabt_svc) @ Clear FP to mark the first stack frame @ zero_fp + +#ifdef CONFIG_IRQSOFF_TRACER + bl trace_hardirqs_off +#endif .endm .macro kuser_cmpxchg_check -#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) +#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) #ifndef CONFIG_MMU #warning "NPTL on non MMU needs fixing" #else @@ -391,8 +385,8 @@ ENDPROC(__pabt_svc) @ if it was interrupted in a critical region. Here we @ perform a quick test inline since it should be false @ 99.9999% of the time. The rest is done out of line. 
- cmp r2, #TASK_SIZE - blhs kuser_cmpxchg_fixup + cmp r4, #TASK_SIZE + blhs kuser_cmpxchg64_fixup #endif #endif .endm @@ -401,32 +395,9 @@ ENDPROC(__pabt_svc) __dabt_usr: usr_entry kuser_cmpxchg_check - - @ - @ Call the processor-specific abort handler: - @ - @ r2 - aborted context pc - @ r3 - aborted context cpsr - @ - @ The abort handler must return the aborted address in r0, and - @ the fault status register in r1. - @ -#ifdef MULTI_DABORT - ldr r4, .LCprocfns - mov lr, pc - ldr pc, [r4, #PROCESSOR_DABT_FUNC] -#else - bl CPU_DABORT_HANDLER -#endif - - @ - @ IRQs on, then call the main handler - @ - debug_entry r1 - enable_irq mov r2, sp - adr lr, BSYM(ret_from_exception) - b do_DataAbort + dabt_helper + b ret_from_exception UNWIND(.fnend ) ENDPROC(__dabt_usr) @@ -434,28 +405,8 @@ ENDPROC(__dabt_usr) __irq_usr: usr_entry kuser_cmpxchg_check - -#ifdef CONFIG_IRQSOFF_TRACER - bl trace_hardirqs_off -#endif - - get_thread_info tsk -#ifdef CONFIG_PREEMPT - ldr r8, [tsk, #TI_PREEMPT] @ get preempt count - add r7, r8, #1 @ increment it - str r7, [tsk, #TI_PREEMPT] -#endif - irq_handler -#ifdef CONFIG_PREEMPT - ldr r0, [tsk, #TI_PREEMPT] - str r8, [tsk, #TI_PREEMPT] - teq r0, r7 - ARM( strne r0, [r0, -r0] ) - THUMB( movne r0, #0 ) - THUMB( strne r0, [r0] ) -#endif - + get_thread_info tsk mov why, #0 b ret_to_user_from_irq UNWIND(.fnend ) @@ -467,6 +418,9 @@ ENDPROC(__irq_usr) __und_usr: usr_entry + mov r2, r4 + mov r3, r5 + @ @ fall through to the emulation code, which returns using r9 if @ it has emulated the instruction, or the more conventional lr @@ -682,19 +636,8 @@ ENDPROC(__und_usr_unknown) .align 5 __pabt_usr: usr_entry - - mov r0, r2 @ pass address of aborted instruction. -#ifdef MULTI_PABORT - ldr r4, .LCprocfns - mov lr, pc - ldr pc, [r4, #PROCESSOR_PABT_FUNC] -#else - bl CPU_PABORT_HANDLER -#endif - debug_entry r1 - enable_irq @ Enable interrupts mov r2, sp @ regs - bl do_PrefetchAbort @ call abort handler + pabt_helper UNWIND(.fnend ) /* fall through */ /* @@ -758,31 +701,12 @@ ENDPROC(__switch_to) /* * User helpers. * - * These are segment of kernel provided user code reachable from user space - * at a fixed address in kernel memory. This is used to provide user space - * with some operations which require kernel help because of unimplemented - * native feature and/or instructions in many ARM CPUs. The idea is for - * this code to be executed directly in user mode for best efficiency but - * which is too intimate with the kernel counter part to be left to user - * libraries. In fact this code might even differ from one CPU to another - * depending on the available instruction set and restrictions like on - * SMP systems. In other words, the kernel reserves the right to change - * this code as needed without warning. Only the entry points and their - * results are guaranteed to be stable. - * * Each segment is 32-byte aligned and will be moved to the top of the high * vector page. New segments (if ever needed) must be added in front of * existing ones. This mechanism should be used only for things that are * really small and justified, and not be abused freely. * - * User space is expected to implement those things inline when optimizing - * for a processor that has the necessary native support, but only if such - * resulting binaries are already to be incompatible with earlier ARM - * processors due to the use of unsupported instructions other than what - * is provided here. 
In other words don't make binaries unable to run on - * earlier processors just for the sake of not using these kernel helpers - * if your compiled code is not going to use the new instructions for other - * purpose. + * See Documentation/arm/kernel_user_helpers.txt for formal definitions. */ THUMB( .arm ) @@ -799,97 +723,104 @@ ENDPROC(__switch_to) __kuser_helper_start: /* - * Reference prototype: - * - * void __kernel_memory_barrier(void) - * - * Input: - * - * lr = return address - * - * Output: - * - * none - * - * Clobbered: - * - * none - * - * Definition and user space usage example: - * - * typedef void (__kernel_dmb_t)(void); - * #define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0) - * - * Apply any needed memory barrier to preserve consistency with data modified - * manually and __kuser_cmpxchg usage. - * - * This could be used as follows: - * - * #define __kernel_dmb() \ - * asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \ - * : : : "r0", "lr","cc" ) + * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular + * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point. */ +__kuser_cmpxchg64: @ 0xffff0f60 + +#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) + + /* + * Poor you. No fast solution possible... + * The kernel itself must perform the operation. + * A special ghost syscall is used for that (see traps.c). + */ + stmfd sp!, {r7, lr} + ldr r7, 1f @ it's 20 bits + swi __ARM_NR_cmpxchg64 + ldmfd sp!, {r7, pc} +1: .word __ARM_NR_cmpxchg64 + +#elif defined(CONFIG_CPU_32v6K) + + stmfd sp!, {r4, r5, r6, r7} + ldrd r4, r5, [r0] @ load old val + ldrd r6, r7, [r1] @ load new val + smp_dmb arm +1: ldrexd r0, r1, [r2] @ load current val + eors r3, r0, r4 @ compare with oldval (1) + eoreqs r3, r1, r5 @ compare with oldval (2) + strexdeq r3, r6, r7, [r2] @ store newval if eq + teqeq r3, #1 @ success? + beq 1b @ if no then retry + smp_dmb arm + rsbs r0, r3, #0 @ set returned val and C flag + ldmfd sp!, {r4, r5, r6, r7} + bx lr + +#elif !defined(CONFIG_SMP) + +#ifdef CONFIG_MMU + + /* + * The only thing that can break atomicity in this cmpxchg64 + * implementation is either an IRQ or a data abort exception + * causing another process/thread to be scheduled in the middle of + * the critical sequence. The same strategy as for cmpxchg is used. + */ + stmfd sp!, {r4, r5, r6, lr} + ldmia r0, {r4, r5} @ load old val + ldmia r1, {r6, lr} @ load new val +1: ldmia r2, {r0, r1} @ load current val + eors r3, r0, r4 @ compare with oldval (1) + eoreqs r3, r1, r5 @ compare with oldval (2) +2: stmeqia r2, {r6, lr} @ store newval if eq + rsbs r0, r3, #0 @ set return val and C flag + ldmfd sp!, {r4, r5, r6, pc} + + .text +kuser_cmpxchg64_fixup: + @ Called from kuser_cmpxchg_fixup. + @ r4 = address of interrupted insn (must be preserved). + @ sp = saved regs. r7 and r8 are clobbered. + @ 1b = first critical insn, 2b = last critical insn. + @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b. + mov r7, #0xffff0fff + sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64))) + subs r8, r4, r7 + rsbcss r8, r8, #(2b - 1b) + strcs r7, [sp, #S_PC] +#if __LINUX_ARM_ARCH__ < 6 + bcc kuser_cmpxchg32_fixup +#endif + mov pc, lr + .previous + +#else +#warning "NPTL on non MMU needs fixing" + mov r0, #-1 + adds r0, r0, #0 + usr_ret lr +#endif + +#else +#error "incoherent kernel configuration" +#endif + + /* pad to next slot */ + .rept (16 - (. 
- __kuser_cmpxchg64)/4) + .word 0 + .endr + + .align 5 + __kuser_memory_barrier: @ 0xffff0fa0 smp_dmb arm usr_ret lr .align 5 -/* - * Reference prototype: - * - * int __kernel_cmpxchg(int oldval, int newval, int *ptr) - * - * Input: - * - * r0 = oldval - * r1 = newval - * r2 = ptr - * lr = return address - * - * Output: - * - * r0 = returned value (zero or non-zero) - * C flag = set if r0 == 0, clear if r0 != 0 - * - * Clobbered: - * - * r3, ip, flags - * - * Definition and user space usage example: - * - * typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr); - * #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0) - * - * Atomically store newval in *ptr if *ptr is equal to oldval for user space. - * Return zero if *ptr was changed or non-zero if no exchange happened. - * The C flag is also set if *ptr was changed to allow for assembly - * optimization in the calling code. - * - * Notes: - * - * - This routine already includes memory barriers as needed. - * - * For example, a user space atomic_add implementation could look like this: - * - * #define atomic_add(ptr, val) \ - * ({ register unsigned int *__ptr asm("r2") = (ptr); \ - * register unsigned int __result asm("r1"); \ - * asm volatile ( \ - * "1: @ atomic_add\n\t" \ - * "ldr r0, [r2]\n\t" \ - * "mov r3, #0xffff0fff\n\t" \ - * "add lr, pc, #4\n\t" \ - * "add r1, r0, %2\n\t" \ - * "add pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \ - * "bcc 1b" \ - * : "=&r" (__result) \ - * : "r" (__ptr), "rIL" (val) \ - * : "r0","r3","ip","lr","cc","memory" ); \ - * __result; }) - */ - __kuser_cmpxchg: @ 0xffff0fc0 #if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) @@ -925,15 +856,15 @@ __kuser_cmpxchg: @ 0xffff0fc0 usr_ret lr .text -kuser_cmpxchg_fixup: +kuser_cmpxchg32_fixup: @ Called from kuser_cmpxchg_check macro. - @ r2 = address of interrupted insn (must be preserved). + @ r4 = address of interrupted insn (must be preserved). @ sp = saved regs. r7 and r8 are clobbered. @ 1b = first critical insn, 2b = last critical insn. - @ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b. + @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b. mov r7, #0xffff0fff sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg))) - subs r8, r2, r7 + subs r8, r4, r7 rsbcss r8, r8, #(2b - 1b) strcs r7, [sp, #S_PC] mov pc, lr @@ -963,39 +894,6 @@ kuser_cmpxchg_fixup: .align 5 -/* - * Reference prototype: - * - * int __kernel_get_tls(void) - * - * Input: - * - * lr = return address - * - * Output: - * - * r0 = TLS value - * - * Clobbered: - * - * none - * - * Definition and user space usage example: - * - * typedef int (__kernel_get_tls_t)(void); - * #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0) - * - * Get the TLS value as previously set via the __ARM_NR_set_tls syscall. 
- * - * This could be used as follows: - * - * #define __kernel_get_tls() \ - * ({ register unsigned int __val asm("r0"); \ - * asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \ - * : "=r" (__val) : : "lr","cc" ); \ - * __val; }) - */ - __kuser_get_tls: @ 0xffff0fe0 ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init usr_ret lr @@ -1004,19 +902,6 @@ __kuser_get_tls: @ 0xffff0fe0 .word 0 @ 0xffff0ff0 software TLS value, then .endr @ pad up to __kuser_helper_version -/* - * Reference declaration: - * - * extern unsigned int __kernel_helper_version; - * - * Definition and user space usage example: - * - * #define __kernel_helper_version (*(unsigned int *)0xffff0ffc) - * - * User space may read this to determine the curent number of helpers - * available. - */ - __kuser_helper_version: @ 0xffff0ffc .word ((__kuser_helper_end - __kuser_helper_start) >> 5) diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 051166c2a932..9a8531eadd3d 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -121,15 +121,13 @@ .endm #else /* CONFIG_THUMB2_KERNEL */ .macro svc_exit, rpsr + ldr lr, [sp, #S_SP] @ top of the stack + ldrd r0, r1, [sp, #S_LR] @ calling lr and pc clrex @ clear the exclusive monitor - ldr r0, [sp, #S_SP] @ top of the stack - ldr r1, [sp, #S_PC] @ return address - tst r0, #4 @ orig stack 8-byte aligned? - stmdb r0, {r1, \rpsr} @ rfe context + stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context ldmia sp, {r0 - r12} - ldr lr, [sp, #S_LR] - addeq sp, sp, #S_FRAME_SIZE - 8 @ aligned - addne sp, sp, #S_FRAME_SIZE - 4 @ not aligned + mov sp, lr + ldr lr, [sp], #4 rfeia sp! .endm @@ -165,25 +163,6 @@ .endm #endif /* !CONFIG_THUMB2_KERNEL */ - @ - @ Debug exceptions are taken as prefetch or data aborts. - @ We must disable preemption during the handler so that - @ we can access the debug registers safely. - @ - .macro debug_entry, fsr -#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT) - ldr r4, =0x40f @ mask out fsr.fs - and r5, r4, \fsr - cmp r5, #2 @ debug exception - bne 1f - get_thread_info r10 - ldr r6, [r10, #TI_PREEMPT] @ get preempt count - add r11, r6, #1 @ increment it - str r11, [r10, #TI_PREEMPT] -1: -#endif - .endm - /* * These are the registers used in the syscall handler, and allow us to * have in theory up to 7 arguments to a function - r0 to r6. diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 6b1e0ad9ec3b..d46f25968bec 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -32,8 +32,16 @@ * numbers for r1. * */ + .arm + __HEAD ENTRY(stext) + + THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM. + THUMB( bx r9 ) @ If this is a Thumb-2 kernel, + THUMB( .thumb ) @ switch to Thumb now. + THUMB(1: ) + setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode @ and irqs disabled #ifndef CONFIG_CPU_CP15 diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 278c1b0ebb2e..742b6108a001 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -71,8 +71,16 @@ * crap here - that's what the boot loader (or in extreme, well justified * circumstances, zImage) is for. */ + .arm + __HEAD ENTRY(stext) + + THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM. + THUMB( bx r9 ) @ If this is a Thumb-2 kernel, + THUMB( .thumb ) @ switch to Thumb now. 
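+ @ (BSYM(1f) yields the label address with bit 0 set, so the bx above + @ switches the CPU into Thumb state.)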
+ THUMB(1: ) + setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode @ and irqs disabled mrc p15, 0, r9, c0, c0 @ get processor id diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 87acc25d7a3e..a927ca1f5566 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -796,7 +796,7 @@ unlock: /* * Called from either the Data Abort Handler [watchpoint] or the - * Prefetch Abort Handler [breakpoint] with preemption disabled. + * Prefetch Abort Handler [breakpoint] with interrupts disabled. */ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, struct pt_regs *regs) @@ -804,8 +804,10 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, int ret = 0; u32 dscr; - /* We must be called with preemption disabled. */ - WARN_ON(preemptible()); + preempt_disable(); + + if (interrupts_enabled(regs)) + local_irq_enable(); /* We only handle watchpoints and hardware breakpoints. */ ARM_DBG_READ(c1, 0, dscr); @@ -824,10 +826,6 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, ret = 1; /* Unhandled fault. */ } - /* - * Re-enable preemption after it was disabled in the - * low-level exception handling code. - */ preempt_enable(); return ret; diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 83bbad03fcc6..0f928a131af8 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -131,54 +131,63 @@ int __init arch_probe_nr_irqs(void) #ifdef CONFIG_HOTPLUG_CPU -static bool migrate_one_irq(struct irq_data *d) +static bool migrate_one_irq(struct irq_desc *desc) { - unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask); + struct irq_data *d = irq_desc_get_irq_data(desc); + const struct cpumask *affinity = d->affinity; + struct irq_chip *c; bool ret = false; - if (cpu >= nr_cpu_ids) { - cpu = cpumask_any(cpu_online_mask); + /* + * If this is a per-CPU interrupt, or the affinity does not + * include this CPU, then we have nothing to do. + */ + if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) + return false; + + if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { + affinity = cpu_online_mask; ret = true; } - pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu); - - d->chip->irq_set_affinity(d, cpumask_of(cpu), true); + c = irq_data_get_irq_chip(d); + if (c->irq_set_affinity) + c->irq_set_affinity(d, affinity, true); + else + pr_debug("IRQ%u: unable to set affinity\n", d->irq); return ret; } /* - * The CPU has been marked offline. Migrate IRQs off this CPU. If - * the affinity settings do not allow other CPUs, force them onto any + * The current CPU has been marked offline. Migrate IRQs off this CPU. + * If the affinity settings do not allow other CPUs, force them onto any * available CPU. + * + * Note: we must iterate over all IRQs, whether they have an attached + * action structure or not, as we need to get chained interrupts too. 
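+ * (A chained demultiplexing interrupt, for example, has no irqaction + * attached but must still be migrated off the dying CPU.)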
*/ void migrate_irqs(void) { - unsigned int i, cpu = smp_processor_id(); + unsigned int i; struct irq_desc *desc; unsigned long flags; local_irq_save(flags); for_each_irq_desc(i, desc) { - struct irq_data *d = &desc->irq_data; bool affinity_broken = false; + if (!desc) + continue; + raw_spin_lock(&desc->lock); - do { - if (desc->action == NULL) - break; - - if (d->node != cpu) - break; - - affinity_broken = migrate_one_irq(d); - } while (0); + affinity_broken = migrate_one_irq(desc); raw_spin_unlock(&desc->lock); if (affinity_broken && printk_ratelimit()) - pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu); + pr_warning("IRQ%u no longer affine to CPU%u\n", i, + smp_processor_id()); } local_irq_restore(flags); diff --git a/arch/arm/kernel/kprobes-arm.c b/arch/arm/kernel/kprobes-arm.c new file mode 100644 index 000000000000..79203ee1d039 --- /dev/null +++ b/arch/arm/kernel/kprobes-arm.c @@ -0,0 +1,999 @@ +/* + * arch/arm/kernel/kprobes-decode.c + * + * Copyright (C) 2006, 2007 Motorola Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +/* + * We do not have hardware single-stepping on ARM. This + * effort is further complicated by the ARM not having a + * "next PC" register. Instructions that change the PC + * can't be safely single-stepped in an MP environment, so + * we have a lot of work to do: + * + * In the prepare phase: + * *) If it is an instruction that does anything + * with the CPU mode, we reject it for a kprobe. + * (This is out of laziness rather than need. The + * instructions could be simulated.) + * + * *) Otherwise, decode the instruction, rewriting its + * registers to take fixed, ordered registers and + * setting a handler for it to run the instruction. + * + * In the execution phase by an instruction's handler: + * + * *) If the PC is written to by the instruction, the + * instruction must be fully simulated in software. + * + * *) Otherwise, a modified form of the instruction is + * directly executed. Its handler calls the + * instruction in insn[0]. In insn[1] is a + * "mov pc, lr" to return. + * + * Before calling, load up the reordered registers + * from the original instruction's registers. If one + * of the original input registers is the PC, compute + * and adjust the appropriate input register. + * + * After the call completes, copy the output registers to + * the original instruction's original registers. + * + * We don't use a real breakpoint instruction since that + * would have us in the kernel go from SVC mode to SVC + * mode, losing the link register. Instead we use an + * undefined instruction. To simplify processing, the + * undefined instruction used for kprobes must be reserved + * exclusively for kprobes use. + * + * TODO: ifdef out some instruction decoding based on architecture. 
+ */ + +#include <linux/kernel.h> +#include <linux/kprobes.h> + +#include "kprobes.h" + +#define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit))))) + +#define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25) + +#if __LINUX_ARM_ARCH__ >= 6 +#define BLX(reg) "blx "reg" \n\t" +#else +#define BLX(reg) "mov lr, pc \n\t" \ + "mov pc, "reg" \n\t" +#endif + +/* + * To avoid the complications of mimicking single-stepping on a + * processor without a Next-PC or a single-step mode, and to + * avoid having to deal with the side-effects of boosting, we + * simulate or emulate (almost) all ARM instructions. + * + * "Simulation" is where the instruction's behavior is duplicated in + * C code. "Emulation" is where the original instruction is rewritten + * and executed, often by altering its registers. + * + * By having all behavior of the kprobe'd instruction completed before + * returning from the kprobe_handler(), all locks (scheduler and + * interrupt) can safely be released. There is no need for secondary + * breakpoints, no race with MP or preemptible kernels, nor having to + * clean up resource counts at a later time, impacting overall system + * performance. By rewriting the instruction, only the minimum registers + * need to be loaded and saved back, optimizing performance. + * + * Calling the insnslot_*_rwflags version of a function doesn't hurt + * anything even when the CPSR flags aren't updated by the + * instruction. It's just a little slower in return for saving + * a little space by not having a duplicate function that doesn't + * update the flags. (The same optimization can be said for + * instructions that do or don't perform register writeback.) + * Also, instructions can either read the flags, only write the + * flags, or read and write the flags. To save combinations + * rather than for sheer performance, flag functions just assume + * read and write of flags. 
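+ * + * For example, a branch modifies the PC and so is simulated in C (see + * simulate_bbl below), whereas an ADD of ordinary registers is emulated + * by executing a rewritten copy of itself through insn_fn.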
+ */ + +static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + long iaddr = (long)p->addr; + int disp = branch_displacement(insn); + + if (insn & (1 << 24)) + regs->ARM_lr = iaddr + 4; + + regs->ARM_pc = iaddr + 8 + disp; +} + +static void __kprobes simulate_blx1(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + long iaddr = (long)p->addr; + int disp = branch_displacement(insn); + + regs->ARM_lr = iaddr + 4; + regs->ARM_pc = iaddr + 8 + disp + ((insn >> 23) & 0x2); + regs->ARM_cpsr |= PSR_T_BIT; +} + +static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + int rm = insn & 0xf; + long rmv = regs->uregs[rm]; + + if (insn & (1 << 5)) + regs->ARM_lr = (long)p->addr + 4; + + regs->ARM_pc = rmv & ~0x1; + regs->ARM_cpsr &= ~PSR_T_BIT; + if (rmv & 0x1) + regs->ARM_cpsr |= PSR_T_BIT; +} + +static void __kprobes simulate_mrs(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + int rd = (insn >> 12) & 0xf; + unsigned long mask = 0xf8ff03df; /* Mask out execution state */ + regs->uregs[rd] = regs->ARM_cpsr & mask; +} + +static void __kprobes simulate_mov_ipsp(struct kprobe *p, struct pt_regs *regs) +{ + regs->uregs[12] = regs->uregs[13]; +} + +static void __kprobes +emulate_ldrdstrd(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + unsigned long pc = (unsigned long)p->addr + 8; + int rt = (insn >> 12) & 0xf; + int rn = (insn >> 16) & 0xf; + int rm = insn & 0xf; + + register unsigned long rtv asm("r0") = regs->uregs[rt]; + register unsigned long rt2v asm("r1") = regs->uregs[rt+1]; + register unsigned long rnv asm("r2") = (rn == 15) ? pc + : regs->uregs[rn]; + register unsigned long rmv asm("r3") = regs->uregs[rm]; + + __asm__ __volatile__ ( + BLX("%[fn]") + : "=r" (rtv), "=r" (rt2v), "=r" (rnv) + : "0" (rtv), "1" (rt2v), "2" (rnv), "r" (rmv), + [fn] "r" (p->ainsn.insn_fn) + : "lr", "memory", "cc" + ); + + regs->uregs[rt] = rtv; + regs->uregs[rt+1] = rt2v; + if (is_writeback(insn)) + regs->uregs[rn] = rnv; +} + +static void __kprobes +emulate_ldr(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + unsigned long pc = (unsigned long)p->addr + 8; + int rt = (insn >> 12) & 0xf; + int rn = (insn >> 16) & 0xf; + int rm = insn & 0xf; + + register unsigned long rtv asm("r0"); + register unsigned long rnv asm("r2") = (rn == 15) ? pc + : regs->uregs[rn]; + register unsigned long rmv asm("r3") = regs->uregs[rm]; + + __asm__ __volatile__ ( + BLX("%[fn]") + : "=r" (rtv), "=r" (rnv) + : "1" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn) + : "lr", "memory", "cc" + ); + + if (rt == 15) + load_write_pc(rtv, regs); + else + regs->uregs[rt] = rtv; + + if (is_writeback(insn)) + regs->uregs[rn] = rnv; +} + +static void __kprobes +emulate_str(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + unsigned long rtpc = (unsigned long)p->addr + str_pc_offset; + unsigned long rnpc = (unsigned long)p->addr + 8; + int rt = (insn >> 12) & 0xf; + int rn = (insn >> 16) & 0xf; + int rm = insn & 0xf; + + register unsigned long rtv asm("r0") = (rt == 15) ? rtpc + : regs->uregs[rt]; + register unsigned long rnv asm("r2") = (rn == 15) ? 
rnpc + : regs->uregs[rn]; + register unsigned long rmv asm("r3") = regs->uregs[rm]; + + __asm__ __volatile__ ( + BLX("%[fn]") + : "=r" (rnv) + : "r" (rtv), "0" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn) + : "lr", "memory", "cc" + ); + + if (is_writeback(insn)) + regs->uregs[rn] = rnv; +} + +static void __kprobes +emulate_rd12rn16rm0rs8_rwflags(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + unsigned long pc = (unsigned long)p->addr + 8; + int rd = (insn >> 12) & 0xf; + int rn = (insn >> 16) & 0xf; + int rm = insn & 0xf; + int rs = (insn >> 8) & 0xf; + + register unsigned long rdv asm("r0") = regs->uregs[rd]; + register unsigned long rnv asm("r2") = (rn == 15) ? pc + : regs->uregs[rn]; + register unsigned long rmv asm("r3") = (rm == 15) ? pc + : regs->uregs[rm]; + register unsigned long rsv asm("r1") = regs->uregs[rs]; + unsigned long cpsr = regs->ARM_cpsr; + + __asm__ __volatile__ ( + "msr cpsr_fs, %[cpsr] \n\t" + BLX("%[fn]") + "mrs %[cpsr], cpsr \n\t" + : "=r" (rdv), [cpsr] "=r" (cpsr) + : "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv), + "1" (cpsr), [fn] "r" (p->ainsn.insn_fn) + : "lr", "memory", "cc" + ); + + if (rd == 15) + alu_write_pc(rdv, regs); + else + regs->uregs[rd] = rdv; + regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK); +} + +static void __kprobes +emulate_rd12rn16rm0_rwflags_nopc(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + int rd = (insn >> 12) & 0xf; + int rn = (insn >> 16) & 0xf; + int rm = insn & 0xf; + + register unsigned long rdv asm("r0") = regs->uregs[rd]; + register unsigned long rnv asm("r2") = regs->uregs[rn]; + register unsigned long rmv asm("r3") = regs->uregs[rm]; + unsigned long cpsr = regs->ARM_cpsr; + + __asm__ __volatile__ ( + "msr cpsr_fs, %[cpsr] \n\t" + BLX("%[fn]") + "mrs %[cpsr], cpsr \n\t" + : "=r" (rdv), [cpsr] "=r" (cpsr) + : "0" (rdv), "r" (rnv), "r" (rmv), + "1" (cpsr), [fn] "r" (p->ainsn.insn_fn) + : "lr", "memory", "cc" + ); + + regs->uregs[rd] = rdv; + regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK); +} + +static void __kprobes +emulate_rd16rn12rm0rs8_rwflags_nopc(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + int rd = (insn >> 16) & 0xf; + int rn = (insn >> 12) & 0xf; + int rm = insn & 0xf; + int rs = (insn >> 8) & 0xf; + + register unsigned long rdv asm("r2") = regs->uregs[rd]; + register unsigned long rnv asm("r0") = regs->uregs[rn]; + register unsigned long rmv asm("r3") = regs->uregs[rm]; + register unsigned long rsv asm("r1") = regs->uregs[rs]; + unsigned long cpsr = regs->ARM_cpsr; + + __asm__ __volatile__ ( + "msr cpsr_fs, %[cpsr] \n\t" + BLX("%[fn]") + "mrs %[cpsr], cpsr \n\t" + : "=r" (rdv), [cpsr] "=r" (cpsr) + : "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv), + "1" (cpsr), [fn] "r" (p->ainsn.insn_fn) + : "lr", "memory", "cc" + ); + + regs->uregs[rd] = rdv; + regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK); +} + +static void __kprobes +emulate_rd12rm0_noflags_nopc(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + int rd = (insn >> 12) & 0xf; + int rm = insn & 0xf; + + register unsigned long rdv asm("r0") = regs->uregs[rd]; + register unsigned long rmv asm("r3") = regs->uregs[rm]; + + __asm__ __volatile__ ( + BLX("%[fn]") + : "=r" (rdv) + : "0" (rdv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn) + : "lr", "memory", "cc" + ); + + regs->uregs[rd] = rdv; +} + +static void __kprobes +emulate_rdlo12rdhi16rn0rm8_rwflags_nopc(struct kprobe *p, struct pt_regs 
*regs) +{ + kprobe_opcode_t insn = p->opcode; + int rdlo = (insn >> 12) & 0xf; + int rdhi = (insn >> 16) & 0xf; + int rn = insn & 0xf; + int rm = (insn >> 8) & 0xf; + + register unsigned long rdlov asm("r0") = regs->uregs[rdlo]; + register unsigned long rdhiv asm("r2") = regs->uregs[rdhi]; + register unsigned long rnv asm("r3") = regs->uregs[rn]; + register unsigned long rmv asm("r1") = regs->uregs[rm]; + unsigned long cpsr = regs->ARM_cpsr; + + __asm__ __volatile__ ( + "msr cpsr_fs, %[cpsr] \n\t" + BLX("%[fn]") + "mrs %[cpsr], cpsr \n\t" + : "=r" (rdlov), "=r" (rdhiv), [cpsr] "=r" (cpsr) + : "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv), + "2" (cpsr), [fn] "r" (p->ainsn.insn_fn) + : "lr", "memory", "cc" + ); + + regs->uregs[rdlo] = rdlov; + regs->uregs[rdhi] = rdhiv; + regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK); +} + +/* + * For the instruction masking and comparisons in all the "space_*" + * functions below, do _not_ rearrange the order of tests unless + * you're very, very sure of what you are doing. For the sake of + * efficiency, the masks for some tests sometimes assume other tests + * have been done prior to them, so the number of patterns to test + * for an instruction set can be as broad as possible to reduce the + * number of tests needed. + */ + +static const union decode_item arm_1111_table[] = { + /* Unconditional instructions */ + + /* memory hint 1111 0100 x001 xxxx xxxx xxxx xxxx xxxx */ + /* PLDI (immediate) 1111 0100 x101 xxxx xxxx xxxx xxxx xxxx */ + /* PLDW (immediate) 1111 0101 x001 xxxx xxxx xxxx xxxx xxxx */ + /* PLD (immediate) 1111 0101 x101 xxxx xxxx xxxx xxxx xxxx */ + DECODE_SIMULATE (0xfe300000, 0xf4100000, kprobe_simulate_nop), + + /* memory hint 1111 0110 x001 xxxx xxxx xxxx xxx0 xxxx */ + /* PLDI (register) 1111 0110 x101 xxxx xxxx xxxx xxx0 xxxx */ + /* PLDW (register) 1111 0111 x001 xxxx xxxx xxxx xxx0 xxxx */ + /* PLD (register) 1111 0111 x101 xxxx xxxx xxxx xxx0 xxxx */ + DECODE_SIMULATE (0xfe300010, 0xf6100000, kprobe_simulate_nop), + + /* BLX (immediate) 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx */ + DECODE_SIMULATE (0xfe000000, 0xfa000000, simulate_blx1), + + /* CPS 1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */ + /* SETEND 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */ + /* SRS 1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */ + /* RFE 1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ + + /* Coprocessor instructions... */ + /* MCRR2 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx */ + /* MRRC2 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx */ + /* LDC2 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ + /* STC2 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ + /* CDP2 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ + /* MCR2 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ + /* MRC2 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ + + /* Other unallocated instructions... 
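+ * (unmatched encodings fall through to the DECODE_END entry below and + * are rejected)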
*/ + DECODE_END +}; + +static const union decode_item arm_cccc_0001_0xx0____0xxx_table[] = { + /* Miscellaneous instructions */ + + /* MRS cpsr cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */ + DECODE_SIMULATEX(0x0ff000f0, 0x01000000, simulate_mrs, + REGS(0, NOPC, 0, 0, 0)), + + /* BX cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */ + DECODE_SIMULATE (0x0ff000f0, 0x01200010, simulate_blx2bx), + + /* BLX (register) cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */ + DECODE_SIMULATEX(0x0ff000f0, 0x01200030, simulate_blx2bx, + REGS(0, 0, 0, 0, NOPC)), + + /* CLZ cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */ + DECODE_EMULATEX (0x0ff000f0, 0x01600010, emulate_rd12rm0_noflags_nopc, + REGS(0, NOPC, 0, 0, NOPC)), + + /* QADD cccc 0001 0000 xxxx xxxx xxxx 0101 xxxx */ + /* QSUB cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx */ + /* QDADD cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx */ + /* QDSUB cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx */ + DECODE_EMULATEX (0x0f9000f0, 0x01000050, emulate_rd12rn16rm0_rwflags_nopc, + REGS(NOPC, NOPC, 0, 0, NOPC)), + + /* BXJ cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */ + /* MSR cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */ + /* MRS spsr cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */ + /* BKPT 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */ + /* SMC cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */ + /* And unallocated instructions... */ + DECODE_END +}; + +static const union decode_item arm_cccc_0001_0xx0____1xx0_table[] = { + /* Halfword multiply and multiply-accumulate */ + + /* SMLALxy cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */ + DECODE_EMULATEX (0x0ff00090, 0x01400080, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc, + REGS(NOPC, NOPC, NOPC, 0, NOPC)), + + /* SMULWy cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */ + DECODE_OR (0x0ff000b0, 0x012000a0), + /* SMULxy cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */ + DECODE_EMULATEX (0x0ff00090, 0x01600080, emulate_rd16rn12rm0rs8_rwflags_nopc, + REGS(NOPC, 0, NOPC, 0, NOPC)), + + /* SMLAxy cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx */ + DECODE_OR (0x0ff00090, 0x01000080), + /* SMLAWy cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx */ + DECODE_EMULATEX (0x0ff000b0, 0x01200080, emulate_rd16rn12rm0rs8_rwflags_nopc, + REGS(NOPC, NOPC, NOPC, 0, NOPC)), + + DECODE_END +}; + +static const union decode_item arm_cccc_0000_____1001_table[] = { + /* Multiply and multiply-accumulate */ + + /* MUL cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx */ + /* MULS cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx */ + DECODE_EMULATEX (0x0fe000f0, 0x00000090, emulate_rd16rn12rm0rs8_rwflags_nopc, + REGS(NOPC, 0, NOPC, 0, NOPC)), + + /* MLA cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx */ + /* MLAS cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx */ + DECODE_OR (0x0fe000f0, 0x00200090), + /* MLS cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx */ + DECODE_EMULATEX (0x0ff000f0, 0x00600090, emulate_rd16rn12rm0rs8_rwflags_nopc, + REGS(NOPC, NOPC, NOPC, 0, NOPC)), + + /* UMAAL cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx */ + DECODE_OR (0x0ff000f0, 0x00400090), + /* UMULL cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx */ + /* UMULLS cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx */ + /* UMLAL cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx */ + /* UMLALS cccc 0000 1011 xxxx xxxx xxxx 1001 xxxx */ + /* SMULL cccc 0000 1100 xxxx xxxx xxxx 1001 xxxx */ + /* SMULLS cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx */ + /* SMLAL cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx */ + /* SMLALS cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx */ + DECODE_EMULATEX (0x0f8000f0, 0x00800090, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc, + REGS(NOPC, NOPC, NOPC, 0, NOPC)), + + DECODE_END +}; + +static const union decode_item 
arm_cccc_0001_____1001_table[] = { + /* Synchronization primitives */ + + /* SWP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */ + DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc, + REGS(NOPC, NOPC, 0, 0, NOPC)), + + /* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */ + /* And unallocated instructions... */ + DECODE_END +}; + +static const union decode_item arm_cccc_000x_____1xx1_table[] = { + /* Extra load/store instructions */ + + /* STRHT cccc 0000 xx10 xxxx xxxx xxxx 1011 xxxx */ + /* ??? cccc 0000 xx10 xxxx xxxx xxxx 11x1 xxxx */ + /* LDRHT cccc 0000 xx11 xxxx xxxx xxxx 1011 xxxx */ + /* LDRSBT cccc 0000 xx11 xxxx xxxx xxxx 1101 xxxx */ + /* LDRSHT cccc 0000 xx11 xxxx xxxx xxxx 1111 xxxx */ + DECODE_REJECT (0x0f200090, 0x00200090), + + /* LDRD/STRD lr,pc,{... cccc 000x x0x0 xxxx 111x xxxx 1101 xxxx */ + DECODE_REJECT (0x0e10e0d0, 0x0000e0d0), + + /* LDRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1101 xxxx */ + /* STRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1111 xxxx */ + DECODE_EMULATEX (0x0e5000d0, 0x000000d0, emulate_ldrdstrd, + REGS(NOPCWB, NOPCX, 0, 0, NOPC)), + + /* LDRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1101 xxxx */ + /* STRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1111 xxxx */ + DECODE_EMULATEX (0x0e5000d0, 0x004000d0, emulate_ldrdstrd, + REGS(NOPCWB, NOPCX, 0, 0, 0)), + + /* STRH (register) cccc 000x x0x0 xxxx xxxx xxxx 1011 xxxx */ + DECODE_EMULATEX (0x0e5000f0, 0x000000b0, emulate_str, + REGS(NOPCWB, NOPC, 0, 0, NOPC)), + + /* LDRH (register) cccc 000x x0x1 xxxx xxxx xxxx 1011 xxxx */ + /* LDRSB (register) cccc 000x x0x1 xxxx xxxx xxxx 1101 xxxx */ + /* LDRSH (register) cccc 000x x0x1 xxxx xxxx xxxx 1111 xxxx */ + DECODE_EMULATEX (0x0e500090, 0x00100090, emulate_ldr, + REGS(NOPCWB, NOPC, 0, 0, NOPC)), + + /* STRH (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1011 xxxx */ + DECODE_EMULATEX (0x0e5000f0, 0x004000b0, emulate_str, + REGS(NOPCWB, NOPC, 0, 0, 0)), + + /* LDRH (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1011 xxxx */ + /* LDRSB (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1101 xxxx */ + /* LDRSH (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1111 xxxx */ + DECODE_EMULATEX (0x0e500090, 0x00500090, emulate_ldr, + REGS(NOPCWB, NOPC, 0, 0, 0)), + + DECODE_END +}; + +static const union decode_item arm_cccc_000x_table[] = { + /* Data-processing (register) */ + + /* S PC, ... 
cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx */ + DECODE_REJECT (0x0e10f000, 0x0010f000), + + /* MOV IP, SP 1110 0001 1010 0000 1100 0000 0000 1101 */ + DECODE_SIMULATE (0xffffffff, 0xe1a0c00d, simulate_mov_ipsp), + + /* TST (register) cccc 0001 0001 xxxx xxxx xxxx xxx0 xxxx */ + /* TEQ (register) cccc 0001 0011 xxxx xxxx xxxx xxx0 xxxx */ + /* CMP (register) cccc 0001 0101 xxxx xxxx xxxx xxx0 xxxx */ + /* CMN (register) cccc 0001 0111 xxxx xxxx xxxx xxx0 xxxx */ + DECODE_EMULATEX (0x0f900010, 0x01100000, emulate_rd12rn16rm0rs8_rwflags, + REGS(ANY, 0, 0, 0, ANY)), + + /* MOV (register) cccc 0001 101x xxxx xxxx xxxx xxx0 xxxx */ + /* MVN (register) cccc 0001 111x xxxx xxxx xxxx xxx0 xxxx */ + DECODE_EMULATEX (0x0fa00010, 0x01a00000, emulate_rd12rn16rm0rs8_rwflags, + REGS(0, ANY, 0, 0, ANY)), + + /* AND (register) cccc 0000 000x xxxx xxxx xxxx xxx0 xxxx */ + /* EOR (register) cccc 0000 001x xxxx xxxx xxxx xxx0 xxxx */ + /* SUB (register) cccc 0000 010x xxxx xxxx xxxx xxx0 xxxx */ + /* RSB (register) cccc 0000 011x xxxx xxxx xxxx xxx0 xxxx */ + /* ADD (register) cccc 0000 100x xxxx xxxx xxxx xxx0 xxxx */ + /* ADC (register) cccc 0000 101x xxxx xxxx xxxx xxx0 xxxx */ + /* SBC (register) cccc 0000 110x xxxx xxxx xxxx xxx0 xxxx */ + /* RSC (register) cccc 0000 111x xxxx xxxx xxxx xxx0 xxxx */ + /* ORR (register) cccc 0001 100x xxxx xxxx xxxx xxx0 xxxx */ + /* BIC (register) cccc 0001 110x xxxx xxxx xxxx xxx0 xxxx */ + DECODE_EMULATEX (0x0e000010, 0x00000000, emulate_rd12rn16rm0rs8_rwflags, + REGS(ANY, ANY, 0, 0, ANY)), + + /* TST (reg-shift reg) cccc 0001 0001 xxxx xxxx xxxx 0xx1 xxxx */ + /* TEQ (reg-shift reg) cccc 0001 0011 xxxx xxxx xxxx 0xx1 xxxx */ + /* CMP (reg-shift reg) cccc 0001 0101 xxxx xxxx xxxx 0xx1 xxxx */ + /* CMN (reg-shift reg) cccc 0001 0111 xxxx xxxx xxxx 0xx1 xxxx */ + DECODE_EMULATEX (0x0f900090, 0x01100010, emulate_rd12rn16rm0rs8_rwflags, + REGS(ANY, 0, NOPC, 0, ANY)), + + /* MOV (reg-shift reg) cccc 0001 101x xxxx xxxx xxxx 0xx1 xxxx */ + /* MVN (reg-shift reg) cccc 0001 111x xxxx xxxx xxxx 0xx1 xxxx */ + DECODE_EMULATEX (0x0fa00090, 0x01a00010, emulate_rd12rn16rm0rs8_rwflags, + REGS(0, ANY, NOPC, 0, ANY)), + + /* AND (reg-shift reg) cccc 0000 000x xxxx xxxx xxxx 0xx1 xxxx */ + /* EOR (reg-shift reg) cccc 0000 001x xxxx xxxx xxxx 0xx1 xxxx */ + /* SUB (reg-shift reg) cccc 0000 010x xxxx xxxx xxxx 0xx1 xxxx */ + /* RSB (reg-shift reg) cccc 0000 011x xxxx xxxx xxxx 0xx1 xxxx */ + /* ADD (reg-shift reg) cccc 0000 100x xxxx xxxx xxxx 0xx1 xxxx */ + /* ADC (reg-shift reg) cccc 0000 101x xxxx xxxx xxxx 0xx1 xxxx */ + /* SBC (reg-shift reg) cccc 0000 110x xxxx xxxx xxxx 0xx1 xxxx */ + /* RSC (reg-shift reg) cccc 0000 111x xxxx xxxx xxxx 0xx1 xxxx */ + /* ORR (reg-shift reg) cccc 0001 100x xxxx xxxx xxxx 0xx1 xxxx */ + /* BIC (reg-shift reg) cccc 0001 110x xxxx xxxx xxxx 0xx1 xxxx */ + DECODE_EMULATEX (0x0e000090, 0x00000010, emulate_rd12rn16rm0rs8_rwflags, + REGS(ANY, ANY, NOPC, 0, ANY)), + + DECODE_END +}; + +static const union decode_item arm_cccc_001x_table[] = { + /* Data-processing (immediate) */ + + /* MOVW cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */ + /* MOVT cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0x0fb00000, 0x03000000, emulate_rd12rm0_noflags_nopc, + REGS(0, NOPC, 0, 0, 0)), + + /* YIELD cccc 0011 0010 0000 xxxx xxxx 0000 0001 */ + DECODE_OR (0x0fff00ff, 0x03200001), + /* SEV cccc 0011 0010 0000 xxxx xxxx 0000 0100 */ + DECODE_EMULATE (0x0fff00ff, 0x03200004, kprobe_emulate_none), + /* NOP cccc 0011 0010 0000 xxxx xxxx 0000 0000 */ + /* WFE cccc 0011 
0010 0000 xxxx xxxx 0000 0010 */ + /* WFI cccc 0011 0010 0000 xxxx xxxx 0000 0011 */ + DECODE_SIMULATE (0x0fff00fc, 0x03200000, kprobe_simulate_nop), + /* DBG cccc 0011 0010 0000 xxxx xxxx ffff xxxx */ + /* unallocated hints cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */ + /* MSR (immediate) cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx */ + DECODE_REJECT (0x0fb00000, 0x03200000), + + /* S PC, ... cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx */ + DECODE_REJECT (0x0e10f000, 0x0210f000), + + /* TST (immediate) cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx */ + /* TEQ (immediate) cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx */ + /* CMP (immediate) cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx */ + /* CMN (immediate) cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0x0f900000, 0x03100000, emulate_rd12rn16rm0rs8_rwflags, + REGS(ANY, 0, 0, 0, 0)), + + /* MOV (immediate) cccc 0011 101x xxxx xxxx xxxx xxxx xxxx */ + /* MVN (immediate) cccc 0011 111x xxxx xxxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0x0fa00000, 0x03a00000, emulate_rd12rn16rm0rs8_rwflags, + REGS(0, ANY, 0, 0, 0)), + + /* AND (immediate) cccc 0010 000x xxxx xxxx xxxx xxxx xxxx */ + /* EOR (immediate) cccc 0010 001x xxxx xxxx xxxx xxxx xxxx */ + /* SUB (immediate) cccc 0010 010x xxxx xxxx xxxx xxxx xxxx */ + /* RSB (immediate) cccc 0010 011x xxxx xxxx xxxx xxxx xxxx */ + /* ADD (immediate) cccc 0010 100x xxxx xxxx xxxx xxxx xxxx */ + /* ADC (immediate) cccc 0010 101x xxxx xxxx xxxx xxxx xxxx */ + /* SBC (immediate) cccc 0010 110x xxxx xxxx xxxx xxxx xxxx */ + /* RSC (immediate) cccc 0010 111x xxxx xxxx xxxx xxxx xxxx */ + /* ORR (immediate) cccc 0011 100x xxxx xxxx xxxx xxxx xxxx */ + /* BIC (immediate) cccc 0011 110x xxxx xxxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0x0e000000, 0x02000000, emulate_rd12rn16rm0rs8_rwflags, + REGS(ANY, ANY, 0, 0, 0)), + + DECODE_END +}; + +static const union decode_item arm_cccc_0110_____xxx1_table[] = { + /* Media instructions */ + + /* SEL cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx */ + DECODE_EMULATEX (0x0ff000f0, 0x068000b0, emulate_rd12rn16rm0_rwflags_nopc, + REGS(NOPC, NOPC, 0, 0, NOPC)), + + /* SSAT cccc 0110 101x xxxx xxxx xxxx xx01 xxxx */ + /* USAT cccc 0110 111x xxxx xxxx xxxx xx01 xxxx */ + DECODE_OR(0x0fa00030, 0x06a00010), + /* SSAT16 cccc 0110 1010 xxxx xxxx xxxx 0011 xxxx */ + /* USAT16 cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx */ + DECODE_EMULATEX (0x0fb000f0, 0x06a00030, emulate_rd12rn16rm0_rwflags_nopc, + REGS(0, NOPC, 0, 0, NOPC)), + + /* REV cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */ + /* REV16 cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */ + /* RBIT cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */ + /* REVSH cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */ + DECODE_EMULATEX (0x0fb00070, 0x06b00030, emulate_rd12rm0_noflags_nopc, + REGS(0, NOPC, 0, 0, NOPC)), + + /* ??? cccc 0110 0x00 xxxx xxxx xxxx xxx1 xxxx */ + DECODE_REJECT (0x0fb00010, 0x06000010), + /* ??? cccc 0110 0xxx xxxx xxxx xxxx 1011 xxxx */ + DECODE_REJECT (0x0f8000f0, 0x060000b0), + /* ??? 
cccc 0110 0xxx xxxx xxxx xxxx 1101 xxxx */ + DECODE_REJECT (0x0f8000f0, 0x060000d0), + /* SADD16 cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx */ + /* SADDSUBX cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx */ + /* SSUBADDX cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx */ + /* SSUB16 cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx */ + /* SADD8 cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx */ + /* SSUB8 cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx */ + /* QADD16 cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx */ + /* QADDSUBX cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx */ + /* QSUBADDX cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx */ + /* QSUB16 cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx */ + /* QADD8 cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx */ + /* QSUB8 cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx */ + /* SHADD16 cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx */ + /* SHADDSUBX cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx */ + /* SHSUBADDX cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx */ + /* SHSUB16 cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx */ + /* SHADD8 cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx */ + /* SHSUB8 cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx */ + /* UADD16 cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx */ + /* UADDSUBX cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx */ + /* USUBADDX cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx */ + /* USUB16 cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx */ + /* UADD8 cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx */ + /* USUB8 cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx */ + /* UQADD16 cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx */ + /* UQADDSUBX cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx */ + /* UQSUBADDX cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx */ + /* UQSUB16 cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx */ + /* UQADD8 cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx */ + /* UQSUB8 cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx */ + /* UHADD16 cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx */ + /* UHADDSUBX cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx */ + /* UHSUBADDX cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx */ + /* UHSUB16 cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx */ + /* UHADD8 cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx */ + /* UHSUB8 cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx */ + DECODE_EMULATEX (0x0f800010, 0x06000010, emulate_rd12rn16rm0_rwflags_nopc, + REGS(NOPC, NOPC, 0, 0, NOPC)), + + /* PKHBT cccc 0110 1000 xxxx xxxx xxxx x001 xxxx */ + /* PKHTB cccc 0110 1000 xxxx xxxx xxxx x101 xxxx */ + DECODE_EMULATEX (0x0ff00030, 0x06800010, emulate_rd12rn16rm0_rwflags_nopc, + REGS(NOPC, NOPC, 0, 0, NOPC)), + + /* ??? cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx */ + /* ??? 
cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx */ + DECODE_REJECT (0x0fb000f0, 0x06900070), + + /* SXTB16 cccc 0110 1000 1111 xxxx xxxx 0111 xxxx */ + /* SXTB cccc 0110 1010 1111 xxxx xxxx 0111 xxxx */ + /* SXTH cccc 0110 1011 1111 xxxx xxxx 0111 xxxx */ + /* UXTB16 cccc 0110 1100 1111 xxxx xxxx 0111 xxxx */ + /* UXTB cccc 0110 1110 1111 xxxx xxxx 0111 xxxx */ + /* UXTH cccc 0110 1111 1111 xxxx xxxx 0111 xxxx */ + DECODE_EMULATEX (0x0f8f00f0, 0x068f0070, emulate_rd12rm0_noflags_nopc, + REGS(0, NOPC, 0, 0, NOPC)), + + /* SXTAB16 cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx */ + /* SXTAB cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx */ + /* SXTAH cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx */ + /* UXTAB16 cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx */ + /* UXTAB cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx */ + /* UXTAH cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx */ + DECODE_EMULATEX (0x0f8000f0, 0x06800070, emulate_rd12rn16rm0_rwflags_nopc, + REGS(NOPCX, NOPC, 0, 0, NOPC)), + + DECODE_END +}; + +static const union decode_item arm_cccc_0111_____xxx1_table[] = { + /* Media instructions */ + + /* UNDEFINED cccc 0111 1111 xxxx xxxx xxxx 1111 xxxx */ + DECODE_REJECT (0x0ff000f0, 0x07f000f0), + + /* SMLALD cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */ + /* SMLSLD cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */ + DECODE_EMULATEX (0x0ff00090, 0x07400010, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc, + REGS(NOPC, NOPC, NOPC, 0, NOPC)), + + /* SMUAD cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx */ + /* SMUSD cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx */ + DECODE_OR (0x0ff0f090, 0x0700f010), + /* SMMUL cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx */ + DECODE_OR (0x0ff0f0d0, 0x0750f010), + /* USAD8 cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx */ + DECODE_EMULATEX (0x0ff0f0f0, 0x0780f010, emulate_rd16rn12rm0rs8_rwflags_nopc, + REGS(NOPC, 0, NOPC, 0, NOPC)), + + /* SMLAD cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx */ + /* SMLSD cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx */ + DECODE_OR (0x0ff00090, 0x07000010), + /* SMMLA cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx */ + DECODE_OR (0x0ff000d0, 0x07500010), + /* USADA8 cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx */ + DECODE_EMULATEX (0x0ff000f0, 0x07800010, emulate_rd16rn12rm0rs8_rwflags_nopc, + REGS(NOPC, NOPCX, NOPC, 0, NOPC)), + + /* SMMLS cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx */ + DECODE_EMULATEX (0x0ff000d0, 0x075000d0, emulate_rd16rn12rm0rs8_rwflags_nopc, + REGS(NOPC, NOPC, NOPC, 0, NOPC)), + + /* SBFX cccc 0111 101x xxxx xxxx xxxx x101 xxxx */ + /* UBFX cccc 0111 111x xxxx xxxx xxxx x101 xxxx */ + DECODE_EMULATEX (0x0fa00070, 0x07a00050, emulate_rd12rm0_noflags_nopc, + REGS(0, NOPC, 0, 0, NOPC)), + + /* BFC cccc 0111 110x xxxx xxxx xxxx x001 1111 */ + DECODE_EMULATEX (0x0fe0007f, 0x07c0001f, emulate_rd12rm0_noflags_nopc, + REGS(0, NOPC, 0, 0, 0)), + + /* BFI cccc 0111 110x xxxx xxxx xxxx x001 xxxx */ + DECODE_EMULATEX (0x0fe00070, 0x07c00010, emulate_rd12rm0_noflags_nopc, + REGS(0, NOPC, 0, 0, NOPCX)), + + DECODE_END +}; + +static const union decode_item arm_cccc_01xx_table[] = { + /* Load/store word and unsigned byte */ + + /* LDRB/STRB pc,[...] 
cccc 01xx x0xx xxxx xxxx xxxx xxxx xxxx */ + DECODE_REJECT (0x0c40f000, 0x0440f000), + + /* STRT cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */ + /* LDRT cccc 01x0 x011 xxxx xxxx xxxx xxxx xxxx */ + /* STRBT cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */ + /* LDRBT cccc 01x0 x111 xxxx xxxx xxxx xxxx xxxx */ + DECODE_REJECT (0x0d200000, 0x04200000), + + /* STR (immediate) cccc 010x x0x0 xxxx xxxx xxxx xxxx xxxx */ + /* STRB (immediate) cccc 010x x1x0 xxxx xxxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0x0e100000, 0x04000000, emulate_str, + REGS(NOPCWB, ANY, 0, 0, 0)), + + /* LDR (immediate) cccc 010x x0x1 xxxx xxxx xxxx xxxx xxxx */ + /* LDRB (immediate) cccc 010x x1x1 xxxx xxxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0x0e100000, 0x04100000, emulate_ldr, + REGS(NOPCWB, ANY, 0, 0, 0)), + + /* STR (register) cccc 011x x0x0 xxxx xxxx xxxx xxxx xxxx */ + /* STRB (register) cccc 011x x1x0 xxxx xxxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0x0e100000, 0x06000000, emulate_str, + REGS(NOPCWB, ANY, 0, 0, NOPC)), + + /* LDR (register) cccc 011x x0x1 xxxx xxxx xxxx xxxx xxxx */ + /* LDRB (register) cccc 011x x1x1 xxxx xxxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0x0e100000, 0x06100000, emulate_ldr, + REGS(NOPCWB, ANY, 0, 0, NOPC)), + + DECODE_END +}; + +static const union decode_item arm_cccc_100x_table[] = { + /* Block data transfer instructions */ + + /* LDM cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ + /* STM cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */ + DECODE_CUSTOM (0x0e400000, 0x08000000, kprobe_decode_ldmstm), + + /* STM (user registers) cccc 100x x1x0 xxxx xxxx xxxx xxxx xxxx */ + /* LDM (user registers) cccc 100x x1x1 xxxx 0xxx xxxx xxxx xxxx */ + /* LDM (exception ret) cccc 100x x1x1 xxxx 1xxx xxxx xxxx xxxx */ + DECODE_END +}; + +const union decode_item kprobe_decode_arm_table[] = { + /* + * Unconditional instructions + * 1111 xxxx xxxx xxxx xxxx xxxx xxxx xxxx + */ + DECODE_TABLE (0xf0000000, 0xf0000000, arm_1111_table), + + /* + * Miscellaneous instructions + * cccc 0001 0xx0 xxxx xxxx xxxx 0xxx xxxx + */ + DECODE_TABLE (0x0f900080, 0x01000000, arm_cccc_0001_0xx0____0xxx_table), + + /* + * Halfword multiply and multiply-accumulate + * cccc 0001 0xx0 xxxx xxxx xxxx 1xx0 xxxx + */ + DECODE_TABLE (0x0f900090, 0x01000080, arm_cccc_0001_0xx0____1xx0_table), + + /* + * Multiply and multiply-accumulate + * cccc 0000 xxxx xxxx xxxx xxxx 1001 xxxx + */ + DECODE_TABLE (0x0f0000f0, 0x00000090, arm_cccc_0000_____1001_table), + + /* + * Synchronization primitives + * cccc 0001 xxxx xxxx xxxx xxxx 1001 xxxx + */ + DECODE_TABLE (0x0f0000f0, 0x01000090, arm_cccc_0001_____1001_table), + + /* + * Extra load/store instructions + * cccc 000x xxxx xxxx xxxx xxxx 1xx1 xxxx + */ + DECODE_TABLE (0x0e000090, 0x00000090, arm_cccc_000x_____1xx1_table), + + /* + * Data-processing (register) + * cccc 000x xxxx xxxx xxxx xxxx xxx0 xxxx + * Data-processing (register-shifted register) + * cccc 000x xxxx xxxx xxxx xxxx 0xx1 xxxx + */ + DECODE_TABLE (0x0e000000, 0x00000000, arm_cccc_000x_table), + + /* + * Data-processing (immediate) + * cccc 001x xxxx xxxx xxxx xxxx xxxx xxxx + */ + DECODE_TABLE (0x0e000000, 0x02000000, arm_cccc_001x_table), + + /* + * Media instructions + * cccc 011x xxxx xxxx xxxx xxxx xxx1 xxxx + */ + DECODE_TABLE (0x0f000010, 0x06000010, arm_cccc_0110_____xxx1_table), + DECODE_TABLE (0x0f000010, 0x07000010, arm_cccc_0111_____xxx1_table), + + /* + * Load/store word and unsigned byte + * cccc 01xx xxxx xxxx xxxx xxxx xxxx xxxx + */ + DECODE_TABLE (0x0c000000, 0x04000000, arm_cccc_01xx_table), + + /* + * Block data transfer 
instructions + * cccc 100x xxxx xxxx xxxx xxxx xxxx xxxx + */ + DECODE_TABLE (0x0e000000, 0x08000000, arm_cccc_100x_table), + + /* B cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */ + /* BL cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */ + DECODE_SIMULATE (0x0e000000, 0x0a000000, simulate_bbl), + + /* + * Supervisor Call and coprocessor instructions + */ + + /* MCRR cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx */ + /* MRRC cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx */ + /* LDC cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ + /* STC cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ + /* CDP cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ + /* MCR cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ + /* MRC cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ + /* SVC cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */ + DECODE_REJECT (0x0c000000, 0x0c000000), + + DECODE_END +}; + +static void __kprobes arm_singlestep(struct kprobe *p, struct pt_regs *regs) +{ + regs->ARM_pc += 4; + p->ainsn.insn_handler(p, regs); +} + +/* Return: + * INSN_REJECTED If instruction is one not allowed to kprobe, + * INSN_GOOD If instruction is supported and uses instruction slot, + * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. + * + * For instructions we don't want to kprobe (INSN_REJECTED return result): + * These are generally ones that modify the processor state, making + * them "hard" to simulate, such as switching processor modes or + * making accesses in alternate modes. Any of these could be simulated + * if the work was put into it, but the return would be low considering + * they should also be very rare. + */ +enum kprobe_insn __kprobes +arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) +{ + asi->insn_singlestep = arm_singlestep; + asi->insn_check_cc = kprobe_condition_checks[insn>>28]; + return kprobe_decode_insn(insn, asi, kprobe_decode_arm_table, false); +} diff --git a/arch/arm/kernel/kprobes-common.c b/arch/arm/kernel/kprobes-common.c new file mode 100644 index 000000000000..a5394fb4e4e0 --- /dev/null +++ b/arch/arm/kernel/kprobes-common.c @@ -0,0 +1,577 @@ +/* + * arch/arm/kernel/kprobes-common.c + * + * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>. + * + * Some contents moved here from arch/arm/include/asm/kprobes-arm.c which is + * Copyright (C) 2006, 2007 Motorola Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/kprobes.h> + +#include "kprobes.h" + + +#ifndef find_str_pc_offset + +/* + * For STR and STM instructions, an ARM core may choose to use either + * a +8 or a +12 displacement from the current instruction's address. + * Whichever value is chosen for a given core, it must be the same for + * both instructions and may not change. This function measures it.
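+ * + * For example, on a core that uses the +8 displacement, executing + * + * str pc, [sp] @ with this str at address A + * + * stores the value A+8 to memory, while a +12 core stores A+12. The result + * measured below is kept in str_pc_offset so that emulation of an STR or STM + * with PC in its register list reproduces exactly what this CPU would store.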
+ */ + +int str_pc_offset; + +void __init find_str_pc_offset(void) +{ + int addr, scratch, ret; + + __asm__ ( + "sub %[ret], pc, #4 \n\t" + "str pc, %[addr] \n\t" + "ldr %[scr], %[addr] \n\t" + "sub %[ret], %[scr], %[ret] \n\t" + : [ret] "=r" (ret), [scr] "=r" (scratch), [addr] "+m" (addr)); + + str_pc_offset = ret; +} + +#endif /* !find_str_pc_offset */ + + +#ifndef test_load_write_pc_interworking + +bool load_write_pc_interworks; + +void __init test_load_write_pc_interworking(void) +{ + int arch = cpu_architecture(); + BUG_ON(arch == CPU_ARCH_UNKNOWN); + load_write_pc_interworks = arch >= CPU_ARCH_ARMv5T; +} + +#endif /* !test_load_write_pc_interworking */ + + +#ifndef test_alu_write_pc_interworking + +bool alu_write_pc_interworks; + +void __init test_alu_write_pc_interworking(void) +{ + int arch = cpu_architecture(); + BUG_ON(arch == CPU_ARCH_UNKNOWN); + alu_write_pc_interworks = arch >= CPU_ARCH_ARMv7; +} + +#endif /* !test_alu_write_pc_interworking */ + + +void __init arm_kprobe_decode_init(void) +{ + find_str_pc_offset(); + test_load_write_pc_interworking(); + test_alu_write_pc_interworking(); +} + + +static unsigned long __kprobes __check_eq(unsigned long cpsr) +{ + return cpsr & PSR_Z_BIT; +} + +static unsigned long __kprobes __check_ne(unsigned long cpsr) +{ + return (~cpsr) & PSR_Z_BIT; +} + +static unsigned long __kprobes __check_cs(unsigned long cpsr) +{ + return cpsr & PSR_C_BIT; +} + +static unsigned long __kprobes __check_cc(unsigned long cpsr) +{ + return (~cpsr) & PSR_C_BIT; +} + +static unsigned long __kprobes __check_mi(unsigned long cpsr) +{ + return cpsr & PSR_N_BIT; +} + +static unsigned long __kprobes __check_pl(unsigned long cpsr) +{ + return (~cpsr) & PSR_N_BIT; +} + +static unsigned long __kprobes __check_vs(unsigned long cpsr) +{ + return cpsr & PSR_V_BIT; +} + +static unsigned long __kprobes __check_vc(unsigned long cpsr) +{ + return (~cpsr) & PSR_V_BIT; +} + +static unsigned long __kprobes __check_hi(unsigned long cpsr) +{ + cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ + return cpsr & PSR_C_BIT; +} + +static unsigned long __kprobes __check_ls(unsigned long cpsr) +{ + cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ + return (~cpsr) & PSR_C_BIT; +} + +static unsigned long __kprobes __check_ge(unsigned long cpsr) +{ + cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ + return (~cpsr) & PSR_N_BIT; +} + +static unsigned long __kprobes __check_lt(unsigned long cpsr) +{ + cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ + return cpsr & PSR_N_BIT; +} + +static unsigned long __kprobes __check_gt(unsigned long cpsr) +{ + unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ + temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */ + return (~temp) & PSR_N_BIT; +} + +static unsigned long __kprobes __check_le(unsigned long cpsr) +{ + unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ + temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */ + return temp & PSR_N_BIT; +} + +static unsigned long __kprobes __check_al(unsigned long cpsr) +{ + return true; +} + +kprobe_check_cc * const kprobe_condition_checks[16] = { + &__check_eq, &__check_ne, &__check_cs, &__check_cc, + &__check_mi, &__check_pl, &__check_vs, &__check_vc, + &__check_hi, &__check_ls, &__check_ge, &__check_lt, + &__check_gt, &__check_le, &__check_al, &__check_al +}; + + +void __kprobes kprobe_simulate_nop(struct kprobe *p, struct pt_regs *regs) +{ +} + +void __kprobes kprobe_emulate_none(struct kprobe *p, struct pt_regs *regs) +{ + p->ainsn.insn_fn(); +} + +static void 
__kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + int rn = (insn >> 16) & 0xf; + int lbit = insn & (1 << 20); + int wbit = insn & (1 << 21); + int ubit = insn & (1 << 23); + int pbit = insn & (1 << 24); + long *addr = (long *)regs->uregs[rn]; + int reg_bit_vector; + int reg_count; + + reg_count = 0; + reg_bit_vector = insn & 0xffff; + while (reg_bit_vector) { + reg_bit_vector &= (reg_bit_vector - 1); + ++reg_count; + } + + if (!ubit) + addr -= reg_count; + addr += (!pbit == !ubit); + + reg_bit_vector = insn & 0xffff; + while (reg_bit_vector) { + int reg = __ffs(reg_bit_vector); + reg_bit_vector &= (reg_bit_vector - 1); + if (lbit) + regs->uregs[reg] = *addr++; + else + *addr++ = regs->uregs[reg]; + } + + if (wbit) { + if (!ubit) + addr -= reg_count; + addr -= (!pbit == !ubit); + regs->uregs[rn] = (long)addr; + } +} + +static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs) +{ + regs->ARM_pc = (long)p->addr + str_pc_offset; + simulate_ldm1stm1(p, regs); + regs->ARM_pc = (long)p->addr + 4; +} + +static void __kprobes simulate_ldm1_pc(struct kprobe *p, struct pt_regs *regs) +{ + simulate_ldm1stm1(p, regs); + load_write_pc(regs->ARM_pc, regs); +} + +static void __kprobes +emulate_generic_r0_12_noflags(struct kprobe *p, struct pt_regs *regs) +{ + register void *rregs asm("r1") = regs; + register void *rfn asm("lr") = p->ainsn.insn_fn; + + __asm__ __volatile__ ( + "stmdb sp!, {%[regs], r11} \n\t" + "ldmia %[regs], {r0-r12} \n\t" +#if __LINUX_ARM_ARCH__ >= 6 + "blx %[fn] \n\t" +#else + "str %[fn], [sp, #-4]! \n\t" + "adr lr, 1f \n\t" + "ldr pc, [sp], #4 \n\t" + "1: \n\t" +#endif + "ldr lr, [sp], #4 \n\t" /* lr = regs */ + "stmia lr, {r0-r12} \n\t" + "ldr r11, [sp], #4 \n\t" + : [regs] "=r" (rregs), [fn] "=r" (rfn) + : "0" (rregs), "1" (rfn) + : "r0", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r12", "memory", "cc" + ); +} + +static void __kprobes +emulate_generic_r2_14_noflags(struct kprobe *p, struct pt_regs *regs) +{ + emulate_generic_r0_12_noflags(p, (struct pt_regs *)(regs->uregs+2)); +} + +static void __kprobes +emulate_ldm_r3_15(struct kprobe *p, struct pt_regs *regs) +{ + emulate_generic_r0_12_noflags(p, (struct pt_regs *)(regs->uregs+3)); + load_write_pc(regs->ARM_pc, regs); +} + +enum kprobe_insn __kprobes +kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi) +{ + kprobe_insn_handler_t *handler = 0; + unsigned reglist = insn & 0xffff; + int is_ldm = insn & 0x100000; + int rn = (insn >> 16) & 0xf; + + if (rn <= 12 && (reglist & 0xe000) == 0) { + /* Instruction only uses registers in the range R0..R12 */ + handler = emulate_generic_r0_12_noflags; + + } else if (rn >= 2 && (reglist & 0x8003) == 0) { + /* Instruction only uses registers in the range R2..R14 */ + rn -= 2; + reglist >>= 2; + handler = emulate_generic_r2_14_noflags; + + } else if (rn >= 3 && (reglist & 0x0007) == 0) { + /* Instruction only uses registers in the range R3..R15 */ + if (is_ldm && (reglist & 0x8000)) { + rn -= 3; + reglist >>= 3; + handler = emulate_ldm_r3_15; + } + } + + if (handler) { + /* We can emulate the instruction in (possibly) modified form */ + asi->insn[0] = (insn & 0xfff00000) | (rn << 16) | reglist; + asi->insn_handler = handler; + return INSN_GOOD; + } + + /* Fallback to slower simulation... */ + if (reglist & 0x8000) + handler = is_ldm ? 
simulate_ldm1_pc : simulate_stm1_pc; + else + handler = simulate_ldm1stm1; + asi->insn_handler = handler; + return INSN_GOOD_NO_SLOT; +} + + +/* + * Prepare an instruction slot to receive an instruction for emulating. + * This is done by placing a subroutine return after the location where the + * instruction will be placed. We also modify ARM instructions to be + * unconditional as the condition code will already be checked before any + * emulation handler is called. + */ +static kprobe_opcode_t __kprobes +prepare_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi, + bool thumb) +{ +#ifdef CONFIG_THUMB2_KERNEL + if (thumb) { + u16 *thumb_insn = (u16 *)asi->insn; + thumb_insn[1] = 0x4770; /* Thumb bx lr */ + thumb_insn[2] = 0x4770; /* Thumb bx lr */ + return insn; + } + asi->insn[1] = 0xe12fff1e; /* ARM bx lr */ +#else + asi->insn[1] = 0xe1a0f00e; /* mov pc, lr */ +#endif + /* Make an ARM instruction unconditional */ + if (insn < 0xe0000000) + insn = (insn | 0xe0000000) & ~0x10000000; + return insn; +} + +/* + * Write a (probably modified) instruction into the slot previously prepared by + * prepare_emulated_insn + */ +static void __kprobes +set_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi, + bool thumb) +{ +#ifdef CONFIG_THUMB2_KERNEL + if (thumb) { + u16 *ip = (u16 *)asi->insn; + if (is_wide_instruction(insn)) + *ip++ = insn >> 16; + *ip++ = insn; + return; + } +#endif + asi->insn[0] = insn; +} + +/* + * When we modify the register numbers encoded in an instruction to be emulated, + * the new values come from this define. For ARM and 32-bit Thumb instructions + * this gives... + * + * bit position 16 12 8 4 0 + * ---------------+---+---+---+---+---+ + * register r2 r0 r1 -- r3 + */ +#define INSN_NEW_BITS 0x00020103 + +/* Each nibble has same value as that at INSN_NEW_BITS bit 16 */ +#define INSN_SAMEAS16_BITS 0x22222222 + +/* + * Validate and modify each of the registers encoded in an instruction. + * + * Each nibble in regs contains a value from enum decode_reg_type. For each + * non-zero value, the corresponding nibble in pinsn is validated and modified + * according to the type. + */ +static bool __kprobes decode_regs(kprobe_opcode_t* pinsn, u32 regs) +{ + kprobe_opcode_t insn = *pinsn; + kprobe_opcode_t mask = 0xf; /* Start at least significant nibble */ + + for (; regs != 0; regs >>= 4, mask <<= 4) { + + kprobe_opcode_t new_bits = INSN_NEW_BITS; + + switch (regs & 0xf) { + + case REG_TYPE_NONE: + /* Nibble not a register, skip to next */ + continue; + + case REG_TYPE_ANY: + /* Any register is allowed */ + break; + + case REG_TYPE_SAMEAS16: + /* Replace register with same as at bit position 16 */ + new_bits = INSN_SAMEAS16_BITS; + break; + + case REG_TYPE_SP: + /* Only allow SP (R13) */ + if ((insn ^ 0xdddddddd) & mask) + goto reject; + break; + + case REG_TYPE_PC: + /* Only allow PC (R15) */ + if ((insn ^ 0xffffffff) & mask) + goto reject; + break; + + case REG_TYPE_NOSP: + /* Reject SP (R13) */ + if (((insn ^ 0xdddddddd) & mask) == 0) + goto reject; + break; + + case REG_TYPE_NOSPPC: + case REG_TYPE_NOSPPCX: + /* Reject SP and PC (R13 and R15) */ + if (((insn ^ 0xdddddddd) & 0xdddddddd & mask) == 0) + goto reject; + break; + + case REG_TYPE_NOPCWB: + if (!is_writeback(insn)) + break; /* No writeback, so any register is OK */ + /* fall through... */ + case REG_TYPE_NOPC: + case REG_TYPE_NOPCX: + /* Reject PC (R15) */ + if (((insn ^ 0xffffffff) & mask) == 0) + goto reject; + break; + } + + /* Replace value of nibble with new register number... 
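For example, a REG_TYPE_ANY Rn nibble at bits [19:16] naming r7 is rewritten to r2, the value INSN_NEW_BITS supplies for that position, so the emulated copy always runs on the fixed registers r0-r3 regardless of which registers the probed instruction named.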
*/ + insn &= ~mask; + insn |= new_bits & mask; + } + + *pinsn = insn; + return true; + +reject: + return false; +} + +static const int decode_struct_sizes[NUM_DECODE_TYPES] = { + [DECODE_TYPE_TABLE] = sizeof(struct decode_table), + [DECODE_TYPE_CUSTOM] = sizeof(struct decode_custom), + [DECODE_TYPE_SIMULATE] = sizeof(struct decode_simulate), + [DECODE_TYPE_EMULATE] = sizeof(struct decode_emulate), + [DECODE_TYPE_OR] = sizeof(struct decode_or), + [DECODE_TYPE_REJECT] = sizeof(struct decode_reject) +}; + +/* + * kprobe_decode_insn operates on data tables in order to decode an ARM + * architecture instruction onto which a kprobe has been placed. + * + * These instruction decoding tables are a concatenation of entries, each + * of which consists of one of the following structs: + * + * decode_table + * decode_custom + * decode_simulate + * decode_emulate + * decode_or + * decode_reject + * + * Each of these starts with a struct decode_header which has the following + * fields: + * + * type_regs + * mask + * value + * + * The least significant DECODE_TYPE_BITS of type_regs contains a value + * from enum decode_type; this indicates which of the decode_* structs + * the entry contains. The value DECODE_TYPE_END indicates the end of the + * table. + * + * When the table is parsed, each entry is checked in turn to see if it + * matches the instruction to be decoded using the test: + * + * (insn & mask) == value + * + * If no match is found before the end of the table is reached then decoding + * fails with INSN_REJECTED. + * + * When a match is found, decode_regs() is called to validate and modify each + * of the registers encoded in the instruction; the data it uses to do this + * is (type_regs >> DECODE_TYPE_BITS). A validation failure will cause decoding + * to fail with INSN_REJECTED. + * + * Once the instruction has passed the above tests, further processing + * depends on the type of the table entry's decode struct.
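+ * + * As a worked example, a kprobe placed on TST r1, r2 (opcode 0xe1110002) + * reaches arm_cccc_000x_table through the Data-processing (register) entry + * of kprobe_decode_arm_table, and there matches the TST/TEQ/CMP/CMN entry + * because (0xe1110002 & 0x0f900010) == 0x01100000. decode_regs() then + * rewrites Rn and Rm to the fixed registers r2 and r3 given by INSN_NEW_BITS + * before the modified copy is written to the instruction slot.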
+ * + */ +int __kprobes +kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi, + const union decode_item *table, bool thumb) +{ + const struct decode_header *h = (struct decode_header *)table; + const struct decode_header *next; + bool matched = false; + + insn = prepare_emulated_insn(insn, asi, thumb); + + for (;; h = next) { + enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK; + u32 regs = h->type_regs.bits >> DECODE_TYPE_BITS; + + if (type == DECODE_TYPE_END) + return INSN_REJECTED; + + next = (struct decode_header *) + ((uintptr_t)h + decode_struct_sizes[type]); + + if (!matched && (insn & h->mask.bits) != h->value.bits) + continue; + + if (!decode_regs(&insn, regs)) + return INSN_REJECTED; + + switch (type) { + + case DECODE_TYPE_TABLE: { + struct decode_table *d = (struct decode_table *)h; + next = (struct decode_header *)d->table.table; + break; + } + + case DECODE_TYPE_CUSTOM: { + struct decode_custom *d = (struct decode_custom *)h; + return (*d->decoder.decoder)(insn, asi); + } + + case DECODE_TYPE_SIMULATE: { + struct decode_simulate *d = (struct decode_simulate *)h; + asi->insn_handler = d->handler.handler; + return INSN_GOOD_NO_SLOT; + } + + case DECODE_TYPE_EMULATE: { + struct decode_emulate *d = (struct decode_emulate *)h; + asi->insn_handler = d->handler.handler; + set_emulated_insn(insn, asi, thumb); + return INSN_GOOD; + } + + case DECODE_TYPE_OR: + matched = true; + break; + + case DECODE_TYPE_REJECT: + default: + return INSN_REJECTED; + } + } + } diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c deleted file mode 100644 index 15eeff6aea0e..000000000000 --- a/arch/arm/kernel/kprobes-decode.c +++ /dev/null @@ -1,1670 +0,0 @@ -/* - * arch/arm/kernel/kprobes-decode.c - * - * Copyright (C) 2006, 2007 Motorola Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/* - * We do not have hardware single-stepping on ARM, This - * effort is further complicated by the ARM not having a - * "next PC" register. Instructions that change the PC - * can't be safely single-stepped in a MP environment, so - * we have a lot of work to do: - * - * In the prepare phase: - * *) If it is an instruction that does anything - * with the CPU mode, we reject it for a kprobe. - * (This is out of laziness rather than need. The - * instructions could be simulated.) - * - * *) Otherwise, decode the instruction rewriting its - * registers to take fixed, ordered registers and - * setting a handler for it to run the instruction. - * - * In the execution phase by an instruction's handler: - * - * *) If the PC is written to by the instruction, the - * instruction must be fully simulated in software. - * - * *) Otherwise, a modified form of the instruction is - * directly executed. Its handler calls the - * instruction in insn[0]. In insn[1] is a - * "mov pc, lr" to return. - * - * Before calling, load up the reordered registers - * from the original instruction's registers. If one - * of the original input registers is the PC, compute - * and adjust the appropriate input register. 
- * - * After call completes, copy the output registers to - * the original instruction's original registers. - * - * We don't use a real breakpoint instruction since that - * would have us in the kernel go from SVC mode to SVC - * mode losing the link register. Instead we use an - * undefined instruction. To simplify processing, the - * undefined instruction used for kprobes must be reserved - * exclusively for kprobes use. - * - * TODO: ifdef out some instruction decoding based on architecture. - */ - -#include <linux/kernel.h> -#include <linux/kprobes.h> - -#define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit))))) - -#define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25) - -#define is_r15(insn, bitpos) (((insn) & (0xf << bitpos)) == (0xf << bitpos)) - -/* - * Test if load/store instructions writeback the address register. - * if P (bit 24) == 0 or W (bit 21) == 1 - */ -#define is_writeback(insn) ((insn ^ 0x01000000) & 0x01200000) - -#define PSR_fs (PSR_f|PSR_s) - -#define KPROBE_RETURN_INSTRUCTION 0xe1a0f00e /* mov pc, lr */ - -typedef long (insn_0arg_fn_t)(void); -typedef long (insn_1arg_fn_t)(long); -typedef long (insn_2arg_fn_t)(long, long); -typedef long (insn_3arg_fn_t)(long, long, long); -typedef long (insn_4arg_fn_t)(long, long, long, long); -typedef long long (insn_llret_0arg_fn_t)(void); -typedef long long (insn_llret_3arg_fn_t)(long, long, long); -typedef long long (insn_llret_4arg_fn_t)(long, long, long, long); - -union reg_pair { - long long dr; -#ifdef __LITTLE_ENDIAN - struct { long r0, r1; }; -#else - struct { long r1, r0; }; -#endif -}; - -/* - * For STR and STM instructions, an ARM core may choose to use either - * a +8 or a +12 displacement from the current instruction's address. - * Whichever value is chosen for a given core, it must be the same for - * both instructions and may not change. This function measures it. - */ - -static int str_pc_offset; - -static void __init find_str_pc_offset(void) -{ - int addr, scratch, ret; - - __asm__ ( - "sub %[ret], pc, #4 \n\t" - "str pc, %[addr] \n\t" - "ldr %[scr], %[addr] \n\t" - "sub %[ret], %[scr], %[ret] \n\t" - : [ret] "=r" (ret), [scr] "=r" (scratch), [addr] "+m" (addr)); - - str_pc_offset = ret; -} - -/* - * The insnslot_?arg_r[w]flags() functions below are to keep the - * msr -> *fn -> mrs instruction sequences indivisible so that - * the state of the CPSR flags aren't inadvertently modified - * just before or just after the call.
- */ - -static inline long __kprobes -insnslot_0arg_rflags(long cpsr, insn_0arg_fn_t *fn) -{ - register long ret asm("r0"); - - __asm__ __volatile__ ( - "msr cpsr_fs, %[cpsr] \n\t" - "mov lr, pc \n\t" - "mov pc, %[fn] \n\t" - : "=r" (ret) - : [cpsr] "r" (cpsr), [fn] "r" (fn) - : "lr", "cc" - ); - return ret; -} - -static inline long long __kprobes -insnslot_llret_0arg_rflags(long cpsr, insn_llret_0arg_fn_t *fn) -{ - register long ret0 asm("r0"); - register long ret1 asm("r1"); - union reg_pair fnr; - - __asm__ __volatile__ ( - "msr cpsr_fs, %[cpsr] \n\t" - "mov lr, pc \n\t" - "mov pc, %[fn] \n\t" - : "=r" (ret0), "=r" (ret1) - : [cpsr] "r" (cpsr), [fn] "r" (fn) - : "lr", "cc" - ); - fnr.r0 = ret0; - fnr.r1 = ret1; - return fnr.dr; -} - -static inline long __kprobes -insnslot_1arg_rflags(long r0, long cpsr, insn_1arg_fn_t *fn) -{ - register long rr0 asm("r0") = r0; - register long ret asm("r0"); - - __asm__ __volatile__ ( - "msr cpsr_fs, %[cpsr] \n\t" - "mov lr, pc \n\t" - "mov pc, %[fn] \n\t" - : "=r" (ret) - : "0" (rr0), [cpsr] "r" (cpsr), [fn] "r" (fn) - : "lr", "cc" - ); - return ret; -} - -static inline long __kprobes -insnslot_2arg_rflags(long r0, long r1, long cpsr, insn_2arg_fn_t *fn) -{ - register long rr0 asm("r0") = r0; - register long rr1 asm("r1") = r1; - register long ret asm("r0"); - - __asm__ __volatile__ ( - "msr cpsr_fs, %[cpsr] \n\t" - "mov lr, pc \n\t" - "mov pc, %[fn] \n\t" - : "=r" (ret) - : "0" (rr0), "r" (rr1), - [cpsr] "r" (cpsr), [fn] "r" (fn) - : "lr", "cc" - ); - return ret; -} - -static inline long __kprobes -insnslot_3arg_rflags(long r0, long r1, long r2, long cpsr, insn_3arg_fn_t *fn) -{ - register long rr0 asm("r0") = r0; - register long rr1 asm("r1") = r1; - register long rr2 asm("r2") = r2; - register long ret asm("r0"); - - __asm__ __volatile__ ( - "msr cpsr_fs, %[cpsr] \n\t" - "mov lr, pc \n\t" - "mov pc, %[fn] \n\t" - : "=r" (ret) - : "0" (rr0), "r" (rr1), "r" (rr2), - [cpsr] "r" (cpsr), [fn] "r" (fn) - : "lr", "cc" - ); - return ret; -} - -static inline long long __kprobes -insnslot_llret_3arg_rflags(long r0, long r1, long r2, long cpsr, - insn_llret_3arg_fn_t *fn) -{ - register long rr0 asm("r0") = r0; - register long rr1 asm("r1") = r1; - register long rr2 asm("r2") = r2; - register long ret0 asm("r0"); - register long ret1 asm("r1"); - union reg_pair fnr; - - __asm__ __volatile__ ( - "msr cpsr_fs, %[cpsr] \n\t" - "mov lr, pc \n\t" - "mov pc, %[fn] \n\t" - : "=r" (ret0), "=r" (ret1) - : "0" (rr0), "r" (rr1), "r" (rr2), - [cpsr] "r" (cpsr), [fn] "r" (fn) - : "lr", "cc" - ); - fnr.r0 = ret0; - fnr.r1 = ret1; - return fnr.dr; -} - -static inline long __kprobes -insnslot_4arg_rflags(long r0, long r1, long r2, long r3, long cpsr, - insn_4arg_fn_t *fn) -{ - register long rr0 asm("r0") = r0; - register long rr1 asm("r1") = r1; - register long rr2 asm("r2") = r2; - register long rr3 asm("r3") = r3; - register long ret asm("r0"); - - __asm__ __volatile__ ( - "msr cpsr_fs, %[cpsr] \n\t" - "mov lr, pc \n\t" - "mov pc, %[fn] \n\t" - : "=r" (ret) - : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3), - [cpsr] "r" (cpsr), [fn] "r" (fn) - : "lr", "cc" - ); - return ret; -} - -static inline long __kprobes -insnslot_1arg_rwflags(long r0, long *cpsr, insn_1arg_fn_t *fn) -{ - register long rr0 asm("r0") = r0; - register long ret asm("r0"); - long oldcpsr = *cpsr; - long newcpsr; - - __asm__ __volatile__ ( - "msr cpsr_fs, %[oldcpsr] \n\t" - "mov lr, pc \n\t" - "mov pc, %[fn] \n\t" - "mrs %[newcpsr], cpsr \n\t" - : "=r" (ret), [newcpsr] "=r" (newcpsr) - : "0" (rr0), [oldcpsr] 
"r" (oldcpsr), [fn] "r" (fn) - : "lr", "cc" - ); - *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs); - return ret; -} - -static inline long __kprobes -insnslot_2arg_rwflags(long r0, long r1, long *cpsr, insn_2arg_fn_t *fn) -{ - register long rr0 asm("r0") = r0; - register long rr1 asm("r1") = r1; - register long ret asm("r0"); - long oldcpsr = *cpsr; - long newcpsr; - - __asm__ __volatile__ ( - "msr cpsr_fs, %[oldcpsr] \n\t" - "mov lr, pc \n\t" - "mov pc, %[fn] \n\t" - "mrs %[newcpsr], cpsr \n\t" - : "=r" (ret), [newcpsr] "=r" (newcpsr) - : "0" (rr0), "r" (rr1), [oldcpsr] "r" (oldcpsr), [fn] "r" (fn) - : "lr", "cc" - ); - *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs); - return ret; -} - -static inline long __kprobes -insnslot_3arg_rwflags(long r0, long r1, long r2, long *cpsr, - insn_3arg_fn_t *fn) -{ - register long rr0 asm("r0") = r0; - register long rr1 asm("r1") = r1; - register long rr2 asm("r2") = r2; - register long ret asm("r0"); - long oldcpsr = *cpsr; - long newcpsr; - - __asm__ __volatile__ ( - "msr cpsr_fs, %[oldcpsr] \n\t" - "mov lr, pc \n\t" - "mov pc, %[fn] \n\t" - "mrs %[newcpsr], cpsr \n\t" - : "=r" (ret), [newcpsr] "=r" (newcpsr) - : "0" (rr0), "r" (rr1), "r" (rr2), - [oldcpsr] "r" (oldcpsr), [fn] "r" (fn) - : "lr", "cc" - ); - *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs); - return ret; -} - -static inline long __kprobes -insnslot_4arg_rwflags(long r0, long r1, long r2, long r3, long *cpsr, - insn_4arg_fn_t *fn) -{ - register long rr0 asm("r0") = r0; - register long rr1 asm("r1") = r1; - register long rr2 asm("r2") = r2; - register long rr3 asm("r3") = r3; - register long ret asm("r0"); - long oldcpsr = *cpsr; - long newcpsr; - - __asm__ __volatile__ ( - "msr cpsr_fs, %[oldcpsr] \n\t" - "mov lr, pc \n\t" - "mov pc, %[fn] \n\t" - "mrs %[newcpsr], cpsr \n\t" - : "=r" (ret), [newcpsr] "=r" (newcpsr) - : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3), - [oldcpsr] "r" (oldcpsr), [fn] "r" (fn) - : "lr", "cc" - ); - *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs); - return ret; -} - -static inline long long __kprobes -insnslot_llret_4arg_rwflags(long r0, long r1, long r2, long r3, long *cpsr, - insn_llret_4arg_fn_t *fn) -{ - register long rr0 asm("r0") = r0; - register long rr1 asm("r1") = r1; - register long rr2 asm("r2") = r2; - register long rr3 asm("r3") = r3; - register long ret0 asm("r0"); - register long ret1 asm("r1"); - long oldcpsr = *cpsr; - long newcpsr; - union reg_pair fnr; - - __asm__ __volatile__ ( - "msr cpsr_fs, %[oldcpsr] \n\t" - "mov lr, pc \n\t" - "mov pc, %[fn] \n\t" - "mrs %[newcpsr], cpsr \n\t" - : "=r" (ret0), "=r" (ret1), [newcpsr] "=r" (newcpsr) - : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3), - [oldcpsr] "r" (oldcpsr), [fn] "r" (fn) - : "lr", "cc" - ); - *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs); - fnr.r0 = ret0; - fnr.r1 = ret1; - return fnr.dr; -} - -/* - * To avoid the complications of mimicing single-stepping on a - * processor without a Next-PC or a single-step mode, and to - * avoid having to deal with the side-effects of boosting, we - * simulate or emulate (almost) all ARM instructions. - * - * "Simulation" is where the instruction's behavior is duplicated in - * C code. "Emulation" is where the original instruction is rewritten - * and executed, often by altering its registers. - * - * By having all behavior of the kprobe'd instruction completed before - * returning from the kprobe_handler(), all locks (scheduler and - * interrupt) can safely be released. 
There is no need for secondary - * breakpoints, no race with MP or preemptable kernels, nor having to - * clean up resources counts at a later time impacting overall system - * performance. By rewriting the instruction, only the minimum registers - * need to be loaded and saved back optimizing performance. - * - * Calling the insnslot_*_rwflags version of a function doesn't hurt - * anything even when the CPSR flags aren't updated by the - * instruction. It's just a little slower in return for saving - * a little space by not having a duplicate function that doesn't - * update the flags. (The same optimization can be said for - * instructions that do or don't perform register writeback) - * Also, instructions can either read the flags, only write the - * flags, or read and write the flags. To save combinations - * rather than for sheer performance, flag functions just assume - * read and write of flags. - */ - -static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs) -{ - kprobe_opcode_t insn = p->opcode; - long iaddr = (long)p->addr; - int disp = branch_displacement(insn); - - if (insn & (1 << 24)) - regs->ARM_lr = iaddr + 4; - - regs->ARM_pc = iaddr + 8 + disp; -} - -static void __kprobes simulate_blx1(struct kprobe *p, struct pt_regs *regs) -{ - kprobe_opcode_t insn = p->opcode; - long iaddr = (long)p->addr; - int disp = branch_displacement(insn); - - regs->ARM_lr = iaddr + 4; - regs->ARM_pc = iaddr + 8 + disp + ((insn >> 23) & 0x2); - regs->ARM_cpsr |= PSR_T_BIT; -} - -static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs) -{ - kprobe_opcode_t insn = p->opcode; - int rm = insn & 0xf; - long rmv = regs->uregs[rm]; - - if (insn & (1 << 5)) - regs->ARM_lr = (long)p->addr + 4; - - regs->ARM_pc = rmv & ~0x1; - regs->ARM_cpsr &= ~PSR_T_BIT; - if (rmv & 0x1) - regs->ARM_cpsr |= PSR_T_BIT; -} - -static void __kprobes simulate_mrs(struct kprobe *p, struct pt_regs *regs) -{ - kprobe_opcode_t insn = p->opcode; - int rd = (insn >> 12) & 0xf; - unsigned long mask = 0xf8ff03df; /* Mask out execution state */ - regs->uregs[rd] = regs->ARM_cpsr & mask; -} - -static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs) -{ - kprobe_opcode_t insn = p->opcode; - int rn = (insn >> 16) & 0xf; - int lbit = insn & (1 << 20); - int wbit = insn & (1 << 21); - int ubit = insn & (1 << 23); - int pbit = insn & (1 << 24); - long *addr = (long *)regs->uregs[rn]; - int reg_bit_vector; - int reg_count; - - reg_count = 0; - reg_bit_vector = insn & 0xffff; - while (reg_bit_vector) { - reg_bit_vector &= (reg_bit_vector - 1); - ++reg_count; - } - - if (!ubit) - addr -= reg_count; - addr += (!pbit == !ubit); - - reg_bit_vector = insn & 0xffff; - while (reg_bit_vector) { - int reg = __ffs(reg_bit_vector); - reg_bit_vector &= (reg_bit_vector - 1); - if (lbit) - regs->uregs[reg] = *addr++; - else - *addr++ = regs->uregs[reg]; - } - - if (wbit) { - if (!ubit) - addr -= reg_count; - addr -= (!pbit == !ubit); - regs->uregs[rn] = (long)addr; - } -} - -static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs) -{ - regs->ARM_pc = (long)p->addr + str_pc_offset; - simulate_ldm1stm1(p, regs); - regs->ARM_pc = (long)p->addr + 4; -} - -static void __kprobes simulate_mov_ipsp(struct kprobe *p, struct pt_regs *regs) -{ - regs->uregs[12] = regs->uregs[13]; -} - -static void __kprobes emulate_ldrd(struct kprobe *p, struct pt_regs *regs) -{ - insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - long ppc = 
(long)p->addr + 8; - int rd = (insn >> 12) & 0xf; - int rn = (insn >> 16) & 0xf; - int rm = insn & 0xf; /* rm may be invalid, don't care. */ - long rmv = (rm == 15) ? ppc : regs->uregs[rm]; - long rnv = (rn == 15) ? ppc : regs->uregs[rn]; - - /* Not following the C calling convention here, so need asm(). */ - __asm__ __volatile__ ( - "ldr r0, %[rn] \n\t" - "ldr r1, %[rm] \n\t" - "msr cpsr_fs, %[cpsr]\n\t" - "mov lr, pc \n\t" - "mov pc, %[i_fn] \n\t" - "str r0, %[rn] \n\t" /* in case of writeback */ - "str r2, %[rd0] \n\t" - "str r3, %[rd1] \n\t" - : [rn] "+m" (rnv), - [rd0] "=m" (regs->uregs[rd]), - [rd1] "=m" (regs->uregs[rd+1]) - : [rm] "m" (rmv), - [cpsr] "r" (regs->ARM_cpsr), - [i_fn] "r" (i_fn) - : "r0", "r1", "r2", "r3", "lr", "cc" - ); - if (is_writeback(insn)) - regs->uregs[rn] = rnv; -} - -static void __kprobes emulate_strd(struct kprobe *p, struct pt_regs *regs) -{ - insn_4arg_fn_t *i_fn = (insn_4arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - long ppc = (long)p->addr + 8; - int rd = (insn >> 12) & 0xf; - int rn = (insn >> 16) & 0xf; - int rm = insn & 0xf; - long rnv = (rn == 15) ? ppc : regs->uregs[rn]; - /* rm/rmv may be invalid, don't care. */ - long rmv = (rm == 15) ? ppc : regs->uregs[rm]; - long rnv_wb; - - rnv_wb = insnslot_4arg_rflags(rnv, rmv, regs->uregs[rd], - regs->uregs[rd+1], - regs->ARM_cpsr, i_fn); - if (is_writeback(insn)) - regs->uregs[rn] = rnv_wb; -} - -static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs) -{ - insn_llret_3arg_fn_t *i_fn = (insn_llret_3arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - long ppc = (long)p->addr + 8; - union reg_pair fnr; - int rd = (insn >> 12) & 0xf; - int rn = (insn >> 16) & 0xf; - int rm = insn & 0xf; - long rdv; - long rnv = (rn == 15) ? ppc : regs->uregs[rn]; - long rmv = (rm == 15) ? ppc : regs->uregs[rm]; - long cpsr = regs->ARM_cpsr; - - fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn); - if (rn != 15) - regs->uregs[rn] = fnr.r0; /* Save Rn in case of writeback. */ - rdv = fnr.r1; - - if (rd == 15) { -#if __LINUX_ARM_ARCH__ >= 5 - cpsr &= ~PSR_T_BIT; - if (rdv & 0x1) - cpsr |= PSR_T_BIT; - regs->ARM_cpsr = cpsr; - rdv &= ~0x1; -#else - rdv &= ~0x2; -#endif - } - regs->uregs[rd] = rdv; -} - -static void __kprobes emulate_str(struct kprobe *p, struct pt_regs *regs) -{ - insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - long iaddr = (long)p->addr; - int rd = (insn >> 12) & 0xf; - int rn = (insn >> 16) & 0xf; - int rm = insn & 0xf; - long rdv = (rd == 15) ? iaddr + str_pc_offset : regs->uregs[rd]; - long rnv = (rn == 15) ? iaddr + 8 : regs->uregs[rn]; - long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */ - long rnv_wb; - - rnv_wb = insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn); - if (rn != 15) - regs->uregs[rn] = rnv_wb; /* Save Rn in case of writeback. 
*/ -} - -static void __kprobes emulate_sat(struct kprobe *p, struct pt_regs *regs) -{ - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - int rd = (insn >> 12) & 0xf; - int rm = insn & 0xf; - long rmv = regs->uregs[rm]; - - /* Writes Q flag */ - regs->uregs[rd] = insnslot_1arg_rwflags(rmv, &regs->ARM_cpsr, i_fn); -} - -static void __kprobes emulate_sel(struct kprobe *p, struct pt_regs *regs) -{ - insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - int rd = (insn >> 12) & 0xf; - int rn = (insn >> 16) & 0xf; - int rm = insn & 0xf; - long rnv = regs->uregs[rn]; - long rmv = regs->uregs[rm]; - - /* Reads GE bits */ - regs->uregs[rd] = insnslot_2arg_rflags(rnv, rmv, regs->ARM_cpsr, i_fn); -} - -static void __kprobes emulate_none(struct kprobe *p, struct pt_regs *regs) -{ - insn_0arg_fn_t *i_fn = (insn_0arg_fn_t *)&p->ainsn.insn[0]; - - insnslot_0arg_rflags(regs->ARM_cpsr, i_fn); -} - -static void __kprobes emulate_nop(struct kprobe *p, struct pt_regs *regs) -{ -} - -static void __kprobes -emulate_rd12_modify(struct kprobe *p, struct pt_regs *regs) -{ - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - int rd = (insn >> 12) & 0xf; - long rdv = regs->uregs[rd]; - - regs->uregs[rd] = insnslot_1arg_rflags(rdv, regs->ARM_cpsr, i_fn); -} - -static void __kprobes -emulate_rd12rn0_modify(struct kprobe *p, struct pt_regs *regs) -{ - insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - int rd = (insn >> 12) & 0xf; - int rn = insn & 0xf; - long rdv = regs->uregs[rd]; - long rnv = regs->uregs[rn]; - - regs->uregs[rd] = insnslot_2arg_rflags(rdv, rnv, regs->ARM_cpsr, i_fn); -} - -static void __kprobes emulate_rd12rm0(struct kprobe *p, struct pt_regs *regs) -{ - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - int rd = (insn >> 12) & 0xf; - int rm = insn & 0xf; - long rmv = regs->uregs[rm]; - - regs->uregs[rd] = insnslot_1arg_rflags(rmv, regs->ARM_cpsr, i_fn); -} - -static void __kprobes -emulate_rd12rn16rm0_rwflags(struct kprobe *p, struct pt_regs *regs) -{ - insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - int rd = (insn >> 12) & 0xf; - int rn = (insn >> 16) & 0xf; - int rm = insn & 0xf; - long rnv = regs->uregs[rn]; - long rmv = regs->uregs[rm]; - - regs->uregs[rd] = - insnslot_2arg_rwflags(rnv, rmv, &regs->ARM_cpsr, i_fn); -} - -static void __kprobes -emulate_rd16rn12rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs) -{ - insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - int rd = (insn >> 16) & 0xf; - int rn = (insn >> 12) & 0xf; - int rs = (insn >> 8) & 0xf; - int rm = insn & 0xf; - long rnv = regs->uregs[rn]; - long rsv = regs->uregs[rs]; - long rmv = regs->uregs[rm]; - - regs->uregs[rd] = - insnslot_3arg_rwflags(rnv, rsv, rmv, &regs->ARM_cpsr, i_fn); -} - -static void __kprobes -emulate_rd16rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs) -{ - insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - int rd = (insn >> 16) & 0xf; - int rs = (insn >> 8) & 0xf; - int rm = insn & 0xf; - long rsv = regs->uregs[rs]; - long rmv = regs->uregs[rm]; - - regs->uregs[rd] = - insnslot_2arg_rwflags(rsv, rmv, &regs->ARM_cpsr, i_fn); -} - -static void __kprobes -emulate_rdhi16rdlo12rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs) -{ -
insn_llret_4arg_fn_t *i_fn = (insn_llret_4arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - union reg_pair fnr; - int rdhi = (insn >> 16) & 0xf; - int rdlo = (insn >> 12) & 0xf; - int rs = (insn >> 8) & 0xf; - int rm = insn & 0xf; - long rsv = regs->uregs[rs]; - long rmv = regs->uregs[rm]; - - fnr.dr = insnslot_llret_4arg_rwflags(regs->uregs[rdhi], - regs->uregs[rdlo], rsv, rmv, - &regs->ARM_cpsr, i_fn); - regs->uregs[rdhi] = fnr.r0; - regs->uregs[rdlo] = fnr.r1; -} - -static void __kprobes -emulate_alu_imm_rflags(struct kprobe *p, struct pt_regs *regs) -{ - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - int rd = (insn >> 12) & 0xf; - int rn = (insn >> 16) & 0xf; - long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn]; - - regs->uregs[rd] = insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn); -} - -static void __kprobes -emulate_alu_imm_rwflags(struct kprobe *p, struct pt_regs *regs) -{ - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - int rd = (insn >> 12) & 0xf; - int rn = (insn >> 16) & 0xf; - long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn]; - - regs->uregs[rd] = insnslot_1arg_rwflags(rnv, &regs->ARM_cpsr, i_fn); -} - -static void __kprobes -emulate_alu_tests_imm(struct kprobe *p, struct pt_regs *regs) -{ - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - int rn = (insn >> 16) & 0xf; - long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn]; - - insnslot_1arg_rwflags(rnv, &regs->ARM_cpsr, i_fn); -} - -static void __kprobes -emulate_alu_rflags(struct kprobe *p, struct pt_regs *regs) -{ - insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - long ppc = (long)p->addr + 8; - int rd = (insn >> 12) & 0xf; - int rn = (insn >> 16) & 0xf; /* rn/rnv/rs/rsv may be */ - int rs = (insn >> 8) & 0xf; /* invalid, don't care. */ - int rm = insn & 0xf; - long rnv = (rn == 15) ? ppc : regs->uregs[rn]; - long rmv = (rm == 15) ? ppc : regs->uregs[rm]; - long rsv = regs->uregs[rs]; - - regs->uregs[rd] = - insnslot_3arg_rflags(rnv, rmv, rsv, regs->ARM_cpsr, i_fn); -} - -static void __kprobes -emulate_alu_rwflags(struct kprobe *p, struct pt_regs *regs) -{ - insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - long ppc = (long)p->addr + 8; - int rd = (insn >> 12) & 0xf; - int rn = (insn >> 16) & 0xf; /* rn/rnv/rs/rsv may be */ - int rs = (insn >> 8) & 0xf; /* invalid, don't care. */ - int rm = insn & 0xf; - long rnv = (rn == 15) ? ppc : regs->uregs[rn]; - long rmv = (rm == 15) ? ppc : regs->uregs[rm]; - long rsv = regs->uregs[rs]; - - regs->uregs[rd] = - insnslot_3arg_rwflags(rnv, rmv, rsv, &regs->ARM_cpsr, i_fn); -} - -static void __kprobes -emulate_alu_tests(struct kprobe *p, struct pt_regs *regs) -{ - insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - long ppc = (long)p->addr + 8; - int rn = (insn >> 16) & 0xf; - int rs = (insn >> 8) & 0xf; /* rs/rsv may be invalid, don't care. */ - int rm = insn & 0xf; - long rnv = (rn == 15) ? ppc : regs->uregs[rn]; - long rmv = (rm == 15) ? ppc : regs->uregs[rm]; - long rsv = regs->uregs[rs]; - - insnslot_3arg_rwflags(rnv, rmv, rsv, &regs->ARM_cpsr, i_fn); -} - -static enum kprobe_insn __kprobes -prep_emulate_ldr_str(kprobe_opcode_t insn, struct arch_specific_insn *asi) -{ - int not_imm = (insn & (1 << 26)) ?
(insn & (1 << 25)) - : (~insn & (1 << 22)); - - if (is_writeback(insn) && is_r15(insn, 16)) - return INSN_REJECTED; /* Writeback to PC */ - - insn &= 0xfff00fff; - insn |= 0x00001000; /* Rn = r0, Rd = r1 */ - if (not_imm) { - insn &= ~0xf; - insn |= 2; /* Rm = r2 */ - } - asi->insn[0] = insn; - asi->insn_handler = (insn & (1 << 20)) ? emulate_ldr : emulate_str; - return INSN_GOOD; -} - -static enum kprobe_insn __kprobes -prep_emulate_rd12_modify(kprobe_opcode_t insn, struct arch_specific_insn *asi) -{ - if (is_r15(insn, 12)) - return INSN_REJECTED; /* Rd is PC */ - - insn &= 0xffff0fff; /* Rd = r0 */ - asi->insn[0] = insn; - asi->insn_handler = emulate_rd12_modify; - return INSN_GOOD; -} - -static enum kprobe_insn __kprobes -prep_emulate_rd12rn0_modify(kprobe_opcode_t insn, - struct arch_specific_insn *asi) -{ - if (is_r15(insn, 12)) - return INSN_REJECTED; /* Rd is PC */ - - insn &= 0xffff0ff0; /* Rd = r0 */ - insn |= 0x00000001; /* Rn = r1 */ - asi->insn[0] = insn; - asi->insn_handler = emulate_rd12rn0_modify; - return INSN_GOOD; -} - -static enum kprobe_insn __kprobes -prep_emulate_rd12rm0(kprobe_opcode_t insn, struct arch_specific_insn *asi) -{ - if (is_r15(insn, 12)) - return INSN_REJECTED; /* Rd is PC */ - - insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */ - asi->insn[0] = insn; - asi->insn_handler = emulate_rd12rm0; - return INSN_GOOD; -} - -static enum kprobe_insn __kprobes -prep_emulate_rd12rn16rm0_wflags(kprobe_opcode_t insn, - struct arch_specific_insn *asi) -{ - if (is_r15(insn, 12)) - return INSN_REJECTED; /* Rd is PC */ - - insn &= 0xfff00ff0; /* Rd = r0, Rn = r0 */ - insn |= 0x00000001; /* Rm = r1 */ - asi->insn[0] = insn; - asi->insn_handler = emulate_rd12rn16rm0_rwflags; - return INSN_GOOD; -} - -static enum kprobe_insn __kprobes -prep_emulate_rd16rs8rm0_wflags(kprobe_opcode_t insn, - struct arch_specific_insn *asi) -{ - if (is_r15(insn, 16)) - return INSN_REJECTED; /* Rd is PC */ - - insn &= 0xfff0f0f0; /* Rd = r0, Rs = r0 */ - insn |= 0x00000001; /* Rm = r1 */ - asi->insn[0] = insn; - asi->insn_handler = emulate_rd16rs8rm0_rwflags; - return INSN_GOOD; -} - -static enum kprobe_insn __kprobes -prep_emulate_rd16rn12rs8rm0_wflags(kprobe_opcode_t insn, - struct arch_specific_insn *asi) -{ - if (is_r15(insn, 16)) - return INSN_REJECTED; /* Rd is PC */ - - insn &= 0xfff000f0; /* Rd = r0, Rn = r0 */ - insn |= 0x00000102; /* Rs = r1, Rm = r2 */ - asi->insn[0] = insn; - asi->insn_handler = emulate_rd16rn12rs8rm0_rwflags; - return INSN_GOOD; -} - -static enum kprobe_insn __kprobes -prep_emulate_rdhi16rdlo12rs8rm0_wflags(kprobe_opcode_t insn, - struct arch_specific_insn *asi) -{ - if (is_r15(insn, 16) || is_r15(insn, 12)) - return INSN_REJECTED; /* RdHi or RdLo is PC */ - - insn &= 0xfff000f0; /* RdHi = r0, RdLo = r1 */ - insn |= 0x00001203; /* Rs = r2, Rm = r3 */ - asi->insn[0] = insn; - asi->insn_handler = emulate_rdhi16rdlo12rs8rm0_rwflags; - return INSN_GOOD; -} - -/* - * For the instruction masking and comparisons in all the "space_*" - * functions below, Do _not_ rearrange the order of tests unless - * you're very, very sure of what you are doing. For the sake of - * efficiency, the masks for some tests sometimes assume other test - * have been done prior to them so the number of patterns to test - * for an instruction set can be as broad as possible to reduce the - * number of tests needed. 
- */ - -static enum kprobe_insn __kprobes -space_1111(kprobe_opcode_t insn, struct arch_specific_insn *asi) -{ - /* memory hint : 1111 0100 x001 xxxx xxxx xxxx xxxx xxxx : */ - /* PLDI : 1111 0100 x101 xxxx xxxx xxxx xxxx xxxx : */ - /* PLDW : 1111 0101 x001 xxxx xxxx xxxx xxxx xxxx : */ - /* PLD : 1111 0101 x101 xxxx xxxx xxxx xxxx xxxx : */ - if ((insn & 0xfe300000) == 0xf4100000) { - asi->insn_handler = emulate_nop; - return INSN_GOOD_NO_SLOT; - } - - /* BLX(1) : 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx : */ - if ((insn & 0xfe000000) == 0xfa000000) { - asi->insn_handler = simulate_blx1; - return INSN_GOOD_NO_SLOT; - } - - /* CPS : 1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */ - /* SETEND: 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */ - - /* SRS : 1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */ - /* RFE : 1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ - - /* Coprocessor instructions... */ - /* MCRR2 : 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */ - /* MRRC2 : 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */ - /* LDC2 : 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ - /* STC2 : 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ - /* CDP2 : 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ - /* MCR2 : 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ - /* MRC2 : 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ - - return INSN_REJECTED; -} - -static enum kprobe_insn __kprobes -space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) -{ - /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx xxx0 xxxx */ - if ((insn & 0x0f900010) == 0x01000000) { - - /* MRS cpsr : cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */ - if ((insn & 0x0ff000f0) == 0x01000000) { - if (is_r15(insn, 12)) - return INSN_REJECTED; /* Rd is PC */ - asi->insn_handler = simulate_mrs; - return INSN_GOOD_NO_SLOT; - } - - /* SMLALxy : cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */ - if ((insn & 0x0ff00090) == 0x01400080) - return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, - asi); - - /* SMULWy : cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */ - /* SMULxy : cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */ - if ((insn & 0x0ff000b0) == 0x012000a0 || - (insn & 0x0ff00090) == 0x01600080) - return prep_emulate_rd16rs8rm0_wflags(insn, asi); - - /* SMLAxy : cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx : Q */ - /* SMLAWy : cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx : Q */ - if ((insn & 0x0ff00090) == 0x01000080 || - (insn & 0x0ff000b0) == 0x01200080) - return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); - - /* BXJ : cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */ - /* MSR : cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */ - /* MRS spsr : cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */ - - /* Other instruction encodings aren't yet defined */ - return INSN_REJECTED; - } - - /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx 0xx1 xxxx */ - else if ((insn & 0x0f900090) == 0x01000010) { - - /* BLX(2) : cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */ - /* BX : cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */ - if ((insn & 0x0ff000d0) == 0x01200010) { - if ((insn & 0x0ff000ff) == 0x0120003f) - return INSN_REJECTED; /* BLX pc */ - asi->insn_handler = simulate_blx2bx; - return INSN_GOOD_NO_SLOT; - } - - /* CLZ : cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */ - if ((insn & 0x0ff000f0) == 0x01600010) - return prep_emulate_rd12rm0(insn, asi); - - /* QADD : cccc 0001 0000 xxxx xxxx xxxx 0101 xxxx :Q */ - /* QSUB : cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx :Q */ - /* QDADD : cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx :Q */ - /* QDSUB : cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx :Q */ - if ((insn & 0x0f9000f0) == 0x01000050) - return 
prep_emulate_rd12rn16rm0_wflags(insn, asi); - - /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */ - /* SMC : cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */ - - /* Other instruction encodings aren't yet defined */ - return INSN_REJECTED; - } - - /* cccc 0000 xxxx xxxx xxxx xxxx xxxx 1001 xxxx */ - else if ((insn & 0x0f0000f0) == 0x00000090) { - - /* MUL : cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx : */ - /* MULS : cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx :cc */ - /* MLA : cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx : */ - /* MLAS : cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx :cc */ - /* UMAAL : cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx : */ - /* undef : cccc 0000 0101 xxxx xxxx xxxx 1001 xxxx : */ - /* MLS : cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx : */ - /* undef : cccc 0000 0111 xxxx xxxx xxxx 1001 xxxx : */ - /* UMULL : cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx : */ - /* UMULLS : cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx :cc */ - /* UMLAL : cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx : */ - /* UMLALS : cccc 0000 1011 xxxx xxxx xxxx 1001 xxxx :cc */ - /* SMULL : cccc 0000 1100 xxxx xxxx xxxx 1001 xxxx : */ - /* SMULLS : cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx :cc */ - /* SMLAL : cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx : */ - /* SMLALS : cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx :cc */ - if ((insn & 0x00d00000) == 0x00500000) - return INSN_REJECTED; - else if ((insn & 0x00e00000) == 0x00000000) - return prep_emulate_rd16rs8rm0_wflags(insn, asi); - else if ((insn & 0x00a00000) == 0x00200000) - return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); - else - return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, - asi); - } - - /* cccc 000x xxxx xxxx xxxx xxxx xxxx 1xx1 xxxx */ - else if ((insn & 0x0e000090) == 0x00000090) { - - /* SWP : cccc 0001 0000 xxxx xxxx xxxx 1001 xxxx */ - /* SWPB : cccc 0001 0100 xxxx xxxx xxxx 1001 xxxx */ - /* ??? : cccc 0001 0x01 xxxx xxxx xxxx 1001 xxxx */ - /* ??? : cccc 0001 0x10 xxxx xxxx xxxx 1001 xxxx */ - /* ??? : cccc 0001 0x11 xxxx xxxx xxxx 1001 xxxx */ - /* STREX : cccc 0001 1000 xxxx xxxx xxxx 1001 xxxx */ - /* LDREX : cccc 0001 1001 xxxx xxxx xxxx 1001 xxxx */ - /* STREXD: cccc 0001 1010 xxxx xxxx xxxx 1001 xxxx */ - /* LDREXD: cccc 0001 1011 xxxx xxxx xxxx 1001 xxxx */ - /* STREXB: cccc 0001 1100 xxxx xxxx xxxx 1001 xxxx */ - /* LDREXB: cccc 0001 1101 xxxx xxxx xxxx 1001 xxxx */ - /* STREXH: cccc 0001 1110 xxxx xxxx xxxx 1001 xxxx */ - /* LDREXH: cccc 0001 1111 xxxx xxxx xxxx 1001 xxxx */ - - /* LDRD : cccc 000x xxx0 xxxx xxxx xxxx 1101 xxxx */ - /* STRD : cccc 000x xxx0 xxxx xxxx xxxx 1111 xxxx */ - /* LDRH : cccc 000x xxx1 xxxx xxxx xxxx 1011 xxxx */ - /* STRH : cccc 000x xxx0 xxxx xxxx xxxx 1011 xxxx */ - /* LDRSB : cccc 000x xxx1 xxxx xxxx xxxx 1101 xxxx */ - /* LDRSH : cccc 000x xxx1 xxxx xxxx xxxx 1111 xxxx */ - if ((insn & 0x0f0000f0) == 0x01000090) { - if ((insn & 0x0fb000f0) == 0x01000090) { - /* SWP/SWPB */ - return prep_emulate_rd12rn16rm0_wflags(insn, - asi); - } else { - /* STREX/LDREX variants and unallocated space */ - return INSN_REJECTED; - } - - } else if ((insn & 0x0e1000d0) == 0x000000d0) { - /* STRD/LDRD */ - if ((insn & 0x0000e000) == 0x0000e000) - return INSN_REJECTED; /* Rd is LR or PC */ - if (is_writeback(insn) && is_r15(insn, 16)) - return INSN_REJECTED; /* Writeback to PC */ - - insn &= 0xfff00fff; - insn |= 0x00002000; /* Rn = r0, Rd = r2 */ - if (!(insn & (1 << 22))) { - /* Register index */ - insn &= ~0xf; - insn |= 1; /* Rm = r1 */ - } - asi->insn[0] = insn; - asi->insn_handler = - (insn & (1 << 5)) ? 
emulate_strd : emulate_ldrd; - return INSN_GOOD; - } - - /* LDRH/STRH/LDRSB/LDRSH */ - if (is_r15(insn, 12)) - return INSN_REJECTED; /* Rd is PC */ - return prep_emulate_ldr_str(insn, asi); - } - - /* cccc 000x xxxx xxxx xxxx xxxx xxxx xxxx xxxx */ - - /* - * ALU op with S bit and Rd == 15 : - * cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx - */ - if ((insn & 0x0e10f000) == 0x0010f000) - return INSN_REJECTED; - - /* - * "mov ip, sp" is the most common kprobe'd instruction by far. - * Check and optimize for it explicitly. - */ - if (insn == 0xe1a0c00d) { - asi->insn_handler = simulate_mov_ipsp; - return INSN_GOOD_NO_SLOT; - } - - /* - * Data processing: Immediate-shift / Register-shift - * ALU op : cccc 000x xxxx xxxx xxxx xxxx xxxx xxxx - * CPY : cccc 0001 1010 xxxx xxxx 0000 0000 xxxx - * MOV : cccc 0001 101x xxxx xxxx xxxx xxxx xxxx - * *S (bit 20) updates condition codes - * ADC/SBC/RSC reads the C flag - */ - insn &= 0xfff00ff0; /* Rn = r0, Rd = r0 */ - insn |= 0x00000001; /* Rm = r1 */ - if (insn & 0x010) { - insn &= 0xfffff0ff; /* register shift */ - insn |= 0x00000200; /* Rs = r2 */ - } - asi->insn[0] = insn; - - if ((insn & 0x0f900000) == 0x01100000) { - /* - * TST : cccc 0001 0001 xxxx xxxx xxxx xxxx xxxx - * TEQ : cccc 0001 0011 xxxx xxxx xxxx xxxx xxxx - * CMP : cccc 0001 0101 xxxx xxxx xxxx xxxx xxxx - * CMN : cccc 0001 0111 xxxx xxxx xxxx xxxx xxxx - */ - asi->insn_handler = emulate_alu_tests; - } else { - /* ALU ops which write to Rd */ - asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ - emulate_alu_rwflags : emulate_alu_rflags; - } - return INSN_GOOD; -} - -static enum kprobe_insn __kprobes -space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi) -{ - /* MOVW : cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */ - /* MOVT : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */ - if ((insn & 0x0fb00000) == 0x03000000) - return prep_emulate_rd12_modify(insn, asi); - - /* hints : cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */ - if ((insn & 0x0fff0000) == 0x03200000) { - unsigned op2 = insn & 0x000000ff; - if (op2 == 0x01 || op2 == 0x04) { - /* YIELD : cccc 0011 0010 0000 xxxx xxxx 0000 0001 */ - /* SEV : cccc 0011 0010 0000 xxxx xxxx 0000 0100 */ - asi->insn[0] = insn; - asi->insn_handler = emulate_none; - return INSN_GOOD; - } else if (op2 <= 0x03) { - /* NOP : cccc 0011 0010 0000 xxxx xxxx 0000 0000 */ - /* WFE : cccc 0011 0010 0000 xxxx xxxx 0000 0010 */ - /* WFI : cccc 0011 0010 0000 xxxx xxxx 0000 0011 */ - /* - * We make WFE and WFI true NOPs to avoid stalls due - * to missing events whilst processing the probe. 
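
To make the register renaming performed just above concrete, here is a small user-space sketch of the same bit manipulation; the example opcode and the printf scaffolding are assumptions for illustration, not kernel code:

#include <stdio.h>
#include <inttypes.h>

/*
 * A data-processing instruction is rewritten so that Rn and Rd become
 * r0 and Rm becomes r1, letting one emulation handler marshal the real
 * register values through a fixed set of registers before the rewritten
 * instruction is placed in the probe's instruction slot.
 */
int main(void)
{
	uint32_t insn = 0xe0865007;	/* add r5, r6, r7 */

	insn &= 0xfff00ff0;		/* Rn = r0, Rd = r0 */
	insn |= 0x00000001;		/* Rm = r1 */

	/* Prints e0800001, i.e. "add r0, r0, r1" */
	printf("slot instruction: %08" PRIx32 "\n", insn);
	return 0;
}
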
- */ - asi->insn_handler = emulate_nop; - return INSN_GOOD_NO_SLOT; - } - /* For DBG and unallocated hints it's safest to reject them */ - return INSN_REJECTED; - } - - /* - * MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx - * ALU op with S bit and Rd == 15 : - * cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx - */ - if ((insn & 0x0fb00000) == 0x03200000 || /* MSR */ - (insn & 0x0e10f000) == 0x0210f000) /* ALU s-bit, R15 */ - return INSN_REJECTED; - - /* - * Data processing: 32-bit Immediate - * ALU op : cccc 001x xxxx xxxx xxxx xxxx xxxx xxxx - * MOV : cccc 0011 101x xxxx xxxx xxxx xxxx xxxx - * *S (bit 20) updates condition codes - * ADC/SBC/RSC reads the C flag - */ - insn &= 0xfff00fff; /* Rn = r0 and Rd = r0 */ - asi->insn[0] = insn; - - if ((insn & 0x0f900000) == 0x03100000) { - /* - * TST : cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx - * TEQ : cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx - * CMP : cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx - * CMN : cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx - */ - asi->insn_handler = emulate_alu_tests_imm; - } else { - /* ALU ops which write to Rd */ - asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ - emulate_alu_imm_rwflags : emulate_alu_imm_rflags; - } - return INSN_GOOD; -} - -static enum kprobe_insn __kprobes -space_cccc_0110__1(kprobe_opcode_t insn, struct arch_specific_insn *asi) -{ - /* SEL : cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx GE: !!! */ - if ((insn & 0x0ff000f0) == 0x068000b0) { - if (is_r15(insn, 12)) - return INSN_REJECTED; /* Rd is PC */ - insn &= 0xfff00ff0; /* Rd = r0, Rn = r0 */ - insn |= 0x00000001; /* Rm = r1 */ - asi->insn[0] = insn; - asi->insn_handler = emulate_sel; - return INSN_GOOD; - } - - /* SSAT : cccc 0110 101x xxxx xxxx xxxx xx01 xxxx :Q */ - /* USAT : cccc 0110 111x xxxx xxxx xxxx xx01 xxxx :Q */ - /* SSAT16 : cccc 0110 1010 xxxx xxxx xxxx 0011 xxxx :Q */ - /* USAT16 : cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx :Q */ - if ((insn & 0x0fa00030) == 0x06a00010 || - (insn & 0x0fb000f0) == 0x06a00030) { - if (is_r15(insn, 12)) - return INSN_REJECTED; /* Rd is PC */ - insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */ - asi->insn[0] = insn; - asi->insn_handler = emulate_sat; - return INSN_GOOD; - } - - /* REV : cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */ - /* REV16 : cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */ - /* RBIT : cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */ - /* REVSH : cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */ - if ((insn & 0x0ff00070) == 0x06b00030 || - (insn & 0x0ff00070) == 0x06f00030) - return prep_emulate_rd12rm0(insn, asi); - - /* ??? : cccc 0110 0000 xxxx xxxx xxxx xxx1 xxxx : */ - /* SADD16 : cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx :GE */ - /* SADDSUBX : cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx :GE */ - /* SSUBADDX : cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx :GE */ - /* SSUB16 : cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx :GE */ - /* SADD8 : cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx :GE */ - /* ??? : cccc 0110 0001 xxxx xxxx xxxx 1011 xxxx : */ - /* ??? : cccc 0110 0001 xxxx xxxx xxxx 1101 xxxx : */ - /* SSUB8 : cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx :GE */ - /* QADD16 : cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx : */ - /* QADDSUBX : cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx : */ - /* QSUBADDX : cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx : */ - /* QSUB16 : cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx : */ - /* QADD8 : cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx : */ - /* ??? : cccc 0110 0010 xxxx xxxx xxxx 1011 xxxx : */ - /* ??? 
: cccc 0110 0010 xxxx xxxx xxxx 1101 xxxx : */ - /* QSUB8 : cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx : */ - /* SHADD16 : cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx : */ - /* SHADDSUBX : cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx : */ - /* SHSUBADDX : cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx : */ - /* SHSUB16 : cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx : */ - /* SHADD8 : cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx : */ - /* ??? : cccc 0110 0011 xxxx xxxx xxxx 1011 xxxx : */ - /* ??? : cccc 0110 0011 xxxx xxxx xxxx 1101 xxxx : */ - /* SHSUB8 : cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx : */ - /* ??? : cccc 0110 0100 xxxx xxxx xxxx xxx1 xxxx : */ - /* UADD16 : cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx :GE */ - /* UADDSUBX : cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx :GE */ - /* USUBADDX : cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx :GE */ - /* USUB16 : cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx :GE */ - /* UADD8 : cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx :GE */ - /* ??? : cccc 0110 0101 xxxx xxxx xxxx 1011 xxxx : */ - /* ??? : cccc 0110 0101 xxxx xxxx xxxx 1101 xxxx : */ - /* USUB8 : cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx :GE */ - /* UQADD16 : cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx : */ - /* UQADDSUBX : cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx : */ - /* UQSUBADDX : cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx : */ - /* UQSUB16 : cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx : */ - /* UQADD8 : cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx : */ - /* ??? : cccc 0110 0110 xxxx xxxx xxxx 1011 xxxx : */ - /* ??? : cccc 0110 0110 xxxx xxxx xxxx 1101 xxxx : */ - /* UQSUB8 : cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx : */ - /* UHADD16 : cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx : */ - /* UHADDSUBX : cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx : */ - /* UHSUBADDX : cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx : */ - /* UHSUB16 : cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx : */ - /* UHADD8 : cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx : */ - /* ??? : cccc 0110 0111 xxxx xxxx xxxx 1011 xxxx : */ - /* ??? : cccc 0110 0111 xxxx xxxx xxxx 1101 xxxx : */ - /* UHSUB8 : cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx : */ - if ((insn & 0x0f800010) == 0x06000010) { - if ((insn & 0x00300000) == 0x00000000 || - (insn & 0x000000e0) == 0x000000a0 || - (insn & 0x000000e0) == 0x000000c0) - return INSN_REJECTED; /* Unallocated space */ - return prep_emulate_rd12rn16rm0_wflags(insn, asi); - } - - /* PKHBT : cccc 0110 1000 xxxx xxxx xxxx x001 xxxx : */ - /* PKHTB : cccc 0110 1000 xxxx xxxx xxxx x101 xxxx : */ - if ((insn & 0x0ff00030) == 0x06800010) - return prep_emulate_rd12rn16rm0_wflags(insn, asi); - - /* SXTAB16 : cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx : */ - /* SXTB16 : cccc 0110 1000 1111 xxxx xxxx 0111 xxxx : */ - /* ??? : cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx : */ - /* SXTAB : cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx : */ - /* SXTB : cccc 0110 1010 1111 xxxx xxxx 0111 xxxx : */ - /* SXTAH : cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx : */ - /* SXTH : cccc 0110 1011 1111 xxxx xxxx 0111 xxxx : */ - /* UXTAB16 : cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx : */ - /* UXTB16 : cccc 0110 1100 1111 xxxx xxxx 0111 xxxx : */ - /* ??? 
: cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx : */ - /* UXTAB : cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx : */ - /* UXTB : cccc 0110 1110 1111 xxxx xxxx 0111 xxxx : */ - /* UXTAH : cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx : */ - /* UXTH : cccc 0110 1111 1111 xxxx xxxx 0111 xxxx : */ - if ((insn & 0x0f8000f0) == 0x06800070) { - if ((insn & 0x00300000) == 0x00100000) - return INSN_REJECTED; /* Unallocated space */ - - if ((insn & 0x000f0000) == 0x000f0000) - return prep_emulate_rd12rm0(insn, asi); - else - return prep_emulate_rd12rn16rm0_wflags(insn, asi); - } - - /* Other instruction encodings aren't yet defined */ - return INSN_REJECTED; -} - -static enum kprobe_insn __kprobes -space_cccc_0111__1(kprobe_opcode_t insn, struct arch_specific_insn *asi) -{ - /* Undef : cccc 0111 1111 xxxx xxxx xxxx 1111 xxxx */ - if ((insn & 0x0ff000f0) == 0x03f000f0) - return INSN_REJECTED; - - /* SMLALD : cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */ - /* SMLSLD : cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */ - if ((insn & 0x0ff00090) == 0x07400010) - return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi); - - /* SMLAD : cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx :Q */ - /* SMUAD : cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx :Q */ - /* SMLSD : cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx :Q */ - /* SMUSD : cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx : */ - /* SMMLA : cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx : */ - /* SMMUL : cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx : */ - /* USADA8 : cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx : */ - /* USAD8 : cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx : */ - if ((insn & 0x0ff00090) == 0x07000010 || - (insn & 0x0ff000d0) == 0x07500010 || - (insn & 0x0ff000f0) == 0x07800010) { - - if ((insn & 0x0000f000) == 0x0000f000) - return prep_emulate_rd16rs8rm0_wflags(insn, asi); - else - return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); - } - - /* SMMLS : cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx : */ - if ((insn & 0x0ff000d0) == 0x075000d0) - return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); - - /* SBFX : cccc 0111 101x xxxx xxxx xxxx x101 xxxx : */ - /* UBFX : cccc 0111 111x xxxx xxxx xxxx x101 xxxx : */ - if ((insn & 0x0fa00070) == 0x07a00050) - return prep_emulate_rd12rm0(insn, asi); - - /* BFI : cccc 0111 110x xxxx xxxx xxxx x001 xxxx : */ - /* BFC : cccc 0111 110x xxxx xxxx xxxx x001 1111 : */ - if ((insn & 0x0fe00070) == 0x07c00010) { - - if ((insn & 0x0000000f) == 0x0000000f) - return prep_emulate_rd12_modify(insn, asi); - else - return prep_emulate_rd12rn0_modify(insn, asi); - } - - return INSN_REJECTED; -} - -static enum kprobe_insn __kprobes -space_cccc_01xx(kprobe_opcode_t insn, struct arch_specific_insn *asi) -{ - /* LDR : cccc 01xx x0x1 xxxx xxxx xxxx xxxx xxxx */ - /* LDRB : cccc 01xx x1x1 xxxx xxxx xxxx xxxx xxxx */ - /* LDRBT : cccc 01x0 x111 xxxx xxxx xxxx xxxx xxxx */ - /* LDRT : cccc 01x0 x011 xxxx xxxx xxxx xxxx xxxx */ - /* STR : cccc 01xx x0x0 xxxx xxxx xxxx xxxx xxxx */ - /* STRB : cccc 01xx x1x0 xxxx xxxx xxxx xxxx xxxx */ - /* STRBT : cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */ - /* STRT : cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */ - - if ((insn & 0x00500000) == 0x00500000 && is_r15(insn, 12)) - return INSN_REJECTED; /* LDRB into PC */ - - return prep_emulate_ldr_str(insn, asi); -} - -static enum kprobe_insn __kprobes -space_cccc_100x(kprobe_opcode_t insn, struct arch_specific_insn *asi) -{ - /* LDM(2) : cccc 100x x101 xxxx 0xxx xxxx xxxx xxxx */ - /* LDM(3) : cccc 100x x1x1 xxxx 1xxx xxxx xxxx xxxx */ - if ((insn & 0x0e708000) == 0x85000000 || - (insn & 0x0e508000) == 0x85010000) - 
return INSN_REJECTED; - - /* LDM(1) : cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ - /* STM(1) : cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */ - asi->insn_handler = ((insn & 0x108000) == 0x008000) ? /* STM & R15 */ - simulate_stm1_pc : simulate_ldm1stm1; - return INSN_GOOD_NO_SLOT; -} - -static enum kprobe_insn __kprobes -space_cccc_101x(kprobe_opcode_t insn, struct arch_specific_insn *asi) -{ - /* B : cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */ - /* BL : cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */ - asi->insn_handler = simulate_bbl; - return INSN_GOOD_NO_SLOT; -} - -static enum kprobe_insn __kprobes -space_cccc_11xx(kprobe_opcode_t insn, struct arch_specific_insn *asi) -{ - /* Coprocessor instructions... */ - /* MCRR : cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */ - /* MRRC : cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */ - /* LDC : cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ - /* STC : cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ - /* CDP : cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ - /* MCR : cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ - /* MRC : cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ - - /* SVC : cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */ - - return INSN_REJECTED; -} - -static unsigned long __kprobes __check_eq(unsigned long cpsr) -{ - return cpsr & PSR_Z_BIT; -} - -static unsigned long __kprobes __check_ne(unsigned long cpsr) -{ - return (~cpsr) & PSR_Z_BIT; -} - -static unsigned long __kprobes __check_cs(unsigned long cpsr) -{ - return cpsr & PSR_C_BIT; -} - -static unsigned long __kprobes __check_cc(unsigned long cpsr) -{ - return (~cpsr) & PSR_C_BIT; -} - -static unsigned long __kprobes __check_mi(unsigned long cpsr) -{ - return cpsr & PSR_N_BIT; -} - -static unsigned long __kprobes __check_pl(unsigned long cpsr) -{ - return (~cpsr) & PSR_N_BIT; -} - -static unsigned long __kprobes __check_vs(unsigned long cpsr) -{ - return cpsr & PSR_V_BIT; -} - -static unsigned long __kprobes __check_vc(unsigned long cpsr) -{ - return (~cpsr) & PSR_V_BIT; -} - -static unsigned long __kprobes __check_hi(unsigned long cpsr) -{ - cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ - return cpsr & PSR_C_BIT; -} - -static unsigned long __kprobes __check_ls(unsigned long cpsr) -{ - cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ - return (~cpsr) & PSR_C_BIT; -} - -static unsigned long __kprobes __check_ge(unsigned long cpsr) -{ - cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ - return (~cpsr) & PSR_N_BIT; -} - -static unsigned long __kprobes __check_lt(unsigned long cpsr) -{ - cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ - return cpsr & PSR_N_BIT; -} - -static unsigned long __kprobes __check_gt(unsigned long cpsr) -{ - unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ - temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */ - return (~temp) & PSR_N_BIT; -} - -static unsigned long __kprobes __check_le(unsigned long cpsr) -{ - unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ - temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */ - return temp & PSR_N_BIT; -} - -static unsigned long __kprobes __check_al(unsigned long cpsr) -{ - return true; -} - -static kprobe_check_cc * const condition_checks[16] = { - &__check_eq, &__check_ne, &__check_cs, &__check_cc, - &__check_mi, &__check_pl, &__check_vs, &__check_vc, - &__check_hi, &__check_ls, &__check_ge, &__check_lt, - &__check_gt, &__check_le, &__check_al, &__check_al -}; - -/* Return: - * INSN_REJECTED If instruction is one not allowed to kprobe, - * INSN_GOOD If instruction is supported and uses instruction 
slot, - * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. - * - * For instructions we don't want to kprobe (INSN_REJECTED return result): - * These are generally ones that modify the processor state, making - * them "hard" to simulate, such as instructions that switch processor - * mode or make accesses in alternate modes. Any of these could be - * simulated if the work were put into it, but the return would be low - * considering they should also be very rare. - */ -enum kprobe_insn __kprobes -arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) -{ - asi->insn_check_cc = condition_checks[insn>>28]; - asi->insn[1] = KPROBE_RETURN_INSTRUCTION; - - if ((insn & 0xf0000000) == 0xf0000000) - - return space_1111(insn, asi); - - else if ((insn & 0x0e000000) == 0x00000000) - - return space_cccc_000x(insn, asi); - - else if ((insn & 0x0e000000) == 0x02000000) - - return space_cccc_001x(insn, asi); - - else if ((insn & 0x0f000010) == 0x06000010) - - return space_cccc_0110__1(insn, asi); - - else if ((insn & 0x0f000010) == 0x07000010) - - return space_cccc_0111__1(insn, asi); - - else if ((insn & 0x0c000000) == 0x04000000) - - return space_cccc_01xx(insn, asi); - - else if ((insn & 0x0e000000) == 0x08000000) - - return space_cccc_100x(insn, asi); - - else if ((insn & 0x0e000000) == 0x0a000000) - - return space_cccc_101x(insn, asi); - - return space_cccc_11xx(insn, asi); -} - -void __init arm_kprobe_decode_init(void) -{ - find_str_pc_offset(); -} diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/kernel/kprobes-thumb.c new file mode 100644 index 000000000000..902ca59e8b11 --- /dev/null +++ b/arch/arm/kernel/kprobes-thumb.c @@ -0,0 +1,1462 @@ +/* + * arch/arm/kernel/kprobes-thumb.c + * + * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/kprobes.h> + +#include "kprobes.h" + + +/* + * True if current instruction is in an IT block. + */ +#define in_it_block(cpsr) ((cpsr & 0x06000c00) != 0x00000000) + +/* + * Return the condition code to check for the currently executing instruction. + * This is in ITSTATE<7:4> which is in CPSR<15:12> but is only valid if + * in_it_block returns true. + */ +#define current_cond(cpsr) ((cpsr >> 12) & 0xf) + +/* + * Return the PC value for a probe in thumb code. + * This is the address of the probed instruction plus 4. + * We subtract one because the address will have bit zero set to indicate + * a pointer to thumb code. + */ +static inline unsigned long __kprobes thumb_probe_pc(struct kprobe *p) +{ + return (unsigned long)p->addr - 1 + 4; +} + +static void __kprobes +t32_simulate_table_branch(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + unsigned long pc = thumb_probe_pc(p); + int rn = (insn >> 16) & 0xf; + int rm = insn & 0xf; + + unsigned long rnv = (rn == 15) ? 
pc : regs->uregs[rn]; + unsigned long rmv = regs->uregs[rm]; + unsigned int halfwords; + + if (insn & 0x10) /* TBH */ + halfwords = ((u16 *)rnv)[rmv]; + else /* TBB */ + halfwords = ((u8 *)rnv)[rmv]; + + regs->ARM_pc = pc + 2 * halfwords; +} + +static void __kprobes +t32_simulate_mrs(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + int rd = (insn >> 8) & 0xf; + unsigned long mask = 0xf8ff03df; /* Mask out execution state */ + regs->uregs[rd] = regs->ARM_cpsr & mask; +} + +static void __kprobes +t32_simulate_cond_branch(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + unsigned long pc = thumb_probe_pc(p); + + long offset = insn & 0x7ff; /* imm11 */ + offset += (insn & 0x003f0000) >> 5; /* imm6 */ + offset += (insn & 0x00002000) << 4; /* J1 */ + offset += (insn & 0x00000800) << 7; /* J2 */ + offset -= (insn & 0x04000000) >> 7; /* Apply sign bit */ + + regs->ARM_pc = pc + (offset * 2); +} + +static enum kprobe_insn __kprobes +t32_decode_cond_branch(kprobe_opcode_t insn, struct arch_specific_insn *asi) +{ + int cc = (insn >> 22) & 0xf; + asi->insn_check_cc = kprobe_condition_checks[cc]; + asi->insn_handler = t32_simulate_cond_branch; + return INSN_GOOD_NO_SLOT; +} + +static void __kprobes +t32_simulate_branch(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + unsigned long pc = thumb_probe_pc(p); + + long offset = insn & 0x7ff; /* imm11 */ + offset += (insn & 0x03ff0000) >> 5; /* imm10 */ + offset += (insn & 0x00002000) << 9; /* J1 */ + offset += (insn & 0x00000800) << 10; /* J2 */ + if (insn & 0x04000000) + offset -= 0x00800000; /* Apply sign bit */ + else + offset ^= 0x00600000; /* Invert J1 and J2 */ + + if (insn & (1 << 14)) { + /* BL or BLX */ + regs->ARM_lr = (unsigned long)p->addr + 4; + if (!(insn & (1 << 12))) { + /* BLX so switch to ARM mode */ + regs->ARM_cpsr &= ~PSR_T_BIT; + pc &= ~3; + } + } + + regs->ARM_pc = pc + (offset * 2); +} + +static void __kprobes +t32_simulate_ldr_literal(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + unsigned long addr = thumb_probe_pc(p) & ~3; + int rt = (insn >> 12) & 0xf; + unsigned long rtv; + + long offset = insn & 0xfff; + if (insn & 0x00800000) + addr += offset; + else + addr -= offset; + + if (insn & 0x00400000) { + /* LDR */ + rtv = *(unsigned long *)addr; + if (rt == 15) { + bx_write_pc(rtv, regs); + return; + } + } else if (insn & 0x00200000) { + /* LDRH */ + if (insn & 0x01000000) + rtv = *(s16 *)addr; + else + rtv = *(u16 *)addr; + } else { + /* LDRB */ + if (insn & 0x01000000) + rtv = *(s8 *)addr; + else + rtv = *(u8 *)addr; + } + + regs->uregs[rt] = rtv; +} + +static enum kprobe_insn __kprobes +t32_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi) +{ + enum kprobe_insn ret = kprobe_decode_ldmstm(insn, asi); + + /* Fixup modified instruction to have halfwords in correct order...*/ + insn = asi->insn[0]; + ((u16 *)asi->insn)[0] = insn >> 16; + ((u16 *)asi->insn)[1] = insn & 0xffff; + + return ret; +} + +static void __kprobes +t32_emulate_ldrdstrd(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + unsigned long pc = thumb_probe_pc(p) & ~3; + int rt1 = (insn >> 12) & 0xf; + int rt2 = (insn >> 8) & 0xf; + int rn = (insn >> 16) & 0xf; + + register unsigned long rt1v asm("r0") = regs->uregs[rt1]; + register unsigned long rt2v asm("r1") = regs->uregs[rt2]; + register unsigned long rnv asm("r2") = (rn == 15) ? 
pc + : regs->uregs[rn]; + + __asm__ __volatile__ ( + "blx %[fn]" + : "=r" (rt1v), "=r" (rt2v), "=r" (rnv) + : "0" (rt1v), "1" (rt2v), "2" (rnv), [fn] "r" (p->ainsn.insn_fn) + : "lr", "memory", "cc" + ); + + if (rn != 15) + regs->uregs[rn] = rnv; /* Writeback base register */ + regs->uregs[rt1] = rt1v; + regs->uregs[rt2] = rt2v; +} + +static void __kprobes +t32_emulate_ldrstr(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + int rt = (insn >> 12) & 0xf; + int rn = (insn >> 16) & 0xf; + int rm = insn & 0xf; + + register unsigned long rtv asm("r0") = regs->uregs[rt]; + register unsigned long rnv asm("r2") = regs->uregs[rn]; + register unsigned long rmv asm("r3") = regs->uregs[rm]; + + __asm__ __volatile__ ( + "blx %[fn]" + : "=r" (rtv), "=r" (rnv) + : "0" (rtv), "1" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn) + : "lr", "memory", "cc" + ); + + regs->uregs[rn] = rnv; /* Writeback base register */ + if (rt == 15) /* Can't be true for a STR as they aren't allowed */ + bx_write_pc(rtv, regs); + else + regs->uregs[rt] = rtv; +} + +static void __kprobes +t32_emulate_rd8rn16rm0_rwflags(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + int rd = (insn >> 8) & 0xf; + int rn = (insn >> 16) & 0xf; + int rm = insn & 0xf; + + register unsigned long rdv asm("r1") = regs->uregs[rd]; + register unsigned long rnv asm("r2") = regs->uregs[rn]; + register unsigned long rmv asm("r3") = regs->uregs[rm]; + unsigned long cpsr = regs->ARM_cpsr; + + __asm__ __volatile__ ( + "msr cpsr_fs, %[cpsr] \n\t" + "blx %[fn] \n\t" + "mrs %[cpsr], cpsr \n\t" + : "=r" (rdv), [cpsr] "=r" (cpsr) + : "0" (rdv), "r" (rnv), "r" (rmv), + "1" (cpsr), [fn] "r" (p->ainsn.insn_fn) + : "lr", "memory", "cc" + ); + + regs->uregs[rd] = rdv; + regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK); +} + +static void __kprobes +t32_emulate_rd8pc16_noflags(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + unsigned long pc = thumb_probe_pc(p); + int rd = (insn >> 8) & 0xf; + + register unsigned long rdv asm("r1") = regs->uregs[rd]; + register unsigned long rnv asm("r2") = pc & ~3; + + __asm__ __volatile__ ( + "blx %[fn]" + : "=r" (rdv) + : "0" (rdv), "r" (rnv), [fn] "r" (p->ainsn.insn_fn) + : "lr", "memory", "cc" + ); + + regs->uregs[rd] = rdv; +} + +static void __kprobes +t32_emulate_rd8rn16_noflags(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + int rd = (insn >> 8) & 0xf; + int rn = (insn >> 16) & 0xf; + + register unsigned long rdv asm("r1") = regs->uregs[rd]; + register unsigned long rnv asm("r2") = regs->uregs[rn]; + + __asm__ __volatile__ ( + "blx %[fn]" + : "=r" (rdv) + : "0" (rdv), "r" (rnv), [fn] "r" (p->ainsn.insn_fn) + : "lr", "memory", "cc" + ); + + regs->uregs[rd] = rdv; +} + +static void __kprobes +t32_emulate_rdlo12rdhi8rn16rm0_noflags(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + int rdlo = (insn >> 12) & 0xf; + int rdhi = (insn >> 8) & 0xf; + int rn = (insn >> 16) & 0xf; + int rm = insn & 0xf; + + register unsigned long rdlov asm("r0") = regs->uregs[rdlo]; + register unsigned long rdhiv asm("r1") = regs->uregs[rdhi]; + register unsigned long rnv asm("r2") = regs->uregs[rn]; + register unsigned long rmv asm("r3") = regs->uregs[rm]; + + __asm__ __volatile__ ( + "blx %[fn]" + : "=r" (rdlov), "=r" (rdhiv) + : "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv), + [fn] "r" (p->ainsn.insn_fn) + : "lr", "memory", "cc" + ); + + regs->uregs[rdlo] = rdlov; + 
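
The "blx %[fn]" pattern shared by these t32_emulate_* routines binds operand values to the registers the rewritten slot instruction expects, calls the slot as if it were a function, then reads the outputs back. A rough user-space analogue, with an ordinary C function standing in for the instruction slot (fake_slot and the value names are illustrative assumptions):

#include <stdio.h>

/*
 * The kernel's insn_fn points at the rewritten instruction followed by
 * a return; here a plain C function plays that role so the sketch runs
 * anywhere.  In the real code the operands are pinned to specific
 * registers with "register ... asm("rN")" declarations before the blx.
 */
typedef long (*insn_fn_t)(long rn, long rm);

static long fake_slot(long rn, long rm)	/* stands in for "add r0, r0, r1; bx lr" */
{
	return rn + rm;
}

int main(void)
{
	insn_fn_t fn = fake_slot;
	long rnv = 40, rmv = 2;
	long rdv = fn(rnv, rmv);	/* the kernel does this with "blx %[fn]" */

	printf("rd = %ld\n", rdv);
	return 0;
}
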
regs->uregs[rdhi] = rdhiv; +} + +/* These emulation encodings are functionally equivalent... */ +#define t32_emulate_rd8rn16rm0ra12_noflags \ + t32_emulate_rdlo12rdhi8rn16rm0_noflags + +static const union decode_item t32_table_1110_100x_x0xx[] = { + /* Load/store multiple instructions */ + + /* Rn is PC 1110 100x x0xx 1111 xxxx xxxx xxxx xxxx */ + DECODE_REJECT (0xfe4f0000, 0xe80f0000), + + /* SRS 1110 1000 00x0 xxxx xxxx xxxx xxxx xxxx */ + /* RFE 1110 1000 00x1 xxxx xxxx xxxx xxxx xxxx */ + DECODE_REJECT (0xffc00000, 0xe8000000), + /* SRS 1110 1001 10x0 xxxx xxxx xxxx xxxx xxxx */ + /* RFE 1110 1001 10x1 xxxx xxxx xxxx xxxx xxxx */ + DECODE_REJECT (0xffc00000, 0xe9800000), + + /* STM Rn, {...pc} 1110 100x x0x0 xxxx 1xxx xxxx xxxx xxxx */ + DECODE_REJECT (0xfe508000, 0xe8008000), + /* LDM Rn, {...lr,pc} 1110 100x x0x1 xxxx 11xx xxxx xxxx xxxx */ + DECODE_REJECT (0xfe50c000, 0xe810c000), + /* LDM/STM Rn, {...sp} 1110 100x x0xx xxxx xx1x xxxx xxxx xxxx */ + DECODE_REJECT (0xfe402000, 0xe8002000), + + /* STMIA 1110 1000 10x0 xxxx xxxx xxxx xxxx xxxx */ + /* LDMIA 1110 1000 10x1 xxxx xxxx xxxx xxxx xxxx */ + /* STMDB 1110 1001 00x0 xxxx xxxx xxxx xxxx xxxx */ + /* LDMDB 1110 1001 00x1 xxxx xxxx xxxx xxxx xxxx */ + DECODE_CUSTOM (0xfe400000, 0xe8000000, t32_decode_ldmstm), + + DECODE_END +}; + +static const union decode_item t32_table_1110_100x_x1xx[] = { + /* Load/store dual, load/store exclusive, table branch */ + + /* STRD (immediate) 1110 1000 x110 xxxx xxxx xxxx xxxx xxxx */ + /* LDRD (immediate) 1110 1000 x111 xxxx xxxx xxxx xxxx xxxx */ + DECODE_OR (0xff600000, 0xe8600000), + /* STRD (immediate) 1110 1001 x1x0 xxxx xxxx xxxx xxxx xxxx */ + /* LDRD (immediate) 1110 1001 x1x1 xxxx xxxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0xff400000, 0xe9400000, t32_emulate_ldrdstrd, + REGS(NOPCWB, NOSPPC, NOSPPC, 0, 0)), + + /* TBB 1110 1000 1101 xxxx xxxx xxxx 0000 xxxx */ + /* TBH 1110 1000 1101 xxxx xxxx xxxx 0001 xxxx */ + DECODE_SIMULATEX(0xfff000e0, 0xe8d00000, t32_simulate_table_branch, + REGS(NOSP, 0, 0, 0, NOSPPC)), + + /* STREX 1110 1000 0100 xxxx xxxx xxxx xxxx xxxx */ + /* LDREX 1110 1000 0101 xxxx xxxx xxxx xxxx xxxx */ + /* STREXB 1110 1000 1100 xxxx xxxx xxxx 0100 xxxx */ + /* STREXH 1110 1000 1100 xxxx xxxx xxxx 0101 xxxx */ + /* STREXD 1110 1000 1100 xxxx xxxx xxxx 0111 xxxx */ + /* LDREXB 1110 1000 1101 xxxx xxxx xxxx 0100 xxxx */ + /* LDREXH 1110 1000 1101 xxxx xxxx xxxx 0101 xxxx */ + /* LDREXD 1110 1000 1101 xxxx xxxx xxxx 0111 xxxx */ + /* And unallocated instructions... */ + DECODE_END +}; + +static const union decode_item t32_table_1110_101x[] = { + /* Data-processing (shifted register) */ + + /* TST 1110 1010 0001 xxxx xxxx 1111 xxxx xxxx */ + /* TEQ 1110 1010 1001 xxxx xxxx 1111 xxxx xxxx */ + DECODE_EMULATEX (0xff700f00, 0xea100f00, t32_emulate_rd8rn16rm0_rwflags, + REGS(NOSPPC, 0, 0, 0, NOSPPC)), + + /* CMN 1110 1011 0001 xxxx xxxx 1111 xxxx xxxx */ + DECODE_OR (0xfff00f00, 0xeb100f00), + /* CMP 1110 1011 1011 xxxx xxxx 1111 xxxx xxxx */ + DECODE_EMULATEX (0xfff00f00, 0xebb00f00, t32_emulate_rd8rn16rm0_rwflags, + REGS(NOPC, 0, 0, 0, NOSPPC)), + + /* MOV 1110 1010 010x 1111 xxxx xxxx xxxx xxxx */ + /* MVN 1110 1010 011x 1111 xxxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0xffcf0000, 0xea4f0000, t32_emulate_rd8rn16rm0_rwflags, + REGS(0, 0, NOSPPC, 0, NOSPPC)), + + /* ??? 1110 1010 101x xxxx xxxx xxxx xxxx xxxx */ + /* ??? 1110 1010 111x xxxx xxxx xxxx xxxx xxxx */ + DECODE_REJECT (0xffa00000, 0xeaa00000), + /* ??? 
1110 1011 001x xxxx xxxx xxxx xxxx xxxx */ + DECODE_REJECT (0xffe00000, 0xeb200000), + /* ??? 1110 1011 100x xxxx xxxx xxxx xxxx xxxx */ + DECODE_REJECT (0xffe00000, 0xeb800000), + /* ??? 1110 1011 111x xxxx xxxx xxxx xxxx xxxx */ + DECODE_REJECT (0xffe00000, 0xebe00000), + + /* ADD/SUB SP, SP, Rm, LSL #0..3 */ + /* 1110 1011 x0xx 1101 x000 1101 xx00 xxxx */ + DECODE_EMULATEX (0xff4f7f30, 0xeb0d0d00, t32_emulate_rd8rn16rm0_rwflags, + REGS(SP, 0, SP, 0, NOSPPC)), + + /* ADD/SUB SP, SP, Rm, shift */ + /* 1110 1011 x0xx 1101 xxxx 1101 xxxx xxxx */ + DECODE_REJECT (0xff4f0f00, 0xeb0d0d00), + + /* ADD/SUB Rd, SP, Rm, shift */ + /* 1110 1011 x0xx 1101 xxxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0xff4f0000, 0xeb0d0000, t32_emulate_rd8rn16rm0_rwflags, + REGS(SP, 0, NOPC, 0, NOSPPC)), + + /* AND 1110 1010 000x xxxx xxxx xxxx xxxx xxxx */ + /* BIC 1110 1010 001x xxxx xxxx xxxx xxxx xxxx */ + /* ORR 1110 1010 010x xxxx xxxx xxxx xxxx xxxx */ + /* ORN 1110 1010 011x xxxx xxxx xxxx xxxx xxxx */ + /* EOR 1110 1010 100x xxxx xxxx xxxx xxxx xxxx */ + /* PKH 1110 1010 110x xxxx xxxx xxxx xxxx xxxx */ + /* ADD 1110 1011 000x xxxx xxxx xxxx xxxx xxxx */ + /* ADC 1110 1011 010x xxxx xxxx xxxx xxxx xxxx */ + /* SBC 1110 1011 011x xxxx xxxx xxxx xxxx xxxx */ + /* SUB 1110 1011 101x xxxx xxxx xxxx xxxx xxxx */ + /* RSB 1110 1011 110x xxxx xxxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0xfe000000, 0xea000000, t32_emulate_rd8rn16rm0_rwflags, + REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)), + + DECODE_END +}; + +static const union decode_item t32_table_1111_0x0x___0[] = { + /* Data-processing (modified immediate) */ + + /* TST 1111 0x00 0001 xxxx 0xxx 1111 xxxx xxxx */ + /* TEQ 1111 0x00 1001 xxxx 0xxx 1111 xxxx xxxx */ + DECODE_EMULATEX (0xfb708f00, 0xf0100f00, t32_emulate_rd8rn16rm0_rwflags, + REGS(NOSPPC, 0, 0, 0, 0)), + + /* CMN 1111 0x01 0001 xxxx 0xxx 1111 xxxx xxxx */ + DECODE_OR (0xfbf08f00, 0xf1100f00), + /* CMP 1111 0x01 1011 xxxx 0xxx 1111 xxxx xxxx */ + DECODE_EMULATEX (0xfbf08f00, 0xf1b00f00, t32_emulate_rd8rn16rm0_rwflags, + REGS(NOPC, 0, 0, 0, 0)), + + /* MOV 1111 0x00 010x 1111 0xxx xxxx xxxx xxxx */ + /* MVN 1111 0x00 011x 1111 0xxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0xfbcf8000, 0xf04f0000, t32_emulate_rd8rn16rm0_rwflags, + REGS(0, 0, NOSPPC, 0, 0)), + + /* ??? 1111 0x00 101x xxxx 0xxx xxxx xxxx xxxx */ + DECODE_REJECT (0xfbe08000, 0xf0a00000), + /* ??? 1111 0x00 110x xxxx 0xxx xxxx xxxx xxxx */ + /* ??? 1111 0x00 111x xxxx 0xxx xxxx xxxx xxxx */ + DECODE_REJECT (0xfbc08000, 0xf0c00000), + /* ??? 1111 0x01 001x xxxx 0xxx xxxx xxxx xxxx */ + DECODE_REJECT (0xfbe08000, 0xf1200000), + /* ??? 1111 0x01 100x xxxx 0xxx xxxx xxxx xxxx */ + DECODE_REJECT (0xfbe08000, 0xf1800000), + /* ??? 
1111 0x01 111x xxxx 0xxx xxxx xxxx xxxx */ + DECODE_REJECT (0xfbe08000, 0xf1e00000), + + /* ADD Rd, SP, #imm 1111 0x01 000x 1101 0xxx xxxx xxxx xxxx */ + /* SUB Rd, SP, #imm 1111 0x01 101x 1101 0xxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0xfb4f8000, 0xf10d0000, t32_emulate_rd8rn16rm0_rwflags, + REGS(SP, 0, NOPC, 0, 0)), + + /* AND 1111 0x00 000x xxxx 0xxx xxxx xxxx xxxx */ + /* BIC 1111 0x00 001x xxxx 0xxx xxxx xxxx xxxx */ + /* ORR 1111 0x00 010x xxxx 0xxx xxxx xxxx xxxx */ + /* ORN 1111 0x00 011x xxxx 0xxx xxxx xxxx xxxx */ + /* EOR 1111 0x00 100x xxxx 0xxx xxxx xxxx xxxx */ + /* ADD 1111 0x01 000x xxxx 0xxx xxxx xxxx xxxx */ + /* ADC 1111 0x01 010x xxxx 0xxx xxxx xxxx xxxx */ + /* SBC 1111 0x01 011x xxxx 0xxx xxxx xxxx xxxx */ + /* SUB 1111 0x01 101x xxxx 0xxx xxxx xxxx xxxx */ + /* RSB 1111 0x01 110x xxxx 0xxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0xfa008000, 0xf0000000, t32_emulate_rd8rn16rm0_rwflags, + REGS(NOSPPC, 0, NOSPPC, 0, 0)), + + DECODE_END +}; + +static const union decode_item t32_table_1111_0x1x___0[] = { + /* Data-processing (plain binary immediate) */ + + /* ADDW Rd, PC, #imm 1111 0x10 0000 1111 0xxx xxxx xxxx xxxx */ + DECODE_OR (0xfbff8000, 0xf20f0000), + /* SUBW Rd, PC, #imm 1111 0x10 1010 1111 0xxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0xfbff8000, 0xf2af0000, t32_emulate_rd8pc16_noflags, + REGS(PC, 0, NOSPPC, 0, 0)), + + /* ADDW SP, SP, #imm 1111 0x10 0000 1101 0xxx 1101 xxxx xxxx */ + DECODE_OR (0xfbff8f00, 0xf20d0d00), + /* SUBW SP, SP, #imm 1111 0x10 1010 1101 0xxx 1101 xxxx xxxx */ + DECODE_EMULATEX (0xfbff8f00, 0xf2ad0d00, t32_emulate_rd8rn16_noflags, + REGS(SP, 0, SP, 0, 0)), + + /* ADDW 1111 0x10 0000 xxxx 0xxx xxxx xxxx xxxx */ + DECODE_OR (0xfbf08000, 0xf2000000), + /* SUBW 1111 0x10 1010 xxxx 0xxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0xfbf08000, 0xf2a00000, t32_emulate_rd8rn16_noflags, + REGS(NOPCX, 0, NOSPPC, 0, 0)), + + /* MOVW 1111 0x10 0100 xxxx 0xxx xxxx xxxx xxxx */ + /* MOVT 1111 0x10 1100 xxxx 0xxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0xfb708000, 0xf2400000, t32_emulate_rd8rn16_noflags, + REGS(0, 0, NOSPPC, 0, 0)), + + /* SSAT16 1111 0x11 0010 xxxx 0000 xxxx 00xx xxxx */ + /* SSAT 1111 0x11 00x0 xxxx 0xxx xxxx xxxx xxxx */ + /* USAT16 1111 0x11 1010 xxxx 0000 xxxx 00xx xxxx */ + /* USAT 1111 0x11 10x0 xxxx 0xxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0xfb508000, 0xf3000000, t32_emulate_rd8rn16rm0_rwflags, + REGS(NOSPPC, 0, NOSPPC, 0, 0)), + + /* SBFX 1111 0x11 0100 xxxx 0xxx xxxx xxxx xxxx */ + /* UBFX 1111 0x11 1100 xxxx 0xxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0xfb708000, 0xf3400000, t32_emulate_rd8rn16_noflags, + REGS(NOSPPC, 0, NOSPPC, 0, 0)), + + /* BFC 1111 0x11 0110 1111 0xxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0xfbff8000, 0xf36f0000, t32_emulate_rd8rn16_noflags, + REGS(0, 0, NOSPPC, 0, 0)), + + /* BFI 1111 0x11 0110 xxxx 0xxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0xfbf08000, 0xf3600000, t32_emulate_rd8rn16_noflags, + REGS(NOSPPCX, 0, NOSPPC, 0, 0)), + + DECODE_END +}; + +static const union decode_item t32_table_1111_0xxx___1[] = { + /* Branches and miscellaneous control */ + + /* YIELD 1111 0011 1010 xxxx 10x0 x000 0000 0001 */ + DECODE_OR (0xfff0d7ff, 0xf3a08001), + /* SEV 1111 0011 1010 xxxx 10x0 x000 0000 0100 */ + DECODE_EMULATE (0xfff0d7ff, 0xf3a08004, kprobe_emulate_none), + /* NOP 1111 0011 1010 xxxx 10x0 x000 0000 0000 */ + /* WFE 1111 0011 1010 xxxx 10x0 x000 0000 0010 */ + /* WFI 1111 0011 1010 xxxx 10x0 x000 0000 0011 */ + DECODE_SIMULATE (0xfff0d7fc, 0xf3a08000, kprobe_simulate_nop), + + /* MRS Rd, CPSR 1111 0011 1110 xxxx 10x0 xxxx 
xxxx xxxx */ + DECODE_SIMULATEX(0xfff0d000, 0xf3e08000, t32_simulate_mrs, + REGS(0, 0, NOSPPC, 0, 0)), + + /* + * Unsupported instructions + * 1111 0x11 1xxx xxxx 10x0 xxxx xxxx xxxx + * + * MSR 1111 0011 100x xxxx 10x0 xxxx xxxx xxxx + * DBG hint 1111 0011 1010 xxxx 10x0 x000 1111 xxxx + * Unallocated hints 1111 0011 1010 xxxx 10x0 x000 xxxx xxxx + * CPS 1111 0011 1010 xxxx 10x0 xxxx xxxx xxxx + * CLREX/DSB/DMB/ISB 1111 0011 1011 xxxx 10x0 xxxx xxxx xxxx + * BXJ 1111 0011 1100 xxxx 10x0 xxxx xxxx xxxx + * SUBS PC,LR,# 1111 0011 1101 xxxx 10x0 xxxx xxxx xxxx + * MRS Rd, SPSR 1111 0011 1111 xxxx 10x0 xxxx xxxx xxxx + * SMC 1111 0111 1111 xxxx 1000 xxxx xxxx xxxx + * UNDEFINED 1111 0111 1111 xxxx 1010 xxxx xxxx xxxx + * ??? 1111 0111 1xxx xxxx 1010 xxxx xxxx xxxx + */ + DECODE_REJECT (0xfb80d000, 0xf3808000), + + /* Bcc 1111 0xxx xxxx xxxx 10x0 xxxx xxxx xxxx */ + DECODE_CUSTOM (0xf800d000, 0xf0008000, t32_decode_cond_branch), + + /* BLX 1111 0xxx xxxx xxxx 11x0 xxxx xxxx xxx0 */ + DECODE_OR (0xf800d001, 0xf000c000), + /* B 1111 0xxx xxxx xxxx 10x1 xxxx xxxx xxxx */ + /* BL 1111 0xxx xxxx xxxx 11x1 xxxx xxxx xxxx */ + DECODE_SIMULATE (0xf8009000, 0xf0009000, t32_simulate_branch), + + DECODE_END +}; + +static const union decode_item t32_table_1111_100x_x0x1__1111[] = { + /* Memory hints */ + + /* PLD (literal) 1111 1000 x001 1111 1111 xxxx xxxx xxxx */ + /* PLI (literal) 1111 1001 x001 1111 1111 xxxx xxxx xxxx */ + DECODE_SIMULATE (0xfe7ff000, 0xf81ff000, kprobe_simulate_nop), + + /* PLD{W} (immediate) 1111 1000 10x1 xxxx 1111 xxxx xxxx xxxx */ + DECODE_OR (0xffd0f000, 0xf890f000), + /* PLD{W} (immediate) 1111 1000 00x1 xxxx 1111 1100 xxxx xxxx */ + DECODE_OR (0xffd0ff00, 0xf810fc00), + /* PLI (immediate) 1111 1001 1001 xxxx 1111 xxxx xxxx xxxx */ + DECODE_OR (0xfff0f000, 0xf990f000), + /* PLI (immediate) 1111 1001 0001 xxxx 1111 1100 xxxx xxxx */ + DECODE_SIMULATEX(0xfff0ff00, 0xf910fc00, kprobe_simulate_nop, + REGS(NOPCX, 0, 0, 0, 0)), + + /* PLD{W} (register) 1111 1000 00x1 xxxx 1111 0000 00xx xxxx */ + DECODE_OR (0xffd0ffc0, 0xf810f000), + /* PLI (register) 1111 1001 0001 xxxx 1111 0000 00xx xxxx */ + DECODE_SIMULATEX(0xfff0ffc0, 0xf910f000, kprobe_simulate_nop, + REGS(NOPCX, 0, 0, 0, NOSPPC)), + + /* Other unallocated instructions... */ + DECODE_END +}; + +static const union decode_item t32_table_1111_100x[] = { + /* Store/Load single data item */ + + /* ??? 1111 100x x11x xxxx xxxx xxxx xxxx xxxx */ + DECODE_REJECT (0xfe600000, 0xf8600000), + + /* ??? 1111 1001 0101 xxxx xxxx xxxx xxxx xxxx */ + DECODE_REJECT (0xfff00000, 0xf9500000), + + /* ??? 1111 100x 0xxx xxxx xxxx 10x0 xxxx xxxx */ + DECODE_REJECT (0xfe800d00, 0xf8000800), + + /* STRBT 1111 1000 0000 xxxx xxxx 1110 xxxx xxxx */ + /* STRHT 1111 1000 0010 xxxx xxxx 1110 xxxx xxxx */ + /* STRT 1111 1000 0100 xxxx xxxx 1110 xxxx xxxx */ + /* LDRBT 1111 1000 0001 xxxx xxxx 1110 xxxx xxxx */ + /* LDRSBT 1111 1001 0001 xxxx xxxx 1110 xxxx xxxx */ + /* LDRHT 1111 1000 0011 xxxx xxxx 1110 xxxx xxxx */ + /* LDRSHT 1111 1001 0011 xxxx xxxx 1110 xxxx xxxx */ + /* LDRT 1111 1000 0101 xxxx xxxx 1110 xxxx xxxx */ + DECODE_REJECT (0xfe800f00, 0xf8000e00), + + /* STR{,B,H} Rn,[PC...] 1111 1000 xxx0 1111 xxxx xxxx xxxx xxxx */ + DECODE_REJECT (0xff1f0000, 0xf80f0000), + + /* STR{,B,H} PC,[Rn...] 
1111 1000 xxx0 xxxx 1111 xxxx xxxx xxxx */ + DECODE_REJECT (0xff10f000, 0xf800f000), + + /* LDR (literal) 1111 1000 x101 1111 xxxx xxxx xxxx xxxx */ + DECODE_SIMULATEX(0xff7f0000, 0xf85f0000, t32_simulate_ldr_literal, + REGS(PC, ANY, 0, 0, 0)), + + /* STR (immediate) 1111 1000 0100 xxxx xxxx 1xxx xxxx xxxx */ + /* LDR (immediate) 1111 1000 0101 xxxx xxxx 1xxx xxxx xxxx */ + DECODE_OR (0xffe00800, 0xf8400800), + /* STR (immediate) 1111 1000 1100 xxxx xxxx xxxx xxxx xxxx */ + /* LDR (immediate) 1111 1000 1101 xxxx xxxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0xffe00000, 0xf8c00000, t32_emulate_ldrstr, + REGS(NOPCX, ANY, 0, 0, 0)), + + /* STR (register) 1111 1000 0100 xxxx xxxx 0000 00xx xxxx */ + /* LDR (register) 1111 1000 0101 xxxx xxxx 0000 00xx xxxx */ + DECODE_EMULATEX (0xffe00fc0, 0xf8400000, t32_emulate_ldrstr, + REGS(NOPCX, ANY, 0, 0, NOSPPC)), + + /* LDRB (literal) 1111 1000 x001 1111 xxxx xxxx xxxx xxxx */ + /* LDRSB (literal) 1111 1001 x001 1111 xxxx xxxx xxxx xxxx */ + /* LDRH (literal) 1111 1000 x011 1111 xxxx xxxx xxxx xxxx */ + /* LDRSH (literal) 1111 1001 x011 1111 xxxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal, + REGS(PC, NOSPPCX, 0, 0, 0)), + + /* STRB (immediate) 1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */ + /* STRH (immediate) 1111 1000 0010 xxxx xxxx 1xxx xxxx xxxx */ + /* LDRB (immediate) 1111 1000 0001 xxxx xxxx 1xxx xxxx xxxx */ + /* LDRSB (immediate) 1111 1001 0001 xxxx xxxx 1xxx xxxx xxxx */ + /* LDRH (immediate) 1111 1000 0011 xxxx xxxx 1xxx xxxx xxxx */ + /* LDRSH (immediate) 1111 1001 0011 xxxx xxxx 1xxx xxxx xxxx */ + DECODE_OR (0xfec00800, 0xf8000800), + /* STRB (immediate) 1111 1000 1000 xxxx xxxx xxxx xxxx xxxx */ + /* STRH (immediate) 1111 1000 1010 xxxx xxxx xxxx xxxx xxxx */ + /* LDRB (immediate) 1111 1000 1001 xxxx xxxx xxxx xxxx xxxx */ + /* LDRSB (immediate) 1111 1001 1001 xxxx xxxx xxxx xxxx xxxx */ + /* LDRH (immediate) 1111 1000 1011 xxxx xxxx xxxx xxxx xxxx */ + /* LDRSH (immediate) 1111 1001 1011 xxxx xxxx xxxx xxxx xxxx */ + DECODE_EMULATEX (0xfec00000, 0xf8800000, t32_emulate_ldrstr, + REGS(NOPCX, NOSPPCX, 0, 0, 0)), + + /* STRB (register) 1111 1000 0000 xxxx xxxx 0000 00xx xxxx */ + /* STRH (register) 1111 1000 0010 xxxx xxxx 0000 00xx xxxx */ + /* LDRB (register) 1111 1000 0001 xxxx xxxx 0000 00xx xxxx */ + /* LDRSB (register) 1111 1001 0001 xxxx xxxx 0000 00xx xxxx */ + /* LDRH (register) 1111 1000 0011 xxxx xxxx 0000 00xx xxxx */ + /* LDRSH (register) 1111 1001 0011 xxxx xxxx 0000 00xx xxxx */ + DECODE_EMULATEX (0xfe800fc0, 0xf8000000, t32_emulate_ldrstr, + REGS(NOPCX, NOSPPCX, 0, 0, NOSPPC)), + + /* Other unallocated instructions... */ + DECODE_END +}; + +static const union decode_item t32_table_1111_1010___1111[] = { + /* Data-processing (register) */ + + /* ??? 1111 1010 011x xxxx 1111 xxxx 1xxx xxxx */ + DECODE_REJECT (0xffe0f080, 0xfa60f080), + + /* SXTH 1111 1010 0000 1111 1111 xxxx 1xxx xxxx */ + /* UXTH 1111 1010 0001 1111 1111 xxxx 1xxx xxxx */ + /* SXTB16 1111 1010 0010 1111 1111 xxxx 1xxx xxxx */ + /* UXTB16 1111 1010 0011 1111 1111 xxxx 1xxx xxxx */ + /* SXTB 1111 1010 0100 1111 1111 xxxx 1xxx xxxx */ + /* UXTB 1111 1010 0101 1111 1111 xxxx 1xxx xxxx */ + DECODE_EMULATEX (0xff8ff080, 0xfa0ff080, t32_emulate_rd8rn16rm0_rwflags, + REGS(0, 0, NOSPPC, 0, NOSPPC)), + + + /* ??? 1111 1010 1xxx xxxx 1111 xxxx 0x11 xxxx */ + DECODE_REJECT (0xff80f0b0, 0xfa80f030), + /* ??? 
1111 1010 1x11 xxxx 1111 xxxx 0xxx xxxx */ + DECODE_REJECT (0xffb0f080, 0xfab0f000), + + /* SADD16 1111 1010 1001 xxxx 1111 xxxx 0000 xxxx */ + /* SASX 1111 1010 1010 xxxx 1111 xxxx 0000 xxxx */ + /* SSAX 1111 1010 1110 xxxx 1111 xxxx 0000 xxxx */ + /* SSUB16 1111 1010 1101 xxxx 1111 xxxx 0000 xxxx */ + /* SADD8 1111 1010 1000 xxxx 1111 xxxx 0000 xxxx */ + /* SSUB8 1111 1010 1100 xxxx 1111 xxxx 0000 xxxx */ + + /* QADD16 1111 1010 1001 xxxx 1111 xxxx 0001 xxxx */ + /* QASX 1111 1010 1010 xxxx 1111 xxxx 0001 xxxx */ + /* QSAX 1111 1010 1110 xxxx 1111 xxxx 0001 xxxx */ + /* QSUB16 1111 1010 1101 xxxx 1111 xxxx 0001 xxxx */ + /* QADD8 1111 1010 1000 xxxx 1111 xxxx 0001 xxxx */ + /* QSUB8 1111 1010 1100 xxxx 1111 xxxx 0001 xxxx */ + + /* SHADD16 1111 1010 1001 xxxx 1111 xxxx 0010 xxxx */ + /* SHASX 1111 1010 1010 xxxx 1111 xxxx 0010 xxxx */ + /* SHSAX 1111 1010 1110 xxxx 1111 xxxx 0010 xxxx */ + /* SHSUB16 1111 1010 1101 xxxx 1111 xxxx 0010 xxxx */ + /* SHADD8 1111 1010 1000 xxxx 1111 xxxx 0010 xxxx */ + /* SHSUB8 1111 1010 1100 xxxx 1111 xxxx 0010 xxxx */ + + /* UADD16 1111 1010 1001 xxxx 1111 xxxx 0100 xxxx */ + /* UASX 1111 1010 1010 xxxx 1111 xxxx 0100 xxxx */ + /* USAX 1111 1010 1110 xxxx 1111 xxxx 0100 xxxx */ + /* USUB16 1111 1010 1101 xxxx 1111 xxxx 0100 xxxx */ + /* UADD8 1111 1010 1000 xxxx 1111 xxxx 0100 xxxx */ + /* USUB8 1111 1010 1100 xxxx 1111 xxxx 0100 xxxx */ + + /* UQADD16 1111 1010 1001 xxxx 1111 xxxx 0101 xxxx */ + /* UQASX 1111 1010 1010 xxxx 1111 xxxx 0101 xxxx */ + /* UQSAX 1111 1010 1110 xxxx 1111 xxxx 0101 xxxx */ + /* UQSUB16 1111 1010 1101 xxxx 1111 xxxx 0101 xxxx */ + /* UQADD8 1111 1010 1000 xxxx 1111 xxxx 0101 xxxx */ + /* UQSUB8 1111 1010 1100 xxxx 1111 xxxx 0101 xxxx */ + + /* UHADD16 1111 1010 1001 xxxx 1111 xxxx 0110 xxxx */ + /* UHASX 1111 1010 1010 xxxx 1111 xxxx 0110 xxxx */ + /* UHSAX 1111 1010 1110 xxxx 1111 xxxx 0110 xxxx */ + /* UHSUB16 1111 1010 1101 xxxx 1111 xxxx 0110 xxxx */ + /* UHADD8 1111 1010 1000 xxxx 1111 xxxx 0110 xxxx */ + /* UHSUB8 1111 1010 1100 xxxx 1111 xxxx 0110 xxxx */ + DECODE_OR (0xff80f080, 0xfa80f000), + + /* SXTAH 1111 1010 0000 xxxx 1111 xxxx 1xxx xxxx */ + /* UXTAH 1111 1010 0001 xxxx 1111 xxxx 1xxx xxxx */ + /* SXTAB16 1111 1010 0010 xxxx 1111 xxxx 1xxx xxxx */ + /* UXTAB16 1111 1010 0011 xxxx 1111 xxxx 1xxx xxxx */ + /* SXTAB 1111 1010 0100 xxxx 1111 xxxx 1xxx xxxx */ + /* UXTAB 1111 1010 0101 xxxx 1111 xxxx 1xxx xxxx */ + DECODE_OR (0xff80f080, 0xfa00f080), + + /* QADD 1111 1010 1000 xxxx 1111 xxxx 1000 xxxx */ + /* QDADD 1111 1010 1000 xxxx 1111 xxxx 1001 xxxx */ + /* QSUB 1111 1010 1000 xxxx 1111 xxxx 1010 xxxx */ + /* QDSUB 1111 1010 1000 xxxx 1111 xxxx 1011 xxxx */ + DECODE_OR (0xfff0f0c0, 0xfa80f080), + + /* SEL 1111 1010 1010 xxxx 1111 xxxx 1000 xxxx */ + DECODE_OR (0xfff0f0f0, 0xfaa0f080), + + /* LSL 1111 1010 000x xxxx 1111 xxxx 0000 xxxx */ + /* LSR 1111 1010 001x xxxx 1111 xxxx 0000 xxxx */ + /* ASR 1111 1010 010x xxxx 1111 xxxx 0000 xxxx */ + /* ROR 1111 1010 011x xxxx 1111 xxxx 0000 xxxx */ + DECODE_EMULATEX (0xff80f0f0, 0xfa00f000, t32_emulate_rd8rn16rm0_rwflags, + REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)), + + /* CLZ 1111 1010 1011 xxxx 1111 xxxx 1000 xxxx */ + DECODE_OR (0xfff0f0f0, 0xfab0f080), + + /* REV 1111 1010 1001 xxxx 1111 xxxx 1000 xxxx */ + /* REV16 1111 1010 1001 xxxx 1111 xxxx 1001 xxxx */ + /* RBIT 1111 1010 1001 xxxx 1111 xxxx 1010 xxxx */ + /* REVSH 1111 1010 1001 xxxx 1111 xxxx 1011 xxxx */ + DECODE_EMULATEX (0xfff0f0c0, 0xfa90f080, t32_emulate_rd8rn16_noflags, + REGS(NOSPPC, 0, NOSPPC, 0, SAMEAS16)), 
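
Each entry in these tables pairs a mask with a value, and an instruction falls into an entry when (insn & mask) == value. As a standalone check against the REV/REV16/RBIT/REVSH entry just above (the opcode below, which encodes "rev r1, r2", is supplied purely for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* rev r1, r2: both 16-bit halfwords combined into one 32-bit word */
	uint32_t insn = 0xfa92f182;

	/* Mask/value pair from the REV/REV16/RBIT/REVSH entry above */
	if ((insn & 0xfff0f0c0) == 0xfa90f080)
		printf("matches the REV/REV16/RBIT/REVSH entry\n");
	return 0;
}
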
+ + /* Other unallocated instructions... */ + DECODE_END +}; + +static const union decode_item t32_table_1111_1011_0[] = { + /* Multiply, multiply accumulate, and absolute difference */ + + /* ??? 1111 1011 0000 xxxx 1111 xxxx 0001 xxxx */ + DECODE_REJECT (0xfff0f0f0, 0xfb00f010), + /* ??? 1111 1011 0111 xxxx 1111 xxxx 0001 xxxx */ + DECODE_REJECT (0xfff0f0f0, 0xfb70f010), + + /* SMULxy 1111 1011 0001 xxxx 1111 xxxx 00xx xxxx */ + DECODE_OR (0xfff0f0c0, 0xfb10f000), + /* MUL 1111 1011 0000 xxxx 1111 xxxx 0000 xxxx */ + /* SMUAD{X} 1111 1011 0010 xxxx 1111 xxxx 000x xxxx */ + /* SMULWy 1111 1011 0011 xxxx 1111 xxxx 000x xxxx */ + /* SMUSD{X} 1111 1011 0100 xxxx 1111 xxxx 000x xxxx */ + /* SMMUL{R} 1111 1011 0101 xxxx 1111 xxxx 000x xxxx */ + /* USAD8 1111 1011 0111 xxxx 1111 xxxx 0000 xxxx */ + DECODE_EMULATEX (0xff80f0e0, 0xfb00f000, t32_emulate_rd8rn16rm0_rwflags, + REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)), + + /* ??? 1111 1011 0111 xxxx xxxx xxxx 0001 xxxx */ + DECODE_REJECT (0xfff000f0, 0xfb700010), + + /* SMLAxy 1111 1011 0001 xxxx xxxx xxxx 00xx xxxx */ + DECODE_OR (0xfff000c0, 0xfb100000), + /* MLA 1111 1011 0000 xxxx xxxx xxxx 0000 xxxx */ + /* MLS 1111 1011 0000 xxxx xxxx xxxx 0001 xxxx */ + /* SMLAD{X} 1111 1011 0010 xxxx xxxx xxxx 000x xxxx */ + /* SMLAWy 1111 1011 0011 xxxx xxxx xxxx 000x xxxx */ + /* SMLSD{X} 1111 1011 0100 xxxx xxxx xxxx 000x xxxx */ + /* SMMLA{R} 1111 1011 0101 xxxx xxxx xxxx 000x xxxx */ + /* SMMLS{R} 1111 1011 0110 xxxx xxxx xxxx 000x xxxx */ + /* USADA8 1111 1011 0111 xxxx xxxx xxxx 0000 xxxx */ + DECODE_EMULATEX (0xff8000c0, 0xfb000000, t32_emulate_rd8rn16rm0ra12_noflags, + REGS(NOSPPC, NOSPPCX, NOSPPC, 0, NOSPPC)), + + /* Other unallocated instructions... */ + DECODE_END +}; + +static const union decode_item t32_table_1111_1011_1[] = { + /* Long multiply, long multiply accumulate, and divide */ + + /* UMAAL 1111 1011 1110 xxxx xxxx xxxx 0110 xxxx */ + DECODE_OR (0xfff000f0, 0xfbe00060), + /* SMLALxy 1111 1011 1100 xxxx xxxx xxxx 10xx xxxx */ + DECODE_OR (0xfff000c0, 0xfbc00080), + /* SMLALD{X} 1111 1011 1100 xxxx xxxx xxxx 110x xxxx */ + /* SMLSLD{X} 1111 1011 1101 xxxx xxxx xxxx 110x xxxx */ + DECODE_OR (0xffe000e0, 0xfbc000c0), + /* SMULL 1111 1011 1000 xxxx xxxx xxxx 0000 xxxx */ + /* UMULL 1111 1011 1010 xxxx xxxx xxxx 0000 xxxx */ + /* SMLAL 1111 1011 1100 xxxx xxxx xxxx 0000 xxxx */ + /* UMLAL 1111 1011 1110 xxxx xxxx xxxx 0000 xxxx */ + DECODE_EMULATEX (0xff9000f0, 0xfb800000, t32_emulate_rdlo12rdhi8rn16rm0_noflags, + REGS(NOSPPC, NOSPPC, NOSPPC, 0, NOSPPC)), + + /* SDIV 1111 1011 1001 xxxx xxxx xxxx 1111 xxxx */ + /* UDIV 1111 1011 1011 xxxx xxxx xxxx 1111 xxxx */ + /* Other unallocated instructions... 
*/ + DECODE_END +}; + +const union decode_item kprobe_decode_thumb32_table[] = { + + /* + * Load/store multiple instructions + * 1110 100x x0xx xxxx xxxx xxxx xxxx xxxx + */ + DECODE_TABLE (0xfe400000, 0xe8000000, t32_table_1110_100x_x0xx), + + /* + * Load/store dual, load/store exclusive, table branch + * 1110 100x x1xx xxxx xxxx xxxx xxxx xxxx + */ + DECODE_TABLE (0xfe400000, 0xe8400000, t32_table_1110_100x_x1xx), + + /* + * Data-processing (shifted register) + * 1110 101x xxxx xxxx xxxx xxxx xxxx xxxx + */ + DECODE_TABLE (0xfe000000, 0xea000000, t32_table_1110_101x), + + /* + * Coprocessor instructions + * 1110 11xx xxxx xxxx xxxx xxxx xxxx xxxx + */ + DECODE_REJECT (0xfc000000, 0xec000000), + + /* + * Data-processing (modified immediate) + * 1111 0x0x xxxx xxxx 0xxx xxxx xxxx xxxx + */ + DECODE_TABLE (0xfa008000, 0xf0000000, t32_table_1111_0x0x___0), + + /* + * Data-processing (plain binary immediate) + * 1111 0x1x xxxx xxxx 0xxx xxxx xxxx xxxx + */ + DECODE_TABLE (0xfa008000, 0xf2000000, t32_table_1111_0x1x___0), + + /* + * Branches and miscellaneous control + * 1111 0xxx xxxx xxxx 1xxx xxxx xxxx xxxx + */ + DECODE_TABLE (0xf8008000, 0xf0008000, t32_table_1111_0xxx___1), + + /* + * Advanced SIMD element or structure load/store instructions + * 1111 1001 xxx0 xxxx xxxx xxxx xxxx xxxx + */ + DECODE_REJECT (0xff100000, 0xf9000000), + + /* + * Memory hints + * 1111 100x x0x1 xxxx 1111 xxxx xxxx xxxx + */ + DECODE_TABLE (0xfe50f000, 0xf810f000, t32_table_1111_100x_x0x1__1111), + + /* + * Store single data item + * 1111 1000 xxx0 xxxx xxxx xxxx xxxx xxxx + * Load single data items + * 1111 100x xxx1 xxxx xxxx xxxx xxxx xxxx + */ + DECODE_TABLE (0xfe000000, 0xf8000000, t32_table_1111_100x), + + /* + * Data-processing (register) + * 1111 1010 xxxx xxxx 1111 xxxx xxxx xxxx + */ + DECODE_TABLE (0xff00f000, 0xfa00f000, t32_table_1111_1010___1111), + + /* + * Multiply, multiply accumulate, and absolute difference + * 1111 1011 0xxx xxxx xxxx xxxx xxxx xxxx + */ + DECODE_TABLE (0xff800000, 0xfb000000, t32_table_1111_1011_0), + + /* + * Long multiply, long multiply accumulate, and divide + * 1111 1011 1xxx xxxx xxxx xxxx xxxx xxxx + */ + DECODE_TABLE (0xff800000, 0xfb800000, t32_table_1111_1011_1), + + /* + * Coprocessor instructions + * 1111 11xx xxxx xxxx xxxx xxxx xxxx xxxx + */ + DECODE_END +}; + +static void __kprobes +t16_simulate_bxblx(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + unsigned long pc = thumb_probe_pc(p); + int rm = (insn >> 3) & 0xf; + unsigned long rmv = (rm == 15) ? pc : regs->uregs[rm]; + + if (insn & (1 << 7)) /* BLX ? */ + regs->ARM_lr = (unsigned long)p->addr + 2; + + bx_write_pc(rmv, regs); +} + +static void __kprobes +t16_simulate_ldr_literal(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + unsigned long* base = (unsigned long *)(thumb_probe_pc(p) & ~3); + long index = insn & 0xff; + int rt = (insn >> 8) & 0x7; + regs->uregs[rt] = base[index]; +} + +static void __kprobes +t16_simulate_ldrstr_sp_relative(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + unsigned long* base = (unsigned long *)regs->ARM_sp; + long index = insn & 0xff; + int rt = (insn >> 8) & 0x7; + if (insn & 0x800) /* LDR */ + regs->uregs[rt] = base[index]; + else /* STR */ + base[index] = regs->uregs[rt]; +} + +static void __kprobes +t16_simulate_reladr(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + unsigned long base = (insn & 0x800) ? 
regs->ARM_sp + : (thumb_probe_pc(p) & ~3); + long offset = insn & 0xff; + int rt = (insn >> 8) & 0x7; + regs->uregs[rt] = base + offset * 4; +} + +static void __kprobes +t16_simulate_add_sp_imm(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + long imm = insn & 0x7f; + if (insn & 0x80) /* SUB */ + regs->ARM_sp -= imm * 4; + else /* ADD */ + regs->ARM_sp += imm * 4; +} + +static void __kprobes +t16_simulate_cbz(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + int rn = insn & 0x7; + kprobe_opcode_t nonzero = regs->uregs[rn] ? insn : ~insn; + if (nonzero & 0x800) { + long i = insn & 0x200; + long imm5 = insn & 0xf8; + unsigned long pc = thumb_probe_pc(p); + regs->ARM_pc = pc + (i >> 3) + (imm5 >> 2); + } +} + +static void __kprobes +t16_simulate_it(struct kprobe *p, struct pt_regs *regs) +{ + /* + * The 8 IT state bits are split into two parts in CPSR: + * ITSTATE<1:0> are in CPSR<26:25> + * ITSTATE<7:2> are in CPSR<15:10> + * The new IT state is in the lower byte of insn. + */ + kprobe_opcode_t insn = p->opcode; + unsigned long cpsr = regs->ARM_cpsr; + cpsr &= ~PSR_IT_MASK; + cpsr |= (insn & 0xfc) << 8; + cpsr |= (insn & 0x03) << 25; + regs->ARM_cpsr = cpsr; +} + +static void __kprobes +t16_singlestep_it(struct kprobe *p, struct pt_regs *regs) +{ + regs->ARM_pc += 2; + t16_simulate_it(p, regs); +} + +static enum kprobe_insn __kprobes +t16_decode_it(kprobe_opcode_t insn, struct arch_specific_insn *asi) +{ + asi->insn_singlestep = t16_singlestep_it; + return INSN_GOOD_NO_SLOT; +} + +static void __kprobes +t16_simulate_cond_branch(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + unsigned long pc = thumb_probe_pc(p); + long offset = insn & 0x7f; + offset -= insn & 0x80; /* Apply sign bit */ + regs->ARM_pc = pc + (offset * 2); +} + +static enum kprobe_insn __kprobes +t16_decode_cond_branch(kprobe_opcode_t insn, struct arch_specific_insn *asi) +{ + int cc = (insn >> 8) & 0xf; + asi->insn_check_cc = kprobe_condition_checks[cc]; + asi->insn_handler = t16_simulate_cond_branch; + return INSN_GOOD_NO_SLOT; +} + +static void __kprobes +t16_simulate_branch(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + unsigned long pc = thumb_probe_pc(p); + long offset = insn & 0x3ff; + offset -= insn & 0x400; /* Apply sign bit */ + regs->ARM_pc = pc + (offset * 2); +} + +static unsigned long __kprobes +t16_emulate_loregs(struct kprobe *p, struct pt_regs *regs) +{ + unsigned long oldcpsr = regs->ARM_cpsr; + unsigned long newcpsr; + + __asm__ __volatile__ ( + "msr cpsr_fs, %[oldcpsr] \n\t" + "ldmia %[regs], {r0-r7} \n\t" + "blx %[fn] \n\t" + "stmia %[regs], {r0-r7} \n\t" + "mrs %[newcpsr], cpsr \n\t" + : [newcpsr] "=r" (newcpsr) + : [oldcpsr] "r" (oldcpsr), [regs] "r" (regs), + [fn] "r" (p->ainsn.insn_fn) + : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "lr", "memory", "cc" + ); + + return (oldcpsr & ~APSR_MASK) | (newcpsr & APSR_MASK); +} + +static void __kprobes +t16_emulate_loregs_rwflags(struct kprobe *p, struct pt_regs *regs) +{ + regs->ARM_cpsr = t16_emulate_loregs(p, regs); +} + +static void __kprobes +t16_emulate_loregs_noitrwflags(struct kprobe *p, struct pt_regs *regs) +{ + unsigned long cpsr = t16_emulate_loregs(p, regs); + if (!in_it_block(cpsr)) + regs->ARM_cpsr = cpsr; +} + +static void __kprobes +t16_emulate_hiregs(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + unsigned long pc = thumb_probe_pc(p); + int rdn = (insn & 0x7) | ((insn & 0x80) 
>> 4); + int rm = (insn >> 3) & 0xf; + + register unsigned long rdnv asm("r1"); + register unsigned long rmv asm("r0"); + unsigned long cpsr = regs->ARM_cpsr; + + rdnv = (rdn == 15) ? pc : regs->uregs[rdn]; + rmv = (rm == 15) ? pc : regs->uregs[rm]; + + __asm__ __volatile__ ( + "msr cpsr_fs, %[cpsr] \n\t" + "blx %[fn] \n\t" + "mrs %[cpsr], cpsr \n\t" + : "=r" (rdnv), [cpsr] "=r" (cpsr) + : "0" (rdnv), "r" (rmv), "1" (cpsr), [fn] "r" (p->ainsn.insn_fn) + : "lr", "memory", "cc" + ); + + if (rdn == 15) + rdnv &= ~1; + + regs->uregs[rdn] = rdnv; + regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK); +} + +static enum kprobe_insn __kprobes +t16_decode_hiregs(kprobe_opcode_t insn, struct arch_specific_insn *asi) +{ + insn &= ~0x00ff; + insn |= 0x001; /* Set Rdn = R1 and Rm = R0 */ + ((u16 *)asi->insn)[0] = insn; + asi->insn_handler = t16_emulate_hiregs; + return INSN_GOOD; +} + +static void __kprobes +t16_emulate_push(struct kprobe *p, struct pt_regs *regs) +{ + __asm__ __volatile__ ( + "ldr r9, [%[regs], #13*4] \n\t" + "ldr r8, [%[regs], #14*4] \n\t" + "ldmia %[regs], {r0-r7} \n\t" + "blx %[fn] \n\t" + "str r9, [%[regs], #13*4] \n\t" + : + : [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn) + : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", + "lr", "memory", "cc" + ); +} + +static enum kprobe_insn __kprobes +t16_decode_push(kprobe_opcode_t insn, struct arch_specific_insn *asi) +{ + /* + * To simulate a PUSH we use a Thumb-2 "STMDB R9!, {registers}" + * and call it with R9=SP and LR in the register list represented + * by R8. + */ + ((u16 *)asi->insn)[0] = 0xe929; /* 1st half STMDB R9!,{} */ + ((u16 *)asi->insn)[1] = insn & 0x1ff; /* 2nd half (register list) */ + asi->insn_handler = t16_emulate_push; + return INSN_GOOD; +} + +static void __kprobes +t16_emulate_pop_nopc(struct kprobe *p, struct pt_regs *regs) +{ + __asm__ __volatile__ ( + "ldr r9, [%[regs], #13*4] \n\t" + "ldmia %[regs], {r0-r7} \n\t" + "blx %[fn] \n\t" + "stmia %[regs], {r0-r7} \n\t" + "str r9, [%[regs], #13*4] \n\t" + : + : [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn) + : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r9", + "lr", "memory", "cc" + ); +} + +static void __kprobes +t16_emulate_pop_pc(struct kprobe *p, struct pt_regs *regs) +{ + register unsigned long pc asm("r8"); + + __asm__ __volatile__ ( + "ldr r9, [%[regs], #13*4] \n\t" + "ldmia %[regs], {r0-r7} \n\t" + "blx %[fn] \n\t" + "stmia %[regs], {r0-r7} \n\t" + "str r9, [%[regs], #13*4] \n\t" + : "=r" (pc) + : [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn) + : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r9", + "lr", "memory", "cc" + ); + + bx_write_pc(pc, regs); +} + +static enum kprobe_insn __kprobes +t16_decode_pop(kprobe_opcode_t insn, struct arch_specific_insn *asi) +{ + /* + * To simulate a POP we use a Thumb-2 "LDMIA R9!, {registers}" + * and call it with R9=SP and PC in the register list represented + * by R8. + */ + ((u16 *)asi->insn)[0] = 0xe8b9; /* 1st half LDMIA R9!,{} */ + ((u16 *)asi->insn)[1] = insn & 0x1ff; /* 2nd half (register list) */ + asi->insn_handler = insn & 0x100 ? 
t16_emulate_pop_pc + : t16_emulate_pop_nopc; + return INSN_GOOD; +} + +static const union decode_item t16_table_1011[] = { + /* Miscellaneous 16-bit instructions */ + + /* ADD (SP plus immediate) 1011 0000 0xxx xxxx */ + /* SUB (SP minus immediate) 1011 0000 1xxx xxxx */ + DECODE_SIMULATE (0xff00, 0xb000, t16_simulate_add_sp_imm), + + /* CBZ 1011 00x1 xxxx xxxx */ + /* CBNZ 1011 10x1 xxxx xxxx */ + DECODE_SIMULATE (0xf500, 0xb100, t16_simulate_cbz), + + /* SXTH 1011 0010 00xx xxxx */ + /* SXTB 1011 0010 01xx xxxx */ + /* UXTH 1011 0010 10xx xxxx */ + /* UXTB 1011 0010 11xx xxxx */ + /* REV 1011 1010 00xx xxxx */ + /* REV16 1011 1010 01xx xxxx */ + /* ??? 1011 1010 10xx xxxx */ + /* REVSH 1011 1010 11xx xxxx */ + DECODE_REJECT (0xffc0, 0xba80), + DECODE_EMULATE (0xf500, 0xb000, t16_emulate_loregs_rwflags), + + /* PUSH 1011 010x xxxx xxxx */ + DECODE_CUSTOM (0xfe00, 0xb400, t16_decode_push), + /* POP 1011 110x xxxx xxxx */ + DECODE_CUSTOM (0xfe00, 0xbc00, t16_decode_pop), + + /* + * If-Then, and hints + * 1011 1111 xxxx xxxx + */ + + /* YIELD 1011 1111 0001 0000 */ + DECODE_OR (0xffff, 0xbf10), + /* SEV 1011 1111 0100 0000 */ + DECODE_EMULATE (0xffff, 0xbf40, kprobe_emulate_none), + /* NOP 1011 1111 0000 0000 */ + /* WFE 1011 1111 0010 0000 */ + /* WFI 1011 1111 0011 0000 */ + DECODE_SIMULATE (0xffcf, 0xbf00, kprobe_simulate_nop), + /* Unassigned hints 1011 1111 xxxx 0000 */ + DECODE_REJECT (0xff0f, 0xbf00), + /* IT 1011 1111 xxxx xxxx */ + DECODE_CUSTOM (0xff00, 0xbf00, t16_decode_it), + + /* SETEND 1011 0110 010x xxxx */ + /* CPS 1011 0110 011x xxxx */ + /* BKPT 1011 1110 xxxx xxxx */ + /* And unallocated instructions... */ + DECODE_END +}; + +const union decode_item kprobe_decode_thumb16_table[] = { + + /* + * Shift (immediate), add, subtract, move, and compare + * 00xx xxxx xxxx xxxx + */ + + /* CMP (immediate) 0010 1xxx xxxx xxxx */ + DECODE_EMULATE (0xf800, 0x2800, t16_emulate_loregs_rwflags), + + /* ADD (register) 0001 100x xxxx xxxx */ + /* SUB (register) 0001 101x xxxx xxxx */ + /* LSL (immediate) 0000 0xxx xxxx xxxx */ + /* LSR (immediate) 0000 1xxx xxxx xxxx */ + /* ASR (immediate) 0001 0xxx xxxx xxxx */ + /* ADD (immediate, Thumb) 0001 110x xxxx xxxx */ + /* SUB (immediate, Thumb) 0001 111x xxxx xxxx */ + /* MOV (immediate) 0010 0xxx xxxx xxxx */ + /* ADD (immediate, Thumb) 0011 0xxx xxxx xxxx */ + /* SUB (immediate, Thumb) 0011 1xxx xxxx xxxx */ + DECODE_EMULATE (0xc000, 0x0000, t16_emulate_loregs_noitrwflags), + + /* + * 16-bit Thumb data-processing instructions + * 0100 00xx xxxx xxxx + */ + + /* TST (register) 0100 0010 00xx xxxx */ + DECODE_EMULATE (0xffc0, 0x4200, t16_emulate_loregs_rwflags), + /* CMP (register) 0100 0010 10xx xxxx */ + /* CMN (register) 0100 0010 11xx xxxx */ + DECODE_EMULATE (0xff80, 0x4280, t16_emulate_loregs_rwflags), + /* AND (register) 0100 0000 00xx xxxx */ + /* EOR (register) 0100 0000 01xx xxxx */ + /* LSL (register) 0100 0000 10xx xxxx */ + /* LSR (register) 0100 0000 11xx xxxx */ + /* ASR (register) 0100 0001 00xx xxxx */ + /* ADC (register) 0100 0001 01xx xxxx */ + /* SBC (register) 0100 0001 10xx xxxx */ + /* ROR (register) 0100 0001 11xx xxxx */ + /* RSB (immediate) 0100 0010 01xx xxxx */ + /* ORR (register) 0100 0011 00xx xxxx */ + /* MUL 0100 0011 00xx xxxx */ + /* BIC (register) 0100 0011 10xx xxxx */ + /* MVN (register) 0100 0011 10xx xxxx */ + DECODE_EMULATE (0xfc00, 0x4000, t16_emulate_loregs_noitrwflags), + + /* + * Special data instructions and branch and exchange + * 0100 01xx xxxx xxxx + */ + + /* BLX pc 0100 0111 1111 1xxx */ + 
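/* (BLX with PC as the target register is UNPREDICTABLE, so the entry below rejects it rather than simulating it.) */ + 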
DECODE_REJECT (0xfff8, 0x47f8), + + /* BX (register) 0100 0111 0xxx xxxx */ + /* BLX (register) 0100 0111 1xxx xxxx */ + DECODE_SIMULATE (0xff00, 0x4700, t16_simulate_bxblx), + + /* ADD pc, pc 0100 0100 1111 1111 */ + DECODE_REJECT (0xffff, 0x44ff), + + /* ADD (register) 0100 0100 xxxx xxxx */ + /* CMP (register) 0100 0101 xxxx xxxx */ + /* MOV (register) 0100 0110 xxxx xxxx */ + DECODE_CUSTOM (0xfc00, 0x4400, t16_decode_hiregs), + + /* + * Load from Literal Pool + * LDR (literal) 0100 1xxx xxxx xxxx + */ + DECODE_SIMULATE (0xf800, 0x4800, t16_simulate_ldr_literal), + + /* + * 16-bit Thumb Load/store instructions + * 0101 xxxx xxxx xxxx + * 011x xxxx xxxx xxxx + * 100x xxxx xxxx xxxx + */ + + /* STR (register) 0101 000x xxxx xxxx */ + /* STRH (register) 0101 001x xxxx xxxx */ + /* STRB (register) 0101 010x xxxx xxxx */ + /* LDRSB (register) 0101 011x xxxx xxxx */ + /* LDR (register) 0101 100x xxxx xxxx */ + /* LDRH (register) 0101 101x xxxx xxxx */ + /* LDRB (register) 0101 110x xxxx xxxx */ + /* LDRSH (register) 0101 111x xxxx xxxx */ + /* STR (immediate, Thumb) 0110 0xxx xxxx xxxx */ + /* LDR (immediate, Thumb) 0110 1xxx xxxx xxxx */ + /* STRB (immediate, Thumb) 0111 0xxx xxxx xxxx */ + /* LDRB (immediate, Thumb) 0111 1xxx xxxx xxxx */ + DECODE_EMULATE (0xc000, 0x4000, t16_emulate_loregs_rwflags), + /* STRH (immediate, Thumb) 1000 0xxx xxxx xxxx */ + /* LDRH (immediate, Thumb) 1000 1xxx xxxx xxxx */ + DECODE_EMULATE (0xf000, 0x8000, t16_emulate_loregs_rwflags), + /* STR (immediate, Thumb) 1001 0xxx xxxx xxxx */ + /* LDR (immediate, Thumb) 1001 1xxx xxxx xxxx */ + DECODE_SIMULATE (0xf000, 0x9000, t16_simulate_ldrstr_sp_relative), + + /* + * Generate PC-/SP-relative address + * ADR (literal) 1010 0xxx xxxx xxxx + * ADD (SP plus immediate) 1010 1xxx xxxx xxxx + */ + DECODE_SIMULATE (0xf000, 0xa000, t16_simulate_reladr), + + /* + * Miscellaneous 16-bit instructions + * 1011 xxxx xxxx xxxx + */ + DECODE_TABLE (0xf000, 0xb000, t16_table_1011), + + /* STM 1100 0xxx xxxx xxxx */ + /* LDM 1100 1xxx xxxx xxxx */ + DECODE_EMULATE (0xf000, 0xc000, t16_emulate_loregs_rwflags), + + /* + * Conditional branch, and Supervisor Call + */ + + /* Permanently UNDEFINED 1101 1110 xxxx xxxx */ + /* SVC 1101 1111 xxxx xxxx */ + DECODE_REJECT (0xfe00, 0xde00), + + /* Conditional branch 1101 xxxx xxxx xxxx */ + DECODE_CUSTOM (0xf000, 0xd000, t16_decode_cond_branch), + + /* + * Unconditional branch + * B 1110 0xxx xxxx xxxx + */ + DECODE_SIMULATE (0xf800, 0xe000, t16_simulate_branch), + + DECODE_END +}; + +static unsigned long __kprobes thumb_check_cc(unsigned long cpsr) +{ + if (unlikely(in_it_block(cpsr))) + return kprobe_condition_checks[current_cond(cpsr)](cpsr); + return true; +} + +static void __kprobes thumb16_singlestep(struct kprobe *p, struct pt_regs *regs) +{ + regs->ARM_pc += 2; + p->ainsn.insn_handler(p, regs); + regs->ARM_cpsr = it_advance(regs->ARM_cpsr); +} + +static void __kprobes thumb32_singlestep(struct kprobe *p, struct pt_regs *regs) +{ + regs->ARM_pc += 4; + p->ainsn.insn_handler(p, regs); + regs->ARM_cpsr = it_advance(regs->ARM_cpsr); +} + +enum kprobe_insn __kprobes +thumb16_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) +{ + asi->insn_singlestep = thumb16_singlestep; + asi->insn_check_cc = thumb_check_cc; + return kprobe_decode_insn(insn, asi, kprobe_decode_thumb16_table, true); +} + +enum kprobe_insn __kprobes +thumb32_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) +{ + asi->insn_singlestep = thumb32_singlestep; + asi->insn_check_cc = 
thumb_check_cc; + return kprobe_decode_insn(insn, asi, kprobe_decode_thumb32_table, true); +} diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c index 1656c87501c0..129c1163248b 100644 --- a/arch/arm/kernel/kprobes.c +++ b/arch/arm/kernel/kprobes.c @@ -28,14 +28,16 @@ #include #include +#include "kprobes.h" + #define MIN_STACK_SIZE(addr) \ min((unsigned long)MAX_STACK_SIZE, \ (unsigned long)current_thread_info() + THREAD_START_SP - (addr)) -#define flush_insns(addr, cnt) \ +#define flush_insns(addr, size) \ flush_icache_range((unsigned long)(addr), \ (unsigned long)(addr) + \ - sizeof(kprobe_opcode_t) * (cnt)) + (size)) /* Used as a marker in ARM_pc to note when we're in a jprobe. */ #define JPROBE_MAGIC_ADDR 0xffffffff @@ -49,16 +51,35 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) kprobe_opcode_t insn; kprobe_opcode_t tmp_insn[MAX_INSN_SIZE]; unsigned long addr = (unsigned long)p->addr; + bool thumb; + kprobe_decode_insn_t *decode_insn; int is; - if (addr & 0x3 || in_exception_text(addr)) + if (in_exception_text(addr)) return -EINVAL; +#ifdef CONFIG_THUMB2_KERNEL + thumb = true; + addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */ + insn = ((u16 *)addr)[0]; + if (is_wide_instruction(insn)) { + insn <<= 16; + insn |= ((u16 *)addr)[1]; + decode_insn = thumb32_kprobe_decode_insn; + } else + decode_insn = thumb16_kprobe_decode_insn; +#else /* !CONFIG_THUMB2_KERNEL */ + thumb = false; + if (addr & 0x3) + return -EINVAL; insn = *p->addr; + decode_insn = arm_kprobe_decode_insn; +#endif + p->opcode = insn; p->ainsn.insn = tmp_insn; - switch (arm_kprobe_decode_insn(insn, &p->ainsn)) { + switch ((*decode_insn)(insn, &p->ainsn)) { case INSN_REJECTED: /* not supported */ return -EINVAL; @@ -68,7 +89,10 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) return -ENOMEM; for (is = 0; is < MAX_INSN_SIZE; ++is) p->ainsn.insn[is] = tmp_insn[is]; - flush_insns(p->ainsn.insn, MAX_INSN_SIZE); + flush_insns(p->ainsn.insn, + sizeof(p->ainsn.insn[0]) * MAX_INSN_SIZE); + p->ainsn.insn_fn = (kprobe_insn_fn_t *) + ((uintptr_t)p->ainsn.insn | thumb); break; case INSN_GOOD_NO_SLOT: /* instruction doesn't need insn slot */ @@ -79,24 +103,88 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) return 0; } +#ifdef CONFIG_THUMB2_KERNEL + +/* + * For a 32-bit Thumb breakpoint spanning two memory words we need to take + * special precautions to insert the breakpoint atomically, especially on SMP + * systems. This is achieved by calling this arming function using stop_machine. 
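+ * + * As a sketch of the race being avoided: if the two halfwords were written + * one at a time, another CPU could fetch a corrupt mixture of the original + * instruction and the breakpoint. stop_machine() parks the other CPUs while + * set_t32_breakpoint() below writes both halfwords and flushes the icache + * range.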
+ */ +static int __kprobes set_t32_breakpoint(void *addr) +{ + ((u16 *)addr)[0] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION >> 16; + ((u16 *)addr)[1] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION & 0xffff; + flush_insns(addr, 2*sizeof(u16)); + return 0; +} + void __kprobes arch_arm_kprobe(struct kprobe *p) { - *p->addr = KPROBE_BREAKPOINT_INSTRUCTION; - flush_insns(p->addr, 1); + uintptr_t addr = (uintptr_t)p->addr & ~1; /* Remove any Thumb flag */ + + if (!is_wide_instruction(p->opcode)) { + *(u16 *)addr = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION; + flush_insns(addr, sizeof(u16)); + } else if (addr & 2) { + /* A 32-bit instruction spanning two words needs special care */ + stop_machine(set_t32_breakpoint, (void *)addr, &cpu_online_map); + } else { + /* Word aligned 32-bit instruction can be written atomically */ + u32 bkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION; +#ifndef __ARMEB__ /* Swap halfwords for little-endian */ + bkp = (bkp >> 16) | (bkp << 16); +#endif + *(u32 *)addr = bkp; + flush_insns(addr, sizeof(u32)); + } } +#else /* !CONFIG_THUMB2_KERNEL */ + +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + kprobe_opcode_t insn = p->opcode; + kprobe_opcode_t brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION; + if (insn >= 0xe0000000) + brkp |= 0xe0000000; /* Unconditional instruction */ + else + brkp |= insn & 0xf0000000; /* Copy condition from insn */ + *p->addr = brkp; + flush_insns(p->addr, sizeof(p->addr[0])); +} + +#endif /* !CONFIG_THUMB2_KERNEL */ + /* * The actual disarming is done here on each CPU and synchronized using * stop_machine. This synchronization is necessary on SMP to avoid removing * a probe between the moment the 'Undefined Instruction' exception is raised * and the moment the exception handler reads the faulting instruction from - * memory. + * memory. It is also needed to atomically set the two half-words of a 32-bit + * Thumb breakpoint. 
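+ * + * (Concretely: if one CPU had just taken the breakpoint exception while + * another CPU restored the original instruction, the handler's re-read of + * the faulting address would no longer match the breakpoint and the trap + * could be misinterpreted as a genuine undefined instruction.)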
*/ int __kprobes __arch_disarm_kprobe(void *p) { struct kprobe *kp = p; +#ifdef CONFIG_THUMB2_KERNEL + u16 *addr = (u16 *)((uintptr_t)kp->addr & ~1); + kprobe_opcode_t insn = kp->opcode; + unsigned int len; + + if (is_wide_instruction(insn)) { + ((u16 *)addr)[0] = insn>>16; + ((u16 *)addr)[1] = insn; + len = 2*sizeof(u16); + } else { + ((u16 *)addr)[0] = insn; + len = sizeof(u16); + } + flush_insns(addr, len); + +#else /* !CONFIG_THUMB2_KERNEL */ *kp->addr = kp->opcode; - flush_insns(kp->addr, 1); + flush_insns(kp->addr, sizeof(kp->addr[0])); +#endif return 0; } @@ -130,12 +218,24 @@ static void __kprobes set_current_kprobe(struct kprobe *p) __get_cpu_var(current_kprobe) = p; } -static void __kprobes singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) +static void __kprobes +singlestep_skip(struct kprobe *p, struct pt_regs *regs) { +#ifdef CONFIG_THUMB2_KERNEL + regs->ARM_cpsr = it_advance(regs->ARM_cpsr); + if (is_wide_instruction(p->opcode)) + regs->ARM_pc += 4; + else + regs->ARM_pc += 2; +#else regs->ARM_pc += 4; - if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) - p->ainsn.insn_handler(p, regs); +#endif +} + +static inline void __kprobes +singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) +{ + p->ainsn.insn_singlestep(p, regs); } /* @@ -149,11 +249,23 @@ void __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p, *cur; struct kprobe_ctlblk *kcb; - kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->ARM_pc; kcb = get_kprobe_ctlblk(); cur = kprobe_running(); - p = get_kprobe(addr); + +#ifdef CONFIG_THUMB2_KERNEL + /* + * First look for a probe which was registered using an address with + * bit 0 set, this is the usual situation for pointers to Thumb code. + * If not found, fallback to looking for one with bit 0 clear. + */ + p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1)); + if (!p) + p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc); + +#else /* ! CONFIG_THUMB2_KERNEL */ + p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc); +#endif if (p) { if (cur) { @@ -173,7 +285,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs) /* impossible cases */ BUG(); } - } else { + } else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) { + /* Probe hit and conditional execution check ok. */ set_current_kprobe(p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; @@ -193,6 +306,13 @@ void __kprobes kprobe_handler(struct pt_regs *regs) } reset_current_kprobe(); } + } else { + /* + * Probe hit but conditional execution check failed, + * so just skip the instruction and continue as if + * nothing had happened. + */ + singlestep_skip(p, regs); } } else if (cur) { /* We probably hit a jprobe. Call its break handler. 
*/ @@ -300,7 +420,11 @@ void __naked __kprobes kretprobe_trampoline(void) "bl trampoline_handler \n\t" "mov lr, r0 \n\t" "ldmia sp!, {r0 - r11} \n\t" +#ifdef CONFIG_THUMB2_KERNEL + "bx lr \n\t" +#else "mov pc, lr \n\t" +#endif : : : "memory"); } @@ -378,11 +502,22 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) struct jprobe *jp = container_of(p, struct jprobe, kp); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); long sp_addr = regs->ARM_sp; + long cpsr; kcb->jprobe_saved_regs = *regs; memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr)); regs->ARM_pc = (long)jp->entry; - regs->ARM_cpsr |= PSR_I_BIT; + + cpsr = regs->ARM_cpsr | PSR_I_BIT; +#ifdef CONFIG_THUMB2_KERNEL + /* Set correct Thumb state in cpsr */ + if (regs->ARM_pc & 1) + cpsr |= PSR_T_BIT; + else + cpsr &= ~PSR_T_BIT; +#endif + regs->ARM_cpsr = cpsr; + preempt_disable(); return 1; } @@ -404,7 +539,12 @@ void __kprobes jprobe_return(void) * This is to prevent any simulated instruction from writing * over the regs when they are accessing the stack. */ +#ifdef CONFIG_THUMB2_KERNEL + "sub r0, %0, %1 \n\t" + "mov sp, r0 \n\t" +#else "sub sp, %0, %1 \n\t" +#endif "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t" "str %0, [sp, %2] \n\t" "str r0, [sp, %3] \n\t" @@ -415,15 +555,28 @@ void __kprobes jprobe_return(void) * Return to the context saved by setjmp_pre_handler * and restored by longjmp_break_handler. */ +#ifdef CONFIG_THUMB2_KERNEL + "ldr lr, [sp, %2] \n\t" /* lr = saved sp */ + "ldrd r0, r1, [sp, %5] \n\t" /* r0,r1 = saved lr,pc */ + "ldr r2, [sp, %4] \n\t" /* r2 = saved psr */ + "stmdb lr!, {r0, r1, r2} \n\t" /* push saved lr and */ + /* rfe context */ + "ldmia sp, {r0 - r12} \n\t" + "mov sp, lr \n\t" + "ldr lr, [sp], #4 \n\t" + "rfeia sp! \n\t" +#else "ldr r0, [sp, %4] \n\t" "msr cpsr_cxsf, r0 \n\t" "ldmia sp, {r0 - pc} \n\t" +#endif : : "r" (kcb->jprobe_saved_regs.ARM_sp), "I" (sizeof(struct pt_regs) * 2), "J" (offsetof(struct pt_regs, ARM_sp)), "J" (offsetof(struct pt_regs, ARM_pc)), - "J" (offsetof(struct pt_regs, ARM_cpsr)) + "J" (offsetof(struct pt_regs, ARM_cpsr)), + "J" (offsetof(struct pt_regs, ARM_lr)) : "memory", "cc"); } @@ -460,17 +613,44 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p) return 0; } -static struct undef_hook kprobes_break_hook = { - .instr_mask = 0xffffffff, - .instr_val = KPROBE_BREAKPOINT_INSTRUCTION, +#ifdef CONFIG_THUMB2_KERNEL + +static struct undef_hook kprobes_thumb16_break_hook = { + .instr_mask = 0xffff, + .instr_val = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION, .cpsr_mask = MODE_MASK, .cpsr_val = SVC_MODE, .fn = kprobe_trap_handler, }; +static struct undef_hook kprobes_thumb32_break_hook = { + .instr_mask = 0xffffffff, + .instr_val = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION, + .cpsr_mask = MODE_MASK, + .cpsr_val = SVC_MODE, + .fn = kprobe_trap_handler, +}; + +#else /* !CONFIG_THUMB2_KERNEL */ + +static struct undef_hook kprobes_arm_break_hook = { + .instr_mask = 0x0fffffff, + .instr_val = KPROBE_ARM_BREAKPOINT_INSTRUCTION, + .cpsr_mask = MODE_MASK, + .cpsr_val = SVC_MODE, + .fn = kprobe_trap_handler, +}; + +#endif /* !CONFIG_THUMB2_KERNEL */ + int __init arch_init_kprobes() { arm_kprobe_decode_init(); - register_undef_hook(&kprobes_break_hook); +#ifdef CONFIG_THUMB2_KERNEL + register_undef_hook(&kprobes_thumb16_break_hook); + register_undef_hook(&kprobes_thumb32_break_hook); +#else + register_undef_hook(&kprobes_arm_break_hook); +#endif return 0; } diff --git a/arch/arm/kernel/kprobes.h b/arch/arm/kernel/kprobes.h new file mode 100644 index 
000000000000..a6aeda0a6c7f --- /dev/null +++ b/arch/arm/kernel/kprobes.h @@ -0,0 +1,420 @@ +/* + * arch/arm/kernel/kprobes.h + * + * Copyright (C) 2011 Jon Medhurst . + * + * Some contents moved here from arch/arm/include/asm/kprobes.h which is + * Copyright (C) 2006, 2007 Motorola Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef _ARM_KERNEL_KPROBES_H +#define _ARM_KERNEL_KPROBES_H + +/* + * These undefined instructions must be unique and + * reserved solely for kprobes' use. + */ +#define KPROBE_ARM_BREAKPOINT_INSTRUCTION 0x07f001f8 +#define KPROBE_THUMB16_BREAKPOINT_INSTRUCTION 0xde18 +#define KPROBE_THUMB32_BREAKPOINT_INSTRUCTION 0xf7f0a018 + + +enum kprobe_insn { + INSN_REJECTED, + INSN_GOOD, + INSN_GOOD_NO_SLOT +}; + +typedef enum kprobe_insn (kprobe_decode_insn_t)(kprobe_opcode_t, + struct arch_specific_insn *); + +#ifdef CONFIG_THUMB2_KERNEL + +enum kprobe_insn thumb16_kprobe_decode_insn(kprobe_opcode_t, + struct arch_specific_insn *); +enum kprobe_insn thumb32_kprobe_decode_insn(kprobe_opcode_t, + struct arch_specific_insn *); + +#else /* !CONFIG_THUMB2_KERNEL */ + +enum kprobe_insn arm_kprobe_decode_insn(kprobe_opcode_t, + struct arch_specific_insn *); +#endif + +void __init arm_kprobe_decode_init(void); + +extern kprobe_check_cc * const kprobe_condition_checks[16]; + + +#if __LINUX_ARM_ARCH__ >= 7 + +/* str_pc_offset is architecturally defined from ARMv7 onwards */ +#define str_pc_offset 8 +#define find_str_pc_offset() + +#else /* __LINUX_ARM_ARCH__ < 7 */ + +/* We need a run-time check to determine str_pc_offset */ +extern int str_pc_offset; +void __init find_str_pc_offset(void); + +#endif + + +/* + * Update ITSTATE after normal execution of an IT block instruction. + * + * The 8 IT state bits are split into two parts in CPSR: + * ITSTATE<1:0> are in CPSR<26:25> + * ITSTATE<7:2> are in CPSR<15:10> + */ +static inline unsigned long it_advance(unsigned long cpsr) +{ + if ((cpsr & 0x06000400) == 0) { + /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */ + cpsr &= ~PSR_IT_MASK; + } else { + /* We need to shift left ITSTATE<4:0> */ + const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */ + unsigned long it = cpsr & mask; + it <<= 1; + it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */ + it &= mask; + cpsr &= ~mask; + cpsr |= it; + } + return cpsr; +} + +static inline void __kprobes bx_write_pc(long pcv, struct pt_regs *regs) +{ + long cpsr = regs->ARM_cpsr; + if (pcv & 0x1) { + cpsr |= PSR_T_BIT; + pcv &= ~0x1; + } else { + cpsr &= ~PSR_T_BIT; + pcv &= ~0x2; /* Avoid UNPREDICTABLE address alignment */ + } + regs->ARM_cpsr = cpsr; + regs->ARM_pc = pcv; +} + + +#if __LINUX_ARM_ARCH__ >= 6 + +/* Kernels built for >= ARMv6 should never run on <= ARMv5 hardware, so... */ +#define load_write_pc_interworks true +#define test_load_write_pc_interworking() + +#else /* __LINUX_ARM_ARCH__ < 6 */ + +/* We need run-time testing to determine if load_write_pc() should interwork. 
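+ * (Loads to PC only began switching instruction set with the Thumb-capable + * v5T cores; a kernel built for an earlier architecture may still find + * itself running on such a core, so the behaviour is probed once at boot.)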
*/ +extern bool load_write_pc_interworks; +void __init test_load_write_pc_interworking(void); + +#endif + +static inline void __kprobes load_write_pc(long pcv, struct pt_regs *regs) +{ + if (load_write_pc_interworks) + bx_write_pc(pcv, regs); + else + regs->ARM_pc = pcv; +} + + +#if __LINUX_ARM_ARCH__ >= 7 + +#define alu_write_pc_interworks true +#define test_alu_write_pc_interworking() + +#elif __LINUX_ARM_ARCH__ <= 5 + +/* Kernels built for <= ARMv5 should never run on >= ARMv6 hardware, so... */ +#define alu_write_pc_interworks false +#define test_alu_write_pc_interworking() + +#else /* __LINUX_ARM_ARCH__ == 6 */ + +/* We could be an ARMv6 binary on ARMv7 hardware so we need a run-time check. */ +extern bool alu_write_pc_interworks; +void __init test_alu_write_pc_interworking(void); + +#endif /* __LINUX_ARM_ARCH__ == 6 */ + +static inline void __kprobes alu_write_pc(long pcv, struct pt_regs *regs) +{ + if (alu_write_pc_interworks) + bx_write_pc(pcv, regs); + else + regs->ARM_pc = pcv; +} + + +void __kprobes kprobe_simulate_nop(struct kprobe *p, struct pt_regs *regs); +void __kprobes kprobe_emulate_none(struct kprobe *p, struct pt_regs *regs); + +enum kprobe_insn __kprobes +kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi); + +/* + * Test if load/store instructions writeback the address register. + * if P (bit 24) == 0 or W (bit 21) == 1 + */ +#define is_writeback(insn) ((insn ^ 0x01000000) & 0x01200000) + +/* + * The following definitions and macros are used to build instruction + * decoding tables for use by kprobe_decode_insn. + * + * These tables are a concatenation of entries, each of which consists of one + * of the decode_* structs. All of the fields in every type of decode structure + * are of the union type decode_item; the entire decode table can therefore be + * viewed as an array of these and declared like: + * + * static const union decode_item table_name[] = {}; + * + * In order to construct each entry in the table, macros are used to + * initialise a number of sequential decode_item values in a layout which + * matches the relevant struct. E.g. DECODE_SIMULATE initialises a struct + * decode_simulate by initialising four decode_item objects like this... + * + * {.bits = _type}, + * {.bits = _mask}, + * {.bits = _value}, + * {.handler = _handler}, + * + * Initialising a specified member of the union means that the compiler + * will produce a warning if the argument is of an incorrect type. + * + * Below is a list of each of the macros used to initialise entries and a + * description of the action performed when that entry is matched to an + * instruction. A match is found when (instruction & mask) == value. + * + * DECODE_TABLE(mask, value, table) + * Instruction decoding jumps to parsing the new sub-table 'table'. + * + * DECODE_CUSTOM(mask, value, decoder) + * The custom function 'decoder' is called to complete the decoding + * of an instruction. + * + * DECODE_SIMULATE(mask, value, handler) + * Set the probe's instruction handler to 'handler'; this will be used + * to simulate the instruction when the probe is hit. Decoding returns + * with INSN_GOOD_NO_SLOT. + * + * DECODE_EMULATE(mask, value, handler) + * Set the probe's instruction handler to 'handler'; this will be used + * to emulate the instruction when the probe is hit. The modified + * instruction (see below) is placed in the probe's instruction slot so it + * may be called by the emulation code. Decoding returns with INSN_GOOD. 
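+ * + * For example, a minimal (hypothetical) table built from these macros might + * look like: + * + * static const union decode_item example_table[] = { + * DECODE_SIMULATE (0xffff, 0xbf00, kprobe_simulate_nop), + * DECODE_END + * }; + * + * This simulates the 16-bit NOP (0xbf00) and, by falling through to + * DECODE_END, rejects every other instruction; the real tables rely on the + * same fall-through for their "Other unallocated instructions" entries.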
+ * + * DECODE_REJECT(mask, value) + * Instruction decoding fails with INSN_REJECTED. + * + * DECODE_OR(mask, value) + * This allows the mask/value test of multiple table entries to be + * logically ORed. Once an 'or' entry is matched, the decoding action to + * be performed is that of the next entry which isn't an 'or'. E.g. + * + * DECODE_OR (mask1, value1) + * DECODE_OR (mask2, value2) + * DECODE_SIMULATE (mask3, value3, simulation_handler) + * + * This means that if any of the three mask/value pairs match the + * instruction being decoded, then 'simulation_handler' will be used + * for it. + * + * Both the SIMULATE and EMULATE macros have a second form which takes an + * additional 'regs' argument. + * + * DECODE_SIMULATEX(mask, value, handler, regs) + * DECODE_EMULATEX (mask, value, handler, regs) + * + * These are used to specify what kind of CPU register is encoded in each of the + * least significant 5 nibbles of the instruction being decoded. The regs value + * is specified using the REGS macro; this takes any of the REG_TYPE_* values + * from enum decode_reg_type as arguments; only the '*' part of the name is + * given. E.g. + * + * REGS(0, ANY, NOPC, 0, ANY) + * + * This indicates an instruction is encoded like: + * + * bits 19..16 ignore + * bits 15..12 any register allowed here + * bits 11.. 8 any register except PC allowed here + * bits 7.. 4 ignore + * bits 3.. 0 any register allowed here + * + * This register specification is checked after a decode table entry is found to + * match an instruction (through the mask/value test). Any invalid register then + * found in the instruction will cause decoding to fail with INSN_REJECTED. In + * the above example this would happen if bits 11..8 of the instruction were + * 1111, indicating R15 or PC. + * + * As well as checking for legal combinations of registers, this data is also + * used to modify the registers encoded in the instructions so that the + * emulation routines can use them. (See decode_regs() and INSN_NEW_BITS.) + * + * Here is a real example which matches ARM instructions of the form + * "AND <Rd>,<Rn>,<Rm>,<shift> <Rs>" + * + * DECODE_EMULATEX (0x0e000090, 0x00000010, emulate_rd12rn16rm0rs8_rwflags, + * REGS(ANY, ANY, NOPC, 0, ANY)), + * ^ ^ ^ ^ + * Rn Rd Rs Rm + * + * The instruction "AND R4, R5, R6, ASL R15" will be rejected because + * Rs == R15. + * + * The instruction "AND R4, R5, R6, ASL R7" will be accepted and the + * instruction will be modified to "AND R0, R2, R3, ASL R1" and then placed into + * the kprobes instruction slot. This can then be called later by the handler + * function emulate_rd12rn16rm0rs8_rwflags in order to emulate the instruction. 
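+ * + * As a worked check of the mask/value test (a sketch, using the standard + * ARM encoding of data-processing instructions): "AND R4, R5, R6, ASL R7" + * assembles to 0xe0054716, and 0xe0054716 & 0x0e000090 == 0x00000010, so + * this entry matches and the register checks are then applied.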
+ */ + +enum decode_type { + DECODE_TYPE_END, + DECODE_TYPE_TABLE, + DECODE_TYPE_CUSTOM, + DECODE_TYPE_SIMULATE, + DECODE_TYPE_EMULATE, + DECODE_TYPE_OR, + DECODE_TYPE_REJECT, + NUM_DECODE_TYPES /* Must be last enum */ +}; + +#define DECODE_TYPE_BITS 4 +#define DECODE_TYPE_MASK ((1 << DECODE_TYPE_BITS) - 1) + +enum decode_reg_type { + REG_TYPE_NONE = 0, /* Not a register, ignore */ + REG_TYPE_ANY, /* Any register allowed */ + REG_TYPE_SAMEAS16, /* Register should be same as that at bits 19..16 */ + REG_TYPE_SP, /* Register must be SP */ + REG_TYPE_PC, /* Register must be PC */ + REG_TYPE_NOSP, /* Register must not be SP */ + REG_TYPE_NOSPPC, /* Register must not be SP or PC */ + REG_TYPE_NOPC, /* Register must not be PC */ + REG_TYPE_NOPCWB, /* No PC if load/store write-back flag also set */ + + /* The following types are used when the encoding for PC indicates + * another instruction form. This distinction only matters for test + * case coverage checks. + */ + REG_TYPE_NOPCX, /* Register must not be PC */ + REG_TYPE_NOSPPCX, /* Register must not be SP or PC */ + + /* Alias to allow '0' arg to be used in REGS macro. */ + REG_TYPE_0 = REG_TYPE_NONE +}; + +#define REGS(r16, r12, r8, r4, r0) \ + ((REG_TYPE_##r16) << 16) + \ + ((REG_TYPE_##r12) << 12) + \ + ((REG_TYPE_##r8) << 8) + \ + ((REG_TYPE_##r4) << 4) + \ + (REG_TYPE_##r0) + +union decode_item { + u32 bits; + const union decode_item *table; + kprobe_insn_handler_t *handler; + kprobe_decode_insn_t *decoder; +}; + + +#define DECODE_END \ + {.bits = DECODE_TYPE_END} + + +struct decode_header { + union decode_item type_regs; + union decode_item mask; + union decode_item value; +}; + +#define DECODE_HEADER(_type, _mask, _value, _regs) \ + {.bits = (_type) | ((_regs) << DECODE_TYPE_BITS)}, \ + {.bits = (_mask)}, \ + {.bits = (_value)} + + +struct decode_table { + struct decode_header header; + union decode_item table; +}; + +#define DECODE_TABLE(_mask, _value, _table) \ + DECODE_HEADER(DECODE_TYPE_TABLE, _mask, _value, 0), \ + {.table = (_table)} + + +struct decode_custom { + struct decode_header header; + union decode_item decoder; +}; + +#define DECODE_CUSTOM(_mask, _value, _decoder) \ + DECODE_HEADER(DECODE_TYPE_CUSTOM, _mask, _value, 0), \ + {.decoder = (_decoder)} + + +struct decode_simulate { + struct decode_header header; + union decode_item handler; +}; + +#define DECODE_SIMULATEX(_mask, _value, _handler, _regs) \ + DECODE_HEADER(DECODE_TYPE_SIMULATE, _mask, _value, _regs), \ + {.handler = (_handler)} + +#define DECODE_SIMULATE(_mask, _value, _handler) \ + DECODE_SIMULATEX(_mask, _value, _handler, 0) + + +struct decode_emulate { + struct decode_header header; + union decode_item handler; +}; + +#define DECODE_EMULATEX(_mask, _value, _handler, _regs) \ + DECODE_HEADER(DECODE_TYPE_EMULATE, _mask, _value, _regs), \ + {.handler = (_handler)} + +#define DECODE_EMULATE(_mask, _value, _handler) \ + DECODE_EMULATEX(_mask, _value, _handler, 0) + + +struct decode_or { + struct decode_header header; +}; + +#define DECODE_OR(_mask, _value) \ + DECODE_HEADER(DECODE_TYPE_OR, _mask, _value, 0) + + +struct decode_reject { + struct decode_header header; +}; + +#define DECODE_REJECT(_mask, _value) \ + DECODE_HEADER(DECODE_TYPE_REJECT, _mask, _value, 0) + + +int kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi, + const union decode_item *table, bool thumb16); + + +#endif /* _ARM_KERNEL_KPROBES_H */ diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 2b5b1421596c..53c9c2610cbc 100644 --- 
a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -435,7 +435,7 @@ armpmu_reserve_hardware(void) if (irq >= 0) free_irq(irq, NULL); } - release_pmu(pmu_device); + release_pmu(ARM_PMU_DEVICE_CPU); pmu_device = NULL; } @@ -454,7 +454,7 @@ armpmu_release_hardware(void) } armpmu->stop(); - release_pmu(pmu_device); + release_pmu(ARM_PMU_DEVICE_CPU); pmu_device = NULL; } @@ -662,6 +662,12 @@ init_hw_perf_events(void) case 0xC090: /* Cortex-A9 */ armpmu = armv7_a9_pmu_init(); break; + case 0xC050: /* Cortex-A5 */ + armpmu = armv7_a5_pmu_init(); + break; + case 0xC0F0: /* Cortex-A15 */ + armpmu = armv7_a15_pmu_init(); + break; } /* Intel CPUs [xscale]. */ } else if (0x69 == implementor) { diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index e20ca9cafef5..4c851834f68e 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -17,17 +17,23 @@ */ #ifdef CONFIG_CPU_V7 -/* Common ARMv7 event types */ +/* + * Common ARMv7 event types + * + * Note: An implementation may not be able to count all of these events + * but the encodings are considered to be `reserved' in the case that + * they are not available. + */ enum armv7_perf_types { ARMV7_PERFCTR_PMNC_SW_INCR = 0x00, ARMV7_PERFCTR_IFETCH_MISS = 0x01, ARMV7_PERFCTR_ITLB_MISS = 0x02, - ARMV7_PERFCTR_DCACHE_REFILL = 0x03, - ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, + ARMV7_PERFCTR_DCACHE_REFILL = 0x03, /* L1 */ + ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, /* L1 */ ARMV7_PERFCTR_DTLB_REFILL = 0x05, ARMV7_PERFCTR_DREAD = 0x06, ARMV7_PERFCTR_DWRITE = 0x07, - + ARMV7_PERFCTR_INSTR_EXECUTED = 0x08, ARMV7_PERFCTR_EXC_TAKEN = 0x09, ARMV7_PERFCTR_EXC_EXECUTED = 0x0A, ARMV7_PERFCTR_CID_WRITE = 0x0B, @@ -39,21 +45,30 @@ enum armv7_perf_types { */ ARMV7_PERFCTR_PC_WRITE = 0x0C, ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D, + ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E, ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F, + + /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). 
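+ * Several of them are used below by the Cortex-A5 and Cortex-A15 PMU + * support.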
*/ ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, ARMV7_PERFCTR_CLOCK_CYCLES = 0x11, - - ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12, + ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12, + ARMV7_PERFCTR_MEM_ACCESS = 0x13, + ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14, + ARMV7_PERFCTR_L1_DCACHE_WB = 0x15, + ARMV7_PERFCTR_L2_DCACHE_ACCESS = 0x16, + ARMV7_PERFCTR_L2_DCACHE_REFILL = 0x17, + ARMV7_PERFCTR_L2_DCACHE_WB = 0x18, + ARMV7_PERFCTR_BUS_ACCESS = 0x19, + ARMV7_PERFCTR_MEMORY_ERROR = 0x1A, + ARMV7_PERFCTR_INSTR_SPEC = 0x1B, + ARMV7_PERFCTR_TTBR_WRITE = 0x1C, + ARMV7_PERFCTR_BUS_CYCLES = 0x1D, ARMV7_PERFCTR_CPU_CYCLES = 0xFF }; /* ARMv7 Cortex-A8 specific event types */ enum armv7_a8_perf_types { - ARMV7_PERFCTR_INSTR_EXECUTED = 0x08, - - ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E, - ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40, ARMV7_PERFCTR_L2_STORE_MERGED = 0x41, ARMV7_PERFCTR_L2_STORE_BUFF = 0x42, @@ -138,6 +153,39 @@ enum armv7_a9_perf_types { ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5 }; +/* ARMv7 Cortex-A5 specific event types */ +enum armv7_a5_perf_types { + ARMV7_PERFCTR_IRQ_TAKEN = 0x86, + ARMV7_PERFCTR_FIQ_TAKEN = 0x87, + + ARMV7_PERFCTR_EXT_MEM_RQST = 0xc0, + ARMV7_PERFCTR_NC_EXT_MEM_RQST = 0xc1, + ARMV7_PERFCTR_PREFETCH_LINEFILL = 0xc2, + ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3, + ARMV7_PERFCTR_ENTER_READ_ALLOC = 0xc4, + ARMV7_PERFCTR_READ_ALLOC = 0xc5, + + ARMV7_PERFCTR_STALL_SB_FULL = 0xc9, +}; + +/* ARMv7 Cortex-A15 specific event types */ +enum armv7_a15_perf_types { + ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS = 0x40, + ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS = 0x41, + ARMV7_PERFCTR_L1_DCACHE_READ_REFILL = 0x42, + ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL = 0x43, + + ARMV7_PERFCTR_L1_DTLB_READ_REFILL = 0x4C, + ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL = 0x4D, + + ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS = 0x50, + ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS = 0x51, + ARMV7_PERFCTR_L2_DCACHE_READ_REFILL = 0x52, + ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL = 0x53, + + ARMV7_PERFCTR_SPEC_PC_WRITE = 0x76, +}; + /* * Cortex-A8 HW events mapping * @@ -207,11 +255,6 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, }, [C(DTLB)] = { - /* - * Only ITLB misses and DTLB refills are supported. - * If users want the DTLB refills misses a raw counter - * must be used. - */ [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, @@ -337,11 +380,6 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, }, [C(DTLB)] = { - /* - * Only ITLB misses and DTLB refills are supported. - * If users want the DTLB refills misses a raw counter - * must be used. 
- */ [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, @@ -401,6 +439,242 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, }; +/* + * Cortex-A5 HW events mapping + */ +static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, +}; + +static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_DCACHE_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_DCACHE_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_PREFETCH_LINEFILL, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, + }, + /* + * The prefetch counters don't differentiate between the I + * side and the D side. + */ + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_PREFETCH_LINEFILL, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +/* + * Cortex-A15 HW events mapping + */ +static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = 
ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, +}; + +static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_L1_DCACHE_READ_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + /* + * Not all performance counters differentiate between read + * and write accesses/misses so we're not always strictly + * correct, but it's the best we can do. Writes and reads get + * combined in these cases. + */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_L2_DCACHE_READ_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_L1_DTLB_READ_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + /* * Perf Events counters */ @@ -933,6 +1207,26 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void) armv7pmu.num_events = armv7_read_num_pmnc_events(); return &armv7pmu; } + +static const struct arm_pmu *__init armv7_a5_pmu_init(void) +{ + armv7pmu.id = ARM_PERF_PMU_ID_CA5; + armv7pmu.name = "ARMv7 Cortex-A5"; + armv7pmu.cache_map = &armv7_a5_perf_cache_map; + armv7pmu.event_map = &armv7_a5_perf_map; + 
armv7pmu.num_events = armv7_read_num_pmnc_events(); + return &armv7pmu; +} + +static const struct arm_pmu *__init armv7_a15_pmu_init(void) +{ + armv7pmu.id = ARM_PERF_PMU_ID_CA15; + armv7pmu.name = "ARMv7 Cortex-A15"; + armv7pmu.cache_map = &armv7_a15_perf_cache_map; + armv7pmu.event_map = &armv7_a15_perf_map; + armv7pmu.num_events = armv7_read_num_pmnc_events(); + return &armv7pmu; +} #else static const struct arm_pmu *__init armv7_a8_pmu_init(void) { @@ -943,4 +1237,14 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void) { return NULL; } + +static const struct arm_pmu *__init armv7_a5_pmu_init(void) +{ + return NULL; +} + +static const struct arm_pmu *__init armv7_a15_pmu_init(void) +{ + return NULL; +} #endif /* CONFIG_CPU_V7 */ diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c index 2c79eec19262..2b70709376c3 100644 --- a/arch/arm/kernel/pmu.c +++ b/arch/arm/kernel/pmu.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -25,36 +26,88 @@ static volatile long pmu_lock; static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES]; -static int __devinit pmu_device_probe(struct platform_device *pdev) +static int __devinit pmu_register(struct platform_device *pdev, + enum arm_pmu_type type) { - - if (pdev->id < 0 || pdev->id >= ARM_NUM_PMU_DEVICES) { + if (type < 0 || type >= ARM_NUM_PMU_DEVICES) { pr_warning("received registration request for unknown " - "device %d\n", pdev->id); + "device %d\n", type); return -EINVAL; } - if (pmu_devices[pdev->id]) - pr_warning("registering new PMU device type %d overwrites " - "previous registration!\n", pdev->id); - else - pr_info("registered new PMU device of type %d\n", - pdev->id); + if (pmu_devices[type]) { + pr_warning("rejecting duplicate registration of PMU device " + "type %d.", type); + return -ENOSPC; + } - pmu_devices[pdev->id] = pdev; + pr_info("registered new PMU device of type %d\n", type); + pmu_devices[type] = pdev; return 0; } -static struct platform_driver pmu_driver = { +#define OF_MATCH_PMU(_name, _type) { \ + .compatible = _name, \ + .data = (void *)_type, \ +} + +#define OF_MATCH_CPU(name) OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU) + +static struct of_device_id armpmu_of_device_ids[] = { + OF_MATCH_CPU("arm,cortex-a9-pmu"), + OF_MATCH_CPU("arm,cortex-a8-pmu"), + OF_MATCH_CPU("arm,arm1136-pmu"), + OF_MATCH_CPU("arm,arm1176-pmu"), + {}, +}; + +#define PLAT_MATCH_PMU(_name, _type) { \ + .name = _name, \ + .driver_data = _type, \ +} + +#define PLAT_MATCH_CPU(_name) PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU) + +static struct platform_device_id armpmu_plat_device_ids[] = { + PLAT_MATCH_CPU("arm-pmu"), + {}, +}; + +enum arm_pmu_type armpmu_device_type(struct platform_device *pdev) +{ + const struct of_device_id *of_id; + const struct platform_device_id *pdev_id; + + /* provided by of_device_id table */ + if (pdev->dev.of_node) { + of_id = of_match_device(armpmu_of_device_ids, &pdev->dev); + BUG_ON(!of_id); + return (enum arm_pmu_type)of_id->data; + } + + /* Provided by platform_device_id table */ + pdev_id = platform_get_device_id(pdev); + BUG_ON(!pdev_id); + return pdev_id->driver_data; +} + +static int __devinit armpmu_device_probe(struct platform_device *pdev) +{ + return pmu_register(pdev, armpmu_device_type(pdev)); +} + +static struct platform_driver armpmu_driver = { .driver = { .name = "arm-pmu", + .of_match_table = armpmu_of_device_ids, }, - .probe = pmu_device_probe, + .probe = armpmu_device_probe, + .id_table = armpmu_plat_device_ids, }; static int __init register_pmu_driver(void) { - 
return platform_driver_register(&pmu_driver); + return platform_driver_register(&armpmu_driver); } device_initcall(register_pmu_driver); @@ -77,11 +130,11 @@ reserve_pmu(enum arm_pmu_type device) EXPORT_SYMBOL_GPL(reserve_pmu); int -release_pmu(struct platform_device *pdev) +release_pmu(enum arm_pmu_type device) { - if (WARN_ON(pdev != pmu_devices[pdev->id])) + if (WARN_ON(!pmu_devices[device])) return -EINVAL; - clear_bit_unlock(pdev->id, &pmu_lock); + clear_bit_unlock(device, &pmu_lock); return 0; } EXPORT_SYMBOL_GPL(release_pmu); diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 5c199610719f..2491f3b406bc 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -228,34 +228,12 @@ static struct undef_hook thumb_break_hook = { .fn = break_trap, }; -static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr) -{ - unsigned int instr2; - void __user *pc; - - /* Check the second half of the instruction. */ - pc = (void __user *)(instruction_pointer(regs) + 2); - - if (processor_mode(regs) == SVC_MODE) { - instr2 = *(u16 *) pc; - } else { - get_user(instr2, (u16 __user *)pc); - } - - if (instr2 == 0xa000) { - ptrace_break(current, regs); - return 0; - } else { - return 1; - } -} - static struct undef_hook thumb2_break_hook = { - .instr_mask = 0xffff, - .instr_val = 0xf7f0, + .instr_mask = 0xffffffff, + .instr_val = 0xf7f0a000, .cpsr_mask = PSR_T_BIT, .cpsr_val = PSR_T_BIT, - .fn = thumb2_break_trap, + .fn = break_trap, }; static int __init ptrace_break_init(void) diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index acbb447ac6b5..70bca649e925 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -343,6 +343,59 @@ static void __init feat_v6_fixup(void) elf_hwcap &= ~HWCAP_TLS; } +/* + * cpu_init - initialise one CPU. + * + * cpu_init sets up the per-CPU stacks. + */ +void cpu_init(void) +{ + unsigned int cpu = smp_processor_id(); + struct stack *stk = &stacks[cpu]; + + if (cpu >= NR_CPUS) { + printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu); + BUG(); + } + + cpu_proc_init(); + + /* + * Define the placement constraint for the inline asm directive below. + * In Thumb-2, msr with an immediate value is not allowed. + */ +#ifdef CONFIG_THUMB2_KERNEL +#define PLC "r" +#else +#define PLC "I" +#endif + + /* + * setup stacks for re-entrant exception handlers + */ + __asm__ ( + "msr cpsr_c, %1\n\t" + "add r14, %0, %2\n\t" + "mov sp, r14\n\t" + "msr cpsr_c, %3\n\t" + "add r14, %0, %4\n\t" + "mov sp, r14\n\t" + "msr cpsr_c, %5\n\t" + "add r14, %0, %6\n\t" + "mov sp, r14\n\t" + "msr cpsr_c, %7" + : + : "r" (stk), + PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), + "I" (offsetof(struct stack, irq[0])), + PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE), + "I" (offsetof(struct stack, abt[0])), + PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE), + "I" (offsetof(struct stack, und[0])), + PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE) + : "r14"); +} + static void __init setup_processor(void) { struct proc_info_list *list; @@ -388,58 +441,7 @@ static void __init setup_processor(void) feat_v6_fixup(); cacheid_init(); - cpu_proc_init(); -} - -/* - * cpu_init - initialise one CPU. - * - * cpu_init sets up the per-CPU stacks. - */ -void cpu_init(void) -{ - unsigned int cpu = smp_processor_id(); - struct stack *stk = &stacks[cpu]; - - if (cpu >= NR_CPUS) { - printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu); - BUG(); - } - - /* - * Define the placement constraint for the inline asm directive below. - * In Thumb-2, msr with an immediate value is not allowed. 
- */ -#ifdef CONFIG_THUMB2_KERNEL -#define PLC "r" -#else -#define PLC "I" -#endif - - /* - * setup stacks for re-entrant exception handlers - */ - __asm__ ( - "msr cpsr_c, %1\n\t" - "add r14, %0, %2\n\t" - "mov sp, r14\n\t" - "msr cpsr_c, %3\n\t" - "add r14, %0, %4\n\t" - "mov sp, r14\n\t" - "msr cpsr_c, %5\n\t" - "add r14, %0, %6\n\t" - "mov sp, r14\n\t" - "msr cpsr_c, %7" - : - : "r" (stk), - PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), - "I" (offsetof(struct stack, irq[0])), - PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE), - "I" (offsetof(struct stack, abt[0])), - PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE), - "I" (offsetof(struct stack, und[0])), - PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE) - : "r14"); + cpu_init(); } void __init dump_machine_table(void) @@ -915,9 +917,14 @@ void __init setup_arch(char **cmdline_p) #endif reserve_crashkernel(); - cpu_init(); tcm_init(); +#ifdef CONFIG_ZONE_DMA + if (mdesc->dma_zone_size) { + extern unsigned long arm_dma_zone_size; + arm_dma_zone_size = mdesc->dma_zone_size; + } +#endif #ifdef CONFIG_MULTI_IRQ_HANDLER handle_arch_irq = mdesc->handle_irq; #endif @@ -979,6 +986,10 @@ static const char *hwcap_str[] = { "neon", "vfpv3", "vfpv3d16", + "tls", + "vfpv4", + "idiva", + "idivt", NULL }; diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 6398ead9d1c0..dc902f2c6845 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -10,64 +10,61 @@ /* * Save CPU state for a suspend * r1 = v:p offset - * r3 = virtual return function - * Note: sp is decremented to allocate space for CPU state on stack - * r0-r3,r9,r10,lr corrupted + * r2 = suspend function arg0 + * r3 = suspend function */ -ENTRY(cpu_suspend) - mov r9, lr +ENTRY(__cpu_suspend) + stmfd sp!, {r4 - r11, lr} #ifdef MULTI_CPU ldr r10, =processor - mov r2, sp @ current virtual SP - ldr r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state + ldr r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function - sub sp, sp, r0 @ allocate CPU state on stack - mov r0, sp @ save pointer +#else + ldr r5, =cpu_suspend_size + ldr ip, =cpu_do_resume +#endif + mov r6, sp @ current virtual SP + sub sp, sp, r5 @ allocate CPU state on stack + mov r0, sp @ save pointer to CPU save block add ip, ip, r1 @ convert resume fn to phys - stmfd sp!, {r1, r2, r3, ip} @ save v:p, virt SP, retfn, phys resume fn - ldr r3, =sleep_save_sp - add r2, sp, r1 @ convert SP to phys + stmfd sp!, {r1, r6, ip} @ save v:p, virt SP, phys resume fn + ldr r5, =sleep_save_sp + add r6, sp, r1 @ convert SP to phys + stmfd sp!, {r2, r3} @ save suspend func arg and pointer #ifdef CONFIG_SMP ALT_SMP(mrc p15, 0, lr, c0, c0, 5) ALT_UP(mov lr, #0) and lr, lr, #15 - str r2, [r3, lr, lsl #2] @ save phys SP + str r6, [r5, lr, lsl #2] @ save phys SP #else - str r2, [r3] @ save phys SP + str r6, [r5] @ save phys SP #endif +#ifdef MULTI_CPU mov lr, pc ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state #else - mov r2, sp @ current virtual SP - ldr r0, =cpu_suspend_size - sub sp, sp, r0 @ allocate CPU state on stack - mov r0, sp @ save pointer - stmfd sp!, {r1, r2, r3} @ save v:p, virt SP, return fn - ldr r3, =sleep_save_sp - add r2, sp, r1 @ convert SP to phys -#ifdef CONFIG_SMP - ALT_SMP(mrc p15, 0, lr, c0, c0, 5) - ALT_UP(mov lr, #0) - and lr, lr, #15 - str r2, [r3, lr, lsl #2] @ save phys SP -#else - str r2, [r3] @ save phys SP -#endif bl cpu_do_suspend #endif @ flush data cache #ifdef MULTI_CACHE ldr r10, =cpu_cache - mov lr, r9 + mov lr, pc ldr pc, [r10, #CACHE_FLUSH_KERN_ALL] #else - mov lr, r9 - b 
__cpuc_flush_kern_all + bl __cpuc_flush_kern_all #endif -ENDPROC(cpu_suspend) + adr lr, BSYM(cpu_suspend_abort) + ldmfd sp!, {r0, pc} @ call suspend fn +ENDPROC(__cpu_suspend) .ltorg +cpu_suspend_abort: + ldmia sp!, {r1 - r3} @ pop v:p, virt SP, phys resume fn + mov sp, r2 + ldmfd sp!, {r4 - r11, pc} +ENDPROC(cpu_suspend_abort) + /* * r0 = control register value * r1 = v:p offset (preserved by cpu_do_resume) @@ -97,7 +94,9 @@ ENDPROC(cpu_resume_turn_mmu_on) cpu_resume_after_mmu: str r5, [r2, r4, lsl #2] @ restore old mapping mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache - mov pc, lr + bl cpu_init @ restore the und/abt/irq banked regs + mov r0, #0 @ return zero on success + ldmfd sp!, {r4 - r11, pc} ENDPROC(cpu_resume_after_mmu) /* @@ -120,20 +119,11 @@ ENTRY(cpu_resume) ldr r0, sleep_save_sp @ stack phys addr #endif setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off -#ifdef MULTI_CPU - @ load v:p, stack, return fn, resume fn - ARM( ldmia r0!, {r1, sp, lr, pc} ) -THUMB( ldmia r0!, {r1, r2, r3, r4} ) + @ load v:p, stack, resume fn + ARM( ldmia r0!, {r1, sp, pc} ) +THUMB( ldmia r0!, {r1, r2, r3} ) THUMB( mov sp, r2 ) -THUMB( mov lr, r3 ) -THUMB( bx r4 ) -#else - @ load v:p, stack, return fn - ARM( ldmia r0!, {r1, sp, lr} ) -THUMB( ldmia r0!, {r1, r2, lr} ) -THUMB( mov sp, r2 ) - b cpu_do_resume -#endif +THUMB( bx r3 ) ENDPROC(cpu_resume) sleep_save_sp: diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index e7f92a4321f3..167e3cbe1f2f 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -365,14 +365,21 @@ void __init smp_prepare_cpus(unsigned int max_cpus) */ if (max_cpus > ncores) max_cpus = ncores; - - if (max_cpus > 1) { + if (ncores > 1 && max_cpus) { /* * Enable the local timer or broadcast device for the * boot CPU, but only if we have more than one CPU. */ percpu_timer_setup(); + /* + * Initialise the present map, which describes the set of CPUs + * actually populated at the present time. A platform should + * re-initialize the map in platform_smp_prepare_cpus() if + * present != possible (e.g. physical hotplug). + */ + init_cpu_present(&cpu_possible_map); + /* * Initialise the SCU if there are more than one CPU * and let them know where to start. diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c index a1e757c3439b..79ed5e7f204a 100644 --- a/arch/arm/kernel/smp_scu.c +++ b/arch/arm/kernel/smp_scu.c @@ -20,6 +20,7 @@ #define SCU_INVALIDATE 0x0c #define SCU_FPGA_REVISION 0x10 +#ifdef CONFIG_SMP /* * Get the number of CPU cores from the SCU configuration */ @@ -50,6 +51,7 @@ void __init scu_enable(void __iomem *scu_base) */ flush_cache_all(); } +#endif /* * Set the executing CPUs power mode as defined. This will be in diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index f5cf660eefcc..30e302d33e0a 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -19,6 +19,8 @@ #include "tcm.h" static struct gen_pool *tcm_pool; +static bool dtcm_present; +static bool itcm_present; /* TCM section definitions from the linker */ extern char __itcm_start, __sitcm_text, __eitcm_text; @@ -90,6 +92,18 @@ void tcm_free(void *addr, size_t len) } EXPORT_SYMBOL(tcm_free); +bool tcm_dtcm_present(void) +{ + return dtcm_present; +} +EXPORT_SYMBOL(tcm_dtcm_present); + +bool tcm_itcm_present(void) +{ + return itcm_present; +} +EXPORT_SYMBOL(tcm_itcm_present); + static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, u32 *offset) { @@ -134,6 +148,10 @@ static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, (tcm_region & 1) ? 
"" : "not "); } + /* Not much fun you can do with a size 0 bank */ + if (tcm_size == 0) + return 0; + /* Force move the TCM bank to where we want it, enable */ tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1; @@ -165,12 +183,20 @@ void __init tcm_init(void) u32 tcm_status = read_cpuid_tcmstatus(); u8 dtcm_banks = (tcm_status >> 16) & 0x03; u8 itcm_banks = (tcm_status & 0x03); + size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data; + size_t itcm_code_sz = &__eitcm_text - &__sitcm_text; char *start; char *end; char *ram; int ret; int i; + /* Values greater than 2 for D/ITCM banks are "reserved" */ + if (dtcm_banks > 2) + dtcm_banks = 0; + if (itcm_banks > 2) + itcm_banks = 0; + /* Setup DTCM if present */ if (dtcm_banks > 0) { for (i = 0; i < dtcm_banks; i++) { @@ -178,6 +204,13 @@ void __init tcm_init(void) if (ret) return; } + /* This means you compiled more code than fits into DTCM */ + if (dtcm_code_sz > (dtcm_end - DTCM_OFFSET)) { + pr_info("CPU DTCM: %u bytes of code compiled to " + "DTCM but only %lu bytes of DTCM present\n", + dtcm_code_sz, (dtcm_end - DTCM_OFFSET)); + goto no_dtcm; + } dtcm_res.end = dtcm_end - 1; request_resource(&iomem_resource, &dtcm_res); dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET; @@ -186,12 +219,16 @@ void __init tcm_init(void) start = &__sdtcm_data; end = &__edtcm_data; ram = &__dtcm_start; - /* This means you compiled more code than fits into DTCM */ - BUG_ON((end - start) > (dtcm_end - DTCM_OFFSET)); - memcpy(start, ram, (end-start)); - pr_debug("CPU DTCM: copied data from %p - %p\n", start, end); + memcpy(start, ram, dtcm_code_sz); + pr_debug("CPU DTCM: copied data from %p - %p\n", + start, end); + dtcm_present = true; + } else if (dtcm_code_sz) { + pr_info("CPU DTCM: %u bytes of code compiled to DTCM but no " + "DTCM banks present in CPU\n", dtcm_code_sz); } +no_dtcm: /* Setup ITCM if present */ if (itcm_banks > 0) { for (i = 0; i < itcm_banks; i++) { @@ -199,6 +236,13 @@ void __init tcm_init(void) if (ret) return; } + /* This means you compiled more code than fits into ITCM */ + if (itcm_code_sz > (itcm_end - ITCM_OFFSET)) { + pr_info("CPU ITCM: %u bytes of code compiled to " + "ITCM but only %lu bytes of ITCM present\n", + itcm_code_sz, (itcm_end - ITCM_OFFSET)); + return; + } itcm_res.end = itcm_end - 1; request_resource(&iomem_resource, &itcm_res); itcm_iomap[0].length = itcm_end - ITCM_OFFSET; @@ -207,10 +251,13 @@ void __init tcm_init(void) start = &__sitcm_text; end = &__eitcm_text; ram = &__itcm_start; - /* This means you compiled more code than fits into ITCM */ - BUG_ON((end - start) > (itcm_end - ITCM_OFFSET)); - memcpy(start, ram, (end-start)); - pr_debug("CPU ITCM: copied code from %p - %p\n", start, end); + memcpy(start, ram, itcm_code_sz); + pr_debug("CPU ITCM: copied code from %p - %p\n", + start, end); + itcm_present = true; + } else if (itcm_code_sz) { + pr_info("CPU ITCM: %u bytes of code compiled to ITCM but no " + "ITCM banks present in CPU\n", itcm_code_sz); } } @@ -221,7 +268,6 @@ void __init tcm_init(void) */ static int __init setup_tcm_pool(void) { - u32 tcm_status = read_cpuid_tcmstatus(); u32 dtcm_pool_start = (u32) &__edtcm_data; u32 itcm_pool_start = (u32) &__eitcm_text; int ret; @@ -236,7 +282,7 @@ static int __init setup_tcm_pool(void) pr_debug("Setting up TCM memory pool\n"); /* Add the rest of DTCM to the TCM pool */ - if (tcm_status & (0x03 << 16)) { + if (dtcm_present) { if (dtcm_pool_start < dtcm_end) { ret = gen_pool_add(tcm_pool, dtcm_pool_start, dtcm_end - dtcm_pool_start, -1); @@ -253,7 +299,7 @@ static int 
__init setup_tcm_pool(void) } /* Add the rest of ITCM to the TCM pool */ - if (tcm_status & 0x03) { + if (itcm_present) { if (itcm_pool_start < itcm_end) { ret = gen_pool_add(tcm_pool, itcm_pool_start, itcm_end - itcm_pool_start, -1); diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 6807cb1e76dd..2d3436e9f71f 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -355,9 +355,24 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) pc = (void __user *)instruction_pointer(regs); if (processor_mode(regs) == SVC_MODE) { - instr = *(u32 *) pc; +#ifdef CONFIG_THUMB2_KERNEL + if (thumb_mode(regs)) { + instr = ((u16 *)pc)[0]; + if (is_wide_instruction(instr)) { + instr <<= 16; + instr |= ((u16 *)pc)[1]; + } + } else +#endif + instr = *(u32 *) pc; } else if (thumb_mode(regs)) { get_user(instr, (u16 __user *)pc); + if (is_wide_instruction(instr)) { + unsigned int instr2; + get_user(instr2, (u16 __user *)pc+1); + instr <<= 16; + instr |= instr2; + } } else { get_user(instr, (u32 __user *)pc); } diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index e5287f21badc..bf977f8514f6 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -38,57 +38,6 @@ jiffies = jiffies_64 + 4; SECTIONS { -#ifdef CONFIG_XIP_KERNEL - . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR); -#else - . = PAGE_OFFSET + TEXT_OFFSET; -#endif - - .init : { /* Init code and data */ - _stext = .; - _sinittext = .; - HEAD_TEXT - INIT_TEXT - ARM_EXIT_KEEP(EXIT_TEXT) - _einittext = .; - ARM_CPU_DISCARD(PROC_INFO) - __arch_info_begin = .; - *(.arch.info.init) - __arch_info_end = .; - __tagtable_begin = .; - *(.taglist.init) - __tagtable_end = .; -#ifdef CONFIG_SMP_ON_UP - __smpalt_begin = .; - *(.alt.smp.init) - __smpalt_end = .; -#endif - - __pv_table_begin = .; - *(.pv_table) - __pv_table_end = .; - - INIT_SETUP(16) - - INIT_CALLS - CON_INITCALL - SECURITY_INITCALL - INIT_RAM_FS - -#ifndef CONFIG_XIP_KERNEL - __init_begin = _stext; - INIT_DATA - ARM_EXIT_KEEP(EXIT_DATA) -#endif - } - - PERCPU_SECTION(32) - -#ifndef CONFIG_XIP_KERNEL - . = ALIGN(PAGE_SIZE); - __init_end = .; -#endif - /* * unwind exit sections must be discarded before the rest of the * unwind sections get included. @@ -105,11 +54,23 @@ SECTIONS #ifndef CONFIG_MMU *(.fixup) *(__ex_table) +#endif +#ifndef CONFIG_SMP_ON_UP + *(.alt.smp.init) #endif } +#ifdef CONFIG_XIP_KERNEL + . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR); +#else + . = PAGE_OFFSET + TEXT_OFFSET; +#endif + .head.text : { + _text = .; + HEAD_TEXT + } .text : { /* Real text segment */ - _text = .; /* Text and read-only data */ + _stext = .; /* Text and read-only data */ __exception_text_start = .; *(.exception.text) __exception_text_end = .; @@ -122,8 +83,6 @@ SECTIONS *(.fixup) #endif *(.gnu.warning) - *(.rodata) - *(.rodata.*) *(.glue_7) *(.glue_7t) . = ALIGN(4); @@ -152,10 +111,63 @@ SECTIONS _etext = .; /* End of text and rodata section */ +#ifndef CONFIG_XIP_KERNEL + . 
= ALIGN(PAGE_SIZE); + __init_begin = .; +#endif + + INIT_TEXT_SECTION(8) + .exit.text : { + ARM_EXIT_KEEP(EXIT_TEXT) + } + .init.proc.info : { + ARM_CPU_DISCARD(PROC_INFO) + } + .init.arch.info : { + __arch_info_begin = .; + *(.arch.info.init) + __arch_info_end = .; + } + .init.tagtable : { + __tagtable_begin = .; + *(.taglist.init) + __tagtable_end = .; + } +#ifdef CONFIG_SMP_ON_UP + .init.smpalt : { + __smpalt_begin = .; + *(.alt.smp.init) + __smpalt_end = .; + } +#endif + .init.pv_table : { + __pv_table_begin = .; + *(.pv_table) + __pv_table_end = .; + } + .init.data : { +#ifndef CONFIG_XIP_KERNEL + INIT_DATA +#endif + INIT_SETUP(16) + INIT_CALLS + CON_INITCALL + SECURITY_INITCALL + INIT_RAM_FS + } +#ifndef CONFIG_XIP_KERNEL + .exit.data : { + ARM_EXIT_KEEP(EXIT_DATA) + } +#endif + + PERCPU_SECTION(32) + #ifdef CONFIG_XIP_KERNEL __data_loc = ALIGN(4); /* location in binary */ . = PAGE_OFFSET + TEXT_OFFSET; #else + __init_end = .; . = ALIGN(THREAD_SIZE); __data_loc = .; #endif @@ -270,12 +282,6 @@ SECTIONS /* Default discards */ DISCARDS - -#ifndef CONFIG_SMP_ON_UP - /DISCARD/ : { - *(.alt.smp.init) - } -#endif } /* diff --git a/arch/arm/mach-bcmring/include/mach/entry-macro.S b/arch/arm/mach-bcmring/include/mach/entry-macro.S index 7d393ca010ac..94c950d783ba 100644 --- a/arch/arm/mach-bcmring/include/mach/entry-macro.S +++ b/arch/arm/mach-bcmring/include/mach/entry-macro.S @@ -80,7 +80,3 @@ .macro arch_ret_to_user, tmp1, tmp2 .endm - - .macro irq_prio_table - .endm - diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c index 8bc3701aa05c..84fd78684868 100644 --- a/arch/arm/mach-davinci/board-da830-evm.c +++ b/arch/arm/mach-davinci/board-da830-evm.c @@ -681,4 +681,5 @@ MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM") .init_irq = cp_intc_init, .timer = &davinci_timer, .init_machine = da830_evm_init, + .dma_zone_size = SZ_128M, MACHINE_END diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index a7b41bf505f1..29671ef07152 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -1261,4 +1261,5 @@ MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138/AM18x EVM") .init_irq = cp_intc_init, .timer = &davinci_timer, .init_machine = da850_evm_init, + .dma_zone_size = SZ_128M, MACHINE_END diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c index 6e7cad13352c..241a6bd67408 100644 --- a/arch/arm/mach-davinci/board-dm355-evm.c +++ b/arch/arm/mach-davinci/board-dm355-evm.c @@ -356,4 +356,5 @@ MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM") .init_irq = davinci_irq_init, .timer = &davinci_timer, .init_machine = dm355_evm_init, + .dma_zone_size = SZ_128M, MACHINE_END diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c index 543f9911b281..bee284ca7fd6 100644 --- a/arch/arm/mach-davinci/board-dm355-leopard.c +++ b/arch/arm/mach-davinci/board-dm355-leopard.c @@ -275,4 +275,5 @@ MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard") .init_irq = davinci_irq_init, .timer = &davinci_timer, .init_machine = dm355_leopard_init, + .dma_zone_size = SZ_128M, MACHINE_END diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index 09a87e61ffcf..9818f214d4f0 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -617,5 +617,6 @@ MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM") 
.init_irq = davinci_irq_init, .timer = &davinci_timer, .init_machine = dm365_evm_init, + .dma_zone_size = SZ_128M, MACHINE_END diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index 556bbd468db3..95607a191e03 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -717,4 +717,5 @@ MACHINE_START(DAVINCI_EVM, "DaVinci DM644x EVM") .init_irq = davinci_irq_init, .timer = &davinci_timer, .init_machine = davinci_evm_init, + .dma_zone_size = SZ_128M, MACHINE_END diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index f6ac9ba74878..6d03643b9bd1 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c @@ -802,6 +802,7 @@ MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM") .init_irq = davinci_irq_init, .timer = &davinci_timer, .init_machine = evm_init, + .dma_zone_size = SZ_128M, MACHINE_END MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM") @@ -810,5 +811,6 @@ MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM") .init_irq = davinci_irq_init, .timer = &davinci_timer, .init_machine = evm_init, + .dma_zone_size = SZ_128M, MACHINE_END diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c index 5f5d78308873..c278226627ad 100644 --- a/arch/arm/mach-davinci/board-mityomapl138.c +++ b/arch/arm/mach-davinci/board-mityomapl138.c @@ -571,4 +571,5 @@ MACHINE_START(MITYOMAPL138, "MityDSP-L138/MityARM-1808") .init_irq = cp_intc_init, .timer = &davinci_timer, .init_machine = mityomapl138_init, + .dma_zone_size = SZ_128M, MACHINE_END diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c index 3e7be2de96de..d60a80028ba3 100644 --- a/arch/arm/mach-davinci/board-neuros-osd2.c +++ b/arch/arm/mach-davinci/board-neuros-osd2.c @@ -277,4 +277,5 @@ MACHINE_START(NEUROS_OSD2, "Neuros OSD2") .init_irq = davinci_irq_init, .timer = &davinci_timer, .init_machine = davinci_ntosd2_init, + .dma_zone_size = SZ_128M, MACHINE_END diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c index 67c38d0ecd10..237332a11421 100644 --- a/arch/arm/mach-davinci/board-omapl138-hawk.c +++ b/arch/arm/mach-davinci/board-omapl138-hawk.c @@ -343,4 +343,5 @@ MACHINE_START(OMAPL138_HAWKBOARD, "AM18x/OMAP-L138 Hawkboard") .init_irq = cp_intc_init, .timer = &davinci_timer, .init_machine = omapl138_hawk_init, + .dma_zone_size = SZ_128M, MACHINE_END diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c index 61ac96d8f00d..5f4385c0a089 100644 --- a/arch/arm/mach-davinci/board-sffsdr.c +++ b/arch/arm/mach-davinci/board-sffsdr.c @@ -156,4 +156,5 @@ MACHINE_START(SFFSDR, "Lyrtech SFFSDR") .init_irq = davinci_irq_init, .timer = &davinci_timer, .init_machine = davinci_sffsdr_init, + .dma_zone_size = SZ_128M, MACHINE_END diff --git a/arch/arm/mach-davinci/board-tnetv107x-evm.c b/arch/arm/mach-davinci/board-tnetv107x-evm.c index 1a656e882262..782892065682 100644 --- a/arch/arm/mach-davinci/board-tnetv107x-evm.c +++ b/arch/arm/mach-davinci/board-tnetv107x-evm.c @@ -282,4 +282,5 @@ MACHINE_START(TNETV107X, "TNETV107X EVM") .init_irq = cp_intc_init, .timer = &davinci_timer, .init_machine = tnetv107x_evm_board_init, + .dma_zone_size = SZ_128M, MACHINE_END diff --git a/arch/arm/mach-davinci/include/mach/entry-macro.S b/arch/arm/mach-davinci/include/mach/entry-macro.S index fbdebc7cb409..e14c0dc0e12c 
100644 --- a/arch/arm/mach-davinci/include/mach/entry-macro.S +++ b/arch/arm/mach-davinci/include/mach/entry-macro.S @@ -46,6 +46,3 @@ #endif 1002: .endm - - .macro irq_prio_table - .endm diff --git a/arch/arm/mach-davinci/include/mach/memory.h b/arch/arm/mach-davinci/include/mach/memory.h index 491249ef209c..78731944a70c 100644 --- a/arch/arm/mach-davinci/include/mach/memory.h +++ b/arch/arm/mach-davinci/include/mach/memory.h @@ -41,11 +41,4 @@ */ #define CONSISTENT_DMA_SIZE (14<<20) -/* - * Restrict DMA-able region to workaround silicon bug. The bug - * restricts buffers available for DMA to video hardware to be - * below 128M - */ -#define ARM_DMA_ZONE_SIZE SZ_128M - #endif /* __ASM_ARCH_MEMORY_H */ diff --git a/arch/arm/mach-exynos4/platsmp.c b/arch/arm/mach-exynos4/platsmp.c index c5e65a02be8d..b68d5bdf04cf 100644 --- a/arch/arm/mach-exynos4/platsmp.c +++ b/arch/arm/mach-exynos4/platsmp.c @@ -154,14 +154,6 @@ void __init smp_init_cpus(void) void __init platform_smp_prepare_cpus(unsigned int max_cpus) { - int i; - - /* - * Initialise the present map, which describes the set of CPUs - * actually populated at the present time. - */ - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); scu_enable(scu_base_addr()); diff --git a/arch/arm/mach-exynos4/pm.c b/arch/arm/mach-exynos4/pm.c index 8755ca8dd48d..533c28f758ca 100644 --- a/arch/arm/mach-exynos4/pm.c +++ b/arch/arm/mach-exynos4/pm.c @@ -280,7 +280,7 @@ static struct sleep_save exynos4_l2cc_save[] = { SAVE_ITEM(S5P_VA_L2CC + L2X0_AUX_CTRL), }; -void exynos4_cpu_suspend(void) +static int exynos4_cpu_suspend(unsigned long arg) { unsigned long tmp; unsigned long mask = 0xFFFFFFFF; diff --git a/arch/arm/mach-exynos4/sleep.S b/arch/arm/mach-exynos4/sleep.S index 6b62425417a6..0984078f1eba 100644 --- a/arch/arm/mach-exynos4/sleep.S +++ b/arch/arm/mach-exynos4/sleep.S @@ -32,28 +32,6 @@ .text - /* - * s3c_cpu_save - * - * entry: - * r1 = v:p offset - */ - -ENTRY(s3c_cpu_save) - - stmfd sp!, { r3 - r12, lr } - ldr r3, =resume_with_mmu - bl cpu_suspend - - ldr r0, =pm_cpu_sleep - ldr r0, [ r0 ] - mov pc, r0 - -resume_with_mmu: - ldmfd sp!, { r3 - r12, pc } - - .ltorg - /* * sleep magic, to allow the bootloader to check for an valid * image to resume to. 
Must be the first word before the diff --git a/arch/arm/mach-h720x/h7201-eval.c b/arch/arm/mach-h720x/h7201-eval.c index 629454d71c8d..65f1bea958e5 100644 --- a/arch/arm/mach-h720x/h7201-eval.c +++ b/arch/arm/mach-h720x/h7201-eval.c @@ -33,4 +33,5 @@ MACHINE_START(H7201, "Hynix GMS30C7201") .map_io = h720x_map_io, .init_irq = h720x_init_irq, .timer = &h7201_timer, + .dma_zone_size = SZ_256M, MACHINE_END diff --git a/arch/arm/mach-h720x/h7202-eval.c b/arch/arm/mach-h720x/h7202-eval.c index e9f46b696354..884584a09752 100644 --- a/arch/arm/mach-h720x/h7202-eval.c +++ b/arch/arm/mach-h720x/h7202-eval.c @@ -76,4 +76,5 @@ MACHINE_START(H7202, "Hynix HMS30C7202") .init_irq = h7202_init_irq, .timer = &h7202_timer, .init_machine = init_eval_h7202, + .dma_zone_size = SZ_256M, MACHINE_END diff --git a/arch/arm/mach-h720x/include/mach/entry-macro.S b/arch/arm/mach-h720x/include/mach/entry-macro.S index 6d3b917c4a18..c3948e5ba4a0 100644 --- a/arch/arm/mach-h720x/include/mach/entry-macro.S +++ b/arch/arm/mach-h720x/include/mach/entry-macro.S @@ -57,9 +57,6 @@ tst \irqstat, #1 @ bit 0 should be set .endm - .macro irq_prio_table - .endm - #else #error hynix processor selection missmatch #endif diff --git a/arch/arm/mach-h720x/include/mach/memory.h b/arch/arm/mach-h720x/include/mach/memory.h index b0b3baec9acf..96dcf50c51d3 100644 --- a/arch/arm/mach-h720x/include/mach/memory.h +++ b/arch/arm/mach-h720x/include/mach/memory.h @@ -8,11 +8,4 @@ #define __ASM_ARCH_MEMORY_H #define PLAT_PHYS_OFFSET UL(0x40000000) -/* - * This is the maximum DMA address that can be DMAd to. - * There should not be more than (0xd0000000 - 0xc0000000) - * bytes of RAM. - */ -#define ARM_DMA_ZONE_SIZE SZ_256M - #endif diff --git a/arch/arm/mach-ixp4xx/avila-setup.c b/arch/arm/mach-ixp4xx/avila-setup.c index 73745ff102d5..ee19c1d383aa 100644 --- a/arch/arm/mach-ixp4xx/avila-setup.c +++ b/arch/arm/mach-ixp4xx/avila-setup.c @@ -169,6 +169,9 @@ MACHINE_START(AVILA, "Gateworks Avila Network Platform") .timer = &ixp4xx_timer, .boot_params = 0x0100, .init_machine = avila_init, +#if defined(CONFIG_PCI) + .dma_zone_size = SZ_64M, +#endif MACHINE_END /* @@ -184,6 +187,9 @@ MACHINE_START(LOFT, "Giant Shoulder Inc Loft board") .timer = &ixp4xx_timer, .boot_params = 0x0100, .init_machine = avila_init, +#if defined(CONFIG_PCI) + .dma_zone_size = SZ_64M, +#endif MACHINE_END #endif diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index e9a589395723..e2e98bbb6413 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c @@ -316,6 +316,11 @@ static int abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *r } +static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) +{ + return (dma_addr + size) >= SZ_64M; +} + /* * Setup DMA mask to 64MB on PCI devices. Ignore all other devices. 
*/ @@ -324,7 +329,7 @@ static int ixp4xx_pci_platform_notify(struct device *dev) if(dev->bus == &pci_bus_type) { *dev->dma_mask = SZ_64M - 1; dev->coherent_dma_mask = SZ_64M - 1; - dmabounce_register_dev(dev, 2048, 4096); + dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce); } return 0; } @@ -337,11 +342,6 @@ static int ixp4xx_pci_platform_notify_remove(struct device *dev) return 0; } -int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) -{ - return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M); -} - void __init ixp4xx_pci_preinit(void) { unsigned long cpuid = read_cpuid_id(); diff --git a/arch/arm/mach-ixp4xx/coyote-setup.c b/arch/arm/mach-ixp4xx/coyote-setup.c index 355e3de38733..e24564b5d935 100644 --- a/arch/arm/mach-ixp4xx/coyote-setup.c +++ b/arch/arm/mach-ixp4xx/coyote-setup.c @@ -114,6 +114,9 @@ MACHINE_START(ADI_COYOTE, "ADI Engineering Coyote") .timer = &ixp4xx_timer, .boot_params = 0x0100, .init_machine = coyote_init, +#if defined(CONFIG_PCI) + .dma_zone_size = SZ_64M, +#endif MACHINE_END #endif diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c index d398229cfaa5..03e54515e8b3 100644 --- a/arch/arm/mach-ixp4xx/dsmg600-setup.c +++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c @@ -284,4 +284,7 @@ MACHINE_START(DSMG600, "D-Link DSM-G600 RevA") .init_irq = ixp4xx_init_irq, .timer = &dsmg600_timer, .init_machine = dsmg600_init, +#if defined(CONFIG_PCI) + .dma_zone_size = SZ_64M, +#endif MACHINE_END diff --git a/arch/arm/mach-ixp4xx/fsg-setup.c b/arch/arm/mach-ixp4xx/fsg-setup.c index 727ee39ce11c..23a8b3614568 100644 --- a/arch/arm/mach-ixp4xx/fsg-setup.c +++ b/arch/arm/mach-ixp4xx/fsg-setup.c @@ -275,5 +275,8 @@ MACHINE_START(FSG, "Freecom FSG-3") .timer = &ixp4xx_timer, .boot_params = 0x0100, .init_machine = fsg_init, +#if defined(CONFIG_PCI) + .dma_zone_size = SZ_64M, +#endif MACHINE_END diff --git a/arch/arm/mach-ixp4xx/gateway7001-setup.c b/arch/arm/mach-ixp4xx/gateway7001-setup.c index 9dc0b4eaa65a..d4f851bdd9a4 100644 --- a/arch/arm/mach-ixp4xx/gateway7001-setup.c +++ b/arch/arm/mach-ixp4xx/gateway7001-setup.c @@ -101,5 +101,8 @@ MACHINE_START(GATEWAY7001, "Gateway 7001 AP") .timer = &ixp4xx_timer, .boot_params = 0x0100, .init_machine = gateway7001_init, +#if defined(CONFIG_PCI) + .dma_zone_size = SZ_64M, +#endif MACHINE_END #endif diff --git a/arch/arm/mach-ixp4xx/goramo_mlr.c b/arch/arm/mach-ixp4xx/goramo_mlr.c index 3e8c0e33b59c..5f00ad224fe0 100644 --- a/arch/arm/mach-ixp4xx/goramo_mlr.c +++ b/arch/arm/mach-ixp4xx/goramo_mlr.c @@ -501,4 +501,7 @@ MACHINE_START(GORAMO_MLR, "MultiLink") .timer = &ixp4xx_timer, .boot_params = 0x0100, .init_machine = gmlr_init, +#if defined(CONFIG_PCI) + .dma_zone_size = SZ_64M, +#endif MACHINE_END diff --git a/arch/arm/mach-ixp4xx/gtwx5715-setup.c b/arch/arm/mach-ixp4xx/gtwx5715-setup.c index 77abead36227..3790dffd3c30 100644 --- a/arch/arm/mach-ixp4xx/gtwx5715-setup.c +++ b/arch/arm/mach-ixp4xx/gtwx5715-setup.c @@ -169,6 +169,9 @@ MACHINE_START(GTWX5715, "Gemtek GTWX5715 (Linksys WRV54G)") .timer = &ixp4xx_timer, .boot_params = 0x0100, .init_machine = gtwx5715_init, +#if defined(CONFIG_PCI) + .dma_zone_size = SZ_64M, +#endif MACHINE_END diff --git a/arch/arm/mach-ixp4xx/include/mach/memory.h b/arch/arm/mach-ixp4xx/include/mach/memory.h index 34e79404671a..4caf1761f1e2 100644 --- a/arch/arm/mach-ixp4xx/include/mach/memory.h +++ b/arch/arm/mach-ixp4xx/include/mach/memory.h @@ -14,8 +14,4 @@ */ #define PLAT_PHYS_OFFSET UL(0x00000000) -#ifdef CONFIG_PCI -#define 
ARM_DMA_ZONE_SIZE SZ_64M -#endif - #endif diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c index dca4f7f9f4f7..6a2927956bf6 100644 --- a/arch/arm/mach-ixp4xx/ixdp425-setup.c +++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c @@ -258,6 +258,9 @@ MACHINE_START(IXDP425, "Intel IXDP425 Development Platform") .timer = &ixp4xx_timer, .boot_params = 0x0100, .init_machine = ixdp425_init, +#if defined(CONFIG_PCI) + .dma_zone_size = SZ_64M, +#endif MACHINE_END #endif @@ -269,6 +272,9 @@ MACHINE_START(IXDP465, "Intel IXDP465 Development Platform") .timer = &ixp4xx_timer, .boot_params = 0x0100, .init_machine = ixdp425_init, +#if defined(CONFIG_PCI) + .dma_zone_size = SZ_64M, +#endif MACHINE_END #endif @@ -280,6 +286,9 @@ MACHINE_START(IXCDP1100, "Intel IXCDP1100 Development Platform") .timer = &ixp4xx_timer, .boot_params = 0x0100, .init_machine = ixdp425_init, +#if defined(CONFIG_PCI) + .dma_zone_size = SZ_64M, +#endif MACHINE_END #endif @@ -291,5 +300,8 @@ MACHINE_START(KIXRP435, "Intel KIXRP435 Reference Platform") .timer = &ixp4xx_timer, .boot_params = 0x0100, .init_machine = ixdp425_init, +#if defined(CONFIG_PCI) + .dma_zone_size = SZ_64M, +#endif MACHINE_END #endif diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c index f18fee748878..afb51879d9a4 100644 --- a/arch/arm/mach-ixp4xx/nas100d-setup.c +++ b/arch/arm/mach-ixp4xx/nas100d-setup.c @@ -319,4 +319,7 @@ MACHINE_START(NAS100D, "Iomega NAS 100d") .init_irq = ixp4xx_init_irq, .timer = &ixp4xx_timer, .init_machine = nas100d_init, +#if defined(CONFIG_PCI) + .dma_zone_size = SZ_64M, +#endif MACHINE_END diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c index f79b62eb7614..69e40f2cf092 100644 --- a/arch/arm/mach-ixp4xx/nslu2-setup.c +++ b/arch/arm/mach-ixp4xx/nslu2-setup.c @@ -305,4 +305,7 @@ MACHINE_START(NSLU2, "Linksys NSLU2") .init_irq = ixp4xx_init_irq, .timer = &nslu2_timer, .init_machine = nslu2_init, +#if defined(CONFIG_PCI) + .dma_zone_size = SZ_64M, +#endif MACHINE_END diff --git a/arch/arm/mach-ixp4xx/vulcan-setup.c b/arch/arm/mach-ixp4xx/vulcan-setup.c index 4e72cfdd3c46..045336c833af 100644 --- a/arch/arm/mach-ixp4xx/vulcan-setup.c +++ b/arch/arm/mach-ixp4xx/vulcan-setup.c @@ -241,4 +241,7 @@ MACHINE_START(ARCOM_VULCAN, "Arcom/Eurotech Vulcan") .timer = &ixp4xx_timer, .boot_params = 0x0100, .init_machine = vulcan_init, +#if defined(CONFIG_PCI) + .dma_zone_size = SZ_64M, +#endif MACHINE_END diff --git a/arch/arm/mach-ixp4xx/wg302v2-setup.c b/arch/arm/mach-ixp4xx/wg302v2-setup.c index 5d148c7bc4fb..40b9fad800b8 100644 --- a/arch/arm/mach-ixp4xx/wg302v2-setup.c +++ b/arch/arm/mach-ixp4xx/wg302v2-setup.c @@ -102,5 +102,8 @@ MACHINE_START(WG302V2, "Netgear WG302 v2 / WAG302 v2") .timer = &ixp4xx_timer, .boot_params = 0x0100, .init_machine = wg302v2_init, +#if defined(CONFIG_PCI) + .dma_zone_size = SZ_64M, +#endif MACHINE_END #endif diff --git a/arch/arm/mach-lpc32xx/include/mach/entry-macro.S b/arch/arm/mach-lpc32xx/include/mach/entry-macro.S index 870227c96602..b725f6c93975 100644 --- a/arch/arm/mach-lpc32xx/include/mach/entry-macro.S +++ b/arch/arm/mach-lpc32xx/include/mach/entry-macro.S @@ -41,7 +41,3 @@ rsb \irqnr, \irqnr, #31 teq \irqstat, #0 .endm - - .macro irq_prio_table - .endm - diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c index 2034098cf015..315b9f365329 100644 --- a/arch/arm/mach-msm/platsmp.c +++ b/arch/arm/mach-msm/platsmp.c @@ -157,12 +157,4 @@ void __init smp_init_cpus(void) void __init 
platform_smp_prepare_cpus(unsigned int max_cpus) { - int i; - - /* - * Initialise the present map, which describes the set of CPUs - * actually populated at the present time. - */ - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); } diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c index da53ba3917ca..aab884fecc55 100644 --- a/arch/arm/mach-omap2/control.c +++ b/arch/arm/mach-omap2/control.c @@ -286,14 +286,15 @@ void omap3_save_scratchpad_contents(void) scratchpad_contents.boot_config_ptr = 0x0; if (cpu_is_omap3630()) scratchpad_contents.public_restore_ptr = - virt_to_phys(get_omap3630_restore_pointer()); + virt_to_phys(omap3_restore_3630); else if (omap_rev() != OMAP3430_REV_ES3_0 && omap_rev() != OMAP3430_REV_ES3_1) scratchpad_contents.public_restore_ptr = - virt_to_phys(get_restore_pointer()); + virt_to_phys(omap3_restore); else scratchpad_contents.public_restore_ptr = - virt_to_phys(get_es3_restore_pointer()); + virt_to_phys(omap3_restore_es3); + if (omap_type() == OMAP2_DEVICE_TYPE_GP) scratchpad_contents.secure_ram_restore_ptr = 0x0; else diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h index a016c8b59e00..d4ef75d5a382 100644 --- a/arch/arm/mach-omap2/control.h +++ b/arch/arm/mach-omap2/control.h @@ -386,9 +386,9 @@ extern void omap4_ctrl_pad_writel(u32 val, u16 offset); extern void omap3_save_scratchpad_contents(void); extern void omap3_clear_scratchpad_contents(void); -extern u32 *get_restore_pointer(void); -extern u32 *get_es3_restore_pointer(void); -extern u32 *get_omap3630_restore_pointer(void); +extern void omap3_restore(void); +extern void omap3_restore_es3(void); +extern void omap3_restore_3630(void); extern u32 omap3_arm_context[128]; extern void omap3_control_save_context(void); extern void omap3_control_restore_context(void); diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S index a48690b90990..ceb8b7e593d7 100644 --- a/arch/arm/mach-omap2/include/mach/entry-macro.S +++ b/arch/arm/mach-omap2/include/mach/entry-macro.S @@ -165,6 +165,3 @@ #endif #endif /* MULTI_OMAP2 */ - - .macro irq_prio_table - .endm diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index ecfe93c4b585..ce65e9329c7b 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -125,14 +125,6 @@ void __init smp_init_cpus(void) void __init platform_smp_prepare_cpus(unsigned int max_cpus) { - int i; - - /* - * Initialise the present map, which describes the set of CPUs - * actually populated at the present time. 
- */ - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); /* * Initialise the SCU and wake up the secondary core using diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h index 45bcfce77352..04ee56646126 100644 --- a/arch/arm/mach-omap2/pm.h +++ b/arch/arm/mach-omap2/pm.h @@ -88,18 +88,28 @@ extern int pm_dbg_regset_init(int reg_set); #define pm_dbg_regset_init(reg_set) do {} while (0); #endif /* CONFIG_PM_DEBUG */ +/* 24xx */ extern void omap24xx_idle_loop_suspend(void); +extern unsigned int omap24xx_idle_loop_suspend_sz; extern void omap24xx_cpu_suspend(u32 dll_ctrl, void __iomem *sdrc_dlla_ctrl, void __iomem *sdrc_power); -extern void omap34xx_cpu_suspend(u32 *addr, int save_state); -extern int save_secure_ram_context(u32 *addr); -extern void omap3_save_scratchpad_contents(void); - -extern unsigned int omap24xx_idle_loop_suspend_sz; -extern unsigned int save_secure_ram_context_sz; extern unsigned int omap24xx_cpu_suspend_sz; -extern unsigned int omap34xx_cpu_suspend_sz; + +/* 3xxx */ +extern void omap34xx_cpu_suspend(int save_state); + +/* omap3_do_wfi function pointer and size, for copy to SRAM */ +extern void omap3_do_wfi(void); +extern unsigned int omap3_do_wfi_sz; +/* ... and its pointer from SRAM after copy */ +extern void (*omap3_do_wfi_sram)(void); + +/* save_secure_ram_context function pointer and size, for copy to SRAM */ +extern int save_secure_ram_context(u32 *addr); +extern unsigned int save_secure_ram_context_sz; + +extern void omap3_save_scratchpad_contents(void); #define PM_RTA_ERRATUM_i608 (1 << 0) #define PM_SDRC_WAKEUP_ERRATUM_i583 (1 << 1) diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index c155c9d1c82c..b77d82665abb 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -31,6 +31,8 @@ #include #include +#include + #include #include "clockdomain.h" #include "powerdomain.h" @@ -40,8 +42,6 @@ #include #include -#include - #include "cm2xxx_3xxx.h" #include "cm-regbits-34xx.h" #include "prm-regbits-34xx.h" @@ -64,11 +64,6 @@ static inline bool is_suspending(void) } #endif -/* Scratchpad offsets */ -#define OMAP343X_TABLE_ADDRESS_OFFSET 0xc4 -#define OMAP343X_TABLE_VALUE_OFFSET 0xc0 -#define OMAP343X_CONTROL_REG_VALUE_OFFSET 0xc8 - /* pm34xx errata defined in pm.h */ u16 pm34xx_errata; @@ -83,9 +78,8 @@ struct power_state { static LIST_HEAD(pwrst_list); -static void (*_omap_sram_idle)(u32 *addr, int save_state); - static int (*_omap_save_secure_sram)(u32 *addr); +void (*omap3_do_wfi_sram)(void); static struct powerdomain *mpu_pwrdm, *neon_pwrdm; static struct powerdomain *core_pwrdm, *per_pwrdm; @@ -312,28 +306,25 @@ static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id) return IRQ_HANDLED; } -/* Function to restore the table entry that was modified for enabling MMU */ -static void restore_table_entry(void) +static void omap34xx_save_context(u32 *save) { - void __iomem *scratchpad_address; - u32 previous_value, control_reg_value; - u32 *address; + u32 val; - scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD); + /* Read Auxiliary Control Register */ + asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val)); + *save++ = 1; + *save++ = val; - /* Get address of entry that was modified */ - address = (u32 *)__raw_readl(scratchpad_address + - OMAP343X_TABLE_ADDRESS_OFFSET); - /* Get the previous value which needs to be restored */ - previous_value = __raw_readl(scratchpad_address + - OMAP343X_TABLE_VALUE_OFFSET); - address = __va(address); - *address = previous_value; - flush_tlb_all(); - 
control_reg_value = __raw_readl(scratchpad_address
-				 + OMAP343X_CONTROL_REG_VALUE_OFFSET);
-	/* This will enable caches and prediction */
-	set_cr(control_reg_value);
+	/* Read L2 AUX ctrl register */
+	asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
+	*save++ = 1;
+	*save++ = val;
+}
+
+static int omap34xx_do_sram_idle(unsigned long save_state)
+{
+	omap34xx_cpu_suspend(save_state);
+	return 0;
+}
 
 void omap_sram_idle(void)
@@ -352,9 +343,6 @@
 	int core_prev_state, per_prev_state;
 	u32 sdrc_pwr = 0;
 
-	if (!_omap_sram_idle)
-		return;
-
 	pwrdm_clear_all_prev_pwrst(mpu_pwrdm);
 	pwrdm_clear_all_prev_pwrst(neon_pwrdm);
 	pwrdm_clear_all_prev_pwrst(core_pwrdm);
@@ -432,12 +420,16 @@
 	sdrc_pwr = sdrc_read_reg(SDRC_POWER);
 
 	/*
-	 * omap3_arm_context is the location where ARM registers
-	 * get saved. The restore path then reads from this
-	 * location and restores them back.
+	 * omap3_arm_context is the location where some ARM context
+	 * gets saved. The rest is placed on the stack, and restored
+	 * from there before resuming.
 	 */
-	_omap_sram_idle(omap3_arm_context, save_state);
-	cpu_init();
+	if (save_state)
+		omap34xx_save_context(omap3_arm_context);
+	if (save_state == 1 || save_state == 3)
+		cpu_suspend(save_state, omap34xx_do_sram_idle);
+	else
+		omap34xx_do_sram_idle(save_state);
 
 	/* Restore normal SDRC POWER settings */
 	if (omap_rev() >= OMAP3430_REV_ES3_0 &&
@@ -445,10 +437,6 @@
 	    core_next_state == PWRDM_POWER_OFF)
 		sdrc_write_reg(sdrc_pwr, SDRC_POWER);
 
-	/* Restore table entry modified during MMU restoration */
-	if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF)
-		restore_table_entry();
-
 	/* CORE */
 	if (core_next_state < PWRDM_POWER_ON) {
 		core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
@@ -852,10 +840,17 @@ static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
 	return 0;
 }
 
+/*
+ * Push functions to SRAM
+ *
+ * The minimum set of functions is pushed to SRAM for execution:
+ * - omap3_do_wfi for erratum i581 WA,
+ * - save_secure_ram_context for security extensions.
+ */
 void omap_push_sram_idle(void)
 {
-	_omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
-					omap34xx_cpu_suspend_sz);
+	omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
+
 	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
 		_omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
 				save_secure_ram_context_sz);
@@ -920,7 +915,6 @@ static int __init omap3_pm_init(void)
 	per_clkdm = clkdm_lookup("per_clkdm");
 	core_clkdm = clkdm_lookup("core_clkdm");
 
-	omap_push_sram_idle();
 #ifdef CONFIG_SUSPEND
 	suspend_set_ops(&omap_pm_ops);
 #endif /* CONFIG_SUSPEND */
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index 63f10669571a..f2ea1bd1c691 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -74,46 +74,6 @@
  * API functions
  */
 
-/*
- * The "get_*restore_pointer" functions are used to provide a
- * physical restore address where the ROM code jumps while waking
- * up from MPU OFF/OSWR state.
- * The restore pointer is stored into the scratchpad.
- */
-
-	.text
-/* Function call to get the restore pointer for resume from OFF */
-ENTRY(get_restore_pointer)
-	stmfd	sp!, {lr}	@ save registers on stack
-	adr	r0, restore
-	ldmfd	sp!, {pc}	@ restore regs and return
-ENDPROC(get_restore_pointer)
-	.align
-ENTRY(get_restore_pointer_sz)
-	.word	. 
- get_restore_pointer - - .text -/* Function call to get the restore pointer for 3630 resume from OFF */ -ENTRY(get_omap3630_restore_pointer) - stmfd sp!, {lr} @ save registers on stack - adr r0, restore_3630 - ldmfd sp!, {pc} @ restore regs and return -ENDPROC(get_omap3630_restore_pointer) - .align -ENTRY(get_omap3630_restore_pointer_sz) - .word . - get_omap3630_restore_pointer - - .text -/* Function call to get the restore pointer for ES3 to resume from OFF */ -ENTRY(get_es3_restore_pointer) - stmfd sp!, {lr} @ save registers on stack - adr r0, restore_es3 - ldmfd sp!, {pc} @ restore regs and return -ENDPROC(get_es3_restore_pointer) - .align -ENTRY(get_es3_restore_pointer_sz) - .word . - get_es3_restore_pointer - .text /* * L2 cache needs to be toggled for stable OFF mode functionality on 3630. @@ -133,7 +93,7 @@ ENDPROC(enable_omap3630_toggle_l2_on_restore) /* Function to call rom code to save secure ram context */ .align 3 ENTRY(save_secure_ram_context) - stmfd sp!, {r1-r12, lr} @ save registers on stack + stmfd sp!, {r4 - r11, lr} @ save registers on stack adr r3, api_params @ r3 points to parameters str r0, [r3,#0x4] @ r0 has sdram address ldr r12, high_mask @@ -152,7 +112,7 @@ ENTRY(save_secure_ram_context) nop nop nop - ldmfd sp!, {r1-r12, pc} + ldmfd sp!, {r4 - r11, pc} .align sram_phy_addr_mask: .word SRAM_BASE_P @@ -179,69 +139,38 @@ ENTRY(save_secure_ram_context_sz) * * * Notes: - * - this code gets copied to internal SRAM at boot and after wake-up - * from OFF mode. The execution pointer in SRAM is _omap_sram_idle. + * - only the minimum set of functions gets copied to internal SRAM at boot + * and after wake-up from OFF mode, cf. omap_push_sram_idle. The function + * pointers in SDRAM or SRAM are called depending on the desired low power + * target state. * - when the OMAP wakes up it continues at different execution points * depending on the low power mode (non-OFF vs OFF modes), * cf. 'Resume path for xxx mode' comments. 
*/ .align 3 ENTRY(omap34xx_cpu_suspend) - stmfd sp!, {r0-r12, lr} @ save registers on stack + stmfd sp!, {r4 - r11, lr} @ save registers on stack /* - * r0 contains CPU context save/restore pointer in sdram - * r1 contains information about saving context: + * r0 contains information about saving context: * 0 - No context lost * 1 - Only L1 and logic lost * 2 - Only L2 lost (Even L1 is retained we clean it along with L2) * 3 - Both L1 and L2 lost and logic lost */ - /* Directly jump to WFI is the context save is not required */ - cmp r1, #0x0 - beq omap3_do_wfi + /* + * For OFF mode: save context and jump to WFI in SDRAM (omap3_do_wfi) + * For non-OFF modes: jump to the WFI code in SRAM (omap3_do_wfi_sram) + */ + ldr r4, omap3_do_wfi_sram_addr + ldr r5, [r4] + cmp r0, #0x0 @ If no context save required, + bxeq r5 @ jump to the WFI code in SRAM + /* Otherwise fall through to the save context code */ save_context_wfi: - mov r8, r0 @ Store SDRAM address in r8 - mrc p15, 0, r5, c1, c0, 1 @ Read Auxiliary Control Register - mov r4, #0x1 @ Number of parameters for restore call - stmia r8!, {r4-r5} @ Push parameters for restore call - mrc p15, 1, r5, c9, c0, 2 @ Read L2 AUX ctrl register - stmia r8!, {r4-r5} @ Push parameters for restore call - - /* Check what that target sleep state is from r1 */ - cmp r1, #0x2 @ Only L2 lost, no need to save context - beq clean_caches - -l1_logic_lost: - mov r4, sp @ Store sp - mrs r5, spsr @ Store spsr - mov r6, lr @ Store lr - stmia r8!, {r4-r6} - - mrc p15, 0, r4, c1, c0, 2 @ Coprocessor access control register - mrc p15, 0, r5, c2, c0, 0 @ TTBR0 - mrc p15, 0, r6, c2, c0, 1 @ TTBR1 - mrc p15, 0, r7, c2, c0, 2 @ TTBCR - stmia r8!, {r4-r7} - - mrc p15, 0, r4, c3, c0, 0 @ Domain access Control Register - mrc p15, 0, r5, c10, c2, 0 @ PRRR - mrc p15, 0, r6, c10, c2, 1 @ NMRR - stmia r8!,{r4-r6} - - mrc p15, 0, r4, c13, c0, 1 @ Context ID - mrc p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID - mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address - mrs r7, cpsr @ Store current cpsr - stmia r8!, {r4-r7} - - mrc p15, 0, r4, c1, c0, 0 @ save control register - stmia r8!, {r4} - -clean_caches: /* * jump out to kernel flush routine * - reuse that code is better @@ -284,7 +213,32 @@ clean_caches: THUMB( nop ) .arm -omap3_do_wfi: + b omap3_do_wfi + +/* + * Local variables + */ +omap3_do_wfi_sram_addr: + .word omap3_do_wfi_sram +kernel_flush: + .word v7_flush_dcache_all + +/* =================================== + * == WFI instruction => Enter idle == + * =================================== + */ + +/* + * Do WFI instruction + * Includes the resume path for non-OFF modes + * + * This code gets copied to internal SRAM and is accessible + * from both SDRAM and SRAM: + * - executed from SRAM for non-off modes (omap3_do_wfi_sram), + * - executed from SDRAM for OFF mode (omap3_do_wfi). + */ + .align 3 +ENTRY(omap3_do_wfi) ldr r4, sdrc_power @ read the SDRC_POWER register ldr r5, [r4] @ read the contents of SDRC_POWER orr r5, r5, #0x40 @ enable self refresh on idle req @@ -316,8 +270,86 @@ omap3_do_wfi: nop nop nop - bl wait_sdrc_ok +/* + * This function implements the erratum ID i581 WA: + * SDRC state restore before accessing the SDRAM + * + * Only used at return from non-OFF mode. For OFF + * mode the ROM code configures the SDRC and + * the DPLL before calling the restore code directly + * from DDR. + */ + +/* Make sure SDRC accesses are ok */ +wait_sdrc_ok: + +/* DPLL3 must be locked before accessing the SDRC. 
Maybe the HW ensures this */
+	ldr	r4, cm_idlest_ckgen
+wait_dpll3_lock:
+	ldr	r5, [r4]
+	tst	r5, #1
+	beq	wait_dpll3_lock
+
+	ldr	r4, cm_idlest1_core
+wait_sdrc_ready:
+	ldr	r5, [r4]
+	tst	r5, #0x2
+	bne	wait_sdrc_ready
+	/* allow DLL powerdown upon hw idle req */
+	ldr	r4, sdrc_power
+	ldr	r5, [r4]
+	bic	r5, r5, #0x40
+	str	r5, [r4]
+
+/*
+ * PC-relative stores lead to undefined behaviour in Thumb-2: use r7 as a
+ * base instead.
+ * Be careful not to clobber r7 when maintaining this code.
+ */
+
+is_dll_in_lock_mode:
+	/* Is dll in lock mode? */
+	ldr	r4, sdrc_dlla_ctrl
+	ldr	r5, [r4]
+	tst	r5, #0x4
+	bne	exit_nonoff_modes	@ Return if locked
+	/* wait till dll locks */
+	adr	r7, kick_counter
+wait_dll_lock_timed:
+	ldr	r4, wait_dll_lock_counter
+	add	r4, r4, #1
+	str	r4, [r7, #wait_dll_lock_counter - kick_counter]
+	ldr	r4, sdrc_dlla_status
+	/* Wait 20uS for lock */
+	mov	r6, #8
+wait_dll_lock:
+	subs	r6, r6, #0x1
+	beq	kick_dll
+	ldr	r5, [r4]
+	and	r5, r5, #0x4
+	cmp	r5, #0x4
+	bne	wait_dll_lock
+	b	exit_nonoff_modes	@ Return when locked
+
+	/* disable/reenable DLL if not locked */
+kick_dll:
+	ldr	r4, sdrc_dlla_ctrl
+	ldr	r5, [r4]
+	mov	r6, r5
+	bic	r6, #(1<<3)	@ disable dll
+	str	r6, [r4]
+	dsb
+	orr	r6, r6, #(1<<3)	@ enable dll
+	str	r6, [r4]
+	dsb
+	ldr	r4, kick_counter
+	add	r4, r4, #1
+	str	r4, [r7]	@ kick_counter
+	b	wait_dll_lock_timed
+
+exit_nonoff_modes:
+	/* Re-enable C-bit if needed */
 	mrc	p15, 0, r0, c1, c0, 0
 	tst	r0, #(1 << 2)		@ Check C bit enabled?
 	orreq	r0, r0, #(1 << 2)	@ Enable the C bit if cleared
@@ -329,7 +361,32 @@ omap3_do_wfi:
  * == Exit point from non-OFF modes ==
  * ===================================
  */
-	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return
+	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
+
+/*
+ * Local variables
+ */
+sdrc_power:
+	.word	SDRC_POWER_V
+cm_idlest1_core:
+	.word	CM_IDLEST1_CORE_V
+cm_idlest_ckgen:
+	.word	CM_IDLEST_CKGEN_V
+sdrc_dlla_status:
+	.word	SDRC_DLLA_STATUS_V
+sdrc_dlla_ctrl:
+	.word	SDRC_DLLA_CTRL_V
+	/*
+	 * When exporting to userspace while the counters are in SRAM,
+	 * these 2 words need to be at the end to facilitate retrieval!
+	 */
+kick_counter:
+	.word	0
+wait_dll_lock_counter:
+	.word	0
+
+ENTRY(omap3_do_wfi_sz)
+	.word	. - omap3_do_wfi
 
 /*
@@ -346,13 +403,17 @@
  * restore_es3: applies to 34xx >= ES3.0
  * restore_3630: applies to 36xx
  * restore: common code for 3xxx
+ *
+ * Note: when back from CORE and MPU OFF mode we are running
+ * from SDRAM, without MMU, without the caches and prediction.
+ * Also the SRAM content has been cleared. 
*/ -restore_es3: +ENTRY(omap3_restore_es3) ldr r5, pm_prepwstst_core_p ldr r4, [r5] and r4, r4, #0x3 cmp r4, #0x0 @ Check if previous power state of CORE is OFF - bne restore + bne omap3_restore @ Fall through to OMAP3 common code adr r0, es3_sdrc_fix ldr r1, sram_base ldr r2, es3_sdrc_fix_sz @@ -364,35 +425,32 @@ copy_to_sram: bne copy_to_sram ldr r1, sram_base blx r1 - b restore + b omap3_restore @ Fall through to OMAP3 common code +ENDPROC(omap3_restore_es3) -restore_3630: +ENTRY(omap3_restore_3630) ldr r1, pm_prepwstst_core_p ldr r2, [r1] and r2, r2, #0x3 cmp r2, #0x0 @ Check if previous power state of CORE is OFF - bne restore + bne omap3_restore @ Fall through to OMAP3 common code /* Disable RTA before giving control */ ldr r1, control_mem_rta mov r2, #OMAP36XX_RTA_DISABLE str r2, [r1] +ENDPROC(omap3_restore_3630) /* Fall through to common code for the remaining logic */ -restore: +ENTRY(omap3_restore) /* - * Check what was the reason for mpu reset and store the reason in r9: - * 0 - No context lost - * 1 - Only L1 and logic lost - * 2 - Only L2 lost - In this case, we wont be here - * 3 - Both L1 and L2 lost + * Read the pwstctrl register to check the reason for mpu reset. + * This tells us what was lost. */ ldr r1, pm_pwstctrl_mpu ldr r2, [r1] and r2, r2, #0x3 cmp r2, #0x0 @ Check if target power state was OFF or RET - moveq r9, #0x3 @ MPU OFF => L1 and L2 lost - movne r9, #0x1 @ Only L1 and L2 lost => avoid L2 invalidation bne logic_l1_restore ldr r0, l2dis_3630 @@ -471,115 +529,39 @@ logic_l1_restore: orr r1, r1, #2 @ re-enable L2 cache mcr p15, 0, r1, c1, c0, 1 skipl2reen: - mov r1, #0 - /* - * Invalidate all instruction caches to PoU - * and flush branch target cache - */ - mcr p15, 0, r1, c7, c5, 0 - ldr r4, scratchpad_base - ldr r3, [r4,#0xBC] - adds r3, r3, #16 + /* Now branch to the common CPU resume function */ + b cpu_resume +ENDPROC(omap3_restore) - ldmia r3!, {r4-r6} - mov sp, r4 @ Restore sp - msr spsr_cxsf, r5 @ Restore spsr - mov lr, r6 @ Restore lr - - ldmia r3!, {r4-r7} - mcr p15, 0, r4, c1, c0, 2 @ Coprocessor access Control Register - mcr p15, 0, r5, c2, c0, 0 @ TTBR0 - mcr p15, 0, r6, c2, c0, 1 @ TTBR1 - mcr p15, 0, r7, c2, c0, 2 @ TTBCR - - ldmia r3!,{r4-r6} - mcr p15, 0, r4, c3, c0, 0 @ Domain access Control Register - mcr p15, 0, r5, c10, c2, 0 @ PRRR - mcr p15, 0, r6, c10, c2, 1 @ NMRR - - - ldmia r3!,{r4-r7} - mcr p15, 0, r4, c13, c0, 1 @ Context ID - mcr p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID - mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address - msr cpsr, r7 @ store cpsr - - /* Enabling MMU here */ - mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl - /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */ - and r7, #0x7 - cmp r7, #0x0 - beq usettbr0 -ttbr_error: - /* - * More work needs to be done to support N[0:2] value other than 0 - * So looping here so that the error can be detected - */ - b ttbr_error -usettbr0: - mrc p15, 0, r2, c2, c0, 0 - ldr r5, ttbrbit_mask - and r2, r5 - mov r4, pc - ldr r5, table_index_mask - and r4, r5 @ r4 = 31 to 20 bits of pc - /* Extract the value to be written to table entry */ - ldr r1, table_entry - /* r1 has the value to be written to table entry*/ - add r1, r1, r4 - /* Getting the address of table entry to modify */ - lsr r4, #18 - /* r2 has the location which needs to be modified */ - add r2, r4 - /* Storing previous entry of location being modified */ - ldr r5, scratchpad_base - ldr r4, [r2] - str r4, [r5, #0xC0] - /* Modify the table entry */ - str r1, [r2] - /* - * Storing 
address of entry being modified - * - will be restored after enabling MMU - */ - ldr r5, scratchpad_base - str r2, [r5, #0xC4] - - mov r0, #0 - mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer - mcr p15, 0, r0, c7, c5, 6 @ Invalidate branch predictor array - mcr p15, 0, r0, c8, c5, 0 @ Invalidate instruction TLB - mcr p15, 0, r0, c8, c6, 0 @ Invalidate data TLB - /* - * Restore control register. This enables the MMU. - * The caches and prediction are not enabled here, they - * will be enabled after restoring the MMU table entry. - */ - ldmia r3!, {r4} - /* Store previous value of control register in scratchpad */ - str r4, [r5, #0xC8] - ldr r2, cache_pred_disable_mask - and r4, r2 - mcr p15, 0, r4, c1, c0, 0 - dsb - isb - ldr r0, =restoremmu_on - bx r0 + .ltorg /* - * ============================== - * == Exit point from OFF mode == - * ============================== + * Local variables */ -restoremmu_on: - ldmfd sp!, {r0-r12, pc} @ restore regs and return - +pm_prepwstst_core_p: + .word PM_PREPWSTST_CORE_P +pm_pwstctrl_mpu: + .word PM_PWSTCTRL_MPU_P +scratchpad_base: + .word SCRATCHPAD_BASE_P +sram_base: + .word SRAM_BASE_P + 0x8000 +control_stat: + .word CONTROL_STAT +control_mem_rta: + .word CONTROL_MEM_RTA_CTRL +l2dis_3630: + .word 0 /* * Internal functions */ -/* This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 */ +/* + * This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 + * Copied to and run from SRAM in order to reconfigure the SDRC parameters. + */ .text .align 3 ENTRY(es3_sdrc_fix) @@ -609,6 +591,9 @@ ENTRY(es3_sdrc_fix) str r5, [r4] @ kick off refreshes bx lr +/* + * Local variables + */ .align sdrc_syscfg: .word SDRC_SYSCONFIG_P @@ -627,128 +612,3 @@ sdrc_manual_1: ENDPROC(es3_sdrc_fix) ENTRY(es3_sdrc_fix_sz) .word . - es3_sdrc_fix - -/* - * This function implements the erratum ID i581 WA: - * SDRC state restore before accessing the SDRAM - * - * Only used at return from non-OFF mode. For OFF - * mode the ROM code configures the SDRC and - * the DPLL before calling the restore code directly - * from DDR. - */ - -/* Make sure SDRC accesses are ok */ -wait_sdrc_ok: - -/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */ - ldr r4, cm_idlest_ckgen -wait_dpll3_lock: - ldr r5, [r4] - tst r5, #1 - beq wait_dpll3_lock - - ldr r4, cm_idlest1_core -wait_sdrc_ready: - ldr r5, [r4] - tst r5, #0x2 - bne wait_sdrc_ready - /* allow DLL powerdown upon hw idle req */ - ldr r4, sdrc_power - ldr r5, [r4] - bic r5, r5, #0x40 - str r5, [r4] - -/* - * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a - * base instead. - * Be careful not to clobber r7 when maintaing this code. - */ - -is_dll_in_lock_mode: - /* Is dll in lock mode? 
*/ - ldr r4, sdrc_dlla_ctrl - ldr r5, [r4] - tst r5, #0x4 - bxne lr @ Return if locked - /* wait till dll locks */ - adr r7, kick_counter -wait_dll_lock_timed: - ldr r4, wait_dll_lock_counter - add r4, r4, #1 - str r4, [r7, #wait_dll_lock_counter - kick_counter] - ldr r4, sdrc_dlla_status - /* Wait 20uS for lock */ - mov r6, #8 -wait_dll_lock: - subs r6, r6, #0x1 - beq kick_dll - ldr r5, [r4] - and r5, r5, #0x4 - cmp r5, #0x4 - bne wait_dll_lock - bx lr @ Return when locked - - /* disable/reenable DLL if not locked */ -kick_dll: - ldr r4, sdrc_dlla_ctrl - ldr r5, [r4] - mov r6, r5 - bic r6, #(1<<3) @ disable dll - str r6, [r4] - dsb - orr r6, r6, #(1<<3) @ enable dll - str r6, [r4] - dsb - ldr r4, kick_counter - add r4, r4, #1 - str r4, [r7] @ kick_counter - b wait_dll_lock_timed - - .align -cm_idlest1_core: - .word CM_IDLEST1_CORE_V -cm_idlest_ckgen: - .word CM_IDLEST_CKGEN_V -sdrc_dlla_status: - .word SDRC_DLLA_STATUS_V -sdrc_dlla_ctrl: - .word SDRC_DLLA_CTRL_V -pm_prepwstst_core_p: - .word PM_PREPWSTST_CORE_P -pm_pwstctrl_mpu: - .word PM_PWSTCTRL_MPU_P -scratchpad_base: - .word SCRATCHPAD_BASE_P -sram_base: - .word SRAM_BASE_P + 0x8000 -sdrc_power: - .word SDRC_POWER_V -ttbrbit_mask: - .word 0xFFFFC000 -table_index_mask: - .word 0xFFF00000 -table_entry: - .word 0x00000C02 -cache_pred_disable_mask: - .word 0xFFFFE7FB -control_stat: - .word CONTROL_STAT -control_mem_rta: - .word CONTROL_MEM_RTA_CTRL -kernel_flush: - .word v7_flush_dcache_all -l2dis_3630: - .word 0 - /* - * When exporting to userspace while the counters are in SRAM, - * these 2 words need to be at the end to facilitate retrival! - */ -kick_counter: - .word 0 -wait_dll_lock_counter: - .word 0 -ENDPROC(omap34xx_cpu_suspend) - -ENTRY(omap34xx_cpu_suspend_sz) - .word . - omap34xx_cpu_suspend diff --git a/arch/arm/mach-pnx4008/include/mach/entry-macro.S b/arch/arm/mach-pnx4008/include/mach/entry-macro.S index 8003037578ed..db7eeebf30d7 100644 --- a/arch/arm/mach-pnx4008/include/mach/entry-macro.S +++ b/arch/arm/mach-pnx4008/include/mach/entry-macro.S @@ -120,8 +120,3 @@ 1003: .endm - - .macro irq_prio_table - .endm - - diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c index a10996782476..bc55d07566ca 100644 --- a/arch/arm/mach-pxa/cm-x2xx.c +++ b/arch/arm/mach-pxa/cm-x2xx.c @@ -518,4 +518,7 @@ MACHINE_START(ARMCORE, "Compulab CM-X2XX") .init_irq = cmx2xx_init_irq, .timer = &pxa_timer, .init_machine = cmx2xx_init, +#ifdef CONFIG_PCI + .dma_zone_size = SZ_64M, +#endif MACHINE_END diff --git a/arch/arm/mach-pxa/include/mach/memory.h b/arch/arm/mach-pxa/include/mach/memory.h index 07734f37f8fd..d05a59727d66 100644 --- a/arch/arm/mach-pxa/include/mach/memory.h +++ b/arch/arm/mach-pxa/include/mach/memory.h @@ -17,8 +17,4 @@ */ #define PLAT_PHYS_OFFSET UL(0xa0000000) -#if defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI) -#define ARM_DMA_ZONE_SIZE SZ_64M -#endif - #endif diff --git a/arch/arm/mach-pxa/include/mach/pm.h b/arch/arm/mach-pxa/include/mach/pm.h index f15afe012995..51558bcee999 100644 --- a/arch/arm/mach-pxa/include/mach/pm.h +++ b/arch/arm/mach-pxa/include/mach/pm.h @@ -22,8 +22,8 @@ struct pxa_cpu_pm_fns { extern struct pxa_cpu_pm_fns *pxa_cpu_pm_fns; /* sleep.S */ -extern void pxa25x_cpu_suspend(unsigned int, long); -extern void pxa27x_cpu_suspend(unsigned int, long); +extern int pxa25x_finish_suspend(unsigned long); +extern int pxa27x_finish_suspend(unsigned long); extern int pxa_pm_enter(suspend_state_t state); extern int pxa_pm_prepare(void); diff --git a/arch/arm/mach-pxa/palmz72.c 
b/arch/arm/mach-pxa/palmz72.c index 65f24f0b77e8..5a5329bc33f1 100644 --- a/arch/arm/mach-pxa/palmz72.c +++ b/arch/arm/mach-pxa/palmz72.c @@ -33,6 +33,7 @@ #include #include +#include #include #include diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c index 51e1583265b2..37178a8559b1 100644 --- a/arch/arm/mach-pxa/pm.c +++ b/arch/arm/mach-pxa/pm.c @@ -42,7 +42,6 @@ int pxa_pm_enter(suspend_state_t state) /* *** go zzz *** */ pxa_cpu_pm_fns->enter(state); - cpu_init(); if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) { /* after sleeping, validate the checksum */ diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c index fed363cec9c6..9c434d21a271 100644 --- a/arch/arm/mach-pxa/pxa25x.c +++ b/arch/arm/mach-pxa/pxa25x.c @@ -25,6 +25,7 @@ #include #include +#include #include #include #include @@ -244,7 +245,7 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state) switch (state) { case PM_SUSPEND_MEM: - pxa25x_cpu_suspend(PWRMODE_SLEEP, PLAT_PHYS_OFFSET - PAGE_OFFSET); + cpu_suspend(PWRMODE_SLEEP, pxa25x_finish_suspend); break; } } diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index 2fecbec58d88..9d2400b5f503 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -284,6 +285,11 @@ void pxa27x_cpu_pm_restore(unsigned long *sleep_save) void pxa27x_cpu_pm_enter(suspend_state_t state) { extern void pxa_cpu_standby(void); +#ifndef CONFIG_IWMMXT + u64 acc0; + + asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0)); +#endif /* ensure voltage-change sequencer not initiated, which hangs */ PCFR &= ~PCFR_FVC; @@ -299,7 +305,10 @@ void pxa27x_cpu_pm_enter(suspend_state_t state) pxa_cpu_standby(); break; case PM_SUSPEND_MEM: - pxa27x_cpu_suspend(pwrmode, PLAT_PHYS_OFFSET - PAGE_OFFSET); + cpu_suspend(pwrmode, pxa27x_finish_suspend); +#ifndef CONFIG_IWMMXT + asm volatile("mar acc0, %Q0, %R0" : "=r" (acc0)); +#endif break; } } diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index 8521d7d6f1da..ef1c56a67afc 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -141,8 +142,13 @@ static void pxa3xx_cpu_pm_suspend(void) { volatile unsigned long *p = (volatile void *)0xc0000000; unsigned long saved_data = *p; +#ifndef CONFIG_IWMMXT + u64 acc0; - extern void pxa3xx_cpu_suspend(long); + asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0)); +#endif + + extern int pxa3xx_finish_suspend(unsigned long); /* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */ CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM); @@ -162,11 +168,15 @@ static void pxa3xx_cpu_pm_suspend(void) /* overwrite with the resume address */ *p = virt_to_phys(cpu_resume); - pxa3xx_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET); + cpu_suspend(0, pxa3xx_finish_suspend); *p = saved_data; AD3ER = 0; + +#ifndef CONFIG_IWMMXT + asm volatile("mar acc0, %Q0, %R0" : "=r" (acc0)); +#endif } static void pxa3xx_cpu_pm_enter(suspend_state_t state) diff --git a/arch/arm/mach-pxa/sleep.S b/arch/arm/mach-pxa/sleep.S index 6f5368899d84..1e544be9905d 100644 --- a/arch/arm/mach-pxa/sleep.S +++ b/arch/arm/mach-pxa/sleep.S @@ -24,20 +24,9 @@ #ifdef CONFIG_PXA3xx /* - * pxa3xx_cpu_suspend() - forces CPU into sleep state (S2D3C4) - * - * r0 = v:p offset + * pxa3xx_finish_suspend() - forces CPU into sleep state (S2D3C4) */ -ENTRY(pxa3xx_cpu_suspend) - -#ifndef CONFIG_IWMMXT - mra r2, r3, acc0 -#endif - 
stmfd sp!, {r2 - r12, lr} @ save registers on stack - mov r1, r0 - ldr r3, =pxa_cpu_resume @ resume function - bl cpu_suspend - +ENTRY(pxa3xx_finish_suspend) mov r0, #0x06 @ S2D3C4 mode mcr p14, 0, r0, c7, c0, 0 @ enter sleep @@ -46,28 +35,18 @@ ENTRY(pxa3xx_cpu_suspend) #ifdef CONFIG_PXA27x /* - * pxa27x_cpu_suspend() + * pxa27x_finish_suspend() * * Forces CPU into sleep state. * * r0 = value for PWRMODE M field for desired sleep state - * r1 = v:p offset */ -ENTRY(pxa27x_cpu_suspend) - -#ifndef CONFIG_IWMMXT - mra r2, r3, acc0 -#endif - stmfd sp!, {r2 - r12, lr} @ save registers on stack - mov r4, r0 @ save sleep mode - ldr r3, =pxa_cpu_resume @ resume function - bl cpu_suspend - +ENTRY(pxa27x_finish_suspend) @ Put the processor to sleep @ (also workaround for sighting 28071) @ prepare value for sleep mode - mov r1, r4 @ sleep mode + mov r1, r0 @ sleep mode @ prepare pointer to physical address 0 (virtual mapping in generic.c) mov r2, #UNCACHED_PHYS_0 @@ -99,21 +78,16 @@ ENTRY(pxa27x_cpu_suspend) #ifdef CONFIG_PXA25x /* - * pxa25x_cpu_suspend() + * pxa25x_finish_suspend() * * Forces CPU into sleep state. * * r0 = value for PWRMODE M field for desired sleep state - * r1 = v:p offset */ -ENTRY(pxa25x_cpu_suspend) - stmfd sp!, {r2 - r12, lr} @ save registers on stack - mov r4, r0 @ save sleep mode - ldr r3, =pxa_cpu_resume @ resume function - bl cpu_suspend +ENTRY(pxa25x_finish_suspend) @ prepare value for sleep mode - mov r1, r4 @ sleep mode + mov r1, r0 @ sleep mode @ prepare pointer to physical address 0 (virtual mapping in generic.c) mov r2, #UNCACHED_PHYS_0 @@ -195,16 +169,3 @@ pxa_cpu_do_suspend: mcr p14, 0, r1, c7, c0, 0 @ PWRMODE 20: b 20b @ loop waiting for sleep - -/* - * pxa_cpu_resume() - * - * entry point from bootloader into kernel during resume - */ - .align 5 -pxa_cpu_resume: - ldmfd sp!, {r2, r3} -#ifndef CONFIG_IWMMXT - mar acc0, r2, r3 -#endif - ldmfd sp!, {r4 - r12, pc} @ return to caller diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index 00363c7ac182..9b99cc164de5 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c @@ -31,6 +31,7 @@ #include #include +#include #include #include @@ -676,7 +677,7 @@ static struct pxa2xx_udc_mach_info zeus_udc_info = { static void zeus_power_off(void) { local_irq_disable(); - pxa27x_cpu_suspend(PWRMODE_DEEPSLEEP, PLAT_PHYS_OFFSET - PAGE_OFFSET); + cpu_suspend(PWRMODE_DEEPSLEEP, pxa27x_finish_suspend); } #else #define zeus_power_off NULL diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig index b9a9805e4828..dba6d0c1fc17 100644 --- a/arch/arm/mach-realview/Kconfig +++ b/arch/arm/mach-realview/Kconfig @@ -50,6 +50,7 @@ config MACH_REALVIEW_PB1176 bool "Support RealView(R) Platform Baseboard for ARM1176JZF-S" select CPU_V6 select ARM_GIC + select HAVE_TCM help Include support for the ARM(R) RealView(R) Platform Baseboard for ARM1176JZF-S. 
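
The PXA conversions above, and the SA11x0/S3C/S5P ones that follow, all move to the same generic suspend pattern: the old platform-specific *_cpu_suspend assembly wrappers (which saved registers, took a v:p offset and trampolined through a private resume function) are replaced by a C-callable "finish" function handed to the generic cpu_suspend(). A minimal sketch of the resulting shape is below; it is illustrative only, the example_* names are placeholders, and the header locations are assumptions rather than something these hunks show.

#include <asm/suspend.h>	/* cpu_suspend(), assumed location */
#include <asm/proc-fns.h>	/* cpu_do_idle() */

/*
 * Runs after common code has saved the CPU state. It should enter
 * the low power state and never return; platforms that cannot
 * guarantee that either return nonzero or panic (compare the
 * "sleep resumed to originator?" panics added below).
 */
static int example_finish_suspend(unsigned long arg)
{
	/* ... program the power controller using 'arg' here ... */
	cpu_do_idle();		/* stand-in for the real sleep entry */
	return 1;		/* reached only if the sleep was aborted */
}

static void example_pm_enter(void)
{
	/*
	 * After wakeup, common code resumes via cpu_resume, restores
	 * the saved state and returns here, so the per-platform resume
	 * trampolines deleted above are no longer needed.
	 */
	cpu_suspend(0, example_finish_suspend);
}
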
diff --git a/arch/arm/mach-realview/include/mach/memory.h b/arch/arm/mach-realview/include/mach/memory.h index 1759fa673eea..2022e092f0ca 100644 --- a/arch/arm/mach-realview/include/mach/memory.h +++ b/arch/arm/mach-realview/include/mach/memory.h @@ -29,10 +29,6 @@ #define PLAT_PHYS_OFFSET UL(0x00000000) #endif -#ifdef CONFIG_ZONE_DMA -#define ARM_DMA_ZONE_SIZE SZ_256M -#endif - #ifdef CONFIG_SPARSEMEM /* diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c index 963bf0d8119a..4ae943bafa92 100644 --- a/arch/arm/mach-realview/platsmp.c +++ b/arch/arm/mach-realview/platsmp.c @@ -68,14 +68,6 @@ void __init smp_init_cpus(void) void __init platform_smp_prepare_cpus(unsigned int max_cpus) { - int i; - - /* - * Initialise the present map, which describes the set of CPUs - * actually populated at the present time. - */ - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); scu_enable(scu_base_addr()); diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c index 10e75faba4c9..7a4e3b18cb3e 100644 --- a/arch/arm/mach-realview/realview_eb.c +++ b/arch/arm/mach-realview/realview_eb.c @@ -470,4 +470,7 @@ MACHINE_START(REALVIEW_EB, "ARM-RealView EB") .init_irq = gic_init_irq, .timer = &realview_eb_timer, .init_machine = realview_eb_init, +#ifdef CONFIG_ZONE_DMA + .dma_zone_size = SZ_256M, +#endif MACHINE_END diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c index eab6070f66d0..ad5671acb66a 100644 --- a/arch/arm/mach-realview/realview_pb1176.c +++ b/arch/arm/mach-realview/realview_pb1176.c @@ -365,4 +365,7 @@ MACHINE_START(REALVIEW_PB1176, "ARM-RealView PB1176") .init_irq = gic_init_irq, .timer = &realview_pb1176_timer, .init_machine = realview_pb1176_init, +#ifdef CONFIG_ZONE_DMA + .dma_zone_size = SZ_256M, +#endif MACHINE_END diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c index b2985fc7cd4e..b43644b3685e 100644 --- a/arch/arm/mach-realview/realview_pb11mp.c +++ b/arch/arm/mach-realview/realview_pb11mp.c @@ -367,4 +367,7 @@ MACHINE_START(REALVIEW_PB11MP, "ARM-RealView PB11MPCore") .init_irq = gic_init_irq, .timer = &realview_pb11mp_timer, .init_machine = realview_pb11mp_init, +#ifdef CONFIG_ZONE_DMA + .dma_zone_size = SZ_256M, +#endif MACHINE_END diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c index fb6866558760..763e8f38c15d 100644 --- a/arch/arm/mach-realview/realview_pba8.c +++ b/arch/arm/mach-realview/realview_pba8.c @@ -317,4 +317,7 @@ MACHINE_START(REALVIEW_PBA8, "ARM-RealView PB-A8") .init_irq = gic_init_irq, .timer = &realview_pba8_timer, .init_machine = realview_pba8_init, +#ifdef CONFIG_ZONE_DMA + .dma_zone_size = SZ_256M, +#endif MACHINE_END diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c index 92ace2cf2b2c..363b0ab56150 100644 --- a/arch/arm/mach-realview/realview_pbx.c +++ b/arch/arm/mach-realview/realview_pbx.c @@ -400,4 +400,7 @@ MACHINE_START(REALVIEW_PBX, "ARM-RealView PBX") .init_irq = gic_init_irq, .timer = &realview_pbx_timer, .init_machine = realview_pbx_init, +#ifdef CONFIG_ZONE_DMA + .dma_zone_size = SZ_256M, +#endif MACHINE_END diff --git a/arch/arm/mach-s3c2412/pm.c b/arch/arm/mach-s3c2412/pm.c index 752b13a7b3db..f4077efa51fa 100644 --- a/arch/arm/mach-s3c2412/pm.c +++ b/arch/arm/mach-s3c2412/pm.c @@ -37,12 +37,10 @@ extern void s3c2412_sleep_enter(void); -static void s3c2412_cpu_suspend(void) +static int 
s3c2412_cpu_suspend(unsigned long arg) { unsigned long tmp; - flush_cache_all(); - /* set our standby method to sleep */ tmp = __raw_readl(S3C2412_PWRCFG); @@ -50,6 +48,8 @@ static void s3c2412_cpu_suspend(void) __raw_writel(tmp, S3C2412_PWRCFG); s3c2412_sleep_enter(); + + panic("sleep resumed to originator?"); } static void s3c2412_pm_prepare(void) diff --git a/arch/arm/mach-s3c2416/pm.c b/arch/arm/mach-s3c2416/pm.c index 41db2b21e213..9ec54f1d8e75 100644 --- a/arch/arm/mach-s3c2416/pm.c +++ b/arch/arm/mach-s3c2416/pm.c @@ -24,10 +24,8 @@ extern void s3c2412_sleep_enter(void); -static void s3c2416_cpu_suspend(void) +static int s3c2416_cpu_suspend(unsigned long arg) { - flush_cache_all(); - /* enable wakeup sources regardless of battery state */ __raw_writel(S3C2443_PWRCFG_SLEEP, S3C2443_PWRCFG); @@ -35,6 +33,8 @@ static void s3c2416_cpu_suspend(void) __raw_writel(0x2BED, S3C2443_PWRMODE); s3c2412_sleep_enter(); + + panic("sleep resumed to originator?"); } static void s3c2416_pm_prepare(void) diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c index bc1c470b7de6..8bad64370689 100644 --- a/arch/arm/mach-s3c64xx/pm.c +++ b/arch/arm/mach-s3c64xx/pm.c @@ -112,7 +112,7 @@ void s3c_pm_save_core(void) * this. */ -static void s3c64xx_cpu_suspend(void) +static int s3c64xx_cpu_suspend(unsigned long arg) { unsigned long tmp; diff --git a/arch/arm/mach-s3c64xx/sleep.S b/arch/arm/mach-s3c64xx/sleep.S index 1f87732b2320..34313f9c8792 100644 --- a/arch/arm/mach-s3c64xx/sleep.S +++ b/arch/arm/mach-s3c64xx/sleep.S @@ -25,29 +25,6 @@ .text - /* s3c_cpu_save - * - * Save enough processor state to allow the restart of the pm.c - * code after resume. - * - * entry: - * r1 = v:p offset - */ - -ENTRY(s3c_cpu_save) - stmfd sp!, { r4 - r12, lr } - ldr r3, =resume_with_mmu - bl cpu_suspend - - @@ call final suspend code - ldr r0, =pm_cpu_sleep - ldr pc, [r0] - - @@ return to the caller, after the MMU is turned on. - @@ restore the last bits of the stack and return. -resume_with_mmu: - ldmfd sp!, { r4 - r12, pc } @ return, from sp from s3c_cpu_save - /* Sleep magic, the word before the resume entry point so that the * bootloader can check for a resumeable image. */ diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c index 24febae3d4c0..309e388a8a83 100644 --- a/arch/arm/mach-s5pv210/pm.c +++ b/arch/arm/mach-s5pv210/pm.c @@ -88,7 +88,7 @@ static struct sleep_save s5pv210_core_save[] = { SAVE_ITEM(S3C2410_TCNTO(0)), }; -void s5pv210_cpu_suspend(void) +void s5pv210_cpu_suspend(unsigned long arg) { unsigned long tmp; diff --git a/arch/arm/mach-s5pv210/sleep.S b/arch/arm/mach-s5pv210/sleep.S index a3d649466fb1..e3452ccd4b08 100644 --- a/arch/arm/mach-s5pv210/sleep.S +++ b/arch/arm/mach-s5pv210/sleep.S @@ -32,27 +32,6 @@ .text - /* s3c_cpu_save - * - * entry: - * r1 = v:p offset - */ - -ENTRY(s3c_cpu_save) - - stmfd sp!, { r3 - r12, lr } - ldr r3, =resume_with_mmu - bl cpu_suspend - - ldr r0, =pm_cpu_sleep - ldr r0, [ r0 ] - mov pc, r0 - -resume_with_mmu: - ldmfd sp!, { r3 - r12, pc } - - .ltorg - /* sleep magic, to allow the bootloader to check for an valid * image to resume to. Must be the first word before the * s3c_cpu_resume entry. 
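
The DMA zone changes interleaved through this patch (cm-x2xx and RealView above, the SA1100 boards and Shark below) all have the same shape: the compile-time ARM_DMA_ZONE_SIZE define is removed from the mach-specific memory header and the size becomes a per-machine field in the machine record. A minimal sketch of a converted board file follows; EXAMPLE and "Example board" are placeholders, not a real machine, and the include list is an assumption.

#include <asm/sizes.h>		/* SZ_* constants */
#include <asm/mach/arch.h>	/* MACHINE_START/MACHINE_END */

MACHINE_START(EXAMPLE, "Example board")
	/* .map_io, .init_irq, .timer, .init_machine hooks as before */
	.dma_zone_size	= SZ_64M,	/* only the first 64MB is DMA-able */
MACHINE_END

Generic code then sizes ZONE_DMA from this field instead of from a per-mach define and, as the arch/arm/mm/init.c hunk further down computes, derives the GFP_DMA allocation limit as arm_dma_limit = PHYS_OFFSET + dma_zone_size - 1, falling back to 0xffffffff when no machine-specific limit is set.
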
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index 5778274a8260..26257df19b63 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c @@ -453,4 +453,7 @@ MACHINE_START(ASSABET, "Intel-Assabet") .init_irq = sa1100_init_irq, .timer = &sa1100_timer, .init_machine = assabet_init, +#ifdef CONFIG_SA1111 + .dma_zone_size = SZ_1M, +#endif MACHINE_END diff --git a/arch/arm/mach-sa1100/badge4.c b/arch/arm/mach-sa1100/badge4.c index 4f19ff868b00..b4311b0a4395 100644 --- a/arch/arm/mach-sa1100/badge4.c +++ b/arch/arm/mach-sa1100/badge4.c @@ -306,4 +306,7 @@ MACHINE_START(BADGE4, "Hewlett-Packard Laboratories BadgePAD 4") .map_io = badge4_map_io, .init_irq = sa1100_init_irq, .timer = &sa1100_timer, +#ifdef CONFIG_SA1111 + .dma_zone_size = SZ_1M, +#endif MACHINE_END diff --git a/arch/arm/mach-sa1100/include/mach/memory.h b/arch/arm/mach-sa1100/include/mach/memory.h index cff31ee246b7..12d376795abc 100644 --- a/arch/arm/mach-sa1100/include/mach/memory.h +++ b/arch/arm/mach-sa1100/include/mach/memory.h @@ -14,10 +14,6 @@ */ #define PLAT_PHYS_OFFSET UL(0xc0000000) -#ifdef CONFIG_SA1111 -#define ARM_DMA_ZONE_SIZE SZ_1M -#endif - /* * Because of the wide memory address space between physical RAM banks on the * SA1100, it's much convenient to use Linux's SparseMEM support to implement diff --git a/arch/arm/mach-sa1100/jornada720.c b/arch/arm/mach-sa1100/jornada720.c index 491ac9f20fb4..176c066aec7e 100644 --- a/arch/arm/mach-sa1100/jornada720.c +++ b/arch/arm/mach-sa1100/jornada720.c @@ -369,4 +369,7 @@ MACHINE_START(JORNADA720, "HP Jornada 720") .init_irq = sa1100_init_irq, .timer = &sa1100_timer, .init_machine = jornada720_mach_init, +#ifdef CONFIG_SA1111 + .dma_zone_size = SZ_1M, +#endif MACHINE_END diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c index c4661aab22fb..bf85b8b259d5 100644 --- a/arch/arm/mach-sa1100/pm.c +++ b/arch/arm/mach-sa1100/pm.c @@ -29,10 +29,11 @@ #include #include +#include #include #include -extern void sa1100_cpu_suspend(long); +extern int sa1100_finish_suspend(unsigned long); #define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x #define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x] @@ -75,9 +76,7 @@ static int sa11x0_pm_enter(suspend_state_t state) PSPR = virt_to_phys(cpu_resume); /* go zzz */ - sa1100_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET); - - cpu_init(); + cpu_suspend(0, sa1100_finish_suspend); /* * Ensure not to come back here if it wasn't intended diff --git a/arch/arm/mach-sa1100/sleep.S b/arch/arm/mach-sa1100/sleep.S index 04f2a618d4ef..e8223315b442 100644 --- a/arch/arm/mach-sa1100/sleep.S +++ b/arch/arm/mach-sa1100/sleep.S @@ -22,18 +22,13 @@ .text /* - * sa1100_cpu_suspend() + * sa1100_finish_suspend() * * Causes sa11x0 to enter sleep state * */ -ENTRY(sa1100_cpu_suspend) - stmfd sp!, {r4 - r12, lr} @ save registers on stack - mov r1, r0 - ldr r3, =sa1100_cpu_resume @ return function - bl cpu_suspend - +ENTRY(sa1100_finish_suspend) @ disable clock switching mcr p15, 0, r1, c15, c2, 2 @@ -139,13 +134,3 @@ sa1110_sdram_controller_fix: str r13, [r12] 20: b 20b @ loop waiting for sleep - -/* - * cpu_sa1100_resume() - * - * entry point from bootloader into kernel during resume - */ - .align 5 -sa1100_cpu_resume: - mcr p15, 0, r1, c15, c1, 2 @ enable clock switching - ldmfd sp!, {r4 - r12, pc} @ return to caller diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c index 5cf7f94c1f31..ac2873c8014b 100644 --- a/arch/arm/mach-shark/core.c +++ b/arch/arm/mach-shark/core.c @@ -156,4 +156,5 @@ 
MACHINE_START(SHARK, "Shark") .map_io = shark_map_io, .init_irq = shark_init_irq, .timer = &shark_timer, + .dma_zone_size = SZ_4M, MACHINE_END diff --git a/arch/arm/mach-shark/include/mach/entry-macro.S b/arch/arm/mach-shark/include/mach/entry-macro.S index e2853c0a3333..0bb6cc626eb7 100644 --- a/arch/arm/mach-shark/include/mach/entry-macro.S +++ b/arch/arm/mach-shark/include/mach/entry-macro.S @@ -11,17 +11,17 @@ .endm .macro get_irqnr_preamble, base, tmp + mov \base, #0xe0000000 .endm .macro arch_ret_to_user, tmp1, tmp2 .endm .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - mov r4, #0xe0000000 mov \irqstat, #0x0C - strb \irqstat, [r4, #0x20] @outb(0x0C, 0x20) /* Poll command */ - ldrb \irqnr, [r4, #0x20] @irq = inb(0x20) & 7 + strb \irqstat, [\base, #0x20] @outb(0x0C, 0x20) /* Poll command */ + ldrb \irqnr, [\base, #0x20] @irq = inb(0x20) & 7 and \irqstat, \irqnr, #0x80 teq \irqstat, #0 beq 43f @@ -29,8 +29,8 @@ teq \irqnr, #2 bne 44f 43: mov \irqstat, #0x0C - strb \irqstat, [r4, #0xa0] @outb(0x0C, 0xA0) /* Poll command */ - ldrb \irqnr, [r4, #0xa0] @irq = (inb(0xA0) & 7) + 8 + strb \irqstat, [\base, #0xa0] @outb(0x0C, 0xA0) /* Poll command */ + ldrb \irqnr, [\base, #0xa0] @irq = (inb(0xA0) & 7) + 8 and \irqstat, \irqnr, #0x80 teq \irqstat, #0 beq 44f diff --git a/arch/arm/mach-shark/include/mach/memory.h b/arch/arm/mach-shark/include/mach/memory.h index 4c0831f83b0c..1cf8d6962617 100644 --- a/arch/arm/mach-shark/include/mach/memory.h +++ b/arch/arm/mach-shark/include/mach/memory.h @@ -17,8 +17,6 @@ */ #define PLAT_PHYS_OFFSET UL(0x08000000) -#define ARM_DMA_ZONE_SIZE SZ_4M - /* * Cache flushing area */ diff --git a/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h b/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h new file mode 100644 index 000000000000..4a81b01f1e8f --- /dev/null +++ b/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h @@ -0,0 +1,21 @@ +#ifndef SDHI_SH7372_H +#define SDHI_SH7372_H + +#define SDGENCNTA 0xfe40009c + +/* The countdown of SDGENCNTA is controlled by + * ZB3D2CLK which runs at 149.5MHz. + * That is 149.5ticks/us. Approximate this as 150ticks/us. + */ +static void udelay(int us) +{ + __raw_writel(us * 150, SDGENCNTA); + while(__raw_readl(SDGENCNTA)) ; +} + +static void msleep(int ms) +{ + udelay(ms * 1000); +} + +#endif diff --git a/arch/arm/mach-shmobile/include/mach/sdhi.h b/arch/arm/mach-shmobile/include/mach/sdhi.h new file mode 100644 index 000000000000..0ec9e69f2c3b --- /dev/null +++ b/arch/arm/mach-shmobile/include/mach/sdhi.h @@ -0,0 +1,16 @@ +#ifndef SDHI_H +#define SDHI_H + +/************************************************** + * + * CPU specific settings + * + **************************************************/ + +#ifdef CONFIG_ARCH_SH7372 +#include "mach/sdhi-sh7372.h" +#else +#error "unsupported CPU." 
+#endif + +#endif /* SDHI_H */ diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c index f3888feb1c68..66f980625a33 100644 --- a/arch/arm/mach-shmobile/platsmp.c +++ b/arch/arm/mach-shmobile/platsmp.c @@ -64,10 +64,5 @@ void __init smp_init_cpus(void) void __init platform_smp_prepare_cpus(unsigned int max_cpus) { - int i; - - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); - shmobile_smp_prepare_cpus(); } diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c index b8ae3c978dee..1a594dce8fbc 100644 --- a/arch/arm/mach-tegra/platsmp.c +++ b/arch/arm/mach-tegra/platsmp.c @@ -129,14 +129,6 @@ void __init smp_init_cpus(void) void __init platform_smp_prepare_cpus(unsigned int max_cpus) { - int i; - - /* - * Initialise the present map, which describes the set of CPUs - * actually populated at the present time. - */ - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); scu_enable(scu_base); } diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c index 0c527fe2cebb..a33df5f4c27a 100644 --- a/arch/arm/mach-ux500/platsmp.c +++ b/arch/arm/mach-ux500/platsmp.c @@ -172,14 +172,6 @@ void __init smp_init_cpus(void) void __init platform_smp_prepare_cpus(unsigned int max_cpus) { - int i; - - /* - * Initialise the present map, which describes the set of CPUs - * actually populated at the present time. - */ - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); scu_enable(scu_base_addr()); wakeup_secondary(); diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c index 765a71ff7f3b..bfd32f52c2db 100644 --- a/arch/arm/mach-vexpress/ct-ca9x4.c +++ b/arch/arm/mach-vexpress/ct-ca9x4.c @@ -229,10 +229,6 @@ static void ct_ca9x4_init_cpu_map(void) static void ct_ca9x4_smp_enable(unsigned int max_cpus) { - int i; - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); - scu_enable(MMIO_P2V(A9_MPCORE_SCU)); } #endif diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S index 4f18f9e87bae..54473cd4aba9 100644 --- a/arch/arm/mm/abort-ev4.S +++ b/arch/arm/mm/abort-ev4.S @@ -3,14 +3,11 @@ /* * Function: v4_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r2 = pt_regs + * : r4 = aborted context pc + * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data @@ -21,10 +18,8 @@ ENTRY(v4_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - ldr r3, [r2] @ read aborted ARM instruction + ldr r3, [r4] @ read aborted ARM instruction bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r3, #1 << 20 @ L = 1 -> write? orreq r1, r1, #1 << 11 @ yes. 
- mov pc, lr - - + b do_DataAbort diff --git a/arch/arm/mm/abort-ev4t.S b/arch/arm/mm/abort-ev4t.S index b6282548f922..9da704e7b86e 100644 --- a/arch/arm/mm/abort-ev4t.S +++ b/arch/arm/mm/abort-ev4t.S @@ -4,14 +4,11 @@ /* * Function: v4t_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r2 = pt_regs + * : r4 = aborted context pc + * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data @@ -22,9 +19,9 @@ ENTRY(v4t_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - do_thumb_abort - ldreq r3, [r2] @ read aborted ARM instruction + do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 + ldreq r3, [r4] @ read aborted ARM instruction bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r3, #1 << 20 @ check write orreq r1, r1, #1 << 11 - mov pc, lr + b do_DataAbort diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S index 02251b526c0d..a0908d4653a3 100644 --- a/arch/arm/mm/abort-ev5t.S +++ b/arch/arm/mm/abort-ev5t.S @@ -4,14 +4,11 @@ /* * Function: v5t_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r2 = pt_regs + * : r4 = aborted context pc + * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data @@ -22,10 +19,10 @@ ENTRY(v5t_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - do_thumb_abort - ldreq r3, [r2] @ read aborted ARM instruction + do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 + ldreq r3, [r4] @ read aborted ARM instruction bic r1, r1, #1 << 11 @ clear bits 11 of FSR - do_ldrd_abort + do_ldrd_abort tmp=ip, insn=r3 tst r3, #1 << 20 @ check write orreq r1, r1, #1 << 11 - mov pc, lr + b do_DataAbort diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S index bce68d601c8b..4006b7a61264 100644 --- a/arch/arm/mm/abort-ev5tj.S +++ b/arch/arm/mm/abort-ev5tj.S @@ -4,14 +4,11 @@ /* * Function: v5tj_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r2 = pt_regs + * : r4 = aborted context pc + * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data @@ -23,13 +20,11 @@ ENTRY(v5tj_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR - tst r3, #PSR_J_BIT @ Java? - movne pc, lr - do_thumb_abort - ldreq r3, [r2] @ read aborted ARM instruction - do_ldrd_abort + tst r5, #PSR_J_BIT @ Java? + bne do_DataAbort + do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 + ldreq r3, [r4] @ read aborted ARM instruction + do_ldrd_abort tmp=ip, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. 
- mov pc, lr - - + b do_DataAbort diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index 1478aa522144..ff1f7cc11f87 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -4,14 +4,11 @@ /* * Function: v6_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r2 = pt_regs + * : r4 = aborted context pc + * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data @@ -33,16 +30,14 @@ ENTRY(v6_early_abort) * The test below covers all the write situations, including Java bytecodes */ bic r1, r1, #1 << 11 @ clear bit 11 of FSR - tst r3, #PSR_J_BIT @ Java? - movne pc, lr - do_thumb_abort - ldreq r3, [r2] @ read aborted ARM instruction + tst r5, #PSR_J_BIT @ Java? + bne do_DataAbort + do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 + ldreq r3, [r4] @ read aborted ARM instruction #ifdef CONFIG_CPU_ENDIAN_BE8 reveq r3, r3 #endif - do_ldrd_abort + do_ldrd_abort tmp=ip, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. - mov pc, lr - - + b do_DataAbort diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S index ec88b157d3bb..703375277ba6 100644 --- a/arch/arm/mm/abort-ev7.S +++ b/arch/arm/mm/abort-ev7.S @@ -3,14 +3,11 @@ /* * Function: v7_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r2 = pt_regs + * : r4 = aborted context pc + * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. */ @@ -37,18 +34,18 @@ ENTRY(v7_early_abort) ldr r3, =0x40d @ On permission fault and r3, r1, r3 cmp r3, #0x0d - movne pc, lr + bne do_DataAbort mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR isb - mrc p15, 0, r2, c7, c4, 0 @ Read the PAR - and r3, r2, #0x7b @ On translation fault + mrc p15, 0, ip, c7, c4, 0 @ Read the PAR + and r3, ip, #0x7b @ On translation fault cmp r3, #0x0b - movne pc, lr + bne do_DataAbort bic r1, r1, #0xf @ Fix up FSR FS[5:0] - and r2, r2, #0x7e - orr r1, r1, r2, LSR #1 + and ip, ip, #0x7e + orr r1, r1, ip, LSR #1 #endif - mov pc, lr + b do_DataAbort ENDPROC(v7_early_abort) diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S index 9fb7b0e25ea1..f3982580c273 100644 --- a/arch/arm/mm/abort-lv4t.S +++ b/arch/arm/mm/abort-lv4t.S @@ -3,14 +3,11 @@ /* * Function: v4t_late_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r2 = pt_regs + * : r4 = aborted context pc + * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4-r5, r10-r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data @@ -18,7 +15,7 @@ * picture. Unfortunately, this does happen. We live with it. 
*/ ENTRY(v4t_late_abort) - tst r3, #PSR_T_BIT @ check for thumb mode + tst r5, #PSR_T_BIT @ check for thumb mode #ifdef CONFIG_CPU_CP15_MMU mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR @@ -28,7 +25,7 @@ ENTRY(v4t_late_abort) mov r1, #0 #endif bne .data_thumb_abort - ldr r8, [r2] @ read arm instruction + ldr r8, [r4] @ read arm instruction tst r8, #1 << 20 @ L = 1 -> write? orreq r1, r1, #1 << 11 @ yes. and r7, r8, #15 << 24 @@ -47,86 +44,84 @@ ENTRY(v4t_late_abort) /* 9 */ b .data_arm_ldmstm @ ldm*b rn, /* a */ b .data_unknown /* b */ b .data_unknown -/* c */ mov pc, lr @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m -/* d */ mov pc, lr @ ldc rd, [rn, #m] +/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m +/* d */ b do_DataAbort @ ldc rd, [rn, #m] /* e */ b .data_unknown /* f */ .data_unknown: @ Part of jumptable - mov r0, r2 + mov r0, r4 mov r1, r8 - mov r2, sp - bl baddataabort - b ret_from_exception + b baddataabort .data_arm_ldmstm: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup mov r7, #0x11 orr r7, r7, #0x1100 and r6, r8, r7 - and r2, r8, r7, lsl #1 - add r6, r6, r2, lsr #1 - and r2, r8, r7, lsl #2 - add r6, r6, r2, lsr #2 - and r2, r8, r7, lsl #3 - add r6, r6, r2, lsr #3 + and r9, r8, r7, lsl #1 + add r6, r6, r9, lsr #1 + and r9, r8, r7, lsl #2 + add r6, r6, r9, lsr #2 + and r9, r8, r7, lsl #3 + add r6, r6, r9, lsr #3 add r6, r6, r6, lsr #8 add r6, r6, r6, lsr #4 and r6, r6, #15 @ r6 = no. of registers to transfer. - and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsl #2 @ Undo increment addeq r7, r7, r6, lsl #2 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + str r7, [r2, r9, lsr #14] @ Put register 'Rn' + b do_DataAbort .data_arm_lateldrhpre: tst r8, #1 << 21 @ Check writeback bit - moveq pc, lr @ No writeback -> no fixup + beq do_DataAbort @ No writeback -> no fixup .data_arm_lateldrhpost: - and r5, r8, #0x00f @ get Rm / low nibble of immediate value + and r9, r8, #0x00f @ get Rm / low nibble of immediate value tst r8, #1 << 22 @ if (immediate offset) andne r6, r8, #0xf00 @ { immediate high nibble - orrne r6, r5, r6, lsr #4 @ combine nibbles } else - ldreq r6, [sp, r5, lsl #2] @ { load Rm value } + orrne r6, r9, r6, lsr #4 @ combine nibbles } else + ldreq r6, [r2, r9, lsl #2] @ { load Rm value } .data_arm_apply_r6_and_rn: - and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6 @ Undo incrmenet addeq r7, r7, r6 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + str r7, [r2, r9, lsr #14] @ Put register 'Rn' + b do_DataAbort .data_arm_lateldrpreconst: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostconst: - movs r2, r8, lsl #20 @ Get offset - moveq pc, lr @ zero -> no fixup - and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + movs r6, r8, lsl #20 @ Get offset + beq do_DataAbort @ zero -> no fixup + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr 
r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit - subne r7, r7, r2, lsr #20 @ Undo increment - addeq r7, r7, r2, lsr #20 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + subne r7, r7, r6, lsr #20 @ Undo increment + addeq r7, r7, r6, lsr #20 @ Undo decrement + str r7, [r2, r9, lsr #14] @ Put register 'Rn' + b do_DataAbort .data_arm_lateldrprereg: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostreg: and r7, r8, #15 @ Extract 'm' from instruction - ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' - mov r5, r8, lsr #7 @ get shift count - ands r5, r5, #31 + ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' + mov r9, r8, lsr #7 @ get shift count + ands r9, r9, #31 and r7, r8, #0x70 @ get shift type orreq r7, r7, #8 @ shift count = 0 add pc, pc, r7 nop - mov r6, r6, lsl r5 @ 0: LSL #!0 + mov r6, r6, lsl r9 @ 0: LSL #!0 b .data_arm_apply_r6_and_rn b .data_arm_apply_r6_and_rn @ 1: LSL #0 nop @@ -134,7 +129,7 @@ ENTRY(v4t_late_abort) nop b .data_unknown @ 3: MUL? nop - mov r6, r6, lsr r5 @ 4: LSR #!0 + mov r6, r6, lsr r9 @ 4: LSR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, lsr #32 @ 5: LSR #32 b .data_arm_apply_r6_and_rn @@ -142,7 +137,7 @@ ENTRY(v4t_late_abort) nop b .data_unknown @ 7: MUL? nop - mov r6, r6, asr r5 @ 8: ASR #!0 + mov r6, r6, asr r9 @ 8: ASR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, asr #32 @ 9: ASR #32 b .data_arm_apply_r6_and_rn @@ -150,7 +145,7 @@ ENTRY(v4t_late_abort) nop b .data_unknown @ B: MUL? nop - mov r6, r6, ror r5 @ C: ROR #!0 + mov r6, r6, ror r9 @ C: ROR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, rrx @ D: RRX b .data_arm_apply_r6_and_rn @@ -159,7 +154,7 @@ ENTRY(v4t_late_abort) b .data_unknown @ F: MUL? .data_thumb_abort: - ldrh r8, [r2] @ read instruction + ldrh r8, [r4] @ read instruction tst r8, #1 << 11 @ L = 1 -> write? 
orreq r1, r1, #1 << 8 @ yes and r7, r8, #15 << 12 @@ -172,10 +167,10 @@ ENTRY(v4t_late_abort) /* 3 */ b .data_unknown /* 4 */ b .data_unknown /* 5 */ b .data_thumb_reg -/* 6 */ mov pc, lr -/* 7 */ mov pc, lr -/* 8 */ mov pc, lr -/* 9 */ mov pc, lr +/* 6 */ b do_DataAbort +/* 7 */ b do_DataAbort +/* 8 */ b do_DataAbort +/* 9 */ b do_DataAbort /* A */ b .data_unknown /* B */ b .data_thumb_pushpop /* C */ b .data_thumb_ldmstm @@ -185,41 +180,41 @@ ENTRY(v4t_late_abort) .data_thumb_reg: tst r8, #1 << 9 - moveq pc, lr + beq do_DataAbort tst r8, #1 << 10 @ If 'S' (signed) bit is set movne r1, #0 @ it must be a load instr - mov pc, lr + b do_DataAbort .data_thumb_pushpop: tst r8, #1 << 10 beq .data_unknown and r6, r8, #0x55 @ hweight8(r8) + R bit - and r2, r8, #0xaa - add r6, r6, r2, lsr #1 - and r2, r6, #0xcc + and r9, r8, #0xaa + add r6, r6, r9, lsr #1 + and r9, r6, #0xcc and r6, r6, #0x33 - add r6, r6, r2, lsr #2 + add r6, r6, r9, lsr #2 movs r7, r8, lsr #9 @ C = r8 bit 8 (R bit) adc r6, r6, r6, lsr #4 @ high + low nibble + R bit and r6, r6, #15 @ number of regs to transfer - ldr r7, [sp, #13 << 2] + ldr r7, [r2, #13 << 2] tst r8, #1 << 11 addeq r7, r7, r6, lsl #2 @ increment SP if PUSH subne r7, r7, r6, lsl #2 @ decrement SP if POP - str r7, [sp, #13 << 2] - mov pc, lr + str r7, [r2, #13 << 2] + b do_DataAbort .data_thumb_ldmstm: and r6, r8, #0x55 @ hweight8(r8) - and r2, r8, #0xaa - add r6, r6, r2, lsr #1 - and r2, r6, #0xcc + and r9, r8, #0xaa + add r6, r6, r9, lsr #1 + and r9, r6, #0xcc and r6, r6, #0x33 - add r6, r6, r2, lsr #2 + add r6, r6, r9, lsr #2 add r6, r6, r6, lsr #4 - and r5, r8, #7 << 8 - ldr r7, [sp, r5, lsr #6] + and r9, r8, #7 << 8 + ldr r7, [r2, r9, lsr #6] and r6, r6, #15 @ number of regs to transfer sub r7, r7, r6, lsl #2 @ always decrement - str r7, [sp, r5, lsr #6] - mov pc, lr + str r7, [r2, r9, lsr #6] + b do_DataAbort diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S index d7cb1bfa51a4..52162d59407a 100644 --- a/arch/arm/mm/abort-macro.S +++ b/arch/arm/mm/abort-macro.S @@ -9,34 +9,32 @@ * */ - .macro do_thumb_abort - tst r3, #PSR_T_BIT + .macro do_thumb_abort, fsr, pc, psr, tmp + tst \psr, #PSR_T_BIT beq not_thumb - ldrh r3, [r2] @ Read aborted Thumb instruction - and r3, r3, # 0xfe00 @ Mask opcode field - cmp r3, # 0x5600 @ Is it ldrsb? - orreq r3, r3, #1 << 11 @ Set L-bit if yes - tst r3, #1 << 11 @ L = 0 -> write - orreq r1, r1, #1 << 11 @ yes. - mov pc, lr + ldrh \tmp, [\pc] @ Read aborted Thumb instruction + and \tmp, \tmp, # 0xfe00 @ Mask opcode field + cmp \tmp, # 0x5600 @ Is it ldrsb? + orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes + tst \tmp, #1 << 11 @ L = 0 -> write + orreq \psr, \psr, #1 << 11 @ yes. + b do_DataAbort not_thumb: .endm /* - * We check for the following insturction encoding for LDRD. + * We check for the following instruction encoding for LDRD. 
* - * [27:25] == 0 + * [27:25] == 000 * [7:4] == 1101 * [20] == 0 */ - .macro do_ldrd_abort - tst r3, #0x0e000000 @ [27:25] == 0 + .macro do_ldrd_abort, tmp, insn + tst \insn, #0x0e100000 @ [27:25,20] == 0 bne not_ldrd - and r2, r3, #0x000000f0 @ [7:4] == 1101 - cmp r2, #0x000000d0 - bne not_ldrd - tst r3, #1 << 20 @ [20] == 0 - moveq pc, lr + and \tmp, \insn, #0x000000f0 @ [7:4] == 1101 + cmp \tmp, #0x000000d0 + beq do_DataAbort not_ldrd: .endm diff --git a/arch/arm/mm/abort-nommu.S b/arch/arm/mm/abort-nommu.S index 625e580945b5..119cb479c2ab 100644 --- a/arch/arm/mm/abort-nommu.S +++ b/arch/arm/mm/abort-nommu.S @@ -3,11 +3,11 @@ /* * Function: nommu_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r2 = pt_regs + * : r4 = aborted context pc + * : r5 = aborted context psr * - * Returns : r0 = 0 (abort address) - * : r1 = 0 (FSR) + * Returns : r4 - r11, r13 preserved * * Note: There is no FSR/FAR on !CPU_CP15_MMU cores. * Just fill zero into the registers. @@ -16,5 +16,5 @@ ENTRY(nommu_early_abort) mov r0, #0 @ clear r0, r1 (no FSR/FAR) mov r1, #0 - mov pc, lr + b do_DataAbort ENDPROC(nommu_early_abort) diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 724ba3bce72c..be7c638b648b 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -727,6 +727,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) int isize = 4; int thumb2_32b = 0; + if (interrupts_enabled(regs)) + local_irq_enable(); + instrptr = instruction_pointer(regs); fs = get_fs(); diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S index 1fa6f71470de..072016371093 100644 --- a/arch/arm/mm/cache-fa.S +++ b/arch/arm/mm/cache-fa.S @@ -242,16 +242,5 @@ ENDPROC(fa_dma_unmap_area) __INITDATA - .type fa_cache_fns, #object -ENTRY(fa_cache_fns) - .long fa_flush_icache_all - .long fa_flush_kern_cache_all - .long fa_flush_user_cache_all - .long fa_flush_user_cache_range - .long fa_coherent_kern_range - .long fa_coherent_user_range - .long fa_flush_kern_dcache_area - .long fa_dma_map_area - .long fa_dma_unmap_area - .long fa_dma_flush_range - .size fa_cache_fns, . - fa_cache_fns + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions fa diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S index 2e2bc406a18d..c2301f226100 100644 --- a/arch/arm/mm/cache-v3.S +++ b/arch/arm/mm/cache-v3.S @@ -129,16 +129,5 @@ ENDPROC(v3_dma_map_area) __INITDATA - .type v3_cache_fns, #object -ENTRY(v3_cache_fns) - .long v3_flush_icache_all - .long v3_flush_kern_cache_all - .long v3_flush_user_cache_all - .long v3_flush_user_cache_range - .long v3_coherent_kern_range - .long v3_coherent_user_range - .long v3_flush_kern_dcache_area - .long v3_dma_map_area - .long v3_dma_unmap_area - .long v3_dma_flush_range - .size v3_cache_fns, . - v3_cache_fns + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions v3 diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index a8fefb523f19..fd9bb7addc8d 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S @@ -141,16 +141,5 @@ ENDPROC(v4_dma_map_area) __INITDATA - .type v4_cache_fns, #object -ENTRY(v4_cache_fns) - .long v4_flush_icache_all - .long v4_flush_kern_cache_all - .long v4_flush_user_cache_all - .long v4_flush_user_cache_range - .long v4_coherent_kern_range - .long v4_coherent_user_range - .long v4_flush_kern_dcache_area - .long v4_dma_map_area - .long v4_dma_unmap_area - .long v4_dma_flush_range - .size v4_cache_fns, . 
- v4_cache_fns + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions v4 diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S index f40c69656d8d..4f2c14151ccb 100644 --- a/arch/arm/mm/cache-v4wb.S +++ b/arch/arm/mm/cache-v4wb.S @@ -253,16 +253,5 @@ ENDPROC(v4wb_dma_unmap_area) __INITDATA - .type v4wb_cache_fns, #object -ENTRY(v4wb_cache_fns) - .long v4wb_flush_icache_all - .long v4wb_flush_kern_cache_all - .long v4wb_flush_user_cache_all - .long v4wb_flush_user_cache_range - .long v4wb_coherent_kern_range - .long v4wb_coherent_user_range - .long v4wb_flush_kern_dcache_area - .long v4wb_dma_map_area - .long v4wb_dma_unmap_area - .long v4wb_dma_flush_range - .size v4wb_cache_fns, . - v4wb_cache_fns + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions v4wb diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S index a7b276dbda11..4d7b467631ce 100644 --- a/arch/arm/mm/cache-v4wt.S +++ b/arch/arm/mm/cache-v4wt.S @@ -197,16 +197,5 @@ ENDPROC(v4wt_dma_map_area) __INITDATA - .type v4wt_cache_fns, #object -ENTRY(v4wt_cache_fns) - .long v4wt_flush_icache_all - .long v4wt_flush_kern_cache_all - .long v4wt_flush_user_cache_all - .long v4wt_flush_user_cache_range - .long v4wt_coherent_kern_range - .long v4wt_coherent_user_range - .long v4wt_flush_kern_dcache_area - .long v4wt_dma_map_area - .long v4wt_dma_unmap_area - .long v4wt_dma_flush_range - .size v4wt_cache_fns, . - v4wt_cache_fns + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions v4wt diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 73b4a8b66a57..74c2e5a33a4d 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -330,16 +330,5 @@ ENDPROC(v6_dma_unmap_area) __INITDATA - .type v6_cache_fns, #object -ENTRY(v6_cache_fns) - .long v6_flush_icache_all - .long v6_flush_kern_cache_all - .long v6_flush_user_cache_all - .long v6_flush_user_cache_range - .long v6_coherent_kern_range - .long v6_coherent_user_range - .long v6_flush_kern_dcache_area - .long v6_dma_map_area - .long v6_dma_unmap_area - .long v6_dma_flush_range - .size v6_cache_fns, . - v6_cache_fns + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions v6 diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index d32f02b61866..3b24bfa3b828 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -325,16 +325,5 @@ ENDPROC(v7_dma_unmap_area) __INITDATA - .type v7_cache_fns, #object -ENTRY(v7_cache_fns) - .long v7_flush_icache_all - .long v7_flush_kern_cache_all - .long v7_flush_user_cache_all - .long v7_flush_user_cache_range - .long v7_coherent_kern_range - .long v7_coherent_user_range - .long v7_flush_kern_dcache_area - .long v7_dma_map_area - .long v7_dma_unmap_area - .long v7_dma_flush_range - .size v7_cache_fns, . 
- v7_cache_fns + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions v7 diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index bdba6c65c901..63cca0097130 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c @@ -41,7 +41,6 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to, kfrom = kmap_atomic(from, KM_USER0); kto = kmap_atomic(to, KM_USER1); copy_page(kto, kfrom); - __cpuc_flush_dcache_area(kto, PAGE_SIZE); kunmap_atomic(kto, KM_USER1); kunmap_atomic(kfrom, KM_USER0); } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 82a093cee09a..0a0a1e7c20d2 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -25,9 +25,11 @@ #include #include +#include "mm.h" + static u64 get_coherent_dma_mask(struct device *dev) { - u64 mask = ISA_DMA_THRESHOLD; + u64 mask = (u64)arm_dma_limit; if (dev) { mask = dev->coherent_dma_mask; @@ -41,10 +43,10 @@ static u64 get_coherent_dma_mask(struct device *dev) return 0; } - if ((~mask) & ISA_DMA_THRESHOLD) { + if ((~mask) & (u64)arm_dma_limit) { dev_warn(dev, "coherent DMA mask %#llx is smaller " "than system GFP_DMA mask %#llx\n", - mask, (unsigned long long)ISA_DMA_THRESHOLD); + mask, (u64)arm_dma_limit); return 0; } } @@ -657,6 +659,33 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, } EXPORT_SYMBOL(dma_sync_sg_for_device); +/* + * Return whether the given device DMA address mask can be supported + * properly. For example, if your device can only drive the low 24-bits + * during bus mastering, then you would pass 0x00ffffff as the mask + * to this function. + */ +int dma_supported(struct device *dev, u64 mask) +{ + if (mask < (u64)arm_dma_limit) + return 0; + return 1; +} +EXPORT_SYMBOL(dma_supported); + +int dma_set_mask(struct device *dev, u64 dma_mask) +{ + if (!dev->dma_mask || !dma_supported(dev, dma_mask)) + return -EIO; + +#ifndef CONFIG_DMABOUNCE + *dev->dma_mask = dma_mask; +#endif + + return 0; +} +EXPORT_SYMBOL(dma_set_mask); + #define PREALLOC_DMA_DEBUG_ENTRIES 4096 static int __init dma_debug_do_init(void) diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 9ea4f7ddd665..3b5ea68acbb8 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -94,7 +94,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr) pud = pud_offset(pgd, addr); if (PTRS_PER_PUD != 1) - printk(", *pud=%08lx", pud_val(*pud)); + printk(", *pud=%08llx", (long long)pud_val(*pud)); if (pud_none(*pud)) break; @@ -285,6 +285,10 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) tsk = current; mm = tsk->mm; + /* Enable interrupts if they were enabled in the parent context. */ + if (interrupts_enabled(regs)) + local_irq_enable(); + /* * If we're in an interrupt or have no user * context, we must not take the fault.. diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index c19571c40a21..2fee782077c1 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -212,6 +212,18 @@ static void __init arm_bootmem_init(unsigned long start_pfn, } #ifdef CONFIG_ZONE_DMA + +unsigned long arm_dma_zone_size __read_mostly; +EXPORT_SYMBOL(arm_dma_zone_size); + +/* + * The DMA mask corresponding to the maximum bus address allocatable + * using GFP_DMA. The default here places no restriction on DMA + * allocations. This must be the smallest DMA mask in the system, + * so a successful GFP_DMA allocation will always satisfy this. 
+ */ +u32 arm_dma_limit; + static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, unsigned long dma_size) { @@ -267,17 +279,17 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, #endif } -#ifdef ARM_DMA_ZONE_SIZE -#ifndef CONFIG_ZONE_DMA -#error ARM_DMA_ZONE_SIZE set but no DMA zone to limit allocations -#endif - +#ifdef CONFIG_ZONE_DMA /* * Adjust the sizes according to any special requirements for * this machine type. */ - arm_adjust_dma_zone(zone_size, zhole_size, - ARM_DMA_ZONE_SIZE >> PAGE_SHIFT); + if (arm_dma_zone_size) { + arm_adjust_dma_zone(zone_size, zhole_size, + arm_dma_zone_size >> PAGE_SHIFT); + arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; + } else + arm_dma_limit = 0xffffffff; #endif free_area_init_node(0, zone_size, min, zhole_size); @@ -422,6 +434,17 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s) return pages; } +/* + * Poison init memory with an undefined instruction (ARM) or a branch to an + * undefined instruction (Thumb). + */ +static inline void poison_init_mem(void *s, size_t count) +{ + u32 *p = (u32 *)s; + while ((count = count - 4)) + *p++ = 0xe7fddef0; +} + static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) { @@ -639,8 +662,8 @@ void __init mem_init(void) " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" #endif " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" - " .init : 0x%p" " - 0x%p" " (%4d kB)\n" " .text : 0x%p" " - 0x%p" " (%4d kB)\n" + " .init : 0x%p" " - 0x%p" " (%4d kB)\n" " .data : 0x%p" " - 0x%p" " (%4d kB)\n" " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", @@ -662,8 +685,8 @@ void __init mem_init(void) #endif MLM(MODULES_VADDR, MODULES_END), - MLK_ROUNDUP(__init_begin, __init_end), MLK_ROUNDUP(_text, _etext), + MLK_ROUNDUP(__init_begin, __init_end), MLK_ROUNDUP(_sdata, _edata), MLK_ROUNDUP(__bss_start, __bss_stop)); @@ -704,11 +727,13 @@ void free_initmem(void) #ifdef CONFIG_HAVE_TCM extern char __tcm_start, __tcm_end; + poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), __phys_to_pfn(__pa(&__tcm_end)), "TCM link"); #endif + poison_init_mem(__init_begin, __init_end - __init_begin); if (!machine_is_integrator() && !machine_is_cintegrator()) totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), __phys_to_pfn(__pa(__init_end)), @@ -721,10 +746,12 @@ static int keep_initrd; void free_initrd_mem(unsigned long start, unsigned long end) { - if (!keep_initrd) + if (!keep_initrd) { + poison_init_mem((void *)start, PAGE_ALIGN(end) - start); totalram_pages += free_area(__phys_to_pfn(__pa(start)), __phys_to_pfn(__pa(end)), "initrd"); + } } static int __init keepinitrd_setup(char *__unused) diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 5b3d7d543659..010566799c80 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -23,5 +23,11 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page #endif +#ifdef CONFIG_ZONE_DMA +extern u32 arm_dma_limit; +#else +#define arm_dma_limit ((u32)~0) +#endif + void __init bootmem_init(void); void arm_mm_memblock_reserve(void); diff --git a/arch/arm/mm/pabort-legacy.S b/arch/arm/mm/pabort-legacy.S index 87970eba88ea..8bbff025269a 100644 --- a/arch/arm/mm/pabort-legacy.S +++ b/arch/arm/mm/pabort-legacy.S @@ -4,16 +4,18 @@ /* * Function: legacy_pabort * - * Params : r0 = address of aborted instruction + * Params : r2 = pt_regs + * : r4 = address of aborted instruction + * : r5 = psr for parent context * - * Returns : r0 = 
address of abort - * : r1 = Simulated IFSR with section translation fault status + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current prefetch abort. */ .align 5 ENTRY(legacy_pabort) + mov r0, r4 mov r1, #5 - mov pc, lr + b do_PrefetchAbort ENDPROC(legacy_pabort) diff --git a/arch/arm/mm/pabort-v6.S b/arch/arm/mm/pabort-v6.S index 06e3d1ef2115..9627646ce783 100644 --- a/arch/arm/mm/pabort-v6.S +++ b/arch/arm/mm/pabort-v6.S @@ -4,16 +4,18 @@ /* * Function: v6_pabort * - * Params : r0 = address of aborted instruction + * Params : r2 = pt_regs + * : r4 = address of aborted instruction + * : r5 = psr for parent context * - * Returns : r0 = address of abort - * : r1 = IFSR + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current prefetch abort. */ .align 5 ENTRY(v6_pabort) + mov r0, r4 mrc p15, 0, r1, c5, c0, 1 @ get IFSR - mov pc, lr + b do_PrefetchAbort ENDPROC(v6_pabort) diff --git a/arch/arm/mm/pabort-v7.S b/arch/arm/mm/pabort-v7.S index a8b3b300a18d..875761f44f3b 100644 --- a/arch/arm/mm/pabort-v7.S +++ b/arch/arm/mm/pabort-v7.S @@ -2,12 +2,13 @@ #include /* - * Function: v6_pabort + * Function: v7_pabort * - * Params : r0 = address of aborted instruction + * Params : r2 = pt_regs + * : r4 = address of aborted instruction + * : r5 = psr for parent context * - * Returns : r0 = address of abort - * : r1 = IFSR + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current prefetch abort. */ @@ -16,5 +17,5 @@ ENTRY(v7_pabort) mrc p15, 0, r0, c6, c0, 2 @ get IFAR mrc p15, 0, r1, c5, c0, 1 @ get IFSR - mov pc, lr + b do_PrefetchAbort ENDPROC(v7_pabort) diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 6c4e7fd6c8af..67469665d47a 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -364,17 +364,8 @@ ENTRY(arm1020_dma_unmap_area) mov pc, lr ENDPROC(arm1020_dma_unmap_area) -ENTRY(arm1020_cache_fns) - .long arm1020_flush_icache_all - .long arm1020_flush_kern_cache_all - .long arm1020_flush_user_cache_all - .long arm1020_flush_user_cache_range - .long arm1020_coherent_kern_range - .long arm1020_coherent_user_range - .long arm1020_flush_kern_dcache_area - .long arm1020_dma_map_area - .long arm1020_dma_unmap_area - .long arm1020_dma_flush_range + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions arm1020 .align 5 ENTRY(cpu_arm1020_dcache_clean_area) @@ -477,38 +468,14 @@ arm1020_crval: crval clear=0x0000593f, mmuset=0x00003935, ucset=0x00001930 __INITDATA + @ define struct processor (see and proc-macros.S) + define_processor_functions arm1020, dabort=v4t_early_abort, pabort=legacy_pabort -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type arm1020_processor_functions, #object -arm1020_processor_functions: - .word v4t_early_abort - .word legacy_pabort - .word cpu_arm1020_proc_init - .word cpu_arm1020_proc_fin - .word cpu_arm1020_reset - .word cpu_arm1020_do_idle - .word cpu_arm1020_dcache_clean_area - .word cpu_arm1020_switch_mm - .word cpu_arm1020_set_pte_ext - .word 0 - .word 0 - .word 0 - .size arm1020_processor_functions, . - arm1020_processor_functions .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv5t" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v5" - .size cpu_elf_name, . 
- cpu_elf_name + string cpu_arch_name, "armv5t" + string cpu_elf_name, "v5" .type cpu_arm1020_name, #object cpu_arm1020_name: diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 4ce947c19623..4251421c0ed5 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -350,17 +350,8 @@ ENTRY(arm1020e_dma_unmap_area) mov pc, lr ENDPROC(arm1020e_dma_unmap_area) -ENTRY(arm1020e_cache_fns) - .long arm1020e_flush_icache_all - .long arm1020e_flush_kern_cache_all - .long arm1020e_flush_user_cache_all - .long arm1020e_flush_user_cache_range - .long arm1020e_coherent_kern_range - .long arm1020e_coherent_user_range - .long arm1020e_flush_kern_dcache_area - .long arm1020e_dma_map_area - .long arm1020e_dma_unmap_area - .long arm1020e_dma_flush_range + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions arm1020e .align 5 ENTRY(cpu_arm1020e_dcache_clean_area) @@ -458,43 +449,14 @@ arm1020e_crval: crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930 __INITDATA - -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type arm1020e_processor_functions, #object -arm1020e_processor_functions: - .word v4t_early_abort - .word legacy_pabort - .word cpu_arm1020e_proc_init - .word cpu_arm1020e_proc_fin - .word cpu_arm1020e_reset - .word cpu_arm1020e_do_idle - .word cpu_arm1020e_dcache_clean_area - .word cpu_arm1020e_switch_mm - .word cpu_arm1020e_set_pte_ext - .word 0 - .word 0 - .word 0 - .size arm1020e_processor_functions, . - arm1020e_processor_functions + @ define struct processor (see and proc-macros.S) + define_processor_functions arm1020e, dabort=v4t_early_abort, pabort=legacy_pabort .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv5te" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v5" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_arm1020e_name, #object -cpu_arm1020e_name: - .asciz "ARM1020E" - .size cpu_arm1020e_name, . 
- cpu_arm1020e_name + string cpu_arch_name, "armv5te" + string cpu_elf_name, "v5" + string cpu_arm1020e_name, "ARM1020E" .align diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index c8884c5413a2..d283cf3d06e3 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -339,17 +339,8 @@ ENTRY(arm1022_dma_unmap_area) mov pc, lr ENDPROC(arm1022_dma_unmap_area) -ENTRY(arm1022_cache_fns) - .long arm1022_flush_icache_all - .long arm1022_flush_kern_cache_all - .long arm1022_flush_user_cache_all - .long arm1022_flush_user_cache_range - .long arm1022_coherent_kern_range - .long arm1022_coherent_user_range - .long arm1022_flush_kern_dcache_area - .long arm1022_dma_map_area - .long arm1022_dma_unmap_area - .long arm1022_dma_flush_range + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions arm1022 .align 5 ENTRY(cpu_arm1022_dcache_clean_area) @@ -441,43 +432,14 @@ arm1022_crval: crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930 __INITDATA - -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type arm1022_processor_functions, #object -arm1022_processor_functions: - .word v4t_early_abort - .word legacy_pabort - .word cpu_arm1022_proc_init - .word cpu_arm1022_proc_fin - .word cpu_arm1022_reset - .word cpu_arm1022_do_idle - .word cpu_arm1022_dcache_clean_area - .word cpu_arm1022_switch_mm - .word cpu_arm1022_set_pte_ext - .word 0 - .word 0 - .word 0 - .size arm1022_processor_functions, . - arm1022_processor_functions + @ define struct processor (see and proc-macros.S) + define_processor_functions arm1022, dabort=v4t_early_abort, pabort=legacy_pabort .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv5te" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v5" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_arm1022_name, #object -cpu_arm1022_name: - .asciz "ARM1022" - .size cpu_arm1022_name, . - cpu_arm1022_name + string cpu_arch_name, "armv5te" + string cpu_elf_name, "v5" + string cpu_arm1022_name, "ARM1022" .align diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 413684660aad..678a1ceafed2 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -333,17 +333,8 @@ ENTRY(arm1026_dma_unmap_area) mov pc, lr ENDPROC(arm1026_dma_unmap_area) -ENTRY(arm1026_cache_fns) - .long arm1026_flush_icache_all - .long arm1026_flush_kern_cache_all - .long arm1026_flush_user_cache_all - .long arm1026_flush_user_cache_range - .long arm1026_coherent_kern_range - .long arm1026_coherent_user_range - .long arm1026_flush_kern_dcache_area - .long arm1026_dma_map_area - .long arm1026_dma_unmap_area - .long arm1026_dma_flush_range + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions arm1026 .align 5 ENTRY(cpu_arm1026_dcache_clean_area) @@ -436,45 +427,15 @@ arm1026_crval: crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001934 __INITDATA - -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type arm1026_processor_functions, #object -arm1026_processor_functions: - .word v5t_early_abort - .word legacy_pabort - .word cpu_arm1026_proc_init - .word cpu_arm1026_proc_fin - .word cpu_arm1026_reset - .word cpu_arm1026_do_idle - .word cpu_arm1026_dcache_clean_area - .word cpu_arm1026_switch_mm - .word cpu_arm1026_set_pte_ext - .word 0 - .word 0 - .word 0 - .size arm1026_processor_functions, . 
- arm1026_processor_functions + @ define struct processor (see and proc-macros.S) + define_processor_functions arm1026, dabort=v5t_early_abort, pabort=legacy_pabort .section .rodata - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv5tej" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v5" - .size cpu_elf_name, . - cpu_elf_name + string cpu_arch_name, "armv5tej" + string cpu_elf_name, "v5" .align - - .type cpu_arm1026_name, #object -cpu_arm1026_name: - .asciz "ARM1026EJ-S" - .size cpu_arm1026_name, . - cpu_arm1026_name - + string cpu_arm1026_name, "ARM1026EJ-S" .align .section ".proc.info.init", #alloc, #execinstr diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 5f79dc4ce3fb..e5b974cddac3 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -29,19 +29,19 @@ ENTRY(cpu_arm7_dcache_clean_area) /* * Function: arm6_7_data_abort () * - * Params : r2 = address of aborted instruction - * : sp = pointer to registers + * Params : r2 = pt_regs + * : r4 = aborted context pc + * : r5 = aborted context psr * * Purpose : obtain information about current aborted instruction * - * Returns : r0 = address of abort - * : r1 = FSR + * Returns : r4-r5, r10-r11, r13 preserved */ ENTRY(cpu_arm7_data_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - ldr r8, [r2] @ read arm instruction + ldr r8, [r4] @ read arm instruction tst r8, #1 << 20 @ L = 0 -> write? orreq r1, r1, #1 << 11 @ yes. and r7, r8, #15 << 24 @@ -49,7 +49,7 @@ ENTRY(cpu_arm7_data_abort) nop /* 0 */ b .data_unknown -/* 1 */ mov pc, lr @ swp +/* 1 */ b do_DataAbort @ swp /* 2 */ b .data_unknown /* 3 */ b .data_unknown /* 4 */ b .data_arm_lateldrpostconst @ ldr rd, [rn], #m @@ -60,87 +60,85 @@ ENTRY(cpu_arm7_data_abort) /* 9 */ b .data_arm_ldmstm @ ldm*b rn, /* a */ b .data_unknown /* b */ b .data_unknown -/* c */ mov pc, lr @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m -/* d */ mov pc, lr @ ldc rd, [rn, #m] +/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m +/* d */ b do_DataAbort @ ldc rd, [rn, #m] /* e */ b .data_unknown /* f */ .data_unknown: @ Part of jumptable - mov r0, r2 + mov r0, r4 mov r1, r8 - mov r2, sp - bl baddataabort - b ret_from_exception + b baddataabort ENTRY(cpu_arm6_data_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - ldr r8, [r2] @ read arm instruction + ldr r8, [r4] @ read arm instruction tst r8, #1 << 20 @ L = 0 -> write? orreq r1, r1, #1 << 11 @ yes. and r7, r8, #14 << 24 teq r7, #8 << 24 @ was it ldm/stm - movne pc, lr + bne do_DataAbort .data_arm_ldmstm: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup mov r7, #0x11 orr r7, r7, #0x1100 and r6, r8, r7 - and r2, r8, r7, lsl #1 - add r6, r6, r2, lsr #1 - and r2, r8, r7, lsl #2 - add r6, r6, r2, lsr #2 - and r2, r8, r7, lsl #3 - add r6, r6, r2, lsr #3 + and r9, r8, r7, lsl #1 + add r6, r6, r9, lsr #1 + and r9, r8, r7, lsl #2 + add r6, r6, r9, lsr #2 + and r9, r8, r7, lsl #3 + add r6, r6, r9, lsr #3 add r6, r6, r6, lsr #8 add r6, r6, r6, lsr #4 and r6, r6, #15 @ r6 = no. of registers to transfer. 
- and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsl #2 @ Undo increment addeq r7, r7, r6, lsl #2 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + str r7, [r2, r9, lsr #14] @ Put register 'Rn' + b do_DataAbort .data_arm_apply_r6_and_rn: - and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6 @ Undo incrmenet addeq r7, r7, r6 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + str r7, [r2, r9, lsr #14] @ Put register 'Rn' + b do_DataAbort .data_arm_lateldrpreconst: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostconst: - movs r2, r8, lsl #20 @ Get offset - moveq pc, lr @ zero -> no fixup - and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + movs r6, r8, lsl #20 @ Get offset + beq do_DataAbort @ zero -> no fixup + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit - subne r7, r7, r2, lsr #20 @ Undo increment - addeq r7, r7, r2, lsr #20 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + subne r7, r7, r6, lsr #20 @ Undo increment + addeq r7, r7, r6, lsr #20 @ Undo decrement + str r7, [r2, r9, lsr #14] @ Put register 'Rn' + b do_DataAbort .data_arm_lateldrprereg: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostreg: and r7, r8, #15 @ Extract 'm' from instruction - ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' - mov r5, r8, lsr #7 @ get shift count - ands r5, r5, #31 + ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' + mov r9, r8, lsr #7 @ get shift count + ands r9, r9, #31 and r7, r8, #0x70 @ get shift type orreq r7, r7, #8 @ shift count = 0 add pc, pc, r7 nop - mov r6, r6, lsl r5 @ 0: LSL #!0 + mov r6, r6, lsl r9 @ 0: LSL #!0 b .data_arm_apply_r6_and_rn b .data_arm_apply_r6_and_rn @ 1: LSL #0 nop @@ -148,7 +146,7 @@ ENTRY(cpu_arm6_data_abort) nop b .data_unknown @ 3: MUL? nop - mov r6, r6, lsr r5 @ 4: LSR #!0 + mov r6, r6, lsr r9 @ 4: LSR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, lsr #32 @ 5: LSR #32 b .data_arm_apply_r6_and_rn @@ -156,7 +154,7 @@ ENTRY(cpu_arm6_data_abort) nop b .data_unknown @ 7: MUL? nop - mov r6, r6, asr r5 @ 8: ASR #!0 + mov r6, r6, asr r9 @ 8: ASR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, asr #32 @ 9: ASR #32 b .data_arm_apply_r6_and_rn @@ -164,7 +162,7 @@ ENTRY(cpu_arm6_data_abort) nop b .data_unknown @ B: MUL? 
nop - mov r6, r6, ror r5 @ C: ROR #!0 + mov r6, r6, ror r9 @ C: ROR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, rrx @ D: RRX b .data_arm_apply_r6_and_rn @@ -269,159 +267,57 @@ __arm7_setup: mov r0, #0 __INITDATA -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type arm6_processor_functions, #object -ENTRY(arm6_processor_functions) - .word cpu_arm6_data_abort - .word legacy_pabort - .word cpu_arm6_proc_init - .word cpu_arm6_proc_fin - .word cpu_arm6_reset - .word cpu_arm6_do_idle - .word cpu_arm6_dcache_clean_area - .word cpu_arm6_switch_mm - .word cpu_arm6_set_pte_ext - .word 0 - .word 0 - .word 0 - .size arm6_processor_functions, . - arm6_processor_functions - -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type arm7_processor_functions, #object -ENTRY(arm7_processor_functions) - .word cpu_arm7_data_abort - .word legacy_pabort - .word cpu_arm7_proc_init - .word cpu_arm7_proc_fin - .word cpu_arm7_reset - .word cpu_arm7_do_idle - .word cpu_arm7_dcache_clean_area - .word cpu_arm7_switch_mm - .word cpu_arm7_set_pte_ext - .word 0 - .word 0 - .word 0 - .size arm7_processor_functions, . - arm7_processor_functions + @ define struct processor (see and proc-macros.S) + define_processor_functions arm6, dabort=cpu_arm6_data_abort, pabort=legacy_pabort + define_processor_functions arm7, dabort=cpu_arm7_data_abort, pabort=legacy_pabort .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: .asciz "armv3" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: .asciz "v3" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_arm6_name, #object -cpu_arm6_name: .asciz "ARM6" - .size cpu_arm6_name, . - cpu_arm6_name - - .type cpu_arm610_name, #object -cpu_arm610_name: - .asciz "ARM610" - .size cpu_arm610_name, . - cpu_arm610_name - - .type cpu_arm7_name, #object -cpu_arm7_name: .asciz "ARM7" - .size cpu_arm7_name, . - cpu_arm7_name - - .type cpu_arm710_name, #object -cpu_arm710_name: - .asciz "ARM710" - .size cpu_arm710_name, . - cpu_arm710_name + string cpu_arch_name, "armv3" + string cpu_elf_name, "v3" + string cpu_arm6_name, "ARM6" + string cpu_arm610_name, "ARM610" + string cpu_arm7_name, "ARM7" + string cpu_arm710_name, "ARM710" .align .section ".proc.info.init", #alloc, #execinstr - .type __arm6_proc_info, #object -__arm6_proc_info: - .long 0x41560600 - .long 0xfffffff0 - .long 0x00000c1e +.macro arm67_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \ + cpu_mm_mmu_flags:req, cpu_flush:req, cpu_proc_funcs:req + .type __\name\()_proc_info, #object +__\name\()_proc_info: + .long \cpu_val + .long \cpu_mask + .long \cpu_mm_mmu_flags .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __arm6_setup + b \cpu_flush .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_26BIT - .long cpu_arm6_name - .long arm6_processor_functions + .long \cpu_name + .long \cpu_proc_funcs .long v3_tlb_fns .long v3_user_fns .long v3_cache_fns - .size __arm6_proc_info, . - __arm6_proc_info + .size __\name\()_proc_info, . 
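A note on the abort-handler rework in the arm6/arm7 hunks above: with r2 now carrying the pt_regs pointer and r4/r5 the aborted context's pc/psr, every fixup path ends with "b do_DataAbort" instead of returning through lr. A sketch of the C handler they branch to (prototype as in arch/arm/mm/fault.c at the time; the asmlinkage/__exception qualifiers are omitted here), with the register mapping the assembly must leave in place:

struct pt_regs;	/* register file saved at exception entry, passed in r2 */

/*
 * r0 = fault address (FAR), r1 = fault status (FSR), r2 = pt_regs
 */
void do_DataAbort(unsigned long addr, unsigned int fsr,
		  struct pt_regs *regs);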
- __\name\()_proc_info +.endm - .type __arm610_proc_info, #object -__arm610_proc_info: - .long 0x41560610 - .long 0xfffffff0 - .long 0x00000c1e - .long PMD_TYPE_SECT | \ - PMD_BIT4 | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __arm6_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP | HWCAP_26BIT - .long cpu_arm610_name - .long arm6_processor_functions - .long v3_tlb_fns - .long v3_user_fns - .long v3_cache_fns - .size __arm610_proc_info, . - __arm610_proc_info - - .type __arm7_proc_info, #object -__arm7_proc_info: - .long 0x41007000 - .long 0xffffff00 - .long 0x00000c1e - .long PMD_TYPE_SECT | \ - PMD_BIT4 | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __arm7_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP | HWCAP_26BIT - .long cpu_arm7_name - .long arm7_processor_functions - .long v3_tlb_fns - .long v3_user_fns - .long v3_cache_fns - .size __arm7_proc_info, . - __arm7_proc_info - - .type __arm710_proc_info, #object -__arm710_proc_info: - .long 0x41007100 - .long 0xfff8ff00 - .long PMD_TYPE_SECT | \ + arm67_proc_info arm6, 0x41560600, 0xfffffff0, cpu_arm6_name, \ + 0x00000c1e, __arm6_setup, arm6_processor_functions + arm67_proc_info arm610, 0x41560610, 0xfffffff0, cpu_arm610_name, \ + 0x00000c1e, __arm6_setup, arm6_processor_functions + arm67_proc_info arm7, 0x41007000, 0xffffff00, cpu_arm7_name, \ + 0x00000c1e, __arm7_setup, arm7_processor_functions + arm67_proc_info arm710, 0x41007100, 0xfff8ff00, cpu_arm710_name, \ + PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_BIT4 | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __arm7_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP | HWCAP_26BIT - .long cpu_arm710_name - .long arm7_processor_functions - .long v3_tlb_fns - .long v3_user_fns - .long v3_cache_fns - .size __arm710_proc_info, . - __arm710_proc_info + PMD_SECT_AP_READ, \ + __arm7_setup, arm7_processor_functions diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index 7a06e5964f59..55f4e290665a 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S @@ -169,46 +169,15 @@ arm720_crval: crval clear=0x00002f3f, mmuset=0x0000213d, ucset=0x00000130 __INITDATA - -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type arm720_processor_functions, #object -ENTRY(arm720_processor_functions) - .word v4t_late_abort - .word legacy_pabort - .word cpu_arm720_proc_init - .word cpu_arm720_proc_fin - .word cpu_arm720_reset - .word cpu_arm720_do_idle - .word cpu_arm720_dcache_clean_area - .word cpu_arm720_switch_mm - .word cpu_arm720_set_pte_ext - .word 0 - .word 0 - .word 0 - .size arm720_processor_functions, . - arm720_processor_functions + @ define struct processor (see and proc-macros.S) + define_processor_functions arm720, dabort=v4t_late_abort, pabort=legacy_pabort .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: .asciz "armv4t" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: .asciz "v4" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_arm710_name, #object -cpu_arm710_name: - .asciz "ARM710T" - .size cpu_arm710_name, . - cpu_arm710_name - - .type cpu_arm720_name, #object -cpu_arm720_name: - .asciz "ARM720T" - .size cpu_arm720_name, . 
- cpu_arm720_name + string cpu_arch_name, "armv4t" + string cpu_elf_name, "v4" + string cpu_arm710_name, "ARM710T" + string cpu_arm720_name, "ARM720T" .align @@ -218,10 +187,11 @@ cpu_arm720_name: .section ".proc.info.init", #alloc, #execinstr - .type __arm710_proc_info, #object -__arm710_proc_info: - .long 0x41807100 @ cpu_val - .long 0xffffff00 @ cpu_mask +.macro arm720_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cpu_flush:req + .type __\name\()_proc_info,#object +__\name\()_proc_info: + .long \cpu_val + .long \cpu_mask .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ @@ -232,38 +202,17 @@ __arm710_proc_info: PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __arm710_setup @ cpu_flush + b \cpu_flush @ cpu_flush .long cpu_arch_name @ arch_name .long cpu_elf_name @ elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap - .long cpu_arm710_name @ name + .long \cpu_name .long arm720_processor_functions .long v4_tlb_fns .long v4wt_user_fns .long v4_cache_fns - .size __arm710_proc_info, . - __arm710_proc_info + .size __\name\()_proc_info, . - __\name\()_proc_info +.endm - .type __arm720_proc_info, #object -__arm720_proc_info: - .long 0x41807200 @ cpu_val - .long 0xffffff00 @ cpu_mask - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_BIT4 | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_BIT4 | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __arm720_setup @ cpu_flush - .long cpu_arch_name @ arch_name - .long cpu_elf_name @ elf_name - .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap - .long cpu_arm720_name @ name - .long arm720_processor_functions - .long v4_tlb_fns - .long v4wt_user_fns - .long v4_cache_fns - .size __arm720_proc_info, . - __arm720_proc_info + arm720_proc_info arm710, 0x41807100, 0xffffff00, cpu_arm710_name, __arm710_setup + arm720_proc_info arm720, 0x41807200, 0xffffff00, cpu_arm720_name, __arm720_setup diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index 6f9d12effee1..4506be3adda6 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S @@ -17,6 +17,8 @@ #include <asm/pgtable.h> #include <asm/ptrace.h> +#include "proc-macros.S" + .text /* * cpu_arm740_proc_init() @@ -115,42 +117,14 @@ __arm740_setup: __INITDATA -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type arm740_processor_functions, #object -ENTRY(arm740_processor_functions) - .word v4t_late_abort - .word legacy_pabort - .word cpu_arm740_proc_init - .word cpu_arm740_proc_fin - .word cpu_arm740_reset - .word cpu_arm740_do_idle - .word cpu_arm740_dcache_clean_area - .word cpu_arm740_switch_mm - .word 0 @ cpu_*_set_pte - .word 0 - .word 0 - .word 0 - .size arm740_processor_functions, . - arm740_processor_functions + @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) + define_processor_functions arm740, dabort=v4t_late_abort, pabort=legacy_pabort, nommu=1 .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv4" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v4" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_arm740_name, #object -cpu_arm740_name: - .ascii "ARM740T" - .size cpu_arm740_name, . - cpu_arm740_name + string cpu_arch_name, "armv4" + string cpu_elf_name, "v4" + string cpu_arm740_name, "ARM740T" .align @@ -170,5 +144,3 @@ __arm740_proc_info: .long 0 .long v3_cache_fns @ cache model .size __arm740_proc_info, . 
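For orientation, each __*_proc_info record built by these new *_proc_info macros fills in one struct proc_info_list, matched by the boot code against the running CPU. A C sketch, approximately as declared in <asm/procinfo.h>; the forward declarations stand in for the real definitions:

struct processor;
struct cpu_tlb_fns;
struct cpu_user_fns;
struct cpu_cache_fns;

struct proc_info_list {
	unsigned int		cpu_val;		/* MIDR match value */
	unsigned int		cpu_mask;		/* MIDR match mask */
	unsigned long		__cpu_mm_mmu_flags;	/* used by head.S */
	unsigned long		__cpu_io_mmu_flags;	/* used by head.S */
	unsigned long		__cpu_flush;		/* the "b \cpu_flush" slot */
	const char		*arch_name;
	const char		*elf_name;
	unsigned int		elf_hwcap;
	const char		*cpu_name;
	struct processor	*proc;
	struct cpu_tlb_fns	*tlb;
	struct cpu_user_fns	*user;
	struct cpu_cache_fns	*cache;
};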
- __arm740_proc_info - - diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index 537ffcb0646d..7e0e1fe4ed4d 100644 --- a/arch/arm/mm/proc-arm7tdmi.S +++ b/arch/arm/mm/proc-arm7tdmi.S @@ -17,6 +17,8 @@ #include <asm/pgtable.h> #include <asm/ptrace.h> +#include "proc-macros.S" + .text /* * cpu_arm7tdmi_proc_init() @@ -55,197 +57,57 @@ __arm7tdmi_setup: __INITDATA -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type arm7tdmi_processor_functions, #object -ENTRY(arm7tdmi_processor_functions) - .word v4t_late_abort - .word legacy_pabort - .word cpu_arm7tdmi_proc_init - .word cpu_arm7tdmi_proc_fin - .word cpu_arm7tdmi_reset - .word cpu_arm7tdmi_do_idle - .word cpu_arm7tdmi_dcache_clean_area - .word cpu_arm7tdmi_switch_mm - .word 0 @ cpu_*_set_pte - .word 0 - .word 0 - .word 0 - .size arm7tdmi_processor_functions, . - arm7tdmi_processor_functions + @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) + define_processor_functions arm7tdmi, dabort=v4t_late_abort, pabort=legacy_pabort, nommu=1 .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv4t" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v4" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_arm7tdmi_name, #object -cpu_arm7tdmi_name: - .asciz "ARM7TDMI" - .size cpu_arm7tdmi_name, . - cpu_arm7tdmi_name - - .type cpu_triscenda7_name, #object -cpu_triscenda7_name: - .asciz "Triscend-A7x" - .size cpu_triscenda7_name, . - cpu_triscenda7_name - - .type cpu_at91_name, #object -cpu_at91_name: - .asciz "Atmel-AT91M40xxx" - .size cpu_at91_name, . - cpu_at91_name - - .type cpu_s3c3410_name, #object -cpu_s3c3410_name: - .asciz "Samsung-S3C3410" - .size cpu_s3c3410_name, . - cpu_s3c3410_name - - .type cpu_s3c44b0x_name, #object -cpu_s3c44b0x_name: - .asciz "Samsung-S3C44B0x" - .size cpu_s3c44b0x_name, . - cpu_s3c44b0x_name - - .type cpu_s3c4510b, #object -cpu_s3c4510b_name: - .asciz "Samsung-S3C4510B" - .size cpu_s3c4510b_name, . - cpu_s3c4510b_name - - .type cpu_s3c4530_name, #object -cpu_s3c4530_name: - .asciz "Samsung-S3C4530" - .size cpu_s3c4530_name, . - cpu_s3c4530_name - - .type cpu_netarm_name, #object -cpu_netarm_name: - .asciz "NETARM" - .size cpu_netarm_name, . - cpu_netarm_name + string cpu_arch_name, "armv4t" + string cpu_elf_name, "v4" + string cpu_arm7tdmi_name, "ARM7TDMI" + string cpu_triscenda7_name, "Triscend-A7x" + string cpu_at91_name, "Atmel-AT91M40xxx" + string cpu_s3c3410_name, "Samsung-S3C3410" + string cpu_s3c44b0x_name, "Samsung-S3C44B0x" + string cpu_s3c4510b_name, "Samsung-S3C4510B" + string cpu_s3c4530_name, "Samsung-S3C4530" + string cpu_netarm_name, "NETARM" .align .section ".proc.info.init", #alloc, #execinstr - .type __arm7tdmi_proc_info, #object -__arm7tdmi_proc_info: - .long 0x41007700 - .long 0xfff8ff00 +.macro arm7tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \ extra_hwcaps=0 + .type __\name\()_proc_info, #object +__\name\()_proc_info: + .long \cpu_val + .long \cpu_mask .long 0 .long 0 b __arm7tdmi_setup .long cpu_arch_name .long cpu_elf_name - .long HWCAP_SWP | HWCAP_26BIT - .long cpu_arm7tdmi_name + .long HWCAP_SWP | HWCAP_26BIT | ( \extra_hwcaps ) + .long \cpu_name .long arm7tdmi_processor_functions .long 0 .long 0 .long v4_cache_fns - .size __arm7tdmi_proc_info, . - __arm7tdmi_proc_info + .size __\name\()_proc_info, . 
- __\name\()_proc_info +.endm - .type __triscenda7_proc_info, #object -__triscenda7_proc_info: - .long 0x0001d2ff - .long 0x0001ffff - .long 0 - .long 0 - b __arm7tdmi_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT - .long cpu_triscenda7_name - .long arm7tdmi_processor_functions - .long 0 - .long 0 - .long v4_cache_fns - .size __triscenda7_proc_info, . - __triscenda7_proc_info - - .type __at91_proc_info, #object -__at91_proc_info: - .long 0x14000040 - .long 0xfff000e0 - .long 0 - .long 0 - b __arm7tdmi_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT - .long cpu_at91_name - .long arm7tdmi_processor_functions - .long 0 - .long 0 - .long v4_cache_fns - .size __at91_proc_info, . - __at91_proc_info - - .type __s3c4510b_proc_info, #object -__s3c4510b_proc_info: - .long 0x36365000 - .long 0xfffff000 - .long 0 - .long 0 - b __arm7tdmi_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT - .long cpu_s3c4510b_name - .long arm7tdmi_processor_functions - .long 0 - .long 0 - .long v4_cache_fns - .size __s3c4510b_proc_info, . - __s3c4510b_proc_info - - .type __s3c4530_proc_info, #object -__s3c4530_proc_info: - .long 0x4c000000 - .long 0xfff000e0 - .long 0 - .long 0 - b __arm7tdmi_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT - .long cpu_s3c4530_name - .long arm7tdmi_processor_functions - .long 0 - .long 0 - .long v4_cache_fns - .size __s3c4530_proc_info, . - __s3c4530_proc_info - - .type __s3c3410_proc_info, #object -__s3c3410_proc_info: - .long 0x34100000 - .long 0xffff0000 - .long 0 - .long 0 - b __arm7tdmi_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT - .long cpu_s3c3410_name - .long arm7tdmi_processor_functions - .long 0 - .long 0 - .long v4_cache_fns - .size __s3c3410_proc_info, . - __s3c3410_proc_info - - .type __s3c44b0x_proc_info, #object -__s3c44b0x_proc_info: - .long 0x44b00000 - .long 0xffff0000 - .long 0 - .long 0 - b __arm7tdmi_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT - .long cpu_s3c44b0x_name - .long arm7tdmi_processor_functions - .long 0 - .long 0 - .long v4_cache_fns - .size __s3c44b0x_proc_info, . 
- __s3c44b0x_proc_info + arm7tdmi_proc_info arm7tdmi, 0x41007700, 0xfff8ff00, \ + cpu_arm7tdmi_name + arm7tdmi_proc_info triscenda7, 0x0001d2ff, 0x0001ffff, \ + cpu_triscenda7_name, extra_hwcaps=HWCAP_THUMB + arm7tdmi_proc_info at91, 0x14000040, 0xfff000e0, \ + cpu_at91_name, extra_hwcaps=HWCAP_THUMB + arm7tdmi_proc_info s3c4510b, 0x36365000, 0xfffff000, \ + cpu_s3c4510b_name, extra_hwcaps=HWCAP_THUMB + arm7tdmi_proc_info s3c4530, 0x4c000000, 0xfff000e0, \ + cpu_s3c4530_name, extra_hwcaps=HWCAP_THUMB + arm7tdmi_proc_info s3c3410, 0x34100000, 0xffff0000, \ + cpu_s3c3410_name, extra_hwcaps=HWCAP_THUMB + arm7tdmi_proc_info s3c44b0x, 0x44b00000, 0xffff0000, \ + cpu_s3c44b0x_name, extra_hwcaps=HWCAP_THUMB diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index bf8a1d1cccb6..92bd102e3982 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -315,18 +315,8 @@ ENTRY(arm920_dma_unmap_area) mov pc, lr ENDPROC(arm920_dma_unmap_area) -ENTRY(arm920_cache_fns) - .long arm920_flush_icache_all - .long arm920_flush_kern_cache_all - .long arm920_flush_user_cache_all - .long arm920_flush_user_cache_range - .long arm920_coherent_kern_range - .long arm920_coherent_user_range - .long arm920_flush_kern_dcache_area - .long arm920_dma_map_area - .long arm920_dma_unmap_area - .long arm920_dma_flush_range - + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions arm920 #endif @@ -416,9 +406,6 @@ ENTRY(cpu_arm920_do_resume) PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE b cpu_resume_mmu ENDPROC(cpu_arm920_do_resume) -#else -#define cpu_arm920_do_suspend 0 -#define cpu_arm920_do_resume 0 #endif __CPUINIT @@ -450,43 +437,14 @@ arm920_crval: crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130 __INITDATA - -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type arm920_processor_functions, #object -arm920_processor_functions: - .word v4t_early_abort - .word legacy_pabort - .word cpu_arm920_proc_init - .word cpu_arm920_proc_fin - .word cpu_arm920_reset - .word cpu_arm920_do_idle - .word cpu_arm920_dcache_clean_area - .word cpu_arm920_switch_mm - .word cpu_arm920_set_pte_ext - .word cpu_arm920_suspend_size - .word cpu_arm920_do_suspend - .word cpu_arm920_do_resume - .size arm920_processor_functions, . - arm920_processor_functions + @ define struct processor (see and proc-macros.S) + define_processor_functions arm920, dabort=v4t_early_abort, pabort=legacy_pabort, suspend=1 .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv4t" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v4" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_arm920_name, #object -cpu_arm920_name: - .asciz "ARM920T" - .size cpu_arm920_name, . 
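The extra_hwcaps argument above ORs additional HWCAP_* bits into the elf_hwcap word of the proc info record, and the kernel hands that word to user space through the ELF auxiliary vector (AT_HWCAP). A hypothetical user-space check; getauxval() is a later glibc addition (2.16+) used here only for brevity, and HWCAP_THUMB carries its value from asm/hwcap.h:

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_THUMB	(1 << 2)	/* as in asm/hwcap.h */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("Thumb supported: %s\n",
	       (hwcap & HWCAP_THUMB) ? "yes" : "no");
	return 0;
}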
- cpu_arm920_name + string cpu_arch_name, "armv4t" + string cpu_elf_name, "v4" + string cpu_arm920_name, "ARM920T" .align diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 95ba1fc56e4d..490e18833857 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -317,18 +317,8 @@ ENTRY(arm922_dma_unmap_area) mov pc, lr ENDPROC(arm922_dma_unmap_area) -ENTRY(arm922_cache_fns) - .long arm922_flush_icache_all - .long arm922_flush_kern_cache_all - .long arm922_flush_user_cache_all - .long arm922_flush_user_cache_range - .long arm922_coherent_kern_range - .long arm922_coherent_user_range - .long arm922_flush_kern_dcache_area - .long arm922_dma_map_area - .long arm922_dma_unmap_area - .long arm922_dma_flush_range - + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions arm922 #endif @@ -420,43 +410,14 @@ arm922_crval: crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130 __INITDATA - -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type arm922_processor_functions, #object -arm922_processor_functions: - .word v4t_early_abort - .word legacy_pabort - .word cpu_arm922_proc_init - .word cpu_arm922_proc_fin - .word cpu_arm922_reset - .word cpu_arm922_do_idle - .word cpu_arm922_dcache_clean_area - .word cpu_arm922_switch_mm - .word cpu_arm922_set_pte_ext - .word 0 - .word 0 - .word 0 - .size arm922_processor_functions, . - arm922_processor_functions + @ define struct processor (see and proc-macros.S) + define_processor_functions arm922, dabort=v4t_early_abort, pabort=legacy_pabort .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv4t" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v4" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_arm922_name, #object -cpu_arm922_name: - .asciz "ARM922T" - .size cpu_arm922_name, . - cpu_arm922_name + string cpu_arch_name, "armv4t" + string cpu_elf_name, "v4" + string cpu_arm922_name, "ARM922T" .align diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 541e4774eea1..51d494be057e 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -372,17 +372,8 @@ ENTRY(arm925_dma_unmap_area) mov pc, lr ENDPROC(arm925_dma_unmap_area) -ENTRY(arm925_cache_fns) - .long arm925_flush_icache_all - .long arm925_flush_kern_cache_all - .long arm925_flush_user_cache_all - .long arm925_flush_user_cache_range - .long arm925_coherent_kern_range - .long arm925_coherent_user_range - .long arm925_flush_kern_dcache_area - .long arm925_dma_map_area - .long arm925_dma_unmap_area - .long arm925_dma_flush_range + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions arm925 ENTRY(cpu_arm925_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH @@ -487,52 +478,24 @@ arm925_crval: crval clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130 __INITDATA - -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type arm925_processor_functions, #object -arm925_processor_functions: - .word v4t_early_abort - .word legacy_pabort - .word cpu_arm925_proc_init - .word cpu_arm925_proc_fin - .word cpu_arm925_reset - .word cpu_arm925_do_idle - .word cpu_arm925_dcache_clean_area - .word cpu_arm925_switch_mm - .word cpu_arm925_set_pte_ext - .word 0 - .word 0 - .word 0 - .size arm925_processor_functions, . 
- arm925_processor_functions + @ define struct processor (see and proc-macros.S) + define_processor_functions arm925, dabort=v4t_early_abort, pabort=legacy_pabort .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv4t" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v4" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_arm925_name, #object -cpu_arm925_name: - .asciz "ARM925T" - .size cpu_arm925_name, . - cpu_arm925_name + string cpu_arch_name, "armv4t" + string cpu_elf_name, "v4" + string cpu_arm925_name, "ARM925T" .align .section ".proc.info.init", #alloc, #execinstr - .type __arm925_proc_info,#object -__arm925_proc_info: - .long 0x54029250 - .long 0xfffffff0 +.macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache + .type __\name\()_proc_info,#object +__\name\()_proc_info: + .long \cpu_val + .long \cpu_mask .long PMD_TYPE_SECT | \ PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ @@ -550,27 +513,8 @@ __arm925_proc_info: .long v4wbi_tlb_fns .long v4wb_user_fns .long arm925_cache_fns - .size __arm925_proc_info, . - __arm925_proc_info + .size __\name\()_proc_info, . - __\name\()_proc_info +.endm - .type __arm915_proc_info,#object -__arm915_proc_info: - .long 0x54029150 - .long 0xfffffff0 - .long PMD_TYPE_SECT | \ - PMD_BIT4 | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_BIT4 | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __arm925_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB - .long cpu_arm925_name - .long arm925_processor_functions - .long v4wbi_tlb_fns - .long v4wb_user_fns - .long arm925_cache_fns - .size __arm925_proc_info, . - __arm925_proc_info + arm925_proc_info arm925, 0x54029250, 0xfffffff0, cpu_arm925_name + arm925_proc_info arm915, 0x54029150, 0xfffffff0, cpu_arm925_name diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 0ed85d930c09..2bbcf053dffd 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -335,17 +335,8 @@ ENTRY(arm926_dma_unmap_area) mov pc, lr ENDPROC(arm926_dma_unmap_area) -ENTRY(arm926_cache_fns) - .long arm926_flush_icache_all - .long arm926_flush_kern_cache_all - .long arm926_flush_user_cache_all - .long arm926_flush_user_cache_range - .long arm926_coherent_kern_range - .long arm926_coherent_user_range - .long arm926_flush_kern_dcache_area - .long arm926_dma_map_area - .long arm926_dma_unmap_area - .long arm926_dma_flush_range + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions arm926 ENTRY(cpu_arm926_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH @@ -430,9 +421,6 @@ ENTRY(cpu_arm926_do_resume) PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE b cpu_resume_mmu ENDPROC(cpu_arm926_do_resume) -#else -#define cpu_arm926_do_suspend 0 -#define cpu_arm926_do_resume 0 #endif __CPUINIT @@ -475,42 +463,14 @@ arm926_crval: __INITDATA -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type arm926_processor_functions, #object -arm926_processor_functions: - .word v5tj_early_abort - .word legacy_pabort - .word cpu_arm926_proc_init - .word cpu_arm926_proc_fin - .word cpu_arm926_reset - .word cpu_arm926_do_idle - .word cpu_arm926_dcache_clean_area - .word cpu_arm926_switch_mm - .word cpu_arm926_set_pte_ext - .word cpu_arm926_suspend_size - .word cpu_arm926_do_suspend - .word cpu_arm926_do_resume - .size arm926_processor_functions, . 
- arm926_processor_functions + @ define struct processor (see and proc-macros.S) + define_processor_functions arm926, dabort=v5tj_early_abort, pabort=legacy_pabort, suspend=1 .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv5tej" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v5" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_arm926_name, #object -cpu_arm926_name: - .asciz "ARM926EJ-S" - .size cpu_arm926_name, . - cpu_arm926_name + string cpu_arch_name, "armv5tej" + string cpu_elf_name, "v5" + string cpu_arm926_name, "ARM926EJ-S" .align diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 26aea3f71c26..ac750d506153 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S @@ -264,17 +264,8 @@ ENTRY(arm940_dma_unmap_area) mov pc, lr ENDPROC(arm940_dma_unmap_area) -ENTRY(arm940_cache_fns) - .long arm940_flush_icache_all - .long arm940_flush_kern_cache_all - .long arm940_flush_user_cache_all - .long arm940_flush_user_cache_range - .long arm940_coherent_kern_range - .long arm940_coherent_user_range - .long arm940_flush_kern_dcache_area - .long arm940_dma_map_area - .long arm940_dma_unmap_area - .long arm940_dma_flush_range + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions arm940 __CPUINIT @@ -348,42 +339,14 @@ __arm940_setup: __INITDATA -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type arm940_processor_functions, #object -ENTRY(arm940_processor_functions) - .word nommu_early_abort - .word legacy_pabort - .word cpu_arm940_proc_init - .word cpu_arm940_proc_fin - .word cpu_arm940_reset - .word cpu_arm940_do_idle - .word cpu_arm940_dcache_clean_area - .word cpu_arm940_switch_mm - .word 0 @ cpu_*_set_pte - .word 0 - .word 0 - .word 0 - .size arm940_processor_functions, . - arm940_processor_functions + @ define struct processor (see and proc-macros.S) + define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 .section ".rodata" -.type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv4t" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v4" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_arm940_name, #object -cpu_arm940_name: - .ascii "ARM940T" - .size cpu_arm940_name, . 
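The suspend=1 and nommu=1 variants seen above map onto the tail of struct processor: with suspend=1 the macro fills the suspend_size slot from cpu_*_suspend_size and the do_suspend/do_resume slots only under CONFIG_PM_SLEEP, which is why the "#define cpu_*_do_suspend 0" fallbacks can be deleted from the individual files. A C sketch of the structure, approximately as in <asm/proc-fns.h> (pte_t simplified to a plain word for the example); define_processor_functions emits exactly one word per field:

struct mm_struct;			/* opaque here */
typedef unsigned long pte_t;		/* simplified for the sketch */

struct processor {
	void (*_data_abort)(unsigned long pc);
	unsigned long (*_prefetch_abort)(unsigned long lr);
	void (*_proc_init)(void);
	void (*_proc_fin)(void);
	void (*reset)(unsigned long addr);
	int (*_do_idle)(void);
	void (*dcache_clean_area)(void *addr, int size);
	void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);	/* 0 if nommu=1 */
	unsigned int suspend_size;	/* cpu_*_suspend_size if suspend=1 */
	void (*do_suspend)(void *);	/* CONFIG_PM_SLEEP only */
	void (*do_resume)(void *);	/* CONFIG_PM_SLEEP only */
};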
- cpu_arm940_name + string cpu_arch_name, "armv4t" + string cpu_elf_name, "v4" + string cpu_arm940_name, "ARM940T" .align diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 8063345406fe..f8f7ea34bfc5 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -306,18 +306,8 @@ ENTRY(arm946_dma_unmap_area) mov pc, lr ENDPROC(arm946_dma_unmap_area) -ENTRY(arm946_cache_fns) - .long arm946_flush_icache_all - .long arm946_flush_kern_cache_all - .long arm946_flush_user_cache_all - .long arm946_flush_user_cache_range - .long arm946_coherent_kern_range - .long arm946_coherent_user_range - .long arm946_flush_kern_dcache_area - .long arm946_dma_map_area - .long arm946_dma_unmap_area - .long arm946_dma_flush_range - + @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) + define_cache_functions arm946 ENTRY(cpu_arm946_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH @@ -403,43 +393,14 @@ __arm946_setup: __INITDATA -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type arm946_processor_functions, #object -ENTRY(arm946_processor_functions) - .word nommu_early_abort - .word legacy_pabort - .word cpu_arm946_proc_init - .word cpu_arm946_proc_fin - .word cpu_arm946_reset - .word cpu_arm946_do_idle - - .word cpu_arm946_dcache_clean_area - .word cpu_arm946_switch_mm - .word 0 @ cpu_*_set_pte - .word 0 - .word 0 - .word 0 - .size arm946_processor_functions, . - arm946_processor_functions + @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) + define_processor_functions arm946, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv5te" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v5t" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_arm946_name, #object -cpu_arm946_name: - .ascii "ARM946E-S" - .size cpu_arm946_name, . - cpu_arm946_name + string cpu_arch_name, "armv5te" + string cpu_elf_name, "v5t" + string cpu_arm946_name, "ARM946E-S" .align diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index 546b54da1005..2120f9e2af7f 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S @@ -17,6 +17,8 @@ #include <asm/pgtable.h> #include <asm/ptrace.h> +#include "proc-macros.S" + .text /* * cpu_arm9tdmi_proc_init() @@ -55,82 +57,38 @@ __arm9tdmi_setup: __INITDATA -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type arm9tdmi_processor_functions, #object -ENTRY(arm9tdmi_processor_functions) - .word nommu_early_abort - .word legacy_pabort - .word cpu_arm9tdmi_proc_init - .word cpu_arm9tdmi_proc_fin - .word cpu_arm9tdmi_reset - .word cpu_arm9tdmi_do_idle - .word cpu_arm9tdmi_dcache_clean_area - .word cpu_arm9tdmi_switch_mm - .word 0 @ cpu_*_set_pte - .word 0 - .word 0 - .word 0 - .size arm9tdmi_processor_functions, . - arm9tdmi_processor_functions + @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) + define_processor_functions arm9tdmi, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv4t" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v4" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_arm9tdmi_name, #object -cpu_arm9tdmi_name: - .asciz "ARM9TDMI" - .size cpu_arm9tdmi_name, . 
- cpu_arm9tdmi_name - - .type cpu_p2001_name, #object -cpu_p2001_name: - .asciz "P2001" - .size cpu_p2001_name, . - cpu_p2001_name + string cpu_arch_name, "armv4t" + string cpu_elf_name, "v4" + string cpu_arm9tdmi_name, "ARM9TDMI" + string cpu_p2001_name, "P2001" .align .section ".proc.info.init", #alloc, #execinstr - .type __arm9tdmi_proc_info, #object -__arm9tdmi_proc_info: - .long 0x41009900 - .long 0xfff8ff00 +.macro arm9tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req + .type __\name\()_proc_info, #object +__\name\()_proc_info: + .long \cpu_val + .long \cpu_mask .long 0 .long 0 b __arm9tdmi_setup .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT - .long cpu_arm9tdmi_name + .long \cpu_name .long arm9tdmi_processor_functions .long 0 .long 0 .long v4_cache_fns - .size __arm9tdmi_proc_info, . - __arm9tdmi_proc_info + .size __\name\()_proc_info, . - __\name\()_proc_info +.endm - .type __p2001_proc_info, #object -__p2001_proc_info: - .long 0x41029000 - .long 0xffffffff - .long 0 - .long 0 - b __arm9tdmi_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT - .long cpu_p2001_name - .long arm9tdmi_processor_functions - .long 0 - .long 0 - .long v4_cache_fns - .size __p2001_proc_info, . - __p2001_proc_info + arm9tdmi_proc_info arm9tdmi, 0x41009900, 0xfff8ff00, cpu_arm9tdmi_name + arm9tdmi_proc_info p2001, 0x41029000, 0xffffffff, cpu_p2001_name diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index fc2a4ae15cf4..4c7a5710472b 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S @@ -180,42 +180,14 @@ fa526_cr1_set: __INITDATA -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type fa526_processor_functions, #object -fa526_processor_functions: - .word v4_early_abort - .word legacy_pabort - .word cpu_fa526_proc_init - .word cpu_fa526_proc_fin - .word cpu_fa526_reset - .word cpu_fa526_do_idle - .word cpu_fa526_dcache_clean_area - .word cpu_fa526_switch_mm - .word cpu_fa526_set_pte_ext - .word 0 - .word 0 - .word 0 - .size fa526_processor_functions, . - fa526_processor_functions + @ define struct processor (see and proc-macros.S) + define_processor_functions fa526, dabort=v4_early_abort, pabort=legacy_pabort .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv4" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v4" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_fa526_name, #object -cpu_fa526_name: - .asciz "FA526" - .size cpu_fa526_name, . 
- cpu_fa526_name + string cpu_arch_name, "armv4" + string cpu_elf_name, "v4" + string cpu_fa526_name, "FA526" .align diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index d3883eed7a4a..8a6c2f78c1c3 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -411,29 +411,28 @@ ENTRY(feroceon_dma_unmap_area) mov pc, lr ENDPROC(feroceon_dma_unmap_area) -ENTRY(feroceon_cache_fns) - .long feroceon_flush_icache_all - .long feroceon_flush_kern_cache_all - .long feroceon_flush_user_cache_all - .long feroceon_flush_user_cache_range - .long feroceon_coherent_kern_range - .long feroceon_coherent_user_range - .long feroceon_flush_kern_dcache_area - .long feroceon_dma_map_area - .long feroceon_dma_unmap_area - .long feroceon_dma_flush_range + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions feroceon -ENTRY(feroceon_range_cache_fns) - .long feroceon_flush_icache_all - .long feroceon_flush_kern_cache_all - .long feroceon_flush_user_cache_all - .long feroceon_flush_user_cache_range - .long feroceon_coherent_kern_range - .long feroceon_coherent_user_range - .long feroceon_range_flush_kern_dcache_area - .long feroceon_range_dma_map_area - .long feroceon_dma_unmap_area - .long feroceon_range_dma_flush_range +.macro range_alias basename + .globl feroceon_range_\basename + .type feroceon_range_\basename , %function + .equ feroceon_range_\basename , feroceon_\basename +.endm + +/* + * Most of the cache functions are unchanged for this case. + * Export suitable alias symbols for the unchanged functions: + */ + range_alias flush_icache_all + range_alias flush_user_cache_all + range_alias flush_kern_cache_all + range_alias flush_user_cache_range + range_alias coherent_kern_range + range_alias coherent_user_range + range_alias dma_unmap_area + + define_cache_functions feroceon_range .align 5 ENTRY(cpu_feroceon_dcache_clean_area) @@ -539,67 +538,27 @@ feroceon_crval: __INITDATA -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type feroceon_processor_functions, #object -feroceon_processor_functions: - .word v5t_early_abort - .word legacy_pabort - .word cpu_feroceon_proc_init - .word cpu_feroceon_proc_fin - .word cpu_feroceon_reset - .word cpu_feroceon_do_idle - .word cpu_feroceon_dcache_clean_area - .word cpu_feroceon_switch_mm - .word cpu_feroceon_set_pte_ext - .word 0 - .word 0 - .word 0 - .size feroceon_processor_functions, . - feroceon_processor_functions + @ define struct processor (see and proc-macros.S) + define_processor_functions feroceon, dabort=v5t_early_abort, pabort=legacy_pabort .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv5te" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v5" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_feroceon_name, #object -cpu_feroceon_name: - .asciz "Feroceon" - .size cpu_feroceon_name, . - cpu_feroceon_name - - .type cpu_88fr531_name, #object -cpu_88fr531_name: - .asciz "Feroceon 88FR531-vd" - .size cpu_88fr531_name, . - cpu_88fr531_name - - .type cpu_88fr571_name, #object -cpu_88fr571_name: - .asciz "Feroceon 88FR571-vd" - .size cpu_88fr571_name, . - cpu_88fr571_name - - .type cpu_88fr131_name, #object -cpu_88fr131_name: - .asciz "Feroceon 88FR131" - .size cpu_88fr131_name, . 
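The range_alias macro in the feroceon hunk above makes each feroceon_range_<fn> symbol equal to the corresponding feroceon_<fn> via .equ, so define_cache_functions feroceon_range can be reused unchanged for the entries that do not differ. Roughly the same object-level effect in C would use GCC's alias attribute; a self-contained sketch with an invented stand-in body (the real symbols are assembly routines):

#include <stdio.h>

/* stand-in body for the shared implementation */
void feroceon_flush_icache_all(void)
{
	puts("flush entire icache");
}

/* same effect as: .equ feroceon_range_flush_icache_all, feroceon_flush_icache_all */
void feroceon_range_flush_icache_all(void)
	__attribute__((alias("feroceon_flush_icache_all")));

int main(void)
{
	feroceon_range_flush_icache_all();	/* runs the aliased body */
	return 0;
}

The aliases cost no code: both names resolve to the same address, and only the genuinely different range-operation functions get separate implementations.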
- cpu_88fr131_name + string cpu_arch_name, "armv5te" + string cpu_elf_name, "v5" + string cpu_feroceon_name, "Feroceon" + string cpu_88fr531_name, "Feroceon 88FR531-vd" + string cpu_88fr571_name, "Feroceon 88FR571-vd" + string cpu_88fr131_name, "Feroceon 88FR131" .align .section ".proc.info.init", #alloc, #execinstr -#ifdef CONFIG_CPU_FEROCEON_OLD_ID - .type __feroceon_old_id_proc_info,#object -__feroceon_old_id_proc_info: - .long 0x41009260 - .long 0xff00fff0 +.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req + .type __\name\()_proc_info,#object +__\name\()_proc_info: + .long \cpu_val + .long \cpu_mask .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ @@ -614,85 +573,22 @@ __feroceon_old_id_proc_info: .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_feroceon_name + .long \cpu_name .long feroceon_processor_functions .long v4wbi_tlb_fns .long feroceon_user_fns - .long feroceon_cache_fns - .size __feroceon_old_id_proc_info, . - __feroceon_old_id_proc_info + .long \cache + .size __\name\()_proc_info, . - __\name\()_proc_info +.endm + +#ifdef CONFIG_CPU_FEROCEON_OLD_ID + feroceon_proc_info feroceon_old_id, 0x41009260, 0xff00fff0, \ + cpu_name=cpu_feroceon_name, cache=feroceon_cache_fns #endif - .type __88fr531_proc_info,#object -__88fr531_proc_info: - .long 0x56055310 - .long 0xfffffff0 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_BIT4 | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_BIT4 | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __feroceon_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_88fr531_name - .long feroceon_processor_functions - .long v4wbi_tlb_fns - .long feroceon_user_fns - .long feroceon_cache_fns - .size __88fr531_proc_info, . - __88fr531_proc_info - - .type __88fr571_proc_info,#object -__88fr571_proc_info: - .long 0x56155710 - .long 0xfffffff0 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_BIT4 | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_BIT4 | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __feroceon_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_88fr571_name - .long feroceon_processor_functions - .long v4wbi_tlb_fns - .long feroceon_user_fns - .long feroceon_range_cache_fns - .size __88fr571_proc_info, . - __88fr571_proc_info - - .type __88fr131_proc_info,#object -__88fr131_proc_info: - .long 0x56251310 - .long 0xfffffff0 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_BIT4 | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_BIT4 | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __feroceon_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_88fr131_name - .long feroceon_processor_functions - .long v4wbi_tlb_fns - .long feroceon_user_fns - .long feroceon_range_cache_fns - .size __88fr131_proc_info, . 
- __88fr131_proc_info + feroceon_proc_info 88fr531, 0x56055310, 0xfffffff0, cpu_88fr531_name, \ + cache=feroceon_cache_fns + feroceon_proc_info 88fr571, 0x56155710, 0xfffffff0, cpu_88fr571_name, \ + cache=feroceon_range_cache_fns + feroceon_proc_info 88fr131, 0x56251310, 0xfffffff0, cpu_88fr131_name, \ + cache=feroceon_range_cache_fns diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 34261f9486b9..307a4def8d3a 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -254,3 +254,71 @@ mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line mcr p15, 0, ip, c7, c10, 4 @ data write barrier .endm + +.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0 + .type \name\()_processor_functions, #object + .align 2 +ENTRY(\name\()_processor_functions) + .word \dabort + .word \pabort + .word cpu_\name\()_proc_init + .word cpu_\name\()_proc_fin + .word cpu_\name\()_reset + .word cpu_\name\()_do_idle + .word cpu_\name\()_dcache_clean_area + .word cpu_\name\()_switch_mm + + .if \nommu + .word 0 + .else + .word cpu_\name\()_set_pte_ext + .endif + + .if \suspend + .word cpu_\name\()_suspend_size +#ifdef CONFIG_PM_SLEEP + .word cpu_\name\()_do_suspend + .word cpu_\name\()_do_resume +#else + .word 0 + .word 0 +#endif + .else + .word 0 + .word 0 + .word 0 + .endif + + .size \name\()_processor_functions, . - \name\()_processor_functions +.endm + +.macro define_cache_functions name:req + .align 2 + .type \name\()_cache_fns, #object +ENTRY(\name\()_cache_fns) + .long \name\()_flush_icache_all + .long \name\()_flush_kern_cache_all + .long \name\()_flush_user_cache_all + .long \name\()_flush_user_cache_range + .long \name\()_coherent_kern_range + .long \name\()_coherent_user_range + .long \name\()_flush_kern_dcache_area + .long \name\()_dma_map_area + .long \name\()_dma_unmap_area + .long \name\()_dma_flush_range + .size \name\()_cache_fns, . - \name\()_cache_fns +.endm + +.macro define_tlb_functions name:req, flags_up:req, flags_smp + .type \name\()_tlb_fns, #object +ENTRY(\name\()_tlb_fns) + .long \name\()_flush_user_tlb_range + .long \name\()_flush_kern_tlb_range + .ifnb \flags_smp + ALT_SMP(.long \flags_smp ) + ALT_UP(.long \flags_up ) + .else + .long \flags_up + .endif + .size \name\()_tlb_fns, . - \name\()_tlb_fns +.endm diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 9d4f2ae63370..db52b0fb14a0 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -92,6 +92,17 @@ ENTRY(cpu_mohawk_do_idle) mcr p15, 0, r0, c7, c0, 4 @ wait for interrupt mov pc, lr +/* + * flush_icache_all() + * + * Unconditionally clean and invalidate the entire icache. 
+ */ +ENTRY(mohawk_flush_icache_all) + mov r0, #0 + mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache + mov pc, lr +ENDPROC(mohawk_flush_icache_all) + /* * flush_user_cache_all() * @@ -288,16 +299,8 @@ ENTRY(mohawk_dma_unmap_area) mov pc, lr ENDPROC(mohawk_dma_unmap_area) -ENTRY(mohawk_cache_fns) - .long mohawk_flush_kern_cache_all - .long mohawk_flush_user_cache_all - .long mohawk_flush_user_cache_range - .long mohawk_coherent_kern_range - .long mohawk_coherent_user_range - .long mohawk_flush_kern_dcache_area - .long mohawk_dma_map_area - .long mohawk_dma_unmap_area - .long mohawk_dma_flush_range + @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) + define_cache_functions mohawk ENTRY(cpu_mohawk_dcache_clean_area) 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry @@ -373,42 +376,14 @@ mohawk_crval: __INITDATA -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - .type mohawk_processor_functions, #object -mohawk_processor_functions: - .word v5t_early_abort - .word legacy_pabort - .word cpu_mohawk_proc_init - .word cpu_mohawk_proc_fin - .word cpu_mohawk_reset - .word cpu_mohawk_do_idle - .word cpu_mohawk_dcache_clean_area - .word cpu_mohawk_switch_mm - .word cpu_mohawk_set_pte_ext - .word 0 - .word 0 - .word 0 - .size mohawk_processor_functions, . - mohawk_processor_functions + @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) + define_processor_functions mohawk, dabort=v5t_early_abort, pabort=legacy_pabort .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv5te" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v5" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_mohawk_name, #object -cpu_mohawk_name: - .asciz "Marvell 88SV331x" - .size cpu_mohawk_name, . - cpu_mohawk_name + string cpu_arch_name, "armv5te" + string cpu_elf_name, "v5" + string cpu_mohawk_name, "Marvell 88SV331x" .align diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index 46f09ed16b98..d50ada26edd6 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S @@ -187,43 +187,14 @@ sa110_crval: __INITDATA -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - - .type sa110_processor_functions, #object -ENTRY(sa110_processor_functions) - .word v4_early_abort - .word legacy_pabort - .word cpu_sa110_proc_init - .word cpu_sa110_proc_fin - .word cpu_sa110_reset - .word cpu_sa110_do_idle - .word cpu_sa110_dcache_clean_area - .word cpu_sa110_switch_mm - .word cpu_sa110_set_pte_ext - .word 0 - .word 0 - .word 0 - .size sa110_processor_functions, . - sa110_processor_functions + @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) + define_processor_functions sa110, dabort=v4_early_abort, pabort=legacy_pabort .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv4" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v4" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_sa110_name, #object -cpu_sa110_name: - .asciz "StrongARM-110" - .size cpu_sa110_name, . 
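The new mohawk_flush_icache_all above exists because define_cache_functions expects all ten struct cpu_cache_fns entries, and the old hand-written mohawk table had no flush_icache_all slot. What the routine does, rendered as a C helper with inline assembly (ARM-only sketch; the kernel naturally keeps this in pure asm):

/* compile with an ARM toolchain */
static inline void flush_icache_all(void)
{
	unsigned long zero = 0;

	/* CP15 c7, c5, 0: invalidate the entire instruction cache */
	asm volatile("mcr p15, 0, %0, c7, c5, 0" : : "r" (zero) : "memory");
}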
- cpu_sa110_name + string cpu_arch_name, "armv4" + string cpu_elf_name, "v4" + string cpu_sa110_name, "StrongARM-110" .align diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 184a9c997e36..07219c2ae114 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -34,7 +34,7 @@ */ #define DCACHELINESIZE 32 - __INIT + .section .text /* * cpu_sa1100_proc_init() @@ -45,8 +45,6 @@ ENTRY(cpu_sa1100_proc_init) mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland mov pc, lr - .section .text - /* * cpu_sa1100_proc_fin() * @@ -200,9 +198,6 @@ ENTRY(cpu_sa1100_do_resume) PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE b cpu_resume_mmu ENDPROC(cpu_sa1100_do_resume) -#else -#define cpu_sa1100_do_suspend 0 -#define cpu_sa1100_do_resume 0 #endif __CPUINIT @@ -235,60 +230,29 @@ sa1100_crval: __INITDATA -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - /* * SA1100 and SA1110 share the same function calls */ - .type sa1100_processor_functions, #object -ENTRY(sa1100_processor_functions) - .word v4_early_abort - .word legacy_pabort - .word cpu_sa1100_proc_init - .word cpu_sa1100_proc_fin - .word cpu_sa1100_reset - .word cpu_sa1100_do_idle - .word cpu_sa1100_dcache_clean_area - .word cpu_sa1100_switch_mm - .word cpu_sa1100_set_pte_ext - .word cpu_sa1100_suspend_size - .word cpu_sa1100_do_suspend - .word cpu_sa1100_do_resume - .size sa1100_processor_functions, . - sa1100_processor_functions + + @ define struct processor (see and proc-macros.S) + define_processor_functions sa1100, dabort=v4_early_abort, pabort=legacy_pabort, suspend=1 .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv4" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v4" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_sa1100_name, #object -cpu_sa1100_name: - .asciz "StrongARM-1100" - .size cpu_sa1100_name, . - cpu_sa1100_name - - .type cpu_sa1110_name, #object -cpu_sa1110_name: - .asciz "StrongARM-1110" - .size cpu_sa1110_name, . - cpu_sa1110_name + string cpu_arch_name, "armv4" + string cpu_elf_name, "v4" + string cpu_sa1100_name, "StrongARM-1100" + string cpu_sa1110_name, "StrongARM-1110" .align .section ".proc.info.init", #alloc, #execinstr - .type __sa1100_proc_info,#object -__sa1100_proc_info: - .long 0x4401a110 - .long 0xfffffff0 +.macro sa1100_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req + .type __\name\()_proc_info,#object +__\name\()_proc_info: + .long \cpu_val + .long \cpu_mask .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ @@ -301,32 +265,13 @@ __sa1100_proc_info: .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT - .long cpu_sa1100_name + .long \cpu_name .long sa1100_processor_functions .long v4wb_tlb_fns .long v4_mc_user_fns .long v4wb_cache_fns - .size __sa1100_proc_info, . - __sa1100_proc_info + .size __\name\()_proc_info, . 
- __\name\()_proc_info +.endm - .type __sa1110_proc_info,#object -__sa1110_proc_info: - .long 0x6901b110 - .long 0xfffffff0 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __sa1100_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT - .long cpu_sa1110_name - .long sa1100_processor_functions - .long v4wb_tlb_fns - .long v4_mc_user_fns - .long v4wb_cache_fns - .size __sa1110_proc_info, . - __sa1110_proc_info + sa1100_proc_info sa1100, 0x4401a110, 0xfffffff0, cpu_sa1100_name + sa1100_proc_info sa1110, 0x6901b110, 0xfffffff0, cpu_sa1110_name diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 1d2b8451bf25..219138d2f158 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -56,6 +56,11 @@ ENTRY(cpu_v6_proc_fin) */ .align 5 ENTRY(cpu_v6_reset) + mrc p15, 0, r1, c1, c0, 0 @ ctrl register + bic r1, r1, #0x1 @ ...............m + mcr p15, 0, r1, c1, c0, 0 @ disable MMU + mov r1, #0 + mcr p15, 0, r1, c7, c5, 4 @ ISB mov pc, r0 /* @@ -164,16 +169,9 @@ ENDPROC(cpu_v6_do_resume) cpu_resume_l1_flags: ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) -#else -#define cpu_v6_do_suspend 0 -#define cpu_v6_do_resume 0 #endif - - .type cpu_v6_name, #object -cpu_v6_name: - .asciz "ARMv6-compatible processor" - .size cpu_v6_name, . - cpu_v6_name + string cpu_v6_name, "ARMv6-compatible processor" .align @@ -239,33 +237,13 @@ v6_crval: __INITDATA - .type v6_processor_functions, #object -ENTRY(v6_processor_functions) - .word v6_early_abort - .word v6_pabort - .word cpu_v6_proc_init - .word cpu_v6_proc_fin - .word cpu_v6_reset - .word cpu_v6_do_idle - .word cpu_v6_dcache_clean_area - .word cpu_v6_switch_mm - .word cpu_v6_set_pte_ext - .word cpu_v6_suspend_size - .word cpu_v6_do_suspend - .word cpu_v6_do_resume - .size v6_processor_functions, . - v6_processor_functions + @ define struct processor (see and proc-macros.S) + define_processor_functions v6, dabort=v6_early_abort, pabort=v6_pabort, suspend=1 .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv6" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v6" - .size cpu_elf_name, . - cpu_elf_name + string cpu_arch_name, "armv6" + string cpu_elf_name, "v6" .align .section ".proc.info.init", #alloc, #execinstr diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 089c0b5e454f..a30e78542ccf 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -58,9 +58,16 @@ ENDPROC(cpu_v7_proc_fin) * to what would be the reset vector. * * - loc - location to jump to for soft reset + * + * This code must be executed using a flat identity mapping with + * caches disabled. 
*/ .align 5 ENTRY(cpu_v7_reset) + mrc p15, 0, r1, c1, c0, 0 @ ctrl register + bic r1, r1, #0x1 @ ...............m + mcr p15, 0, r1, c1, c0, 0 @ disable MMU + isb mov pc, r0 ENDPROC(cpu_v7_reset) @@ -173,8 +180,7 @@ ENTRY(cpu_v7_set_pte_ext) mov pc, lr ENDPROC(cpu_v7_set_pte_ext) -cpu_v7_name: - .ascii "ARMv7 Processor" + string cpu_v7_name, "ARMv7 Processor" .align /* @@ -257,9 +263,6 @@ ENDPROC(cpu_v7_do_resume) cpu_resume_l1_flags: ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) -#else -#define cpu_v7_do_suspend 0 -#define cpu_v7_do_resume 0 #endif __CPUINIT @@ -279,13 +282,20 @@ cpu_resume_l1_flags: * It is assumed that: * - cache type register is implemented */ +__v7_ca5mp_setup: __v7_ca9mp_setup: + mov r10, #(1 << 0) @ TLB ops broadcasting + b 1f +__v7_ca15mp_setup: + mov r10, #0 +1: #ifdef CONFIG_SMP ALT_SMP(mrc p15, 0, r0, c1, c0, 1) ALT_UP(mov r0, #(1 << 6)) @ fake it for UP tst r0, #(1 << 6) @ SMP/nAMP mode enabled? - orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and - mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting + orreq r0, r0, #(1 << 6) @ Enable SMP/nAMP mode + orreq r0, r0, r10 @ Enable CPU-specific SMP bits + mcreq p15, 0, r0, c1, c0, 1 #endif __v7_setup: adr r12, __v7_setup_stack @ the local stack @@ -411,66 +421,69 @@ __v7_setup_stack: __INITDATA - .type v7_processor_functions, #object -ENTRY(v7_processor_functions) - .word v7_early_abort - .word v7_pabort - .word cpu_v7_proc_init - .word cpu_v7_proc_fin - .word cpu_v7_reset - .word cpu_v7_do_idle - .word cpu_v7_dcache_clean_area - .word cpu_v7_switch_mm - .word cpu_v7_set_pte_ext - .word cpu_v7_suspend_size - .word cpu_v7_do_suspend - .word cpu_v7_do_resume - .size v7_processor_functions, . - v7_processor_functions + @ define struct processor (see and proc-macros.S) + define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv7" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v7" - .size cpu_elf_name, . - cpu_elf_name + string cpu_arch_name, "armv7" + string cpu_elf_name, "v7" .align .section ".proc.info.init", #alloc, #execinstr - .type __v7_ca9mp_proc_info, #object -__v7_ca9mp_proc_info: - .long 0x410fc090 @ Required ID value - .long 0xff0ffff0 @ Mask for ID - ALT_SMP(.long \ - PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ | \ - PMD_FLAGS_SMP) - ALT_UP(.long \ - PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ | \ - PMD_FLAGS_UP) - .long PMD_TYPE_SECT | \ - PMD_SECT_XN | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - W(b) __v7_ca9mp_setup + /* + * Standard v7 proc info content + */ +.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0 + ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ + PMD_FLAGS_SMP | \mm_mmuflags) + ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ + PMD_FLAGS_UP | \mm_mmuflags) + .long PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_WRITE | \ + PMD_SECT_AP_READ | \io_mmuflags + W(b) \initfunc .long cpu_arch_name .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS + .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \ + HWCAP_EDSP | HWCAP_TLS | \hwcaps .long cpu_v7_name .long v7_processor_functions .long v7wbi_tlb_fns .long v6_user_fns .long v7_cache_fns +.endm + + /* + * ARM Ltd. Cortex A5 processor. 
+ */ + .type __v7_ca5mp_proc_info, #object +__v7_ca5mp_proc_info: + .long 0x410fc050 + .long 0xff0ffff0 + __v7_proc __v7_ca5mp_setup + .size __v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info + + /* + * ARM Ltd. Cortex A9 processor. + */ + .type __v7_ca9mp_proc_info, #object +__v7_ca9mp_proc_info: + .long 0x410fc090 + .long 0xff0ffff0 + __v7_proc __v7_ca9mp_setup .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info + /* + * ARM Ltd. Cortex A15 processor. + */ + .type __v7_ca15mp_proc_info, #object +__v7_ca15mp_proc_info: + .long 0x410fc0f0 + .long 0xff0ffff0 + __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV + .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info + /* * Match any ARMv7 processor core. */ @@ -478,27 +491,5 @@ __v7_ca9mp_proc_info: __v7_proc_info: .long 0x000f0000 @ Required ID value .long 0x000f0000 @ Mask for ID - ALT_SMP(.long \ - PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ | \ - PMD_FLAGS_SMP) - ALT_UP(.long \ - PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ | \ - PMD_FLAGS_UP) - .long PMD_TYPE_SECT | \ - PMD_SECT_XN | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - W(b) __v7_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS - .long cpu_v7_name - .long v7_processor_functions - .long v7wbi_tlb_fns - .long v6_user_fns - .long v7_cache_fns + __v7_proc __v7_setup .size __v7_proc_info, . - __v7_proc_info diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 596213699f37..64f1fc7edf0a 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -335,17 +335,8 @@ ENTRY(xsc3_dma_unmap_area) mov pc, lr ENDPROC(xsc3_dma_unmap_area) -ENTRY(xsc3_cache_fns) - .long xsc3_flush_icache_all - .long xsc3_flush_kern_cache_all - .long xsc3_flush_user_cache_all - .long xsc3_flush_user_cache_range - .long xsc3_coherent_kern_range - .long xsc3_coherent_user_range - .long xsc3_flush_kern_dcache_area - .long xsc3_dma_map_area - .long xsc3_dma_unmap_area - .long xsc3_dma_flush_range + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions xsc3 ENTRY(cpu_xsc3_dcache_clean_area) 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line @@ -454,9 +445,6 @@ ENTRY(cpu_xsc3_do_resume) ldr r3, =0x542e @ section flags b cpu_resume_mmu ENDPROC(cpu_xsc3_do_resume) -#else -#define cpu_xsc3_do_suspend 0 -#define cpu_xsc3_do_resume 0 #endif __CPUINIT @@ -503,52 +491,24 @@ xsc3_crval: __INITDATA -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - - .type xsc3_processor_functions, #object -ENTRY(xsc3_processor_functions) - .word v5t_early_abort - .word legacy_pabort - .word cpu_xsc3_proc_init - .word cpu_xsc3_proc_fin - .word cpu_xsc3_reset - .word cpu_xsc3_do_idle - .word cpu_xsc3_dcache_clean_area - .word cpu_xsc3_switch_mm - .word cpu_xsc3_set_pte_ext - .word cpu_xsc3_suspend_size - .word cpu_xsc3_do_suspend - .word cpu_xsc3_do_resume - .size xsc3_processor_functions, . - xsc3_processor_functions + @ define struct processor (see and proc-macros.S) + define_processor_functions xsc3, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1 .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv5te" - .size cpu_arch_name, . - cpu_arch_name - - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v5" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_xsc3_name, #object -cpu_xsc3_name: - .asciz "XScale-V3 based processor" - .size cpu_xsc3_name, . 
- cpu_xsc3_name + string cpu_arch_name, "armv5te" + string cpu_elf_name, "v5" + string cpu_xsc3_name, "XScale-V3 based processor" .align .section ".proc.info.init", #alloc, #execinstr - .type __xsc3_proc_info,#object -__xsc3_proc_info: - .long 0x69056000 - .long 0xffffe000 +.macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req + .type __\name\()_proc_info,#object +__\name\()_proc_info: + .long \cpu_val + .long \cpu_mask .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ @@ -566,29 +526,10 @@ __xsc3_proc_info: .long v4wbi_tlb_fns .long xsc3_mc_user_fns .long xsc3_cache_fns - .size __xsc3_proc_info, . - __xsc3_proc_info + .size __\name\()_proc_info, . - __\name\()_proc_info +.endm + + xsc3_proc_info xsc3, 0x69056000, 0xffffe000 /* Note: PXA935 changed its implementor ID from Intel to Marvell */ - - .type __xsc3_pxa935_proc_info,#object -__xsc3_pxa935_proc_info: - .long 0x56056000 - .long 0xffffe000 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __xsc3_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_xsc3_name - .long xsc3_processor_functions - .long v4wbi_tlb_fns - .long xsc3_mc_user_fns - .long xsc3_cache_fns - .size __xsc3_pxa935_proc_info, . - __xsc3_pxa935_proc_info + xsc3_proc_info xsc3_pxa935, 0x56056000, 0xffffe000 diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 42af97664c9d..fbc06e55b87a 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -390,12 +390,12 @@ ENDPROC(xscale_dma_map_area) * - size - size of region * - dir - DMA direction */ -ENTRY(xscale_dma_a0_map_area) +ENTRY(xscale_80200_A0_A1_dma_map_area) add r1, r1, r0 teq r2, #DMA_TO_DEVICE beq xscale_dma_clean_range b xscale_dma_flush_range -ENDPROC(xscale_dma_a0_map_area) +ENDPROC(xscale_80200_A0_A1_dma_map_area) /* * dma_unmap_area(start, size, dir) @@ -407,17 +407,8 @@ ENTRY(xscale_dma_unmap_area) mov pc, lr ENDPROC(xscale_dma_unmap_area) -ENTRY(xscale_cache_fns) - .long xscale_flush_icache_all - .long xscale_flush_kern_cache_all - .long xscale_flush_user_cache_all - .long xscale_flush_user_cache_range - .long xscale_coherent_kern_range - .long xscale_coherent_user_range - .long xscale_flush_kern_dcache_area - .long xscale_dma_map_area - .long xscale_dma_unmap_area - .long xscale_dma_flush_range + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions xscale /* * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't @@ -432,16 +423,28 @@ ENTRY(xscale_cache_fns) * revision January 22, 2003, available at: * http://www.intel.com/design/iio/specupdt/273415.htm */ -ENTRY(xscale_80200_A0_A1_cache_fns) - .long xscale_flush_kern_cache_all - .long xscale_flush_user_cache_all - .long xscale_flush_user_cache_range - .long xscale_coherent_kern_range - .long xscale_coherent_user_range - .long xscale_flush_kern_dcache_area - .long xscale_dma_a0_map_area - .long xscale_dma_unmap_area - .long xscale_dma_flush_range +.macro a0_alias basename + .globl xscale_80200_A0_A1_\basename + .type xscale_80200_A0_A1_\basename , %function + .equ xscale_80200_A0_A1_\basename , xscale_\basename +.endm + +/* + * Most of the cache functions are unchanged for these processor revisions. 
+ * Export suitable alias symbols for the unchanged functions: + */ + a0_alias flush_icache_all + a0_alias flush_user_cache_all + a0_alias flush_kern_cache_all + a0_alias flush_user_cache_range + a0_alias coherent_kern_range + a0_alias coherent_user_range + a0_alias flush_kern_dcache_area + a0_alias dma_flush_range + a0_alias dma_unmap_area + + @ define struct cpu_cache_fns (see and proc-macros.S) + define_cache_functions xscale_80200_A0_A1 ENTRY(cpu_xscale_dcache_clean_area) 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry @@ -551,9 +554,6 @@ ENTRY(cpu_xscale_do_resume) PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE b cpu_resume_mmu ENDPROC(cpu_xscale_do_resume) -#else -#define cpu_xscale_do_suspend 0 -#define cpu_xscale_do_resume 0 #endif __CPUINIT @@ -587,432 +587,74 @@ xscale_crval: __INITDATA -/* - * Purpose : Function pointers used to access above functions - all calls - * come through these - */ - - .type xscale_processor_functions, #object -ENTRY(xscale_processor_functions) - .word v5t_early_abort - .word legacy_pabort - .word cpu_xscale_proc_init - .word cpu_xscale_proc_fin - .word cpu_xscale_reset - .word cpu_xscale_do_idle - .word cpu_xscale_dcache_clean_area - .word cpu_xscale_switch_mm - .word cpu_xscale_set_pte_ext - .word cpu_xscale_suspend_size - .word cpu_xscale_do_suspend - .word cpu_xscale_do_resume - .size xscale_processor_functions, . - xscale_processor_functions + @ define struct processor (see and proc-macros.S) + define_processor_functions xscale, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1 .section ".rodata" - .type cpu_arch_name, #object -cpu_arch_name: - .asciz "armv5te" - .size cpu_arch_name, . - cpu_arch_name + string cpu_arch_name, "armv5te" + string cpu_elf_name, "v5" - .type cpu_elf_name, #object -cpu_elf_name: - .asciz "v5" - .size cpu_elf_name, . - cpu_elf_name - - .type cpu_80200_A0_A1_name, #object -cpu_80200_A0_A1_name: - .asciz "XScale-80200 A0/A1" - .size cpu_80200_A0_A1_name, . - cpu_80200_A0_A1_name - - .type cpu_80200_name, #object -cpu_80200_name: - .asciz "XScale-80200" - .size cpu_80200_name, . - cpu_80200_name - - .type cpu_80219_name, #object -cpu_80219_name: - .asciz "XScale-80219" - .size cpu_80219_name, . - cpu_80219_name - - .type cpu_8032x_name, #object -cpu_8032x_name: - .asciz "XScale-IOP8032x Family" - .size cpu_8032x_name, . - cpu_8032x_name - - .type cpu_8033x_name, #object -cpu_8033x_name: - .asciz "XScale-IOP8033x Family" - .size cpu_8033x_name, . - cpu_8033x_name - - .type cpu_pxa250_name, #object -cpu_pxa250_name: - .asciz "XScale-PXA250" - .size cpu_pxa250_name, . - cpu_pxa250_name - - .type cpu_pxa210_name, #object -cpu_pxa210_name: - .asciz "XScale-PXA210" - .size cpu_pxa210_name, . - cpu_pxa210_name - - .type cpu_ixp42x_name, #object -cpu_ixp42x_name: - .asciz "XScale-IXP42x Family" - .size cpu_ixp42x_name, . - cpu_ixp42x_name - - .type cpu_ixp43x_name, #object -cpu_ixp43x_name: - .asciz "XScale-IXP43x Family" - .size cpu_ixp43x_name, . - cpu_ixp43x_name - - .type cpu_ixp46x_name, #object -cpu_ixp46x_name: - .asciz "XScale-IXP46x Family" - .size cpu_ixp46x_name, . - cpu_ixp46x_name - - .type cpu_ixp2400_name, #object -cpu_ixp2400_name: - .asciz "XScale-IXP2400" - .size cpu_ixp2400_name, . - cpu_ixp2400_name - - .type cpu_ixp2800_name, #object -cpu_ixp2800_name: - .asciz "XScale-IXP2800" - .size cpu_ixp2800_name, . - cpu_ixp2800_name - - .type cpu_pxa255_name, #object -cpu_pxa255_name: - .asciz "XScale-PXA255" - .size cpu_pxa255_name, . 
- cpu_pxa255_name - - .type cpu_pxa270_name, #object -cpu_pxa270_name: - .asciz "XScale-PXA270" - .size cpu_pxa270_name, . - cpu_pxa270_name + string cpu_80200_A0_A1_name, "XScale-80200 A0/A1" + string cpu_80200_name, "XScale-80200" + string cpu_80219_name, "XScale-80219" + string cpu_8032x_name, "XScale-IOP8032x Family" + string cpu_8033x_name, "XScale-IOP8033x Family" + string cpu_pxa250_name, "XScale-PXA250" + string cpu_pxa210_name, "XScale-PXA210" + string cpu_ixp42x_name, "XScale-IXP42x Family" + string cpu_ixp43x_name, "XScale-IXP43x Family" + string cpu_ixp46x_name, "XScale-IXP46x Family" + string cpu_ixp2400_name, "XScale-IXP2400" + string cpu_ixp2800_name, "XScale-IXP2800" + string cpu_pxa255_name, "XScale-PXA255" + string cpu_pxa270_name, "XScale-PXA270" .align .section ".proc.info.init", #alloc, #execinstr - .type __80200_A0_A1_proc_info,#object -__80200_A0_A1_proc_info: - .long 0x69052000 - .long 0xfffffffe - .long PMD_TYPE_SECT | \ +.macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache + .type __\name\()_proc_info,#object +__\name\()_proc_info: + .long \cpu_val + .long \cpu_mask + .long PMD_TYPE_SECT | \ PMD_SECT_BUFFERABLE | \ PMD_SECT_CACHEABLE | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ + .long PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ b __xscale_setup .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_80200_name + .long \cpu_name .long xscale_processor_functions .long v4wbi_tlb_fns .long xscale_mc_user_fns - .long xscale_80200_A0_A1_cache_fns - .size __80200_A0_A1_proc_info, . - __80200_A0_A1_proc_info - - .type __80200_proc_info,#object -__80200_proc_info: - .long 0x69052000 - .long 0xfffffff0 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __xscale_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_80200_name - .long xscale_processor_functions - .long v4wbi_tlb_fns - .long xscale_mc_user_fns - .long xscale_cache_fns - .size __80200_proc_info, . - __80200_proc_info - - .type __80219_proc_info,#object -__80219_proc_info: - .long 0x69052e20 - .long 0xffffffe0 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __xscale_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_80219_name - .long xscale_processor_functions - .long v4wbi_tlb_fns - .long xscale_mc_user_fns - .long xscale_cache_fns - .size __80219_proc_info, . - __80219_proc_info - - .type __8032x_proc_info,#object -__8032x_proc_info: - .long 0x69052420 - .long 0xfffff7e0 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __xscale_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_8032x_name - .long xscale_processor_functions - .long v4wbi_tlb_fns - .long xscale_mc_user_fns - .long xscale_cache_fns - .size __8032x_proc_info, . 
- __8032x_proc_info - - .type __8033x_proc_info,#object -__8033x_proc_info: - .long 0x69054010 - .long 0xfffffd30 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __xscale_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_8033x_name - .long xscale_processor_functions - .long v4wbi_tlb_fns - .long xscale_mc_user_fns - .long xscale_cache_fns - .size __8033x_proc_info, . - __8033x_proc_info - - .type __pxa250_proc_info,#object -__pxa250_proc_info: - .long 0x69052100 - .long 0xfffff7f0 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __xscale_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_pxa250_name - .long xscale_processor_functions - .long v4wbi_tlb_fns - .long xscale_mc_user_fns - .long xscale_cache_fns - .size __pxa250_proc_info, . - __pxa250_proc_info - - .type __pxa210_proc_info,#object -__pxa210_proc_info: - .long 0x69052120 - .long 0xfffff3f0 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __xscale_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_pxa210_name - .long xscale_processor_functions - .long v4wbi_tlb_fns - .long xscale_mc_user_fns - .long xscale_cache_fns - .size __pxa210_proc_info, . - __pxa210_proc_info - - .type __ixp2400_proc_info, #object -__ixp2400_proc_info: - .long 0x69054190 - .long 0xfffffff0 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __xscale_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_ixp2400_name - .long xscale_processor_functions - .long v4wbi_tlb_fns - .long xscale_mc_user_fns - .long xscale_cache_fns - .size __ixp2400_proc_info, . - __ixp2400_proc_info - - .type __ixp2800_proc_info, #object -__ixp2800_proc_info: - .long 0x690541a0 - .long 0xfffffff0 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __xscale_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_ixp2800_name - .long xscale_processor_functions - .long v4wbi_tlb_fns - .long xscale_mc_user_fns - .long xscale_cache_fns - .size __ixp2800_proc_info, . 
- __ixp2800_proc_info - - .type __ixp42x_proc_info, #object -__ixp42x_proc_info: - .long 0x690541c0 - .long 0xffffffc0 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __xscale_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_ixp42x_name - .long xscale_processor_functions - .long v4wbi_tlb_fns - .long xscale_mc_user_fns - .long xscale_cache_fns - .size __ixp42x_proc_info, . - __ixp42x_proc_info - - .type __ixp43x_proc_info, #object -__ixp43x_proc_info: - .long 0x69054040 - .long 0xfffffff0 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __xscale_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_ixp43x_name - .long xscale_processor_functions - .long v4wbi_tlb_fns - .long xscale_mc_user_fns - .long xscale_cache_fns - .size __ixp43x_proc_info, . - __ixp43x_proc_info - - .type __ixp46x_proc_info, #object -__ixp46x_proc_info: - .long 0x69054200 - .long 0xffffff00 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __xscale_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_ixp46x_name - .long xscale_processor_functions - .long v4wbi_tlb_fns - .long xscale_mc_user_fns - .long xscale_cache_fns - .size __ixp46x_proc_info, . - __ixp46x_proc_info - - .type __pxa255_proc_info,#object -__pxa255_proc_info: - .long 0x69052d00 - .long 0xfffffff0 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __xscale_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_pxa255_name - .long xscale_processor_functions - .long v4wbi_tlb_fns - .long xscale_mc_user_fns - .long xscale_cache_fns - .size __pxa255_proc_info, . - __pxa255_proc_info - - .type __pxa270_proc_info,#object -__pxa270_proc_info: - .long 0x69054110 - .long 0xfffffff0 - .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - .long PMD_TYPE_SECT | \ - PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ - b __xscale_setup - .long cpu_arch_name - .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP - .long cpu_pxa270_name - .long xscale_processor_functions - .long v4wbi_tlb_fns - .long xscale_mc_user_fns - .long xscale_cache_fns - .size __pxa270_proc_info, . - __pxa270_proc_info + .ifb \cache + .long xscale_cache_fns + .else + .long \cache + .endif + .size __\name\()_proc_info, . 
- __\name\()_proc_info +.endm + xscale_proc_info 80200_A0_A1, 0x69052000, 0xfffffffe, cpu_80200_name, \ + cache=xscale_80200_A0_A1_cache_fns + xscale_proc_info 80200, 0x69052000, 0xfffffff0, cpu_80200_name + xscale_proc_info 80219, 0x69052e20, 0xffffffe0, cpu_80219_name + xscale_proc_info 8032x, 0x69052420, 0xfffff7e0, cpu_8032x_name + xscale_proc_info 8033x, 0x69054010, 0xfffffd30, cpu_8033x_name + xscale_proc_info pxa250, 0x69052100, 0xfffff7f0, cpu_pxa250_name + xscale_proc_info pxa210, 0x69052120, 0xfffff3f0, cpu_pxa210_name + xscale_proc_info ixp2400, 0x69054190, 0xfffffff0, cpu_ixp2400_name + xscale_proc_info ixp2800, 0x690541a0, 0xfffffff0, cpu_ixp2800_name + xscale_proc_info ixp42x, 0x690541c0, 0xffffffc0, cpu_ixp42x_name + xscale_proc_info ixp43x, 0x69054040, 0xfffffff0, cpu_ixp43x_name + xscale_proc_info ixp46x, 0x69054200, 0xffffff00, cpu_ixp46x_name + xscale_proc_info pxa255, 0x69052d00, 0xfffffff0, cpu_pxa255_name + xscale_proc_info pxa270, 0x69054110, 0xfffffff0, cpu_pxa270_name diff --git a/arch/arm/mm/tlb-fa.S b/arch/arm/mm/tlb-fa.S index 9694f1f6f485..d3ddcf9a76ca 100644 --- a/arch/arm/mm/tlb-fa.S +++ b/arch/arm/mm/tlb-fa.S @@ -46,7 +46,6 @@ ENTRY(fa_flush_user_tlb_range) add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b - mcr p15, 0, r3, c7, c5, 6 @ invalidate BTB mcr p15, 0, r3, c7, c10, 4 @ data write barrier mov pc, lr @@ -60,16 +59,11 @@ ENTRY(fa_flush_kern_tlb_range) add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b - mcr p15, 0, r3, c7, c5, 6 @ invalidate BTB mcr p15, 0, r3, c7, c10, 4 @ data write barrier - mcr p15, 0, r3, c7, c5, 4 @ prefetch flush + mcr p15, 0, r3, c7, c5, 4 @ prefetch flush (isb) mov pc, lr __INITDATA - .type fa_tlb_fns, #object -ENTRY(fa_tlb_fns) - .long fa_flush_user_tlb_range - .long fa_flush_kern_tlb_range - .long fa_tlb_flags - .size fa_tlb_fns, . - fa_tlb_fns + /* define struct cpu_tlb_fns (see and proc-macros.S) */ + define_tlb_functions fa, fa_tlb_flags diff --git a/arch/arm/mm/tlb-v3.S b/arch/arm/mm/tlb-v3.S index c10786ec8e0a..d253995ec4ca 100644 --- a/arch/arm/mm/tlb-v3.S +++ b/arch/arm/mm/tlb-v3.S @@ -44,9 +44,5 @@ ENTRY(v3_flush_kern_tlb_range) __INITDATA - .type v3_tlb_fns, #object -ENTRY(v3_tlb_fns) - .long v3_flush_user_tlb_range - .long v3_flush_kern_tlb_range - .long v3_tlb_flags - .size v3_tlb_fns, . - v3_tlb_fns + /* define struct cpu_tlb_fns (see and proc-macros.S) */ + define_tlb_functions v3, v3_tlb_flags diff --git a/arch/arm/mm/tlb-v4.S b/arch/arm/mm/tlb-v4.S index d6c94457c2b9..17a025ade573 100644 --- a/arch/arm/mm/tlb-v4.S +++ b/arch/arm/mm/tlb-v4.S @@ -57,9 +57,5 @@ ENTRY(v4_flush_user_tlb_range) __INITDATA - .type v4_tlb_fns, #object -ENTRY(v4_tlb_fns) - .long v4_flush_user_tlb_range - .long v4_flush_kern_tlb_range - .long v4_tlb_flags - .size v4_tlb_fns, . - v4_tlb_fns + /* define struct cpu_tlb_fns (see and proc-macros.S) */ + define_tlb_functions v4, v4_tlb_flags diff --git a/arch/arm/mm/tlb-v4wb.S b/arch/arm/mm/tlb-v4wb.S index cb829ca7845d..c04598fa4d4a 100644 --- a/arch/arm/mm/tlb-v4wb.S +++ b/arch/arm/mm/tlb-v4wb.S @@ -69,9 +69,5 @@ ENTRY(v4wb_flush_kern_tlb_range) __INITDATA - .type v4wb_tlb_fns, #object -ENTRY(v4wb_tlb_fns) - .long v4wb_flush_user_tlb_range - .long v4wb_flush_kern_tlb_range - .long v4wb_tlb_flags - .size v4wb_tlb_fns, . 
- v4wb_tlb_fns + /* define struct cpu_tlb_fns (see and proc-macros.S) */ + define_tlb_functions v4wb, v4wb_tlb_flags diff --git a/arch/arm/mm/tlb-v4wbi.S b/arch/arm/mm/tlb-v4wbi.S index 60cfc4a25dd5..1f6062b6c1c1 100644 --- a/arch/arm/mm/tlb-v4wbi.S +++ b/arch/arm/mm/tlb-v4wbi.S @@ -60,9 +60,5 @@ ENTRY(v4wbi_flush_kern_tlb_range) __INITDATA - .type v4wbi_tlb_fns, #object -ENTRY(v4wbi_tlb_fns) - .long v4wbi_flush_user_tlb_range - .long v4wbi_flush_kern_tlb_range - .long v4wbi_tlb_flags - .size v4wbi_tlb_fns, . - v4wbi_tlb_fns + /* define struct cpu_tlb_fns (see and proc-macros.S) */ + define_tlb_functions v4wbi, v4wbi_tlb_flags diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S index 73d7d89b04c4..eca07f550a0b 100644 --- a/arch/arm/mm/tlb-v6.S +++ b/arch/arm/mm/tlb-v6.S @@ -54,7 +54,6 @@ ENTRY(v6wbi_flush_user_tlb_range) add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b - mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier mov pc, lr @@ -83,16 +82,11 @@ ENTRY(v6wbi_flush_kern_tlb_range) add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b - mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier - mcr p15, 0, r2, c7, c5, 4 @ prefetch flush + mcr p15, 0, r2, c7, c5, 4 @ prefetch flush (isb) mov pc, lr __INIT - .type v6wbi_tlb_fns, #object -ENTRY(v6wbi_tlb_fns) - .long v6wbi_flush_user_tlb_range - .long v6wbi_flush_kern_tlb_range - .long v6wbi_tlb_flags - .size v6wbi_tlb_fns, . - v6wbi_tlb_fns + /* define struct cpu_tlb_fns (see and proc-macros.S) */ + define_tlb_functions v6wbi, v6wbi_tlb_flags diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index 53cd5b454673..845f461f8ec1 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S @@ -48,9 +48,6 @@ ENTRY(v7wbi_flush_user_tlb_range) add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b - mov ip, #0 - ALT_SMP(mcr p15, 0, ip, c7, c1, 6) @ flush BTAC/BTB Inner Shareable - ALT_UP(mcr p15, 0, ip, c7, c5, 6) @ flush BTAC/BTB dsb mov pc, lr ENDPROC(v7wbi_flush_user_tlb_range) @@ -75,9 +72,6 @@ ENTRY(v7wbi_flush_kern_tlb_range) add r0, r0, #PAGE_SZ cmp r0, r1 blo 1b - mov r2, #0 - ALT_SMP(mcr p15, 0, r2, c7, c1, 6) @ flush BTAC/BTB Inner Shareable - ALT_UP(mcr p15, 0, r2, c7, c5, 6) @ flush BTAC/BTB dsb isb mov pc, lr @@ -85,10 +79,5 @@ ENDPROC(v7wbi_flush_kern_tlb_range) __INIT - .type v7wbi_tlb_fns, #object -ENTRY(v7wbi_tlb_fns) - .long v7wbi_flush_user_tlb_range - .long v7wbi_flush_kern_tlb_range - ALT_SMP(.long v7wbi_tlb_flags_smp) - ALT_UP(.long v7wbi_tlb_flags_up) - .size v7wbi_tlb_fns, . 
- v7wbi_tlb_fns + /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ + define_tlb_functions v7wbi, v7wbi_tlb_flags_up, flags_smp=v7wbi_tlb_flags_smp diff --git a/arch/arm/plat-mxc/include/mach/entry-macro.S b/arch/arm/plat-mxc/include/mach/entry-macro.S index 2e49e71b1b98..066d464d322d 100644 --- a/arch/arm/plat-mxc/include/mach/entry-macro.S +++ b/arch/arm/plat-mxc/include/mach/entry-macro.S @@ -78,7 +78,3 @@ movs \irqnr, \irqnr #endif .endm - - @ irq priority table (not used) - .macro irq_prio_table - .endm diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index 6af3d0b1f8d0..363c91e44efb 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c @@ -394,20 +394,15 @@ void omap3_sram_restore_context(void) } #endif /* CONFIG_PM */ -static int __init omap34xx_sram_init(void) -{ - _omap3_sram_configure_core_dpll = - omap_sram_push(omap3_sram_configure_core_dpll, - omap3_sram_configure_core_dpll_sz); - omap_push_sram_idle(); - return 0; -} -#else +#endif /* CONFIG_ARCH_OMAP3 */ + static inline int omap34xx_sram_init(void) { +#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) + omap3_sram_restore_context(); +#endif return 0; } -#endif int __init omap_sram_init(void) { diff --git a/arch/arm/plat-s3c24xx/sleep.S b/arch/arm/plat-s3c24xx/sleep.S index fd7032f84ae7..c56612569b40 100644 --- a/arch/arm/plat-s3c24xx/sleep.S +++ b/arch/arm/plat-s3c24xx/sleep.S @@ -41,31 +41,6 @@ .text - /* s3c_cpu_save * * entry: * r1 = v:p offset */ - -ENTRY(s3c_cpu_save) - stmfd sp!, { r4 - r12, lr } - ldr r3, =resume_with_mmu - bl cpu_suspend - - @@ jump to final code to send system to sleep - ldr r0, =pm_cpu_sleep - @@ldr pc, [ r0 ] - ldr r0, [ r0 ] - mov pc, r0 - - @@ return to the caller, after having the MMU - @@ turned on, this restores the last bits from the - @@ stack -resume_with_mmu: - ldmfd sp!, { r4 - r12, pc } - - .ltorg - /* sleep magic, to allow the bootloader to check for a valid * image to resume to. Must be the first word before the * s3c_cpu_resume entry.
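The plat-samsung changes that follow complete what the sleep.S hunk above starts: the open-coded s3c_cpu_save trampoline goes away because the generic ARM cpu_suspend() path now saves the CPU context and calls a platform handler itself. Below is a minimal sketch of how a platform hook plugs into that flow, assuming the 3.1-era cpu_suspend()/cpu_do_idle() declarations; mach_enter_sleep() and mach_pm_enter() are hypothetical names, not part of the patch:

#include <asm/proc-fns.h>	/* cpu_do_idle() */
#include <asm/suspend.h>	/* cpu_suspend() */

/*
 * Runs after cpu_suspend() has saved the core context.  It should
 * enter the low-power state; when the CPU wakes, execution resumes
 * through the context that cpu_suspend() saved.
 */
static int mach_enter_sleep(unsigned long arg)
{
	cpu_do_idle();		/* stand-in for a real PMU sleep sequence */
	return 0;		/* non-zero would abort the suspend */
}

static void mach_pm_enter(void)
{
	/* one call replaces the old s3c_cpu_save() + cpu_init() pairing */
	cpu_suspend(0, mach_enter_sleep);
}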
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h index 7fb6f6be8c81..f6749916d194 100644 --- a/arch/arm/plat-samsung/include/plat/pm.h +++ b/arch/arm/plat-samsung/include/plat/pm.h @@ -42,7 +42,7 @@ extern unsigned long s3c_irqwake_eintallow; /* per-cpu sleep functions */ extern void (*pm_cpu_prep)(void); -extern void (*pm_cpu_sleep)(void); +extern int (*pm_cpu_sleep)(unsigned long); /* Flags for PM Control */ @@ -52,10 +52,9 @@ extern unsigned char pm_uart_udivslot; /* true to save UART UDIVSLOT */ /* from sleep.S */ -extern int s3c_cpu_save(unsigned long *saveblk, long); extern void s3c_cpu_resume(void); -extern void s3c2410_cpu_suspend(void); +extern int s3c2410_cpu_suspend(unsigned long); /* sleep save info */ diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c index 5c0a440d6e16..5fa1742d019b 100644 --- a/arch/arm/plat-samsung/pm.c +++ b/arch/arm/plat-samsung/pm.c @@ -20,6 +20,7 @@ #include #include +#include #include #include @@ -231,7 +232,7 @@ static void __maybe_unused s3c_pm_show_resume_irqs(int start, void (*pm_cpu_prep)(void); -void (*pm_cpu_sleep)(void); +int (*pm_cpu_sleep)(unsigned long); #define any_allowed(mask, allow) (((mask) & (allow)) != (allow)) @@ -294,15 +295,11 @@ static int s3c_pm_enter(suspend_state_t state) s3c_pm_arch_stop_clocks(); - /* s3c_cpu_save will also act as our return point from when + /* this will also act as our return point from when * we resume as it saves its own register state and restores it * during the resume. */ - s3c_cpu_save(0, PLAT_PHYS_OFFSET - PAGE_OFFSET); - - /* restore the cpu state using the kernel's cpu init code. */ - - cpu_init(); + cpu_suspend(0, pm_cpu_sleep); /* restore the system state */ diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S index 9897dcfc16d6..2d30c7f6edd3 100644 --- a/arch/arm/vfp/vfphw.S +++ b/arch/arm/vfp/vfphw.S @@ -77,27 +77,27 @@ ENTRY(vfp_support_entry) bne look_for_VFP_exceptions @ VFP is already enabled DBGSTR1 "enable %x", r10 - ldr r3, last_VFP_context_address + ldr r3, vfp_current_hw_state_address orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set - ldr r4, [r3, r11, lsl #2] @ last_VFP_context pointer + ldr r4, [r3, r11, lsl #2] @ vfp_current_hw_state pointer bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled - cmp r4, r10 - beq check_for_exception @ we are returning to the same - @ process, so the registers are - @ still there. In this case, we do - @ not want to drop a pending exception. + cmp r4, r10 @ this thread owns the hw context? +#ifndef CONFIG_SMP + @ For UP, checking that this thread owns the hw context is + @ sufficient to determine that the hardware state is valid. + beq vfp_hw_state_valid + + @ On UP, we lazily save the VFP context. As a different + @ thread wants ownership of the VFP hardware, save the old + @ state if there was a previous (valid) owner. 
VFPFMXR FPEXC, r5 @ enable VFP, disable any pending @ exceptions, so we can get at the @ rest of it -#ifndef CONFIG_SMP - @ Save out the current registers to the old thread state - @ No need for SMP since this is not done lazily - DBGSTR1 "save old state %p", r4 - cmp r4, #0 - beq no_old_VFP_process + cmp r4, #0 @ if the vfp_current_hw_state is NULL + beq vfp_reload_hw @ then the hw state needs reloading VFPFSTMIA r4, r5 @ save the working registers VFPFMRX r5, FPSCR @ current status #ifndef CONFIG_CPU_FEROCEON @@ -110,13 +110,35 @@ ENTRY(vfp_support_entry) 1: #endif stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2 - @ and point r4 at the word at the - @ start of the register dump +vfp_reload_hw: + +#else + @ For SMP, if this thread does not own the hw context, then we + @ need to reload it. No need to save the old state as on SMP, + @ we always save the state when we switch away from a thread. + bne vfp_reload_hw + + @ This thread has ownership of the current hardware context. + @ However, it may have been migrated to another CPU, in which + @ case the saved state is newer than the hardware context. + @ Check this by looking at the CPU number which the state was + @ last loaded onto. + ldr ip, [r10, #VFP_CPU] + teq ip, r11 + beq vfp_hw_state_valid + +vfp_reload_hw: + @ We're loading this thread's state into the VFP hardware. Update + @ the CPU number which contains the most up-to-date VFP context. + str r11, [r10, #VFP_CPU] + + VFPFMXR FPEXC, r5 @ enable VFP, disable any pending + @ exceptions, so we can get at the + @ rest of it #endif -no_old_VFP_process: DBGSTR1 "load state %p", r10 - str r10, [r3, r11, lsl #2] @ update the last_VFP_context pointer + str r10, [r3, r11, lsl #2] @ update the vfp_current_hw_state pointer @ Load the saved state back into the VFP VFPFLDMIA r10, r5 @ reload the working registers while @ FPEXC is in a safe state @@ -132,7 +154,8 @@ no_old_VFP_process: #endif VFPFMXR FPSCR, r5 @ restore status -check_for_exception: +@ The context stored in the VFP hardware is up to date with this thread +vfp_hw_state_valid: tst r1, #FPEXC_EX bne process_exception @ might as well handle the pending @ exception before retrying branch @@ -207,8 +230,8 @@ ENTRY(vfp_save_state) ENDPROC(vfp_save_state) .align -last_VFP_context_address: - .word last_VFP_context +vfp_current_hw_state_address: + .word vfp_current_hw_state .macro tbl_branch, base, tmp, shift #ifdef CONFIG_THUMB2_KERNEL diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index f25e7ec89416..79bcb4316930 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -33,7 +33,6 @@ void vfp_support_entry(void); void vfp_null_entry(void); void (*vfp_vector)(void) = vfp_null_entry; -union vfp_state *last_VFP_context[NR_CPUS]; /* * Dual-use variable. @@ -42,6 +41,46 @@ union vfp_state *last_VFP_context[NR_CPUS]; */ unsigned int VFP_arch; +/* + * The pointer to the vfpstate structure of the thread which currently + * owns the context held in the VFP hardware, or NULL if the hardware + * context is invalid. + * + * For UP, this is sufficient to tell which thread owns the VFP context. + * However, for SMP, we also need to check the CPU number stored in the + * saved state to catch migrations. + */ +union vfp_state *vfp_current_hw_state[NR_CPUS]; + +/* + * Is the thread's most up-to-date state stored in this CPU's hardware? + * Must be called from non-preemptible context.
+ */ +static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread) +{ +#ifdef CONFIG_SMP + if (thread->vfpstate.hard.cpu != cpu) + return false; +#endif + return vfp_current_hw_state[cpu] == &thread->vfpstate; +} + +/* + * Force a reload of the VFP context from the thread structure. We do + * this by ensuring that access to the VFP hardware is disabled, and + * clearing vfp_current_hw_state. Must be called from non-preemptible context. + */ +static void vfp_force_reload(unsigned int cpu, struct thread_info *thread) +{ + if (vfp_state_in_hw(cpu, thread)) { + fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); + vfp_current_hw_state[cpu] = NULL; + } +#ifdef CONFIG_SMP + thread->vfpstate.hard.cpu = NR_CPUS; +#endif +} + /* * Per-thread VFP initialization. */ @@ -50,21 +89,27 @@ static void vfp_thread_flush(struct thread_info *thread) union vfp_state *vfp = &thread->vfpstate; unsigned int cpu; + /* + * Disable VFP to ensure we initialize it first. We must ensure + * that the modification of vfp_current_hw_state[] and hardware + * disable are done for the same CPU and without preemption. + * + * Do this first to ensure that preemption won't overwrite our + * state saving should access to the VFP be enabled at this point. + */ + cpu = get_cpu(); + if (vfp_current_hw_state[cpu] == vfp) + vfp_current_hw_state[cpu] = NULL; + fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); + put_cpu(); + memset(vfp, 0, sizeof(union vfp_state)); vfp->hard.fpexc = FPEXC_EN; vfp->hard.fpscr = FPSCR_ROUND_NEAREST; - - /* - * Disable VFP to ensure we initialize it first. We must ensure - * that the modification of last_VFP_context[] and hardware disable - * are done for the same CPU and without preemption. - */ - cpu = get_cpu(); - if (last_VFP_context[cpu] == vfp) - last_VFP_context[cpu] = NULL; - fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); - put_cpu(); +#ifdef CONFIG_SMP + vfp->hard.cpu = NR_CPUS; +#endif } static void vfp_thread_exit(struct thread_info *thread) @@ -73,8 +118,8 @@ static void vfp_thread_exit(struct thread_info *thread) union vfp_state *vfp = &thread->vfpstate; unsigned int cpu = get_cpu(); - if (last_VFP_context[cpu] == vfp) - last_VFP_context[cpu] = NULL; + if (vfp_current_hw_state[cpu] == vfp) + vfp_current_hw_state[cpu] = NULL; put_cpu(); } @@ -84,6 +129,9 @@ static void vfp_thread_copy(struct thread_info *thread) vfp_sync_hwstate(parent); thread->vfpstate = parent->vfpstate; +#ifdef CONFIG_SMP + thread->vfpstate.hard.cpu = NR_CPUS; +#endif } /* @@ -129,17 +177,8 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) * case the thread migrates to a different CPU. The * restoring is done lazily.
- */ - if (thread->vfpstate.hard.cpu != cpu) - last_VFP_context[cpu] = NULL; + if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) + vfp_save_state(vfp_current_hw_state[cpu], fpexc); #endif /* @@ -415,7 +454,7 @@ static int vfp_pm_suspend(void) } /* clear any information we had about last context state */ - memset(last_VFP_context, 0, sizeof(last_VFP_context)); + memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state)); return 0; } @@ -443,15 +482,15 @@ static void vfp_pm_init(void) static inline void vfp_pm_init(void) { } #endif /* CONFIG_PM */ +/* + * Ensure that the VFP state stored in 'thread->vfpstate' is up to date + * with the hardware state. + */ void vfp_sync_hwstate(struct thread_info *thread) { unsigned int cpu = get_cpu(); - /* - * If the thread we're interested in is the current owner of the - * hardware VFP state, then we need to save its state. - */ - if (last_VFP_context[cpu] == &thread->vfpstate) { + if (vfp_state_in_hw(cpu, thread)) { u32 fpexc = fmrx(FPEXC); /* @@ -465,36 +504,13 @@ void vfp_sync_hwstate(struct thread_info *thread) put_cpu(); } +/* Ensure that the thread reloads the hardware VFP state on the next use. */ void vfp_flush_hwstate(struct thread_info *thread) { unsigned int cpu = get_cpu(); - /* - * If the thread we're interested in is the current owner of the - * hardware VFP state, then we need to save its state. - */ - if (last_VFP_context[cpu] == &thread->vfpstate) { - u32 fpexc = fmrx(FPEXC); + vfp_force_reload(cpu, thread); - fmxr(FPEXC, fpexc & ~FPEXC_EN); - - /* - * Set the context to NULL to force a reload the next time - * the thread uses the VFP. - */ - last_VFP_context[cpu] = NULL; - } - -#ifdef CONFIG_SMP - /* - * For SMP we still have to take care of the case where the thread - * migrates to another CPU and then back to the original CPU on which - * the last VFP user is still the same thread. Mark the thread VFP - * state as belonging to a non-existent CPU so that the saved one will - * be reloaded in the above case. - */ - thread->vfpstate.hard.cpu = NR_CPUS; -#endif put_cpu(); } @@ -513,8 +529,7 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action, void *hcpu) { if (action == CPU_DYING || action == CPU_DYING_FROZEN) { - unsigned int cpu = (long)hcpu; - last_VFP_context[cpu] = NULL; + vfp_force_reload((long)hcpu, current_thread_info()); } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) vfp_enable(NULL); return NOTIFY_OK; @@ -582,7 +597,6 @@ static int __init vfp_init(void) elf_hwcap |= HWCAP_VFPv3D16; } #endif -#ifdef CONFIG_NEON /* * Check for the presence of the Advanced SIMD * load/store instructions, integer and single @@ -590,10 +604,13 @@ static int __init vfp_init(void) * for NEON if the hardware has the MVFR registers. */ if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { +#ifdef CONFIG_NEON if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) elf_hwcap |= HWCAP_NEON; - } #endif + if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000) + elf_hwcap |= HWCAP_VFPv4; + } } return 0; } diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index fef7140eb1d0..56e9a4168264 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1184,7 +1184,15 @@ static int __devinit mmci_probe(struct amba_device *dev, } mmc->ops = &mmci_ops; - mmc->f_min = (host->mclk + 511) / 512; + /* + * The ARM and ST versions of the block have slightly different + * clock divider equations which means that the minimum divider + * differs too. 
+ */ + if (variant->st_clkdiv) + mmc->f_min = DIV_ROUND_UP(host->mclk, 257); + else + mmc->f_min = DIV_ROUND_UP(host->mclk, 512); /* * If the platform data supplies a maximum operating * frequency, this takes precedence. Else, we fall back
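For reference, the two divider equations behind those f_min bounds: the ARM cell computes f = mclk / (2 * (clkdiv + 1)) while the ST variant computes f = mclk / (clkdiv + 2), and CLKDIV is an 8-bit field, so with the maximum divider of 255 the slowest reachable rates are mclk/512 and mclk/257. A standalone sketch of the arithmetic (the helper names and the 48 MHz clock are illustrative, not driver code):

#include <stdio.h>

/* Rate produced by each variant of the MMCI cell for an 8-bit CLKDIV. */
static unsigned int arm_rate(unsigned int mclk, unsigned int clkdiv)
{
	return mclk / (2 * (clkdiv + 1));	/* ARM divider equation */
}

static unsigned int st_rate(unsigned int mclk, unsigned int clkdiv)
{
	return mclk / (clkdiv + 2);		/* ST divider equation */
}

int main(void)
{
	unsigned int mclk = 48000000;	/* assumed 48 MHz input clock */

	/* With CLKDIV saturated at 255 these print mclk/512 and mclk/257,
	 * matching the DIV_ROUND_UP() bounds chosen in mmci_probe() above. */
	printf("ARM f_min: %u Hz\n", arm_rate(mclk, 255));
	printf("ST  f_min: %u Hz\n", st_rate(mclk, 255));
	return 0;
}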