Initial ARC Linux port with some fixes on top for 3.9-rc1

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1.4.11 (GNU/Linux)
 
 iQIcBAABAgAGBQJRMZYbAAoJEGnX8d3iisJeFj8P/R1hWohDDUc8pG3+ov9Y2Brt
 g7oIVw1udlKIk3HhVwsyT14/UHunfcTCONKKKGmmbfRrLJSMMsSlXvYoAQLozokf
 TuaO3Xt5IfERROqTrCDSwdNaAmwIZGsIuI9jWKo4qsXovAL0nc3sR527qMI1o7OE
 9X5eIqOJ/rPvOjXYrPqXmvO/DZKYG8PNTH4PYqePes31CsAmDXWBIlgYmgvF3th3
 ptwKPgRU/c2wKNpDDJXVQg/bcg9NI2cCnndNrjgXZgyUQrC37ZTdt/IOF5w6FgIW
 6i6UbDKXn8MgQAhrXx0Ns/+0kSJZ7eBWmj8hLyrxUzOYlF4rCs/il6ofDRaMO6fv
 9LmbNZXYnGICzm1YAxZRK7dm13IbDnltmMc81vISBpJSMTBgqzLWobHnq5/67Wh4
 2oUkoc2Tfaw70FnRCewX0x4Qop2YXmXl1KBwdecvzdcKi6Yg+rRH08ur/0yyCyx7
 +vAQpPVIuVqCc916qwmCPFaf1UMNnmMStxNH7D1AQHvi1G372NxfXizdYyKFRY9N
 f5Q+6DTo1xh2AxuGieSZxBoeK0Rlp4DWTOBD4MMz29y7BRX7LK1U2iS+nW0g8uir
 3RdYeAqyCxlJtjJNQX9U8ZT54jUPZgvJWU0udesRN1CBdOSQMjM9OyZsLLRtLeX1
 ww2tc7zqhBUyjBej6Itg
 =NKkW
 -----END PGP SIGNATURE-----

Merge tag 'arc-v3.9-rc1-late' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull new ARC architecture from Vineet Gupta:
 "Initial ARC Linux port with some fixes on top for 3.9-rc1:

  I would like to introduce the Linux port to ARC Processors (from
  Synopsys) for 3.9-rc1.  The patch-set has been discussed on the public
  lists since November and has received a fair bit of review, especially
  from Arnd, tglx, Al and other subsystem maintainers for DeviceTree, kgdb...

  The arch bits are in arch/arc, with some asm-generic changes (acked by
  Arnd) and a minor change to PARISC (acked by Helge).

  The series is a touch bigger than usual for a new port, for two main reasons:

   1. It enables a basic kernel in the first sub-series and adds
      ptrace/kgdb/... later.

   2. Some of the fallout of review (DeviceTree support, multi-platform-
      image support) was added on top of the original series, primarily to
      record the revision history.

  This updated pull request additionally contains

   - fixes due to our GNU tools catching up with the new syscall/ptrace
     ABI

   - some (minor) cross-arch Kconfig updates."

* tag 'arc-v3.9-rc1-late' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc: (82 commits)
  ARC: split elf.h into uapi and export it for userspace
  ARC: Fixup the current ABI version
  ARC: gdbserver using regset interface possibly broken
  ARC: Kconfig cleanup tracking cross-arch Kconfig pruning in merge window
  ARC: make a copy of flat DT
  ARC: [plat-arcfpga] DT arc-uart bindings change: "baud" => "current-speed"
  ARC: Ensure CONFIG_VIRT_TO_BUS is not enabled
  ARC: Fix pt_orig_r8 access
  ARC: [3.9] Fallout of hlist iterator update
  ARC: 64bit RTSC timestamp hardware issue
  ARC: Don't fiddle with non-existent caches
  ARC: Add self to MAINTAINERS
  ARC: Provide a default serial.h for uart drivers needing BASE_BAUD
  ARC: [plat-arcfpga] defconfig for fully loaded ARC Linux
  ARC: [Review] Multi-platform image #8: platform registers SMP callbacks
  ARC: [Review] Multi-platform image #7: SMP common code to use callbacks
  ARC: [Review] Multi-platform image #6: cpu-to-dma-addr optional
  ARC: [Review] Multi-platform image #5: NR_IRQS defined by ARC core
  ARC: [Review] Multi-platform image #4: Isolate platform headers
  ARC: [Review] Multi-platform image #3: switch to board callback
  ...
commit e23b62256a by Linus Torvalds, 2013-03-02 07:58:56 -08:00
147 changed files with 19552 additions and 1 deletion

@@ -0,0 +1,24 @@
* ARC700 incore Interrupt Controller
The core interrupt controller provides 32 prioritised interrupts (2 levels)
to ARC700 core.
Properties:
- compatible: "snps,arc700-intc"
- interrupt-controller: This is an interrupt controller.
- #interrupt-cells: Must be <1>.
The single-cell "interrupts" property of a device specifies the IRQ number,
between 0 and 31.
The intc is accessed via the special ARC AUX register interface, hence the "reg"
property is not specified.
Example:
intc: interrupt-controller {
compatible = "snps,arc700-intc";
interrupt-controller;
#interrupt-cells = <1>;
};

@@ -7682,6 +7682,12 @@ F: lib/swiotlb.c
F: arch/*/kernel/pci-swiotlb.c
F: include/linux/swiotlb.h
SYNOPSYS ARC ARCHITECTURE
M: Vineet Gupta <vgupta@synopsys.com>
L: linux-snps-arc@vger.kernel.org
S: Supported
F: arch/arc/
SYSV FILESYSTEM
M: Christoph Hellwig <hch@infradead.org>
S: Maintained

arch/arc/Kbuild (new file)
@@ -0,0 +1,2 @@
obj-y += kernel/
obj-y += mm/

arch/arc/Kconfig (new file)
@@ -0,0 +1,453 @@
#
# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
config ARC
def_bool y
select CLONE_BACKWARDS
# ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
select DEVTMPFS if !INITRAMFS_SOURCE=""
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
select GENERIC_FIND_FIRST_BIT
# for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP
select GENERIC_IRQ_SHOW
select GENERIC_KERNEL_EXECVE
select GENERIC_KERNEL_THREAD
select GENERIC_PENDING_IRQ if SMP
select GENERIC_SMP_IDLE_THREAD
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
select HAVE_GENERIC_HARDIRQS
select HAVE_IOREMAP_PROT
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_MEMBLOCK
select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select IRQ_DOMAIN
select MODULES_USE_ELF_RELA
select NO_BOOTMEM
select OF
select OF_EARLY_FLATTREE
select PERF_USE_VMALLOC
config SCHED_OMIT_FRAME_POINTER
def_bool y
config GENERIC_CSUM
def_bool y
config RWSEM_GENERIC_SPINLOCK
def_bool y
config ARCH_FLATMEM_ENABLE
def_bool y
config MMU
def_bool y
config NO_IOPORT
def_bool y
config GENERIC_CALIBRATE_DELAY
def_bool y
config GENERIC_HWEIGHT
def_bool y
config BINFMT_ELF
def_bool y
config STACKTRACE_SUPPORT
def_bool y
select STACKTRACE
config HAVE_LATENCYTOP_SUPPORT
def_bool y
config NO_DMA
def_bool n
source "init/Kconfig"
source "kernel/Kconfig.freezer"
menu "ARC Architecture Configuration"
menu "ARC Platform/SoC/Board"
source "arch/arc/plat-arcfpga/Kconfig"
#New platform adds here
endmenu
menu "ARC CPU Configuration"
choice
prompt "ARC Core"
default ARC_CPU_770
config ARC_CPU_750D
bool "ARC750D"
help
Support for ARC750 core
config ARC_CPU_770
bool "ARC770"
select ARC_CPU_REL_4_10
help
Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
This core has a bunch of cool new features:
-MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
Shared Address Spaces (for sharing TLB entries in MMU)
-Caches: New Prog Model, Region Flush
-Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
endchoice
config CPU_BIG_ENDIAN
bool "Enable Big Endian Mode"
default n
help
Build kernel for Big Endian Mode of ARC CPU
# If a platform can't work with 0x8000_0000 based dma_addr_t
config ARC_PLAT_NEEDS_CPU_TO_DMA
bool
config SMP
bool "Symmetric Multi-Processing (Incomplete)"
default n
select USE_GENERIC_SMP_HELPERS
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
you have a system with more than one CPU, say Y.
if SMP
config ARC_HAS_COH_CACHES
def_bool n
config ARC_HAS_COH_LLSC
def_bool n
config ARC_HAS_COH_RTSC
def_bool n
config ARC_HAS_REENTRANT_IRQ_LV2
def_bool n
endif
config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
depends on SMP
default "2"
menuconfig ARC_CACHE
bool "Enable Cache Support"
default y
# if SMP, cache enabled ONLY if ARC implementation has cache coherency
depends on !SMP || ARC_HAS_COH_CACHES
if ARC_CACHE
config ARC_CACHE_LINE_SHIFT
int "Cache Line Length (as power of 2)"
range 5 7
default "6"
help
Starting with ARC700 4.9, the cache line length is configurable.
This option specifies "N", with line length = 2^N,
so line lengths of 32, 64 and 128 are specified by 5, 6 and 7 respectively.
Linux only supports the same line length for I and D caches.
config ARC_HAS_ICACHE
bool "Use Instruction Cache"
default y
config ARC_HAS_DCACHE
bool "Use Data Cache"
default y
config ARC_CACHE_PAGES
bool "Per Page Cache Control"
default y
depends on ARC_HAS_ICACHE || ARC_HAS_DCACHE
help
This can be used to over-ride the global I/D Cache Enable on a
per-page basis (but only for pages accessed via MMU such as
Kernel Virtual address or User Virtual Address)
TLB entries have a per-page Cache Enable Bit.
Note that Global I/D ENABLE + Per Page DISABLE works, but the converse,
Global DISABLE + Per Page ENABLE, won't work.
endif #ARC_CACHE
config ARC_HAS_ICCM
bool "Use ICCM"
help
Single Cycle RAMS to store Fast Path Code
default n
config ARC_ICCM_SZ
int "ICCM Size in KB"
default "64"
depends on ARC_HAS_ICCM
config ARC_HAS_DCCM
bool "Use DCCM"
help
Single Cycle RAMS to store Fast Path Data
default n
config ARC_DCCM_SZ
int "DCCM Size in KB"
default "64"
depends on ARC_HAS_DCCM
config ARC_DCCM_BASE
hex "DCCM map address"
default "0xA0000000"
depends on ARC_HAS_DCCM
config ARC_HAS_HW_MPY
bool "Use Hardware Multiplier (Normal or Faster XMAC)"
default y
help
Influences how gcc generates code for MPY operations.
If enabled, MPYxx insns are generated, provided by the Standard/XMAC
multiplier. Otherwise a software multiply library is used.
choice
prompt "ARC700 MMU Version"
default ARC_MMU_V3 if ARC_CPU_770
default ARC_MMU_V2 if ARC_CPU_750D
config ARC_MMU_V1
bool "MMU v1"
help
Orig ARC700 MMU
config ARC_MMU_V2
bool "MMU v2"
help
Fixed the deficiency of v1 - possible thrashing in a memcpy scenario
when 2 D-TLB and 1 I-TLB entries index into the same 2-way set.
config ARC_MMU_V3
bool "MMU v3"
depends on ARC_CPU_770
help
Introduced with ARC700 4.10: New Features
Variable Page size (1k-16k), var JTLB size 128 x (2 or 4)
Shared Address Spaces (SASID)
endchoice
choice
prompt "MMU Page Size"
default ARC_PAGE_SIZE_8K
config ARC_PAGE_SIZE_8K
bool "8KB"
help
Choose between 8k vs 16k
config ARC_PAGE_SIZE_16K
bool "16KB"
depends on ARC_MMU_V3
config ARC_PAGE_SIZE_4K
bool "4KB"
depends on ARC_MMU_V3
endchoice
config ARC_COMPACT_IRQ_LEVELS
bool "ARCompact IRQ Priorities: High(2)/Low(1)"
default n
# Timer HAS to be high priority, for any other high priority config
select ARC_IRQ3_LV2
# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
if ARC_COMPACT_IRQ_LEVELS
config ARC_IRQ3_LV2
bool
config ARC_IRQ5_LV2
bool
config ARC_IRQ6_LV2
bool
endif
config ARC_FPU_SAVE_RESTORE
bool "Enable FPU state persistence across context switch"
default n
help
The Double Precision Floating Point unit has dedicated regs which
need to be saved/restored across context-switch.
Note that the ARC FPU is overly simplistic, unlike say x86, which has
hardware pieces to allow software to conditionally save/restore,
based on actual usage of the FPU by a task. Thus our implementation does
this for all tasks in the system.
menuconfig ARC_CPU_REL_4_10
bool "Enable support for Rel 4.10 features"
default n
help
-ARC770 (and dependent features) enabled
-ARC750 also shares some of the new features with 770
config ARC_HAS_LLSC
bool "Insn: LLOCK/SCOND (efficient atomic ops)"
default y
depends on ARC_CPU_770
# if SMP, enable LLSC ONLY if ARC implementation has coherent atomics
depends on !SMP || ARC_HAS_COH_LLSC
config ARC_HAS_SWAPE
bool "Insn: SWAPE (endian-swap)"
default y
depends on ARC_CPU_REL_4_10
config ARC_HAS_RTSC
bool "Insn: RTSC (64-bit r/o cycle counter)"
default y
depends on ARC_CPU_REL_4_10
# if SMP, enable RTSC only if counter is coherent across cores
depends on !SMP || ARC_HAS_COH_RTSC
endmenu # "ARC CPU Configuration"
config LINUX_LINK_BASE
hex "Linux Link Address"
default "0x80000000"
help
ARC700 divides the 32-bit physical address space into two equal halves
-Lower 2G (0 - 0x7FFF_FFFF) is user virtual, translated by the MMU
-Upper 2G (0x8000_0000 onwards) is untranslated, for the kernel
Typically the Linux kernel is linked at the start of the untranslated region,
hence the default value of 0x8000_0000.
However some customers have peripherals mapped at this addr, so
Linux needs to be scooted a bit.
If you don't know what the above means, leave this setting alone.
config ARC_CURR_IN_REG
bool "Dedicate Register r25 for current_task pointer"
default y
help
This reserves register r25 to point to the current task in
kernel mode, saving a memory access for each such reference.
config ARC_MISALIGN_ACCESS
bool "Emulate unaligned memory access (userspace only)"
default n
select SYSCTL_ARCH_UNALIGN_NO_WARN
select SYSCTL_ARCH_UNALIGN_ALLOW
help
This enables misaligned 16 & 32 bit memory access from user space.
Use only if absolutely necessary, as it will be very slow and can also
hide potential bugs in code.
config ARC_STACK_NONEXEC
bool "Make stack non-executable"
default n
help
Disables execute permission on the stack/heap of processes;
these permissions are enabled by default.
config HZ
int "Timer Frequency"
default 100
config ARC_METAWARE_HLINK
bool "Support for Metaware debugger assisted Host access"
default n
help
This option allows Linux userland apps to directly access the
host file system (open/creat/read/write etc.) with help from the
Metaware Debugger. This can come in handy for Linux-host communication
when there is no real usable peripheral such as EMAC.
menuconfig ARC_DBG
bool "ARC debugging"
default y
config ARC_DW2_UNWIND
bool "Enable DWARF specific kernel stack unwind"
depends on ARC_DBG
default y
select KALLSYMS
help
Compiles the kernel with DWARF unwind information and can be used
to get stack backtraces.
If you say Y here the resulting kernel image will be slightly larger
but not slower, and it will give very useful debugging information.
If you don't debug the kernel, you can say N, but we may not be able
to solve problems without frame unwind information
config ARC_DBG_TLB_PARANOIA
bool "Paranoia Checks in Low Level TLB Handlers"
depends on ARC_DBG
default n
config ARC_DBG_TLB_MISS_COUNT
bool "Profile TLB Misses"
default n
select DEBUG_FS
depends on ARC_DBG
help
Counts the number of I and D TLB misses and exports them via debugfs.
The counters can be cleared via debugfs as well.
config CMDLINE
string "Kernel command line to built-in"
default "print-fatal-signals=1"
help
The default command line which will be appended to the optional
u-boot provided command line (see below)
config CMDLINE_UBOOT
bool "Support U-boot kernel command line passing"
default n
help
If you are using U-boot (www.denx.de) and wish to pass the kernel
command line from the U-boot environment to the Linux kernel then
switch this option on.
ARC U-boot will setup the cmdline in RAM/flash and set r2 to point
to it. kernel startup code will copy the string into cmdline buffer
and also append CONFIG_CMDLINE.
config ARC_BUILTIN_DTB_NAME
string "Built in DTB"
help
Set the name of the DTB to embed in the vmlinux binary.
Leaving it blank selects the minimal "skeleton" dtb.
source "kernel/Kconfig.preempt"
endmenu # "ARC Architecture Configuration"
source "mm/Kconfig"
source "net/Kconfig"
source "drivers/Kconfig"
source "fs/Kconfig"
source "arch/arc/Kconfig.debug"
source "security/Kconfig"
source "crypto/Kconfig"
source "lib/Kconfig"

arch/arc/Kconfig.debug (new file)
@@ -0,0 +1,34 @@
menu "Kernel hacking"
source "lib/Kconfig.debug"
config EARLY_PRINTK
bool "Early printk" if EMBEDDED
default y
help
Write kernel log output directly into the VGA buffer or to a serial
port.
This is useful for kernel debugging when your machine crashes very
early before the console code is initialized. For normal operation
it is not recommended because it looks ugly and doesn't cooperate
with klogd/syslogd or the X server. You should normally say N here,
unless you want to debug such a crash.
config DEBUG_STACKOVERFLOW
bool "Check for stack overflows"
depends on DEBUG_KERNEL
help
This option will cause messages to be printed if free stack space
drops below a certain limit.
config 16KSTACKS
bool "Use 16Kb for kernel stacks instead of 8Kb"
help
If you say Y here the kernel will use a 16Kb stacksize for the
kernel stack attached to each process/thread. The default is 8K.
This increases the resident kernel footprint, will cause fewer
threads to run on the system, and also increases the pressure
on the VM subsystem for higher-order allocations.
endmenu

arch/arc/Makefile (new file)
@@ -0,0 +1,126 @@
#
# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
UTS_MACHINE := arc
KBUILD_DEFCONFIG := fpga_defconfig
cflags-y += -mA7 -fno-common -pipe -fno-builtin -D__linux__
LINUXINCLUDE += -include ${src}/arch/arc/include/asm/defines.h
ifdef CONFIG_ARC_CURR_IN_REG
# For a global register definition, make sure it gets passed to every file
# We had a customer reported bug where some code built in kernel was NOT using
# any kernel headers, and missing the r25 global register
# Can't do unconditionally (like above) because of recursive include issues
# due to <linux/thread_info.h>
LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h
endif
atleast_gcc44 := $(call cc-ifversion, -gt, 0402, y)
cflags-$(atleast_gcc44) += -fsection-anchors
cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
cflags-$(CONFIG_ARC_HAS_RTSC) += -mrtsc
cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
# Generic build system uses -O2, we want -O3
cflags-y += -O3
endif
# small data is default for elf32 tool-chain. If not usable, disable it
# This also allows repurposing GP as scratch reg to gcc reg allocator
disable_small_data := y
cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp
cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian
ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
# STAR 9000518362:
# arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept
# --build-id w/o "-marclinux".
# Default arc-elf32-ld is OK
ldflags-y += -marclinux
ARC_LIBGCC := -mA7
cflags-$(CONFIG_ARC_HAS_HW_MPY) += -multcost=16
ifndef CONFIG_ARC_HAS_HW_MPY
cflags-y += -mno-mpy
# newlib for ARC700 assumes MPY to be always present, which is generally true
# However, if someone really doesn't want MPY, we need to use the 600 ver
# which coupled with -mno-mpy will use mpy emulation
# With gcc 4.4.7, -mno-mpy is enough to make any other related adjustments,
# e.g. increased cost of MPY. With gcc 4.2.1 this had to be explicitly hinted
ARC_LIBGCC := -marc600
ifneq ($(atleast_gcc44),y)
cflags-y += -multcost=30
endif
endif
LIBGCC := $(shell $(CC) $(ARC_LIBGCC) $(cflags-y) --print-libgcc-file-name)
# Modules with short calls might break for calls into builtin-kernel
KBUILD_CFLAGS_MODULE += -mlong-calls
# Finally dump everything into the kernel build system
KBUILD_CFLAGS += $(cflags-y)
KBUILD_AFLAGS += $(KBUILD_CFLAGS)
LDFLAGS += $(ldflags-y)
head-y := arch/arc/kernel/head.o
# See arch/arc/Kbuild for content of core part of the kernel
core-y += arch/arc/
# w/o this dtb won't embed into kernel binary
core-y += arch/arc/boot/dts/
core-$(CONFIG_ARC_PLAT_FPGA_LEGACY) += arch/arc/plat-arcfpga/
drivers-$(CONFIG_OPROFILE) += arch/arc/oprofile/
libs-y += arch/arc/lib/ $(LIBGCC)
# Default target for make without any arguments.
KBUILD_IMAGE := bootpImage
all: $(KBUILD_IMAGE)
boot := arch/arc/boot
bootpImage: vmlinux
uImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
%.dtb %.dtb.S %.dtb.o: scripts
$(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
dtbs: scripts
$(Q)$(MAKE) $(build)=$(boot)/dts dtbs
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
# Hacks to enable final link due to absence of link-time branch relaxation
# and gcc choosing optimal(shorter) branches at -O3
#
# vineetg Feb 2010: -mlong-calls switched off for overall kernel build
# However lib/decompress_inflate.o (.init.text) calls
# zlib_inflate_workspacesize (.text) causing relocation errors.
# Thus forcing all external calls in this file to be long calls
export CFLAGS_decompress_inflate.o = -mmedium-calls
export CFLAGS_initramfs.o = -mmedium-calls
ifdef CONFIG_SMP
export CFLAGS_core.o = -mmedium-calls
endif

arch/arc/boot/Makefile (new file)
@@ -0,0 +1,26 @@
targets := vmlinux.bin vmlinux.bin.gz uImage
# uImage build relies on mkimage being available on your host for the ARC target
# You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
# and make sure it's reachable from your PATH
MKIMAGE := $(srctree)/scripts/mkuboot.sh
OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
LINUX_START_TEXT = $$(readelf -h vmlinux | \
grep "Entry point address" | grep -o 0x.*)
UIMAGE_LOADADDR = $(CONFIG_LINUX_LINK_BASE)
UIMAGE_ENTRYADDR = $(LINUX_START_TEXT)
UIMAGE_COMPRESSION = gzip
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
$(obj)/uImage: $(obj)/vmlinux.bin.gz FORCE
$(call if_changed,uimage)
PHONY += FORCE

@@ -0,0 +1,13 @@
# Built-in dtb
builtindtb-y := angel4
ifneq ($(CONFIG_ARC_BUILTIN_DTB_NAME),"")
builtindtb-y := $(patsubst "%",%,$(CONFIG_ARC_BUILTIN_DTB_NAME))
endif
obj-y += $(builtindtb-y).dtb.o
targets += $(builtindtb-y).dtb
dtbs: $(addprefix $(obj)/, $(builtindtb-y).dtb)
clean-files := *.dtb

@@ -0,0 +1,55 @@
/*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/dts-v1/;
/include/ "skeleton.dtsi"
/ {
compatible = "snps,arc-angel4";
clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&intc>;
chosen {
bootargs = "console=ttyARC0,115200n8";
};
aliases {
serial0 = &arcuart0;
};
memory {
device_type = "memory";
reg = <0x00000000 0x10000000>; /* 256M */
};
fpga {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
/* child and parent address space 1:1 mapped */
ranges;
intc: interrupt-controller {
compatible = "snps,arc700-intc";
interrupt-controller;
#interrupt-cells = <1>;
};
arcuart0: serial@c0fc1000 {
compatible = "snps,arc-uart";
reg = <0xc0fc1000 0x100>;
interrupts = <5>;
clock-frequency = <80000000>;
current-speed = <115200>;
status = "okay";
};
};
};

@@ -0,0 +1,10 @@
/*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/dts-v1/;
/include/ "skeleton.dtsi"

@@ -0,0 +1,37 @@
/*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* Skeleton device tree; the bare minimum needed to boot; just include and
* add a compatible value.
*/
/ {
compatible = "snps,arc";
clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };
aliases { };
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
device_type = "cpu";
compatible = "snps,arc770d";
reg = <0>;
};
};
memory {
device_type = "memory";
reg = <0x00000000 0x10000000>; /* 256M */
};
};

@@ -0,0 +1,61 @@
CONFIG_CROSS_COMPILE="arc-elf32-"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
CONFIG_MODULES=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARC_PLAT_FPGA_LEGACY=y
CONFIG_ARC_BOARD_ML509=y
# CONFIG_ARC_HAS_RTSC is not set
CONFIG_ARC_BUILTIN_DTB_NAME="angel4"
# CONFIG_COMPACTION is not set
# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_UNIX_DIAG=y
CONFIG_NET_KEY=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_BLK_DEV is not set
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_ARC=y
CONFIG_SERIAL_ARC_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_TMPFS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_XZ_DEC=y

@@ -0,0 +1,49 @@
generic-y += auxvec.h
generic-y += bugs.h
generic-y += bitsperlong.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += fcntl.h
generic-y += fb.h
generic-y += ftrace.h
generic-y += hardirq.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += param.h
generic-y += parport.h
generic-y += pci.h
generic-y += percpu.h
generic-y += poll.h
generic-y += posix_types.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sembuf.h
generic-y += shmbuf.h
generic-y += shmparam.h
generic-y += siginfo.h
generic-y += socket.h
generic-y += sockios.h
generic-y += stat.h
generic-y += statfs.h
generic-y += termbits.h
generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += ucontext.h
generic-y += user.h
generic-y += vga.h
generic-y += xor.h

@@ -0,0 +1,433 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_ARCREGS_H
#define _ASM_ARC_ARCREGS_H
#ifdef __KERNEL__
/* Build Configuration Registers */
#define ARC_REG_DCCMBASE_BCR 0x61 /* DCCM Base Addr */
#define ARC_REG_CRC_BCR 0x62
#define ARC_REG_DVFB_BCR 0x64
#define ARC_REG_EXTARITH_BCR 0x65
#define ARC_REG_VECBASE_BCR 0x68
#define ARC_REG_PERIBASE_BCR 0x69
#define ARC_REG_FP_BCR 0x6B /* Single-Precision FPU */
#define ARC_REG_DPFP_BCR 0x6C /* Dbl Precision FPU */
#define ARC_REG_MMU_BCR 0x6f
#define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */
#define ARC_REG_TIMERS_BCR 0x75
#define ARC_REG_ICCM_BCR 0x78
#define ARC_REG_XY_MEM_BCR 0x79
#define ARC_REG_MAC_BCR 0x7a
#define ARC_REG_MUL_BCR 0x7b
#define ARC_REG_SWAP_BCR 0x7c
#define ARC_REG_NORM_BCR 0x7d
#define ARC_REG_MIXMAX_BCR 0x7e
#define ARC_REG_BARREL_BCR 0x7f
#define ARC_REG_D_UNCACH_BCR 0x6A
/* status32 Bits Positions */
#define STATUS_H_BIT 0 /* CPU Halted */
#define STATUS_E1_BIT 1 /* Int 1 enable */
#define STATUS_E2_BIT 2 /* Int 2 enable */
#define STATUS_A1_BIT 3 /* Int 1 active */
#define STATUS_A2_BIT 4 /* Int 2 active */
#define STATUS_AE_BIT 5 /* Exception active */
#define STATUS_DE_BIT 6 /* PC is in delay slot */
#define STATUS_U_BIT 7 /* User/Kernel mode */
#define STATUS_L_BIT 12 /* Loop inhibit */
/* These masks correspond to the status word(STATUS_32) bits */
#define STATUS_H_MASK (1<<STATUS_H_BIT)
#define STATUS_E1_MASK (1<<STATUS_E1_BIT)
#define STATUS_E2_MASK (1<<STATUS_E2_BIT)
#define STATUS_A1_MASK (1<<STATUS_A1_BIT)
#define STATUS_A2_MASK (1<<STATUS_A2_BIT)
#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
#define STATUS_DE_MASK (1<<STATUS_DE_BIT)
#define STATUS_U_MASK (1<<STATUS_U_BIT)
#define STATUS_L_MASK (1<<STATUS_L_BIT)
/*
* ECR: Exception Cause Reg bits-n-pieces
* [23:16] = Exception Vector
* [15: 8] = Exception Cause Code
* [ 7: 0] = Exception Parameters (for certain types only)
*/
#define ECR_VEC_MASK 0xff0000
#define ECR_CODE_MASK 0x00ff00
#define ECR_PARAM_MASK 0x0000ff
/* Exception Cause Vector Values */
#define ECR_V_INSN_ERR 0x02
#define ECR_V_MACH_CHK 0x20
#define ECR_V_ITLB_MISS 0x21
#define ECR_V_DTLB_MISS 0x22
#define ECR_V_PROTV 0x23
/* Protection Violation Exception Cause Code Values */
#define ECR_C_PROTV_INST_FETCH 0x00
#define ECR_C_PROTV_LOAD 0x01
#define ECR_C_PROTV_STORE 0x02
#define ECR_C_PROTV_XCHG 0x03
#define ECR_C_PROTV_MISALIG_DATA 0x04
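/*
 * Illustrative decode (not part of the patch): a hypothetical ecr value of
 * 0x00230001 splits, per the masks above, into
 *   vector = (ecr & ECR_VEC_MASK)  >> 16 = 0x23 -> ECR_V_PROTV
 *   code   = (ecr & ECR_CODE_MASK) >>  8 = 0x00 -> ECR_C_PROTV_INST_FETCH
 *   param  =  ecr & ECR_PARAM_MASK       = 0x01
 */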
/* DTLB Miss Exception Cause Code Values */
#define ECR_C_BIT_DTLB_LD_MISS 8
#define ECR_C_BIT_DTLB_ST_MISS 9
/* Auxiliary registers */
#define AUX_IDENTITY 4
#define AUX_INTR_VEC_BASE 0x25
#define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */
#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
#define AUX_IRQ_LV12 0x43 /* interrupt level register */
#define AUX_IENABLE 0x40c
#define AUX_ITRIGGER 0x40d
#define AUX_IPULSE 0x415
/* Timer related Aux registers */
#define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */
#define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */
#define ARC_REG_TIMER0_CNT 0x21 /* timer 0 count */
#define ARC_REG_TIMER1_LIMIT 0x102 /* timer 1 limit */
#define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */
#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */
#define TIMER_CTRL_IE (1 << 0) /* Interrupt when count reaches limit */
#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
/* MMU Management regs */
#define ARC_REG_TLBPD0 0x405
#define ARC_REG_TLBPD1 0x406
#define ARC_REG_TLBINDEX 0x407
#define ARC_REG_TLBCOMMAND 0x408
#define ARC_REG_PID 0x409
#define ARC_REG_SCRATCH_DATA0 0x418
/* Bits in MMU PID register */
#define MMU_ENABLE (1 << 31) /* Enable MMU for process */
/* Error code if probe fails */
#define TLB_LKUP_ERR 0x80000000
/* TLB Commands */
#define TLBWrite 0x1
#define TLBRead 0x2
#define TLBGetIndex 0x3
#define TLBProbe 0x4
#if (CONFIG_ARC_MMU_VER >= 2)
#define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */
#define TLBIVUTLB 0x6 /* explicitly inv uTLBs */
#else
#undef TLBWriteNI /* These cmds don't exist on older MMU */
#undef TLBIVUTLB
#endif
/* Instruction cache related Auxiliary registers */
#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
#define ARC_REG_IC_IVIC 0x10
#define ARC_REG_IC_CTRL 0x11
#define ARC_REG_IC_IVIL 0x19
#if (CONFIG_ARC_MMU_VER > 2)
#define ARC_REG_IC_PTAG 0x1E
#endif
/* Bit val in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE 0x1
/* Data cache related Auxiliary registers */
#define ARC_REG_DC_BCR 0x72
#define ARC_REG_DC_IVDC 0x47
#define ARC_REG_DC_CTRL 0x48
#define ARC_REG_DC_IVDL 0x4A
#define ARC_REG_DC_FLSH 0x4B
#define ARC_REG_DC_FLDL 0x4C
#if (CONFIG_ARC_MMU_VER > 2)
#define ARC_REG_DC_PTAG 0x5C
#endif
/* Bit val in DC_CTRL */
#define DC_CTRL_INV_MODE_FLUSH 0x40
#define DC_CTRL_FLUSH_STATUS 0x100
/*
* Floating Pt Registers
* Status regs are read-only (build-time) so need not be saved/restored
*/
#define ARC_AUX_FP_STAT 0x300
#define ARC_AUX_DPFP_1L 0x301
#define ARC_AUX_DPFP_1H 0x302
#define ARC_AUX_DPFP_2L 0x303
#define ARC_AUX_DPFP_2H 0x304
#define ARC_AUX_DPFP_STAT 0x305
#ifndef __ASSEMBLY__
/*
******************************************************************
* Inline ASM macros to read/write AUX Regs
* Essentially invocation of lr/sr insns from "C"
*/
#if 1
#define read_aux_reg(reg) __builtin_arc_lr(reg)
/* gcc builtin sr needs reg param to be long immediate */
#define write_aux_reg(reg_immed, val) \
__builtin_arc_sr((unsigned int)val, reg_immed)
#else
#define read_aux_reg(reg) \
({ \
unsigned int __ret; \
__asm__ __volatile__( \
" lr %0, [%1]" \
: "=r"(__ret) \
: "i"(reg)); \
__ret; \
})
/*
* Aux Reg address is specified as long immediate by caller
* e.g.
* write_aux_reg(0x69, some_val);
* This generates tightest code.
*/
#define write_aux_reg(reg_imm, val) \
({ \
__asm__ __volatile__( \
" sr %0, [%1] \n" \
: \
: "ir"(val), "i"(reg_imm)); \
})
/*
* Aux Reg address is specified in a variable
* * e.g.
* reg_num = 0x69
* write_aux_reg2(reg_num, some_val);
* This has to generate glue code to load the reg num from
* memory to a reg hence not recommended.
*/
#define write_aux_reg2(reg_in_var, val) \
({ \
unsigned int tmp; \
\
__asm__ __volatile__( \
" ld %0, [%2] \n\t" \
" sr %1, [%0] \n\t" \
: "=&r"(tmp) \
: "r"(val), "memory"(&reg_in_var)); \
})
#endif
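/*
 * Illustrative only, not part of the patch: arming TIMER0 to raise an
 * interrupt after @cycles core clocks, using write_aux_reg() and the
 * ARC_REG_TIMER0_* / TIMER_CTRL_* definitions above. The helper name is
 * made up; the real programming lives in the arch timer code.
 */
static inline void example_arm_timer0(unsigned int cycles)
{
	write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);	/* fire when count reaches limit */
	write_aux_reg(ARC_REG_TIMER0_CNT, 0);		/* restart the count from zero */
	write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
}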
#define READ_BCR(reg, into) \
{ \
unsigned int tmp; \
tmp = read_aux_reg(reg); \
if (sizeof(tmp) == sizeof(into)) { \
into = *((typeof(into) *)&tmp); \
} else { \
extern void bogus_undefined(void); \
bogus_undefined(); \
} \
}
#define WRITE_BCR(reg, into) \
{ \
unsigned int tmp; \
if (sizeof(tmp) == sizeof(into)) { \
tmp = (*(unsigned int *)(into)); \
write_aux_reg(reg, tmp); \
} else { \
extern void bogus_undefined(void); \
bogus_undefined(); \
} \
}
/* Helpers */
#define TO_KB(bytes) ((bytes) >> 10)
#define TO_MB(bytes) (TO_KB(bytes) >> 10)
#define PAGES_TO_KB(n_pages) ((n_pages) << (PAGE_SHIFT - 10))
#define PAGES_TO_MB(n_pages) (PAGES_TO_KB(n_pages) >> 10)
#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
/* These DPFP regs need to be saved/restored across ctx-sw */
struct arc_fpu {
struct {
unsigned int l, h;
} aux_dpfp[2];
};
#endif
/*
***************************************************************
* Build Configuration Registers, with encoded hardware config
*/
struct bcr_identity {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int chip_id:16, cpu_id:8, family:8;
#else
unsigned int family:8, cpu_id:8, chip_id:16;
#endif
};
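/*
 * Illustrative only, not part of the patch: decoding a BCR into its
 * bit-field view with READ_BCR() above; the helper name is made up.
 */
static inline unsigned int example_core_family(void)
{
	struct bcr_identity id;

	READ_BCR(AUX_IDENTITY, id);	/* raw IDENTITY aux reg -> decoded fields */
	return id.family;
}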
struct bcr_mmu_1_2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
#else
unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
#endif
};
struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
u_itlb:4, u_dtlb:4;
#else
unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
ways:4, ver:8;
#endif
};
#define EXTN_SWAP_VALID 0x1
#define EXTN_NORM_VALID 0x2
#define EXTN_MINMAX_VALID 0x2
#define EXTN_BARREL_VALID 0x2
struct bcr_extn {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:20, crc:1, ext_arith:2, mul:2, barrel:2, minmax:2,
norm:2, swap:1;
#else
unsigned int swap:1, norm:2, minmax:2, barrel:2, mul:2, ext_arith:2,
crc:1, pad:20;
#endif
};
/* DSP Options Ref Manual */
struct bcr_extn_mac_mul {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:16, type:8, ver:8;
#else
unsigned int ver:8, type:8, pad:16;
#endif
};
struct bcr_extn_xymem {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int ram_org:2, num_banks:4, bank_sz:4, ver:8;
#else
unsigned int ver:8, bank_sz:4, num_banks:4, ram_org:2;
#endif
};
struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
};
struct bcr_perip {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int start:8, pad2:8, sz:8, pad:8;
#else
unsigned int pad:8, sz:8, pad2:8, start:8;
#endif
};
struct bcr_iccm {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int base:16, pad:5, sz:3, ver:8;
#else
unsigned int ver:8, sz:3, pad:5, base:16;
#endif
};
/* DCCM Base Address Register: ARC_REG_DCCMBASE_BCR */
struct bcr_dccm_base {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int addr:24, ver:8;
#else
unsigned int ver:8, addr:24;
#endif
};
/* DCCM RAM Configuration Register: ARC_REG_DCCM_BCR */
struct bcr_dccm {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int res:21, sz:3, ver:8;
#else
unsigned int ver:8, sz:3, res:21;
#endif
};
/* Both SP and DP FPU BCRs have same format */
struct bcr_fp {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int fast:1, ver:8;
#else
unsigned int ver:8, fast:1;
#endif
};
/*
*******************************************************************
* Generic structures to hold build configuration used at runtime
*/
struct cpuinfo_arc_mmu {
unsigned int ver, pg_sz, sets, ways, u_dtlb, u_itlb, num_tlb;
};
struct cpuinfo_arc_cache {
unsigned int has_aliasing, sz, line_len, assoc, ver;
};
struct cpuinfo_arc_ccm {
unsigned int base_addr, sz;
};
struct cpuinfo_arc {
struct cpuinfo_arc_cache icache, dcache;
struct cpuinfo_arc_mmu mmu;
struct bcr_identity core;
unsigned int timers;
unsigned int vec_base;
unsigned int uncached_base;
struct cpuinfo_arc_ccm iccm, dccm;
struct bcr_extn extn;
struct bcr_extn_xymem extn_xymem;
struct bcr_extn_mac_mul extn_mac_mul;
struct bcr_fp fp, dpfp;
};
extern struct cpuinfo_arc cpuinfo_arc700[];
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_ARC_ARCREGS_H */

@@ -0,0 +1,9 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <generated/asm-offsets.h>

@@ -0,0 +1,232 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>
#define atomic_read(v) ((v)->counter)
#ifdef CONFIG_ARC_HAS_LLSC
#define atomic_set(v, i) (((v)->counter) = (i))
static inline void atomic_add(int i, atomic_t *v)
{
unsigned int temp;
__asm__ __volatile__(
"1: llock %0, [%1] \n"
" add %0, %0, %2 \n"
" scond %0, [%1] \n"
" bnz 1b \n"
: "=&r"(temp) /* Early clobber, to prevent reg reuse */
: "r"(&v->counter), "ir"(i)
: "cc");
}
static inline void atomic_sub(int i, atomic_t *v)
{
unsigned int temp;
__asm__ __volatile__(
"1: llock %0, [%1] \n"
" sub %0, %0, %2 \n"
" scond %0, [%1] \n"
" bnz 1b \n"
: "=&r"(temp)
: "r"(&v->counter), "ir"(i)
: "cc");
}
/* add and also return the new value */
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned int temp;
__asm__ __volatile__(
"1: llock %0, [%1] \n"
" add %0, %0, %2 \n"
" scond %0, [%1] \n"
" bnz 1b \n"
: "=&r"(temp)
: "r"(&v->counter), "ir"(i)
: "cc");
return temp;
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned int temp;
__asm__ __volatile__(
"1: llock %0, [%1] \n"
" sub %0, %0, %2 \n"
" scond %0, [%1] \n"
" bnz 1b \n"
: "=&r"(temp)
: "r"(&v->counter), "ir"(i)
: "cc");
return temp;
}
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
unsigned int temp;
__asm__ __volatile__(
"1: llock %0, [%1] \n"
" bic %0, %0, %2 \n"
" scond %0, [%1] \n"
" bnz 1b \n"
: "=&r"(temp)
: "r"(addr), "ir"(mask)
: "cc");
}
#else /* !CONFIG_ARC_HAS_LLSC */
#ifndef CONFIG_SMP
/* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) (((v)->counter) = (i))
#else
static inline void atomic_set(atomic_t *v, int i)
{
/*
* Independent of hardware support, all of the atomic_xxx() APIs need
* to follow the same locking rules to make sure that a "hardware"
* atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
* sequence
*
* Thus atomic_set() despite being 1 insn (and seemingly atomic)
* requires the locking.
*/
unsigned long flags;
atomic_ops_lock(flags);
v->counter = i;
atomic_ops_unlock(flags);
}
#endif
/*
* Non hardware assisted Atomic-R-M-W
* Locking would change to irq-disabling only (UP) and spinlocks (SMP)
*/
static inline void atomic_add(int i, atomic_t *v)
{
unsigned long flags;
atomic_ops_lock(flags);
v->counter += i;
atomic_ops_unlock(flags);
}
static inline void atomic_sub(int i, atomic_t *v)
{
unsigned long flags;
atomic_ops_lock(flags);
v->counter -= i;
atomic_ops_unlock(flags);
}
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long flags;
unsigned long temp;
atomic_ops_lock(flags);
temp = v->counter;
temp += i;
v->counter = temp;
atomic_ops_unlock(flags);
return temp;
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long flags;
unsigned long temp;
atomic_ops_lock(flags);
temp = v->counter;
temp -= i;
v->counter = temp;
atomic_ops_unlock(flags);
return temp;
}
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
unsigned long flags;
atomic_ops_lock(flags);
*addr &= ~mask;
atomic_ops_unlock(flags);
}
#endif /* !CONFIG_ARC_HAS_LLSC */
/**
* __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v
*/
#define __atomic_add_unless(v, a, u) \
({ \
int c, old; \
c = atomic_read(v); \
while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
c = old; \
c; \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)
#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v) atomic_add_return(1, (v))
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
#define ATOMIC_INIT(i) { (i) }
#include <asm-generic/atomic64.h>
#endif
#endif
#endif

@@ -0,0 +1,42 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
#ifndef __ASSEMBLY__
/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */
#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
#define wmb() mb()
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define read_barrier_depends() mb()
/* TODO-vineetg verify the correctness of macros here */
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#endif
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#define smp_read_barrier_depends() do { } while (0)
#endif
#endif

@@ -0,0 +1,516 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/compiler.h>
/*
* Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
* The Kconfig glue ensures that in SMP, this is only set if the container
* SoC/platform has cross-core coherent LLOCK/SCOND
*/
#if defined(CONFIG_ARC_HAS_LLSC)
static inline void set_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned int temp;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
__asm__ __volatile__(
"1: llock %0, [%1] \n"
" bset %0, %0, %2 \n"
" scond %0, [%1] \n"
" bnz 1b \n"
: "=&r"(temp)
: "r"(m), "ir"(nr)
: "cc");
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned int temp;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
__asm__ __volatile__(
"1: llock %0, [%1] \n"
" bclr %0, %0, %2 \n"
" scond %0, [%1] \n"
" bnz 1b \n"
: "=&r"(temp)
: "r"(m), "ir"(nr)
: "cc");
}
static inline void change_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned int temp;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
__asm__ __volatile__(
"1: llock %0, [%1] \n"
" bxor %0, %0, %2 \n"
" scond %0, [%1] \n"
" bnz 1b \n"
: "=&r"(temp)
: "r"(m), "ir"(nr)
: "cc");
}
/*
* Semantically:
* Test the bit
* if clear
* set it and return 0 (old value)
* else
* return 1 (old value).
*
* Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
* and the old value of bit is returned
*/
static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned long old, temp;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
__asm__ __volatile__(
"1: llock %0, [%2] \n"
" bset %1, %0, %3 \n"
" scond %1, [%2] \n"
" bnz 1b \n"
: "=&r"(old), "=&r"(temp)
: "r"(m), "ir"(nr)
: "cc");
return (old & (1 << nr)) != 0;
}
static inline int
test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned int old, temp;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
__asm__ __volatile__(
"1: llock %0, [%2] \n"
" bclr %1, %0, %3 \n"
" scond %1, [%2] \n"
" bnz 1b \n"
: "=&r"(old), "=&r"(temp)
: "r"(m), "ir"(nr)
: "cc");
return (old & (1 << nr)) != 0;
}
static inline int
test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned int old, temp;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
__asm__ __volatile__(
"1: llock %0, [%2] \n"
" bxor %1, %0, %3 \n"
" scond %1, [%2] \n"
" bnz 1b \n"
: "=&r"(old), "=&r"(temp)
: "r"(m), "ir"(nr)
: "cc");
return (old & (1 << nr)) != 0;
}
#else /* !CONFIG_ARC_HAS_LLSC */
#include <asm/smp.h>
/*
* Non hardware assisted Atomic-R-M-W
* Locking would change to irq-disabling only (UP) and spinlocks (SMP)
*
* There's "significant" micro-optimization in writing our own variants of
* bitops (over generic variants)
*
* (1) The generic APIs have "signed" @nr while we have it "unsigned"
* This avoids extra code being generated for pointer arithmetic, since
* gcc is "not sure" that the index is NOT -ve
* (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL) etc
* only consider bottom 5 bits of @nr, so NO need to mask them off.
* (GCC Quirk: however for constant @nr we still need to do the masking
* at compile time)
*/
static inline void set_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned long temp, flags;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
bitops_lock(flags);
temp = *m;
*m = temp | (1UL << nr);
bitops_unlock(flags);
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned long temp, flags;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
bitops_lock(flags);
temp = *m;
*m = temp & ~(1UL << nr);
bitops_unlock(flags);
}
static inline void change_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned long temp, flags;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
bitops_lock(flags);
temp = *m;
*m = temp ^ (1UL << nr);
bitops_unlock(flags);
}
static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned long old, flags;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
bitops_lock(flags);
old = *m;
*m = old | (1 << nr);
bitops_unlock(flags);
return (old & (1 << nr)) != 0;
}
static inline int
test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned long old, flags;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
bitops_lock(flags);
old = *m;
*m = old & ~(1 << nr);
bitops_unlock(flags);
return (old & (1 << nr)) != 0;
}
static inline int
test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned long old, flags;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
bitops_lock(flags);
old = *m;
*m = old ^ (1 << nr);
bitops_unlock(flags);
return (old & (1 << nr)) != 0;
}
#endif /* CONFIG_ARC_HAS_LLSC */
/***************************************
* Non atomic variants
**************************************/
static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned long temp;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
temp = *m;
*m = temp | (1UL << nr);
}
static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned long temp;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
temp = *m;
*m = temp & ~(1UL << nr);
}
static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned long temp;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
temp = *m;
*m = temp ^ (1UL << nr);
}
static inline int
__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned long old;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
old = *m;
*m = old | (1 << nr);
return (old & (1 << nr)) != 0;
}
static inline int
__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned long old;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
old = *m;
*m = old & ~(1 << nr);
return (old & (1 << nr)) != 0;
}
static inline int
__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
unsigned long old;
m += nr >> 5;
if (__builtin_constant_p(nr))
nr &= 0x1f;
old = *m;
*m = old ^ (1 << nr);
return (old & (1 << nr)) != 0;
}
/*
* This routine doesn't need to be atomic.
*/
static inline int
__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr & 31)) &
(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}
static inline int
__test_bit(unsigned int nr, const volatile unsigned long *addr)
{
unsigned long mask;
addr += nr >> 5;
/* ARC700 only considers 5 bits in bit-fiddling insn */
mask = 1 << nr;
return ((mask & *addr) != 0);
}
#define test_bit(nr, addr) (__builtin_constant_p(nr) ? \
__constant_test_bit((nr), (addr)) : \
__test_bit((nr), (addr)))
/*
* Count the number of zeros, starting from MSB
* Helper for fls( ) friends
* This is a pure count, so (1-32) or (0-31) doesn't apply
* It could be 0 to 32, based on num of 0's in there
* clz(0x8000_0000) = 0, clz(0xFFFF_FFFF)=0, clz(0) = 32, clz(1) = 31
*/
static inline __attribute__ ((const)) int clz(unsigned int x)
{
unsigned int res;
__asm__ __volatile__(
" norm.f %0, %1 \n"
" mov.n %0, 0 \n"
" add.p %0, %0, 1 \n"
: "=r"(res)
: "r"(x)
: "cc");
return res;
}
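/*
 * Illustrative only, not part of the patch: a portable equivalent of clz()
 * above, returning 0..32 (clz_ref(0) == 32), matching the worked values in
 * the comment.
 */
static inline int clz_ref(unsigned int x)
{
	int n = 0;

	if (!x)
		return 32;
	while (!(x & 0x80000000u)) {	/* shift left until the MSB is set */
		x <<= 1;
		n++;
	}
	return n;
}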
static inline int constant_fls(int x)
{
int r = 32;
if (!x)
return 0;
if (!(x & 0xffff0000u)) {
x <<= 16;
r -= 16;
}
if (!(x & 0xff000000u)) {
x <<= 8;
r -= 8;
}
if (!(x & 0xf0000000u)) {
x <<= 4;
r -= 4;
}
if (!(x & 0xc0000000u)) {
x <<= 2;
r -= 2;
}
if (!(x & 0x80000000u)) {
x <<= 1;
r -= 1;
}
return r;
}
/*
* fls = Find Last Set in word
* @result: [1-32]
* fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
*/
static inline __attribute__ ((const)) int fls(unsigned long x)
{
if (__builtin_constant_p(x))
return constant_fls(x);
return 32 - clz(x);
}
/*
* __fls: Similar to fls, but zero based (0-31)
*/
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
if (!x)
return 0;
else
return fls(x) - 1;
}
/*
* ffs = Find First Set in word (LSB to MSB)
* @result: [1-32], 0 if all 0's
*/
#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
static inline __attribute__ ((const)) int __ffs(unsigned long word)
{
if (!word)
return word;
return ffs(word) - 1;
}
/*
* ffz = Find First Zero in word.
* @return:[0-31], 32 if all 1's
*/
#define ffz(x) __ffs(~(x))
/* TODO does this affect uni-processor code */
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif

@@ -0,0 +1,37 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_BUG_H
#define _ASM_ARC_BUG_H
#ifndef __ASSEMBLY__
#include <asm/ptrace.h>
struct task_struct;
void show_regs(struct pt_regs *regs);
void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
unsigned long address, unsigned long cause_reg);
void die(const char *str, struct pt_regs *regs, unsigned long address,
unsigned long cause_reg);
#define BUG() do { \
dump_stack(); \
pr_warn("Kernel BUG in %s: %s: %d!\n", \
__FILE__, __func__, __LINE__); \
} while (0)
#define HAVE_ARCH_BUG
#include <asm-generic/bug.h>
#endif /* !__ASSEMBLY__ */
#endif

@@ -0,0 +1,75 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ARC_ASM_CACHE_H
#define __ARC_ASM_CACHE_H
/* In case the cache ($$) is not configured, set up a dummy number for the rest of the kernel */
#ifndef CONFIG_ARC_CACHE_LINE_SHIFT
#define L1_CACHE_SHIFT 6
#else
#define L1_CACHE_SHIFT CONFIG_ARC_CACHE_LINE_SHIFT
#endif
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define ARC_ICACHE_WAYS 2
#define ARC_DCACHE_WAYS 4
/* Helpers */
#define ARC_ICACHE_LINE_LEN L1_CACHE_BYTES
#define ARC_DCACHE_LINE_LEN L1_CACHE_BYTES
#define ICACHE_LINE_MASK (~(ARC_ICACHE_LINE_LEN - 1))
#define DCACHE_LINE_MASK (~(ARC_DCACHE_LINE_LEN - 1))
#if ARC_ICACHE_LINE_LEN != ARC_DCACHE_LINE_LEN
#error "Need to fix some code as I/D cache lines not same"
#else
#define is_not_cache_aligned(p) ((unsigned long)p & (~DCACHE_LINE_MASK))
#endif
#ifndef __ASSEMBLY__
/* Uncached access macros */
#define arc_read_uncached_32(ptr) \
({ \
unsigned int __ret; \
__asm__ __volatile__( \
" ld.di %0, [%1] \n" \
: "=r"(__ret) \
: "r"(ptr)); \
__ret; \
})
#define arc_write_uncached_32(ptr, data)\
({ \
__asm__ __volatile__( \
" st.di %0, [%1] \n" \
: \
: "r"(data), "r"(ptr)); \
})
/* used to give SHMLBA a value to avoid Cache Aliasing */
extern unsigned int ARC_shmlba;
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
/*
* ARC700 doesn't cache any access in top 256M.
* Ideal for wiring memory mapped peripherals as we don't need to do
* explicit uncached accesses (LD.di/ST.di) hence more portable drivers
*/
#define ARC_UNCACHED_ADDR_SPACE 0xc0000000
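/*
 * Illustrative only, not part of the patch: reading a register of a
 * peripheral wired into the always-uncached top 256M window using the
 * ld.di helper above. The helper name and offset are made-up examples.
 */
static inline unsigned int example_read_periph_reg(unsigned long offset)
{
	unsigned long addr = ARC_UNCACHED_ADDR_SPACE + offset;

	return arc_read_uncached_32(addr);	/* LD.di: bypasses the D-cache */
}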
extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void __init read_decode_cache_bcr(void);
#endif
#endif /* _ASM_CACHE_H */

@@ -0,0 +1,67 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
* -flush_cache_dup_mm (fork)
* -likewise for flush_cache_mm (exit/execve)
* -likewise for flush_cache_{range,page} (munmap, exit, COW-break)
*
* vineetg: April 2008
* -Added a critical CacheLine flush to copy_to_user_page( ) which
* was causing gdbserver to not setup breakpoints consistently
*/
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H
#include <linux/mm.h>
void flush_cache_all(void);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_icache_page(struct vm_area_struct *vma, struct page *page);
void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
int len);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
void dma_cache_wback_inv(unsigned long start, unsigned long sz);
void dma_cache_inv(unsigned long start, unsigned long sz);
void dma_cache_wback(unsigned long start, unsigned long sz);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
/* TBD: optimize this */
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
/*
* VM callbacks when entire/range of user-space V-P mappings are
* torn-down/get-invalidated
*
* Currently we don't support D$ aliasing configs for our VIPT caches
* NOPS for VIPT Cache with non-aliasing D$ configurations only
*/
#define flush_cache_dup_mm(mm) /* called on fork */
#define flush_cache_mm(mm) /* called on munmap/exit */
#define flush_cache_range(mm, u_vstart, u_vend)
#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
if (vma->vm_flags & VM_EXEC) \
flush_icache_range_vaddr((unsigned long)(dst), vaddr, len);\
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len);
#endif


@ -0,0 +1,101 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Joern Rennecke <joern.rennecke@embecosm.com>: Jan 2012
* -Insn Scheduling improvements to csum core routines.
* = csum_fold( ) largely derived from ARM version.
* = ip_fast_csum( ) to have modulo scheduling
* -gcc 4.4.x broke networking. Alias analysis needed to be primed.
* worked around by adding memory clobber to ip_fast_csum( )
*
* vineetg: May 2010
* -Rewrote ip_fast_csum( ) and csum_fold( ) with fast inline asm
*/
#ifndef _ASM_ARC_CHECKSUM_H
#define _ASM_ARC_CHECKSUM_H
/*
* Fold a partial checksum
*
* The two 16-bit halves of the 32-bit sum are added, any carry out of the
* low half is added back, and the final 16-bit result is inverted.
*/
static inline __sum16 csum_fold(__wsum s)
{
unsigned r = s << 16 | s >> 16; /* ror */
s = ~s;
s -= r;
return s >> 16;
}
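
For reference, a plain-C restatement of the fold performed by the rotate-based version above (a sketch for clarity, not a replacement):

/* Fold a 32-bit partial sum to 16 bits: add the two halves, absorb any
 * carry, then return the one's complement.
 * e.g. 0x0001ffff -> 0x0001 + 0xffff = 0x10000 -> 0x0001 -> ~0x0001 = 0xfffe
 */
static inline unsigned short csum_fold_ref(unsigned int s)
{
	s = (s & 0xffff) + (s >> 16);	/* add the two halves */
	s = (s & 0xffff) + (s >> 16);	/* absorb a possible carry */
	return (unsigned short)~s;
}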
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
static inline __sum16
ip_fast_csum(const void *iph, unsigned int ihl)
{
const void *ptr = iph;
unsigned int tmp, tmp2, sum;
__asm__(
" ld.ab %0, [%3, 4] \n"
" ld.ab %2, [%3, 4] \n"
" sub %1, %4, 2 \n"
" lsr.f lp_count, %1, 1 \n"
" bcc 0f \n"
" add.f %0, %0, %2 \n"
" ld.ab %2, [%3, 4] \n"
"0: lp 1f \n"
" ld.ab %1, [%3, 4] \n"
" adc.f %0, %0, %2 \n"
" ld.ab %2, [%3, 4] \n"
" adc.f %0, %0, %1 \n"
"1: adc.f %0, %0, %2 \n"
" add.cs %0,%0,1 \n"
: "=&r"(sum), "=r"(tmp), "=&r"(tmp2), "+&r" (ptr)
: "r"(ihl)
: "cc", "lp_count", "memory");
return csum_fold(sum);
}
/*
* TCP pseudo Header is 12 bytes:
* SA [4], DA [4], zeroes [1], Proto[1], TCP Seg(hdr+data) Len [2]
*/
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
{
__asm__ __volatile__(
" add.f %0, %0, %1 \n"
" adc.f %0, %0, %2 \n"
" adc.f %0, %0, %3 \n"
" adc.f %0, %0, %4 \n"
" adc %0, %0, 0 \n"
: "+&r"(sum)
: "r"(saddr), "r"(daddr),
#ifdef CONFIG_CPU_BIG_ENDIAN
"r"(len),
#else
"r"(len << 8),
#endif
"r"(htons(proto))
: "cc");
return sum;
}
#define csum_fold csum_fold
#define ip_fast_csum ip_fast_csum
#define csum_tcpudp_nofold csum_tcpudp_nofold
#include <asm-generic/checksum.h>
#endif /* _ASM_ARC_CHECKSUM_H */


@ -0,0 +1,22 @@
/*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_CLK_H
#define _ASM_ARC_CLK_H
/* Although we can't really hide core_freq, the accessor is still the better way */
extern unsigned long core_freq;
static inline unsigned long arc_get_core_freq(void)
{
return core_freq;
}
extern int arc_set_core_freq(unsigned long);
#endif


@ -0,0 +1,143 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H
#include <linux/types.h>
#include <asm/smp.h>
#ifdef CONFIG_ARC_HAS_LLSC
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
unsigned long prev;
__asm__ __volatile__(
"1: llock %0, [%1] \n"
" brne %0, %2, 2f \n"
" scond %3, [%1] \n"
" bnz 1b \n"
"2: \n"
: "=&r"(prev)
: "r"(ptr), "ir"(expected),
"r"(new) /* can't be "ir". scond can't take limm for "b" */
: "cc");
return prev;
}
#else
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
unsigned long flags;
int prev;
volatile unsigned long *p = ptr;
atomic_ops_lock(flags);
prev = *p;
if (prev == expected)
*p = new;
atomic_ops_unlock(flags);
return prev;
}
#endif /* CONFIG_ARC_HAS_LLSC */
#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), (unsigned long)(n)))
/*
* Since not supported natively, ARC cmpxchg() uses atomic_ops_lock (UP/SMP)
* just to guarantee semantics.
* atomic_cmpxchg() needs to use the same locks as its other atomic siblings,
* which also happens to be atomic_ops_lock.
*
* Thus, despite being semantically different, the implementation of
* atomic_cmpxchg() is the same as that of cmpxchg().
*/
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
/*
* xchg (reg with memory) based on "Native atomic" EX insn
*/
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
int size)
{
extern unsigned long __xchg_bad_pointer(void);
switch (size) {
case 4:
__asm__ __volatile__(
" ex %0, [%1] \n"
: "+r"(val)
: "r"(ptr)
: "memory");
return val;
}
return __xchg_bad_pointer();
}
#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
sizeof(*(ptr))))
/*
* On ARC700, EX insn is inherently atomic, so by default "vanilla" xchg() need
* not require any locking. However there's a quirk.
* ARC lacks native CMPXCHG, thus emulated (see above), using external locking -
* incidentally it "reuses" the same atomic_ops_lock used by the atomic APIs.
* Now, llist code uses cmpxchg() and xchg() on the same data, so xchg() needs
* to abide by the same serializing rules, and thus ends up using atomic_ops_lock as well.
*
* This however is only relevant if SMP and/or ARC lacks LLSC
* if (UP or LLSC)
* xchg doesn't need serialization
* else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
* xchg needs serialization
*/
#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
#define xchg(ptr, with) \
({ \
unsigned long flags; \
typeof(*(ptr)) old_val; \
\
atomic_ops_lock(flags); \
old_val = _xchg(ptr, with); \
atomic_ops_unlock(flags); \
old_val; \
})
#else
#define xchg(ptr, with) _xchg(ptr, with)
#endif
/*
* "atomic" variant of xchg()
* REQ: It needs to follow the same serialization rules as other atomic_xxx()
* Since xchg() doesn't always do that, it would seem that the following
* definition is incorrect. But here's the rationale:
* SMP : Even xchg() takes the atomic_ops_lock, so OK.
* LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
* is natively "SMP safe", no serialization required).
* UP : other atomics disable IRQ, so no way a different-context atomic_xchg()
* could clobber them. atomic_xchg() itself would be 1 insn, so it
* can't be clobbered by others. Thus no serialization required when
* atomic_xchg is involved.
*/
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#endif
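
A typical lock-free update loop built on the cmpxchg() defined above (an illustrative sketch; the counter and policy are hypothetical):

/* Atomically add 'delta' to *counter only while it is positive.
 * Retries if another context modified *counter between the read
 * and the compare-exchange.
 */
static inline int demo_add_if_positive(int *counter, int delta)
{
	int old, new;

	do {
		old = *counter;
		if (old <= 0)
			return 0;	/* nothing to do */
		new = old + delta;
	} while (cmpxchg(counter, old, new) != old);

	return 1;
}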


@ -0,0 +1,32 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Vineetg: May 16th, 2008
* - Current macro is now implemented as "global register" r25
*/
#ifndef _ASM_ARC_CURRENT_H
#define _ASM_ARC_CURRENT_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#ifdef CONFIG_ARC_CURR_IN_REG
register struct task_struct *curr_arc asm("r25");
#define current (curr_arc)
#else
#include <asm-generic/current.h>
#endif /* ! CONFIG_ARC_CURR_IN_REG */
#endif /* ! __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_ARC_CURRENT_H */


@ -0,0 +1,56 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ARC_ASM_DEFINES_H__
#define __ARC_ASM_DEFINES_H__
#if defined(CONFIG_ARC_MMU_V1)
#define CONFIG_ARC_MMU_VER 1
#elif defined(CONFIG_ARC_MMU_V2)
#define CONFIG_ARC_MMU_VER 2
#elif defined(CONFIG_ARC_MMU_V3)
#define CONFIG_ARC_MMU_VER 3
#endif
#ifdef CONFIG_ARC_HAS_LLSC
#define __CONFIG_ARC_HAS_LLSC_VAL 1
#else
#define __CONFIG_ARC_HAS_LLSC_VAL 0
#endif
#ifdef CONFIG_ARC_HAS_SWAPE
#define __CONFIG_ARC_HAS_SWAPE_VAL 1
#else
#define __CONFIG_ARC_HAS_SWAPE_VAL 0
#endif
#ifdef CONFIG_ARC_HAS_RTSC
#define __CONFIG_ARC_HAS_RTSC_VAL 1
#else
#define __CONFIG_ARC_HAS_RTSC_VAL 0
#endif
#ifdef CONFIG_ARC_MMU_SASID
#define __CONFIG_ARC_MMU_SASID_VAL 1
#else
#define __CONFIG_ARC_MMU_SASID_VAL 0
#endif
#ifdef CONFIG_ARC_HAS_ICACHE
#define __CONFIG_ARC_HAS_ICACHE 1
#else
#define __CONFIG_ARC_HAS_ICACHE 0
#endif
#ifdef CONFIG_ARC_HAS_DCACHE
#define __CONFIG_ARC_HAS_DCACHE 1
#else
#define __CONFIG_ARC_HAS_DCACHE 0
#endif
#endif /* __ARC_ASM_DEFINES_H__ */


@ -0,0 +1,68 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Delay routines using pre computed loops_per_jiffy value.
*
* vineetg: Feb 2012
* -Rewrote in "C" to avoid dealing with availability of H/w MPY
* -Also reduced the num of MPY operations from 3 to 2
*
* Amit Bhor: Codito Technologies 2004
*/
#ifndef __ASM_ARC_UDELAY_H
#define __ASM_ARC_UDELAY_H
#include <asm/param.h> /* HZ */
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__(
"1: sub.f %0, %0, 1 \n"
" jpnz 1b \n"
: "+r"(loops)
:
: "cc");
}
extern void __bad_udelay(void);
/*
* Normal Math for computing loops in "N" usecs
* -we have precomputed @loops_per_jiffy
* -1 sec has HZ jiffies
* loops per "N" usecs = ((loops_per_jiffy * HZ / 1000000) * N)
*
* Approximate Division by multiplication:
* -Mathematically if we multiply and divide a number by same value the
* result remains unchanged: In this case, we use 2^32
* -> (loops_per_N_usec * 2^32 ) / 2^32
* -> (((loops_per_jiffy * HZ / 1000000) * N) * 2^32) / 2^32
* -> (loops_per_jiffy * HZ * N * 4295) / 2^32
*
* -Divide by 2^32 is very simply right shift by 32
* -We simply need to ensure that the multiply per above eqn happens in
* 64-bit precision (if the CPU doesn't support it - gcc can emulate it)
*/
static inline void __udelay(unsigned long usecs)
{
unsigned long loops;
/* (long long) cast ensures 64 bit MPY - real or emulated
* HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
*/
loops = ((long long)(usecs * 4295 * HZ) *
(long long)(loops_per_jiffy)) >> 32;
__delay(loops);
}
#define udelay(n) (__builtin_constant_p(n) ? ((n) > 20000 ? __bad_udelay() \
: __udelay(n)) : __udelay(n))
#endif /* __ASM_ARC_UDELAY_H */
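
A worked example of the fixed-point scaling above, with hypothetical numbers:

/* Assume HZ = 100 and loops_per_jiffy = 50000, i.e. 50000 * 100 =
 * 5,000,000 delay-loop iterations per second. For udelay(200):
 *
 *     loops = ((200 * 4295 * 100) * 50000ULL) >> 32
 *           = 4,295,000,000,000 >> 32
 *           = 1000
 *
 * which matches the exact value 5,000,000 * 200 / 1,000,000 = 1000.
 */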


@ -0,0 +1,116 @@
/*
* several functions that help interpret ARC instructions
* used for unaligned accesses, kprobes and kgdb
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ARC_DISASM_H__
#define __ARC_DISASM_H__
enum {
op_Bcc = 0, op_BLcc = 1, op_LD = 2, op_ST = 3, op_MAJOR_4 = 4,
op_MAJOR_5 = 5, op_LD_ADD = 12, op_ADD_SUB_SHIFT = 13,
op_ADD_MOV_CMP = 14, op_S = 15, op_LD_S = 16, op_LDB_S = 17,
op_LDW_S = 18, op_LDWX_S = 19, op_ST_S = 20, op_STB_S = 21,
op_STW_S = 22, op_Su5 = 23, op_SP = 24, op_GP = 25,
op_Pcl = 26, op_MOV_S = 27, op_ADD_CMP = 28, op_BR_S = 29,
op_B_S = 30, op_BL_S = 31
};
enum flow {
noflow,
direct_jump,
direct_call,
indirect_jump,
indirect_call,
invalid_instr
};
#define IS_BIT(word, n) ((word) & (1<<n))
#define BITS(word, s, e) (((word) >> (s)) & (~((-2) << ((e) - (s)))))
#define MAJOR_OPCODE(word) (BITS((word), 27, 31))
#define MINOR_OPCODE(word) (BITS((word), 16, 21))
#define FIELD_A(word) (BITS((word), 0, 5))
#define FIELD_B(word) ((BITS((word), 12, 14)<<3) | \
(BITS((word), 24, 26)))
#define FIELD_C(word) (BITS((word), 6, 11))
#define FIELD_u6(word) FIELD_C(word)
#define FIELD_s12(word) sign_extend(((BITS((word), 0, 5) << 6) | \
BITS((word), 6, 11)), 12)
/* note that for BL/BRcc these two macros need another AND statement to mask
* out bit 1 (make the result a multiple of 4) */
#define FIELD_s9(word) sign_extend(((BITS(word, 15, 15) << 8) | \
BITS(word, 16, 23)), 9)
#define FIELD_s21(word) sign_extend(((BITS(word, 6, 15) << 11) | \
(BITS(word, 17, 26) << 1)), 12)
#define FIELD_s25(word) sign_extend(((BITS(word, 0, 3) << 21) | \
(BITS(word, 6, 15) << 11) | \
(BITS(word, 17, 26) << 1)), 12)
/* note: these operate on 16 bits! */
#define FIELD_S_A(word) ((BITS((word), 2, 2)<<3) | BITS((word), 0, 2))
#define FIELD_S_B(word) ((BITS((word), 10, 10)<<3) | \
BITS((word), 8, 10))
#define FIELD_S_C(word) ((BITS((word), 7, 7)<<3) | BITS((word), 5, 7))
#define FIELD_S_H(word) ((BITS((word), 0, 2)<<3) | BITS((word), 5, 8))
#define FIELD_S_u5(word) (BITS((word), 0, 4))
#define FIELD_S_u6(word) (BITS((word), 0, 4) << 1)
#define FIELD_S_u7(word) (BITS((word), 0, 4) << 2)
#define FIELD_S_u10(word) (BITS((word), 0, 7) << 2)
#define FIELD_S_s7(word) sign_extend(BITS((word), 0, 5) << 1, 9)
#define FIELD_S_s8(word) sign_extend(BITS((word), 0, 7) << 1, 9)
#define FIELD_S_s9(word) sign_extend(BITS((word), 0, 8), 9)
#define FIELD_S_s10(word) sign_extend(BITS((word), 0, 8) << 1, 10)
#define FIELD_S_s11(word) sign_extend(BITS((word), 0, 8) << 2, 11)
#define FIELD_S_s13(word) sign_extend(BITS((word), 0, 10) << 2, 13)
#define STATUS32_L 0x00000100
#define REG_LIMM 62
struct disasm_state {
/* generic info */
unsigned long words[2];
int instr_len;
int major_opcode;
/* info for branch/jump */
int is_branch;
int target;
int delay_slot;
enum flow flow;
/* info for load/store */
int src1, src2, src3, dest, wb_reg;
int zz, aa, x, pref, di;
int fault, write;
};
static inline int sign_extend(int value, int bits)
{
if (IS_BIT(value, (bits - 1)))
value |= (0xffffffff << bits);
return value;
}
static inline int is_short_instr(unsigned long addr)
{
uint16_t word = *((uint16_t *)addr);
int opcode = (word >> 11) & 0x1F;
return (opcode >= 0x0B);
}
void disasm_instr(unsigned long addr, struct disasm_state *state,
int userspace, struct pt_regs *regs, struct callee_regs *cregs);
int disasm_next_pc(unsigned long pc, struct pt_regs *regs, struct callee_regs
*cregs, unsigned long *fall_thru, unsigned long *target);
long get_reg(int reg, struct pt_regs *regs, struct callee_regs *cregs);
void set_reg(int reg, long val, struct pt_regs *regs,
struct callee_regs *cregs);
#endif /* __ARC_DISASM_H__ */
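
A small sketch of how the field helpers above are used to decode an operand (the instruction word here is made up purely for illustration):

/* Decode the signed 9-bit offset (FIELD_s9) of a fabricated 32-bit word.
 * For word = 0x00ff8000: bit 15 = 1 and bits 16..23 = 0xff, so the raw
 * field is 0x1ff and sign_extend(0x1ff, 9) yields -1.
 */
static inline int demo_decode_s9(void)
{
	unsigned long word = 0x00ff8000;

	return FIELD_s9(word);		/* == -1 */
}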


@ -0,0 +1,221 @@
/*
* DMA Mapping glue for ARC
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_ARC_DMA_MAPPING_H
#define ASM_ARC_DMA_MAPPING_H
#include <asm-generic/dma-coherent.h>
#include <asm/cacheflush.h>
#ifndef CONFIG_ARC_PLAT_NEEDS_CPU_TO_DMA
/*
* The dma_map_* APIs take CPU addresses, i.e. kernel logical addresses in the
* untranslated (0x8000_0000 based) address space. The DMA address (bus addr)
* ideally needs to be 0x0000_0000 based, hence these glue routines.
* However given that intermediate bus bridges can ignore the high bit, we can
* do with these routines being no-ops.
* If a platform/device comes up which strictly requires a 0 based bus addr
* (e.g. AHB-PCI bridge on Angel4 board), then it can provide its own versions
*/
#define plat_dma_addr_to_kernel(dev, addr) ((unsigned long)(addr))
#define plat_kernel_addr_to_dma(dev, ptr) ((dma_addr_t)(ptr))
#else
#include <plat/dma_addr.h>
#endif
void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle);
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
dma_addr_t dma_handle);
/* drivers/base/dma-mapping.c */
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size);
#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
/*
* streaming DMA Mapping API...
* CPU accesses the page via its normal paddr, thus it needs to be explicitly made
* consistent before each use
*/
static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_FROM_DEVICE:
dma_cache_inv(paddr, size);
break;
case DMA_TO_DEVICE:
dma_cache_wback(paddr, size);
break;
case DMA_BIDIRECTIONAL:
dma_cache_wback_inv(paddr, size);
break;
default:
pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
}
}
void __arc_dma_cache_sync(unsigned long paddr, size_t size,
enum dma_data_direction dir);
#define _dma_cache_sync(addr, sz, dir) \
do { \
if (__builtin_constant_p(dir)) \
__inline_dma_cache_sync(addr, sz, dir); \
else \
__arc_dma_cache_sync(addr, sz, dir); \
} \
while (0);
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction dir)
{
_dma_cache_sync((unsigned long)cpu_addr, size, dir);
return plat_kernel_addr_to_dma(dev, cpu_addr);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
{
}
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir)
{
unsigned long paddr = page_to_phys(page) + offset;
return dma_map_single(dev, (void *)paddr, size, dir);
}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir)
{
}
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i)
sg->dma_address = dma_map_page(dev, sg_page(s), s->offset,
s->length, dir);
return nents;
}
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i)
dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir)
{
_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
DMA_FROM_DEVICE);
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir)
{
_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
DMA_TO_DEVICE);
}
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
size, DMA_FROM_DEVICE);
}
static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
size, DMA_TO_DEVICE);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction dir)
{
int i;
for (i = 0; i < nelems; i++, sg++)
_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction dir)
{
int i;
for (i = 0; i < nelems; i++, sg++)
_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
}
static inline int dma_supported(struct device *dev, u64 dma_mask)
{
/* Support 32 bit DMA mask exclusively */
return dma_mask == DMA_BIT_MASK(32);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
return -EIO;
*dev->dma_mask = dma_mask;
return 0;
}
#endif
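
A minimal driver-side sketch of the streaming DMA API declared above (device, buffer and DMA engine are hypothetical; assumes the usual <linux/dma-mapping.h> environment):

/* Map a kernel buffer for a device read of memory (DMA_TO_DEVICE),
 * kick off the (imaginary) transfer, then unmap when done.
 */
static int demo_dma_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t bus;

	bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, bus))
		return -EIO;

	/* ... program the hypothetical DMA engine with 'bus' ... */

	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
	return 0;
}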


@ -0,0 +1,14 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_ARC_DMA_H
#define ASM_ARC_DMA_H
#define MAX_DMA_ADDRESS 0xC0000000
#endif


@ -0,0 +1,78 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARC_ELF_H
#define __ASM_ARC_ELF_H
#include <linux/types.h>
#include <uapi/asm/elf.h>
/* These ELF defines belong to uapi but libc elf.h already defines them */
#define EM_ARCOMPACT 93
/* ARC Relocations (kernel Modules only) */
#define R_ARC_32 0x4
#define R_ARC_32_ME 0x1B
#define R_ARC_S25H_PCREL 0x10
#define R_ARC_S25W_PCREL 0x11
/* To set parameters in the core dumps */
#define ELF_ARCH EM_ARCOMPACT
#define ELF_CLASS ELFCLASS32
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ELF_DATA ELFDATA2MSB
#else
#define ELF_DATA ELFDATA2LSB
#endif
/*
* To ensure that
* -we don't load something for the wrong architecture.
* -The userspace is using the correct syscall ABI
*/
struct elf32_hdr;
extern int elf_check_arch(const struct elf32_hdr *);
#define elf_check_arch elf_check_arch
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
* This is the location that an ET_DYN program is loaded if exec'ed. Typical
* use of this is to invoke "./ld.so someprog" to test out a new version of
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
/*
* When the program starts, a1 contains a pointer to a function to be
* registered with atexit, as per the SVR4 ABI. A value of 0 means we
* have no such handler.
*/
#define ELF_PLAT_INIT(_r, load_addr) ((_r)->r0 = 0)
/*
* This yields a mask that user programs can use to figure out what
* instruction set this cpu supports.
*/
#define ELF_HWCAP (0)
/*
* This yields a string that ld.so will use to load implementation
* specific libraries for optimization. This is more specific in
* intent than poking at uname or /proc/cpuinfo.
*/
#define ELF_PLATFORM (NULL)
#define SET_PERSONALITY(ex) \
set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
#endif


@ -0,0 +1,724 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Vineetg: March 2009 (Supporting 2 levels of Interrupts)
* Stack switching code can no longer reliably rely on the fact that
* if we are NOT in user mode, stack is switched to kernel mode.
* e.g. an L2 IRQ interrupted an L1 ISR which had not yet completed
* its prologue, including stack switching from user mode
*
* Vineetg: Aug 28th 2008: Bug #94984
* -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
* Normally CPU does this automatically, however when doing FAKE rtie,
* we also need to explicitly do this. The problem in macros
* FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
* was being "CLEARED" rather then "SET". Actually "SET" clears ZOL context
*
* Vineetg: May 5th 2008
* -Modified CALLEE_REG save/restore macros to handle the fact that
* r25 contains the kernel current task ptr
* - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
* - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
* address Write back load ld.ab instead of separate ld/add instn
*
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
#ifndef __ASM_ARC_ENTRY_H
#define __ASM_ARC_ENTRY_H
#ifdef __ASSEMBLY__
#include <asm/unistd.h> /* For NR_syscalls definition */
#include <asm/asm-offsets.h>
#include <asm/arcregs.h>
#include <asm/ptrace.h>
#include <asm/processor.h> /* For VMALLOC_START */
#include <asm/thread_info.h> /* For THREAD_SIZE */
/* Note on the LD/ST addr modes with addr reg wback
*
* LD.a same as LD.aw
*
* LD.a reg1, [reg2, x] => Pre Incr
* Eff Addr for load = [reg2 + x]
*
* LD.ab reg1, [reg2, x] => Post Incr
* Eff Addr for load = [reg2]
*/
/*--------------------------------------------------------------
* Save caller saved registers (scratch registers) ( r0 - r12 )
* Registers are pushed / popped in the order defined in struct ptregs
* in asm/ptrace.h
*-------------------------------------------------------------*/
.macro SAVE_CALLER_SAVED
st.a r0, [sp, -4]
st.a r1, [sp, -4]
st.a r2, [sp, -4]
st.a r3, [sp, -4]
st.a r4, [sp, -4]
st.a r5, [sp, -4]
st.a r6, [sp, -4]
st.a r7, [sp, -4]
st.a r8, [sp, -4]
st.a r9, [sp, -4]
st.a r10, [sp, -4]
st.a r11, [sp, -4]
st.a r12, [sp, -4]
.endm
/*--------------------------------------------------------------
* Restore caller saved registers (scratch registers)
*-------------------------------------------------------------*/
.macro RESTORE_CALLER_SAVED
ld.ab r12, [sp, 4]
ld.ab r11, [sp, 4]
ld.ab r10, [sp, 4]
ld.ab r9, [sp, 4]
ld.ab r8, [sp, 4]
ld.ab r7, [sp, 4]
ld.ab r6, [sp, 4]
ld.ab r5, [sp, 4]
ld.ab r4, [sp, 4]
ld.ab r3, [sp, 4]
ld.ab r2, [sp, 4]
ld.ab r1, [sp, 4]
ld.ab r0, [sp, 4]
.endm
/*--------------------------------------------------------------
* Save callee saved registers (non scratch registers) ( r13 - r25 )
* on kernel stack.
* User mode callee regs need to be saved in case of
* -fork and friends for replicating from parent to child
* -before going into do_signal( ) for ptrace/core-dump
* Special case handling is required for r25 in case it is used by kernel
* for caching task ptr. Low level exception/ISR code saves user mode r25
* into task->thread.user_r25. So it needs to be retrieved from there and
* saved into the kernel stack with the rest of the callee reg-file
*-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_USER
st.a r13, [sp, -4]
st.a r14, [sp, -4]
st.a r15, [sp, -4]
st.a r16, [sp, -4]
st.a r17, [sp, -4]
st.a r18, [sp, -4]
st.a r19, [sp, -4]
st.a r20, [sp, -4]
st.a r21, [sp, -4]
st.a r22, [sp, -4]
st.a r23, [sp, -4]
st.a r24, [sp, -4]
#ifdef CONFIG_ARC_CURR_IN_REG
; Retrieve orig r25 and save it on stack
ld r12, [r25, TASK_THREAD + THREAD_USER_R25]
st.a r12, [sp, -4]
#else
st.a r25, [sp, -4]
#endif
/* move up by 1 word to "create" callee_regs->"stack_place_holder" */
sub sp, sp, 4
.endm
/*--------------------------------------------------------------
* Save callee saved registers (non scratch registers) ( r13 - r25 )
* kernel mode callee regs needed to be saved in case of context switch
* If r25 is used for caching task pointer then that need not be saved
* as it can be re-created from current task global
*-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_KERNEL
st.a r13, [sp, -4]
st.a r14, [sp, -4]
st.a r15, [sp, -4]
st.a r16, [sp, -4]
st.a r17, [sp, -4]
st.a r18, [sp, -4]
st.a r19, [sp, -4]
st.a r20, [sp, -4]
st.a r21, [sp, -4]
st.a r22, [sp, -4]
st.a r23, [sp, -4]
st.a r24, [sp, -4]
#ifdef CONFIG_ARC_CURR_IN_REG
sub sp, sp, 8
#else
st.a r25, [sp, -4]
sub sp, sp, 4
#endif
.endm
/*--------------------------------------------------------------
* RESTORE_CALLEE_SAVED_KERNEL:
* Loads callee (non scratch) Reg File by popping from Kernel mode stack.
* This is reverse of SAVE_CALLEE_SAVED,
*
* NOTE:
* Ideally this should only be called in switch_to for loading
* switched-IN task's CALLEE Reg File.
* For all other cases RESTORE_CALLEE_SAVED_FAST must be used
* which simply pops the stack w/o touching regs.
*-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_KERNEL
#ifdef CONFIG_ARC_CURR_IN_REG
add sp, sp, 8 /* skip callee_reg gutter and user r25 placeholder */
#else
add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */
ld.ab r25, [sp, 4]
#endif
ld.ab r24, [sp, 4]
ld.ab r23, [sp, 4]
ld.ab r22, [sp, 4]
ld.ab r21, [sp, 4]
ld.ab r20, [sp, 4]
ld.ab r19, [sp, 4]
ld.ab r18, [sp, 4]
ld.ab r17, [sp, 4]
ld.ab r16, [sp, 4]
ld.ab r15, [sp, 4]
ld.ab r14, [sp, 4]
ld.ab r13, [sp, 4]
.endm
/*--------------------------------------------------------------
* RESTORE_CALLEE_SAVED_USER:
* This is called after do_signal where tracer might have changed callee regs
* thus we need to restore the reg file.
* Special case handling is required for r25 in case it is used by kernel
* for caching task ptr. Ptrace would have modified on-kernel-stack value of
* r25, which needs to be shoved back into task->thread.user_r25, from where
* the low level exception/ISR return code will retrieve it along with the
* rest of the callee reg-file.
*-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_USER
add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */
#ifdef CONFIG_ARC_CURR_IN_REG
ld.ab r12, [sp, 4]
st r12, [r25, TASK_THREAD + THREAD_USER_R25]
#else
ld.ab r25, [sp, 4]
#endif
ld.ab r24, [sp, 4]
ld.ab r23, [sp, 4]
ld.ab r22, [sp, 4]
ld.ab r21, [sp, 4]
ld.ab r20, [sp, 4]
ld.ab r19, [sp, 4]
ld.ab r18, [sp, 4]
ld.ab r17, [sp, 4]
ld.ab r16, [sp, 4]
ld.ab r15, [sp, 4]
ld.ab r14, [sp, 4]
ld.ab r13, [sp, 4]
.endm
/*--------------------------------------------------------------
* Super FAST Restore callee saved regs by simply re-adjusting SP
*-------------------------------------------------------------*/
.macro DISCARD_CALLEE_SAVED_USER
add sp, sp, 14 * 4
.endm
/*--------------------------------------------------------------
* Restore User mode r25 saved in task_struct->thread.user_r25
*-------------------------------------------------------------*/
.macro RESTORE_USER_R25
ld r25, [r25, TASK_THREAD + THREAD_USER_R25]
.endm
/*-------------------------------------------------------------
* given a tsk struct, get to the base of its kernel mode stack
* tsk->thread_info is really a PAGE, whose bottom hosts the stack
* which grows upwards towards thread_info
*------------------------------------------------------------*/
.macro GET_TSK_STACK_BASE tsk, out
/* Get task->thread_info (this is essentially start of a PAGE) */
ld \out, [\tsk, TASK_THREAD_INFO]
/* Go to end of page where stack begins (grows upwards) */
add2 \out, \out, (THREAD_SIZE - 4)/4 /* one word GUTTER */
.endm
/*--------------------------------------------------------------
* Switch to Kernel Mode stack if SP points to User Mode stack
*
* Entry : r9 contains pre-IRQ/exception/trap status32
* Exit : SP is set to kernel mode stack pointer
* If CURR_IN_REG, r25 set to "current" task pointer
* Clobbers: r9
*-------------------------------------------------------------*/
.macro SWITCH_TO_KERNEL_STK
/* User Mode when this happened ? Yes: Proceed to switch stack */
bbit1 r9, STATUS_U_BIT, 88f
/* OK we were already in kernel mode when this event happened, thus can
* assume SP is kernel mode SP. _NO_ need to do any stack switching
*/
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
/* However....
* If Level 2 Interrupts enabled, we may end up with a corner case:
* 1. User Task executing
* 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
* 3. But before it could switch SP from USER to KERNEL stack
* a L2 IRQ "Interrupts" L1
* That way although the L2 IRQ happened in Kernel mode, the stack is still
* not switched.
* To handle this, we may need to switch stack even if in kernel mode
* provided SP has values in range of USER mode stack ( < 0x7000_0000 )
*/
brlo sp, VMALLOC_START, 88f
/* TODO: vineetg:
* We need to be a bit more cautious here. What if a kernel bug in
* L1 ISR, caused SP to go whacko (some small value which looks like
* USER stk) and then we take L2 ISR.
* Above brlo alone would treat it as a valid L1-L2 scenario
* instead of shouting aloud
* The only feasible way is to make sure this L2 happened in
* L1 prologue ONLY i.e. ilink2 is less than a pre-set marker in
* L1 ISR before it switches stack
*/
#endif
/* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
* safe-keeping not really needed, but it keeps the epilogue code
* (SP restore) simpler/uniform.
*/
b.d 77f
st.a sp, [sp, -12] ; Make room for orig_r0 and orig_r8
88: /*------Intr/Excp happened in user mode, "switch" stack ------ */
GET_CURR_TASK_ON_CPU r9
#ifdef CONFIG_ARC_CURR_IN_REG
/* If current task pointer cached in r25, time to
* -safekeep USER r25 in task->thread_struct->user_r25
* -load r25 with current task ptr
*/
st.as r25, [r9, (TASK_THREAD + THREAD_USER_R25)/4]
mov r25, r9
#endif
/* With current tsk in r9, get it's kernel mode stack base */
GET_TSK_STACK_BASE r9, r9
#ifdef PT_REGS_CANARY
st 0xabcdabcd, [r9, 0]
#endif
/* Save Pre Intr/Exception User SP on kernel stack */
st.a sp, [r9, -12] ; Make room for orig_r0 and orig_r8
/* CAUTION:
* SP should be set at the very end when we are done with everything
* In case of 2 levels of interrupt we depend on value of SP to assume
* that everything else is done (loading r25 etc)
*/
/* set SP to point to kernel mode stack */
mov sp, r9
77: /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
.endm
/*------------------------------------------------------------
* "FAKE" a rtie to return from CPU Exception context
* This is to re-enable Exceptions within exception
* Look at EV_ProtV to see how this is actually used
*-------------------------------------------------------------*/
.macro FAKE_RET_FROM_EXCPN reg
ld \reg, [sp, PT_status32]
bic \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
bset \reg, \reg, STATUS_L_BIT
sr \reg, [erstatus]
mov \reg, 55f
sr \reg, [eret]
rtie
55:
.endm
/*
* @reg [OUT] &thread_info of "current"
*/
.macro GET_CURR_THR_INFO_FROM_SP reg
and \reg, sp, ~(THREAD_SIZE - 1)
.endm
/*
* @reg [OUT] thread_info->flags of "current"
*/
.macro GET_CURR_THR_INFO_FLAGS reg
GET_CURR_THR_INFO_FROM_SP \reg
ld \reg, [\reg, THREAD_INFO_FLAGS]
.endm
/*--------------------------------------------------------------
* For early Exception Prologue, a core reg is temporarily needed to
* code the rest of prolog (stack switching). This is done by stashing
* it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
*
* Before saving the full regfile - this reg is restored back, only
* to be saved again on kernel mode stack, as part of ptregs.
*-------------------------------------------------------------*/
.macro EXCPN_PROLOG_FREEUP_REG reg
#ifdef CONFIG_SMP
sr \reg, [ARC_REG_SCRATCH_DATA0]
#else
st \reg, [@ex_saved_reg1]
#endif
.endm
.macro EXCPN_PROLOG_RESTORE_REG reg
#ifdef CONFIG_SMP
lr \reg, [ARC_REG_SCRATCH_DATA0]
#else
ld \reg, [@ex_saved_reg1]
#endif
.endm
/*--------------------------------------------------------------
* Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
* Requires SP to be already switched to kernel mode Stack
* sp points to the next free element on the stack at exit of this macro.
* Registers are pushed / popped in the order defined in struct ptregs
* in asm/ptrace.h
* Note that syscalls are implemented via TRAP which is also an exception
* from CPU's point of view
*-------------------------------------------------------------*/
.macro SAVE_ALL_EXCEPTION marker
st \marker, [sp, 8]
st r0, [sp, 4] /* orig_r0, needed only for sys calls */
/* Restore r9 used to code the early prologue */
EXCPN_PROLOG_RESTORE_REG r9
SAVE_CALLER_SAVED
st.a r26, [sp, -4] /* gp */
st.a fp, [sp, -4]
st.a blink, [sp, -4]
lr r9, [eret]
st.a r9, [sp, -4]
lr r9, [erstatus]
st.a r9, [sp, -4]
st.a lp_count, [sp, -4]
lr r9, [lp_end]
st.a r9, [sp, -4]
lr r9, [lp_start]
st.a r9, [sp, -4]
lr r9, [erbta]
st.a r9, [sp, -4]
#ifdef PT_REGS_CANARY
mov r9, 0xdeadbeef
st r9, [sp, -4]
#endif
/* move up by 1 word to "create" pt_regs->"stack_place_holder" */
sub sp, sp, 4
.endm
/*--------------------------------------------------------------
* Save scratch regs for exceptions
*-------------------------------------------------------------*/
.macro SAVE_ALL_SYS
SAVE_ALL_EXCEPTION orig_r8_IS_EXCPN
.endm
/*--------------------------------------------------------------
* Save scratch regs for sys calls
*-------------------------------------------------------------*/
.macro SAVE_ALL_TRAP
/*
* Setup pt_regs->orig_r8.
* Encode syscall number (r8) in upper short word of event type (r9)
* N.B. #1: This is already endian safe (see ptrace.h)
* #2: Only r9 can be used as scratch as it is already clobbered
* and it's contents are no longer needed by the latter part
* of exception prologue
*/
lsl r9, r8, 16
or r9, r9, orig_r8_IS_SCALL
SAVE_ALL_EXCEPTION r9
.endm
/*--------------------------------------------------------------
* Restore all registers used by system call or Exceptions
* SP should always be pointing to the next free stack element
* when entering this macro.
*
* NOTE:
*
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
* for memory load operations. If used in that way interrupts are deferred
* by hardware and that is not good.
*-------------------------------------------------------------*/
.macro RESTORE_ALL_SYS
add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
ld.ab r9, [sp, 4]
sr r9, [erbta]
ld.ab r9, [sp, 4]
sr r9, [lp_start]
ld.ab r9, [sp, 4]
sr r9, [lp_end]
ld.ab r9, [sp, 4]
mov lp_count, r9
ld.ab r9, [sp, 4]
sr r9, [erstatus]
ld.ab r9, [sp, 4]
sr r9, [eret]
ld.ab blink, [sp, 4]
ld.ab fp, [sp, 4]
ld.ab r26, [sp, 4] /* gp */
RESTORE_CALLER_SAVED
ld sp, [sp] /* restore original sp */
/* orig_r0 and orig_r8 skipped automatically */
.endm
/*--------------------------------------------------------------
* Save all registers used by interrupt handlers.
*-------------------------------------------------------------*/
.macro SAVE_ALL_INT1
/* restore original r9 , saved in int1_saved_reg
* It will be saved on stack in macro: SAVE_CALLER_SAVED
*/
#ifdef CONFIG_SMP
lr r9, [ARC_REG_SCRATCH_DATA0]
#else
ld r9, [@int1_saved_reg]
#endif
/* now we are ready to save the remaining context :) */
st orig_r8_IS_IRQ1, [sp, 8] /* Event Type */
st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
SAVE_CALLER_SAVED
st.a r26, [sp, -4] /* gp */
st.a fp, [sp, -4]
st.a blink, [sp, -4]
st.a ilink1, [sp, -4]
lr r9, [status32_l1]
st.a r9, [sp, -4]
st.a lp_count, [sp, -4]
lr r9, [lp_end]
st.a r9, [sp, -4]
lr r9, [lp_start]
st.a r9, [sp, -4]
lr r9, [bta_l1]
st.a r9, [sp, -4]
#ifdef PT_REGS_CANARY
mov r9, 0xdeadbee1
st r9, [sp, -4]
#endif
/* move up by 1 word to "create" pt_regs->"stack_place_holder" */
sub sp, sp, 4
.endm
.macro SAVE_ALL_INT2
/* TODO-vineetg: SMP we can't use global nor can we use
* SCRATCH0 as we do for int1 because while int1 is using
* it, int2 can come
*/
/* restore original r9, saved in int2_saved_reg */
ld r9, [@int2_saved_reg]
/* now we are ready to save the remaining context :) */
st orig_r8_IS_IRQ2, [sp, 8] /* Event Type */
st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
SAVE_CALLER_SAVED
st.a r26, [sp, -4] /* gp */
st.a fp, [sp, -4]
st.a blink, [sp, -4]
st.a ilink2, [sp, -4]
lr r9, [status32_l2]
st.a r9, [sp, -4]
st.a lp_count, [sp, -4]
lr r9, [lp_end]
st.a r9, [sp, -4]
lr r9, [lp_start]
st.a r9, [sp, -4]
lr r9, [bta_l2]
st.a r9, [sp, -4]
#ifdef PT_REGS_CANARY
mov r9, 0xdeadbee2
st r9, [sp, -4]
#endif
/* move up by 1 word to "create" pt_regs->"stack_place_holder" */
sub sp, sp, 4
.endm
/*--------------------------------------------------------------
* Restore all registers used by interrupt handlers.
*
* NOTE:
*
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
* for memory load operations. If used in that way interrupts are deferred
* by hardware and that is not good.
*-------------------------------------------------------------*/
.macro RESTORE_ALL_INT1
add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
ld.ab r9, [sp, 4] /* Actual reg file */
sr r9, [bta_l1]
ld.ab r9, [sp, 4]
sr r9, [lp_start]
ld.ab r9, [sp, 4]
sr r9, [lp_end]
ld.ab r9, [sp, 4]
mov lp_count, r9
ld.ab r9, [sp, 4]
sr r9, [status32_l1]
ld.ab r9, [sp, 4]
mov ilink1, r9
ld.ab blink, [sp, 4]
ld.ab fp, [sp, 4]
ld.ab r26, [sp, 4] /* gp */
RESTORE_CALLER_SAVED
ld sp, [sp] /* restore original sp */
/* orig_r0 and orig_r8 skipped automatically */
.endm
.macro RESTORE_ALL_INT2
add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
ld.ab r9, [sp, 4]
sr r9, [bta_l2]
ld.ab r9, [sp, 4]
sr r9, [lp_start]
ld.ab r9, [sp, 4]
sr r9, [lp_end]
ld.ab r9, [sp, 4]
mov lp_count, r9
ld.ab r9, [sp, 4]
sr r9, [status32_l2]
ld.ab r9, [sp, 4]
mov ilink2, r9
ld.ab blink, [sp, 4]
ld.ab fp, [sp, 4]
ld.ab r26, [sp, 4] /* gp */
RESTORE_CALLER_SAVED
ld sp, [sp] /* restore original sp */
/* orig_r0 and orig_r8 skipped automatically */
.endm
/* Get CPU-ID of this core */
.macro GET_CPU_ID reg
lr \reg, [identity]
lsr \reg, \reg, 8
bmsk \reg, \reg, 7
.endm
#ifdef CONFIG_SMP
/*-------------------------------------------------
* Retrieve the current running task on this CPU
* 1. Determine curr CPU id.
* 2. Use it to index into _current_task[ ]
*/
.macro GET_CURR_TASK_ON_CPU reg
GET_CPU_ID \reg
ld.as \reg, [@_current_task, \reg]
.endm
/*-------------------------------------------------
* Save a new task as the "current" task on this CPU
* 1. Determine curr CPU id.
* 2. Use it to index into _current_task[ ]
*
* Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
* because ST r0, [r1, offset] can ONLY have s9 @offset
* while LD can take s9 (4 byte insn) or LIMM (8 byte insn)
*/
.macro SET_CURR_TASK_ON_CPU tsk, tmp
GET_CPU_ID \tmp
add2 \tmp, @_current_task, \tmp
st \tsk, [\tmp]
#ifdef CONFIG_ARC_CURR_IN_REG
mov r25, \tsk
#endif
.endm
#else /* Uniprocessor implementation of macros */
.macro GET_CURR_TASK_ON_CPU reg
ld \reg, [@_current_task]
.endm
.macro SET_CURR_TASK_ON_CPU tsk, tmp
st \tsk, [@_current_task]
#ifdef CONFIG_ARC_CURR_IN_REG
mov r25, \tsk
#endif
.endm
#endif /* SMP / UNI */
/* ------------------------------------------------------------------
* Get the ptr to some field of Current Task at @off in task struct
* -Uses r25 for Current task ptr if that is enabled
*/
#ifdef CONFIG_ARC_CURR_IN_REG
.macro GET_CURR_TASK_FIELD_PTR off, reg
add \reg, r25, \off
.endm
#else
.macro GET_CURR_TASK_FIELD_PTR off, reg
GET_CURR_TASK_ON_CPU \reg
add \reg, \reg, \off
.endm
#endif /* CONFIG_ARC_CURR_IN_REG */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARC_ENTRY_H */


@ -0,0 +1,15 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARC_EXEC_H
#define __ASM_ARC_EXEC_H
/* Align to 16b */
#define arch_align_stack(p) ((unsigned long)(p) & ~0xf)
#endif


@ -0,0 +1,151 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Vineetg: August 2010: From Android kernel work
*/
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
#include <linux/futex.h>
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
\
__asm__ __volatile__( \
"1: ld %1, [%2] \n" \
insn "\n" \
"2: st %0, [%2] \n" \
" mov %0, 0 \n" \
"3: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
"4: mov %0, %4 \n" \
" b 3b \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" .align 4 \n" \
" .word 1b, 4b \n" \
" .word 2b, 4b \n" \
" .previous \n" \
\
: "=&r" (ret), "=&r" (oldval) \
: "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
: "cc", "memory")
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable(); /* implies preempt_disable() */
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
__futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op("or %0, %1, %3", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op("bic %0, %1, %3", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op("xor %0, %1, %3", ret, oldval, uaddr, oparg);
break;
default:
ret = -ENOSYS;
}
pagefault_enable(); /* subsumes preempt_enable() */
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ:
ret = (oldval == cmparg);
break;
case FUTEX_OP_CMP_NE:
ret = (oldval != cmparg);
break;
case FUTEX_OP_CMP_LT:
ret = (oldval < cmparg);
break;
case FUTEX_OP_CMP_GE:
ret = (oldval >= cmparg);
break;
case FUTEX_OP_CMP_LE:
ret = (oldval <= cmparg);
break;
case FUTEX_OP_CMP_GT:
ret = (oldval > cmparg);
break;
default:
ret = -ENOSYS;
}
}
return ret;
}
/* Compare-xchg with preemption disabled.
* Notes:
* -Best-Effort: Exchg happens only if compare succeeds.
* If compare fails, returns; leaving retry/looping to upper layers
* -successful cmp-xchg: return orig value in @addr (same as cmp val)
* -Compare fails: return orig value in @addr
* -user access r/w fails: return -EFAULT
*/
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
u32 newval)
{
u32 val;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable(); /* implies preempt_disable() */
/* TBD : can use llock/scond */
__asm__ __volatile__(
"1: ld %0, [%3] \n"
" brne %0, %1, 3f \n"
"2: st %2, [%3] \n"
"3: \n"
" .section .fixup,\"ax\" \n"
"4: mov %0, %4 \n"
" b 3b \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
" .align 4 \n"
" .word 1b, 4b \n"
" .word 2b, 4b \n"
" .previous\n"
: "=&r"(val)
: "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
: "cc", "memory");
pagefault_enable(); /* subsumes preempt_enable() */
*uval = val;
return val;
}
#endif
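
For clarity, how the legacy encoded_op handled by futex_atomic_op_inuser() above is laid out (a worked example, not new code):

/* encoded_op = (FUTEX_OP_ADD << 28) | (1 << 12)   i.e. 0x10001000
 *   op     = (encoded_op >> 28) & 7   = FUTEX_OP_ADD
 *   cmp    = (encoded_op >> 24) & 15  = FUTEX_OP_CMP_EQ
 *   oparg  = (encoded_op << 8) >> 20  = 1      (bits 12..23, sign extended)
 *   cmparg = (encoded_op << 20) >> 20 = 0      (bits  0..11, sign extended)
 * meaning: atomically add 1 to *uaddr, then test the old value for == 0.
 */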

arch/arc/include/asm/io.h

@ -0,0 +1,105 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_IO_H
#define _ASM_ARC_IO_H
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#define PCI_IOBASE ((void __iomem *)0)
extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
unsigned long flags);
extern void iounmap(const void __iomem *addr);
#define ioremap_nocache(phy, sz) ioremap(phy, sz)
#define ioremap_wc(phy, sz) ioremap(phy, sz)
/* Change struct page to physical address */
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
u8 b;
__asm__ __volatile__(
" ldb%U1 %0, %1 \n"
: "=r" (b)
: "m" (*(volatile u8 __force *)addr)
: "memory");
return b;
}
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
u16 s;
__asm__ __volatile__(
" ldw%U1 %0, %1 \n"
: "=r" (s)
: "m" (*(volatile u16 __force *)addr)
: "memory");
return s;
}
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
u32 w;
__asm__ __volatile__(
" ld%U1 %0, %1 \n"
: "=r" (w)
: "m" (*(volatile u32 __force *)addr)
: "memory");
return w;
}
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
__asm__ __volatile__(
" stb%U1 %0, %1 \n"
:
: "r" (b), "m" (*(volatile u8 __force *)addr)
: "memory");
}
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 s, volatile void __iomem *addr)
{
__asm__ __volatile__(
" stw%U1 %0, %1 \n"
:
: "r" (s), "m" (*(volatile u16 __force *)addr)
: "memory");
}
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 w, volatile void __iomem *addr)
{
__asm__ __volatile__(
" st%U1 %0, %1 \n"
:
: "r" (w), "m" (*(volatile u32 __force *)addr)
: "memory");
}
#include <asm-generic/io.h>
#endif /* _ASM_ARC_IO_H */
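
A short usage sketch of the MMIO accessors above; readl()/writel() come from the asm-generic layer included at the end, and the device, base address and register layout are made up:

/* Map a hypothetical peripheral at 0xf0000000 and poke two invented
 * registers: +0x10 enable, +0x14 status.
 */
static int demo_mmio(void)
{
	void __iomem *base = ioremap(0xf0000000, 0x100);

	if (!base)
		return -ENOMEM;

	writel(0x1, base + 0x10);		/* enable */
	while (!(readl(base + 0x14) & 0x1))
		;				/* poll ready bit */

	iounmap(base);
	return 0;
}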


@ -0,0 +1,25 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARC_IRQ_H
#define __ASM_ARC_IRQ_H
#define NR_IRQS 32
/* Platform Independent IRQs */
#define TIMER0_IRQ 3
#define TIMER1_IRQ 4
#include <asm-generic/irq.h>
extern void __init arc_init_IRQ(void);
extern int __init get_hw_config_num_irq(void);
void __cpuinit arc_local_timer_setup(unsigned int cpu);
#endif


@ -0,0 +1,153 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARC_IRQFLAGS_H
#define __ASM_ARC_IRQFLAGS_H
/* vineetg: March 2010 : local_irq_save( ) optimisation
* -Remove explicit mov of current status32 into reg, that is not needed
* -Use BIC insn instead of INVERTED + AND
* -Conditionally disable interrupts (if they are not enabled, don't disable)
*/
#ifdef __KERNEL__
#include <asm/arcregs.h>
#ifndef __ASSEMBLY__
/******************************************************************
* IRQ Control Macros
******************************************************************/
/*
* Save IRQ state and disable IRQs
*/
static inline long arch_local_irq_save(void)
{
unsigned long temp, flags;
__asm__ __volatile__(
" lr %1, [status32] \n"
" bic %0, %1, %2 \n"
" and.f 0, %1, %2 \n"
" flag.nz %0 \n"
: "=r"(temp), "=r"(flags)
: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
: "cc");
return flags;
}
/*
* restore saved IRQ state
*/
static inline void arch_local_irq_restore(unsigned long flags)
{
__asm__ __volatile__(
" flag %0 \n"
:
: "r"(flags));
}
/*
* Unconditionally Enable IRQs
*/
extern void arch_local_irq_enable(void);
/*
* Unconditionally Disable IRQs
*/
static inline void arch_local_irq_disable(void)
{
unsigned long temp;
__asm__ __volatile__(
" lr %0, [status32] \n"
" and %0, %0, %1 \n"
" flag %0 \n"
: "=&r"(temp)
: "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
}
/*
* save IRQ state
*/
static inline long arch_local_save_flags(void)
{
unsigned long temp;
__asm__ __volatile__(
" lr %0, [status32] \n"
: "=&r"(temp));
return temp;
}
/*
* Query IRQ state
*/
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
return !(flags & (STATUS_E1_MASK
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
| STATUS_E2_MASK
#endif
));
}
static inline int arch_irqs_disabled(void)
{
return arch_irqs_disabled_flags(arch_local_save_flags());
}
static inline void arch_mask_irq(unsigned int irq)
{
unsigned int ienb;
ienb = read_aux_reg(AUX_IENABLE);
ienb &= ~(1 << irq);
write_aux_reg(AUX_IENABLE, ienb);
}
static inline void arch_unmask_irq(unsigned int irq)
{
unsigned int ienb;
ienb = read_aux_reg(AUX_IENABLE);
ienb |= (1 << irq);
write_aux_reg(AUX_IENABLE, ienb);
}
#else
.macro IRQ_DISABLE scratch
lr \scratch, [status32]
bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
flag \scratch
.endm
.macro IRQ_DISABLE_SAVE scratch, save
lr \scratch, [status32]
mov \save, \scratch /* Make a copy */
bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
flag \scratch
.endm
.macro IRQ_ENABLE scratch
lr \scratch, [status32]
or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
flag \scratch
.endm
#endif /* __ASSEMBLY__ */
#endif /* KERNEL */
#endif
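
The canonical pattern the save/restore primitives above implement (an illustrative sketch; the protected counter is hypothetical, and kernel code would normally use the generic local_irq_save()/local_irq_restore() wrappers):

/* Briefly disable local interrupts around a non-atomic update. */
static inline void demo_bump(unsigned long *shared_counter)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* E1/E2 cleared only if set */
	(*shared_counter)++;		/* safe from local IRQs */
	arch_local_irq_restore(flags);	/* prior enable state put back */
}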


@ -0,0 +1,19 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_KDEBUG_H
#define _ASM_ARC_KDEBUG_H
enum die_val {
DIE_UNUSED,
DIE_TRAP,
DIE_IERR,
DIE_OOPS
};
#endif


@ -0,0 +1,61 @@
/*
* kgdb support for ARC
*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ARC_KGDB_H__
#define __ARC_KGDB_H__
#ifdef CONFIG_KGDB
#include <asm/user.h>
/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
* register API yet */
#undef DBG_MAX_REG_NUM
#define GDB_MAX_REGS 39
#define BREAK_INSTR_SIZE 2
#define CACHE_FLUSH_IS_SAFE 1
#define NUMREGBYTES (GDB_MAX_REGS * 4)
#define BUFMAX 2048
static inline void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__ ("trap_s 0x4\n");
}
extern void kgdb_trap(struct pt_regs *regs, int param);
enum arc700_linux_regnums {
_R0 = 0,
_R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
_R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
_R25, _R26,
_BTA = 27,
_LP_START = 28,
_LP_END = 29,
_LP_COUNT = 30,
_STATUS32 = 31,
_BLINK = 32,
_FP = 33,
__SP = 34,
_EFA = 35,
_RET = 36,
_ORIG_R8 = 37,
_STOP_PC = 38
};
#else
static inline void kgdb_trap(struct pt_regs *regs, int param)
{
}
#endif
#endif /* __ARC_KGDB_H__ */


@ -0,0 +1,62 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ARC_KPROBES_H
#define _ARC_KPROBES_H
#ifdef CONFIG_KPROBES
typedef u16 kprobe_opcode_t;
#define UNIMP_S_INSTRUCTION 0x79e0
#define TRAP_S_2_INSTRUCTION 0x785e
#define MAX_INSN_SIZE 8
#define MAX_STACK_SIZE 64
struct arch_specific_insn {
int is_short;
kprobe_opcode_t *t1_addr, *t2_addr;
kprobe_opcode_t t1_opcode, t2_opcode;
};
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
struct kprobe;
void arch_remove_kprobe(struct kprobe *p);
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
struct prev_kprobe {
struct kprobe *kp;
unsigned long status;
};
struct kprobe_ctlblk {
unsigned int kprobe_status;
struct pt_regs jprobe_saved_regs;
char jprobes_stack[MAX_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
void kretprobe_trampoline(void);
void trap_is_kprobe(unsigned long cause, unsigned long address,
struct pt_regs *regs);
#else
static void trap_is_kprobe(unsigned long cause, unsigned long address,
struct pt_regs *regs)
{
}
#endif
#endif


@ -0,0 +1,63 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
#ifdef __ASSEMBLY__
/* Can't use the ENTRY macro in linux/linkage.h
* gas considers ';' as comment vs. newline
*/
.macro ARC_ENTRY name
.global \name
.align 4
\name:
.endm
.macro ARC_EXIT name
#define ASM_PREV_SYM_ADDR(name) .-##name
.size \name, ASM_PREV_SYM_ADDR(\name)
.endm
/* annotation for data we want in DCCM - if enabled in .config */
.macro ARCFP_DATA nm
#ifdef CONFIG_ARC_HAS_DCCM
.section .data.arcfp
#else
.section .data
#endif
.global \nm
.endm
/* annotation for code we want in ICCM - if enabled in .config */
.macro ARCFP_CODE
#ifdef CONFIG_ARC_HAS_ICCM
.section .text.arcfp, "ax",@progbits
#else
.section .text, "ax",@progbits
#endif
.endm
#else /* !__ASSEMBLY__ */
#ifdef CONFIG_ARC_HAS_ICCM
#define __arcfp_code __attribute__((__section__(".text.arcfp")))
#else
#define __arcfp_code __attribute__((__section__(".text")))
#endif
#ifdef CONFIG_ARC_HAS_DCCM
#define __arcfp_data __attribute__((__section__(".data.arcfp")))
#else
#define __arcfp_data __attribute__((__section__(".data")))
#endif
#endif /* __ASSEMBLY__ */
#endif
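/*
 * A minimal usage sketch of the annotations above (hypothetical driver code,
 * not part of this header): place a hot variable/function into DCCM/ICCM.
 *
 *	static int irq_fast_hits __arcfp_data;
 *
 *	static void __arcfp_code handle_fast_irq(void)
 *	{
 *		irq_fast_hits++;
 *	}
 */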


@ -0,0 +1,87 @@
/*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*
* based on METAG mach/arch.h (which in turn was based on ARM)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_MACH_DESC_H_
#define _ASM_ARC_MACH_DESC_H_
/**
* struct machine_desc - Board specific callbacks, called from ARC common code
* Provided by each ARC board using MACHINE_START()/MACHINE_END(), so
* a multi-platform kernel builds with an array of such descriptors.
* We extend the early DT scan to also match the DT's "compatible" string
* against the @dt_compat of all such descriptors, and the one with the highest
* "DT score" is selected as the global @machine_desc.
*
* @name: Board/SoC name
* @dt_compat: Array of device tree 'compatible' strings
* (XXX: although only 1st entry is looked at)
* @init_early: Very early callback [called from setup_arch()]
* @init_irq: setup external IRQ controllers [called from init_IRQ()]
* @init_smp: for each CPU (e.g. setup IPI)
* [(M):init_IRQ(), (o):start_kernel_secondary()]
* @init_time: platform specific clocksource/clockevent registration
* [called from time_init()]
* @init_machine: arch initcall level callback (e.g. populate static
* platform devices or parse Devicetree)
* @init_late: Late initcall level callback
*
*/
struct machine_desc {
const char *name;
const char **dt_compat;
void (*init_early)(void);
void (*init_irq)(void);
#ifdef CONFIG_SMP
void (*init_smp)(unsigned int);
#endif
void (*init_time)(void);
void (*init_machine)(void);
void (*init_late)(void);
};
/*
* Current machine - only accessible during boot.
*/
extern struct machine_desc *machine_desc;
/*
* Machine type table - also only accessible during boot
*/
extern struct machine_desc __arch_info_begin[], __arch_info_end[];
#define for_each_machine_desc(p) \
for (p = __arch_info_begin; p < __arch_info_end; p++)
static inline struct machine_desc *default_machine_desc(void)
{
/* the default machine is the last one linked in */
if (__arch_info_end - 1 < __arch_info_begin)
return NULL;
return __arch_info_end - 1;
}
/*
* Set of macros to define architecture features.
* This is built into a table by the linker.
*/
#define MACHINE_START(_type, _name) \
static const struct machine_desc __mach_desc_##_type \
__used \
__attribute__((__section__(".arch.info.init"))) = { \
.name = _name,
#define MACHINE_END \
};
extern struct machine_desc *setup_machine_fdt(void *dt);
extern void __init copy_devtree(void);
#endif
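/*
 * Illustrative board-file usage of the machinery above (hypothetical platform
 * and names, shown only as a sketch of how the macros compose):
 *
 *	static const char *myboard_compat[] = { "snps,myboard", NULL };
 *
 *	static void __init myboard_init_early(void)
 *	{
 *	}
 *
 *	MACHINE_START(MYBOARD, "myboard")
 *		.dt_compat	= myboard_compat,
 *		.init_early	= myboard_init_early,
 *	MACHINE_END
 */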


@ -0,0 +1,23 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_MMU_H
#define _ASM_ARC_MMU_H
#ifndef __ASSEMBLY__
typedef struct {
unsigned long asid; /* Pvt Addr-Space ID for mm */
#ifdef CONFIG_ARC_TLB_DBG
struct task_struct *tsk;
#endif
} mm_context_t;
#endif
#endif


@ -0,0 +1,213 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* vineetg: May 2011
* -Refactored get_new_mmu_context( ) to only handle live-mm.
* retiring-mm handled in other hooks
*
* Vineetg: March 25th, 2008: Bug #92690
* -Major rewrite of Core ASID allocation routine get_new_mmu_context
*
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
#ifndef _ASM_ARC_MMU_CONTEXT_H
#define _ASM_ARC_MMU_CONTEXT_H
#include <asm/arcregs.h>
#include <asm/tlb.h>
#include <asm-generic/mm_hooks.h>
/* ARC700 ASID Management
*
* ARC MMU provides 8-bit ASID (0..255) to TAG TLB entries, allowing entries
* with the same vaddr (different tasks) to co-exist. This provides for
* "Fast Context Switch" i.e. no TLB flush on ctxt-switch
*
* Linux assigns each task a unique ASID. A simple round-robin allocation
* of H/w ASID is done using software tracker @asid_cache.
* When it reaches max 255, the allocation cycle starts afresh by flushing
* the entire TLB and wrapping ASID back to zero.
*
* For book-keeping, Linux uses a couple of data-structures:
* -mm_struct has an @asid field to keep a note of task's ASID (needed at the
* time of say switch_mm( ))
* -An array of mm structs @asid_mm_map[] for the reverse asid->mm mapping:
* given an ASID, find the associated mm struct.
*
* The round-robin allocation algorithm allows for ASID stealing.
* If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
* already assigned to another (switched-out) task. Obviously the prev owner
* is marked with an invalid ASID to make it request for a new ASID when it
* gets scheduled next time. However its TLB entries (with ASID "x") could
* exist, which must be cleared before the same ASID is used by the new owner.
* Flushing them would be a plausible but costly solution. Instead we force an
* allocation policy quirk, which ensures that a stolen ASID won't have any
* TLB entries associated, alleviating the need to flush.
* The quirk essentially is not allowing ASID allocated in prev cycle
* to be used past a roll-over in the next cycle.
* When this happens (i.e. task ASID > asid tracker), task needs to refresh
* its ASID, aligning it to current value of tracker. If the task doesn't get
* scheduled past a roll-over, hence its ASID is not yet realigned with the
* tracker, such an ASID is anyways safely reusable because it is
* guaranteed that TLB entries with that ASID won't exist.
*/
#define FIRST_ASID 0
#define MAX_ASID 255 /* 8 bit PID field in PID Aux reg */
#define NO_ASID (MAX_ASID + 1) /* ASID Not alloc to mmu ctxt */
#define NUM_ASID ((MAX_ASID - FIRST_ASID) + 1)
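/*
 * Worked example of the rollover quirk described above (illustrative numbers
 * only): with @asid_cache at MAX_ASID (255), the next request wraps it to
 * FIRST_ASID (0), flushes the whole TLB and hands out ASID 0. A task still
 * carrying ASID 255 from the previous cycle now satisfies
 * "context.asid > asid_cache", so switch_mm( ) below forces it to take a
 * fresh ASID the next time it is scheduled.
 */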
/* ASID to mm struct mapping */
extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
extern int asid_cache;
/*
* Assign a new ASID to task. If the task already has an ASID, it is
* relinquished.
*/
static inline void get_new_mmu_context(struct mm_struct *mm)
{
struct mm_struct *prev_owner;
unsigned long flags;
local_irq_save(flags);
/*
* Relinquish the currently owned ASID (if any).
* Doing it unconditionally saves a cmp-n-branch; for an already unused
* ASID slot, the value was/remains NULL
*/
asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
/* move to new ASID */
if (++asid_cache > MAX_ASID) { /* ASID roll-over */
asid_cache = FIRST_ASID;
flush_tlb_all();
}
/*
* Is next ASID already owned by some-one else (we are stealing it).
* If so, let the orig owner be aware of this, so when it runs, it
* asks for a brand new ASID. This would only happen for a long-lived
* task with ASID from prev allocation cycle (before ASID roll-over).
*
* This might look wrong - if we are re-using some other task's ASID,
* won't we use its stale TLB entries too? Actually switch_mm( ) takes
* care of such a case: it ensures that a task with an ASID from a prev alloc
* cycle, when scheduled, will refresh its ASID: see switch_mm( ) below.
* The stealing scenario described here will only happen if that task
* didn't get a chance to refresh its ASID - implying stale entries
* won't exist.
*/
prev_owner = asid_mm_map[asid_cache];
if (prev_owner)
prev_owner->context.asid = NO_ASID;
/* Assign new ASID to tsk */
asid_mm_map[asid_cache] = mm;
mm->context.asid = asid_cache;
#ifdef CONFIG_ARC_TLB_DBG
pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s,"
" pid:%u, assigned asid:%lu\n",
(unsigned int)mm, (unsigned int)prev_owner,
(unsigned int)(mm->context.tsk), (mm->context.tsk)->comm,
(mm->context.tsk)->pid, mm->context.asid);
#endif
write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
local_irq_restore(flags);
}
/*
* Initialize the context related info for a new mm_struct
* instance.
*/
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
mm->context.asid = NO_ASID;
#ifdef CONFIG_ARC_TLB_DBG
mm->context.tsk = tsk;
#endif
return 0;
}
/* Prepare the MMU for task: setup PID reg with allocated ASID
If task doesn't have an ASID (never allocated, or stolen), get a new ASID
*/
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
#ifndef CONFIG_SMP
/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
#endif
/*
* Get a new ASID if task doesn't have a valid one. Possible when
* -task never had an ASID (fresh after fork)
* -its ASID was stolen - past an ASID roll-over.
* -There's a third obscure scenario (if this task is running for the
* first time after an ASID rollover), where despite having a valid
* ASID, we force a get for new ASID - see comments at top.
*
* Both the non-alloc scenario and first-use-after-rollover can be
* detected using the single condition below: NO_ASID = 256
* while asid_cache is always a valid ASID value (0-255).
*/
if (next->context.asid > asid_cache) {
get_new_mmu_context(next);
} else {
/*
* XXX: This will never happen given the chks above
* BUG_ON(next->context.asid > MAX_ASID);
*/
write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
}
}
static inline void destroy_context(struct mm_struct *mm)
{
unsigned long flags;
local_irq_save(flags);
asid_mm_map[mm->context.asid] = NULL;
mm->context.asid = NO_ASID;
local_irq_restore(flags);
}
/* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
* for retiring-mm. However destroy_context( ) still needs to do that because
* between mm_release( ) => deactivate_mm( ) and
* mmput => .. => __mmdrop( ) => destroy_context( )
* there is a good chance that the task gets sched-out/in, making its ASID valid
* again (this teased me for a whole day).
*/
#define deactivate_mm(tsk, mm) do { } while (0)
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
#ifndef CONFIG_SMP
write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
#endif
/* Unconditionally get a new ASID */
get_new_mmu_context(next);
}
#define enter_lazy_tlb(mm, tsk)
#endif /* __ASM_ARC_MMU_CONTEXT_H */


@ -0,0 +1,28 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
#ifndef _ASM_ARC_MODULE_H
#define _ASM_ARC_MODULE_H
#include <asm-generic/module.h>
#ifdef CONFIG_ARC_DW2_UNWIND
struct mod_arch_specific {
void *unw_info;
int unw_sec_idx;
};
#endif
#define MODULE_PROC_FAMILY "ARC700"
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
#endif /* _ASM_ARC_MODULE_H */


@ -0,0 +1,18 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* xchg() based mutex fast path maintains a state of 0 or 1, as opposed to
* atomic dec based which can "count" any number of lock contenders.
* This ideally needs to be fixed in core, but for now switching to dec ver.
*/
#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
#include <asm-generic/mutex-dec.h>
#else
#include <asm-generic/mutex-xchg.h>
#endif

arch/arc/include/asm/page.h

@ -0,0 +1,109 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARC_PAGE_H
#define __ASM_ARC_PAGE_H
#include <uapi/asm/page.h>
#ifndef __ASSEMBLY__
#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
#define free_user_page(page, addr) free_page(addr)
/* TBD: for now don't worry about VIPT D$ aliasing */
#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
#define clear_user_page(addr, vaddr, pg) clear_page(addr)
#define copy_user_page(vto, vfrom, vaddr, pg) copy_page(vto, vfrom)
#undef STRICT_MM_TYPECHECKS
#ifdef STRICT_MM_TYPECHECKS
/*
* These are used to make use of C type-checking..
*/
typedef struct {
unsigned long pte;
} pte_t;
typedef struct {
unsigned long pgd;
} pgd_t;
typedef struct {
unsigned long pgprot;
} pgprot_t;
typedef unsigned long pgtable_t;
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) })
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
#define pte_pgprot(x) __pgprot(pte_val(x))
#else /* !STRICT_MM_TYPECHECKS */
typedef unsigned long pte_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;
typedef unsigned long pgtable_t;
#define pte_val(x) (x)
#define pgd_val(x) (x)
#define pgprot_val(x) (x)
#define __pte(x) (x)
#define __pgprot(x) (x)
#define pte_pgprot(x) (x)
#endif
#define ARCH_PFN_OFFSET (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
#define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
/*
* __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
*
* These macros have historically been misnamed
* virt here means link-address/program-address as embedded in object code.
* So if kernel img is linked at 0x8000_0000 onwards, 0x8010_0000 will be
* 128th page, and virt_to_page( ) will return the struct page corresponding to it.
* mem_map[ ] is an array of struct page for each page frame in the system
*
* Independent of where linux is linked at, link-addr = physical address
* So the old macro __pa = vaddr + PAGE_OFFSET - CONFIG_LINUX_LINK_BASE
* would have been wrong in case the kernel is not linked at 0x8000_0000
*/
#define __pa(vaddr) ((unsigned long)vaddr)
#define __va(paddr) ((void *)((unsigned long)(paddr)))
#define virt_to_page(kaddr) \
(mem_map + ((__pa(kaddr) - CONFIG_LINUX_LINK_BASE) >> PAGE_SHIFT))
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
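/*
 * Quick illustration of the above (assuming the typical link base of
 * 0x8000_0000 and the default 8K page size): for kernel address 0x8010_0000,
 * __pa() is identity (0x8010_0000) and virt_to_page() yields
 * mem_map[(0x8010_0000 - 0x8000_0000) >> 13], i.e. mem_map[128].
 */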
/* Default Permissions for page, used in mmap.c */
#ifdef CONFIG_ARC_STACK_NONEXEC
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
#else
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#endif
#define WANT_PAGE_VIRTUAL 1
#include <asm-generic/memory_model.h> /* page_to_pfn, pfn_to_page */
#include <asm-generic/getorder.h>
#endif /* !__ASSEMBLY__ */
#endif


@ -0,0 +1,13 @@
/*
* Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#ifndef __ASM_PERF_EVENT_H
#define __ASM_PERF_EVENT_H
#endif /* __ASM_PERF_EVENT_H */


@ -0,0 +1,134 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* vineetg: June 2011
* -"/proc/meminfo | grep PageTables" kept on increasing
* Recently added pgtable dtor was not getting called.
*
* vineetg: May 2011
* -Variable pg-sz means that Page Tables could be variable sized themselves
* So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx]
* -Page Table size capped to max 1 page to save memory - hence verified.
* -Since these deal with constants, gcc compile-time optimizes them.
*
* vineetg: Nov 2010
* -Added pgtable ctor/dtor used for pgtable mem accounting
*
* vineetg: April 2010
* -Switched pgtable_t from being struct page * to unsigned long
* =Needed so that Page Table allocator (pte_alloc_one) is not forced to
* deal with struct page. That way in future we can make it allocate
* multiple PG Tbls in one Page Frame
* =sweet side effect is avoiding calls to ugly page_address( ) from the
* pg-tlb allocator sub-sys (pte_alloc_one, pte_free, pmd_populate)
*
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
#ifndef _ASM_ARC_PGALLOC_H
#define _ASM_ARC_PGALLOC_H
#include <linux/mm.h>
#include <linux/log2.h>
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
pmd_set(pmd, pte);
}
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
{
pmd_set(pmd, (pte_t *) ptep);
}
static inline int __get_order_pgd(void)
{
return get_order(PTRS_PER_PGD * 4);
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
int num, num2;
pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());
if (ret) {
num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
memzero(ret, num * sizeof(pgd_t));
num2 = VMALLOC_SIZE / PGDIR_SIZE;
memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));
memzero(ret + num + num2,
(PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
}
return ret;
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_pages((unsigned long)pgd, __get_order_pgd());
}
/*
* With software-only page-tables, addr-split for traversal is tweakable and
* that directly governs how big tables would be at each level.
* Further, the MMU page size is configurable.
* Thus we need to programmatically assert the size constraint
* All of this is const math, allowing gcc to do constant folding/propagation.
*/
static inline int __get_order_pte(void)
{
return get_order(PTRS_PER_PTE * 4);
}
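/*
 * Illustrative arithmetic (default 8K page size assumed): BITS_FOR_PTE = 8,
 * so a last level table is 256 entries * 4 bytes = 1K and __get_order_pte()
 * evaluates to 0 - i.e. one 8K page per table, with the remaining 7K unused
 * for now (see the TODO in pgtable.h). The PGD (2048 entries * 4 = 8K) is
 * likewise an order-0 allocation.
 */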
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
pte_t *pte;
pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
__get_order_pte());
return pte;
}
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
pgtable_t pte_pg;
pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
if (pte_pg) {
memzero((void *)pte_pg, PTRS_PER_PTE * 4);
pgtable_page_ctor(virt_to_page(pte_pg));
}
return pte_pg;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */
}
static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
pgtable_page_dtor(virt_to_page(ptep));
free_pages(ptep, __get_order_pte());
}
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
#define check_pgt_cache() do { } while (0)
#define pmd_pgtable(pmd) pmd_page_vaddr(pmd)
#endif /* _ASM_ARC_PGALLOC_H */


@ -0,0 +1,405 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* vineetg: May 2011
* -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
* They are semantically the same although in different contexts
* VALID marks that a TLB entry exists, which can only happen if PRESENT
* - Utilise some unused free bits to confine PTE flags to 12 bits
* This is a must for 4k pg-sz
*
* vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
* -TLB Locking never really existed, except for initial specs
* -SILENT_xxx not needed for our port
* -Per my request, MMU V3 changes the layout of some of the bits
* to avoid a few shifts in TLB Miss handlers.
*
* vineetg: April 2010
* -PGD entry no longer contains any flags. If empty it is 0, otherwise has
* Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set( ) become simpler
*
* vineetg: April 2010
* -Switched from 8:11:13 split for page table lookup to 11:8:13
* -this speeds up page table allocation itself as we now have to memset 1K
* instead of 8k per page table.
* -TODO: Right now page table alloc is 8K and rest 7K is unused
* need to optimise it
*
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm-generic/pgtable-nopmd.h>
/**************************************************************************
* Page Table Flags
*
* ARC700 MMU only deals with software managed TLB entries.
* Page Tables are purely for Linux VM's consumption and the bits below are
* suited to that (uniqueness). Hence some are not implemented in the TLB and
* some have different value in TLB.
* e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
* separate PD0 and PD1, which combined form a translation entry)
* while for PTE perspective, they are 8 and 9 respectively
* with MMU v3: Most bits (except SHARED) represent the exact hardware pos
* (saves some bit shift ops in TLB Miss hdlrs)
*/
#if (CONFIG_ARC_MMU_VER <= 2)
#define _PAGE_ACCESSED (1<<1) /* Page is accessed (S) */
#define _PAGE_CACHEABLE (1<<2) /* Page is cached (H) */
#define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */
#define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */
#define _PAGE_READ (1<<5) /* Page has user read perm (H) */
#define _PAGE_K_EXECUTE (1<<6) /* Page has kernel execute perm (H) */
#define _PAGE_K_WRITE (1<<7) /* Page has kernel write perm (H) */
#define _PAGE_K_READ (1<<8) /* Page has kernel perm (H) */
#define _PAGE_GLOBAL (1<<9) /* Page is global (H) */
#define _PAGE_MODIFIED (1<<10) /* Page modified (dirty) (S) */
#define _PAGE_FILE (1<<10) /* page cache/ swap (S) */
#define _PAGE_PRESENT (1<<11) /* TLB entry is valid (H) */
#else
/* PD1 */
#define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */
#define _PAGE_EXECUTE (1<<1) /* Page has user execute perm (H) */
#define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */
#define _PAGE_READ (1<<3) /* Page has user read perm (H) */
#define _PAGE_K_EXECUTE (1<<4) /* Page has kernel execute perm (H) */
#define _PAGE_K_WRITE (1<<5) /* Page has kernel write perm (H) */
#define _PAGE_K_READ (1<<6) /* Page has kernel perm (H) */
#define _PAGE_ACCESSED (1<<7) /* Page is accessed (S) */
/* PD0 */
#define _PAGE_GLOBAL (1<<8) /* Page is global (H) */
#define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */
#define _PAGE_SHARED_CODE (1<<10) /* Shared Code page with cmn vaddr
usable for shared TLB entries (H) */
#define _PAGE_MODIFIED (1<<11) /* Page modified (dirty) (S) */
#define _PAGE_FILE (1<<12) /* page cache/ swap (S) */
#define _PAGE_SHARED_CODE_H (1<<31) /* Hardware counterpart of above */
#endif
/* Kernel allowed all permissions for all pages */
#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
#ifdef CONFIG_ARC_CACHE_PAGES
#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
#else
#define _PAGE_DEF_CACHEABLE (0)
#endif
/* Helper for every "user" page
* -kernel can R/W/X
* -by default cached, unless config otherwise
* -present in memory
*/
#define ___DEF (_PAGE_PRESENT | _K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
/* More abbreviated helpers */
#define PAGE_U_NONE __pgprot(___DEF)
#define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
_PAGE_EXECUTE)
#define PAGE_SHARED PAGE_U_W_R
/* While kernel runs out of untranslated space, vmalloc/modules use a chunk of
* kernel vaddr space - visible in all addr spaces, but kernel mode only
* Thus Global, all-kernel-access, no-user-access, cached
*/
#define PAGE_KERNEL __pgprot(___DEF | _PAGE_GLOBAL)
/* ioremap */
#define PAGE_KERNEL_NO_CACHE __pgprot(_PAGE_PRESENT | _K_PAGE_PERMS | \
_PAGE_GLOBAL)
/**************************************************************************
* Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
*
* Certain cases have 1:1 mapping
* e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
* which directly corresponds to PAGE_U_X_R
*
* Other rules which cause the divergence from 1:1 mapping
*
* 1. Although ARC700 can do exclusive execute/write protection (meaning R
* can be tracked independently of X/W unlike some other CPUs), still to
* keep things consistent with other archs:
* -Write implies Read: W => R
* -Execute implies Read: X => R
*
* 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
* This is to enable COW mechanism
*/
/* xwr */
#define __P000 PAGE_U_NONE
#define __P001 PAGE_U_R
#define __P010 PAGE_U_R /* Pvt-W => !W */
#define __P011 PAGE_U_R /* Pvt-W => !W */
#define __P100 PAGE_U_X_R /* X => R */
#define __P101 PAGE_U_X_R
#define __P110 PAGE_U_X_R /* Pvt-W => !W and X => R */
#define __P111 PAGE_U_X_R /* Pvt-W => !W */
#define __S000 PAGE_U_NONE
#define __S001 PAGE_U_R
#define __S010 PAGE_U_W_R /* W => R */
#define __S011 PAGE_U_W_R
#define __S100 PAGE_U_X_R /* X => R */
#define __S101 PAGE_U_X_R
#define __S110 PAGE_U_X_W_R /* X => R */
#define __S111 PAGE_U_X_W_R
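/*
 * e.g. a private writable mapping (__P011) deliberately starts as PAGE_U_R
 * per the "Pvt-W => !W" rule above: the first write faults, the generic COW
 * code copies the page, and only then is _PAGE_WRITE granted via
 * pte_mkwrite() further down in this file.
 */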
/****************************************************************
* Page Table Lookup split
*
* We implement 2 tier paging and since this is all software, we are free
* to customize the span of a PGD / PTE entry to suit us
*
* 32 bit virtual address
* -------------------------------------------------------
* | BITS_FOR_PGD | BITS_FOR_PTE | BITS_IN_PAGE |
* -------------------------------------------------------
* | | |
* | | --> off in page frame
* | |
* | ---> index into Page Table
* |
* ----> index into Page Directory
*/
#define BITS_IN_PAGE PAGE_SHIFT
/* Optimal Sizing of Pg Tbl - based on MMU page size */
#if defined(CONFIG_ARC_PAGE_SIZE_8K)
#define BITS_FOR_PTE 8
#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
#define BITS_FOR_PTE 8
#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
#define BITS_FOR_PTE 9
#endif
#define BITS_FOR_PGD (32 - BITS_FOR_PTE - BITS_IN_PAGE)
#define PGDIR_SHIFT (BITS_FOR_PTE + BITS_IN_PAGE)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PDG sz */
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#ifdef __ASSEMBLY__
#define PTRS_PER_PTE (1 << BITS_FOR_PTE)
#define PTRS_PER_PGD (1 << BITS_FOR_PGD)
#else
#define PTRS_PER_PTE (1UL << BITS_FOR_PTE)
#define PTRS_PER_PGD (1UL << BITS_FOR_PGD)
#endif
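/*
 * Worked numbers for the split above (default 8K page, illustrative only):
 * BITS_IN_PAGE = 13, BITS_FOR_PTE = 8, BITS_FOR_PGD = 11, so PGDIR_SHIFT = 21
 * and each PGD entry spans 2M of virtual address space, with 2048 PGD entries
 * and 256 PTEs per last level table.
 */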
/*
* Number of entries a user land program can use.
* TASK_SIZE is the maximum vaddr that can be used by a userland program.
*/
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
/*
* No special requirements for lowest virtual address we permit any user space
* mapping to be mapped at.
*/
#define FIRST_USER_ADDRESS 0
/****************************************************************
* Bucket load of VM Helpers
*/
#ifndef __ASSEMBLY__
#define pte_ERROR(e) \
pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
/* find the page descriptor of the Page Tbl ref by PMD entry */
#define pmd_page(pmd) virt_to_page(pmd_val(pmd) & PAGE_MASK)
/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
/* In a 2 level sys, setup the PGD entry with PTE value */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
pmd_val(*pmdp) = (unsigned long)ptep;
}
#define pte_none(x) (!pte_val(x))
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) set_pte_at(mm, addr, ptep, __pte(0))
#define pmd_none(x) (!pmd_val(x))
#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x) (pmd_val(x))
#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
#define pte_page(x) (mem_map + \
(unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))
#define mk_pte(page, pgprot) \
({ \
pte_t pte; \
pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot); \
pte; \
})
/* TBD: Non linear mapping stuff */
static inline int pte_file(pte_t pte)
{
return pte_val(pte) & _PAGE_FILE;
}
#define PTE_FILE_MAX_BITS 30
#define pgoff_to_pte(x) __pte(x)
#define pte_to_pgoff(x) (pte_val(x) >> 2)
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
/*
* pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
* and returns ptr to PTE entry corresponding to @addr
*/
#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
__pte_index(addr))
/* No mapping of Page Tables in high mem etc, so following same as above */
#define pte_offset_kernel(dir, addr) pte_offset(dir, addr)
#define pte_offset_map(dir, addr) pte_offset(dir, addr)
/* Zoo of pte_xxx function */
#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED)
#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte) (0)
#define PTE_BIT_FUNC(fn, op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean, &= ~(_PAGE_MODIFIED));
PTE_BIT_FUNC(mkdirty, |= (_PAGE_MODIFIED));
PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect, &= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec, |= (_PAGE_EXECUTE));
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
set_pte(ptep, pteval);
}
/*
* All kernel related VM pages are in init's mm.
*/
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr) (((mm)->pgd)+pgd_index(addr))
/*
* Macro to quickly access the PGD entry, utilising the fact that some
* arch may cache the pointer to Page Directory of "current" task
* in a MMU register
*
* Thus task->mm->pgd (3 pointer dereferences, cache misses etc) simply
* becomes reading a register
*
* ********CAUTION*******:
* Kernel code might be dealing with some mm_struct of NON "current"
* Thus use this macro only when you are certain that "current" is current
* e.g. when dealing with signal frame setup code etc
*/
#ifndef CONFIG_SMP
#define pgd_offset_fast(mm, addr) \
({ \
pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0); \
pgd_base + pgd_index(addr); \
})
#else
#define pgd_offset_fast(mm, addr) pgd_offset(mm, addr)
#endif
extern void paging_init(void);
extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t *ptep);
/* Encode swap {type,off} tuple into PTE
* We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
* both PAGE_FILE and PAGE_PRESENT are zero in a PTE holding swap "identifier"
*/
#define __swp_entry(type, off) ((swp_entry_t) { \
((type) & 0x1f) | ((off) << 13) })
/* Decode a PTE containing swap "identifier" into constituents */
#define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13)
/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#define kern_addr_valid(addr) (1)
/*
* remap a physical page `pfn' of size `size' with page protection `prot'
* into virtual address `from'
*/
#define io_remap_pfn_range(vma, from, pfn, size, prot) \
remap_pfn_range(vma, from, pfn, size, prot)
#include <asm-generic/pgtable.h>
/*
* No page table caches to initialise
*/
#define pgtable_cache_init() do { } while (0)
#endif /* __ASSEMBLY__ */
#endif


@ -0,0 +1,151 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* vineetg: March 2009
* -Implemented task_pt_regs( )
*
* Amit Bhor, Sameer Dhavale, Ashwin Chaugule: Codito Technologies 2004
*/
#ifndef __ASM_ARC_PROCESSOR_H
#define __ASM_ARC_PROCESSOR_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/arcregs.h> /* for STATUS_E1_MASK et all */
/* Arch specific stuff which needs to be saved per task.
* However these items are not so important so as to earn a place in
* struct thread_info
*/
struct thread_struct {
unsigned long ksp; /* kernel mode stack pointer */
unsigned long callee_reg; /* pointer to callee regs */
unsigned long fault_address; /* dbls as brkpt holder as well */
unsigned long cause_code; /* Exception Cause Code (ECR) */
#ifdef CONFIG_ARC_CURR_IN_REG
unsigned long user_r25;
#endif
#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
struct arc_fpu fpu;
#endif
};
#define INIT_THREAD { \
.ksp = sizeof(init_stack) + (unsigned long) init_stack, \
}
/* Forward declaration, a strange C thing */
struct task_struct;
/*
* Return saved PC of a blocked thread.
*/
unsigned long thread_saved_pc(struct task_struct *t);
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_SIZE - 4 + (void *)task_stack_page(p)) - 1)
/* Free all resources held by a thread. */
#define release_thread(thread) do { } while (0)
/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0)
/*
* A lot of busy-wait loops in SMP are based off of non-volatile data, which
* would otherwise get optimised away by gcc
*/
#ifdef CONFIG_SMP
#define cpu_relax() __asm__ __volatile__ ("" : : : "memory")
#else
#define cpu_relax() do { } while (0)
#endif
#define copy_segments(tsk, mm) do { } while (0)
#define release_segments(mm) do { } while (0)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret)
/*
* Whereabouts of Task's sp, fp, blink when it was last seen in kernel mode.
* These can't be derived from pt_regs as that would give the corresp user-mode val
*/
#define KSTK_ESP(tsk) (tsk->thread.ksp)
#define KSTK_BLINK(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1+1)*4)))
#define KSTK_FP(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1)*4)))
/*
* Do necessary setup to start up a newly executed thread.
*
* E1,E2 so that Interrupts are enabled in user mode
* L set, so Loop inhibited to begin with
* lp_start and lp_end seeded with bogus non-zero values so as to easily catch
* the ARC700 sr to lp_start hardware bug
*/
#define start_thread(_regs, _pc, _usp) \
do { \
set_fs(USER_DS); /* reads from user space */ \
(_regs)->ret = (_pc); \
/* Interrupts enabled in User Mode */ \
(_regs)->status32 = STATUS_U_MASK | STATUS_L_MASK \
| STATUS_E1_MASK | STATUS_E2_MASK; \
(_regs)->sp = (_usp); \
/* bogus seed values for debugging */ \
(_regs)->lp_start = 0x10; \
(_regs)->lp_end = 0x80; \
} while (0)
extern unsigned int get_wchan(struct task_struct *p);
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
* Should the PC register be read instead? This macro does not seem to
* be used in many places so this won't be all that bad.
*/
#define current_text_addr() ({ __label__ _l; _l: &&_l; })
#endif /* !__ASSEMBLY__ */
/* Kernel's Virtual memory area.
* Unlike other architectures (MIPS, sh, cris), ARC700 does not have a
* "kernel translated" region (like KSEG2 in MIPS). So we use an upper part
* of the translated bottom 2GB for kernel virtual memory and protect
* these pages from user accesses by disabling Ru, Eu and Wu.
*/
#define VMALLOC_SIZE (0x10000000) /* 256M */
#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
#define VMALLOC_END (PAGE_OFFSET)
/* Most of the architectures seem to be keeping some kind of padding between
* userspace TASK_SIZE and PAGE_OFFSET. i.e TASK_SIZE != PAGE_OFFSET.
*/
#define USER_KERNEL_GUTTER 0x10000000
/* User address space:
* On ARC700, CPU allows the entire lower half of 32 bit address space to be
* translated. Thus potentially 2G (0:0x7FFF_FFFF) could be User vaddr space.
* However we steal 256M for kernel addr (0x7000_0000:0x7FFF_FFFF) and another
* 256M (0x6000_0000:0x6FFF_FFFF) is gutter between user/kernel spaces
* Thus total User vaddr space is (0:0x5FFF_FFFF)
*/
#define TASK_SIZE (PAGE_OFFSET - VMALLOC_SIZE - USER_KERNEL_GUTTER)
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
#endif /* __KERNEL__ */
#endif /* __ASM_ARC_PROCESSOR_H */


@ -0,0 +1,14 @@
/*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_PROM_H_
#define _ASM_ARC_PROM_H_
#define HAVE_ARCH_DEVTREE_FIXUPS
#endif


@ -0,0 +1,130 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
#ifndef __ASM_ARC_PTRACE_H
#define __ASM_ARC_PTRACE_H
#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
/* THE pt_regs: Defines how regs are saved during entry into kernel */
struct pt_regs {
/*
* 1 word gutter after reg-file has been saved
* Technically not needed, since SP always points to a "full" location
* (vs. "empty"). But pt_regs is shared with tools....
*/
long res;
/* Real registers */
long bta; /* bta_l1, bta_l2, erbta */
long lp_start;
long lp_end;
long lp_count;
long status32; /* status32_l1, status32_l2, erstatus */
long ret; /* ilink1, ilink2 or eret */
long blink;
long fp;
long r26; /* gp */
long r12;
long r11;
long r10;
long r9;
long r8;
long r7;
long r6;
long r5;
long r4;
long r3;
long r2;
long r1;
long r0;
long sp; /* user/kernel sp depending on where we came from */
long orig_r0;
/* to distinguish between excp, syscall, irq */
union {
#ifdef CONFIG_CPU_BIG_ENDIAN
/* so that assembly code is same for LE/BE */
unsigned long orig_r8:16, event:16;
#else
unsigned long event:16, orig_r8:16;
#endif
long orig_r8_word;
};
};
/* Callee saved registers - need to be saved only when you are scheduled out */
struct callee_regs {
long res; /* Again this is not needed */
long r25;
long r24;
long r23;
long r22;
long r21;
long r20;
long r19;
long r18;
long r17;
long r16;
long r15;
long r14;
long r13;
};
#define instruction_pointer(regs) ((regs)->ret)
#define profile_pc(regs) instruction_pointer(regs)
/* return 1 if user mode or 0 if kernel mode */
#define user_mode(regs) (regs->status32 & STATUS_U_MASK)
#define user_stack_pointer(regs)\
({ unsigned int sp; \
if (user_mode(regs)) \
sp = (regs)->sp;\
else \
sp = -1; \
sp; \
})
/* return 1 if PC in delay slot */
#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
#define in_syscall(regs) (regs->event & orig_r8_IS_SCALL)
#define in_brkpt_trap(regs) (regs->event & orig_r8_IS_BRKPT)
#define syscall_wont_restart(regs) (regs->event |= orig_r8_IS_SCALL_RESTARTED)
#define syscall_restartable(regs) !(regs->event & orig_r8_IS_SCALL_RESTARTED)
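/*
 * e.g. the low level syscall entry path stamps @event with orig_r8_IS_SCALL,
 * making in_syscall() above true; signal handling code can later use
 * syscall_wont_restart() to OR in orig_r8_IS_SCALL_RESTARTED and suppress a
 * restart (a sketch of intended usage, inferred from the flag definitions at
 * the end of this file).
 */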
#define current_pt_regs() \
({ \
/* open-coded current_thread_info() */ \
register unsigned long sp asm ("sp"); \
unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \
(struct pt_regs *)(pg_start + THREAD_SIZE - 4) - 1; \
})
static inline long regs_return_value(struct pt_regs *regs)
{
return regs->r0;
}
#endif /* !__ASSEMBLY__ */
#define orig_r8_IS_SCALL 0x0001
#define orig_r8_IS_SCALL_RESTARTED 0x0002
#define orig_r8_IS_BRKPT 0x0004
#define orig_r8_IS_EXCPN 0x0004
#define orig_r8_IS_IRQ1 0x0010
#define orig_r8_IS_IRQ2 0x0020
#endif /* __ASM_PTRACE_H */


@ -0,0 +1,18 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_SECTIONS_H
#define _ASM_ARC_SECTIONS_H
#include <asm-generic/sections.h>
extern char _int_vec_base_lds[];
extern char __arc_dccm_base[];
extern char __dtb_start[];
#endif


@ -0,0 +1,24 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASMARC_SEGMENT_H
#define __ASMARC_SEGMENT_H
#ifndef __ASSEMBLY__
typedef unsigned long mm_segment_t;
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#define KERNEL_DS MAKE_MM_SEG(0)
#define USER_DS MAKE_MM_SEG(TASK_SIZE)
#define segment_eq(a, b) ((a) == (b))
#endif /* __ASSEMBLY__ */
#endif /* __ASMARC_SEGMENT_H */


@ -0,0 +1,25 @@
/*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_SERIAL_H
#define _ASM_ARC_SERIAL_H
/*
* early-8250 requires BASE_BAUD to be defined and includes this header.
* We put in a typical value:
* (core clk / 16) - i.e. the standard 16x oversampling clock of the UART.
* Although in a multi-platform-image this might not work, especially if the
* clk driving the UART is different.
* We can't use DeviceTree as this is typically for early serial.
*/
#include <asm/clk.h>
#define BASE_BAUD (arc_get_core_freq() / 16)
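/*
 * e.g. with an 80 MHz core clock this works out to 80000000 / 16 = 5000000;
 * the figure is illustrative - the real value is whatever arc_get_core_freq()
 * reports on the running platform.
 */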
#endif /* _ASM_ARC_SERIAL_H */


@ -0,0 +1,37 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASMARC_SETUP_H
#define __ASMARC_SETUP_H
#include <linux/types.h>
#include <uapi/asm/setup.h>
#define COMMAND_LINE_SIZE 256
/*
* Data structure to map a ID to string
* Used a lot for bootup reporting of hardware diversity
*/
struct id_to_str {
int id;
const char *str;
};
struct cpuinfo_data {
struct id_to_str info;
int up_range;
};
extern int root_mountflags, end_mem;
extern int running_on_hw;
void __init setup_processor(void);
void __init setup_arch_memory(void);
#endif /* __ASMARC_SETUP_H */

arch/arc/include/asm/smp.h

@ -0,0 +1,130 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARC_SMP_H
#define __ASM_ARC_SMP_H
#ifdef CONFIG_SMP
#include <linux/types.h>
#include <linux/init.h>
#include <linux/threads.h>
#define raw_smp_processor_id() (current_thread_info()->cpu)
/* including cpumask.h leads to cyclic deps hence this Forward declaration */
struct cpumask;
/*
* APIs provided by arch SMP code to generic code
*/
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
/*
* APIs provided by arch SMP code to rest of arch code
*/
extern void __init smp_init_cpus(void);
extern void __init first_lines_of_secondary(void);
extern const char *arc_platform_smp_cpuinfo(void);
/*
* API expected BY platform smp code (FROM arch smp code)
*
* smp_ipi_irq_setup:
* Takes @cpu and @irq to which the arch-common ISR is hooked up
*/
extern int smp_ipi_irq_setup(int cpu, int irq);
/*
* struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
*
* @info: SoC SMP specific info for /proc/cpuinfo etc
* @cpu_kick: For Master to kickstart a cpu (optionally at a PC)
* @ipi_send: To send IPI to a @cpumask
* @ipi_clear: To clear IPI received by @cpu at @irq
*/
struct plat_smp_ops {
const char *info;
void (*cpu_kick)(int cpu, unsigned long pc);
void (*ipi_send)(void *callmap);
void (*ipi_clear)(int cpu, int irq);
};
/* TBD: stop exporting it for direct population by platform */
extern struct plat_smp_ops plat_smp_ops;
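/*
 * Sketch of how a platform might populate these callbacks (hypothetical
 * names, purely illustrative):
 *
 *	struct plat_smp_ops plat_smp_ops = {
 *		.info		= "MyBoard in-core ICTL IPI",
 *		.cpu_kick	= myboard_cpu_kick,
 *		.ipi_send	= myboard_ipi_send,
 *		.ipi_clear	= myboard_ipi_clear,
 *	};
 */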
#endif /* CONFIG_SMP */
/*
* ARC700 doesn't support atomic Read-Modify-Write ops.
* Originally Interrupts had to be disabled around code to guarantee atomicity.
* The LLOCK/SCOND insns allow writing interrupt-hassle-free atomic ops
* based on retry-if-irq-in-atomic (with hardware assist).
* However despite these, we provide the IRQ disabling variant
*
* (1) These insns were introduced only in the 4.10 release, so support is
* still needed for older releases.
*
* (2) In an SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
* guaranteed by the platform (not something which the core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.
*
* However exported spinlock API is not usable due to cyclic hdr deps
* (even after system.h disintegration upstream)
* asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
* -> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h
*
* So the workaround is to use the lowest level arch spinlock API.
* The exported spinlock API is smart enough to be NOP for !CONFIG_SMP,
* but same is not true for ARCH backend, hence the need for 2 variants
*/
#ifndef CONFIG_ARC_HAS_LLSC
#include <linux/irqflags.h>
#ifdef CONFIG_SMP
#include <asm/spinlock.h>
extern arch_spinlock_t smp_atomic_ops_lock;
extern arch_spinlock_t smp_bitops_lock;
#define atomic_ops_lock(flags) do { \
local_irq_save(flags); \
arch_spin_lock(&smp_atomic_ops_lock); \
} while (0)
#define atomic_ops_unlock(flags) do { \
arch_spin_unlock(&smp_atomic_ops_lock); \
local_irq_restore(flags); \
} while (0)
#define bitops_lock(flags) do { \
local_irq_save(flags); \
arch_spin_lock(&smp_bitops_lock); \
} while (0)
#define bitops_unlock(flags) do { \
arch_spin_unlock(&smp_bitops_lock); \
local_irq_restore(flags); \
} while (0)
#else /* !CONFIG_SMP */
#define atomic_ops_lock(flags) local_irq_save(flags)
#define atomic_ops_unlock(flags) local_irq_restore(flags)
#define bitops_lock(flags) local_irq_save(flags)
#define bitops_unlock(flags) local_irq_restore(flags)
#endif /* !CONFIG_SMP */
#endif /* !CONFIG_ARC_HAS_LLSC */
#endif


@ -0,0 +1,144 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>
#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
__asm__ __volatile__(
"1: ex %0, [%1] \n"
" breq %0, %2, 1b \n"
: "+&r" (tmp)
: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
: "memory");
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
__asm__ __volatile__(
"1: ex %0, [%1] \n"
: "+r" (tmp)
: "r"(&(lock->slock))
: "memory");
return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
smp_mb();
}
/*
* Read-write spinlocks, allowing multiple readers but only one writer.
*
* The spinlock itself is contained in @counter and access to it is
* serialized with @lock_mutex.
*
* Unfair locking as Writers could be starved indefinitely by Reader(s)
*/
/* Would read_trylock() succeed? */
#define arch_read_can_lock(x) ((x)->counter > 0)
/* Would write_trylock() succeed? */
#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&(rw->lock_mutex));
/*
* zero means writer holds the lock exclusively, deny Reader.
* Otherwise grant lock to first/subseq reader
*/
if (rw->counter > 0) {
rw->counter--;
ret = 1;
}
arch_spin_unlock(&(rw->lock_mutex));
smp_mb();
return ret;
}
/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&(rw->lock_mutex));
/*
* If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
* deny writer. Otherwise if unlocked grant to writer
* Hence the claim that Linux rwlocks are unfair to writers.
* (can be starved for an indefinite time by readers).
*/
if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
rw->counter = 0;
ret = 1;
}
arch_spin_unlock(&(rw->lock_mutex));
return ret;
}
static inline void arch_read_lock(arch_rwlock_t *rw)
{
while (!arch_read_trylock(rw))
cpu_relax();
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
while (!arch_write_trylock(rw))
cpu_relax();
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&(rw->lock_mutex));
rw->counter++;
arch_spin_unlock(&(rw->lock_mutex));
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&(rw->lock_mutex));
rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
arch_spin_unlock(&(rw->lock_mutex));
}
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */


@ -0,0 +1,35 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H
typedef struct {
volatile unsigned int slock;
} arch_spinlock_t;
#define __ARCH_SPIN_LOCK_UNLOCKED__ 0
#define __ARCH_SPIN_LOCK_LOCKED__ 1
#define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED__ }
#define __ARCH_SPIN_LOCK_LOCKED { __ARCH_SPIN_LOCK_LOCKED__ }
/*
* Unlocked: 0x01_00_00_00
* Read lock(s): 0x00_FF_FF_FF down to 0x01 (each reader decrements the count)
* Write lock: 0x0, but only possible if the prior value was "unlocked" 0x01_00_00_00
*/
typedef struct {
volatile unsigned int counter;
arch_spinlock_t lock_mutex;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
#define __ARCH_RW_LOCK_UNLOCKED { .counter = __ARCH_RW_LOCK_UNLOCKED__ }
#endif


@ -0,0 +1,40 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* vineetg: May 2011
* -We had half-optimised memset/memcpy, got better versions of those
* -Added memcmp, strchr, strcpy, strcmp, strlen
*
* Amit Bhor: Codito Technologies 2004
*/
#ifndef _ASM_ARC_STRING_H
#define _ASM_ARC_STRING_H
#include <linux/types.h>
#ifdef __KERNEL__
#define __HAVE_ARCH_MEMSET
#define __HAVE_ARCH_MEMCPY
#define __HAVE_ARCH_MEMCMP
#define __HAVE_ARCH_STRCHR
#define __HAVE_ARCH_STRCPY
#define __HAVE_ARCH_STRCMP
#define __HAVE_ARCH_STRLEN
extern void *memset(void *ptr, int, __kernel_size_t);
extern void *memcpy(void *, const void *, __kernel_size_t);
extern void memzero(void *ptr, __kernel_size_t n);
extern int memcmp(const void *, const void *, __kernel_size_t);
extern char *strchr(const char *s, int c);
extern char *strcpy(char *dest, const char *src);
extern int strcmp(const char *cs, const char *ct);
extern __kernel_size_t strlen(const char *);
#endif /* __KERNEL__ */
#endif /* _ASM_ARC_STRING_H */


@ -0,0 +1,41 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_SWITCH_TO_H
#define _ASM_ARC_SWITCH_TO_H
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
#define ARC_FPU_PREV(p, n) fpu_save_restore(p, n)
#define ARC_FPU_NEXT(t)
#else
#define ARC_FPU_PREV(p, n)
#define ARC_FPU_NEXT(n)
#endif /* !CONFIG_ARC_FPU_SAVE_RESTORE */
struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n);
#define switch_to(prev, next, last) \
do { \
ARC_FPU_PREV(prev, next); \
last = __switch_to(prev, next);\
ARC_FPU_NEXT(next); \
mb(); \
} while (0)
#endif
#endif


@ -0,0 +1,72 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_SYSCALL_H
#define _ASM_ARC_SYSCALL_H 1
#include <linux/err.h>
#include <linux/sched.h>
#include <asm/unistd.h>
#include <asm/ptrace.h> /* in_syscall() */
static inline long
syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
if (user_mode(regs) && in_syscall(regs))
return regs->orig_r8;
else
return -1;
}
static inline void
syscall_rollback(struct task_struct *task, struct pt_regs *regs)
{
/* XXX: I can't fathom how pt_regs->r8 will be clobbered ? */
regs->r8 = regs->orig_r8;
}
static inline long
syscall_get_error(struct task_struct *task, struct pt_regs *regs)
{
/* 0 if syscall succeeded, otherwise -Errorcode */
return IS_ERR_VALUE(regs->r0) ? regs->r0 : 0;
}
static inline long
syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
{
return regs->r0;
}
static inline void
syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
int error, long val)
{
regs->r0 = (long) error ?: val;
}
/*
* @i: argument index [0,5]
* @n: number of arguments; n+i must be [1,6].
*/
static inline void
syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
unsigned int i, unsigned int n, unsigned long *args)
{
unsigned long *inside_ptregs = &(regs->r0);
inside_ptregs -= i;
BUG_ON((i + n) > 6);
while (n--) {
args[i++] = (*inside_ptregs);
inside_ptregs--;
}
}
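/*
 * e.g. syscall_get_arguments(task, regs, 0, 3, args) copies regs->r0, r1, r2
 * into args[0..2]: in struct pt_regs r1 and r2 are declared just before r0,
 * so decrementing the pointer from &regs->r0 walks back through them.
 */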
#endif


@ -0,0 +1,29 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_SYSCALLS_H
#define _ASM_ARC_SYSCALLS_H 1
#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/types.h>
int sys_clone_wrapper(int, int, int, int, int);
int sys_fork_wrapper(void);
int sys_vfork_wrapper(void);
int sys_cacheflush(uint32_t, uint32_t, uint32_t);
int sys_arc_settls(void *);
int sys_arc_gettls(void);
#include <asm-generic/syscalls.h>
#endif /* __KERNEL__ */
#endif


@ -0,0 +1,121 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Vineetg: Oct 2009
* No need for ARC specific thread_info allocator (kmalloc/free). This is
* anyways one page allocation, thus slab alloc can be short-circuited and
* the generic version (get_free_page) would be loads better.
*
* Sameer Dhavale: Codito Technologies 2004
*/
#ifndef _ASM_THREAD_INFO_H
#define _ASM_THREAD_INFO_H
#ifdef __KERNEL__
#include <asm/page.h>
#ifdef CONFIG_16KSTACKS
#define THREAD_SIZE_ORDER 1
#else
#define THREAD_SIZE_ORDER 0
#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#ifndef __ASSEMBLY__
#include <linux/thread_info.h>
#include <asm/segment.h>
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
* - this struct shares the supervisor stack pages
* - if the contents of this structure are changed, the assembly constants
* must also be changed
*/
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0 => preemptable, <0 => BUG */
struct task_struct *task; /* main task structure */
mm_segment_t addr_limit; /* thread address space */
struct exec_domain *exec_domain;/* execution domain */
__u32 cpu; /* current CPU */
unsigned long thr_ptr; /* TLS ptr */
struct restart_block restart_block;
};
/*
* macros/functions for gaining access to the thread information structure
*
* preempt_count needs to be 1 initially, until the scheduler is functional.
*/
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
static inline __attribute_const__ struct thread_info *current_thread_info(void)
{
register unsigned long sp asm("sp");
return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}
#endif /* !__ASSEMBLY__ */
#define PREEMPT_ACTIVE 0x10000000
/*
* thread information flags
* - these are process state flags that various assembly files may need to
* access
* - pending work-to-be-done flags are in LSW
* - other flags in MSW
*/
#define TIF_RESTORE_SIGMASK 0 /* restore sig mask in do_signal() */
#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
/* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 16
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_MEMDIE (1<<TIF_MEMDIE)
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME)
/*
* _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
* SYSCALL_TRACE is anyway tested separately/unconditionally right after a
* syscall, so all that remains to be tested is _TIF_WORK_MASK
*/
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */


@ -0,0 +1,18 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_TIMEX_H
#define _ASM_ARC_TIMEX_H
#define CLOCK_TICK_RATE 80000000 /* slated to be removed */
#include <asm-generic/timex.h>
/* XXX: get_cycles() to be implemented with RTSC insn */
#endif /* _ASM_ARC_TIMEX_H */


@ -0,0 +1,104 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_TLB_MMU_V1_H__
#define __ASM_TLB_MMU_V1_H__
#if defined(__ASSEMBLY__) && (CONFIG_ARC_MMU_VER == 1)
#include <asm/tlb.h>
.macro TLB_WRITE_HEURISTICS
#define JH_HACK1
#undef JH_HACK2
#undef JH_HACK3
#ifdef JH_HACK3
; Calculate set index for 2-way MMU
; -avoiding use of GetIndex from MMU
; and its unpleasant LFSR pseudo-random sequence
;
; r1 = TLBPD0 from TLB_RELOAD above
;
; -- jh_ex_way_set not cleared on startup
; didn't want to change setup.c
; hence extra instruction to clean
;
; -- should be in cache since in same line
; as r0/r1 saves above
;
ld r0,[jh_ex_way_sel] ; victim pointer
and r0,r0,1 ; clean
xor.f r0,r0,1 ; flip
st r0,[jh_ex_way_sel] ; store back
asr r0,r1,12 ; get set # <<1, note bit 12=R=0
or.nz r0,r0,1 ; set way bit
and r0,r0,0xff ; clean
sr r0,[ARC_REG_TLBINDEX]
#endif
#ifdef JH_HACK2
; JH hack #2
; Faster than hack #1 in non-thrash case, but hard-coded for 2-way MMU
; Slower in thrash case (where it matters) because more code is executed
; Inefficient due to two-register paradigm of this miss handler
;
/* r1 = data TLBPD0 at this point */
lr r0,[eret] /* instruction address */
xor r0,r0,r1 /* compare set # */
and.f r0,r0,0x000fe000 /* 2-way MMU mask */
bne 88f /* not in same set - no need to probe */
lr r0,[eret] /* instruction address */
and r0,r0,PAGE_MASK /* VPN of instruction address */
; lr r1,[ARC_REG_TLBPD0] /* Data VPN+ASID - already in r1 from TLB_RELOAD*/
and r1,r1,0xff /* Data ASID */
or r0,r0,r1 /* Instruction address + Data ASID */
lr r1,[ARC_REG_TLBPD0] /* save TLBPD0 containing data TLB*/
sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */
sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */
sr r1,[ARC_REG_TLBPD0] /* restore TLBPD0 */
xor r0,r0,1 /* flip bottom bit of data index */
b.d 89f
sr r0,[ARC_REG_TLBINDEX] /* and put it back */
88:
sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
89:
#endif
#ifdef JH_HACK1
;
; Always checks whether instruction will be kicked out by dtlb miss
;
mov_s r3, r1 ; save PD0 prepared by TLB_RELOAD in r3
lr r0,[eret] /* instruction address */
and r0,r0,PAGE_MASK /* VPN of instruction address */
bmsk r1,r3,7 /* Data ASID, bits 7-0 */
or_s r0,r0,r1 /* Instruction address + Data ASID */
sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */
sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */
sr r3,[ARC_REG_TLBPD0] /* restore TLBPD0 */
sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
lr r1,[ARC_REG_TLBINDEX] /* r1 = index where MMU wants to put data */
cmp r0,r1 /* if no match on indices, go around */
xor.eq r1,r1,1 /* flip bottom bit of data index */
sr r1,[ARC_REG_TLBINDEX] /* and put it back */
#endif
.endm
#endif
#endif


@ -0,0 +1,58 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_TLB_H
#define _ASM_ARC_TLB_H
#ifdef __KERNEL__
#include <asm/pgtable.h>
/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \
_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
#ifndef __ASSEMBLY__
#define tlb_flush(tlb) local_flush_tlb_mm((tlb)->mm)
/*
* This pair is called at time of munmap/exit to flush cache and TLB entries
* for mappings being torn down.
* 1) cache-flush part - implemented via tlb_start_vma( ), can be a NOP (for now)
* as we don't support aliasing configs in our VIPT D$.
* 2) tlb-flush part - implemented via tlb_end_vma( ), can be a NOP as well,
* albeit for different reasons - it is better handled by moving to a new ASID
*
* Note, read http://lkml.org/lkml/2004/1/15/6
*/
#define tlb_start_vma(tlb, vma)
#define tlb_end_vma(tlb, vma)
#define __tlb_remove_tlb_entry(tlb, ptep, address)
#include <linux/pagemap.h>
#include <asm-generic/tlb.h>
#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
void tlb_paranoid_check(unsigned int pid_sw, unsigned long address);
#else
#define tlb_paranoid_check(a, b)
#endif
void arc_mmu_init(void);
extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
void __init read_decode_mmu_bcr(void);
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_ARC_TLB_H */


@ -0,0 +1,28 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARC_TLBFLUSH__
#define __ASM_ARC_TLBFLUSH__
#include <linux/mm.h>
void local_flush_tlb_all(void);
void local_flush_tlb_mm(struct mm_struct *mm);
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
/* XXX: Revisit for SMP */
#define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
#define flush_tlb_kernel_range(s, e) local_flush_tlb_kernel_range(s, e)
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
#endif


@ -0,0 +1,751 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* vineetg: June 2010
* -__clear_user( ) called multiple times during elf load was byte loop
* converted to do as much word clear as possible.
*
* vineetg: Dec 2009
* -Hand crafted constant propagation for "constant" copy sizes
* -stock kernel shrunk by 33K at -O3
*
* vineetg: Sept 2009
* -Added option to (UN)inline copy_(to|from)_user to reduce code sz
* -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
* -Enabled when doing -Os
*
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
#ifndef _ASM_ARC_UACCESS_H
#define _ASM_ARC_UACCESS_H
#include <linux/sched.h>
#include <asm/errno.h>
#include <linux/string.h> /* for generic string functions */
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
/*
* Algorithmically, for __user_ok() we want to do:
* (start < TASK_SIZE) && (start+len < TASK_SIZE)
* where TASK_SIZE could either be retrieved from thread_info->addr_limit or
* emitted directly in code.
*
* This can however be rewritten as follows:
* (len <= TASK_SIZE) && (start+len < TASK_SIZE)
*
* Because it essentially checks if the buffer end is within limit and @len is
* non-negative, which implies that the buffer start will be within limit too.
*
* The reason for rewriting is that, for the majority of cases, @len is a
* compile time constant, causing the first sub-expression to be compile time
* subsumed.
*
* The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10),
* so we check for TASK_SIZE using get_fs() since the addr_limit load from mem
* would already have been done at this call site for __kernel_ok()
*
*/
#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
(((addr)+(sz)) <= get_fs()))
#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
likely(__user_ok((addr), (sz))))
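/*
 * Editorial worked example (assuming addr_limit == TASK_SIZE == 0x60000000):
 *   addr = 0x5ffffff0, sz = 0x10: 0x10 <= TASK_SIZE (folded away for a
 *   constant sz) and 0x5ffffff0 + 0x10 == 0x60000000 <= get_fs() -> allowed
 *   addr = 0x5ffffff0, sz = 0x20: end is 0x60000010 > get_fs()   -> rejected
 * i.e. checking only the buffer end (plus a non-negative length) is enough
 * to guarantee the buffer start is below the limit as well.
 */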
/*********** Single byte/hword/word copies ******************/
#define __get_user_fn(sz, u, k) \
({ \
long __ret = 0; /* success by default */ \
switch (sz) { \
case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break; \
case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break; \
case 4: __arc_get_user_one(*(k), u, "ld", __ret); break; \
case 8: __arc_get_user_one_64(*(k), u, __ret); break; \
} \
__ret; \
})
/*
* Returns 0 on success, -EFAULT if not.
* @ret already contains 0 - given that errors will be less likely
* (hence +r asm constraint below).
* In case of error, fixup code will make it -EFAULT
*/
#define __arc_get_user_one(dst, src, op, ret) \
__asm__ __volatile__( \
"1: "op" %1,[%2]\n" \
"2: ;nop\n" \
" .section .fixup, \"ax\"\n" \
" .align 4\n" \
"3: mov %0, %3\n" \
" j 2b\n" \
" .previous\n" \
" .section __ex_table, \"a\"\n" \
" .align 4\n" \
" .word 1b,3b\n" \
" .previous\n" \
\
: "+r" (ret), "=r" (dst) \
: "r" (src), "ir" (-EFAULT))
#define __arc_get_user_one_64(dst, src, ret) \
__asm__ __volatile__( \
"1: ld %1,[%2]\n" \
"4: ld %R1,[%2, 4]\n" \
"2: ;nop\n" \
" .section .fixup, \"ax\"\n" \
" .align 4\n" \
"3: mov %0, %3\n" \
" j 2b\n" \
" .previous\n" \
" .section __ex_table, \"a\"\n" \
" .align 4\n" \
" .word 1b,3b\n" \
" .word 4b,3b\n" \
" .previous\n" \
\
: "+r" (ret), "=r" (dst) \
: "r" (src), "ir" (-EFAULT))
#define __put_user_fn(sz, u, k) \
({ \
long __ret = 0; /* success by default */ \
switch (sz) { \
case 1: __arc_put_user_one(*(k), u, "stb", __ret); break; \
case 2: __arc_put_user_one(*(k), u, "stw", __ret); break; \
case 4: __arc_put_user_one(*(k), u, "st", __ret); break; \
case 8: __arc_put_user_one_64(*(k), u, __ret); break; \
} \
__ret; \
})
#define __arc_put_user_one(src, dst, op, ret) \
__asm__ __volatile__( \
"1: "op" %1,[%2]\n" \
"2: ;nop\n" \
" .section .fixup, \"ax\"\n" \
" .align 4\n" \
"3: mov %0, %3\n" \
" j 2b\n" \
" .previous\n" \
" .section __ex_table, \"a\"\n" \
" .align 4\n" \
" .word 1b,3b\n" \
" .previous\n" \
\
: "+r" (ret) \
: "r" (src), "r" (dst), "ir" (-EFAULT))
#define __arc_put_user_one_64(src, dst, ret) \
__asm__ __volatile__( \
"1: st %1,[%2]\n" \
"4: st %R1,[%2, 4]\n" \
"2: ;nop\n" \
" .section .fixup, \"ax\"\n" \
" .align 4\n" \
"3: mov %0, %3\n" \
" j 2b\n" \
" .previous\n" \
" .section __ex_table, \"a\"\n" \
" .align 4\n" \
" .word 1b,3b\n" \
" .word 4b,3b\n" \
" .previous\n" \
\
: "+r" (ret) \
: "r" (src), "r" (dst), "ir" (-EFAULT))
static inline unsigned long
__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
{
long res = 0;
char val;
unsigned long tmp1, tmp2, tmp3, tmp4;
unsigned long orig_n = n;
if (n == 0)
return 0;
/* unaligned */
if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
unsigned char tmp;
__asm__ __volatile__ (
" mov.f lp_count, %0 \n"
" lpnz 2f \n"
"1: ldb.ab %1, [%3, 1] \n"
" stb.ab %1, [%2, 1] \n"
" sub %0,%0,1 \n"
"2: ;nop \n"
" .section .fixup, \"ax\" \n"
" .align 4 \n"
"3: j 2b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 4 \n"
" .word 1b, 3b \n"
" .previous \n"
: "+r" (n),
/*
* Note: the '&' marks this an earlyclobber operand, to make sure the
* temporary register inside the loop is not the same as
* FROM or TO.
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
: "lp_count", "lp_start", "lp_end", "memory");
return n;
}
/*
* Hand-crafted constant propagation to reduce code sz of the
* laddered copy 16x,8,4,2,1
*/
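	/*
	 * Editorial example: for aligned pointers and a compile-time n of 6,
	 * the ladder below reduces to just the 4-byte and 2-byte chunks; the
	 * 16x loop and the 8- and 1-byte tails are discarded by the compiler.
	 */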
if (__builtin_constant_p(orig_n)) {
res = orig_n;
if (orig_n / 16) {
orig_n = orig_n % 16;
__asm__ __volatile__(
" lsr lp_count, %7,4 \n"
" lp 3f \n"
"1: ld.ab %3, [%2, 4] \n"
"11: ld.ab %4, [%2, 4] \n"
"12: ld.ab %5, [%2, 4] \n"
"13: ld.ab %6, [%2, 4] \n"
" st.ab %3, [%1, 4] \n"
" st.ab %4, [%1, 4] \n"
" st.ab %5, [%1, 4] \n"
" st.ab %6, [%1, 4] \n"
" sub %0,%0,16 \n"
"3: ;nop \n"
" .section .fixup, \"ax\" \n"
" .align 4 \n"
"4: j 3b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 4 \n"
" .word 1b, 4b \n"
" .word 11b,4b \n"
" .word 12b,4b \n"
" .word 13b,4b \n"
" .previous \n"
: "+r" (res), "+r"(to), "+r"(from),
"=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
: "ir"(n)
: "lp_count", "memory");
}
if (orig_n / 8) {
orig_n = orig_n % 8;
__asm__ __volatile__(
"14: ld.ab %3, [%2,4] \n"
"15: ld.ab %4, [%2,4] \n"
" st.ab %3, [%1,4] \n"
" st.ab %4, [%1,4] \n"
" sub %0,%0,8 \n"
"31: ;nop \n"
" .section .fixup, \"ax\" \n"
" .align 4 \n"
"4: j 31b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 4 \n"
" .word 14b,4b \n"
" .word 15b,4b \n"
" .previous \n"
: "+r" (res), "+r"(to), "+r"(from),
"=r"(tmp1), "=r"(tmp2)
:
: "memory");
}
if (orig_n / 4) {
orig_n = orig_n % 4;
__asm__ __volatile__(
"16: ld.ab %3, [%2,4] \n"
" st.ab %3, [%1,4] \n"
" sub %0,%0,4 \n"
"32: ;nop \n"
" .section .fixup, \"ax\" \n"
" .align 4 \n"
"4: j 32b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 4 \n"
" .word 16b,4b \n"
" .previous \n"
: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
:
: "memory");
}
if (orig_n / 2) {
orig_n = orig_n % 2;
__asm__ __volatile__(
"17: ldw.ab %3, [%2,2] \n"
" stw.ab %3, [%1,2] \n"
" sub %0,%0,2 \n"
"33: ;nop \n"
" .section .fixup, \"ax\" \n"
" .align 4 \n"
"4: j 33b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 4 \n"
" .word 17b,4b \n"
" .previous \n"
: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
:
: "memory");
}
if (orig_n & 1) {
__asm__ __volatile__(
"18: ldb.ab %3, [%2,2] \n"
" stb.ab %3, [%1,2] \n"
" sub %0,%0,1 \n"
"34: ; nop \n"
" .section .fixup, \"ax\" \n"
" .align 4 \n"
"4: j 34b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 4 \n"
" .word 18b,4b \n"
" .previous \n"
: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
:
: "memory");
}
} else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
__asm__ __volatile__(
" mov %0,%3 \n"
" lsr.f lp_count, %3,4 \n" /* 16x bytes */
" lpnz 3f \n"
"1: ld.ab %5, [%2, 4] \n"
"11: ld.ab %6, [%2, 4] \n"
"12: ld.ab %7, [%2, 4] \n"
"13: ld.ab %8, [%2, 4] \n"
" st.ab %5, [%1, 4] \n"
" st.ab %6, [%1, 4] \n"
" st.ab %7, [%1, 4] \n"
" st.ab %8, [%1, 4] \n"
" sub %0,%0,16 \n"
"3: and.f %3,%3,0xf \n" /* stragglers */
" bz 34f \n"
" bbit0 %3,3,31f \n" /* 8 bytes left */
"14: ld.ab %5, [%2,4] \n"
"15: ld.ab %6, [%2,4] \n"
" st.ab %5, [%1,4] \n"
" st.ab %6, [%1,4] \n"
" sub.f %0,%0,8 \n"
"31: bbit0 %3,2,32f \n" /* 4 bytes left */
"16: ld.ab %5, [%2,4] \n"
" st.ab %5, [%1,4] \n"
" sub.f %0,%0,4 \n"
"32: bbit0 %3,1,33f \n" /* 2 bytes left */
"17: ldw.ab %5, [%2,2] \n"
" stw.ab %5, [%1,2] \n"
" sub.f %0,%0,2 \n"
"33: bbit0 %3,0,34f \n"
"18: ldb.ab %5, [%2,1] \n" /* 1 byte left */
" stb.ab %5, [%1,1] \n"
" sub.f %0,%0,1 \n"
"34: ;nop \n"
" .section .fixup, \"ax\" \n"
" .align 4 \n"
"4: j 34b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 4 \n"
" .word 1b, 4b \n"
" .word 11b,4b \n"
" .word 12b,4b \n"
" .word 13b,4b \n"
" .word 14b,4b \n"
" .word 15b,4b \n"
" .word 16b,4b \n"
" .word 17b,4b \n"
" .word 18b,4b \n"
" .previous \n"
: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
"=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
:
: "lp_count", "memory");
}
return res;
}
extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
unsigned long n);
static inline unsigned long
__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
{
long res = 0;
char val;
unsigned long tmp1, tmp2, tmp3, tmp4;
unsigned long orig_n = n;
if (n == 0)
return 0;
/* unaligned */
if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
unsigned char tmp;
__asm__ __volatile__(
" mov.f lp_count, %0 \n"
" lpnz 3f \n"
" ldb.ab %1, [%3, 1] \n"
"1: stb.ab %1, [%2, 1] \n"
" sub %0, %0, 1 \n"
"3: ;nop \n"
" .section .fixup, \"ax\" \n"
" .align 4 \n"
"4: j 3b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 4 \n"
" .word 1b, 4b \n"
" .previous \n"
: "+r" (n),
/* Note: the '&' marks this an earlyclobber operand, to make sure the
* temporary register inside the loop is not the same as
* FROM or TO.
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
: "lp_count", "lp_start", "lp_end", "memory");
return n;
}
if (__builtin_constant_p(orig_n)) {
res = orig_n;
if (orig_n / 16) {
orig_n = orig_n % 16;
__asm__ __volatile__(
" lsr lp_count, %7,4 \n"
" lp 3f \n"
" ld.ab %3, [%2, 4] \n"
" ld.ab %4, [%2, 4] \n"
" ld.ab %5, [%2, 4] \n"
" ld.ab %6, [%2, 4] \n"
"1: st.ab %3, [%1, 4] \n"
"11: st.ab %4, [%1, 4] \n"
"12: st.ab %5, [%1, 4] \n"
"13: st.ab %6, [%1, 4] \n"
" sub %0, %0, 16 \n"
"3:;nop \n"
" .section .fixup, \"ax\" \n"
" .align 4 \n"
"4: j 3b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 4 \n"
" .word 1b, 4b \n"
" .word 11b,4b \n"
" .word 12b,4b \n"
" .word 13b,4b \n"
" .previous \n"
: "+r" (res), "+r"(to), "+r"(from),
"=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
: "ir"(n)
: "lp_count", "memory");
}
if (orig_n / 8) {
orig_n = orig_n % 8;
__asm__ __volatile__(
" ld.ab %3, [%2,4] \n"
" ld.ab %4, [%2,4] \n"
"14: st.ab %3, [%1,4] \n"
"15: st.ab %4, [%1,4] \n"
" sub %0, %0, 8 \n"
"31:;nop \n"
" .section .fixup, \"ax\" \n"
" .align 4 \n"
"4: j 31b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 4 \n"
" .word 14b,4b \n"
" .word 15b,4b \n"
" .previous \n"
: "+r" (res), "+r"(to), "+r"(from),
"=r"(tmp1), "=r"(tmp2)
:
: "memory");
}
if (orig_n / 4) {
orig_n = orig_n % 4;
__asm__ __volatile__(
" ld.ab %3, [%2,4] \n"
"16: st.ab %3, [%1,4] \n"
" sub %0, %0, 4 \n"
"32:;nop \n"
" .section .fixup, \"ax\" \n"
" .align 4 \n"
"4: j 32b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 4 \n"
" .word 16b,4b \n"
" .previous \n"
: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
:
: "memory");
}
if (orig_n / 2) {
orig_n = orig_n % 2;
__asm__ __volatile__(
" ldw.ab %3, [%2,2] \n"
"17: stw.ab %3, [%1,2] \n"
" sub %0, %0, 2 \n"
"33:;nop \n"
" .section .fixup, \"ax\" \n"
" .align 4 \n"
"4: j 33b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 4 \n"
" .word 17b,4b \n"
" .previous \n"
: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
:
: "memory");
}
if (orig_n & 1) {
__asm__ __volatile__(
" ldb.ab %3, [%2,1] \n"
"18: stb.ab %3, [%1,1] \n"
" sub %0, %0, 1 \n"
"34: ;nop \n"
" .section .fixup, \"ax\" \n"
" .align 4 \n"
"4: j 34b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 4 \n"
" .word 18b,4b \n"
" .previous \n"
: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
:
: "memory");
}
} else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
__asm__ __volatile__(
" mov %0,%3 \n"
" lsr.f lp_count, %3,4 \n" /* 16x bytes */
" lpnz 3f \n"
" ld.ab %5, [%2, 4] \n"
" ld.ab %6, [%2, 4] \n"
" ld.ab %7, [%2, 4] \n"
" ld.ab %8, [%2, 4] \n"
"1: st.ab %5, [%1, 4] \n"
"11: st.ab %6, [%1, 4] \n"
"12: st.ab %7, [%1, 4] \n"
"13: st.ab %8, [%1, 4] \n"
" sub %0, %0, 16 \n"
"3: and.f %3,%3,0xf \n" /* stragglers */
" bz 34f \n"
" bbit0 %3,3,31f \n" /* 8 bytes left */
" ld.ab %5, [%2,4] \n"
" ld.ab %6, [%2,4] \n"
"14: st.ab %5, [%1,4] \n"
"15: st.ab %6, [%1,4] \n"
" sub.f %0, %0, 8 \n"
"31: bbit0 %3,2,32f \n" /* 4 bytes left */
" ld.ab %5, [%2,4] \n"
"16: st.ab %5, [%1,4] \n"
" sub.f %0, %0, 4 \n"
"32: bbit0 %3,1,33f \n" /* 2 bytes left */
" ldw.ab %5, [%2,2] \n"
"17: stw.ab %5, [%1,2] \n"
" sub.f %0, %0, 2 \n"
"33: bbit0 %3,0,34f \n"
" ldb.ab %5, [%2,1] \n" /* 1 byte left */
"18: stb.ab %5, [%1,1] \n"
" sub.f %0, %0, 1 \n"
"34: ;nop \n"
" .section .fixup, \"ax\" \n"
" .align 4 \n"
"4: j 34b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 4 \n"
" .word 1b, 4b \n"
" .word 11b,4b \n"
" .word 12b,4b \n"
" .word 13b,4b \n"
" .word 14b,4b \n"
" .word 15b,4b \n"
" .word 16b,4b \n"
" .word 17b,4b \n"
" .word 18b,4b \n"
" .previous \n"
: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
"=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
:
: "lp_count", "memory");
}
return res;
}
static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
{
long res = n;
unsigned char *d_char = to;
__asm__ __volatile__(
" bbit0 %0, 0, 1f \n"
"75: stb.ab %2, [%0,1] \n"
" sub %1, %1, 1 \n"
"1: bbit0 %0, 1, 2f \n"
"76: stw.ab %2, [%0,2] \n"
" sub %1, %1, 2 \n"
"2: asr.f lp_count, %1, 2 \n"
" lpnz 3f \n"
"77: st.ab %2, [%0,4] \n"
" sub %1, %1, 4 \n"
"3: bbit0 %1, 1, 4f \n"
"78: stw.ab %2, [%0,2] \n"
" sub %1, %1, 2 \n"
"4: bbit0 %1, 0, 5f \n"
"79: stb.ab %2, [%0,1] \n"
" sub %1, %1, 1 \n"
"5: \n"
" .section .fixup, \"ax\" \n"
" .align 4 \n"
"3: j 5b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 4 \n"
" .word 75b, 3b \n"
" .word 76b, 3b \n"
" .word 77b, 3b \n"
" .word 78b, 3b \n"
" .word 79b, 3b \n"
" .previous \n"
: "+r"(d_char), "+r"(res)
: "i"(0)
: "lp_count", "lp_start", "lp_end", "memory");
return res;
}
static inline long
__arc_strncpy_from_user(char *dst, const char __user *src, long count)
{
long res = count;
char val;
unsigned int hw_count;
if (count == 0)
return 0;
__asm__ __volatile__(
" lp 2f \n"
"1: ldb.ab %3, [%2, 1] \n"
" breq.d %3, 0, 2f \n"
" stb.ab %3, [%1, 1] \n"
"2: sub %0, %6, %4 \n"
"3: ;nop \n"
" .section .fixup, \"ax\" \n"
" .align 4 \n"
"4: mov %0, %5 \n"
" j 3b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 4 \n"
" .word 1b, 4b \n"
" .previous \n"
: "=r"(res), "+r"(dst), "+r"(src), "=&r"(val), "=l"(hw_count)
: "g"(-EFAULT), "ir"(count), "4"(count) /* this "4" seeds lp_count */
: "memory");
return res;
}
static inline long __arc_strnlen_user(const char __user *s, long n)
{
long res, tmp1, cnt;
char val;
__asm__ __volatile__(
" mov %2, %1 \n"
"1: ldb.ab %3, [%0, 1] \n"
" breq.d %3, 0, 2f \n"
" sub.f %2, %2, 1 \n"
" bnz 1b \n"
" sub %2, %2, 1 \n"
"2: sub %0, %1, %2 \n"
"3: ;nop \n"
" .section .fixup, \"ax\" \n"
" .align 4 \n"
"4: mov %0, 0 \n"
" j 3b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 4 \n"
" .word 1b, 4b \n"
" .previous \n"
: "=r"(res), "=r"(tmp1), "=r"(cnt), "=r"(val)
: "0"(s), "1"(n)
: "memory");
return res;
}
#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
#define __copy_from_user(t, f, n) __arc_copy_from_user(t, f, n)
#define __copy_to_user(t, f, n) __arc_copy_to_user(t, f, n)
#define __clear_user(d, n) __arc_clear_user(d, n)
#define __strncpy_from_user(d, s, n) __arc_strncpy_from_user(d, s, n)
#define __strnlen_user(s, n) __arc_strnlen_user(s, n)
#else
extern long arc_copy_from_user_noinline(void *to, const void __user * from,
unsigned long n);
extern long arc_copy_to_user_noinline(void __user *to, const void *from,
unsigned long n);
extern unsigned long arc_clear_user_noinline(void __user *to,
unsigned long n);
extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
long count);
extern long arc_strnlen_user_noinline(const char __user *src, long n);
#define __copy_from_user(t, f, n) arc_copy_from_user_noinline(t, f, n)
#define __copy_to_user(t, f, n) arc_copy_to_user_noinline(t, f, n)
#define __clear_user(d, n) arc_clear_user_noinline(d, n)
#define __strncpy_from_user(d, s, n) arc_strncpy_from_user_noinline(d, s, n)
#define __strnlen_user(s, n) arc_strnlen_user_noinline(s, n)
#endif
#include <asm-generic/uaccess.h>
extern int fixup_exception(struct pt_regs *regs);
#endif


@ -0,0 +1,29 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_UNALIGNED_H
#define _ASM_ARC_UNALIGNED_H
/* ARC700 can't handle unaligned Data accesses. */
#include <asm-generic/unaligned.h>
#include <asm/ptrace.h>
#ifdef CONFIG_ARC_MISALIGN_ACCESS
int misaligned_fixup(unsigned long address, struct pt_regs *regs,
unsigned long cause, struct callee_regs *cregs);
#else
static inline int
misaligned_fixup(unsigned long address, struct pt_regs *regs,
unsigned long cause, struct callee_regs *cregs)
{
return 0;
}
#endif
#endif /* _ASM_ARC_UNALIGNED_H */


@ -0,0 +1,163 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_UNWIND_H
#define _ASM_ARC_UNWIND_H
#ifdef CONFIG_ARC_DW2_UNWIND
#include <linux/sched.h>
struct arc700_regs {
unsigned long r0;
unsigned long r1;
unsigned long r2;
unsigned long r3;
unsigned long r4;
unsigned long r5;
unsigned long r6;
unsigned long r7;
unsigned long r8;
unsigned long r9;
unsigned long r10;
unsigned long r11;
unsigned long r12;
unsigned long r13;
unsigned long r14;
unsigned long r15;
unsigned long r16;
unsigned long r17;
unsigned long r18;
unsigned long r19;
unsigned long r20;
unsigned long r21;
unsigned long r22;
unsigned long r23;
unsigned long r24;
unsigned long r25;
unsigned long r26;
unsigned long r27; /* fp */
unsigned long r28; /* sp */
unsigned long r29;
unsigned long r30;
unsigned long r31; /* blink */
unsigned long r63; /* pc */
};
struct unwind_frame_info {
struct arc700_regs regs;
struct task_struct *task;
unsigned call_frame:1;
};
#define UNW_PC(frame) ((frame)->regs.r63)
#define UNW_SP(frame) ((frame)->regs.r28)
#define UNW_BLINK(frame) ((frame)->regs.r31)
/* Rajesh FIXME */
#ifdef CONFIG_FRAME_POINTER
#define UNW_FP(frame) ((frame)->regs.r27)
#define FRAME_RETADDR_OFFSET 4
#define FRAME_LINK_OFFSET 0
#define STACK_BOTTOM_UNW(tsk) STACK_LIMIT((tsk)->thread.ksp)
#define STACK_TOP_UNW(tsk) ((tsk)->thread.ksp)
#else
#define UNW_FP(frame) ((void)(frame), 0)
#endif
#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
#define UNW_REGISTER_INFO \
PTREGS_INFO(r0), \
PTREGS_INFO(r1), \
PTREGS_INFO(r2), \
PTREGS_INFO(r3), \
PTREGS_INFO(r4), \
PTREGS_INFO(r5), \
PTREGS_INFO(r6), \
PTREGS_INFO(r7), \
PTREGS_INFO(r8), \
PTREGS_INFO(r9), \
PTREGS_INFO(r10), \
PTREGS_INFO(r11), \
PTREGS_INFO(r12), \
PTREGS_INFO(r13), \
PTREGS_INFO(r14), \
PTREGS_INFO(r15), \
PTREGS_INFO(r16), \
PTREGS_INFO(r17), \
PTREGS_INFO(r18), \
PTREGS_INFO(r19), \
PTREGS_INFO(r20), \
PTREGS_INFO(r21), \
PTREGS_INFO(r22), \
PTREGS_INFO(r23), \
PTREGS_INFO(r24), \
PTREGS_INFO(r25), \
PTREGS_INFO(r26), \
PTREGS_INFO(r27), \
PTREGS_INFO(r28), \
PTREGS_INFO(r29), \
PTREGS_INFO(r30), \
PTREGS_INFO(r31), \
PTREGS_INFO(r63)
#define UNW_DEFAULT_RA(raItem, dataAlign) \
((raItem).where == Memory && !((raItem).value * (dataAlign) + 4))
extern int arc_unwind(struct unwind_frame_info *frame);
extern void arc_unwind_init(void);
extern void arc_unwind_setup(void);
extern void *unwind_add_table(struct module *module, const void *table_start,
unsigned long table_size);
extern void unwind_remove_table(void *handle, int init_only);
static inline int
arch_unwind_init_running(struct unwind_frame_info *info,
int (*callback) (struct unwind_frame_info *info,
void *arg),
void *arg)
{
return 0;
}
static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
{
return 0;
}
static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
{
return;
}
static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
struct pt_regs *regs)
{
return;
}
#else
#define UNW_PC(frame) ((void)(frame), 0)
#define UNW_SP(frame) ((void)(frame), 0)
#define UNW_FP(frame) ((void)(frame), 0)
static inline void arc_unwind_init(void)
{
}
static inline void arc_unwind_setup(void)
{
}
#define unwind_add_table(a, b, c)
#define unwind_remove_table(a, b)
#endif /* CONFIG_ARC_DW2_UNWIND */
#endif /* _ASM_ARC_UNWIND_H */


@ -0,0 +1,12 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
header-y += elf.h
header-y += page.h
header-y += setup.h
header-y += byteorder.h
header-y += cachectl.h
header-y += ptrace.h
header-y += sigcontext.h
header-y += signal.h
header-y += swab.h
header-y += unistd.h


@ -0,0 +1,18 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARC_BYTEORDER_H
#define __ASM_ARC_BYTEORDER_H
#ifdef CONFIG_CPU_BIG_ENDIAN
#include <linux/byteorder/big_endian.h>
#else
#include <linux/byteorder/little_endian.h>
#endif
#endif /* ASM_ARC_BYTEORDER_H */


@ -0,0 +1,28 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ARC_ASM_CACHECTL_H
#define __ARC_ASM_CACHECTL_H
/*
* ARC ABI flags defined for Android's fine-grained cacheflush requirements
*/
#define CF_I_INV 0x0002
#define CF_D_FLUSH 0x0010
#define CF_D_FLUSH_INV 0x0020
#define CF_DEFAULT (CF_I_INV | CF_D_FLUSH)
/*
* Standard flags expected by cacheflush system call users
*/
#define ICACHE CF_I_INV
#define DCACHE CF_D_FLUSH
#define BCACHE (CF_I_INV | CF_D_FLUSH)
#endif


@ -0,0 +1,26 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _UAPI__ASM_ARC_ELF_H
#define _UAPI__ASM_ARC_ELF_H
#include <asm/ptrace.h> /* for user_regs_struct */
/* Machine specific ELF Hdr flags */
#define EF_ARC_OSABI_MSK 0x00000f00
#define EF_ARC_OSABI_ORIG 0x00000000 /* MUST be zero for back-compat */
#define EF_ARC_OSABI_CURRENT 0x00000300 /* v3 (no legacy syscalls) */
typedef unsigned long elf_greg_t;
typedef unsigned long elf_fpregset_t;
#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
#endif
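A hedged illustration (editorial, not from the patch) of the kind of loader-side check these flags enable; the helper name is hypothetical and an already-parsed Elf32_Ehdr from <linux/elf.h> is assumed:

static inline int arc_osabi_ok(const Elf32_Ehdr *ehdr)
{
	/* refuse binaries built against the legacy (pre-v3) syscall ABI */
	return (ehdr->e_flags & EF_ARC_OSABI_MSK) == EF_ARC_OSABI_CURRENT;
}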


@ -0,0 +1,39 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _UAPI__ASM_ARC_PAGE_H
#define _UAPI__ASM_ARC_PAGE_H
/* PAGE_SHIFT determines the page size */
#if defined(CONFIG_ARC_PAGE_SIZE_16K)
#define PAGE_SHIFT 14
#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
#define PAGE_SHIFT 12
#else
/*
* Default 8k
* done this way (instead of under CONFIG_ARC_PAGE_SIZE_8K) because ad-hoc
* user code (busybox's appletlib.h) expects PAGE_SHIFT to be defined without
* including the proper uClibc header, and in that build our autoconf.h is
* not available
*/
#define PAGE_SHIFT 13
#endif
#ifdef __ASSEMBLY__
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_OFFSET (0x80000000)
#else
#define PAGE_SIZE (1UL << PAGE_SHIFT) /* Default 8K */
#define PAGE_OFFSET (0x80000000UL) /* Kernel starts at 2G onwards */
#endif
#define PAGE_MASK (~(PAGE_SIZE-1))
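/*
 * Editorial note: with the default 8K pages this works out to
 * PAGE_SHIFT = 13, PAGE_SIZE = 0x2000 and PAGE_MASK = 0xffffe000,
 * so (addr & PAGE_MASK) truncates an address to its page base.
 */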
#endif /* _UAPI__ASM_ARC_PAGE_H */


@ -0,0 +1,48 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
#ifndef _UAPI__ASM_ARC_PTRACE_H
#define _UAPI__ASM_ARC_PTRACE_H
#ifndef __ASSEMBLY__
/*
* Userspace ABI: Register state needed by
* -ptrace (gdbserver)
* -sigcontext (SA_SIGINFO signal frame)
*
* This is to decouple pt_regs from user-space ABI, to be able to change it
* w/o affecting the ABI.
* The layout (initial padding) is nevertheless kept similar to pt_regs, to
* allow some optimizations when copying pt_regs to/from user_regs_struct.
*
* Also, sigcontext only cares about the scratch regs, as that is what we
* really save/restore for signal handling.
*/
struct user_regs_struct {
struct scratch {
long pad;
long bta, lp_start, lp_end, lp_count;
long status32, ret, blink, fp, gp;
long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
long sp;
} scratch;
struct callee {
long pad;
long r25, r24, r23, r22, r21, r20;
long r19, r18, r17, r16, r15, r14, r13;
} callee;
long efa; /* break pt addr, for break points in delay slots */
long stop_pc; /* give dbg stop_pc directly after checking orig_r8 */
};
#endif /* !__ASSEMBLY__ */
#endif /* _UAPI__ASM_ARC_PTRACE_H */
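A hedged userspace-side sketch (editorial) of how a debugger such as gdbserver might fetch this structure from a stopped tracee via the regset interface; the function name is made up and pid is assumed to be an attached, stopped thread:

#include <elf.h>		/* NT_PRSTATUS */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h>		/* struct user_regs_struct above */

static long fetch_regs(pid_t pid, struct user_regs_struct *uregs)
{
	struct iovec iov = { .iov_base = uregs, .iov_len = sizeof(*uregs) };

	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
}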


@ -0,0 +1,6 @@
/*
* setup.h is part of userspace header ABI so UAPI scripts have to generate it
* even if there's nothing to export - causing empty <uapi/asm/setup.h>
* However, to prevent "patch" from discarding it, we add this placeholder
* comment.
*/


@ -0,0 +1,22 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_SIGCONTEXT_H
#define _ASM_ARC_SIGCONTEXT_H
#include <asm/ptrace.h>
/*
* Signal context structure - contains all info to do with the state
* before the signal handler was invoked.
*/
struct sigcontext {
struct user_regs_struct regs;
};
#endif /* _ASM_ARC_SIGCONTEXT_H */


@ -0,0 +1,27 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
#ifndef _ASM_ARC_SIGNAL_H
#define _ASM_ARC_SIGNAL_H
/*
* This is much needed for ARC sigreturn optimization.
* It allows uClibc to piggyback the addr of a sigreturn stub in sigaction,
* which allows sigreturn based re-entry into the kernel after handling a signal.
* Without it, the kernel needs to "synthesize" the sigreturn trampoline on the
* user mode stack, which in turn forces the following:
* -TLB Flush (after making the stack page executable)
* -Cache line Flush (to make I/D Cache lines coherent)
*/
#define SA_RESTORER 0x04000000
#include <asm-generic/signal.h>
#endif /* _ASM_ARC_SIGNAL_H */


@ -0,0 +1,98 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* vineetg: May 2011
* -Support single cycle endian-swap insn in ARC700 4.10
*
* vineetg: June 2009
* -Better htonl implementation (5 instead of 9 ALU instructions)
* -Hardware assisted single cycle bswap (Use Case of ARC custom instrn)
*/
#ifndef __ASM_ARC_SWAB_H
#define __ASM_ARC_SWAB_H
#include <linux/types.h>
/* Native single cycle endian swap insn */
#ifdef CONFIG_ARC_HAS_SWAPE
#define __arch_swab32(x) \
({ \
unsigned int tmp = x; \
__asm__( \
" swape %0, %1 \n" \
: "=r" (tmp) \
: "r" (tmp)); \
tmp; \
})
#else
/* Several ways of Endian-Swap Emulation for ARC
* 0: kernel generic
* 1: ARC optimised "C"
* 2: ARC Custom instruction
*/
#define ARC_BSWAP_TYPE 1
#if (ARC_BSWAP_TYPE == 1) /******* Software only ********/
/* The kernel default implementation of htonl is
* return x<<24 | x>>24 |
* (x & (__u32)0x0000ff00UL)<<8 | (x & (__u32)0x00ff0000UL)>>8;
*
* This generates 9 instructions on ARC (excluding the ld/st)
*
* 8051fd8c: ld r3,[r7,20] ; Mem op : Get the value to be swapped
* 8051fd98: asl r5,r3,24 ; get 3rd Byte
* 8051fd9c: lsr r2,r3,24 ; get 0th Byte
* 8051fda0: and r4,r3,0xff00
* 8051fda8: asl r4,r4,8 ; get 1st Byte
* 8051fdac: and r3,r3,0x00ff0000
* 8051fdb4: or r2,r2,r5 ; combine 0th and 3rd Bytes
* 8051fdb8: lsr r3,r3,8 ; 2nd Byte at correct place in Dst Reg
* 8051fdbc: or r2,r2,r4 ; combine 0,3 Bytes with 1st Byte
* 8051fdc0: or r2,r2,r3 ; combine 0,3,1 Bytes with 2nd Byte
* 8051fdc4: st r2,[r1,20] ; Mem op : save result back to mem
*
* Joern suggested a better "C" algorithm which is great since
* (1) it is portable to any architecture
* (2) at the same time it takes advantage of the ARC ISA (rotate instructions)
*/
#define __arch_swab32(x) \
({ unsigned long __in = (x), __tmp; \
__tmp = __in << 8 | __in >> 24; /* ror tmp,in,24 */ \
__in = __in << 24 | __in >> 8; /* ror in,in,8 */ \
__tmp ^= __in; \
__tmp &= 0xff00ff; \
__tmp ^ __in; \
})
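/*
 * Editorial trace for x = 0x12345678:
 *   tmp = ror(in, 24) = 0x34567812,  in = ror(in, 8) = 0x78123456
 *   tmp ^= in          -> 0x4c444c44
 *   tmp &= 0x00ff00ff  -> 0x00440044
 *   tmp ^ in           -> 0x78563412, the byte-swapped result in 5 ALU ops.
 */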
#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bwap instruction */
#define __arch_swab32(x) \
({ \
unsigned int tmp = x; \
__asm__( \
" .extInstruction bswap, 7, 0x00, SUFFIX_NONE, SYNTAX_2OP \n"\
" bswap %0, %1 \n"\
: "=r" (tmp) \
: "r" (tmp)); \
tmp; \
})
#endif /* ARC_BSWAP_TYPE=zzz */
#endif /* CONFIG_ARC_HAS_SWAPE */
#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
#define __SWAB_64_THRU_32__
#endif
#endif


@ -0,0 +1,34 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/******** no-legacy-syscalls-ABI *******/
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_FORK
#define sys_mmap2 sys_mmap_pgoff
#include <asm-generic/unistd.h>
#define NR_syscalls __NR_syscalls
/* ARC specific syscall */
#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
__SYSCALL(__NR_cacheflush, sys_cacheflush)
__SYSCALL(__NR_arc_settls, sys_arc_settls)
__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
/* Generic syscall (fs/filesystems.c) - lost in asm-generic/unistd.h */
#define __NR_sysfs (__NR_arch_specific_syscall + 3)
__SYSCALL(__NR_sysfs, sys_sysfs)
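A hedged userspace sketch (editorial) of invoking the ARC-specific cacheflush call after writing code into a buffer; the (start, len, flags) argument order is assumed from the three-argument prototype, and the flag comes from <asm/cachectl.h> shown earlier:

#include <unistd.h>
#include <sys/syscall.h>	/* __NR_cacheflush, exported via the uapi header above */
#include <asm/cachectl.h>	/* BCACHE: flush D$ lines + invalidate I$ */

static void sync_icache(void *buf, unsigned long len)
{
	syscall(__NR_cacheflush, (unsigned long)buf, len, BCACHE);
}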

arch/arc/kernel/Makefile

@ -0,0 +1,33 @@
#
# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
# Pass UTS_MACHINE for user_regset definition
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
obj-y := arcksyms.o setup.o irq.o time.o reset.o ptrace.o entry.o process.o
obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o clk.o
obj-y += devtree.o
obj-$(CONFIG_MODULES) += arcksyms.o module.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_ARC_DW2_UNWIND) += unwind.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_ARC_MISALIGN_ACCESS) += unaligned.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_ARC_METAWARE_HLINK) += arc_hostlink.o
obj-$(CONFIG_ARC_FPU_SAVE_RESTORE) += fpu.o
CFLAGS_fpu.o += -mdpfp
ifdef CONFIG_ARC_DW2_UNWIND
CFLAGS_ctx_sw.o += -fno-omit-frame-pointer
obj-y += ctx_sw.o
else
obj-y += ctx_sw_asm.o
endif
extra-y := vmlinux.lds head.o


@ -0,0 +1,58 @@
/*
* arc_hostlink.c: Pseudo-driver for Metaware provided "hostlink" facility
*
* Allows Linux userland access to host in absence of any peripherals.
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/fs.h> /* file_operations */
#include <linux/miscdevice.h>
#include <linux/mm.h> /* VM_IO */
#include <linux/module.h>
#include <linux/uaccess.h>
static unsigned char __HOSTLINK__[4 * PAGE_SIZE] __aligned(PAGE_SIZE);
static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma)
{
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot)) {
pr_warn("Hostlink buffer mmap ERROR\n");
return -EAGAIN;
}
return 0;
}
static long arc_hl_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
/* we only support returning the physical addr to mmap in user space */
put_user((unsigned int)__HOSTLINK__, (int __user *)arg);
return 0;
}
static const struct file_operations arc_hl_fops = {
.unlocked_ioctl = arc_hl_ioctl,
.mmap = arc_hl_mmap,
};
static struct miscdevice arc_hl_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "hostlink",
.fops = &arc_hl_fops
};
static int __init arc_hl_init(void)
{
pr_info("ARC Hostlink driver mmap at 0x%p\n", __HOSTLINK__);
return misc_register(&arc_hl_dev);
}
module_init(arc_hl_init);


@ -0,0 +1,56 @@
/*
* arcksyms.c - Exporting symbols not exportable from their own sources
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
/* libgcc functions, not part of kernel sources */
extern void __ashldi3(void);
extern void __ashrdi3(void);
extern void __divsi3(void);
extern void __divsf3(void);
extern void __lshrdi3(void);
extern void __modsi3(void);
extern void __muldi3(void);
extern void __ucmpdi2(void);
extern void __udivsi3(void);
extern void __umodsi3(void);
extern void __cmpdi2(void);
extern void __fixunsdfsi(void);
extern void __muldf3(void);
extern void __divdf3(void);
extern void __floatunsidf(void);
extern void __floatunsisf(void);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__divsf3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__ucmpdi2);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__cmpdi2);
EXPORT_SYMBOL(__fixunsdfsi);
EXPORT_SYMBOL(__muldf3);
EXPORT_SYMBOL(__divdf3);
EXPORT_SYMBOL(__floatunsidf);
EXPORT_SYMBOL(__floatunsisf);
/* ARC optimised assembler routines */
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strlen);


@ -0,0 +1,64 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/thread_info.h>
#include <linux/kbuild.h>
#include <asm/hardirq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
int main(void)
{
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
BLANK();
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg));
#ifdef CONFIG_ARC_CURR_IN_REG
DEFINE(THREAD_USER_R25, offsetof(struct thread_struct, user_r25));
#endif
DEFINE(THREAD_FAULT_ADDR,
offsetof(struct thread_struct, fault_address));
BLANK();
DEFINE(THREAD_INFO_FLAGS, offsetof(struct thread_info, flags));
DEFINE(THREAD_INFO_PREEMPT_COUNT,
offsetof(struct thread_info, preempt_count));
BLANK();
DEFINE(TASK_ACT_MM, offsetof(struct task_struct, active_mm));
DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
DEFINE(MM_CTXT, offsetof(struct mm_struct, context));
DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
DEFINE(MM_CTXT_ASID, offsetof(mm_context_t, asid));
BLANK();
DEFINE(PT_status32, offsetof(struct pt_regs, status32));
DEFINE(PT_orig_r8, offsetof(struct pt_regs, orig_r8_word));
DEFINE(PT_sp, offsetof(struct pt_regs, sp));
DEFINE(PT_r0, offsetof(struct pt_regs, r0));
DEFINE(PT_r1, offsetof(struct pt_regs, r1));
DEFINE(PT_r2, offsetof(struct pt_regs, r2));
DEFINE(PT_r3, offsetof(struct pt_regs, r3));
DEFINE(PT_r4, offsetof(struct pt_regs, r4));
DEFINE(PT_r5, offsetof(struct pt_regs, r5));
DEFINE(PT_r6, offsetof(struct pt_regs, r6));
DEFINE(PT_r7, offsetof(struct pt_regs, r7));
return 0;
}

arch/arc/kernel/clk.c

@ -0,0 +1,21 @@
/*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/clk.h>
unsigned long core_freq = 800000000;
/*
* As of now we default to device-tree provided clock
* In future we can determine this in early boot
*/
int arc_set_core_freq(unsigned long freq)
{
core_freq = freq;
return 0;
}

arch/arc/kernel/ctx_sw.c

@ -0,0 +1,109 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Vineetg: Aug 2009
* -"C" version of the lowest level context switch asm macro called by the scheduler.
* gcc doesn't generate DWARF CFI info for hand written asm, hence we can't
* backtrace out of it (e.g. tasks sleeping in kernel).
* So we cheat a bit by writing almost similar code in inline-asm.
* -This is a hacky way of doing things, but there is no other simple way.
* I don't want/intend to extend unwinding code to understand raw asm
*/
#include <asm/asm-offsets.h>
#include <linux/sched.h>
struct task_struct *__sched
__switch_to(struct task_struct *prev_task, struct task_struct *next_task)
{
unsigned int tmp;
unsigned int prev = (unsigned int)prev_task;
unsigned int next = (unsigned int)next_task;
int num_words_to_skip = 1;
#ifdef CONFIG_ARC_CURR_IN_REG
num_words_to_skip++;
#endif
__asm__ __volatile__(
/* FP/BLINK save generated by gcc (standard function prologue) */
"st.a r13, [sp, -4] \n\t"
"st.a r14, [sp, -4] \n\t"
"st.a r15, [sp, -4] \n\t"
"st.a r16, [sp, -4] \n\t"
"st.a r17, [sp, -4] \n\t"
"st.a r18, [sp, -4] \n\t"
"st.a r19, [sp, -4] \n\t"
"st.a r20, [sp, -4] \n\t"
"st.a r21, [sp, -4] \n\t"
"st.a r22, [sp, -4] \n\t"
"st.a r23, [sp, -4] \n\t"
"st.a r24, [sp, -4] \n\t"
#ifndef CONFIG_ARC_CURR_IN_REG
"st.a r25, [sp, -4] \n\t"
#endif
"sub sp, sp, %4 \n\t" /* create gutter at top */
/* set ksp of outgoing task in tsk->thread.ksp */
"st.as sp, [%3, %1] \n\t"
"sync \n\t"
/*
* setup _current_task with incoming tsk.
* optionally, set r25 to that as well
* For SMP extra work to get to &_current_task[cpu]
* (open coded SET_CURR_TASK_ON_CPU)
*/
#ifndef CONFIG_SMP
"st %2, [@_current_task] \n\t"
#else
"lr r24, [identity] \n\t"
"lsr r24, r24, 8 \n\t"
"bmsk r24, r24, 7 \n\t"
"add2 r24, @_current_task, r24 \n\t"
"st %2, [r24] \n\t"
#endif
#ifdef CONFIG_ARC_CURR_IN_REG
"mov r25, %2 \n\t"
#endif
/* get ksp of incoming task from tsk->thread.ksp */
"ld.as sp, [%2, %1] \n\t"
/* start loading its CALLEE reg file */
"add sp, sp, %4 \n\t" /* skip gutter at top */
#ifndef CONFIG_ARC_CURR_IN_REG
"ld.ab r25, [sp, 4] \n\t"
#endif
"ld.ab r24, [sp, 4] \n\t"
"ld.ab r23, [sp, 4] \n\t"
"ld.ab r22, [sp, 4] \n\t"
"ld.ab r21, [sp, 4] \n\t"
"ld.ab r20, [sp, 4] \n\t"
"ld.ab r19, [sp, 4] \n\t"
"ld.ab r18, [sp, 4] \n\t"
"ld.ab r17, [sp, 4] \n\t"
"ld.ab r16, [sp, 4] \n\t"
"ld.ab r15, [sp, 4] \n\t"
"ld.ab r14, [sp, 4] \n\t"
"ld.ab r13, [sp, 4] \n\t"
/* last (ret value) = prev : although for ARC it is a mov r0, r0 */
"mov %0, %3 \n\t"
/* FP/BLINK restore generated by gcc (standard func epilogue) */
: "=r"(tmp)
: "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev),
"n"(num_words_to_skip * 4)
: "blink"
);
return (struct task_struct *)tmp;
}


@ -0,0 +1,58 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Vineetg: Aug 2009
* -Moved core context switch macro out of entry.S into this file.
* -This is the more "natural" hand written assembler
*/
#include <asm/entry.h> /* For the SAVE_* macros */
#include <asm/asm-offsets.h>
#include <asm/linkage.h>
;################### Low Level Context Switch ##########################
.section .sched.text,"ax",@progbits
.align 4
.global __switch_to
.type __switch_to, @function
__switch_to:
/* Save regs on kernel mode stack of task */
st.a blink, [sp, -4]
st.a fp, [sp, -4]
SAVE_CALLEE_SAVED_KERNEL
/* Save the now KSP in task->thread.ksp */
st.as sp, [r0, (TASK_THREAD + THREAD_KSP)/4]
/*
* Return last task in r0 (return reg)
* On ARC, Return reg = First Arg reg = r0.
* Since we already have last task in r0,
* don't need to do anything special to return it
*/
/* hardware memory barrier */
sync
/*
* switch to new task, contained in r1
* Temp reg r3 is required to get the ptr to store val
*/
SET_CURR_TASK_ON_CPU r1, r3
/* reload SP with kernel mode stack pointer in task->thread.ksp */
ld.as sp, [r1, (TASK_THREAD + THREAD_KSP)/4]
/* restore the registers */
RESTORE_CALLEE_SAVED_KERNEL
ld.ab fp, [sp, 4]
ld.ab blink, [sp, 4]
j [blink]
ARC_EXIT __switch_to

arch/arc/kernel/devtree.c

@ -0,0 +1,123 @@
/*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*
* Based on reduced version of METAG
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/prom.h>
#include <asm/clk.h>
#include <asm/mach_desc.h>
/* called from unflatten_device_tree() to bootstrap devicetree itself */
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
return __va(memblock_alloc(size, align));
}
/**
* setup_machine_fdt - Machine setup when a dtb was passed to the kernel
* @dt: virtual address pointer to dt blob
*
* If a dtb was passed to the kernel, then use it to choose the correct
* machine_desc and to setup the system.
*/
struct machine_desc * __init setup_machine_fdt(void *dt)
{
struct boot_param_header *devtree = dt;
struct machine_desc *mdesc = NULL, *mdesc_best = NULL;
unsigned int score, mdesc_score = ~1;
unsigned long dt_root;
const char *model, *compat;
void *clk;
char manufacturer[16];
unsigned long len;
/* check device tree validity */
if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
return NULL;
initial_boot_params = devtree;
dt_root = of_get_flat_dt_root();
/*
* The kernel could be multi-platform enabled, thus could have many
* "baked-in" machine descriptors. Search thru all for the best
* "compatible" string match.
*/
for_each_machine_desc(mdesc) {
score = of_flat_dt_match(dt_root, mdesc->dt_compat);
if (score > 0 && score < mdesc_score) {
mdesc_best = mdesc;
mdesc_score = score;
}
}
if (!mdesc_best) {
const char *prop;
long size;
pr_err("\n unrecognized device tree list:\n[ ");
prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
if (prop) {
while (size > 0) {
printk("'%s' ", prop);
size -= strlen(prop) + 1;
prop += strlen(prop) + 1;
}
}
printk("]\n\n");
machine_halt();
}
/* compat = "<manufacturer>,<model>" */
compat = mdesc_best->dt_compat[0];
model = strchr(compat, ',');
if (model)
model++;
strlcpy(manufacturer, compat, model ? model - compat : strlen(compat));
pr_info("Board \"%s\" from %s (Manufacturer)\n", model, manufacturer);
/* Retrieve various information from the /chosen node */
of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
/* Initialize {size,address}-cells info */
of_scan_flat_dt(early_init_dt_scan_root, NULL);
/* Setup memory, calling early_init_dt_add_memory_arch */
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
clk = of_get_flat_dt_prop(dt_root, "clock-frequency", &len);
if (clk)
arc_set_core_freq(of_read_ulong(clk, len/4));
return mdesc_best;
}
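/*
 * Editorial illustration - a minimal root node this function could accept;
 * the compatible string is hypothetical, clock-frequency is the property
 * parsed just above:
 *
 *	/ {
 *		compatible = "snps,arc-angel4";
 *		clock-frequency = <80000000>;
 *	};
 */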
/*
* Copy the flattened DT out of .init since unflattening doesn't copy strings
* and the normal DT APIs reference them from the original flat DT
*/
void __init copy_devtree(void)
{
void *alloc = early_init_dt_alloc_memory_arch(
be32_to_cpu(initial_boot_params->totalsize), 64);
if (alloc) {
memcpy(alloc, initial_boot_params,
be32_to_cpu(initial_boot_params->totalsize));
initial_boot_params = alloc;
}
}

arch/arc/kernel/disasm.c

@ -0,0 +1,538 @@
/*
* several functions that help interpret ARC instructions
* used for unaligned accesses, kprobes and kgdb
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <asm/disasm.h>
#include <asm/uaccess.h>
#if defined(CONFIG_KGDB) || defined(CONFIG_ARC_MISALIGN_ACCESS) || \
defined(CONFIG_KPROBES)
/* disasm_instr: Analyses instruction at addr, stores
* findings in *state
*/
void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
int userspace, struct pt_regs *regs, struct callee_regs *cregs)
{
int fieldA = 0;
int fieldC = 0, fieldCisReg = 0;
uint16_t word1 = 0, word0 = 0;
int subopcode, is_linked, op_format;
uint16_t *ins_ptr;
uint16_t ins_buf[4];
int bytes_not_copied = 0;
memset(state, 0, sizeof(struct disasm_state));
/* This fetches the upper part of the 32 bit instruction
* in both the Little Endian and Big Endian configurations. */
if (userspace) {
bytes_not_copied = copy_from_user(ins_buf,
(const void __user *) addr, 8);
if (bytes_not_copied > 6)
goto fault;
ins_ptr = ins_buf;
} else {
ins_ptr = (uint16_t *) addr;
}
word1 = *((uint16_t *)addr);
state->major_opcode = (word1 >> 11) & 0x1F;
/* Check if the instruction is 32 bit or 16 bit instruction */
if (state->major_opcode < 0x0B) {
if (bytes_not_copied > 4)
goto fault;
state->instr_len = 4;
word0 = *((uint16_t *)(addr+2));
state->words[0] = (word1 << 16) | word0;
} else {
state->instr_len = 2;
state->words[0] = word1;
}
/* Read the second word in case of limm */
word1 = *((uint16_t *)(addr + state->instr_len));
word0 = *((uint16_t *)(addr + state->instr_len + 2));
state->words[1] = (word1 << 16) | word0;
switch (state->major_opcode) {
case op_Bcc:
state->is_branch = 1;
/* unconditional branch s25, conditional branch s21 */
fieldA = (IS_BIT(state->words[0], 16)) ?
FIELD_s25(state->words[0]) :
FIELD_s21(state->words[0]);
state->delay_slot = IS_BIT(state->words[0], 5);
state->target = fieldA + (addr & ~0x3);
state->flow = direct_jump;
break;
case op_BLcc:
if (IS_BIT(state->words[0], 16)) {
/* Branch and Link*/
/* unconditional branch s25, conditional branch s21 */
fieldA = (IS_BIT(state->words[0], 17)) ?
(FIELD_s25(state->words[0]) & ~0x3) :
FIELD_s21(state->words[0]);
state->flow = direct_call;
} else {
/*Branch On Compare */
fieldA = FIELD_s9(state->words[0]) & ~0x3;
state->flow = direct_jump;
}
state->delay_slot = IS_BIT(state->words[0], 5);
state->target = fieldA + (addr & ~0x3);
state->is_branch = 1;
break;
case op_LD: /* LD<zz> a,[b,s9] */
state->write = 0;
state->di = BITS(state->words[0], 11, 11);
if (state->di)
break;
state->x = BITS(state->words[0], 6, 6);
state->zz = BITS(state->words[0], 7, 8);
state->aa = BITS(state->words[0], 9, 10);
state->wb_reg = FIELD_B(state->words[0]);
if (state->wb_reg == REG_LIMM) {
state->instr_len += 4;
state->aa = 0;
state->src1 = state->words[1];
} else {
state->src1 = get_reg(state->wb_reg, regs, cregs);
}
state->src2 = FIELD_s9(state->words[0]);
state->dest = FIELD_A(state->words[0]);
state->pref = (state->dest == REG_LIMM);
break;
case op_ST:
state->write = 1;
state->di = BITS(state->words[0], 5, 5);
if (state->di)
break;
state->aa = BITS(state->words[0], 3, 4);
state->zz = BITS(state->words[0], 1, 2);
state->src1 = FIELD_C(state->words[0]);
if (state->src1 == REG_LIMM) {
state->instr_len += 4;
state->src1 = state->words[1];
} else {
state->src1 = get_reg(state->src1, regs, cregs);
}
state->wb_reg = FIELD_B(state->words[0]);
if (state->wb_reg == REG_LIMM) {
state->aa = 0;
state->instr_len += 4;
state->src2 = state->words[1];
} else {
state->src2 = get_reg(state->wb_reg, regs, cregs);
}
state->src3 = FIELD_s9(state->words[0]);
break;
case op_MAJOR_4:
subopcode = MINOR_OPCODE(state->words[0]);
switch (subopcode) {
case 32: /* Jcc */
case 33: /* Jcc.D */
case 34: /* JLcc */
case 35: /* JLcc.D */
is_linked = 0;
if (subopcode == 33 || subopcode == 35)
state->delay_slot = 1;
if (subopcode == 34 || subopcode == 35)
is_linked = 1;
fieldCisReg = 0;
op_format = BITS(state->words[0], 22, 23);
if (op_format == 0 || ((op_format == 3) &&
(!IS_BIT(state->words[0], 5)))) {
fieldC = FIELD_C(state->words[0]);
if (fieldC == REG_LIMM) {
fieldC = state->words[1];
state->instr_len += 4;
} else {
fieldCisReg = 1;
}
} else if (op_format == 1 || ((op_format == 3)
&& (IS_BIT(state->words[0], 5)))) {
fieldC = FIELD_C(state->words[0]);
} else {
/* op_format == 2 */
fieldC = FIELD_s12(state->words[0]);
}
if (!fieldCisReg) {
state->target = fieldC;
state->flow = is_linked ?
direct_call : direct_jump;
} else {
state->target = get_reg(fieldC, regs, cregs);
state->flow = is_linked ?
indirect_call : indirect_jump;
}
state->is_branch = 1;
break;
case 40: /* LPcc */
if (BITS(state->words[0], 22, 23) == 3) {
/* Conditional LPcc u7 */
fieldC = FIELD_C(state->words[0]);
fieldC = fieldC << 1;
fieldC += (addr & ~0x03);
state->is_branch = 1;
state->flow = direct_jump;
state->target = fieldC;
}
/* For Unconditional lp, next pc is the fall through
* which is updated */
break;
case 48 ... 55: /* LD a,[b,c] */
state->di = BITS(state->words[0], 15, 15);
if (state->di)
break;
state->x = BITS(state->words[0], 16, 16);
state->zz = BITS(state->words[0], 17, 18);
state->aa = BITS(state->words[0], 22, 23);
state->wb_reg = FIELD_B(state->words[0]);
if (state->wb_reg == REG_LIMM) {
state->instr_len += 4;
state->src1 = state->words[1];
} else {
state->src1 = get_reg(state->wb_reg, regs,
cregs);
}
state->src2 = FIELD_C(state->words[0]);
if (state->src2 == REG_LIMM) {
state->instr_len += 4;
state->src2 = state->words[1];
} else {
state->src2 = get_reg(state->src2, regs,
cregs);
}
state->dest = FIELD_A(state->words[0]);
if (state->dest == REG_LIMM)
state->pref = 1;
break;
case 10: /* MOV */
/* still need to check for limm to extract instr len */
/* MOV is special case because it only takes 2 args */
switch (BITS(state->words[0], 22, 23)) {
case 0: /* OP a,b,c */
if (FIELD_C(state->words[0]) == REG_LIMM)
state->instr_len += 4;
break;
case 1: /* OP a,b,u6 */
break;
case 2: /* OP b,b,s12 */
break;
case 3: /* OP.cc b,b,c/u6 */
if ((!IS_BIT(state->words[0], 5)) &&
(FIELD_C(state->words[0]) == REG_LIMM))
state->instr_len += 4;
break;
}
break;
default:
/* Not a Load, Jump or Loop instruction */
/* still need to check for limm to extract instr len */
switch (BITS(state->words[0], 22, 23)) {
case 0: /* OP a,b,c */
if ((FIELD_B(state->words[0]) == REG_LIMM) ||
(FIELD_C(state->words[0]) == REG_LIMM))
state->instr_len += 4;
break;
case 1: /* OP a,b,u6 */
break;
case 2: /* OP b,b,s12 */
break;
case 3: /* OP.cc b,b,c/u6 */
if ((!IS_BIT(state->words[0], 5)) &&
((FIELD_B(state->words[0]) == REG_LIMM) ||
(FIELD_C(state->words[0]) == REG_LIMM)))
state->instr_len += 4;
break;
}
break;
}
break;
/* 16 Bit Instructions */
case op_LD_ADD: /* LD_S|LDB_S|LDW_S a,[b,c] */
state->zz = BITS(state->words[0], 3, 4);
state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
state->src2 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
state->dest = FIELD_S_A(state->words[0]);
break;
case op_ADD_MOV_CMP:
/* check for limm, ignore mov_s h,b (== mov_s 0,b) */
if ((BITS(state->words[0], 3, 4) < 3) &&
(FIELD_S_H(state->words[0]) == REG_LIMM))
state->instr_len += 4;
break;
case op_S:
subopcode = BITS(state->words[0], 5, 7);
switch (subopcode) {
case 0: /* j_s */
case 1: /* j_s.d */
case 2: /* jl_s */
case 3: /* jl_s.d */
state->target = get_reg(FIELD_S_B(state->words[0]),
regs, cregs);
state->delay_slot = subopcode & 1;
state->flow = (subopcode >= 2) ?
direct_call : indirect_jump;
break;
case 7:
switch (BITS(state->words[0], 8, 10)) {
case 4: /* jeq_s [blink] */
case 5: /* jne_s [blink] */
case 6: /* j_s [blink] */
case 7: /* j_s.d [blink] */
state->delay_slot = (subopcode == 7);
state->flow = indirect_jump;
state->target = get_reg(31, regs, cregs);
default:
break;
}
default:
break;
}
break;
case op_LD_S: /* LD_S c, [b, u7] */
state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
state->src2 = FIELD_S_u7(state->words[0]);
state->dest = FIELD_S_C(state->words[0]);
break;
case op_LDB_S:
case op_STB_S:
/* no further handling required as byte accesses should not
* cause an unaligned access exception */
state->zz = 1;
break;
case op_LDWX_S: /* LDWX_S c, [b, u6] */
state->x = 1;
/* intentional fall-through */
case op_LDW_S: /* LDW_S c, [b, u6] */
state->zz = 2;
state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
state->src2 = FIELD_S_u6(state->words[0]);
state->dest = FIELD_S_C(state->words[0]);
break;
case op_ST_S: /* ST_S c, [b, u7] */
state->write = 1;
state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
state->src3 = FIELD_S_u7(state->words[0]);
break;
case op_STW_S: /* STW_S c,[b,u6] */
state->write = 1;
state->zz = 2;
state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
state->src3 = FIELD_S_u6(state->words[0]);
break;
case op_SP: /* LD_S|LDB_S b,[sp,u7], ST_S|STB_S b,[sp,u7] */
/* note: we are ignoring possibility of:
* ADD_S, SUB_S, PUSH_S, POP_S as these should not
* cause an unaligned access exception anyway */
state->write = BITS(state->words[0], 6, 6);
state->zz = BITS(state->words[0], 5, 5);
if (state->zz)
break; /* byte accesses should not come here */
if (!state->write) {
state->src1 = get_reg(28, regs, cregs);
state->src2 = FIELD_S_u7(state->words[0]);
state->dest = FIELD_S_B(state->words[0]);
} else {
state->src1 = get_reg(FIELD_S_B(state->words[0]), regs,
cregs);
state->src2 = get_reg(28, regs, cregs);
state->src3 = FIELD_S_u7(state->words[0]);
}
break;
case op_GP: /* LD_S|LDB_S|LDW_S r0,[gp,s11/s9/s10] */
/* note: ADD_S r0, gp, s11 is ignored */
state->zz = BITS(state->words[0], 9, 10);
state->src1 = get_reg(26, regs, cregs);
state->src2 = state->zz ? FIELD_S_s10(state->words[0]) :
FIELD_S_s11(state->words[0]);
state->dest = 0;
break;
case op_Pcl: /* LD_S b,[pcl,u10] */
state->src1 = regs->ret & ~3;
state->src2 = FIELD_S_u10(state->words[0]);
state->dest = FIELD_S_B(state->words[0]);
break;
case op_BR_S:
state->target = FIELD_S_s8(state->words[0]) + (addr & ~0x03);
state->flow = direct_jump;
state->is_branch = 1;
break;
case op_B_S:
fieldA = (BITS(state->words[0], 9, 10) == 3) ?
FIELD_S_s7(state->words[0]) :
FIELD_S_s10(state->words[0]);
state->target = fieldA + (addr & ~0x03);
state->flow = direct_jump;
state->is_branch = 1;
break;
case op_BL_S:
state->target = FIELD_S_s13(state->words[0]) + (addr & ~0x03);
state->flow = direct_call;
state->is_branch = 1;
break;
default:
break;
}
if (bytes_not_copied <= (8 - state->instr_len))
return;
fault: state->fault = 1;
}
long __kprobes get_reg(int reg, struct pt_regs *regs,
struct callee_regs *cregs)
{
long *p;
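/*
 * pt_regs declares r12..r0 (and callee_regs r25..r13) with higher numbered
 * registers at lower addresses, hence the negative indexing below relative
 * to &regs->r0 / &cregs->r13
 */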
if (reg <= 12) {
p = &regs->r0;
return p[-reg];
}
if (cregs && (reg <= 25)) {
p = &cregs->r13;
return p[13-reg];
}
if (reg == 26)
return regs->r26;
if (reg == 27)
return regs->fp;
if (reg == 28)
return regs->sp;
if (reg == 31)
return regs->blink;
return 0;
}
void __kprobes set_reg(int reg, long val, struct pt_regs *regs,
struct callee_regs *cregs)
{
long *p;
switch (reg) {
case 0 ... 12:
p = &regs->r0;
p[-reg] = val;
break;
case 13 ... 25:
if (cregs) {
p = &cregs->r13;
p[13-reg] = val;
}
break;
case 26:
regs->r26 = val;
break;
case 27:
regs->fp = val;
break;
case 28:
regs->sp = val;
break;
case 31:
regs->blink = val;
break;
default:
break;
}
}
/*
* Disassembles the insn at @pc and sets @next_pc to next PC (which could be
* @pc +2/4/6 (ARCompact ISA allows free intermixing of 16/32 bit insns).
*
* If @pc is a branch
* -@tgt_if_br is set to branch target.
* -If branch has delay slot, @next_pc updated with actual next PC.
*/
int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
struct callee_regs *cregs,
unsigned long *next_pc, unsigned long *tgt_if_br)
{
struct disasm_state instr;
memset(&instr, 0, sizeof(struct disasm_state));
disasm_instr(pc, &instr, 0, regs, cregs);
*next_pc = pc + instr.instr_len;
/* Instruction with possible two targets branch, jump and loop */
if (instr.is_branch)
*tgt_if_br = instr.target;
/* For the instructions with delay slots, the fall through is the
* instruction following the instruction in delay slot.
*/
if (instr.delay_slot) {
struct disasm_state instr_d;
disasm_instr(*next_pc, &instr_d, 0, regs, cregs);
*next_pc += instr_d.instr_len;
}
/* Zero Overhead Loop - end of the loop */
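/* STATUS32.L is the loop-inhibit bit: with hardware loops enabled (L clear),
 * lp_count > 1 and next PC == lp_end, the core would branch back to
 * lp_start, so report that as the next PC instead
 */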
if (!(regs->status32 & STATUS32_L) && (*next_pc == regs->lp_end)
&& (regs->lp_count > 1)) {
*next_pc = regs->lp_start;
}
return instr.is_branch;
}
#endif /* CONFIG_KGDB || CONFIG_ARC_MISALIGN_ACCESS || CONFIG_KPROBES */

arch/arc/kernel/entry.S (new file)
@ -0,0 +1,839 @@
/*
* Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARC
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* vineetg: May 2011
* -Userspace unaligned access emulation
*
* vineetg: Feb 2011 (ptrace low level code fixes)
* -traced syscall return code (r0) was not saved into pt_regs for restoring
* into user reg-file when traced task rets to user space.
* -syscalls needing arch-wrappers (mainly for passing sp as pt_regs)
* were not invoking post-syscall trace hook (jumping directly into
* ret_from_system_call)
*
* vineetg: Nov 2010:
* -Vector table jumps (@8 bytes) converted into branches (@4 bytes)
* -To maintain the slot size of 8 bytes/vector, added nop, which is
* not executed at runtime.
*
* vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
* -do_signal()invoked upon TIF_RESTORE_SIGMASK as well
* -Wrappers for sys_{,rt_}sigsuspend() no longer needed as they don't
* need ptregs anymore
*
* Vineetg: Oct 2009
* -In a rare scenario, Process gets a Priv-V exception and gets scheduled
* out. Since we don't do FAKE RTIE for Priv-V, CPU exception state remains
* active (AE bit enabled). This causes a double fault for a subsequent valid
* exception. Thus FAKE RTIE needed in low level Priv-Violation handler.
* Instr Error could also cause similar scenario, so same there as well.
*
* Vineetg: March 2009 (Supporting 2 levels of Interrupts)
*
* Vineetg: Aug 28th 2008: Bug #94984
* -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
* Normally CPU does this automatically, however when doing FAKE rtie,
* we need to explicitly do this. The problem in macros
* FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
* was being "CLEARED" rather then "SET". Since it is Loop INHIBIT Bit,
* setting it and not clearing it clears ZOL context
*
* Vineetg: May 16th, 2008
* - r25 now contains the Current Task when in kernel
*
* Vineetg: Dec 22, 2007
* Minor Surgery of Low Level ISR to make it SMP safe
* - MMU_SCRATCH0 Reg used for freeing up r9 in Level 1 ISR
* - _current_task is made an array of NR_CPUS
* - Access of _current_task wrapped inside a macro so that if hardware
* team agrees for a dedicated reg, no other code is touched
*
* Amit Bhor, Rahul Trivedi, Kanika Nema, Sameer Dhavale : Codito Tech 2004
*/
/*------------------------------------------------------------------
* Function ABI
*------------------------------------------------------------------
*
* Arguments r0 - r7
* Caller Saved Registers r0 - r12
* Callee Saved Registers r13- r25
* Global Pointer (gp) r26
* Frame Pointer (fp) r27
* Stack Pointer (sp) r28
* Interrupt link register (ilink1) r29
* Interrupt link register (ilink2) r30
* Branch link register (blink) r31
*------------------------------------------------------------------
*/
.cpu A7
;############################ Vector Table #################################
.macro VECTOR lbl
#if 1 /* Just in case, build breaks */
j \lbl
#else
b \lbl
nop
#endif
.endm
.section .vector, "ax",@progbits
.align 4
/* Each entry in the vector table must occupy 2 words. Since it is a jump
* across sections (.vector to .text) we are guaranteed that 'j somewhere'
* will use the 'j limm' form of the instruction as long as somewhere is in
* a section other than .vector.
*/
; ********* Critical System Events **********************
VECTOR res_service ; 0x0, Restart Vector (0x0)
VECTOR mem_service ; 0x8, Mem exception (0x1)
VECTOR instr_service ; 0x10, Instrn Error (0x2)
; ******************** Device ISRs **********************
#ifdef CONFIG_ARC_IRQ3_LV2
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif
VECTOR handle_interrupt_level1
#ifdef CONFIG_ARC_IRQ5_LV2
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif
#ifdef CONFIG_ARC_IRQ6_LV2
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif
.rept 25
VECTOR handle_interrupt_level1 ; Other devices
.endr
/* FOR ARC600: timer = 0x3, uart = 0x8, emac = 0x10 */
; ******************** Exceptions **********************
VECTOR EV_MachineCheck ; 0x100, Fatal Machine check (0x20)
VECTOR EV_TLBMissI ; 0x108, Instruction TLB miss (0x21)
VECTOR EV_TLBMissD ; 0x110, Data TLB miss (0x22)
VECTOR EV_TLBProtV ; 0x118, Protection Violation (0x23)
; or Misaligned Access
VECTOR EV_PrivilegeV ; 0x120, Privilege Violation (0x24)
VECTOR EV_Trap ; 0x128, Trap exception (0x25)
VECTOR EV_Extension ; 0x130, Extn Instruction Excp (0x26)
.rept 24
VECTOR reserved ; Reserved Exceptions
.endr
#include <linux/linkage.h> /* ARC_{ENTRY,EXIT} */
#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */
#include <asm/errno.h>
#include <asm/arcregs.h>
#include <asm/irqflags.h>
;##################### Scratch Mem for IRQ stack switching #############
ARCFP_DATA int1_saved_reg
.align 32
.type int1_saved_reg, @object
.size int1_saved_reg, 4
int1_saved_reg:
.zero 4
/* Each Interrupt level needs its own scratch */
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
ARCFP_DATA int2_saved_reg
.type int2_saved_reg, @object
.size int2_saved_reg, 4
int2_saved_reg:
.zero 4
#endif
; ---------------------------------------------
.section .text, "ax",@progbits
res_service: ; processor restart
flag 0x1 ; not implemented
nop
nop
reserved: ; processor restart
rtie ; jump to processor initializations
;##################### Interrupt Handling ##############################
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
; ---------------------------------------------
; Level 2 ISR: Can interrupt a Level 1 ISR
; ---------------------------------------------
ARC_ENTRY handle_interrupt_level2
; TODO-vineetg for SMP this wont work
; free up r9 as scratchpad
st r9, [@int2_saved_reg]
;Which mode (user/kernel) was the system in when intr occurred
lr r9, [status32_l2]
SWITCH_TO_KERNEL_STK
SAVE_ALL_INT2
;------------------------------------------------------
; if L2 IRQ interrupted a L1 ISR, disable preemption
;------------------------------------------------------
ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs)
bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal
; A1 is set in status32_l2
; bump thread_info->preempt_count (Disable preemption)
GET_CURR_THR_INFO_FROM_SP r10
ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
add r9, r9, 1
st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
1:
;------------------------------------------------------
; setup params for Linux common ISR and invoke it
;------------------------------------------------------
lr r0, [icause2]
and r0, r0, 0x1f
bl.d @arch_do_IRQ
mov r1, sp
mov r8,0x2
sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg
b ret_from_exception
ARC_EXIT handle_interrupt_level2
#endif
; ---------------------------------------------
; Level 1 ISR
; ---------------------------------------------
ARC_ENTRY handle_interrupt_level1
/* free up r9 as scratchpad */
#ifdef CONFIG_SMP
sr r9, [ARC_REG_SCRATCH_DATA0]
#else
st r9, [@int1_saved_reg]
#endif
;Which mode (user/kernel) was the system in when intr occurred
lr r9, [status32_l1]
SWITCH_TO_KERNEL_STK
SAVE_ALL_INT1
lr r0, [icause1]
and r0, r0, 0x1f
bl.d @arch_do_IRQ
mov r1, sp
mov r8,0x1
sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg
b ret_from_exception
ARC_EXIT handle_interrupt_level1
;################### Non TLB Exception Handling #############################
; ---------------------------------------------
; Instruction Error Exception Handler
; ---------------------------------------------
ARC_ENTRY instr_service
EXCPN_PROLOG_FREEUP_REG r9
lr r9, [erstatus]
SWITCH_TO_KERNEL_STK
SAVE_ALL_SYS
lr r0, [ecr]
lr r1, [efa]
mov r2, sp
FAKE_RET_FROM_EXCPN r9
bl do_insterror_or_kprobe
b ret_from_exception
ARC_EXIT instr_service
; ---------------------------------------------
; Memory Error Exception Handler
; ---------------------------------------------
ARC_ENTRY mem_service
EXCPN_PROLOG_FREEUP_REG r9
lr r9, [erstatus]
SWITCH_TO_KERNEL_STK
SAVE_ALL_SYS
lr r0, [ecr]
lr r1, [efa]
mov r2, sp
bl do_memory_error
b ret_from_exception
ARC_EXIT mem_service
; ---------------------------------------------
; Machine Check Exception Handler
; ---------------------------------------------
ARC_ENTRY EV_MachineCheck
EXCPN_PROLOG_FREEUP_REG r9
lr r9, [erstatus]
SWITCH_TO_KERNEL_STK
SAVE_ALL_SYS
lr r0, [ecr]
lr r1, [efa]
mov r2, sp
brne r0, 0x200100, 1f
bl do_tlb_overlap_fault
b ret_from_exception
1:
; DEAD END: can't do much, display Regs and HALT
SAVE_CALLEE_SAVED_USER
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
st sp, [r10, THREAD_CALLEE_REG]
j do_machine_check_fault
ARC_EXIT EV_MachineCheck
; ---------------------------------------------
; Protection Violation Exception Handler
; ---------------------------------------------
ARC_ENTRY EV_TLBProtV
EXCPN_PROLOG_FREEUP_REG r9
;Which mode (user/kernel) was the system in when Exception occurred
lr r9, [erstatus]
SWITCH_TO_KERNEL_STK
SAVE_ALL_SYS
;---------(3) Save some more regs-----------------
; vineetg: Mar 6th: Random Seg Fault issue #1
; ecr and efa were not saved in case an Intr sneaks in
; after fake rtie
;
lr r3, [ecr]
lr r4, [efa]
; --------(4) Return from CPU Exception Mode ---------
; Fake a rtie, but rtie to next label
; That way, subsequently, do_page_fault ( ) executes in pure kernel
; mode with further Exceptions enabled
FAKE_RET_FROM_EXCPN r9
;------ (5) Type of Protection Violation? ----------
;
; ProtV Hardware Exception is triggered for Access Faults of 2 types
; -Access Violation (WRITE to READ ONLY Page) - for linux COW
; -Unaligned Access (READ/WRITE on odd boundary)
;
cmp r3, 0x230400 ; Misaligned data access ?
beq 4f
;========= (6a) Access Violation Processing ========
cmp r3, 0x230100
mov r1, 0x0 ; if LD exception ? write = 0
mov.ne r1, 0x1 ; else write = 1
mov r2, r4 ; faulting address
mov r0, sp ; pt_regs
bl do_page_fault
b ret_from_exception
;========== (6b) Non aligned access ============
4:
mov r0, r3 ; cause code
mov r1, r4 ; faulting address
mov r2, sp ; pt_regs
#ifdef CONFIG_ARC_MISALIGN_ACCESS
SAVE_CALLEE_SAVED_USER
mov r3, sp ; callee_regs
#endif
bl do_misaligned_access
#ifdef CONFIG_ARC_MISALIGN_ACCESS
DISCARD_CALLEE_SAVED_USER
#endif
b ret_from_exception
ARC_EXIT EV_TLBProtV
; ---------------------------------------------
; Privilege Violation Exception Handler
; ---------------------------------------------
ARC_ENTRY EV_PrivilegeV
EXCPN_PROLOG_FREEUP_REG r9
lr r9, [erstatus]
SWITCH_TO_KERNEL_STK
SAVE_ALL_SYS
lr r0, [ecr]
lr r1, [efa]
mov r2, sp
FAKE_RET_FROM_EXCPN r9
bl do_privilege_fault
b ret_from_exception
ARC_EXIT EV_PrivilegeV
; ---------------------------------------------
; Extension Instruction Exception Handler
; ---------------------------------------------
ARC_ENTRY EV_Extension
EXCPN_PROLOG_FREEUP_REG r9
lr r9, [erstatus]
SWITCH_TO_KERNEL_STK
SAVE_ALL_SYS
lr r0, [ecr]
lr r1, [efa]
mov r2, sp
bl do_extension_fault
b ret_from_exception
ARC_EXIT EV_Extension
;######################### System Call Tracing #########################
tracesys:
; save EFA in case tracer wants the PC of traced task
; using ERET won't work since next-PC has already committed
lr r12, [efa]
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
st r12, [r11, THREAD_FAULT_ADDR]
; PRE Sys Call Ptrace hook
mov r0, sp ; pt_regs needed
bl @syscall_trace_entry
; Tracing code now returns the syscall num (orig or modif)
mov r8, r0
; Do the Sys Call as we normally would.
; Validate the Sys Call number
cmp r8, NR_syscalls
mov.hi r0, -ENOSYS
bhi tracesys_exit
; Restore the sys-call args. Mere invocation of the hook above could have
; clobbered them (since they are in scratch regs). The tracer could also
; have deliberately changed the syscall args: r0-r7
ld r0, [sp, PT_r0]
ld r1, [sp, PT_r1]
ld r2, [sp, PT_r2]
ld r3, [sp, PT_r3]
ld r4, [sp, PT_r4]
ld r5, [sp, PT_r5]
ld r6, [sp, PT_r6]
ld r7, [sp, PT_r7]
ld.as r9, [sys_call_table, r8]
jl [r9] ; Entry into Sys Call Handler
tracesys_exit:
st r0, [sp, PT_r0] ; sys call return value in pt_regs
;POST Sys Call Ptrace Hook
bl @syscall_trace_exit
b ret_from_exception ; NOT ret_from_system_call as it saves r0 which
; we'd already done before calling the post hook above
;################### Break Point TRAP ##########################
; ======= (5b) Trap is due to Break-Point =========
trap_with_param:
; gdb needs this info to compute stop_pc
stw orig_r8_IS_BRKPT, [sp, PT_orig_r8]
mov r0, r12
lr r1, [efa]
mov r2, sp
; Now that we have read EFA, its safe to do "fake" rtie
; and get out of CPU exception mode
FAKE_RET_FROM_EXCPN r11
; Save callee regs in case gdb wants to have a look
; SP will grow up by size of CALLEE Reg-File
; NOTE: clobbers r12
SAVE_CALLEE_SAVED_USER
; save location of saved Callee Regs @ thread_struct->pc
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
st sp, [r10, THREAD_CALLEE_REG]
; Call the trap handler
bl do_non_swi_trap
; unwind stack to discard Callee saved Regs
DISCARD_CALLEE_SAVED_USER
b ret_from_exception
;##################### Trap Handling ##############################
;
; EV_Trap caused by TRAP_S and TRAP0 instructions.
;------------------------------------------------------------------
; (1) System Calls
; :parameters in r0-r7.
; :r8 has the system call number
; (2) Break Points
;------------------------------------------------------------------
ARC_ENTRY EV_Trap
; Need at least 1 reg to code the early exception prolog
EXCPN_PROLOG_FREEUP_REG r9
;Which mode (user/kernel) was the system in when intr occurred
lr r9, [erstatus]
SWITCH_TO_KERNEL_STK
SAVE_ALL_TRAP
;------- (4) What caused the Trap --------------
lr r12, [ecr]
and.f 0, r12, ECR_PARAM_MASK
bnz trap_with_param
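; zero param => TRAP0 / TRAP_S 0, i.e. a system call (handled below)
; non-zero param => trap_s <n> breakpoint, handled by trap_with_param above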
; ======= (5a) Trap is due to System Call ========
; Before doing anything, return from CPU Exception Mode
FAKE_RET_FROM_EXCPN r11
; If syscall tracing ongoing, invoke pre/post hooks
GET_CURR_THR_INFO_FLAGS r10
btst r10, TIF_SYSCALL_TRACE
bnz tracesys ; this never comes back
;============ This is normal System Call case ==========
; Sys-call num shd not exceed the total system calls avail
cmp r8, NR_syscalls
mov.hi r0, -ENOSYS
bhi ret_from_system_call
; Offset into the syscall_table and call handler
ld.as r9,[sys_call_table, r8]
jl [r9] ; Entry into Sys Call Handler
; fall through to ret_from_system_call
ARC_EXIT EV_Trap
ARC_ENTRY ret_from_system_call
st r0, [sp, PT_r0] ; sys call return value in pt_regs
; fall through yet again to ret_from_exception
;############# Return from Intr/Excp/Trap (Linux Specifics) ##############
;
; If ret to user mode do we need to handle signals, schedule() et al.
ARC_ENTRY ret_from_exception
; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32
ld r8, [sp, PT_status32] ; returning to User/Kernel Mode
#ifdef CONFIG_PREEMPT
bbit0 r8, STATUS_U_BIT, resume_kernel_mode
#else
bbit0 r8, STATUS_U_BIT, restore_regs
#endif
; Before returning to User mode check-for-and-complete any pending work
; such as rescheduling/signal-delivery etc.
resume_user_mode_begin:
; Disable IRQs to ensure that the check for pending work itself is atomic
; (and we don't end up missing a NEED_RESCHED/SIGPENDING due to an
; interim IRQ).
IRQ_DISABLE r10
; Fast Path return to user mode if no pending work
GET_CURR_THR_INFO_FLAGS r9
and.f 0, r9, _TIF_WORK_MASK
bz restore_regs
; --- (Slow Path #1) task preemption ---
bbit0 r9, TIF_NEED_RESCHED, .Lchk_pend_signals
mov blink, resume_user_mode_begin ; tail-call to U mode ret chks
b @schedule ; BTST+Bnz causes relo error in link
.Lchk_pend_signals:
IRQ_ENABLE r10
; --- (Slow Path #2) pending signal ---
mov r0, sp ; pt_regs for arg to do_signal()/do_notify_resume()
bbit0 r9, TIF_SIGPENDING, .Lchk_notify_resume
; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs
; in pt_reg since the "C" ABI (kernel code) will automatically
; save/restore callee-saved regs.
;
; However, here we need to explicitly save callee regs because
; (i) If this signal causes coredump - full regfile needed
; (ii) If signal is SIGTRAP/SIGSTOP, task is being traced thus
; tracer might call PEEKUSR(CALLEE reg)
;
; NOTE: SP will grow up by size of CALLEE Reg-File
SAVE_CALLEE_SAVED_USER ; clobbers r12
; save location of saved Callee Regs @ thread_struct->callee
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
st sp, [r10, THREAD_CALLEE_REG]
bl @do_signal
; Ideally we want to discard the Callee reg above, however if this was
; a tracing signal, tracer could have done a POKEUSR(CALLEE reg)
RESTORE_CALLEE_SAVED_USER
b resume_user_mode_begin ; loop back to start of U mode ret
; --- (Slow Path #3) notify_resume ---
.Lchk_notify_resume:
btst r9, TIF_NOTIFY_RESUME
blnz @do_notify_resume
b resume_user_mode_begin ; unconditionally back to U mode ret chks
; for single exit point from this block
#ifdef CONFIG_PREEMPT
resume_kernel_mode:
; Can't preempt if preemption disabled
GET_CURR_THR_INFO_FROM_SP r10
ld r8, [r10, THREAD_INFO_PREEMPT_COUNT]
brne r8, 0, restore_regs
; check if this task's NEED_RESCHED flag set
ld r9, [r10, THREAD_INFO_FLAGS]
bbit0 r9, TIF_NEED_RESCHED, restore_regs
IRQ_DISABLE r9
; Invoke PREEMPTION
bl preempt_schedule_irq
; preempt_schedule_irq() always returns with IRQ disabled
#endif
; fall through
;############# Return from Intr/Excp/Trap (ARC Specifics) ##############
;
; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap)
; IRQ shd definitely not happen between now and rtie
restore_regs :
; Disable Interrupts while restoring reg-file back
; XXX can this be optimised out
IRQ_DISABLE_SAVE r9, r10 ;@r10 has pristine (pre-disable) copy
#ifdef CONFIG_ARC_CURR_IN_REG
; Restore User R25
; Earlier this used to be only for returning to user mode
; However with 2 levels of IRQ this can also happen even if
; in kernel mode
ld r9, [sp, PT_sp]
brhs r9, VMALLOC_START, 8f
RESTORE_USER_R25
8:
#endif
; Restore REG File. In case multiple Events outstanding,
; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None
; Note that we use realtime STATUS32 (not pt_regs->status32) to
; decide that.
; if Returning from Exception
bbit0 r10, STATUS_AE_BIT, not_exception
RESTORE_ALL_SYS
rtie
; Not Exception so maybe Interrupts (Level 1 or 2)
not_exception:
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
bbit0 r10, STATUS_A2_BIT, not_level2_interrupt
;------------------------------------------------------------------
; if L2 IRQ interrupted a L1 ISR, we'd disabled preemption earlier
; so that sched doesn't move to a new task, causing L1 to be delayed
; non-deterministically. Now that we've achieved that, let's reset
; things to what they were, before returning from L2 context
;----------------------------------------------------------------
ldw r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is
brne r9, orig_r8_IS_IRQ2, 149f ; in fact a L2 ISR ret path
ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs)
bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal
; A1 is set in status32_l2
; decrement thread_info->preempt_count (re-enable preemption)
GET_CURR_THR_INFO_FROM_SP r10
ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
; paranoid check, given A1 was active when A2 happened, preempt count
; must not be 0 because we would have incremented it.
; If this does happen we simply HALT as it means a BUG !!!
cmp r9, 0
bnz 2f
flag 1
2:
sub r9, r9, 1
st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
149:
;return from level 2
RESTORE_ALL_INT2
debug_marker_l2:
rtie
not_level2_interrupt:
#endif
bbit0 r10, STATUS_A1_BIT, not_level1_interrupt
;return from level 1
RESTORE_ALL_INT1
debug_marker_l1:
rtie
not_level1_interrupt:
;this case is for syscalls or Exceptions (with fake rtie)
RESTORE_ALL_SYS
debug_marker_syscall:
rtie
ARC_EXIT ret_from_exception
ARC_ENTRY ret_from_fork
; when the forked child comes here from the __switch_to function
; r0 has the last task pointer.
; put last task in scheduler queue
bl @schedule_tail
; If kernel thread, jump to its entry-point
ld r9, [sp, PT_status32]
brne r9, 0, 1f
jl.d [r14]
mov r0, r13 ; arg to payload
1:
; special case of kernel_thread entry point returning back due to
; kernel_execve() - pretend return from syscall to ret to userland
b ret_from_exception
ARC_EXIT ret_from_fork
;################### Special Sys Call Wrappers ##########################
; TBD: call do_fork directly from here
ARC_ENTRY sys_fork_wrapper
SAVE_CALLEE_SAVED_USER
bl @sys_fork
DISCARD_CALLEE_SAVED_USER
GET_CURR_THR_INFO_FLAGS r10
btst r10, TIF_SYSCALL_TRACE
bnz tracesys_exit
b ret_from_system_call
ARC_EXIT sys_fork_wrapper
ARC_ENTRY sys_vfork_wrapper
SAVE_CALLEE_SAVED_USER
bl @sys_vfork
DISCARD_CALLEE_SAVED_USER
GET_CURR_THR_INFO_FLAGS r10
btst r10, TIF_SYSCALL_TRACE
bnz tracesys_exit
b ret_from_system_call
ARC_EXIT sys_vfork_wrapper
ARC_ENTRY sys_clone_wrapper
SAVE_CALLEE_SAVED_USER
bl @sys_clone
DISCARD_CALLEE_SAVED_USER
GET_CURR_THR_INFO_FLAGS r10
btst r10, TIF_SYSCALL_TRACE
bnz tracesys_exit
b ret_from_system_call
ARC_EXIT sys_clone_wrapper
#ifdef CONFIG_ARC_DW2_UNWIND
; Workaround for bug 94179 (STAR ):
; Despite -fasynchronous-unwind-tables, linker is not making dwarf2 unwinder
; section (.debug_frame) as loadable. So we force it here.
; This also fixes STAR 9000487933 where the prev-workaround (objcopy --setflag)
; would not work after a clean build due to kernel build system dependencies.
.section .debug_frame, "wa",@progbits
#endif

arch/arc/kernel/fpu.c (new file)
@ -0,0 +1,55 @@
/*
* fpu.c - save/restore of Floating Point Unit Registers on task switch
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/sched.h>
#include <asm/switch_to.h>
/*
* To save/restore FPU regs, simplest scheme would use LR/SR insns.
* However since SR serializes the pipeline, an alternate "hack" can be used
* which uses the FPU Exchange insn (DEXCL) to r/w FPU regs.
*
* Store to 64bit dpfp1 reg from a pair of core regs:
* dexcl1 0, r1, r0 ; where r1:r0 is the 64 bit val
*
* Read from dpfp1 into pair of core regs (w/o clobbering dpfp1)
* mov_s r3, 0
* daddh11 r1, r3, r3 ; get "hi" into r1 (dpfp1 unchanged)
* dexcl1 r0, r1, r3 ; get "low" into r0 (dpfp1 low clobbered)
* dexcl1 0, r1, r0 ; restore dpfp1 to orig value
*
* However we can tweak the read, so that read-out of outgoing task's FPU regs
* and write of incoming task's regs happen in one shot. So all the work is
* done before context switch
*/
void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
{
unsigned int *saveto = &prev->thread.fpu.aux_dpfp[0].l;
unsigned int *readfrom = &next->thread.fpu.aux_dpfp[0].l;
const unsigned int zero = 0;
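/*
 * Two blocks, one per double precision reg (DPFP1, then DPFP2):
 * DADDHxx (with zero operands) reads out the outgoing task's "hi" word
 * without disturbing the reg, then DEXCLx reads out its "lo" word while
 * loading the incoming task's 64-bit value in the same instruction.
 */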
__asm__ __volatile__(
"daddh11 %0, %2, %2\n"
"dexcl1 %1, %3, %4\n"
: "=&r" (*(saveto + 1)), /* early clobber must here */
"=&r" (*(saveto))
: "r" (zero), "r" (*(readfrom + 1)), "r" (*(readfrom))
);
__asm__ __volatile__(
"daddh22 %0, %2, %2\n"
"dexcl2 %1, %3, %4\n"
: "=&r"(*(saveto + 3)), /* early clobber must here */
"=&r"(*(saveto + 2))
: "r" (zero), "r" (*(readfrom + 3)), "r" (*(readfrom + 2))
);
}

arch/arc/kernel/head.S (new file)
@ -0,0 +1,111 @@
/*
* ARC CPU startup Code
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Vineetg: Dec 2007
* -Check if we are running on Simulator or on real hardware
* to skip certain things during boot on simulator
*/
#include <asm/asm-offsets.h>
#include <asm/entry.h>
#include <linux/linkage.h>
#include <asm/arcregs.h>
.cpu A7
.section .init.text, "ax",@progbits
.type stext, @function
.globl stext
stext:
;-------------------------------------------------------------------
; Don't clobber r0-r4 yet. It might have bootloader provided info
;-------------------------------------------------------------------
#ifdef CONFIG_SMP
; Only Boot (Master) proceeds. Others wait in platform dependent way
; IDENTITY Reg [ 3 2 1 0 ]
; (cpu-id) ^^^ => Zero for UP ARC700
; => #Core-ID if SMP (Master 0)
GET_CPU_ID r5
cmp r5, 0
jnz arc_platform_smp_wait_to_boot
#endif
; Clear BSS before updating any globals
; XXX: use ZOL here
mov r5, __bss_start
mov r6, __bss_stop
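; st.ab stores zero and then post-increments r5 by 4, so each pass below
; clears one word; loop until r5 reaches __bss_stop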
1:
st.ab 0, [r5,4]
brlt r5, r6, 1b
#ifdef CONFIG_CMDLINE_UBOOT
; support for bootloader provided cmdline
; If cmdline passed by u-boot, then
; r0 = 1 (because ATAGS parsing, now retired, used to use 0)
; r1 = magic number (board identity)
; r2 = addr of cmdline string (somewhere in memory/flash)
brne r0, 1, .Lother_bootup_chores ; u-boot didn't pass cmdline
breq r2, 0, .Lother_bootup_chores ; or cmdline is NULL
mov r5, @command_line
1:
ldb.ab r6, [r2, 1]
breq r6, 0, .Lother_bootup_chores
b.d 1b
stb.ab r6, [r5, 1]
#endif
.Lother_bootup_chores:
; Identify if running on ISS vs Silicon
; IDENTITY Reg [ 3 2 1 0 ]
; (chip-id) ^^^^^ ==> 0xffff for ISS
lr r0, [identity]
lsr r3, r0, 16
cmp r3, 0xffff
mov.z r4, 0
mov.nz r4, 1
st r4, [@running_on_hw]
; setup "current" tsk and optionally cache it in dedicated r25
mov r9, @init_task
SET_CURR_TASK_ON_CPU r9, r0 ; r9 = tsk, r0 = scratch
; setup stack (fp, sp)
mov fp, 0
; tsk->thread_info is really a PAGE, whose bottom hoists stack
GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output)
j start_kernel ; "C" entry point
#ifdef CONFIG_SMP
;----------------------------------------------------------------
; First lines of code run by secondary before jumping to 'C'
;----------------------------------------------------------------
.section .init.text, "ax",@progbits
.type first_lines_of_secondary, @function
.globl first_lines_of_secondary
first_lines_of_secondary:
; setup per-cpu idle task as "current" on this CPU
ld r0, [@secondary_idle_tsk]
SET_CURR_TASK_ON_CPU r0, r1
; setup stack (fp, sp)
mov fp, 0
; set its stack base to tsk->thread_info bottom
GET_TSK_STACK_BASE r0, sp
j start_kernel_secondary
#endif

arch/arc/kernel/irq.c (new file)
@ -0,0 +1,273 @@
/*
* Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <asm/sections.h>
#include <asm/irq.h>
#include <asm/mach_desc.h>
/*
* Early Hardware specific Interrupt setup
* -Called very early (start_kernel -> setup_arch -> setup_processor)
* -Platform Independent (must for any ARC700)
* -Needed for each CPU (hence not foldable into init_IRQ)
*
* what it does ?
* -setup Vector Table Base Reg - in case Linux not linked at 0x8000_0000
* -Disable all IRQs (on CPU side)
* -Optionally, setup the High priority Interrupts as Level 2 IRQs
*/
void __init arc_init_IRQ(void)
{
int level_mask = 0;
write_aux_reg(AUX_INTR_VEC_BASE, _int_vec_base_lds);
/* Disable all IRQs: enable them as devices request */
write_aux_reg(AUX_IENABLE, 0);
/* setup any high priority Interrupts (Level2 in ARCompact jargon) */
#ifdef CONFIG_ARC_IRQ3_LV2
level_mask |= (1 << 3);
#endif
#ifdef CONFIG_ARC_IRQ5_LV2
level_mask |= (1 << 5);
#endif
#ifdef CONFIG_ARC_IRQ6_LV2
level_mask |= (1 << 6);
#endif
if (level_mask) {
pr_info("Level-2 interrupts bitset %x\n", level_mask);
write_aux_reg(AUX_IRQ_LEV, level_mask);
}
}
/*
* ARC700 core includes a simple on-chip intc supporting
* -per IRQ enable/disable
* -2 levels of interrupts (high/low)
* -all interrupts being level triggered
*
* To reduce platform code, we assume all IRQs directly hooked-up into intc.
* Platforms with external intc, hence cascaded IRQs, are free to over-ride
* below, per IRQ.
*/
static void arc_mask_irq(struct irq_data *data)
{
arch_mask_irq(data->irq);
}
static void arc_unmask_irq(struct irq_data *data)
{
arch_unmask_irq(data->irq);
}
static struct irq_chip onchip_intc = {
.name = "ARC In-core Intc",
.irq_mask = arc_mask_irq,
.irq_unmask = arc_unmask_irq,
};
static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
if (irq == TIMER0_IRQ)
irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
else
irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
return 0;
}
static const struct irq_domain_ops arc_intc_domain_ops = {
.xlate = irq_domain_xlate_onecell,
.map = arc_intc_domain_map,
};
static struct irq_domain *root_domain;
void __init init_onchip_IRQ(void)
{
struct device_node *intc = NULL;
intc = of_find_compatible_node(NULL, NULL, "snps,arc700-intc");
if (!intc)
panic("DeviceTree Missing incore intc\n");
root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0,
&arc_intc_domain_ops, NULL);
if (!root_domain)
panic("root irq domain not avail\n");
/* with this we don't need to export root_domain */
irq_set_default_host(root_domain);
}
/*
* Late Interrupt system init called from start_kernel for Boot CPU only
*
* Since slab must already be initialized, platforms can start doing any
* needed request_irq( )s
*/
void __init init_IRQ(void)
{
init_onchip_IRQ();
/* Any external intc can be setup here */
if (machine_desc->init_irq)
machine_desc->init_irq();
#ifdef CONFIG_SMP
/* Master CPU can initialize its side of IPI */
if (machine_desc->init_smp)
machine_desc->init_smp(smp_processor_id());
#endif
}
/*
* "C" Entry point for any ARC ISR, called from low level vector handler
* @irq is the vector number read from ICAUSE reg of on-chip intc
*/
void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
generic_handle_irq(irq);
irq_exit();
set_irq_regs(old_regs);
}
int __init get_hw_config_num_irq(void)
{
uint32_t val = read_aux_reg(ARC_REG_VECBASE_BCR);
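/* bits [1:0] of the VECBASE build config reg encode how many interrupt
 * lines this core build supports */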
switch (val & 0x03) {
case 0:
return 16;
case 1:
return 32;
case 2:
return 8;
default:
return 0;
}
return 0;
}
/*
* arch_local_irq_enable - Enable interrupts.
*
* 1. Explicitly called to re-enable interrupts
* 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc
* which may be in hard ISR itself
*
* Semantics of this function change depending on where it is called from:
*
* -If called from hard-ISR, it must not invert interrupt priorities
* e.g. suppose TIMER is high priority (Level 2) IRQ
* Timer hard-ISR, timer_interrupt( ) calls spin_unlock_irq several times.
* Here local_irq_enable( ) shd not re-enable lower priority interrupts
* -If called from soft-ISR, it must re-enable all interrupts
* soft ISRs are low priority jobs which can be very slow, thus all IRQs
* must be enabled while they run.
* Now hardware context wise we may still be in L2 ISR (not done rtie)
* still we must re-enable both L1 and L2 IRQs
* Another twist is prev scenario with flow being
* L1 ISR ==> interrupted by L2 ISR ==> L2 soft ISR
* here we must not re-enable L1 as prev L1 Interrupt's h/w context will get
* over-written (this is deficiency in ARC700 Interrupt mechanism)
*/
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS /* Complex version for 2 IRQ levels */
void arch_local_irq_enable(void)
{
unsigned long flags;
flags = arch_local_save_flags();
/* Allow both L1 and L2 at the onset */
flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
/* Called from hard ISR (between irq_enter and irq_exit) */
if (in_irq()) {
/* If in L2 ISR, don't re-enable any further IRQs as this can
* cause IRQ priorities to get upside down. e.g. it could allow
* L1 be taken while in L2 hard ISR which is wrong not only in
* theory, it can also cause the dreaded L1-L2-L1 scenario
*/
if (flags & STATUS_A2_MASK)
flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
/* Even if in L1 ISR, allow Higher prio L2 IRQs */
else if (flags & STATUS_A1_MASK)
flags &= ~(STATUS_E1_MASK);
}
/* called from soft IRQ, ideally we want to re-enable all levels */
else if (in_softirq()) {
/* However if this is the case of L1 interrupted by L2,
* re-enabling both may cause the whacky L1-L2-L1 scenario
* because ARC700 allows level 1 to interrupt an active L2 ISR
* Thus we disable both
* However some code, executing in soft ISR wants some IRQs
* to be enabled so we re-enable L2 only
*
* How do we determine if L1 was interrupted by L2:
* -A2 is set (means in L2 ISR)
* -E1 is set in this ISR's pt_regs->status32 which is
* saved copy of status32_l2 when l2 ISR happened
*/
struct pt_regs *pt = get_irq_regs();
if ((flags & STATUS_A2_MASK) && pt &&
(pt->status32 & STATUS_A1_MASK)) {
/*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
flags &= ~(STATUS_E1_MASK);
}
}
arch_local_irq_restore(flags);
}
#else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */
/*
* Simpler version for only 1 level of interrupt
* Here we only Worry about Level 1 Bits
*/
void arch_local_irq_enable(void)
{
unsigned long flags;
/*
* The ARC IDE driver tries to re-enable interrupts from hard-isr
* context which is simply wrong
*/
if (in_irq()) {
WARN_ONCE(1, "IRQ enabled from hard-isr");
return;
}
flags = arch_local_save_flags();
flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
arch_local_irq_restore(flags);
}
#endif
EXPORT_SYMBOL(arch_local_irq_enable);

arch/arc/kernel/kgdb.c (new file)
@ -0,0 +1,205 @@
/*
* kgdb support for ARC
*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kgdb.h>
#include <asm/disasm.h>
#include <asm/cacheflush.h>
static void to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
struct callee_regs *cregs)
{
int regno;
for (regno = 0; regno <= 26; regno++)
gdb_regs[_R0 + regno] = get_reg(regno, kernel_regs, cregs);
for (regno = 27; regno < GDB_MAX_REGS; regno++)
gdb_regs[regno] = 0;
gdb_regs[_FP] = kernel_regs->fp;
gdb_regs[__SP] = kernel_regs->sp;
gdb_regs[_BLINK] = kernel_regs->blink;
gdb_regs[_RET] = kernel_regs->ret;
gdb_regs[_STATUS32] = kernel_regs->status32;
gdb_regs[_LP_COUNT] = kernel_regs->lp_count;
gdb_regs[_LP_END] = kernel_regs->lp_end;
gdb_regs[_LP_START] = kernel_regs->lp_start;
gdb_regs[_BTA] = kernel_regs->bta;
gdb_regs[_STOP_PC] = kernel_regs->ret;
}
static void from_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
struct callee_regs *cregs)
{
int regno;
for (regno = 0; regno <= 26; regno++)
set_reg(regno, gdb_regs[regno + _R0], kernel_regs, cregs);
kernel_regs->fp = gdb_regs[_FP];
kernel_regs->sp = gdb_regs[__SP];
kernel_regs->blink = gdb_regs[_BLINK];
kernel_regs->ret = gdb_regs[_RET];
kernel_regs->status32 = gdb_regs[_STATUS32];
kernel_regs->lp_count = gdb_regs[_LP_COUNT];
kernel_regs->lp_end = gdb_regs[_LP_END];
kernel_regs->lp_start = gdb_regs[_LP_START];
kernel_regs->bta = gdb_regs[_BTA];
}
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
{
to_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
current->thread.callee_reg);
}
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
{
from_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
current->thread.callee_reg);
}
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
struct task_struct *task)
{
if (task)
to_gdb_regs(gdb_regs, task_pt_regs(task),
(struct callee_regs *) task->thread.callee_reg);
}
struct single_step_data_t {
uint16_t opcode[2];
unsigned long address[2];
int is_branch;
int armed;
} single_step_data;
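/*
 * ARC700 has no hardware single step: it is emulated by saving the
 * opcode(s) at the next possible PC(s) (two if the current insn is a
 * branch), planting the breakpoint insn there, and restoring the saved
 * opcodes on the next stop.
 */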
static void undo_single_step(struct pt_regs *regs)
{
if (single_step_data.armed) {
int i;
for (i = 0; i < (single_step_data.is_branch ? 2 : 1); i++) {
memcpy((void *) single_step_data.address[i],
&single_step_data.opcode[i],
BREAK_INSTR_SIZE);
flush_icache_range(single_step_data.address[i],
single_step_data.address[i] +
BREAK_INSTR_SIZE);
}
single_step_data.armed = 0;
}
}
static void place_trap(unsigned long address, void *save)
{
memcpy(save, (void *) address, BREAK_INSTR_SIZE);
memcpy((void *) address, &arch_kgdb_ops.gdb_bpt_instr,
BREAK_INSTR_SIZE);
flush_icache_range(address, address + BREAK_INSTR_SIZE);
}
static void do_single_step(struct pt_regs *regs)
{
single_step_data.is_branch = disasm_next_pc((unsigned long)
regs->ret, regs, (struct callee_regs *)
current->thread.callee_reg,
&single_step_data.address[0],
&single_step_data.address[1]);
place_trap(single_step_data.address[0], &single_step_data.opcode[0]);
if (single_step_data.is_branch) {
place_trap(single_step_data.address[1],
&single_step_data.opcode[1]);
}
single_step_data.armed++;
}
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
char *remcomInBuffer, char *remcomOutBuffer,
struct pt_regs *regs)
{
unsigned long addr;
char *ptr;
undo_single_step(regs);
switch (remcomInBuffer[0]) {
case 's':
case 'c':
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
regs->ret = addr;
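/* intentional fall-through: 's'/'c' share the code below with 'D'/'k',
 * which clears the single-step marker and, for 's', plants a fresh
 * single-step trap */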
case 'D':
case 'k':
atomic_set(&kgdb_cpu_doing_single_step, -1);
if (remcomInBuffer[0] == 's') {
do_single_step(regs);
atomic_set(&kgdb_cpu_doing_single_step,
smp_processor_id());
}
return 0;
}
return -1;
}
unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
return instruction_pointer(regs);
}
int kgdb_arch_init(void)
{
single_step_data.armed = 0;
return 0;
}
void kgdb_trap(struct pt_regs *regs, int param)
{
/* trap_s 3 is used for breakpoints that overwrite existing
* instructions, while trap_s 4 is used for compiled breakpoints.
*
* with trap_s 3 breakpoints the original instruction needs to be
* restored and continuation needs to start at the location of the
* breakpoint.
*
* with trap_s 4 (compiled) breakpoints, continuation needs to
* start after the breakpoint.
*/
if (param == 3)
instruction_pointer(regs) -= BREAK_INSTR_SIZE;
kgdb_handle_exception(1, SIGTRAP, 0, regs);
}
void kgdb_arch_exit(void)
{
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
instruction_pointer(regs) = ip;
}
struct kgdb_arch arch_kgdb_ops = {
/* breakpoint instruction: TRAP_S 0x3 */
#ifdef CONFIG_CPU_BIG_ENDIAN
.gdb_bpt_instr = {0x78, 0x7e},
#else
.gdb_bpt_instr = {0x7e, 0x78},
#endif
};

arch/arc/kernel/kprobes.c (new file)
@ -0,0 +1,525 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/current.h>
#include <asm/disasm.h>
#define MIN_STACK_SIZE(addr) min((unsigned long)MAX_STACK_SIZE, \
(unsigned long)current_thread_info() + THREAD_SIZE - (addr))
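/* live stack above @addr (the current sp), capped at MAX_STACK_SIZE:
 * the amount a jprobe saves/restores around its handler */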
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
/* Attempt to probe at unaligned address */
if ((unsigned long)p->addr & 0x01)
return -EINVAL;
/* Address should not be in exception handling code */
p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
p->opcode = *p->addr;
return 0;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
*p->addr = UNIMP_S_INSTRUCTION;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
arch_disarm_kprobe(p);
/* Can we remove the kprobe in the middle of kprobe handling? */
if (p->ainsn.t1_addr) {
*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
flush_icache_range((unsigned long)p->ainsn.t1_addr,
(unsigned long)p->ainsn.t1_addr +
sizeof(kprobe_opcode_t));
p->ainsn.t1_addr = NULL;
}
if (p->ainsn.t2_addr) {
*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
flush_icache_range((unsigned long)p->ainsn.t2_addr,
(unsigned long)p->ainsn.t2_addr +
sizeof(kprobe_opcode_t));
p->ainsn.t2_addr = NULL;
}
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
kcb->kprobe_status = kcb->prev_kprobe.status;
}
static inline void __kprobes set_current_kprobe(struct kprobe *p)
{
__get_cpu_var(current_kprobe) = p;
}
static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
struct pt_regs *regs)
{
/* Remove the trap instructions inserted for single step and
* restore the original instructions
*/
if (p->ainsn.t1_addr) {
*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
flush_icache_range((unsigned long)p->ainsn.t1_addr,
(unsigned long)p->ainsn.t1_addr +
sizeof(kprobe_opcode_t));
p->ainsn.t1_addr = NULL;
}
if (p->ainsn.t2_addr) {
*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
flush_icache_range((unsigned long)p->ainsn.t2_addr,
(unsigned long)p->ainsn.t2_addr +
sizeof(kprobe_opcode_t));
p->ainsn.t2_addr = NULL;
}
return;
}
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
{
unsigned long next_pc;
unsigned long tgt_if_br = 0;
int is_branch;
unsigned long bta;
/* Copy the opcode back to the kprobe location and execute the
* instruction. Because of this we will not be able to get into the
* same kprobe until this kprobe is done
*/
*(p->addr) = p->opcode;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
/* Now we insert the trap at the next location after this instruction to
* single step. If it is a branch we insert the trap at possible branch
* targets
*/
bta = regs->bta;
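/* STATUS32.DE (0x40) set => the probed insn is in a branch delay slot
 * and BTA holds the taken-branch target */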
if (regs->status32 & 0x40) {
/* We are in a delay slot with the branch taken */
next_pc = bta & ~0x01;
if (!p->ainsn.is_short) {
if (bta & 0x01)
regs->blink += 2;
else {
/* Branch not taken */
next_pc += 2;
/* next pc is taken from bta after executing the
* delay slot instruction
*/
regs->bta += 2;
}
}
is_branch = 0;
} else
is_branch =
disasm_next_pc((unsigned long)p->addr, regs,
(struct callee_regs *) current->thread.callee_reg,
&next_pc, &tgt_if_br);
p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
*(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;
flush_icache_range((unsigned long)p->ainsn.t1_addr,
(unsigned long)p->ainsn.t1_addr +
sizeof(kprobe_opcode_t));
if (is_branch) {
p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
*(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;
flush_icache_range((unsigned long)p->ainsn.t2_addr,
(unsigned long)p->ainsn.t2_addr +
sizeof(kprobe_opcode_t));
}
}
int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
struct kprobe *p;
struct kprobe_ctlblk *kcb;
preempt_disable();
kcb = get_kprobe_ctlblk();
p = get_kprobe((unsigned long *)addr);
if (p) {
/*
* We have reentered the kprobe_handler, since another kprobe
* was hit while within the handler, we save the original
* kprobes and single step on the instruction of the new probe
* without calling any user handlers to avoid recursive
* kprobes.
*/
if (kprobe_running()) {
save_previous_kprobe(kcb);
set_current_kprobe(p);
kprobes_inc_nmissed_count(p);
setup_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
}
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
/* If we have no pre-handler or it returned 0, we continue with
* normal processing. If we have a pre-handler and it returned
* non-zero - which is expected from setjmp_pre_handler for
* jprobe, we return without single stepping and leave that to
* the break-handler which is invoked by a kprobe from
* jprobe_return
*/
if (!p->pre_handler || !p->pre_handler(p, regs)) {
setup_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
}
return 1;
} else if (kprobe_running()) {
p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
setup_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
}
}
/* no_kprobe: */
preempt_enable_no_resched();
return 0;
}
static int __kprobes arc_post_kprobe_handler(unsigned long addr,
struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (!cur)
return 0;
resume_execution(cur, addr, regs);
/* Rearm the kprobe */
arch_arm_kprobe(cur);
/*
* When we return from the trap instruction we go to the next instruction.
* We restored the actual instruction in resume_execution and we want to
* return to the same address and execute it
*/
regs->ret = addr;
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
cur->post_handler(cur, regs, 0);
}
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
goto out;
}
reset_current_kprobe();
out:
preempt_enable_no_resched();
return 1;
}
/*
* Fault can be for the instruction being single stepped or for the
* pre/post handlers in the module.
* This is applicable for applications like user probes, where we have the
* probe in user space and the handlers in the kernel
*/
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
switch (kcb->kprobe_status) {
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
* We are here because the instruction being single stepped
* caused the fault. We reset the current kprobe and allow the
* exception handler run as if it were a regular exception. In our
* case it doesn't matter because the system will be halted
*/
resume_execution(cur, (unsigned long)cur->addr, regs);
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
preempt_enable_no_resched();
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* We are here because the instructions in the pre/post handler
* caused the fault.
*/
/* We increment the nmissed count for accounting,
* we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
/*
* We come here because instructions in the pre/post
* handler caused the page fault. This could happen
* if the handler tries to access user space via
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it first.
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
/*
* In case the user-specified fault handler returned zero,
* try to fix up.
*/
if (fixup_exception(regs))
return 1;
/*
* fixup_exception() could not handle it;
* let do_page_fault() fix it.
*/
break;
default:
break;
}
return 0;
}
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = data;
unsigned long addr = args->err;
int ret = NOTIFY_DONE;
switch (val) {
case DIE_IERR:
if (arc_kprobe_handler(addr, args->regs))
return NOTIFY_STOP;
break;
case DIE_TRAP:
if (arc_post_kprobe_handler(addr, args->regs))
return NOTIFY_STOP;
break;
default:
break;
}
return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long sp_addr = regs->sp;
kcb->jprobe_saved_regs = *regs;
memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
regs->ret = (unsigned long)(jp->entry);
return 1;
}
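/*
 * jprobe_return() below executes "unimp_s", an invalid instruction:
 * the resulting exception (presumably reported as DIE_IERR, see
 * kprobe_exceptions_notify below) lands back in arc_kprobe_handler(),
 * whose kprobe_running() path calls the break_handler, i.e.
 * longjmp_break_handler(), to restore the state saved by
 * setjmp_pre_handler().
 */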
void __kprobes jprobe_return(void)
{
__asm__ __volatile__("unimp_s");
return;
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long sp_addr;
*regs = kcb->jprobe_saved_regs;
sp_addr = regs->sp;
memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
preempt_enable_no_resched();
return 1;
}
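/*
 * The holder function below only serves to emit the global
 * kretprobe_trampoline symbol (a single nop). arch_init_kprobes()
 * registers a kprobe on it (trampoline_p), so when a probed function
 * returns through the hijacked blink it lands on the trampoline and
 * trampoline_probe_handler() takes over.
 */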
static void __used kretprobe_trampoline_holder(void)
{
__asm__ __volatile__(".global kretprobe_trampoline\n"
"kretprobe_trampoline:\n" "nop\n");
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *) regs->blink;
/* Replace the return addr with trampoline addr */
regs->blink = (unsigned long)&kretprobe_trampoline;
}
static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
/*
* It is possible to have multiple instances associated with a given
* task either because multiple functions in the call path have a
* return probe installed on them, and/or more than one return probe
* was registered for a target function.
*
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
* function, the first instance's ret_addr will point to the
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
if (ri->rp && ri->rp->handler)
ri->rp->handler(ri, regs);
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address) {
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
regs->ret = orig_ret_address;
reset_current_kprobe();
kretprobe_hash_unlock(current, &flags);
preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
/* By returning a non-zero value, we are telling the kprobe handler
* that we don't want the post_handler to run
*/
return 1;
}
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
int __init arch_init_kprobes(void)
{
/* Registering the trampoline code for the kret probe */
return register_kprobe(&trampoline_p);
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
return 1;
return 0;
}
void trap_is_kprobe(unsigned long cause, unsigned long address,
struct pt_regs *regs)
{
notify_die(DIE_TRAP, "kprobe_trap", regs, address, cause, SIGTRAP);
}

arch/arc/kernel/module.c Normal file

@ -0,0 +1,145 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kernel.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <asm/unwind.h>
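/*
 * arc_write_me() below patches a 32-bit value into the instruction
 * stream in ARC's "middle-endian" layout: the upper 16 bits are stored
 * in the first halfword, the lower 16 bits in the next one. This is
 * what the R_ARC_32_ME relocation type in apply_relocate_add() expects.
 */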
static inline void arc_write_me(unsigned short *addr, unsigned long value)
{
*addr = (value & 0xffff0000) >> 16;
*(addr + 1) = (value & 0xffff);
}
/* ARC specific section quirks - before relocation loop in generic loader
*
* For dwarf unwinding out of modules, this needs to
* 1. Ensure the .debug_frame section is allocatable (ARC linker bug:
* despite -fasynchronous-unwind-tables it isn't marked SHF_ALLOC).
* 2. Since we are iterating thru sec hdr tbl anyways, make a note of
* the exact section index, for later use.
*/
int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstr, struct module *mod)
{
#ifdef CONFIG_ARC_DW2_UNWIND
int i;
mod->arch.unw_sec_idx = 0;
mod->arch.unw_info = NULL;
for (i = 1; i < hdr->e_shnum; i++) {
if (strcmp(secstr+sechdrs[i].sh_name, ".debug_frame") == 0) {
sechdrs[i].sh_flags |= SHF_ALLOC;
mod->arch.unw_sec_idx = i;
break;
}
}
#endif
return 0;
}
void module_arch_cleanup(struct module *mod)
{
#ifdef CONFIG_ARC_DW2_UNWIND
if (mod->arch.unw_info)
unwind_remove_table(mod->arch.unw_info, 0);
#endif
}
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex, /* sec index for sym tbl */
unsigned int relsec, /* sec index for relo sec */
struct module *module)
{
int i, n;
Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym_entry, *sym_sec;
Elf32_Addr relocation;
Elf32_Addr location;
Elf32_Addr sec_to_patch;
int relo_type;
sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr;
sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
pr_debug("\n========== Module Sym reloc ===========================\n");
pr_debug("Section to fixup %x\n", sec_to_patch);
pr_debug("=========================================================\n");
pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n");
pr_debug("=========================================================\n");
/* Loop thru entries in relocation section */
for (i = 0; i < n; i++) {
/* This is where to make the change */
location = sec_to_patch + rel_entry[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym_entry = sym_sec + ELF32_R_SYM(rel_entry[i].r_info);
relocation = sym_entry->st_value + rel_entry[i].r_addend;
pr_debug("\t%x\t\t%x\t\t%x %x %x [%s]\n",
rel_entry[i].r_offset, rel_entry[i].r_addend,
sym_entry->st_value, location, relocation,
strtab + sym_entry->st_name);
/* This assumes modules are built with -mlong-calls,
* so any branches/jumps are absolute 32-bit jumps and
* global data accesses are likewise absolute 32-bit.
* Both of these are handled by the same relocation type.
*/
relo_type = ELF32_R_TYPE(rel_entry[i].r_info);
if (likely(R_ARC_32_ME == relo_type))
arc_write_me((unsigned short *)location, relocation);
else if (R_ARC_32 == relo_type)
*((Elf32_Addr *) location) = relocation;
else
goto relo_err;
}
return 0;
relo_err:
pr_err("%s: unknown relocation: %u\n",
module->name, ELF32_R_TYPE(rel_entry[i].r_info));
return -ENOEXEC;
}
/* Just before lift off: after sections have been relocated, we add the
* dwarf section to the unwinder table pool.
* This couldn't be done in module_frob_arch_sections() because
* relocations had not been applied by then.
*/
int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *mod)
{
#ifdef CONFIG_ARC_DW2_UNWIND
void *unw;
int unwsec = mod->arch.unw_sec_idx;
if (unwsec) {
unw = unwind_add_table(mod, (void *)sechdrs[unwsec].sh_addr,
sechdrs[unwsec].sh_size);
mod->arch.unw_info = unw;
}
#endif
return 0;
}

arch/arc/kernel/process.c Normal file

@ -0,0 +1,235 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Amit Bhor, Kanika Nema: Codito Technologies 2004
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/elf.h>
#include <linux/tick.h>
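/*
 * arc_settls: record the userland TLS area pointer for the current
 * task. It is kept in thread_info (thr_ptr) and handed back to user
 * space via the arc_gettls sys-call below.
 */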
SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
{
task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
return 0;
}
/*
* We return the user space TLS data ptr as the sys-call return code.
* Ideally it should be copied to user space.
* However we can cheat, thanks to the fact that some sys-calls do
* return absurdly high values: since the TLS data ptr is never going
* to be in the 0xFFFF_xxxx range, it won't be mistaken for a sys-call
* error, and this is loads better than copy-to-user, which is a
* definite D-TLB miss.
*/
SYSCALL_DEFINE0(arc_gettls)
{
return task_thread_info(current)->thr_ptr;
}
static inline void arch_idle(void)
{
/* sleep, but enable all interrupts before committing */
__asm__("sleep 0x3");
}
void cpu_idle(void)
{
/* Since we SLEEP in idle loop, TIF_POLLING_NRFLAG can't be set */
/* endless idle loop with no priority at all */
while (1) {
tick_nohz_idle_enter();
rcu_idle_enter();
doze:
local_irq_disable();
if (!need_resched()) {
arch_idle();
goto doze;
} else {
local_irq_enable();
}
rcu_idle_exit();
tick_nohz_idle_exit();
schedule_preempt_disabled();
}
}
asmlinkage void ret_from_fork(void);
/* Layout of Child kernel mode stack as setup at the end of this function is
*
* | ... |
* | ... |
* | unused |
* | |
* ------------------ <==== top of Stack (thread.ksp)
* | UNUSED 1 word|
* ------------------
* | r25 |
* ~ ~
* | --to-- | (CALLEE Regs of user mode)
* | r13 |
* ------------------
* | fp |
* | blink | @ret_from_fork
* ------------------
* | |
* ~ ~
* ~ ~
* | |
* ------------------
* | r12 |
* ~ ~
* | --to-- | (scratch Regs of user mode)
* | r0 |
* ------------------
* | UNUSED 1 word|
* ------------------ <===== END of PAGE
*/
int copy_thread(unsigned long clone_flags,
unsigned long usp, unsigned long arg,
struct task_struct *p)
{
struct pt_regs *c_regs; /* child's pt_regs */
unsigned long *childksp; /* to unwind out of __switch_to() */
struct callee_regs *c_callee; /* child's callee regs */
struct callee_regs *parent_callee; /* parent's callee regs */
struct pt_regs *regs = current_pt_regs();
/* Mark the specific anchors to begin with (see pic above) */
c_regs = task_pt_regs(p);
childksp = (unsigned long *)c_regs - 2; /* 2 words for FP/BLINK */
c_callee = ((struct callee_regs *)childksp) - 1;
/*
* __switch_to() uses thread.ksp to start unwinding the stack.
* For kernel threads we don't need to create callee regs, but the
* stack layout nevertheless needs to remain the same.
* Also, since __switch_to() unwinds callee regs anyway, we use
* this to populate kernel thread entry-pt/args into callee regs,
* so that ret_from_kernel_thread() becomes simpler.
*/
p->thread.ksp = (unsigned long)c_callee; /* THREAD_KSP */
/* __switch_to expects FP(0), BLINK(return addr) at top */
childksp[0] = 0; /* fp */
childksp[1] = (unsigned long)ret_from_fork; /* blink */
if (unlikely(p->flags & PF_KTHREAD)) {
memset(c_regs, 0, sizeof(struct pt_regs));
c_callee->r13 = arg; /* argument to kernel thread */
c_callee->r14 = usp; /* function */
return 0;
}
/*--------- User Task Only --------------*/
/* __switch_to expects FP(0), BLINK(return addr) at top of stack */
childksp[0] = 0; /* for POP fp */
childksp[1] = (unsigned long)ret_from_fork; /* for POP blink */
/* Copy parent's pt_regs onto child's kernel mode stack */
*c_regs = *regs;
if (usp)
c_regs->sp = usp;
c_regs->r0 = 0; /* fork returns 0 in child */
parent_callee = ((struct callee_regs *)regs) - 1;
*c_callee = *parent_callee;
if (unlikely(clone_flags & CLONE_SETTLS)) {
/*
* set task's userland TLS data ptr from the 4th arg;
* the clone C-lib call differs from the clone sys-call
*/
task_thread_info(p)->thr_ptr = regs->r3;
} else {
/* Normal fork case: set parent's TLS ptr in child */
task_thread_info(p)->thr_ptr =
task_thread_info(current)->thr_ptr;
}
return 0;
}
/*
* Some archs flush debug and FPU info here
*/
void flush_thread(void)
{
}
/*
* Free any architecture-specific thread data structures, etc.
*/
void exit_thread(void)
{
}
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
return 0;
}
/*
* API expected by scheduler code: if a thread is sleeping, where is it
* sleeping? What is this good for? It will always be the scheduler or
* ret_from_fork, so we effectively hard-code that anyway.
*/
unsigned long thread_saved_pc(struct task_struct *t)
{
struct pt_regs *regs = task_pt_regs(t);
unsigned long blink = 0;
/*
* If the thread being queried for is not itself calling this, then it
* implies it is not executing, which in turn implies it is sleeping,
* which in turn implies it got switched OUT by the scheduler.
* In that case, its kernel mode blink can be reliably retrieved as per
* the picture above (right above pt_regs).
*/
if (t != current && t->state != TASK_RUNNING)
blink = *((unsigned int *)regs - 1);
return blink;
}
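/*
 * Sanity check the ELF header before exec'ing a binary: besides the
 * machine type, the OSABI bits of e_flags are compared against the ABI
 * version the kernel expects, so binaries produced by an older
 * toolchain (predating the current syscall ABI) are refused.
 */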
int elf_check_arch(const struct elf32_hdr *x)
{
unsigned int eflags;
if (x->e_machine != EM_ARCOMPACT)
return 0;
eflags = x->e_flags;
if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
pr_err("ABI mismatch - you need newer toolchain\n");
force_sigsegv(SIGSEGV, current);
return 0;
}
return 1;
}
EXPORT_SYMBOL(elf_check_arch);

Some files were not shown because too many files have changed in this diff.