linux-headers-5.4.0-2.3

Alibek Omarov 2021-07-14 01:44:10 +03:00
commit d99c8c0484
5613 changed files with 1084920 additions and 0 deletions

Makefile (new file, 1877 lines)

File diff suppressed because it is too large

arch/e2k/Makefile (new file, 252 lines)

@@ -0,0 +1,252 @@
# e2k/Makefile
#
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies. Remember to have actions
# for "archclean" and "archdep" for cleaning up and making dependencies for
# this architecture
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
KBUILD_DEFCONFIG ?= defconfig
AS = $(shell $(CC) -print-prog-name=as)
OBJDUMP = $(shell $(CC) -print-prog-name=objdump)
LD = $(shell $(CC) -print-prog-name=ld)
OBJCOPY = $(shell $(CC) -print-prog-name=objcopy)
KBUILD_CFLAGS += -fkernel -gline -masm-inline $(call cc-option,-fforbid-fp) \
$(call cc-option,-fmax-errors=5)
ifeq ($(PROFILE_GENERATE), 1)
KBUILD_CFLAGS += -fprofile-generate-kernel
endif
ifeq ($(origin PROFILE_USE), undefined)
else
KBUILD_CFLAGS += -fprofile-use="$(PROFILE_USE)"
endif
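# Illustrative two-stage profiled build (a sketch; exact profile-data
# handling depends on the lcc toolchain):
#   make PROFILE_GENERATE=1          # instrumented kernel, run the workload
#   make PROFILE_USE=profile.data    # rebuild using the collected profile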
KBUILD_CFLAGS += $(call cc-option,-finline-functions,) \
$(call cc-option,-finline-functions-called-once,)
# Some uninteresting or broken warnings can be disabled with #pragma's only
KBUILD_CFLAGS += -Wno-array-bounds -Wno-duplicate-type-qualifier \
-Wno-builtin-functions-redefined -Wno-reduced-alignment \
-Wno-unused-value -Wno-overflow -Wno-signed-one-bit-field \
-include $(srctree)/arch/e2k/include/asm/override-lcc-warnings.h
LDFLAGS_vmlinux :=
CHECKFLAGS += -D__e2k__
CFLAGS += -pipe -D__linux__
KBUILD_CFLAGS += $(CFLAGS)
ifdef CONFIG_SMP_DAM_BUG
KBUILD_CFLAGS += -fno-dam-call
endif
CFLAGS_GENERIC := -march=elbrus-v2
CFLAGS_ES2 := -mtune=elbrus-2c+
CFLAGS_E2S := -mtune=elbrus-4c
CFLAGS_E8C := -mtune=elbrus-8c
CFLAGS_E1CP := -mtune=elbrus-1c+
CFLAGS_E8C2 := -mtune=elbrus-8c2
CFLAGS_E12C := -mtune=elbrus-12c
CFLAGS_E16C := -mtune=elbrus-16c
CFLAGS_E2C3 := -mtune=elbrus-2c3
CFLAGS_ALL_CPUS := $(CFLAGS_ES2) $(CFLAGS_E2S) $(CFLAGS_E8C) $(CFLAGS_E1CP) \
$(CFLAGS_E8C2) $(CFLAGS_E12C) $(CFLAGS_E16C) $(CFLAGS_E2C3)
export CFLAGS_ALL_CPUS
CFLAGS_E2K_SIC := $(CFLAGS_ES2)
export CFLAGS_ES2 CFLAGS_E2S CFLAGS_E8C CFLAGS_E1CP CFLAGS_E8C2 CFLAGS_E2C3 \
CFLAGS_E12C CFLAGS_E16C CFLAGS_E2K_SIC
ifeq ($(CONFIG_E2K_MACHINE),y)
ifeq ($(CONFIG_E2K_ES2_DSP),y)
KBUILD_CFLAGS += $(CFLAGS_ES2)
KBUILD_AFLAGS += $(CFLAGS_ES2)
TARGET_MDL := 04
else
ifeq ($(CONFIG_E2K_ES2_RU),y)
KBUILD_CFLAGS += $(CFLAGS_ES2)
KBUILD_AFLAGS += $(CFLAGS_ES2)
TARGET_MDL := 06
else
ifeq ($(CONFIG_E2K_E2S),y)
KBUILD_CFLAGS += $(CFLAGS_E2S)
KBUILD_AFLAGS += $(CFLAGS_E2S)
TARGET_MDL := 03
else
ifeq ($(CONFIG_E2K_E8C),y)
KBUILD_CFLAGS += $(CFLAGS_E8C)
KBUILD_AFLAGS += $(CFLAGS_E8C)
TARGET_MDL := 07
else
ifeq ($(CONFIG_E2K_E1CP),y)
KBUILD_CFLAGS += $(CFLAGS_E1CP)
KBUILD_AFLAGS += $(CFLAGS_E1CP)
TARGET_MDL := 08
else
ifeq ($(CONFIG_E2K_E8C2),y)
KBUILD_CFLAGS += $(CFLAGS_E8C2)
KBUILD_AFLAGS += $(CFLAGS_E8C2)
TARGET_MDL := 09
else
ifeq ($(CONFIG_E2K_E12C),y)
KBUILD_CFLAGS += $(CFLAGS_E12C)
KBUILD_AFLAGS += $(CFLAGS_E12C)
TARGET_MDL := 0a
else
ifeq ($(CONFIG_E2K_E16C),y)
KBUILD_CFLAGS += $(CFLAGS_E16C)
KBUILD_AFLAGS += $(CFLAGS_E16C)
TARGET_MDL := 0b
else
ifeq ($(CONFIG_E2K_E2C3),y)
KBUILD_CFLAGS += $(CFLAGS_E2C3)
KBUILD_AFLAGS += $(CFLAGS_E2C3)
TARGET_MDL := 0c
else
error "Invalid e2k machine type"
endif # ifeq ($(CONFIG_E2K_E2C3),y)
endif # ifeq ($(CONFIG_E2K_E16C),y)
endif # ifeq ($(CONFIG_E2K_E12C),y)
endif # ifeq ($(CONFIG_E2K_E8C2),y)
endif # ifeq ($(CONFIG_E2K_E1CP),y)
endif # ifeq ($(CONFIG_E2K_E8C),y)
endif # ifeq ($(CONFIG_E2K_E2S),y)
endif # ifeq ($(CONFIG_E2K_ES2_RU),y)
endif # ifeq ($(CONFIG_E2K_ES2_DSP),y)
else # ! ifeq ($(CONFIG_E2K_MACHINE),y)
KBUILD_CFLAGS += $(CFLAGS_GENERIC)
KBUILD_AFLAGS += $(CFLAGS_GENERIC)
TARGET_MDL := 00
endif
KBUILD_LDFLAGS += --relaxed-e2k-machine-check
KBUILD_CFLAGS += $(cflags-y)
libs-y += arch/e2k/lib/
core-y += arch/e2k/kernel/ \
arch/e2k/mm/ \
arch/e2k/p2v/ \
arch/e2k/fast_syscalls/
core-$(CONFIG_PROTECTED_MODE) += arch/e2k/3p/
drivers-$(CONFIG_PCI) += arch/e2k/pci/
# suspend and hibernation support
drivers-$(CONFIG_PM) += arch/e2k/power/
#KVM hypervisor and guest support
core-$(CONFIG_KVM) += arch/e2k/kvm/
core-$(CONFIG_KVM_GUEST) += arch/e2k/kvm/guest/
# Elbrus common modules
core-y += arch/l/
drivers-$(CONFIG_PCI) += arch/l/pci/
boot := arch/e2k/boot
all: es2boot
MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
.PHONY: clean archclean archmrproper archdep bootimage image zImage
es2boot: vmlinux
$(Q)$(MAKE) $(build)=$(boot) CONFIG_ES2=y boot
e2sboot: vmlinux
$(Q)$(MAKE) $(build)=$(boot) CONFIG_E2S=y boot
e8cboot: vmlinux
$(Q)$(MAKE) $(build)=$(boot) CONFIG_E8C=y boot
e1cpboot: vmlinux
$(Q)$(MAKE) $(build)=$(boot) CONFIG_E1CP=y boot
e8c2boot: vmlinux
$(Q)$(MAKE) $(build)=$(boot) CONFIG_E8C2=y CONFIG_E8C=y boot
e12cboot: vmlinux
$(Q)$(MAKE) $(build)=$(boot) CONFIG_E12C=y boot
e16cboot: vmlinux
$(Q)$(MAKE) $(build)=$(boot) CONFIG_E16C=y boot
e2c3boot: vmlinux
$(Q)$(MAKE) $(build)=$(boot) CONFIG_E2C3=y boot
image: vmlinux
$(Q)$(MAKE) $(build)=$(boot) CONFIG_BOOT=y $(objtree)/image.boot
$(Q)echo "Target mdl: $(TARGET_MDL)"; \
echo $(TARGET_MDL) | \
xxd -r -p | \
dd of=$(objtree)/image.boot bs=1 seek=258 count=1 conv=notrunc 2>/dev/null; \
echo 00000000 | xxd -r -p | \
dd of=$(objtree)/image.boot bs=1 seek=1588 count=4 conv=notrunc 2>/dev/null; \
e2k_kernel_csum=`cksum $(objtree)/image.boot | awk '{ printf "%08x\n", $$1 }'`; \
echo "Kernel image check sum: $$e2k_kernel_csum"; \
echo $$e2k_kernel_csum | \
sed 's/\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)/\7\8\5\6\3\4\1\2/' | \
xxd -r -p | \
dd of=$(objtree)/image.boot bs=1 seek=1588 count=4 conv=notrunc 2>/dev/null; \
echo 'Kernel: image.boot is ready' ' (#'`cat .version`')'
zImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) CONFIG_BOOT=y $(objtree)/zImage
$(Q)echo "Target mdl: $(TARGET_MDL)"; \
echo $(TARGET_MDL) | \
xxd -r -p | \
dd of=$(objtree)/zImage bs=1 seek=258 count=1 conv=notrunc 2>/dev/null; \
echo 00000000 | xxd -r -p | \
dd of=$(objtree)/zImage bs=1 seek=1588 count=4 conv=notrunc 2>/dev/null; \
e2k_kernel_csum=`cksum $(objtree)/zImage | awk '{ printf "%08x\n", $$1 }'`; \
echo "Kernel image check sum: $$e2k_kernel_csum"; \
echo $$e2k_kernel_csum | \
sed 's/\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)/\7\8\5\6\3\4\1\2/' | \
xxd -r -p | \
dd of=$(objtree)/zImage bs=1 seek=1588 count=4 conv=notrunc 2>/dev/null; \
echo 'Kernel: zImage is ready' ' (#'`cat .version`')'
image.boot: bootimage
bootimage: image
archclean:
$(Q)$(MAKE) $(clean)=arch/e2k/boot
archmrproper:
archdep:
@$(MAKEBOOT) dep
install-headers:
@$(MAKEBOOT) install-headers
install-includes: include/linux/version.h arch/e2k/include FORCE
$(CONFIG_SHELL) scripts/gen-osl-include -l $(srctree) -r $(ROOT_WA)
build-install: FORCE
$(CONFIG_SHELL) scripts/gen-osl-build -l $(srctree) -m $(MODLIB)
define archhelp
echo '* image/bootimage - Kernel boot image (image.boot)'
echo ' zImage - Compressed kernel boot image (image.boot)'
echo ' install-headers - Install kernel headers in '
echo ' <basedir>/usr/include'
echo ' es2boot - Build kernel boot image with small embedded boot for es2 simulator'
echo ' e2sboot - Build kernel boot image with small embedded boot for e2s simulator'
echo ' e8cboot - Build kernel boot image with small embedded boot for e8c simulator'
echo ' e1cpboot - Build kernel boot image with small embedded boot for e1cp simulator'
echo ' e8c2boot - Build kernel boot image with small embedded boot for e8c2 simulator'
echo ' e12cboot - Build kernel boot image with small embedded boot for e12c simulator'
echo ' e16cboot - Build kernel boot image with small embedded boot for e16c simulator'
echo ' e2c3boot - Build kernel boot image with small embedded boot for e2c3 simulator'
echo ' [with_kernel=1] - When building boot, build in compressed kernel into the boot image'
endef
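
The image and zImage recipes above patch two fields into the finished boot image: the target model byte at offset 258, and a CRC over the image (the checksum field is zeroed first so it is covered by a known value) stored at offset 1588. The sed expression reorders the eight hex digits printed by cksum so the value lands in little-endian byte order. A minimal C sketch of that byte swap (illustrative only, not part of the build):

    #include <stdint.h>
    #include <stdio.h>

    /* Store a 32-bit checksum little-endian, as the sed groups
     * \7\8\5\6\3\4\1\2 do with the hex string from cksum. */
    static void store_le32(uint8_t out[4], uint32_t csum)
    {
        out[0] = csum & 0xff;
        out[1] = (csum >> 8) & 0xff;
        out[2] = (csum >> 16) & 0xff;
        out[3] = (csum >> 24) & 0xff;
    }

    int main(void)
    {
        uint8_t b[4];
        store_le32(b, 0x12345678);  /* "12345678" -> 78 56 34 12 */
        printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
        return 0;
    }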

@@ -0,0 +1,14 @@
#ifndef _ASM_L_ACENV_H_
#define _ASM_L_ACENV_H_
int __acpi_acquire_global_lock(unsigned int *lock);
int __acpi_release_global_lock(unsigned int *lock);
#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
((Acq) = __acpi_acquire_global_lock(&facs->global_lock))
#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
((Acq) = __acpi_release_global_lock(&facs->global_lock))
#endif /* _ASM_L_ACENV_H_ */
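
Both macros follow the ACPICA convention: the second argument receives the result of the underlying lock routine. A sketch of the expected call pattern (facs and the critical section are illustrative, not defined in this header):

    int acquired;

    ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
    if (acquired) {
        /* ... region shared with firmware ... */
        ACPI_RELEASE_GLOBAL_LOCK(facs, acquired);
    }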

@@ -0,0 +1,137 @@
#ifndef _ASM_L_ACPI_H
#define _ASM_L_ACPI_H
/*
* Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
* Copyright (C) 2012 Evgeny Kravtsunov <kravtsunov_e@mcst.ru>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <asm/mpspec.h>
#include <asm/irqflags.h>
#include <acpi/pdc_intel.h>
#define COMPILER_DEPENDENT_INT64 long
#define COMPILER_DEPENDENT_UINT64 unsigned long
/*
* Calling conventions:
*
* ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
* ACPI_EXTERNAL_XFACE - External ACPI interfaces
* ACPI_INTERNAL_XFACE - Internal ACPI interfaces
* ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
*/
#define ACPI_SYSTEM_XFACE
#define ACPI_EXTERNAL_XFACE
#define ACPI_INTERNAL_XFACE
#define ACPI_INTERNAL_VAR_XFACE
/* Asm macros */
#define ACPI_ASM_MACROS
#define BREAKPOINT3
#define ACPI_DISABLE_IRQS() raw_local_irq_disable()
#define ACPI_ENABLE_IRQS() raw_local_irq_enable()
#ifdef CONFIG_ACPI
#include <asm/acenv.h>
enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
IDLE_POLL};
extern int acpi_lapic;
extern int acpi_ioapic;
extern int acpi_noirq;
extern int acpi_strict;
extern int acpi_disabled;
extern int acpi_ht;
extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
extern int acpi_fix_pin2_polarity;
extern u8 acpi_sci_flags;
extern int acpi_sci_override_gsi;
void acpi_pic_sci_set_trigger(unsigned int, u16);
static inline void disable_acpi(void)
{
acpi_disabled = 1;
acpi_ht = 0;
acpi_pci_disabled = 1;
acpi_noirq = 1;
}
extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
static inline void acpi_disable_pci(void)
{
acpi_pci_disabled = 1;
acpi_noirq_set();
}
/* routines for saving/restoring kernel state */
extern int acpi_save_state_mem(void);
extern void acpi_restore_state_mem(void);
extern unsigned long acpi_wakeup_address;
/*
* Check if the CPU can handle C2 and deeper
*/
static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
{
/* here check machine type taken from mptable */
return 1;
}
/*
* Elbrus won't implement _PDC as it is deprecated in ACPI4.0 in favor of _OSC
*/
static inline bool arch_has_acpi_pdc(void)
{
return 0;
}
static inline void arch_acpi_set_pdc_bits(u32 *buf)
{
return;
}
#else /* !CONFIG_ACPI */
#define acpi_lapic 0
#define acpi_ioapic 0
#define acpi_disable_cmcff 0
static inline void acpi_noirq_set(void) { }
static inline void acpi_disable_pci(void) { }
static inline void disable_acpi(void) { }
#endif /* !CONFIG_ACPI */
#define ARCH_HAS_POWER_INIT 0
#define acpi_unlazy_tlb(x)
#endif /* _ASM_L_ACPI_H */

@@ -0,0 +1,776 @@
#ifndef _ASM_L_APIC_H
#define _ASM_L_APIC_H
#include <linux/cpumask.h>
#include <linux/errno.h>
#if 0
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/fixmap.h>
#include <asm/msr.h>
#else
#include <linux/cpumask.h>
#include <asm/percpu.h>
#endif
#include <asm/processor.h>
#include <asm/apicdef.h>
#include <asm/hardirq.h>
#include <linux/atomic.h>
#include <asm/mpspec.h>
#include <asm-l/idle.h>
#if defined CONFIG_E2K || defined CONFIG_E90S
# define cpu_has_tsc 1
# define cpu_has_apic 1
# define cpu_has_x2apic 0
# define READ_APIC_ID() GET_APIC_ID(arch_apic_read(APIC_ID))
extern int first_system_vector;
#endif
#if 0
#define ARCH_APICTIMER_STOPS_ON_C3 1
#endif
/*
* Debugging macros
*/
#define APIC_QUIET 0
#define APIC_VERBOSE 1
#define APIC_DEBUG 2
/*
* Define the default level of output to be very little
* This can be turned up by using apic=verbose for more
* information and apic=debug for _lots_ of information.
* apic_verbosity is defined in apic.c
*/
#define apic_printk(v, s, a...) do { \
if ((v) <= apic_verbosity) \
printk(s, ##a); \
} while (0)
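/*
 * Illustrative call (not from this file): this message is printed only
 * when booting with apic=verbose or apic=debug:
 *
 *	apic_printk(APIC_VERBOSE, "mapped APIC to %p\n", base);
 */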
extern unsigned int calibration_result;
#if defined(CONFIG_L_LOCAL_APIC) && defined(CONFIG_L_X86_32)
extern void generic_apic_probe(void);
#else
static inline void generic_apic_probe(void)
{
}
#endif
#ifdef CONFIG_L_LOCAL_APIC
# define READ_APIC_ID() GET_APIC_ID(arch_apic_read(APIC_ID))
# define BOOT_READ_APIC_ID() GET_APIC_ID(boot_arch_apic_read(APIC_ID))
extern unsigned int apic_verbosity;
extern int local_apic_timer_c2_ok;
#if 0
extern int disable_apic;
#else
#define disable_apic 0
#endif
extern unsigned int lapic_timer_frequency;
#ifdef CONFIG_SMP
extern void __inquire_remote_apic(int apicid);
#else /* CONFIG_SMP */
static inline void __inquire_remote_apic(int apicid)
{
}
#endif /* CONFIG_SMP */
static inline void default_inquire_remote_apic(int apicid)
{
if (apic_verbosity >= APIC_DEBUG)
__inquire_remote_apic(apicid);
}
/*
* With 82489DX we can't rely on apic feature bit
* retrieved via cpuid but still have to deal with
* such an apic chip so we assume that SMP configuration
* is found from MP table (the 64-bit case mostly uses ACPI,
* which sets the smp presence flag as well, so we are safe
* to use this helper too).
*/
static inline bool apic_from_smp_config(void)
{
return smp_found_config && !disable_apic;
}
/*
* Basic functions accessing APICs.
*/
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt/pv_ops.h>
#endif
#if 0
#ifdef CONFIG_L_X86_64
extern int is_vsmp_box(void);
#else
static inline int is_vsmp_box(void)
{
return 0;
}
#endif
#else
# define is_vsmp_box() 0
#endif
extern void xapic_wait_icr_idle(void);
extern u32 safe_xapic_wait_icr_idle(void);
extern void xapic_icr_write(u32, u32);
extern int setup_profiling_timer(unsigned int);
#if 0
static inline void native_apic_mem_write(u32 reg, u32 v)
{
volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
alternative_io("movl %0, %1", "xchgl %0, %1", X86_FEATURE_11AP,
ASM_OUTPUT2("=r" (v), "=m" (*addr)),
ASM_OUTPUT2("0" (v), "m" (*addr)));
}
static inline u32 native_apic_mem_read(u32 reg)
{
return *((volatile u32 *)(APIC_BASE + reg));
}
#else
static inline void native_apic_mem_write(u32 reg, u32 v)
{
arch_apic_write(reg, v);
}
static inline u32 native_apic_mem_read(u32 reg)
{
return arch_apic_read(reg);
}
#endif
extern void native_apic_wait_icr_idle(void);
extern u32 native_safe_apic_wait_icr_idle(void);
extern void native_apic_icr_write(u32 low, u32 id);
extern u64 native_apic_icr_read(void);
extern int x2apic_mode;
#ifdef CONFIG_X86_X2APIC
/*
* Make previous memory operations globally visible before
* sending the IPI through x2apic wrmsr. We need a serializing instruction or
* mfence for this.
*/
static inline void x2apic_wrmsr_fence(void)
{
asm volatile("mfence" : : : "memory");
}
static inline void native_apic_msr_write(u32 reg, u32 v)
{
if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
reg == APIC_LVR)
return;
wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0);
}
static inline void native_apic_msr_eoi_write(u32 reg, u32 v)
{
wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
}
static inline u32 native_apic_msr_read(u32 reg)
{
u64 msr;
if (reg == APIC_DFR)
return -1;
rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
return (u32)msr;
}
static inline void native_x2apic_wait_icr_idle(void)
{
/* no need to wait for icr idle in x2apic */
return;
}
static inline u32 native_safe_x2apic_wait_icr_idle(void)
{
/* no need to wait for icr idle in x2apic */
return 0;
}
static inline void native_x2apic_icr_write(u32 low, u32 id)
{
wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
}
static inline u64 native_x2apic_icr_read(void)
{
unsigned long val;
rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
return val;
}
extern int x2apic_phys;
extern int x2apic_preenabled;
extern void check_x2apic(void);
extern void enable_x2apic(void);
extern void x2apic_icr_write(u32 low, u32 id);
static inline int x2apic_enabled(void)
{
u64 msr;
if (!cpu_has_x2apic)
return 0;
rdmsrl(MSR_IA32_APICBASE, msr);
if (msr & X2APIC_ENABLE)
return 1;
return 0;
}
#define x2apic_supported() (cpu_has_x2apic)
static inline void x2apic_force_phys(void)
{
x2apic_phys = 1;
}
#else
static inline void disable_x2apic(void)
{
}
static inline void check_x2apic(void)
{
}
static inline void enable_x2apic(void)
{
}
static inline int x2apic_enabled(void)
{
return 0;
}
static inline void x2apic_force_phys(void)
{
}
#define nox2apic 0
#define x2apic_preenabled 0
#define x2apic_supported() 0
#endif
extern void enable_IR_x2apic(void);
extern int get_physical_broadcast(void);
extern int lapic_get_maxlvt(void);
extern void clear_local_APIC(void);
extern void connect_bsp_APIC(void);
extern void disconnect_bsp_APIC(int virt_wire_setup);
extern void disable_local_APIC(void);
#ifdef CONFIG_E2K
extern void clear_local_APIC(void);
#endif /* CONFIG_E2K */
extern void lapic_shutdown(void);
extern int verify_local_APIC(void);
extern void sync_Arb_IDs(void);
extern void init_bsp_APIC(void);
extern void setup_local_APIC(void);
extern void end_local_APIC_setup(void);
extern void bsp_end_local_APIC_setup(void);
extern void init_apic_mappings(void);
void register_lapic_address(unsigned long address);
extern void setup_boot_APIC_clock(void);
extern void setup_secondary_APIC_clock(void);
extern int APIC_init_uniprocessor(void);
extern int apic_force_enable(unsigned long addr);
/*
* On 32bit this is mach-xxx local
*/
#ifdef CONFIG_L_X86_64
extern int apic_is_clustered_box(void);
#else
static inline int apic_is_clustered_box(void)
{
return 0;
}
#endif
extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
#else /* !CONFIG_L_LOCAL_APIC */
static inline void lapic_shutdown(void) { }
#define local_apic_timer_c2_ok 1
static inline void init_apic_mappings(void) { }
static inline void disable_local_APIC(void) { }
#ifdef CONFIG_E2K
static inline void clear_local_APIC(void) { }
#endif /* CONFIG_E2K */
# define setup_boot_APIC_clock x86_init_noop
# define setup_secondary_APIC_clock x86_init_noop
#endif /* !CONFIG_L_LOCAL_APIC */
#ifdef CONFIG_L_X86_64
#define SET_APIC_ID(x) (apic->set_apic_id(x))
#else
#endif
/*
* Copyright 2004 James Cleverdon, IBM.
* Subject to the GNU Public License, v.2
*
* Generic APIC sub-arch data struct.
*
* Hacked for x86-64 by James Cleverdon from i386 architecture code by
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
* James Cleverdon.
*/
struct apic {
char *name;
int (*probe)(void);
int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
int (*apic_id_valid)(int apicid);
int (*apic_id_registered)(void);
u32 irq_delivery_mode;
u32 irq_dest_mode;
const struct cpumask *(*target_cpus)(void);
int disable_esr;
int dest_logical;
unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
unsigned long (*check_apicid_present)(int apicid);
void (*vector_allocation_domain)(int cpu, struct cpumask *retmask,
const struct cpumask *mask);
void (*init_apic_ldr)(void);
void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
void (*setup_apic_routing)(void);
int (*multi_timer_check)(int apic, int irq);
int (*cpu_present_to_apicid)(int mps_cpu);
void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
void (*setup_portio_remap)(void);
int (*check_phys_apicid_present)(int phys_apicid);
void (*enable_apic_mode)(void);
int (*phys_pkg_id)(int cpuid_apic, int index_msb);
/*
* When one of the next two hooks returns 1 the apic
* is switched to this. Essentially they are additional
* probe functions:
*/
int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);
unsigned int (*get_apic_id)(unsigned long x);
unsigned long (*set_apic_id)(unsigned int id);
unsigned long apic_id_mask;
int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
const struct cpumask *andmask,
unsigned int *apicid);
/* ipi */
void (*send_IPI_mask)(const struct cpumask *mask, int vector);
void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
int vector);
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
void (*send_IPI_self)(int vector);
/* wakeup_secondary_cpu */
int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
int trampoline_phys_low;
int trampoline_phys_high;
void (*wait_for_init_deassert)(atomic_t *deassert);
void (*smp_callin_clear_local_apic)(void);
void (*inquire_remote_apic)(int apicid);
/* apic ops */
u32 (*read)(u32 reg);
void (*write)(u32 reg, u32 v);
/*
* ->eoi_write() has the same signature as ->write().
*
* Drivers can support both ->eoi_write() and ->write() by passing the same
* callback value. Kernel can override ->eoi_write() and fall back
* on write for EOI.
*/
void (*eoi_write)(u32 reg, u32 v);
u64 (*icr_read)(void);
void (*icr_write)(u32 low, u32 high);
void (*wait_icr_idle)(void);
u32 (*safe_wait_icr_idle)(void);
#ifdef CONFIG_L_X86_32
/*
* Called very early during boot from get_smp_config(). It should
* return the logical apicid. x86_[bios]_cpu_to_apicid is
* initialized before this function is called.
*
* If logical apicid can't be determined that early, the function
* may return BAD_APICID. Logical apicid will be configured after
* init_apic_ldr() while bringing up CPUs. Note that NUMA affinity
* won't be applied properly during early boot in this case.
*/
int (*x86_32_early_logical_apicid)(int cpu);
/*
* Optional method called from setup_local_APIC() after logical
* apicid is guaranteed to be known to initialize apicid -> node
* mapping if NUMA initialization hasn't done so already. Don't
* add new users.
*/
int (*x86_32_numa_cpu_node)(int cpu);
#endif
};
/*
* Pointer to the local APIC driver in use on this system (there's
* always just one such driver in use - the kernel decides via an
* early probing process which one it picks - and then sticks to it):
*/
extern struct apic *apic;
/*
* APIC drivers are probed based on how they are listed in the .apicdrivers
* section. So the order is important and enforced by the ordering
* of different apic driver files in the Makefile.
*
* For the files having two apic drivers, we use apic_drivers()
* to enforce the order within them.
*/
#define apic_driver(sym) \
static const struct apic *__apicdrivers_##sym __used \
__aligned(sizeof(struct apic *)) \
__section(.apicdrivers) = { &sym }
#define apic_drivers(sym1, sym2) \
static struct apic *__apicdrivers_##sym1##sym2[2] __used \
__aligned(sizeof(struct apic *)) \
__section(.apicdrivers) = { &sym1, &sym2 }
extern struct apic *__apicdrivers[], *__apicdrivers_end[];
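/*
 * Registration sketch (apic_flat and flat_probe are assumptions, not
 * defined here).  The driver pointer lands in the .apicdrivers section,
 * so probe order follows link order:
 *
 *	static struct apic apic_flat = {
 *		.name	= "flat",
 *		.probe	= flat_probe,
 *	};
 *	apic_driver(apic_flat);
 */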
/*
* APIC functionality to boot other CPUs - only used on SMP:
*/
#ifdef CONFIG_SMP
extern atomic_t init_deasserted;
extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
#endif
#ifdef CONFIG_L_LOCAL_APIC
static inline u32 apic_read(u32 reg)
{
return apic->read(reg);
}
static inline void apic_write(u32 reg, u32 val)
{
apic->write(reg, val);
}
static inline void apic_eoi(void)
{
apic->eoi_write(APIC_EOI, APIC_EOI_ACK);
}
static inline u64 apic_icr_read(void)
{
return apic->icr_read();
}
static inline void apic_icr_write(u32 low, u32 high)
{
apic->icr_write(low, high);
}
static inline void apic_wait_icr_idle(void)
{
apic->wait_icr_idle();
}
static inline u32 safe_apic_wait_icr_idle(void)
{
return apic->safe_wait_icr_idle();
}
extern void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v));
#else /* CONFIG_L_LOCAL_APIC */
static inline u32 apic_read(u32 reg) { return 0; }
static inline void apic_write(u32 reg, u32 val) { }
static inline void apic_eoi(void) { }
static inline u64 apic_icr_read(void) { return 0; }
static inline void apic_icr_write(u32 low, u32 high) { }
static inline void apic_wait_icr_idle(void) { }
static inline u32 safe_apic_wait_icr_idle(void) { return 0; }
static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {}
#endif /* CONFIG_L_LOCAL_APIC */
static inline void ack_APIC_irq(void)
{
/*
* ack_APIC_irq() actually gets compiled as a single instruction
* ... yummie.
*/
apic_eoi();
}
static inline unsigned default_get_apic_id(unsigned long x)
{
unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
if (APIC_XAPIC(ver)/* || boot_cpu_has(X86_FEATURE_EXTD_APICID)*/)
return (x >> 24) & 0xFF;
else
return (x >> 24) & 0x0F;
}
/*
* Warm reset vector default position:
*/
#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467
#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469
#ifdef CONFIG_L_X86_64
extern int default_acpi_madt_oem_check(char *, char *);
extern void apic_send_IPI_self(int vector);
#endif
static inline void default_wait_for_init_deassert(atomic_t *deassert)
{
while (!atomic_read(deassert))
cpu_relax();
return;
}
extern void generic_bigsmp_probe(void);
#ifdef CONFIG_L_LOCAL_APIC
#if 0
#include <asm/smp.h>
#endif
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
static inline const struct cpumask *default_target_cpus(void)
{
#ifdef CONFIG_SMP
return cpu_online_mask;
#else
return cpumask_of(0);
#endif
}
static inline const struct cpumask *online_target_cpus(void)
{
return cpu_online_mask;
}
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
#ifdef CONFIG_SMP
#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
#else
#define cpu_physical_id(cpu) boot_cpu_physical_apicid
#endif
static inline unsigned int read_apic_id(void)
{
unsigned int reg;
reg = apic_read(APIC_ID);
return apic->get_apic_id(reg);
}
static inline int default_apic_id_valid(int apicid)
{
return (apicid < 255);
}
extern void default_setup_apic_routing(void);
extern struct apic apic_noop;
#ifdef CONFIG_L_X86_32
static inline int noop_x86_32_early_logical_apicid(int cpu)
{
return BAD_APICID;
}
/*
* Set up the logical destination ID.
*
* Intel recommends to set DFR, LDR and TPR before enabling
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes...
*/
extern void default_init_apic_ldr(void);
static inline int default_apic_id_registered(void)
{
return physid_isset(read_apic_id(), phys_cpu_present_map);
}
static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
{
return cpuid_apic >> index_msb;
}
#endif
static inline int
flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,
unsigned int *apicid)
{
unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
cpumask_bits(andmask)[0] &
cpumask_bits(cpu_online_mask)[0] &
APIC_ALL_CPUS;
if (likely(cpu_mask)) {
*apicid = (unsigned int)cpu_mask;
return 0;
} else {
return -EINVAL;
}
}
extern int
default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,
unsigned int *apicid);
static inline void
flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
const struct cpumask *mask)
{
/* Careful. Some cpus do not strictly honor the set of cpus
* specified in the interrupt destination when using lowest
* priority interrupt delivery mode.
*
* In particular there was a hyperthreading cpu observed to
* deliver interrupts to the wrong hyperthread when only one
* hyperthread was specified in the interrupt destination.
*/
cpumask_clear(retmask);
cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}
static inline void
default_vector_allocation_domain(int cpu, struct cpumask *retmask,
const struct cpumask *mask)
{
cpumask_copy(retmask, cpumask_of(cpu));
}
static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
{
return physid_isset(apicid, *map);
}
static inline unsigned long default_check_apicid_present(int bit)
{
return physid_isset(bit, phys_cpu_present_map);
}
static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
*retmap = *phys_map;
}
static inline int __default_cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
else
return BAD_APICID;
}
static inline int
__default_check_phys_apicid_present(int phys_apicid)
{
return physid_isset(phys_apicid, phys_cpu_present_map);
}
/* #ifdef CONFIG_L_X86_32 */
#if 1
static inline int default_cpu_present_to_apicid(int mps_cpu)
{
return __default_cpu_present_to_apicid(mps_cpu);
}
static inline int
default_check_phys_apicid_present(int phys_apicid)
{
return __default_check_phys_apicid_present(phys_apicid);
}
#else
extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int phys_apicid);
#endif
#endif /* CONFIG_L_LOCAL_APIC */
static inline void entering_irq(void)
{
l_irq_enter();
exit_idle();
}
static inline void entering_ack_irq(void)
{
entering_irq();
ack_APIC_irq();
}
static inline void exiting_irq(void)
{
l_irq_exit();
}
static inline void exiting_ack_irq(void)
{
l_irq_exit();
/* Ack only at the end to avoid potential reentry */
ack_APIC_irq();
}
extern void ioapic_zap_locks(void);
struct irq_data;
extern void ack_apic_edge(struct irq_data *data);
#endif /* _ASM_L_APIC_H */

@@ -0,0 +1,529 @@
#ifndef _ASM_L_APICDEF_H
#define _ASM_L_APICDEF_H
/*
* Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
*
* Alan Cox <Alan.Cox@linux.org>, 1995.
* Ingo Molnar <mingo@redhat.com>, 1999, 2000
*/
#define IO_APIC_DEFAULT_PHYS_BASE 0xfec00000
#define APIC_DEFAULT_PHYS_BASE 0xfee00000
/*
* This is the IO-APIC register space as specified
* by Intel docs:
*/
#define IO_APIC_SLOT_SIZE 1024
#define APIC_REGS_SIZE 0x1000
#define APIC_BSP 0x10
#define APIC_BSP_ENABLE 0x00000800
#define APIC_BSP_IS_BSP 0x00000100
#define APIC_ENABLE(x) ((x) & APIC_BSP_ENABLE)
#define BootStrap(x) ((x) & APIC_BSP_IS_BSP)
#define APIC_ID 0x20
#define APIC_ID_SHIFT 24
#define APIC_ID_SIZE 8
#define APIC_ID_BIT_MASK ((1 << APIC_ID_SIZE) - 1)
#define APIC_ID_MASK (APIC_ID_BIT_MASK << \
APIC_ID_SHIFT)
#define GET_APIC_ID(x) (((x) >> APIC_ID_SHIFT) & \
APIC_ID_BIT_MASK)
#define APIC_LVR 0x30
#define APIC_LVR_MASK 0xFF00FF
#define APIC_LVR_DIRECTED_EOI (1 << 24)
#define APIC_MAXLVT 0x03
#define APIC_VERSION 0x10
#define GET_APIC_VERSION(x) ((x) & 0xFFu)
#define GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu)
#define SET_APIC_VERSION(x) ((x) & 0xFF)
#define SET_APIC_MAXLVT(x) (((x) & 0xff) << 16)
#if 0
# define APIC_INTEGRATED(x) ((x) & 0xF0u)
#else
# define APIC_INTEGRATED(x) (1)
#endif
#define APIC_XAPIC(x) ((x) >= 0x14)
#define APIC_EXT_SPACE(x) ((x) & 0x80000000)
#define APIC_TASKPRI 0x80
#define APIC_TPRI_MASK 0xFFu
#define APIC_ARBPRI 0x90
#define APIC_ARBPRI_MASK 0xFFu
#define APIC_PROCPRI 0xA0
#define APIC_EOI 0xB0
#define APIC_EOI_ACK 0x0
#define APIC_RRR 0xC0
#define APIC_LDR 0xD0
#define APIC_LDR_MASK (0xFFu << 24)
#define GET_APIC_LOGICAL_ID(x) (((x) >> 24) & 0xFFu)
#define SET_APIC_LOGICAL_ID(x) (((x) << 24))
#define APIC_ALL_CPUS 0xFFu
#define APIC_DFR 0xE0
#define GET_APIC_DLVR_MODE(x) (((x) >> 28) & 0xF)
#define APIC_DFR_CLUSTER 0x0FFFFFFFul
#define APIC_DFR_FLAT 0xFFFFFFFFul
#define APIC_SPIV 0xF0
#define APIC_SPIV_DIRECTED_EOI (1 << 12)
#define APIC_SPIV_FOCUS_DISABLED (1 << 9)
#define APIC_SPIV_APIC_ENABLED (1 << 8)
#define APIC_SOFT_ENABLED(x) ((x) & APIC_SPIV_APIC_ENABLED)
#define APIC_FOCUS_DISABLED(x) ((x) & APIC_SPIV_FOCUS_DISABLED)
#define APIC_SPIV_SPURIOUS_VECT 0x000FF
#define GET_SPURIOUS_VECTOR(x) ((x) & APIC_SPIV_SPURIOUS_VECT)
#define SET_SPURIOUS_VECTOR(x) ((x) & APIC_SPIV_SPURIOUS_VECT)
#define APIC_ISR 0x100
#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */
#define APIC_TMR 0x180
#define APIC_IRR 0x200
#define APIC_ESR 0x280
#define APIC_ESR_SEND_CS 0x00001
#define APIC_ESR_RECV_CS 0x00002
#define APIC_ESR_SEND_ACC 0x00004
#define APIC_ESR_RECV_ACC 0x00008
#define APIC_ESR_SENDILL 0x00020
#define APIC_ESR_RECVILL 0x00040
#define APIC_ESR_ILLREGA 0x00080
#define APIC_LVTCMCI 0x2f0
#define APIC_ICR 0x300
#define APIC_DEST_SELF 0x40000
#define APIC_DEST_ALLINC 0x80000
#define APIC_DEST_ALLBUT 0xC0000
#define APIC_ICR_RR_MASK 0x30000
#define APIC_ICR_RR_INVALID 0x00000
#define APIC_ICR_RR_INPROG 0x10000
#define APIC_ICR_RR_VALID 0x20000
#define APIC_INT_LEVELTRIG 0x08000
#define APIC_INT_ASSERT 0x04000
#define APIC_ICR_BUSY 0x01000
#define APIC_DEST_LOGICAL 0x00800
#define APIC_DEST_PHYSICAL 0x00000
#define APIC_DM_FIXED 0x00000
#define APIC_DM_LOWEST 0x00100
#define APIC_DM_SMI 0x00200
#define APIC_DM_REMRD 0x00300
#define APIC_DM_NMI 0x00400
#define APIC_DM_INIT 0x00500
#define APIC_DM_STARTUP 0x00600
#define APIC_DM_EXTINT 0x00700
#define APIC_VECTOR_MASK 0x000FF
#define APIC_ICR2 0x310
#define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFF)
#define SET_APIC_DEST_FIELD(x) ((x) << 24)
#define APIC_LVTT 0x320
#define APIC_LVTTHMR 0x330
#define APIC_LVTPC 0x340
#define APIC_LVT0 0x350
#define APIC_LVT_TIMER_BASE_MASK (0x3 << 18)
#define GET_APIC_TIMER_BASE(x) (((x) >> 18) & 0x3)
#define SET_APIC_TIMER_BASE(x) (((x) << 18))
#define APIC_TIMER_BASE_CLKIN 0x0
#define APIC_TIMER_BASE_TMBASE 0x1
#define APIC_TIMER_BASE_DIV 0x2
#define APIC_LVT_TIMER_PERIODIC (1 << 17)
#define APIC_LVT_MASKED (1 << 16)
#define APIC_LVT_LEVEL_TRIGGER (1 << 15)
#define APIC_LVT_REMOTE_IRR (1 << 14)
#define APIC_INPUT_POLARITY (1 << 13)
#define APIC_SEND_PENDING (1 << 12)
#define APIC_MODE_MASK 0x700
#define GET_APIC_DELIVERY_MODE(x) (((x) >> 8) & 0x7)
#define SET_APIC_DELIVERY_MODE(x, y) (((x) & ~0x700) | ((y) << 8))
#define APIC_MODE_FIXED 0x0
#define APIC_MODE_NMI 0x4
#define APIC_MODE_EXTINT 0x7
#define APIC_LVT1 0x360
#define APIC_LVTERR 0x370
#define APIC_TMICT 0x380
#define APIC_TMCCT 0x390
#define APIC_TDCR 0x3E0
#define APIC_SELF_IPI 0x3F0
#define APIC_TDR_DIV_TMBASE (1 << 2)
#define APIC_TDR_DIV_1 0xB
#define APIC_TDR_DIV_2 0x0
#define APIC_TDR_DIV_4 0x1
#define APIC_TDR_DIV_8 0x2
#define APIC_TDR_DIV_16 0x3
#define APIC_TDR_DIV_32 0x8
#define APIC_TDR_DIV_64 0x9
#define APIC_TDR_DIV_128 0xA
#if 0
#define APIC_EFEAT 0x400
#define APIC_ECTRL 0x410
#define APIC_EILVTn(n) (0x500 + 0x10 * n)
#define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */
#define APIC_EILVT_NR_AMD_10H 4
#define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF)
#define APIC_EILVT_MSG_FIX 0x0
#define APIC_EILVT_MSG_SMI 0x2
#define APIC_EILVT_MSG_NMI 0x4
#define APIC_EILVT_MSG_EXT 0x7
#define APIC_EILVT_MASKED (1 << 16)
#endif
#define APIC_NM_TIMER_LVTT 0xf00
#define APIC_NM_TIMER_INIT_COUNT 0xf10
#define APIC_NM_TIMER_CURRENT_COUNT 0xf20
#define APIC_NM_TIMER_DIVIDER 0xf30
#define APIC_LVT2 0xf40
#define APIC_LVT3 0xf50
#define APIC_DSP APIC_LVT3
#define APIC_LVT4 0xf60
#define APIC_M_ERM 0xfc0
#define APIC_NM_WATCHDOG 0x80000000
#define APIC_NM_WATCHDOG1 0x40000000
#define APIC_NM_SPECIAL 0x20000
#define APIC_NM_TIMER 0x10000
#define APIC_NM_NMI_DEBUG_MASK 0x8000
#define APIC_NM_INTQLAPIC_MASK 0x4000
#define APIC_NM_INT_VIOLAT_MASK 0x2000
#define APIC_NM 0xfe0
#define APIC_NM_BIT_MASK 0x7ff00
#define APIC_NM_PCI 0x40000
#define APIC_NM_SPECIAL 0x20000
#define APIC_NM_TIMER 0x10000
#define APIC_NM_NMI_DEBUG 0x8000
#define APIC_NM_INTQLAPIC 0x4000
#define APIC_NM_INT_VIOLAT 0x2000
#define APIC_NM_STARTUP 0x1000
#define APIC_NM_INIT 0x0800
#define APIC_NM_NMI 0x0400
#define APIC_NM_SMI 0x0200
#define APIC_NM_EXTINT 0x0100
#define APIC_NM_STARTUP_ADDR 0x00ff
#define GET_APIC_STARTUP_ADDR(x) ((x) & APIC_NM_STARTUP_ADDR)
#define APIC_NM_MASK(x) ((x) & APIC_NM_BIT_MASK)
#define GET_APIC_NM_BITS(x) (((x) & APIC_NM_BIT_MASK) >> 9)
#define APIC_NM_IS_STRATUP(x) ((x) & APIC_NM_STARTUP)
#define APIC_NM_IS_INIT(x) ((x) & APIC_NM_INIT)
#define APIC_NM_IS_NMI(x) ((x) & APIC_NM_NMI)
#define APIC_NM_IS_SMI(x) ((x) & APIC_NM_SMI)
#define APIC_VECT 0xff0
#define APIC_VECT_VECTOR_MASK 0x000000ff
#define APIC_VECT_EXTINT (1 << 31)
#define APIC_VECT_VECTOR(x) ((x) & APIC_VECT_VECTOR_MASK)
#define APIC_VECT_IS_EXTINT(x) ((x) & APIC_VECT_EXTINT)
#if 0
#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
#define APIC_BASE_MSR 0x800
#else
#define APIC_BASE 0x00000000fee00000UL
#endif
#define X2APIC_ENABLE (1UL << 10)
/*
* the maximum number of IO-APICs depends on the following:
* each IO link can have an IOHUB with an IO-APIC
* each node can have an embedded IO-APIC
*/
#define MAX_IO_APICS (MAX_NUMIOLINKS + MAX_NUMNODES)
#define MAX_LOCAL_APIC MAX_APICS
#if 0
#ifdef CONFIG_L_X86_32
# define MAX_IO_APICS 64
# define MAX_LOCAL_APIC 256
#else
# define MAX_IO_APICS 128
# define MAX_LOCAL_APIC 32768
#endif
#endif
/*
* All x86-64 systems are xAPIC compatible.
* In the following, "apicid" is a physical APIC ID.
*/
#define XAPIC_DEST_CPUS_SHIFT 4
#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
#define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
#define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT)
#define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK)
#define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT)
#if 0
#ifndef __ASSEMBLY__
/*
* the local APIC register structure, memory mapped. Not terribly well
* tested, but we might eventually use this one in the future - the
* problem why we cannot use it right now is the P5 APIC: it has an
* erratum whereby it cannot take 8-bit reads and writes, only 32-bit ones ...
*/
#define u32 unsigned int
struct local_apic {
/*000*/ struct { u32 __reserved[4]; } __reserved_01;
/*010*/ struct { u32 __reserved[4]; } __reserved_02;
/*020*/ struct { /* APIC ID Register */
u32 __reserved_1 : 24,
phys_apic_id : 4,
__reserved_2 : 4;
u32 __reserved[3];
} id;
/*030*/ const
struct { /* APIC Version Register */
u32 version : 8,
__reserved_1 : 8,
max_lvt : 8,
__reserved_2 : 8;
u32 __reserved[3];
} version;
/*040*/ struct { u32 __reserved[4]; } __reserved_03;
/*050*/ struct { u32 __reserved[4]; } __reserved_04;
/*060*/ struct { u32 __reserved[4]; } __reserved_05;
/*070*/ struct { u32 __reserved[4]; } __reserved_06;
/*080*/ struct { /* Task Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} tpr;
/*090*/ const
struct { /* Arbitration Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} apr;
/*0A0*/ const
struct { /* Processor Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} ppr;
/*0B0*/ struct { /* End Of Interrupt Register */
u32 eoi;
u32 __reserved[3];
} eoi;
/*0C0*/ struct { u32 __reserved[4]; } __reserved_07;
/*0D0*/ struct { /* Logical Destination Register */
u32 __reserved_1 : 24,
logical_dest : 8;
u32 __reserved_2[3];
} ldr;
/*0E0*/ struct { /* Destination Format Register */
u32 __reserved_1 : 28,
model : 4;
u32 __reserved_2[3];
} dfr;
/*0F0*/ struct { /* Spurious Interrupt Vector Register */
u32 spurious_vector : 8,
apic_enabled : 1,
focus_cpu : 1,
__reserved_2 : 22;
u32 __reserved_3[3];
} svr;
/*100*/ struct { /* In Service Register */
/*170*/ u32 bitfield;
u32 __reserved[3];
} isr [8];
/*180*/ struct { /* Trigger Mode Register */
/*1F0*/ u32 bitfield;
u32 __reserved[3];
} tmr [8];
/*200*/ struct { /* Interrupt Request Register */
/*270*/ u32 bitfield;
u32 __reserved[3];
} irr [8];
/*280*/ union { /* Error Status Register */
struct {
u32 send_cs_error : 1,
receive_cs_error : 1,
send_accept_error : 1,
receive_accept_error : 1,
__reserved_1 : 1,
send_illegal_vector : 1,
receive_illegal_vector : 1,
illegal_register_address : 1,
__reserved_2 : 24;
u32 __reserved_3[3];
} error_bits;
struct {
u32 errors;
u32 __reserved_3[3];
} all_errors;
} esr;
/*290*/ struct { u32 __reserved[4]; } __reserved_08;
/*2A0*/ struct { u32 __reserved[4]; } __reserved_09;
/*2B0*/ struct { u32 __reserved[4]; } __reserved_10;
/*2C0*/ struct { u32 __reserved[4]; } __reserved_11;
/*2D0*/ struct { u32 __reserved[4]; } __reserved_12;
/*2E0*/ struct { u32 __reserved[4]; } __reserved_13;
/*2F0*/ struct { u32 __reserved[4]; } __reserved_14;
/*300*/ struct { /* Interrupt Command Register 1 */
u32 vector : 8,
delivery_mode : 3,
destination_mode : 1,
delivery_status : 1,
__reserved_1 : 1,
level : 1,
trigger : 1,
__reserved_2 : 2,
shorthand : 2,
__reserved_3 : 12;
u32 __reserved_4[3];
} icr1;
/*310*/ struct { /* Interrupt Command Register 2 */
union {
u32 __reserved_1 : 24,
phys_dest : 4,
__reserved_2 : 4;
u32 __reserved_3 : 24,
logical_dest : 8;
} dest;
u32 __reserved_4[3];
} icr2;
/*320*/ struct { /* LVT - Timer */
u32 vector : 8,
__reserved_1 : 4,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
timer_mode : 1,
__reserved_3 : 14;
u32 __reserved_4[3];
} lvt_timer;
/*330*/ struct { /* LVT - Thermal Sensor */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_thermal;
/*340*/ struct { /* LVT - Performance Counter */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_pc;
/*350*/ struct { /* LVT - LINT0 */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
polarity : 1,
remote_irr : 1,
trigger : 1,
mask : 1,
__reserved_2 : 15;
u32 __reserved_3[3];
} lvt_lint0;
/*360*/ struct { /* LVT - LINT1 */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
polarity : 1,
remote_irr : 1,
trigger : 1,
mask : 1,
__reserved_2 : 15;
u32 __reserved_3[3];
} lvt_lint1;
/*370*/ struct { /* LVT - Error */
u32 vector : 8,
__reserved_1 : 4,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_error;
/*380*/ struct { /* Timer Initial Count Register */
u32 initial_count;
u32 __reserved_2[3];
} timer_icr;
/*390*/ const
struct { /* Timer Current Count Register */
u32 curr_count;
u32 __reserved_2[3];
} timer_ccr;
/*3A0*/ struct { u32 __reserved[4]; } __reserved_16;
/*3B0*/ struct { u32 __reserved[4]; } __reserved_17;
/*3C0*/ struct { u32 __reserved[4]; } __reserved_18;
/*3D0*/ struct { u32 __reserved[4]; } __reserved_19;
/*3E0*/ struct { /* Timer Divide Configuration Register */
u32 divisor : 4,
__reserved_1 : 28;
u32 __reserved_2[3];
} timer_dcr;
/*3F0*/ struct { u32 __reserved[4]; } __reserved_20;
} __attribute__ ((packed));
#undef u32
#endif /* __ASSEMBLY__ */
#endif
#if 0
#ifdef CONFIG_L_X86_32
#define BAD_APICID 0xFFu
#else
#define BAD_APICID 0xFFFFu
#endif
#else
#define BAD_APICID 0xFFu
#endif
#ifndef __ASSEMBLY__
enum ioapic_irq_destination_types {
dest_Fixed = 0,
dest_LowestPrio = 1,
dest_SMI = 2,
dest__reserved_1 = 3,
dest_NMI = 4,
dest_INIT = 5,
dest__reserved_2 = 6,
dest_ExtINT = 7
};
#endif
#endif /* _ASM_L_APICDEF_H */

@@ -0,0 +1,35 @@
#ifndef _ASM_L_BOOT_PROFILING_H
#define _ASM_L_BOOT_PROFILING_H
#ifdef CONFIG_BOOT_TRACE
#include <linux/list.h>
extern void notrace add_boot_trace_event(const char *fmt, ...);
extern struct boot_tracepoint *boot_trace_prev_event(int cpu,
struct boot_tracepoint *event);
extern struct boot_tracepoint *boot_trace_next_event(int cpu,
struct boot_tracepoint *event);
extern void stop_boot_trace(void);
# define BOOT_TRACE_ARRAY_SIZE (1500 + 20 * NR_CPUS)
struct boot_tracepoint {
char name[81];
unsigned int cpu;
u64 cycles;
struct list_head list;
};
extern struct boot_tracepoint boot_trace_events[BOOT_TRACE_ARRAY_SIZE];
extern struct list_head boot_trace_cpu_events_list[];
extern atomic_t boot_trace_top_event;
extern int boot_trace_enabled;
# define BOOT_TRACEPOINT(...) add_boot_trace_event(__VA_ARGS__)
#else /* !CONFIG_BOOT_TRACE */
# define BOOT_TRACEPOINT(...) do { } while(0)
#endif /* CONFIG_BOOT_TRACE */
#endif /* _ASM_L_BOOT_PROFILING_H */
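
BOOT_TRACEPOINT() takes printf-style arguments and compiles to nothing when CONFIG_BOOT_TRACE is off, so call sites need no guards; the formatted string must fit the 81-byte name buffer of struct boot_tracepoint. A usage sketch (the event text is illustrative):

    BOOT_TRACEPOINT("cpu %d reached scheduler init", cpu);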

@@ -0,0 +1,315 @@
#ifndef _L_BOOTINFO_H_
#define _L_BOOTINFO_H_
#if defined(__KERNEL__) || defined(__KVM_BOOTINFO_SUPPORT__)
/*
* 0x0:
* 0x1: extended command line
*/
#define BOOTBLOCK_VER 0x1
#define KSTRMAX_SIZE 128
#define KSTRMAX_SIZE_EX 512
#define BIOS_INFO_SIGN_SIZE 8
#define KERNEL_ARGS_STRING_EX_SIGN_SIZE 22
#define BOOT_VER_STR_SIZE 128
#define BOOTBLOCK_SIZE 0x1000 /* 1 PAGE_SIZE */
#define X86BOOT_SIGNATURE 0x8086
#define ROMLOADER_SIGNATURE 0xe200
#define KVM_GUEST_SIGNATURE 0x20e2
#define BIOS_INFO_SIGNATURE "E2KBIOS"
#define KVM_INFO_SIGNATURE "E2KKVM"
#define KERNEL_ARGS_STRING_EX_SIGNATURE "KERNEL_ARGS_STRING_EX"
#define BOOT_KERNEL_ARGS_STRING_EX_SIGNATURE \
boot_va_to_pa(KERNEL_ARGS_STRING_EX_SIGNATURE)
/*
* Below is the boot information that comes out of the x86 code of the
* Linux/E2K loader prototype.
*/
/* L_MAX_NODE_PHYS_BANKS = 4 is sometimes not enough, so we increase it to
* an arbitrary value (64 now). The old L_MAX_NODE_PHYS_BANKS is renamed to
* L_MAX_NODE_PHYS_BANKS_FUSTY and kept in use for boot_info compatibility.
*
* L_MAX_NODE_PHYS_BANKS_FUSTY and L_MAX_MEM_NUMNODES describe max size of
* array of memory banks on all nodes and should be in accordance with old value
* of L_MAX_PHYS_BANKS for compatibility with boot_info old structure (bank)
* size, so L_MAX_NODE_PHYS_BANKS_FUSTY * L_MAX_MEM_NUMNODES should be
* equal to 32.
*/
#define L_MAX_NODE_PHYS_BANKS 64 /* max number of memory banks */
/* on one node */
#define L_MAX_NODE_PHYS_BANKS_FUSTY 4 /* fusty max number of memory */
/* banks on one node */
#define L_MAX_PHYS_BANKS_EX 64 /* max number of memory banks */
/* in banks_ex field of */
/* boot_info */
#define L_MAX_MEM_NUMNODES 8 /* max number of nodes in the */
/* list of memory banks on */
/* each node */
#define L_MAX_BUSY_AREAS 4 /* max number of busy areas */
/* occupied by BIOS and should be */
/* kept unchanged by kernel to */
/* support recovery mode */
#ifndef __ASSEMBLY__
typedef struct bank_info {
__u64 address; /* start address of bank */
__u64 size; /* size of bank in bytes */
} bank_info_t;
typedef struct node_banks {
bank_info_t banks[L_MAX_NODE_PHYS_BANKS_FUSTY]; /* memory banks array */
/* of a node */
} node_banks_t;
typedef struct boot_times {
__u64 arch;
__u64 unpack;
__u64 pci;
__u64 drivers1;
__u64 drivers2;
__u64 menu;
__u64 sm;
__u64 kernel;
__u64 reserved[8];
} boot_times_t;
typedef struct bios_info {
__u8 signature[BIOS_INFO_SIGN_SIZE]; /* signature, */
/* 'E2KBIOS' */
__u8 boot_ver[BOOT_VER_STR_SIZE]; /* boot version */
__u8 mb_type; /* mother board type */
__u8 chipset_type; /* chipset type */
__u8 cpu_type; /* cpu type */
__u8 kernel_args_string_ex[KSTRMAX_SIZE_EX]; /* extended command */
/* line of kernel */
/* used to pass */
/* command line */
/* from e2k BIOS */
__u8 reserved1; /* reserved1 */
__u32 cache_lines_damaged; /* number of damaged */
/* cache lines */
__u64 nodes_mem_slabs_deprecated[52]; /* array of slabs */
/* accessible memory */
/* on each node */
bank_info_t banks_ex[L_MAX_PHYS_BANKS_EX]; /* extended array of */
/* descriptors of */
/* banks of available */
/* physical memory */
__u64 devtree; /* devtree pointer */
__u32 bootlog_addr; /* bootlog address */
__u32 bootlog_len; /* bootlog length */
__u8 uuid[16]; /* UUID boot device */
} bios_info_t;
typedef struct boot_info {
__u16 signature; /* signature, 0x8086 */
__u8 target_mdl; /* target cpu model number */
__u8 reserved1; /* reserved1 */
__u16 reserved2; /* reserved2 */
__u8 vga_mode; /* vga mode */
__u8 num_of_banks; /* number of available physical memory banks */
/* see below bank array */
/* total number on all nodes or 0 */
__u64 kernel_base; /* base address to load kernel image */
/* if 0 then BIOS can load at any address */
/* but address should be large page size */
/* aligned - 4 Mb */
__u64 kernel_size; /* kernel image size in bytes */
__u64 ramdisk_base; /* base address to load RAM-disk */
/* now not used */
__u64 ramdisk_size; /* RAM-disk size in bytes */
__u16 num_of_cpus; /* number of started physical CPU(s) */
__u16 mach_flags; /* machine identification flags */
/* should be set by our romloader and BIOS */
__u16 num_of_busy; /* number of busy areas occupied by BIOS */
/* see below busy array */
__u16 num_of_nodes; /* number of nodes on NUMA system */
__u64 mp_table_base; /* MP-table base address */
__u64 serial_base; /* base address of serial port for Am85c30 */
/* Used for debugging purpose */
__u64 nodes_map; /* online nodes map */
__u64 mach_serialn; /* serial number of the machine */
__u8 mac_addr[6]; /* base MAC address for ethernet cards */
__u16 reserved3; /* reserved3 */
char kernel_args_string[KSTRMAX_SIZE]; /* command line of kernel */
/* used to pass command line */
/* from e2k BIOS */
node_banks_t nodes_mem[L_MAX_MEM_NUMNODES]; /* array of */
/* descriptors of banks of */
/* available physical memory */
/* on each node */
bank_info_t busy[L_MAX_BUSY_AREAS]; /* descriptors of areas */
/* occupied by BIOS, all this */
/* should be kept in system */
/* recovery mode */
u64 cntp_info_deprecated[32]; /* control points */
/* info to save and */
/* restore them state */
u64 dmp_deprecated[20]; /* Info for future work of */
/* dump analyzer */
__u64 reserved4[13]; /* reserved4 */
__u8 mb_name[16]; /* Motherboard product name */
__u32 reserved5; /* reserved5 */
__u32 kernel_csum; /* kernel image control sum */
bios_info_t bios; /* extended BIOS info */
/* SHOULD BE LAST ITEM into this */
/* structure */
} boot_info_t;
typedef struct bootblock_struct {
boot_info_t info; /* general kernel<->BIOS info */
__u8 /* padding to keep the size of */
/* the bootblock struct constant */
gap[BOOTBLOCK_SIZE -
sizeof (boot_info_t) -
sizeof (boot_times_t) -
1 - /* u8 : bootblock_ver */
4 - /* u32 : reserved1 */
2 - /* u16 : kernel_flags */
1 - /* u8 : reserved2 */
5 - /* u8 : number of cnt points */
/* u8 : current # of cnt point */
/* u8 : number of cnt points */
/* ready in the memory */
/* u8 : number of cnt points */
/* saved on the disk */
/* u8 : all control points */
/* is created */
8 - /* u64 : dump sector */
8 - /* u64 : cnt point sector */
2 - /* u16 : dump device */
2 - /* u16 : cnt point device */
2 - /* u16 : boot_flags */
2]; /* u16 : x86_marker */
__u8 bootblock_ver; /* bootblock version number */
__u32 reserved1; /* reserved1 */
boot_times_t boot_times; /* boot load times */
__u16 kernel_flags; /* kernel flags, boot should */
/* not modify it */
__u8 reserved2; /* reserved2 */
__u8 cnt_points_num_deprecated; /* number of control points */
/* all memory will be divided */
/* into this number of parts */
__u8 cur_cnt_point_deprecated; /* current # of active */
/* control point (running */
/* part) */
__u8 mem_cnt_points_deprecated; /* number of started control */
/* points (ready in the memory) */
__u8 disk_cnt_points_deprecated; /* number of control points */
/* saved on the disk (ready */
/* to be loaded from disk) */
__u8 cnt_points_created_deprecated; /* all control points created */
/* in the memory and on disk */
__u64 dump_sector_deprecated; /* start sector # to dump */
/* physical memory */
__u64 cnt_point_sector_deprecated; /* start sector # to save */
/* restore control points */
__u16 dump_dev_deprecated; /* disk # to dump memory */
__u16 cnt_point_dev_deprecated; /* disk # for save/restore */
/* control point */
__u16 boot_flags; /* boot flags: if non */
/* zero then this structure */
/* is recovery info */
/* structure instead of boot */
/* info structure */
__u16 x86_marker; /* marker of the end of x86 */
/* boot block (0xAA55) */
} bootblock_struct_t;
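/*
 * The gap[] arithmetic above pins sizeof(bootblock_struct_t) to exactly
 * BOOTBLOCK_SIZE (one page).  An equivalent compile-time check (a sketch,
 * not present in this header) would be:
 *
 *	BUILD_BUG_ON(sizeof(bootblock_struct_t) != BOOTBLOCK_SIZE);
 */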
extern bootblock_struct_t *bootblock_virt; /* bootblock structure */
/* virtual pointer */
#endif /* ! __ASSEMBLY__ */
/*
* Boot block flags to elaborate boot modes
*/
#define RECOVERY_BB_FLAG 0x0001 /* recovery flag: if non zero then */
/* this structure is recovery info */
/* structure instead of boot info */
/* structure */
/* BIOS should not clear memory */
/* and should keep current state of */
/* physical memory */
#define CNT_POINT_BB_FLAG 0x0002 /* kernel restarted in the mode of */
/* control point creation */
/* BIOS should read kernel image from */
/* the disk to the specified area of */
/* the memory and start kernel (this */
/* flag should be with */
/* RECOVERY_BB_FLAG flag) */
#define NO_READ_IMAGE_BB_FLAG 0x0004 /* BIOS should not read kernel image */
/* from disk and start current */
/* image in the specified area of */
/* the memory (this flag should be */
/* with RECOVERY_BB_FLAG flag) */
#define DUMP_ANALYZE_BB_FLAG 0x0008 /* This flag is used only by kernel */
/* to indicate dump analyzer mode */
#define MEMORY_DUMP_BB_FLAG 0x0010 /* BIOS should dump all physical */
/* memory before start all other */
/* actions */
/*
* The machine identification flags
*/
#define SIMULATOR_MACH_FLAG 0x0001 /* system is running on */
/* simulator */
#define PROTOTYPE_MACH_FLAG_DEPRECATED 0x0002 /* machine is prototype */
#define IOHUB_MACH_FLAG 0x0004 /* machine has IOHUB */
#define OLDMGA_MACH_FLAG 0x0008 /* MGA card has old firmware */
#define MULTILINK_MACH_FLAG 0x0010 /* some nodes are connected */
/* by several IP links */
#define MSI_MACH_FLAG 0x0020 /* boot inits right values in */
/* apic to support MSI. */
/* Meaningful for e2k only. For */
/* v9 it is always true */
#define KVM_GUEST_MACH_FLAG 0x0100 /* system is running */
/* as KVM guest */
/*
* The chipset types
*/
#define CHIPSET_TYPE_PIIX4 0x01 /* PIIX4 */
#define CHIPSET_TYPE_IOHUB 0x02 /* IOHUB */
/*
* The chipset types names
*/
#define GET_CHIPSET_TYPE_NAME(type) \
({ \
char *name; \
\
switch (type) { \
case CHIPSET_TYPE_PIIX4: \
name = "PIIX4"; \
break; \
case CHIPSET_TYPE_IOHUB: \
name = "IOHUB"; \
break; \
default: \
name = "?????"; \
} \
\
name; \
})
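/*
 * Illustrative use of the statement-expression macro above (bblock is an
 * assumed bootblock_struct_t pointer, not defined here):
 *
 *	pr_info("chipset: %s\n",
 *		GET_CHIPSET_TYPE_NAME(bblock->info.bios.chipset_type));
 */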
extern char *mcst_mb_name;
#endif /* __KERNEL__ || __KVM_BOOTINFO_SUPPORT__ */
#endif /* _L_BOOTINFO_H_ */

@@ -0,0 +1,22 @@
#ifndef _ASM_L_CLK_RT_H
#define _ASM_L_CLK_RT_H
#define CLK_RT_NO 0
#define CLK_RT_RTC 1
#define CLK_RT_EXT 2
#define CLK_RT_RESUME 3
extern struct clocksource clocksource_clk_rt;
extern int clk_rt_mode;
extern atomic_t num_clk_rt_register;
extern int clk_rt_register(void *);
extern struct clocksource clocksource_clk_rt;
extern int proc_clk_rt(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int read_clk_rt_freq(void);
extern void clk_rt_set_mode(void *mode_arg);
extern u64 raw_read_clk_rt(void);
extern struct clocksource lt_cs;
extern struct clocksource *curr_clocksource;
#endif

@@ -0,0 +1,6 @@
#ifndef _ASM_L_CLKR_H
#define _ASM_L_CLKR_H
extern struct clocksource clocksource_clkr;
#endif

@@ -0,0 +1,59 @@
#ifndef _L_CONSOLE_H_
#define _L_CONSOLE_H_
#ifndef __ASSEMBLY__
#include <linux/init.h>
#include <linux/spinlock_types.h>
#include <stdarg.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>
#include <asm-l/console_types.h>
#ifdef CONFIG_SERIAL_PRINTK
# ifdef CONFIG_SERIAL_AM85C30_CONSOLE
extern serial_console_opts_t am85c30_serial_console;
# endif
extern serial_console_opts_t *serial_console_opts;
# define opts_entry(opts, member) opts->member
# define serial_console_opts_entry(entry) opts_entry(serial_console_opts, entry)
extern unsigned char serial_dump_console_num;
extern void *get_serial_console_io_base(void);
extern void setup_serial_dump_console(boot_info_t *);
#endif /* CONFIG_SERIAL_PRINTK */
#ifdef CONFIG_L_EARLY_PRINTK
extern void dump_printk(char const *fmt_v, ...);
extern void dump_vprintk(char const *fmt, va_list ap);
extern void dump_puts(const char *s);
extern void dump_putns(const char *s, int n);
# ifdef CONFIG_EARLY_DUMP_CONSOLE
extern void register_early_dump_console(void);
# else
static inline void register_early_dump_console(void) { };
# endif
# ifdef CONFIG_EARLY_PRINTK
extern int switch_to_early_dump_console(void);
extern void switch_from_early_dump_console(void);
# endif
#else /* !CONFIG_L_EARLY_PRINTK */
# define dump_printk printk
# define dump_vprintk vprintk
# define dump_puts(s) printk("%s", (s))
static inline void register_early_dump_console(void) { };
#endif /* CONFIG_L_EARLY_PRINTK */
#if defined(CONFIG_SERIAL_AM85C30_CONSOLE) && defined(CONFIG_SERIAL_L_ZILOG)
extern raw_spinlock_t *uap_a_reg_lock;
#endif
#endif /* __ASSEMBLY__ */
#endif /* _L_CONSOLE_H_ */

@@ -0,0 +1,31 @@
#ifndef _L_CONSOLE_TYPES_H_
#define _L_CONSOLE_TYPES_H_
#ifndef __ASSEMBLY__
#include <asm/bootinfo.h>
#ifdef CONFIG_E2K
# include <asm/p2v/boot_spinlock_types.h>
extern boot_spinlock_t vprint_lock;
#endif
#define L_LMS_CONS_DATA_PORT LMS_CONS_DATA_PORT
#define L_LMS_CONS_STATUS_PORT LMS_CONS_STATUS_PORT
#define SERIAL_CONSOLE_8250_NAME "8250"
#if defined CONFIG_SERIAL_PRINTK || defined CONFIG_SERIAL_BOOT_PRINTK
# define SERIAL_CONSOLE_16550_NAME "ns16550"
# define SERIAL_CONSOLE_AM85C30_NAME "AM85C30"
typedef struct serial_console_opts_ {
	char *name;
unsigned long long io_base;
unsigned char (*serial_getc)(void);
int (*serial_tstc)(void);
int (*init)(void *serial_io_base);
void (*serial_putc)(unsigned char c);
} serial_console_opts_t;
#endif /* SERIAL_PRINTK || SERIAL_BOOT_PRINTK */
#endif /* __ASSEMBLY__ */
#endif /* _L_CONSOLE_TYPES_H_ */

View File

@ -0,0 +1,13 @@
#ifndef _ASM_L_DEVTREE_H
#define _ASM_L_DEVTREE_H
#include <linux/types.h>
int device_tree_init(void);
void get_dtb_from_boot(u8*, u32);
u32 get_dtb_size(void);
extern int devtree_detected;
#ifdef CONFIG_DTB_L_TEST
extern unsigned char test_blob[];
#endif
#endif /* _ASM_L_DEVTREE_H */

View File

@ -0,0 +1,29 @@
#ifndef ___ASM_L_DMA_DIRECT_H
#define ___ASM_L_DMA_DIRECT_H
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev) /* caller knows better */
return true;
if (!dev->dma_mask)
return false;
#if defined(CONFIG_E2K) && defined(CONFIG_NUMA)
if (cpu_has(CPU_HWBUG_CANNOT_DO_DMA_IN_NEIGHBOUR_NODE)) {
if (page_to_nid(phys_to_page(addr)) != dev_to_node(dev))
return false;
}
#endif
return addr + size - 1 <= *dev->dma_mask;
}
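/*
 * Usage sketch (hypothetical driver code; use_bounce_buffer() is an assumed
 * helper, not part of this header):
 *
 *	dma_addr_t dma = __phys_to_dma(dev, paddr);
 *	if (!dma_capable(dev, dma, size))
 *		use_bounce_buffer();
 *
 * Note that on e2k NUMA systems dma_capable() also rejects buffers living
 * on a different node than the device, per the hardware bug workaround above.
 */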
static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
return paddr;
}
static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
{
return daddr;
}
#endif /* ___ASM_L_DMA_DIRECT_H */

View File

@ -0,0 +1,21 @@
#ifndef ___ASM_L_DMA_MAPPING_H
#define ___ASM_L_DMA_MAPPING_H
#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/dma-debug.h>
/*
* No easy way to get cache size on all processors
* so return the maximum possible to be safe.
*/
#define ARCH_DMA_MINALIGN (1 << INTERNODE_CACHE_SHIFT)
extern const struct dma_map_ops *dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
return dma_ops;
}
#endif /* ___ASM_L_DMA_MAPPING_H */

View File

@ -0,0 +1,103 @@
#ifndef __ASM_L_EPIC_H
#define __ASM_L_EPIC_H
#ifdef __KERNEL__
#include <asm/epicdef.h>
#include <asm/epic_regs.h>
extern unsigned int early_prepic_node_read_w(int node, unsigned int reg);
extern void early_prepic_node_write_w(int node, unsigned int reg,
unsigned int v);
extern unsigned int prepic_node_read_w(int node, unsigned int reg);
extern void prepic_node_write_w(int node, unsigned int reg, unsigned int v);
/*
 * Verbosity can be turned on by passing the 'epic_debug' cmdline parameter;
 * epic_debug is defined in epic.c
*/
extern bool epic_debug;
#define epic_printk(s, a...) do { \
if (epic_debug) \
printk(s, ##a); \
} while (0)
extern bool epic_bgi_mode;
extern unsigned int cepic_timer_delta;
extern void setup_boot_epic_clock(void);
extern void __init setup_bsp_epic(void);
/*
 * The CEPIC_ID register has 10 valid bits: 2 for prepicn (node) and 8 for
 * cepicn (core in node). Since the kernel does not currently support
 * NR_CPUS > 64, we ignore the 4 most significant bits of cepicn.
 *
 * For example, core 0 on node 1 will have full cepic id = 256 and short
 * cepic id = 16.
*/
static inline unsigned int cepic_id_full_to_short(unsigned int reg_value)
{
union cepic_id reg_id;
reg_id.raw = reg_value;
reg_id.bits.cepicn_reserved = 0;
return reg_id.bits.prepicn << CEPIC_ID_SHORT_VALID_BITS
| reg_id.bits.cepicn;
}
static inline unsigned int cepic_id_short_to_full(unsigned int cepic_id)
{
union cepic_id reg_id;
reg_id.raw = 0;
reg_id.bits.cepicn = cepic_id & CEPIC_ID_SHORT_VALID_MASK;
reg_id.bits.prepicn = cepic_id >> CEPIC_ID_SHORT_VALID_BITS;
return reg_id.raw;
}
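/*
 * Worked example for the conversions above (the core 0 / node 1 case from
 * the comment): the full id has only prepicn = 1 set, i.e. raw = 1 << 8 =
 * 256. cepic_id_full_to_short(256) returns (1 << CEPIC_ID_SHORT_VALID_BITS)
 * | 0 = 16, and cepic_id_short_to_full(16) restores raw = 256.
 */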
static inline unsigned int read_epic_id(void)
{
return cepic_id_full_to_short(epic_read_w(CEPIC_ID));
}
static inline bool read_epic_bsp(void)
{
union cepic_ctrl reg;
reg.raw = epic_read_w(CEPIC_CTRL);
return reg.bits.bsp_core;
}
extern void __init_recv setup_prepic(void);
extern void ack_epic_irq(void);
extern void epic_send_IPI(unsigned int dest_id, int vector);
extern void epic_send_IPI_mask(const struct cpumask *mask, int vector);
extern void epic_send_IPI_self(int vector);
extern void epic_send_IPI_mask_allbutself(const struct cpumask *mask,
int vector);
extern void epic_wait_icr_idle(void);
extern void clear_cepic(void);
extern __visible void epic_smp_timer_interrupt(struct pt_regs *regs);
extern __visible void epic_smp_spurious_interrupt(struct pt_regs *regs);
extern __visible void epic_smp_error_interrupt(struct pt_regs *regs);
extern __visible void prepic_smp_error_interrupt(struct pt_regs *regs);
extern __visible void epic_smp_irq_move_cleanup_interrupt(struct pt_regs *regs);
extern __visible void epic_smp_irq_work_interrupt(struct pt_regs *regs);
extern __visible void cepic_epic_interrupt(struct pt_regs *regs);
extern __visible void epic_hc_emerg_interrupt(struct pt_regs *regs);
extern __visible void epic_iommu_interrupt(struct pt_regs *regs);
extern __visible void epic_uncore_interrupt(struct pt_regs *regs);
extern __visible void epic_ipcc_interrupt(struct pt_regs *regs);
extern __visible void epic_hc_interrupt(struct pt_regs *regs);
extern __visible void epic_pcs_interrupt(struct pt_regs *regs);
#ifdef CONFIG_KVM_ASYNC_PF
extern __visible void epic_pv_apf_wake(struct pt_regs *regs);
#endif /* CONFIG_KVM_ASYNC_PF */
#ifdef CONFIG_SMP
extern __visible void epic_smp_reschedule_interrupt(struct pt_regs *regs);
extern __visible void epic_smp_call_function_interrupt(struct pt_regs *regs);
extern __visible void epic_smp_call_function_single_interrupt(
struct pt_regs *regs);
#endif
#endif /* __KERNEL__ */
#endif /* __ASM_L_EPIC_H */

View File

@ -0,0 +1,669 @@
#ifndef __ASM_L_EPIC_REGS_H
#define __ASM_L_EPIC_REGS_H
#include <asm/types.h>
#ifndef __ASSEMBLY__
#ifdef __LITTLE_ENDIAN
union cepic_ctrl {
u32 raw;
struct {
u32 __reserved1 : 8,
bsp_core : 1,
__reserved2 : 1,
soft_en : 1,
__reserved3 : 21;
} __packed bits;
};
/* Ignore 4 bits of CEPIC (core) ID so that physical core ID is <= 64 */
union cepic_id {
u32 raw;
struct {
u32 cepicn : 4,
cepicn_reserved : 4,
prepicn : 2,
__reserved2 : 22;
} __packed bits;
};
union cepic_ctrl2 {
u32 raw;
struct {
u32 mi_gst_blk : 1,
nmi_gst_blk : 1,
int_hv : 1,
__reserved1 : 1,
clear_gst : 1,
__reserved2 : 3,
timer_stop : 1,
__reserved3 : 23;
} __packed bits;
};
union cepic_dat {
u64 raw;
struct {
u64 __reserved1 : 6,
dat_cop : 2,
__reserved2 : 4,
stat : 1,
__reserved3 : 7,
index : 10,
__reserved4 : 2,
__reserved5 : 8,
gst_dst : 10,
__reserved6 : 2,
gst_id : 12;
} __packed bits;
};
union cepic_epic_int {
u32 raw;
struct {
u32 vect : 10,
__reserved1 : 2,
stat : 1,
__reserved2 : 3,
mask : 1,
__reserved3 : 15;
} __packed bits;
};
union cepic_epic_int2 {
u64 raw;
struct {
u64 vect : 10,
dst_sh : 2,
__reserved1 : 1,
dlvm : 3,
__reserved2 : 4,
gst_id : 12,
__reserved3 : 12,
gst_dst : 10,
__reserved4 : 10;
} __packed bits;
};
union cepic_cpr {
u32 raw;
struct {
u32 __reserved1 : 8,
cpr : 3,
__reserved2 : 21;
} __packed bits;
};
union cepic_esr {
u32 raw;
struct {
u32 __reserved1 : 5,
rq_addr_err : 1,
rq_virt_err : 1,
rq_cop_err : 1,
ms_gstid_err : 1,
ms_virt_err : 1,
ms_err : 1,
ms_icr_err : 1,
__reserved2 : 20;
} __packed bits;
};
union cepic_esr2 {
u32 raw;
struct {
u32 vect : 10,
__reserved1 : 2,
stat : 1,
__reserved2 : 3,
mask : 1,
__reserved3 : 15;
} __packed bits;
};
union cepic_eoi {
u32 raw;
struct {
u32 __reserved1 : 16,
rcpr : 3,
__reserved2 : 13;
} __packed bits;
};
union cepic_cir {
u32 raw;
struct {
u32 vect : 10,
__reserved1 : 2,
stat : 1,
__reserved2 : 19;
} __packed bits;
};
union cepic_gstbase_hi {
u32 raw;
struct {
u32 gstbase_hi : 4,
__reserved : 28;
} __packed bits;
};
union cepic_gstid {
u32 raw;
struct {
u32 gstid : 12,
__reserved : 20;
} __packed bits;
};
union cepic_pnmirr {
u32 raw;
struct {
u32 startup_entry : 8,
__reserved1 : 1,
smi : 1,
nmi : 1,
init : 1,
startup : 1,
int_violat : 1,
__reserved2 : 2,
nm_timer : 1,
nm_special : 1,
__reserved3 : 14;
} __packed bits;
};
union cepic_icr {
u64 raw;
struct {
u64 vect : 10,
dst_sh : 2,
stat : 1,
dlvm : 3,
__reserved1 : 4,
gst_id : 12,
__reserved2 : 8,
dst : 10,
__reserved3 : 14;
} __packed bits;
};
union cepic_timer_lvtt {
u32 raw;
struct {
u32 vect : 10,
__reserved1 : 2,
stat : 1,
__reserved2 : 3,
mask : 1,
mode : 1,
__reserved3 : 14;
} __packed bits;
};
union cepic_timer_div {
u32 raw;
struct {
u32 divider : 4,
__reserved1 : 28;
} __packed bits;
};
union cepic_nm_timer_lvtt {
u32 raw;
struct {
u32 __reserved1 : 17,
mode : 1,
__reserved2 : 14;
} __packed bits;
};
union cepic_nm_timer_div {
u32 raw;
struct {
u32 divider : 4,
__reserved1 : 28;
} __packed bits;
};
union cepic_svr {
u32 raw;
struct {
u32 vect : 10,
__reserved1 : 22;
} __packed bits;
};
union cepic_pnmirr_mask {
u32 raw;
struct {
u32 __reserved1 : 9,
smi : 1,
nmi : 1,
__reserved2 : 2,
int_violat : 1,
__reserved3 : 2,
nm_timer : 1,
nm_special : 1,
__reserved4 : 14;
} __packed bits;
};
union cepic_vect_inta {
u32 raw;
struct {
u32 vect : 10,
__reserved1 : 6,
cpr : 3,
__reserved2 : 13;
} __packed bits;
};
union prepic_ctrl {
u32 raw;
struct {
u32 __reserved1 : 8,
bsp : 1,
__reserved2 : 2,
epic_en : 1,
__reserved3 : 20;
} __packed bits;
};
union prepic_id {
u32 raw;
struct {
u32 __reserved1 : 8,
prepicn : 2,
__reserved2 : 22;
} __packed bits;
};
union prepic_ctrl2 {
u32 raw;
struct {
u32 __reserved1 : 9,
bgi_mode : 1,
__reserved2 : 2,
virt_en : 1,
__reserved3 : 19;
} __packed bits;
};
union prepic_err_int {
u32 raw;
struct {
u32 vect : 10,
__reserved1 : 2,
stat : 1,
dlvm : 3,
mask : 1,
__reserved2 : 3,
dst : 10,
__reserved3 : 2;
} __packed bits;
};
union prepic_linpn {
u32 raw;
struct {
u32 vect : 10,
__reserved1 : 2,
stat : 1,
dlvm : 3,
mask : 1,
__reserved2 : 3,
dst : 10,
__reserved3 : 2;
} __packed bits;
};
typedef struct kvm_epic_page {
/*000*/ u32 ctrl;
u32 id;
u32 cpr;
u32 esr;
u32 esr2;
u32 cir;
atomic_t esr_new;
u32 svr;
u64 icr;
u32 timer_lvtt;
u32 timer_init;
u32 timer_cur;
u32 timer_div;
u32 nm_timer_lvtt;
u32 nm_timer_init;
u32 nm_timer_cur;
u32 nm_timer_div;
u32 pnmirr_mask;
/*04c*/ u32 __reserved1[45];
/*100*/ atomic64_t pmirr[16];
/*180*/ u32 __reserved2[24];
/*1e0*/ atomic_t pnmirr;
u32 __reserved3[263];
/*600*/ u8 pnmirr_byte[16];
/*610*/ u32 __reserved4[124];
/*800*/ u8 pmirr_byte[1024];
} epic_page_t;
#elif defined(__BIG_ENDIAN)
union cepic_ctrl {
u32 raw;
struct {
u32 __reserved3 : 21,
soft_en : 1,
__reserved2 : 1,
bsp_core : 1,
__reserved1 : 8;
} __packed bits;
};
/* Ignore 4 bits of CEPIC (core) ID so that physical core ID is <= 64 */
union cepic_id {
u32 raw;
struct {
u32 __reserved2 : 22,
prepicn : 2,
cepicn_reserved : 4,
cepicn : 4;
} __packed bits;
};
union cepic_ctrl2 {
u32 raw;
struct {
u32 __reserved3 : 23,
timer_stop : 1,
__reserved2 : 3,
clear_gst : 1,
__reserved1 : 1,
int_hv : 1,
nmi_gst_blk : 1,
mi_gst_blk : 1;
} __packed bits;
};
union cepic_dat {
u64 raw;
struct {
u64 gst_id : 12,
__reserved6 : 2,
gst_dst : 10,
__reserved5 : 8,
__reserved4 : 2,
index : 10,
__reserved3 : 7,
stat : 1,
__reserved2 : 4,
dat_cop : 2,
__reserved1 : 6;
} __packed bits;
};
union cepic_epic_int {
u32 raw;
struct {
u32 __reserved3 : 15,
mask : 1,
__reserved2 : 3,
stat : 1,
__reserved1 : 2,
vect : 10;
} __packed bits;
};
union cepic_epic_int2 {
u64 raw;
struct {
u64 __reserved4 : 10,
gst_dst : 10,
__reserved3 : 12,
gst_id : 12,
__reserved2 : 4,
dlvm : 3,
__reserved1 : 1,
dst_sh : 2,
vect : 10;
} __packed bits;
};
union cepic_cpr {
u32 raw;
struct {
u32 __reserved2 : 21,
cpr : 3,
__reserved1 : 8;
} __packed bits;
};
union cepic_esr {
u32 raw;
struct {
u32 __reserved2 : 20,
ms_icr_err : 1,
ms_err : 1,
ms_virt_err : 1,
ms_gstid_err : 1,
rq_cop_err : 1,
rq_virt_err : 1,
rq_addr_err : 1,
__reserved1 : 5;
} __packed bits;
};
union cepic_esr2 {
u32 raw;
struct {
u32 __reserved3 : 15,
mask : 1,
__reserved2 : 3,
stat : 1,
__reserved1 : 2,
vect : 10;
} __packed bits;
};
union cepic_eoi {
u32 raw;
struct {
u32 __reserved2 : 13,
rcpr : 3,
__reserved1 : 16;
} __packed bits;
};
union cepic_cir {
u32 raw;
struct {
u32 __reserved2 : 19,
stat : 1,
__reserved1 : 2,
vect : 10;
} __packed bits;
};
union cepic_gstbase_hi {
u32 raw;
struct {
u32 __reserved : 28,
gstbase_hi : 4;
} __packed bits;
};
union cepic_gstid {
u32 raw;
struct {
u32 __reserved : 20,
gstid : 12;
} __packed bits;
};
union cepic_pnmirr {
u32 raw;
struct {
u32 __reserved3 : 14,
nm_special : 1,
nm_timer : 1,
__reserved2 : 2,
int_violat : 1,
startup : 1,
init : 1,
nmi : 1,
smi : 1,
__reserved1 : 1,
startup_entry : 8;
} __packed bits;
};
union cepic_icr {
u64 raw;
struct {
u64 __reserved3 : 14,
dst : 10,
__reserved2 : 8,
gst_id : 12,
__reserved1 : 4,
dlvm : 3,
stat : 1,
dst_sh : 2,
vect : 10;
} __packed bits;
};
union cepic_timer_lvtt {
u32 raw;
struct {
u32 __reserved3 : 14,
mode : 1,
mask : 1,
__reserved2 : 3,
stat : 1,
__reserved1 : 2,
vect : 10;
} __packed bits;
};
union cepic_timer_div {
u32 raw;
struct {
u32 __reserved1 : 28,
divider : 4;
} __packed bits;
};
union cepic_nm_timer_lvtt {
u32 raw;
struct {
u32 __reserved2 : 14,
mode : 1,
__reserved1 : 17;
} __packed bits;
};
union cepic_nm_timer_div {
u32 raw;
struct {
u32 __reserved1 : 28,
divider : 4;
} __packed bits;
};
union cepic_svr {
u32 raw;
struct {
u32 __reserved1 : 22,
vect : 10;
} __packed bits;
};
union cepic_pnmirr_mask {
u32 raw;
struct {
u32 __reserved4 : 14,
nm_special : 1,
nm_timer : 1,
__reserved3 : 2,
int_violat : 1,
__reserved2 : 2,
nmi : 1,
smi : 1,
__reserved1 : 9;
} __packed bits;
};
union cepic_vect_inta {
u32 raw;
struct {
u32 __reserved2 : 13,
cpr : 3,
__reserved1 : 6,
vect : 10;
} __packed bits;
};
union prepic_ctrl {
u32 raw;
struct {
u32 __reserved3 : 20,
epic_en : 1,
__reserved2 : 2,
bsp : 1,
__reserved1 : 8;
} __packed bits;
};
union prepic_id {
u32 raw;
struct {
u32 __reserved2 : 22,
prepicn : 2,
__reserved1 : 8;
} __packed bits;
};
union prepic_ctrl2 {
u32 raw;
struct {
u32 __reserved3 : 19,
virt_en : 1,
__reserved2 : 2,
bgi_mode : 1,
__reserved1 : 9;
} __packed bits;
};
union prepic_err_int {
u32 raw;
struct {
u32 __reserved3 : 2,
dst : 10,
__reserved2 : 3,
mask : 1,
dlvm : 3,
stat : 1,
__reserved1 : 2,
vect : 10;
} __packed bits;
};
union prepic_linpn {
u32 raw;
struct {
u32 __reserved3 : 2,
dst : 10,
__reserved2 : 3,
mask : 1,
dlvm : 3,
stat : 1,
__reserved1 : 2,
vect : 10;
} __packed bits;
};
#else /*__BIG_ENDIAN*/
# error FIXME
#endif
#endif /* !(__ASSEMBLY__) */
#endif /* __ASM_L_EPIC_REGS_H */

View File

@ -0,0 +1,92 @@
#ifndef _ASM_L_EPICDEF_H
#define _ASM_L_EPICDEF_H
/*
* Constants for EPICs (CEPIC, IOEPIC)
*/
#define MAX_EPICS_ORDER 10
#define EPIC_REGS_SIZE 0x2000
#define IO_EPIC_REGS_SIZE 0x100000
/* CEPIC registers */
#define CEPIC_CTRL 0x0
#define CEPIC_CTRL_BSP_CORE 0x100
#define CEPIC_ID 0x10
#define CEPIC_ID_BIT_MASK 0x3ff
#define CEPIC_ID_SHORT_VALID_BITS 4
#define CEPIC_ID_SHORT_VALID_MASK 0xf
#define CEPIC_CPR 0x70
#define CEPIC_CPR_CORE_PRIORITY_SHIFT 8
#define CEPIC_ESR 0x80
#define CEPIC_ESR_BIT_MASK 0x7e0
#define CEPIC_ESR2 0x90
#define CEPIC_EOI 0xa0
#define CEPIC_CIR 0xb0
#define CEPIC_PMIRR 0x100
#define CEPIC_PMIRR_NR_BITS 0x400
#define CEPIC_PMIRR_NR_REGS 0x20
#define CEPIC_PMIRR_NR_DREGS 0x10
#define CEPIC_PNMIRR 0x1e0
#define CEPIC_PNMIRR_BIT_MASK 0x33e00
#define CEPIC_PNMIRR_NMI 0x400
#define CEPIC_PNMIRR_STARTUP 0x1000
#define CEPIC_PNMIRR_STARTUP_ENTRY 0xff
#define CEPIC_ESR_NEW 0x1f0
#define CEPIC_ICR 0x200
#define CEPIC_ICR_DST_FULL 0
#define CEPIC_ICR_DST_SELF 1
#define CEPIC_ICR_DST_ALLBUT 2
#define CEPIC_ICR_DST_ALLINC 3
#define CEPIC_ICR_DLVM_FIXED_EXT 0
#define CEPIC_ICR_DLVM_FIXED_IPI 1
#define CEPIC_ICR_DLVM_SMI 2
#define CEPIC_ICR_DLVM_NM_SPECIAL 3
#define CEPIC_ICR_DLVM_NMI 4
#define CEPIC_ICR_DLVM_INIT 5
#define CEPIC_ICR_DLVM_STARTUP 6
#define CEPIC_ICR2 0x204
#define CEPIC_TIMER_LVTT 0x220
#define CEPIC_TIMER_INIT 0x230
#define CEPIC_TIMER_CUR 0x240
#define CEPIC_TIMER_DIV 0x250
#define CEPIC_TIMER_DIV_1 0xb
#define CEPIC_NM_TIMER_LVTT 0x260
#define CEPIC_NM_TIMER_INIT 0x270
#define CEPIC_NM_TIMER_CUR 0x280
#define CEPIC_NM_TIMER_DIV 0x290
#define CEPIC_SVR 0x2a0
#define CEPIC_PNMIRR_MASK 0x2d0
#define CEPIC_VECT_INTA 0x2f0
#define CEPIC_VECT_INTA_VMASK 0x3ff
#define CEPIC_VECT_INTA_PRI_SHIFT 16
/* CEPIC (HP) registers */
#define CEPIC_GUEST 0x1000
#define CEPIC_CTRL2 0x1820
#define CEPIC_DAT 0x1830
#define CEPIC_DAT_READ 0
#define CEPIC_DAT_INVALIDATE 2
#define CEPIC_DAT_WRITE 3
#define CEPIC_DAT2 0x1834
#define CEPIC_EPIC_INT 0x1850
#define CEPIC_EPIC_INT2 0x1860
#define CEPIC_EPIC_INT3 0x1864
#define CEPIC_GSTBASE_LO 0x18c0
#define CEPIC_GSTBASE_HI 0x18c4
#define CEPIC_GSTID 0x18d0
#define CEPIC_PMIRR_OR 0x1900
#define CEPIC_PNMIRR_OR 0x19e0
#define CEPIC_ESR_NEW_OR 0x19f0
#define CEPIC_PNMIRR_INT_VIOLAT_BIT 13
#define BAD_EPICID 0xffff
#endif /* _ASM_L_EPICDEF_H */

View File

@ -0,0 +1,50 @@
/*
* arch/l/include/gpio.h
*
* Copyright (C) 2012 Evgeny Kravtsunov
*
* AC97-GPIO Controller (part of Elbrus IOHUB).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARCH_GPIO_H_
#define __ASM_ARCH_GPIO_H_
#include <linux/kernel.h>
/* IOHUB GPIO pins */
#define IOHUB_GPIO_0	0
#define IOHUB_GPIO_1 1
#define IOHUB_GPIO_2 2
#define IOHUB_GPIO_3 3
#define IOHUB_GPIO_4 4
#define IOHUB_GPIO_5 5
#define IOHUB_GPIO_6 6
#define IOHUB_GPIO_7 7
#define IOHUB_GPIO_8 8
#define IOHUB_GPIO_9 9
#define IOHUB_GPIO_10 10
#define IOHUB_GPIO_11 11
#define IOHUB_GPIO_12 12
#define IOHUB_GPIO_13 13
#define IOHUB_GPIO_14 14
#define IOHUB_GPIO_15 15
/* Number of the IOHUB's own GPIOs: */
#define ARCH_NR_IOHUB_GPIOS 16
#define ARCH_NR_IOHUB2_GPIOS 32
#define ARCH_MAX_NR_OWN_GPIOS ARCH_NR_IOHUB2_GPIOS
#if IS_ENABLED(CONFIG_INPUT_LTC2954)
#define LTC2954_IRQ_GPIO_PIN IOHUB_GPIO_3
#define LTC2954_KILL_GPIO_PIN IOHUB_GPIO_4
#endif /* CONFIG_INPUT_LTC2954 */
#ifdef CONFIG_GPIOLIB
#include <asm-generic/gpio.h>
#endif /* CONFIG_GPIOLIB */
#endif

View File

@ -0,0 +1,55 @@
#ifndef __ASM_L_HARDIRQ_H
#define __ASM_L_HARDIRQ_H
#include <linux/cache.h>
#include <linux/percpu.h>
typedef struct {
unsigned int __softirq_pending;
unsigned int __nmi_count; /* arch dependent */
#ifdef CONFIG_L_LOCAL_APIC
unsigned int apic_timer_irqs; /* arch dependent */
unsigned int irq_spurious_count;
unsigned int icr_read_retry_count;
unsigned int apic_irq_work_irqs;
#endif
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;
# ifdef CONFIG_E2K
/*
* irq_tlb_count is double-counted in irq_call_count, so it must be
* subtracted from irq_call_count when displaying irq_call_count
*/
unsigned int irq_tlb_count;
# endif
#endif
#if (IS_ENABLED(CONFIG_RDMA) || IS_ENABLED(CONFIG_RDMA_SIC) || \
IS_ENABLED(CONFIG_RDMA_NET))
unsigned int irq_rdma_count;
#endif
#ifdef CONFIG_E2K
#if IS_ENABLED(CONFIG_ELDSP)
unsigned int irq_eldsp_count;
#endif
#endif
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
extern void ack_bad_irq(unsigned int irq);
#define __ARCH_IRQ_STAT
#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
#define inc_irq_stat(member)	__IRQ_STAT(raw_smp_processor_id(), member)++
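/*
 * Usage sketch (hypothetical call site): an interrupt handler bumps its
 * per-CPU counter with
 *
 *	inc_irq_stat(irq_resched_count);
 *
 * which expands to per_cpu(irq_stat, raw_smp_processor_id()).irq_resched_count++.
 */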
extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu arch_irq_stat_cpu
extern u64 arch_irq_stat(void);
#define arch_irq_stat arch_irq_stat
#include <linux/irq_cpustat.h>
#endif /* __ASM_L_HARDIRQ_H */

View File

@ -0,0 +1,141 @@
#ifndef _ASM_L_HW_IRQ_H
#define _ASM_L_HW_IRQ_H
/* required by linux/irq.h */
#include <asm/irq_vectors.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/current.h>
#include <asm/sections.h>
#include <asm/page.h>
#ifdef CONFIG_L_LOCAL_APIC
#ifdef CONFIG_PIC
# define platform_legacy_irq(irq) ((irq) < 16)
#else
# define platform_legacy_irq(irq) 0
#endif
#endif
/*
* Various low-level irq details needed by irq.c, process.c,
* time.c, io_apic.c and smp.c
*
* Interrupt entry/exit code at both C and assembly level
*/
/* IOAPIC */
#ifdef CONFIG_PIC
# define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
extern unsigned long io_apic_irqs;
#else
# define IO_APIC_IRQ(x) 1
#endif
extern void disable_IO_APIC(void);
struct io_apic_irq_attr {
int ioapic;
int ioapic_pin;
int trigger;
int polarity;
};
static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
int ioapic, int ioapic_pin,
int trigger, int polarity)
{
irq_attr->ioapic = ioapic;
irq_attr->ioapic_pin = ioapic_pin;
irq_attr->trigger = trigger;
irq_attr->polarity = polarity;
}
/*
* This is performance-critical, we want to do it O(1)
*
* Most irqs are mapped 1:1 with pins.
*/
struct irq_cfg {
struct irq_pin_list *irq_2_pin;
cpumask_var_t domain;
cpumask_var_t old_domain;
u8 vector;
u8 move_in_progress : 1;
#ifdef CONFIG_INTR_REMAP
struct irq_2_iommu irq_2_iommu;
#endif
};
extern int IO_APIC_get_PCI_irq_vector(int domain, int bus, int devfn, int pin,
struct io_apic_irq_attr *irq_attr);
extern int IO_APIC_get_fix_irq_vector(int domain, int bus, int slot, int func,
int irq);
extern void (*interrupt[NR_VECTORS])(struct pt_regs *regs);
#ifdef CONFIG_TRACING
#define trace_interrupt interrupt
#endif
#define VECTOR_UNDEFINED -1
#define VECTOR_RETRIGGERED -2
typedef int vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
extern void lock_vector_lock(void);
extern void unlock_vector_lock(void);
extern void __setup_vector_irq(int cpu);
#define IO_APIC_VECTOR(irq) ({ \
struct irq_cfg *__cfg = irq_cfg(irq); \
(__cfg) ? __cfg->vector : 0; \
})
extern void setup_ioapic_dest(void);
/* Statistics */
extern atomic_t irq_err_count;
extern atomic_t irq_mis_count;
/* EISA */
extern void eisa_set_level_irq(unsigned int irq);
/* SMP */
extern __visible void smp_apic_timer_interrupt(struct pt_regs *);
extern __visible void smp_spurious_interrupt(struct pt_regs *);
extern __visible void smp_error_interrupt(struct pt_regs *);
extern __visible void smp_irq_move_cleanup_interrupt(struct pt_regs *);
extern __visible void smp_irq_work_interrupt(struct pt_regs *);
#ifdef CONFIG_SMP
extern __visible void smp_reschedule_interrupt(struct pt_regs *regs);
extern __visible void smp_call_function_interrupt(struct pt_regs *regs);
extern __visible void smp_call_function_single_interrupt(struct pt_regs *regs);
#endif
#ifdef CONFIG_TRACING
/* Interrupt handlers registered during init_IRQ */
extern void smp_trace_apic_timer_interrupt(struct pt_regs *regs);
extern void smp_trace_error_interrupt(struct pt_regs *regs);
extern void smp_trace_irq_work_interrupt(struct pt_regs *regs);
extern void smp_trace_spurious_interrupt(struct pt_regs *regs);
extern void smp_trace_reschedule_interrupt(struct pt_regs *regs);
extern void smp_trace_call_function_interrupt(struct pt_regs *regs);
extern void smp_trace_call_function_single_interrupt(struct pt_regs *regs);
#define trace_irq_move_cleanup_interrupt irq_move_cleanup_interrupt
#endif /* CONFIG_TRACING */
extern void do_nmi(struct pt_regs * regs);
extern void l_init_system_handlers_table(void);
extern void epic_init_system_handlers_table(void);
extern void setup_APIC_vector_handler(int vector,
void (*handler)(struct pt_regs *), bool system, char *name);
extern void do_IRQ(struct pt_regs * regs, unsigned int vector);
#endif /* _ASM_L_HW_IRQ_H */

View File

@ -0,0 +1,42 @@
#ifndef __L_ASM_SPI_H__
#define __L_ASM_SPI_H__
#include <asm-l/iolinkmask.h>
#include <linux/i2c.h>
/* PCI registers definitions for reset */
#define PCI_RESET_CONTROL 0x60
#define L_SOFTWARE_RESET_TO_HARD 0x00000004 /* software reset */
/* to hardware reset */
#define L_WATCHDOG_RESET_TO_HARD 0x00000008 /* watchdog reset */
/* to hardware reset */
#define L_SOFTWARE_RESET_TO_SOFT 0x00000010 /* software reset */
/* to soft reset */
#define L_WATCHDOG_RESET_TO_SOFT 0x00000020 /* watchdog reset */
/* to soft reset */
#define L_RED_RESET_OUT 0x80000080 /* Led control */
#define PCI_SOFT_RESET_CONTROL 0x64
#define L_SOFTWARE_RESET 0x00000001
#define L_SOFTWARE_RESET_DONE 0x00000002
#define L_LAST_RESET_INFO 0x000000fc /* last reset type */
#define PCI_SOFT_RESET_DURATION 0x68
#define L_IOHUB_SOFT_RESET_DURATION 0x0000ffff
#define L_IOHUB2_SOFT_RESET_DURATION 0x00ffffff
/* Common SPI & I2C definitions */
#define I2C_SPI_CNTRL_AREA_SIZE 0x40
#define I2C_SPI_DATA_AREA_SIZE 0x40
#define I2C_SPI_DEFAULT_IRQ 23
#define I2C_MAX_ADAPTERS_PER_CONTROLLER 5
#define I2C_MAX_BUSSES I2C_MAX_ADAPTERS_PER_CONTROLLER
#ifdef CONFIG_E2K
extern int iohub_i2c_line_id;
#else
#define iohub_i2c_line_id 0
#endif
#endif /* __L_ASM_SPI_H__ */

View File

@ -0,0 +1,7 @@
#ifndef _ASM_L_IDLE_H
#define _ASM_L_IDLE_H
static inline void enter_idle(void) { }
static inline void exit_idle(void) { }
#endif /* _ASM_L_IDLE_H */

View File

@ -0,0 +1,307 @@
#ifndef _ASM_L_IO_APIC_H
#define _ASM_L_IO_APIC_H
#include <linux/types.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/irq_vectors.h>
#if 0
#include <asm/x86_init.h>
#endif
/*
* Intel IO-APIC support for SMP and UP systems.
*
* Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
*/
/* I/O Unit Redirection Table */
#define IO_APIC_REDIR_VECTOR_MASK 0x000FF
#define IO_APIC_REDIR_DEST_LOGICAL 0x00800
#define IO_APIC_REDIR_DEST_PHYSICAL 0x00000
#define IO_APIC_REDIR_SEND_PENDING (1 << 12)
#define IO_APIC_REDIR_REMOTE_IRR (1 << 14)
#define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15)
#define IO_APIC_REDIR_MASKED (1 << 16)
#if 0
/*
* The structure of the IO-APIC:
*/
union IO_APIC_reg_00 {
u32 raw;
struct {
u32 __reserved_2 : 14,
LTS : 1,
delivery_type : 1,
__reserved_1 : 8,
ID : 8;
} __attribute__ ((packed)) bits;
};
union IO_APIC_reg_01 {
u32 raw;
struct {
u32 version : 8,
__reserved_2 : 7,
PRQ : 1,
entries : 8,
__reserved_1 : 8;
} __attribute__ ((packed)) bits;
};
union IO_APIC_reg_02 {
u32 raw;
struct {
u32 __reserved_2 : 24,
arbitration : 4,
__reserved_1 : 4;
} __attribute__ ((packed)) bits;
};
union IO_APIC_reg_03 {
u32 raw;
struct {
u32 boot_DT : 1,
__reserved_1 : 31;
} __attribute__ ((packed)) bits;
};
struct IO_APIC_route_entry {
__u32 vector : 8,
delivery_mode : 3, /* 000: FIXED
* 001: lowest prio
* 111: ExtINT
*/
dest_mode : 1, /* 0: physical, 1: logical */
delivery_status : 1,
polarity : 1,
irr : 1,
trigger : 1, /* 0: edge, 1: level */
mask : 1, /* 0: enabled, 1: disabled */
__reserved_2 : 15;
__u32 __reserved_3 : 24,
dest : 8;
} __attribute__ ((packed));
struct IR_IO_APIC_route_entry {
__u64 vector : 8,
zero : 3,
index2 : 1,
delivery_status : 1,
polarity : 1,
irr : 1,
trigger : 1,
mask : 1,
reserved : 31,
format : 1,
index : 15;
} __attribute__ ((packed));
#endif
#define IOAPIC_AUTO -1
#define IOAPIC_EDGE 0
#define IOAPIC_LEVEL 1
#ifdef CONFIG_L_IO_APIC
extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
/*
* # of IO-APICs and # of IRQ routing registers
*/
extern int nr_ioapics;
extern int mpc_ioapic_id(int ioapic);
extern unsigned long mpc_ioapic_addr(int ioapic);
extern struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic);
#define MP_MAX_IOAPIC_PIN 127
/* # of MP IRQ source entries */
extern int mp_irq_entries;
/* MP IRQ source entries */
extern struct mpc_intsrc mp_irqs[];
/* non-0 if default (table-less) MP configuration */
extern int mpc_default_type;
/* Older SiS APIC requires we rewrite the index register */
extern int sis_apic_bug;
/* 1 if "noapic" boot option passed */
extern int skip_ioapic_setup;
/* 1 if "noapic" boot option passed */
extern int noioapicquirk;
/* -1 if "noapic" boot option passed */
extern int noioapicreroute;
/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
extern int timer_through_8259;
/*
* If we use the IO-APIC for IRQ routing, disable automatic
* assignment of PCI IRQ's.
*/
#ifdef CONFIG_PIC
#define io_apic_assign_pci_irqs \
(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
#else
#define io_apic_assign_pci_irqs \
(mp_irq_entries && !skip_ioapic_setup)
#endif
extern void setup_IO_APIC(void);
extern void enable_IO_APIC(void);
struct io_apic_irq_attr;
struct irq_cfg;
struct device;
extern int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr);
void setup_IO_APIC_irq_extra(u32 gsi);
extern void ioapic_insert_resources(void);
extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *,
unsigned int, int,
struct io_apic_irq_attr *);
extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg);
struct pci_dev;
struct msi_msg;
extern void native_compose_msi_msg(struct pci_dev *pdev,
unsigned int irq, unsigned int dest,
struct msi_msg *msg, u8 hpet_id);
extern void native_eoi_ioapic_pin(int apic, int pin, int vector);
int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr);
extern int save_ioapic_entries(void);
extern void mask_ioapic_entries(void);
extern int restore_ioapic_entries(void);
extern void probe_nr_irqs_gsi(void);
extern int get_nr_irqs_gsi(void);
extern int set_ioapic_affinity_irq(unsigned int, const struct cpumask *);
extern void setup_ioapic_ids_from_mpc(void);
extern void setup_ioapic_ids_from_mpc_nocheck(void);
struct mp_ioapic_gsi {
u32 gsi_base;
u32 gsi_end;
};
extern struct mp_ioapic_gsi mp_gsi_routing[];
extern u32 gsi_top;
int mp_find_ioapic(u32 gsi);
int mp_find_ioapic_pin(int ioapic, u32 gsi);
#if defined CONFIG_E2K || defined CONFIG_E90S
void __init mp_register_ioapic(int id, unsigned long address, u32 gsi_base);
#else
void __init mp_register_ioapic(int id, u32 address, u32 gsi_base);
#endif
extern void __init pre_init_apic_IRQ0(void);
extern void mp_save_irq(struct mpc_intsrc *m);
extern void disable_ioapic_support(void);
extern void __init native_io_apic_init_mappings(void);
extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg);
extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val);
extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val);
extern void native_disable_io_apic(void);
extern void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
extern void intel_ir_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
struct irq_data;
extern int native_ioapic_set_affinity(struct irq_data *,
const struct cpumask *,
bool);
static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
#if 0
return x86_io_apic_ops.read(apic, reg);
#else
return native_io_apic_read(apic, reg);
#endif
}
static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
#if 0
x86_io_apic_ops.write(apic, reg, value);
#else
native_io_apic_write(apic, reg, value);
#endif
}
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
#if 0
x86_io_apic_ops.modify(apic, reg, value);
#else
native_io_apic_modify(apic, reg, value);
#endif
}
extern void io_apic_eoi(unsigned int apic, unsigned int vector);
extern unsigned int __create_irqs(unsigned int from, unsigned int count,
int node);
extern void destroy_irqs(unsigned int irq, unsigned int count);
extern int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
struct msi_msg *msg, u8 hpet_id);
extern int ioapic_retrigger_irq(struct irq_data *data);
extern int __ioapic_set_affinity(struct irq_data *data,
const struct cpumask *mask,
unsigned int *dest_id);
extern int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic,
int pin);
extern unsigned int ioapic_cfg_get_pin(struct irq_cfg *cfg);
extern unsigned int ioapic_cfg_get_idx(struct irq_cfg *cfg);
#else /* !CONFIG_L_IO_APIC */
#define io_apic_assign_pci_irqs 0
#define setup_ioapic_ids_from_mpc x86_init_noop
static const int timer_through_8259 = 0;
static inline void ioapic_insert_resources(void) { }
#define gsi_top (NR_IRQS_LEGACY)
static inline int mp_find_ioapic(u32 gsi) { return 0; }
struct io_apic_irq_attr;
static inline int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr) { return 0; }
static inline int save_ioapic_entries(void)
{
return -ENOMEM;
}
static inline void mask_ioapic_entries(void) { }
static inline int restore_ioapic_entries(void)
{
return -ENOMEM;
}
static inline void mp_save_irq(struct mpc_intsrc *m) { }
static inline void disable_ioapic_support(void) { }
#define native_io_apic_init_mappings NULL
#define native_io_apic_read NULL
#define native_io_apic_write NULL
#define native_io_apic_modify NULL
#define native_disable_io_apic NULL
#define native_io_apic_print_entries NULL
#define native_ioapic_set_affinity NULL
#define native_setup_ioapic_entry NULL
#define native_compose_msi_msg NULL
#define native_eoi_ioapic_pin NULL
#endif
extern int __init calibrate_APIC_clock(void);
#endif /* _ASM_L_IO_APIC_H */

View File

@ -0,0 +1,77 @@
#ifndef _ASM_L_IO_EPIC_H
#define _ASM_L_IO_EPIC_H
#include <linux/types.h>
#include <asm/mpspec.h>
#include <asm/epicdef.h>
#include <asm/irq_vectors.h>
#define IOEPIC_ID 0x0
#define IOEPIC_VERSION 0x4
#define IOEPIC_INT_RID(pin)		(0x800 + 0x4 * (pin))
#define IOEPIC_TABLE_INT_CTRL(pin)	(0x20 + 0x1000 * (pin))
#define IOEPIC_TABLE_MSG_DATA(pin)	(0x24 + 0x1000 * (pin))
#define IOEPIC_TABLE_ADDR_HIGH(pin)	(0x28 + 0x1000 * (pin))
#define IOEPIC_TABLE_ADDR_LOW(pin)	(0x2c + 0x1000 * (pin))
#define MAX_IO_EPICS (MAX_NUMIOLINKS + MAX_NUMNODES)
#define IOEPIC_AUTO -1
#define IOEPIC_EDGE 0
#define IOEPIC_LEVEL 1
#define IOEPIC_VERSION_1 1
#define IOEPIC_VERSION_2 2 /* Fast level EOI (without reading int_ctrl) */
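/*
 * Example: the per-pin register tables are spaced 0x1000 apart, so for
 * pin 2 IOEPIC_TABLE_INT_CTRL(2) == 0x20 + 0x1000 * 2 == 0x2020 and
 * IOEPIC_TABLE_MSG_DATA(2) == 0x2024.
 */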
extern int nr_ioepics;
extern void setup_io_epic(void);
extern void __init mp_register_ioepic(int ver, int id, int node,
unsigned long address, u32 gsi_base);
extern int ioepic_pin_to_irq(unsigned int pin, struct pci_dev *dev);
struct mp_ioepic_gsi {
unsigned int gsi_base;
unsigned int gsi_end;
};
/*
 * The cpumask fields 'domain' and 'old_domain' from the APIC irq_cfg are
 * replaced with an int dest here. As with the APIC in physical addressing
 * mode, there is no need for a cpumask when only one CPU bit is ever set.
*/
struct epic_irq_cfg {
unsigned short pin;
unsigned short epic;
unsigned short old_dest;
unsigned short dest;
unsigned short vector;
unsigned char move_in_progress : 1;
#ifdef CONFIG_INTR_REMAP
struct irq_2_iommu irq_2_iommu;
#endif
};
#define IO_EPIC_VECTOR(irq) ({ \
struct epic_irq_cfg *__cfg = irq_get_chip_data(irq); \
(__cfg) ? __cfg->vector : 0; \
})
struct io_epic_irq_attr {
int ioepic;
int ioepic_pin;
int trigger;
int rid;
};
struct irq_chip;
extern struct irq_chip ioepic_chip;
extern unsigned long used_vectors[];
extern unsigned long io_epic_base_node(int node);
/* FIXME should be removed after proper passthrough implementation */
extern unsigned int io_epic_read(unsigned int epic, unsigned int reg);
extern void io_epic_write(unsigned int epic, unsigned int reg,
unsigned int value);
extern int pirq_enable_irq(struct pci_dev *dev);
#endif /* _ASM_L_IO_EPIC_H */

View File

@ -0,0 +1,147 @@
#ifndef __ASM_L_IO_EPIC_REGS_H
#define __ASM_L_IO_EPIC_REGS_H
#include <asm/types.h>
#ifdef __LITTLE_ENDIAN
/* The structure of the IO-EPIC */
union IO_EPIC_ID {
u32 raw;
struct {
u32 id : 16,
nodeid : 16;
} __packed bits;
};
union IO_EPIC_VERSION {
u32 raw;
struct {
u32 version : 8,
__reserved2 : 8,
entries : 8,
__reserved1 : 8;
} __packed bits;
};
union IO_EPIC_INT_CTRL {
u32 raw;
struct {
u32 __reserved3 : 12,
delivery_status : 1,
software_int : 1,
__reserved2 : 1,
trigger : 1, /* 0: edge, 1: level */
mask : 1, /* 0: enabled, 1: disabled */
__reserved1 : 15;
} __packed bits;
};
union IO_EPIC_MSG_DATA {
u32 raw;
struct {
u32 vector : 10,
__reserved2 : 3,
dlvm : 3,
__reserved1 : 16;
} __packed bits;
};
union IO_EPIC_MSG_ADDR_LOW {
u32 raw;
struct {
u32 __reserved3 : 2,
msg_type : 3,
__reserved2 : 1,
dst : 10,
__reserved1 : 4,
MSI : 12;
} __packed bits;
};
union IO_EPIC_REQ_ID {
u32 raw;
struct {
u32 fn : 3,
dev : 5,
bus : 8,
__reserved1 : 16;
} __packed bits;
};
#elif defined(__BIG_ENDIAN)
/* The structure of the IO-EPIC */
union IO_EPIC_ID {
u32 raw;
struct {
u32 nodeid : 16,
id : 16;
} __packed bits;
};
union IO_EPIC_VERSION {
u32 raw;
struct {
u32 __reserved1 : 8,
entries : 8,
__reserved2 : 8,
version : 8;
} __packed bits;
};
union IO_EPIC_INT_CTRL {
u32 raw;
struct {
u32 __reserved1 : 15,
mask : 1, /* 0: enabled, 1: disabled */
trigger : 1, /* 0: edge, 1: level */
__reserved2 : 1,
software_int : 1,
delivery_status : 1,
__reserved3 : 12;
} __packed bits;
};
union IO_EPIC_MSG_DATA {
u32 raw;
struct {
u32 __reserved1 : 16,
dlvm : 3,
__reserved2 : 3,
vector : 10;
} __packed bits;
};
union IO_EPIC_MSG_ADDR_LOW {
u32 raw;
struct {
u32 MSI : 12,
__reserved1 : 4,
dst : 10,
__reserved2 : 1,
msg_type : 3,
__reserved3 : 2;
} __packed bits;
};
union IO_EPIC_REQ_ID {
u32 raw;
struct {
u32 __reserved1 : 16,
bus : 8,
dev : 5,
fn : 3;
} __packed bits;
};
#else /*__BIG_ENDIAN*/
# error What is the endianness?
#endif
struct IO_EPIC_route_entry {
union IO_EPIC_INT_CTRL int_ctrl;
union IO_EPIC_MSG_DATA msg_data;
u32 addr_high;
union IO_EPIC_MSG_ADDR_LOW addr_low;
union IO_EPIC_REQ_ID rid;
} __packed;
#endif /* __ASM_L_IO_EPIC_REGS_H */

View File

@ -0,0 +1,106 @@
#ifndef __ASM_L_IO_PIC_H
#define __ASM_L_IO_PIC_H
/*
 * Choose between IO-PICs in arch/l. If CONFIG_EPIC=n, the IO-APIC is chosen
 * statically. If CONFIG_EPIC=y (only on e2k), both IO-APIC and IO-EPIC calls
 * are used, depending on the nr_ioapics and nr_ioepics variables.
*/
#ifdef CONFIG_EPIC
#include <asm/io_apic.h>
#include <asm/io_epic.h>
struct io_apic_irq_attr;
extern int io_epic_get_PCI_irq_vector(int bus, int devfn, int pin);
extern int IO_APIC_get_PCI_irq_vector(int domain, int bus, int devfn, int pin,
struct io_apic_irq_attr *irq_attr);
static inline int IO_PIC_get_PCI_irq_vector(int domain, int bus, int slot,
int pin, struct io_apic_irq_attr *irq_attr)
{
int pic_irq = -1;
if (nr_ioepics)
pic_irq = io_epic_get_PCI_irq_vector(bus, slot, pin);
if (pic_irq == -1 && nr_ioapics)
pic_irq = IO_APIC_get_PCI_irq_vector(domain, bus, slot, pin,
irq_attr);
return pic_irq;
}
extern int io_epic_get_fix_irq_vector(int domain, int bus, int slot, int func,
int irq);
extern int IO_APIC_get_fix_irq_vector(int domain, int bus, int slot, int func,
int irq);
static inline int IO_PIC_get_fix_irq_vector(int domain, int bus, int slot,
int func, int irq)
{
int pic_irq = -1;
if (nr_ioepics)
pic_irq = io_epic_get_fix_irq_vector(domain, bus, slot, func,
irq);
if (pic_irq == -1 && nr_ioapics)
pic_irq = IO_APIC_get_fix_irq_vector(domain, bus, slot, func,
irq);
return pic_irq;
}
extern void __epic_setup_vector_irq(int cpu);
extern void __apic_setup_vector_irq(int cpu);
static inline void __pic_setup_vector_irq(int cpu)
{
if (nr_ioepics)
__epic_setup_vector_irq(cpu);
if (nr_ioapics)
__apic_setup_vector_irq(cpu);
}
extern void print_IO_APICs(void);
extern void print_IO_EPICs(void);
static inline void print_IO_PICs(void)
{
if (nr_ioepics)
print_IO_EPICs();
if (nr_ioapics)
print_IO_APICs();
}
#else /* !(CONFIG_EPIC) */
#include <asm/io_apic.h>
struct io_apic_irq_attr;
extern int IO_APIC_get_PCI_irq_vector(int domain, int bus, int devfn, int pin,
struct io_apic_irq_attr *irq_attr);
static inline int IO_PIC_get_PCI_irq_vector(int domain, int bus, int slot,
int pin, struct io_apic_irq_attr *irq_attr)
{
return IO_APIC_get_PCI_irq_vector(domain, bus, slot, pin, irq_attr);
}
extern int IO_APIC_get_fix_irq_vector(int domain, int bus, int slot, int func,
int irq);
static inline int IO_PIC_get_fix_irq_vector(int domain, int bus, int slot,
int func, int irq)
{
return IO_APIC_get_fix_irq_vector(domain, bus, slot, func, irq);
}
extern void __apic_setup_vector_irq(int cpu);
static inline void __pic_setup_vector_irq(int cpu)
{
__apic_setup_vector_irq(cpu);
}
extern void print_IO_APICs(void);
static inline void print_IO_PICs(void)
{
print_IO_APICs();
}
#endif /* !(CONFIG_EPIC) */
#endif /* __ASM_L_IO_PIC_H */

View File

@ -0,0 +1,606 @@
#ifndef __ASM_L_IOLINKMASK_H
#define __ASM_L_IOLINKMASK_H
/*
* Based on include/linux/nodemask.h
* IOLINKmasks provide a bitmap suitable for representing the
 * set of IOLINKs in a system, one bit position per IOLINK domain number.
 *
 * An IOLINK can be identified either by its global domain number or by a
 * pair: node and local link number on that node.
 * The main macros and functions therefore operate on domain numbers and
 * have counterparts that take the node/link pair, for example:
 *	iolink_set(domain, ...)
 *	node_iolink_set(node, link, ...)
 *
 * IOLINK is the common name for an IO link, which can be connected to an
 * IOHUB (peripheral interface controller) or to RDMA (DMA with remote
 * systems), so the macros also have IOHUB and RDMA variants, for example:
 *	iolink_set(...)
 *	iohub_set(...)
 *	rdma_set(...)
*
* See detailed comments in the file linux/bitmap.h describing the
* data type on which these iolinkmasks are based.
*
* For details of iolinkmask_scnprintf() and iolinkmask_parse(),
* see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
* For details of iolinklist_scnprintf() and iolinklist_parse(), see
* bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
*
* The available iolinkmask operations are:
*
* void iolink_set(iolink, mask) turn on bit 'iolink' in mask
* void iolink_clear(iolink, mask) turn off bit 'iolink' in mask
* void iolinks_setall(mask) set all bits
* void iolinks_clear(mask) clear all bits
* int iolink_isset(iolink, mask) true iff bit 'iolink' set in mask
* int iolink_test_and_set(iolink, mask) test and set bit 'iolink' in mask
*
* void iolinks_and(dst, src1, src2) dst = src1 & src2 [intersection]
* void iolinks_or(dst, src1, src2) dst = src1 | src2 [union]
* void iolinks_xor(dst, src1, src2) dst = src1 ^ src2
* void iolinks_andnot(dst, src1, src2) dst = src1 & ~src2
* void iolinks_complement(dst, src) dst = ~src
*
* int iolinks_equal(mask1, mask2) Does mask1 == mask2?
* int iolinks_intersects(mask1, mask2) Do mask1 and mask2 intersect?
* int iolinks_subset(mask1, mask2) Is mask1 a subset of mask2?
* int iolinks_empty(mask) Is mask empty (no bits sets)?
* int iolinks_full(mask) Is mask full (all bits sets)?
* int iolinks_weight(mask) Hamming weight - number of set bits
*
* void iolinks_shift_right(dst, src, n) Shift right
* void iolinks_shift_left(dst, src, n) Shift left
*
* int first_iolink(mask) Number lowest set bit, or MAX_NUMIOLINKS
* int next_iolink(iolink, mask) Next iolink past 'iolink', or MAX_NUMIOLINKS
* int first_unset_iolink(mask) First iolink not set in mask, or
* MAX_NUMIOLINKS.
*
* iolinkmask_t iolinkmask_of_iolink(iolink) Return iolinkmask with bit 'iolink' set
* IOLINK_MASK_ALL Initializer - all bits set
* IOLINK_MASK_NONE Initializer - no bits set
* unsigned long *iolinks_addr(mask) Array of unsigned long's in mask
*
* int iolinkmask_scnprintf(buf, len, mask) Format iolinkmask for printing
* int iolinkmask_parse(ubuf, ulen, mask) Parse ascii string as iolinkmask
* int iolinklist_scnprintf(buf, len, mask) Format iolinkmask as list for printing
* int iolinklist_parse(buf, map) Parse ascii string as iolinklist
*
* for_each_iolink_mask(iolink, mask) for-loop iolink over mask
*
* int num_online_iolinks() Number of online IOLINKs
* int num_possible_iolinks() Number of all possible IOLINKs
*
* int iolink_online(iolink) Is some iolink domain online?
* int iolink_possible(iolink) Is some iolink domain possible?
*
* iolink_set_online(iolink) set bit 'iolink' in iolink_online_map
* iolink_set_offline(iolink) clear bit 'iolink' in iolink_online_map
*
* for_each_iolink(iolink) for-loop iolink over iolink_possible_map
* for_each_online_iolink(iolink) for-loop iolink over iolink_online_map
*
* Subtlety:
* 1) The 'type-checked' form of iolink_isset() causes gcc (3.3.2, anyway)
* to generate slightly worse code. So use a simple one-line #define
* for iolink_isset(), instead of wrapping an inline inside a macro, the
* way we do the other calls.
*/
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/numa.h>
#include <linux/topology.h>
#include <asm/bug.h>
#define MAX_NUMIOLINKS MACH_MAX_NUMIOLINKS
#define MAX_NUMIOHUBS MAX_NUMIOLINKS
#define NODE_NUMIOLINKS MACH_NODE_NUMIOLINKS
typedef struct { DECLARE_BITMAP(bits, MAX_NUMIOLINKS); } iolinkmask_t;
extern iolinkmask_t _unused_iolinkmask_arg_;
#define iolink_set(domain, dst) __iolink_set((domain), &(dst))
#define node_iolink_set(node, link, dst) \
iolink_set(node_iolink_to_domain((node), (link)), (dst))
#define iohub_set(domain, dst) iolink_set((domain), (dst))
#define node_iohub_set(node, link, dst) \
iohub_set(node_iohub_to_domain((node), (link)), (dst))
#define rdma_set(domain, dst) iolink_set((domain), (dst))
#define node_rdma_set(node, link, dst) \
rdma_set(node_rdma_to_domain((node), (link)), (dst))
static inline void __iolink_set(int domain, volatile iolinkmask_t *dstp)
{
set_bit(domain, dstp->bits);
}
#define iolink_clear(domain, dst) __iolink_clear((domain), &(dst))
#define node_iolink_clear(node, link, dst) \
iolink_clear(node_iolink_to_domain((node), (link)), (dst))
#define iohub_clear(domain, dst) iolink_clear((domain), (dst))
#define node_iohub_clear(node, link, dst) \
iohub_clear(node_iohub_to_domain((node), (link)), (dst))
#define rdma_clear(domain, dst) iolink_clear((domain), (dst))
#define node_rdma_clear(node, link, dst) \
rdma_clear(node_rdma_to_domain((node), (link)), (dst))
static inline void __iolink_clear(int domain, volatile iolinkmask_t *dstp)
{
clear_bit(domain, dstp->bits);
}
#define iolinks_setall(dst) __iolinks_setall(&(dst), MAX_NUMIOLINKS)
static inline void __iolinks_setall(iolinkmask_t *dstp, int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
#define iolinks_clear(dst) __iolinks_clear(&(dst), MAX_NUMIOLINKS)
static inline void __iolinks_clear(iolinkmask_t *dstp, int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
/* No static inline type checking - see Subtlety (1) above. */
#define iolink_isset(domain, iolinkmask) test_bit((domain), (iolinkmask).bits)
#define node_iolink_isset(node, link, iolinkmask) \
		iolink_isset(node_iolink_to_domain((node), (link)), \
			(iolinkmask))
#define iohub_isset(domain, iolinkmask) iolink_isset((domain), (iolinkmask))
#define node_iohub_isset(node, link, iolinkmask) \
		iohub_isset(node_iohub_to_domain((node), (link)), \
			(iolinkmask))
#define rdma_isset(domain, iolinkmask) iolink_isset((domain), (iolinkmask))
#define node_rdma_isset(node, link, iolinkmask) \
		rdma_isset(node_rdma_to_domain((node), (link)), \
			(iolinkmask))
#define iolink_test_and_set(domain, iolinkmask) \
__iolink_test_and_set((domain), &(iolinkmask))
#define node_iolink_test_and_set(node, link, iolinkmask) \
iolink_test_and_set(node_iolink_to_domain((node), (link)), \
(iolinkmask))
#define iohub_test_and_set(domain, iolinkmask) \
iolink_test_and_set((domain), (iolinkmask))
#define node_iohub_test_and_set(node, link, iolinkmask) \
iohub_test_and_set(node_iohub_to_domain((node), (link)), \
(iolinkmask))
#define rdma_test_and_set(domain, iolinkmask) \
iolink_test_and_set((domain), (iolinkmask))
#define node_rdma_test_and_set(node, link, iolinkmask) \
rdma_test_and_set(node_rdma_to_domain((node), (link)), \
(iolinkmask))
static inline int __iolink_test_and_set(int domain, iolinkmask_t *addr)
{
return test_and_set_bit(domain, addr->bits);
}
#define iolinks_and(dst, src1, src2) \
__iolinks_and(&(dst), &(src1), &(src2), MAX_NUMIOLINKS)
static inline void __iolinks_and(iolinkmask_t *dstp, const iolinkmask_t *src1p,
const iolinkmask_t *src2p, int nbits)
{
bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define iolinks_or(dst, src1, src2) \
__iolinks_or(&(dst), &(src1), &(src2), MAX_NUMIOLINKS)
static inline void __iolinks_or(iolinkmask_t *dstp, const iolinkmask_t *src1p,
const iolinkmask_t *src2p, int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define iolinks_xor(dst, src1, src2) \
__iolinks_xor(&(dst), &(src1), &(src2), MAX_NUMIOLINKS)
static inline void __iolinks_xor(iolinkmask_t *dstp, const iolinkmask_t *src1p,
const iolinkmask_t *src2p, int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define iolinks_andnot(dst, src1, src2) \
__iolinks_andnot(&(dst), &(src1), &(src2), MAX_NUMIOLINKS)
static inline void __iolinks_andnot(iolinkmask_t *dstp, const iolinkmask_t *src1p,
const iolinkmask_t *src2p, int nbits)
{
bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define iolinks_complement(dst, src) \
__iolinks_complement(&(dst), &(src), MAX_NUMIOLINKS)
static inline void __iolinks_complement(iolinkmask_t *dstp,
const iolinkmask_t *srcp, int nbits)
{
bitmap_complement(dstp->bits, srcp->bits, nbits);
}
#define iolinks_equal(src1, src2) \
__iolinks_equal(&(src1), &(src2), MAX_NUMIOLINKS)
static inline int __iolinks_equal(const iolinkmask_t *src1p,
const iolinkmask_t *src2p, int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
}
#define iolinks_intersects(src1, src2) \
__iolinks_intersects(&(src1), &(src2), MAX_NUMIOLINKS)
static inline int __iolinks_intersects(const iolinkmask_t *src1p,
const iolinkmask_t *src2p, int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}
#define iolinks_subset(src1, src2) \
__iolinks_subset(&(src1), &(src2), MAX_NUMIOLINKS)
static inline int __iolinks_subset(const iolinkmask_t *src1p,
const iolinkmask_t *src2p, int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
#define iolinks_empty(src) __iolinks_empty(&(src), MAX_NUMIOLINKS)
static inline int __iolinks_empty(const iolinkmask_t *srcp, int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
#define iolinks_full(iolinkmask) __iolinks_full(&(iolinkmask), MAX_NUMIOLINKS)
static inline int __iolinks_full(const iolinkmask_t *srcp, int nbits)
{
return bitmap_full(srcp->bits, nbits);
}
#define iolinks_weight(iolinkmask) __iolinks_weight(&(iolinkmask), MAX_NUMIOLINKS)
static inline int __iolinks_weight(const iolinkmask_t *srcp, int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
#define iolinks_shift_right(dst, src, n) \
__iolinks_shift_right(&(dst), &(src), (n), MAX_NUMIOLINKS)
static inline void __iolinks_shift_right(iolinkmask_t *dstp,
const iolinkmask_t *srcp, int n, int nbits)
{
bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}
#define iolinks_shift_left(dst, src, n) \
__iolinks_shift_left(&(dst), &(src), (n), MAX_NUMIOLINKS)
static inline void __iolinks_shift_left(iolinkmask_t *dstp,
const iolinkmask_t *srcp, int n, int nbits)
{
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
/* FIXME: better would be to fix all architectures to never return
> MAX_NUMIOLINKS, then the silly min_ts could be dropped. */
#define first_iolink(src) __first_iolink(&(src))
static inline int __first_iolink(const iolinkmask_t *srcp)
{
return min_t(int, MAX_NUMIOLINKS, find_first_bit(srcp->bits, MAX_NUMIOLINKS));
}
#define next_iolink(n, src) __next_iolink((n), &(src))
static inline int __next_iolink(int n, const iolinkmask_t *srcp)
{
return min_t(int, MAX_NUMIOLINKS, find_next_bit(srcp->bits,
MAX_NUMIOLINKS, n+1));
}
#define iolinkmask_of_iolink(domain) \
({ \
typeof(_unused_iolinkmask_arg_) m; \
if (sizeof(m) == sizeof(unsigned long)) { \
m.bits[0] = 1UL<<(domain); \
} else { \
iolinks_clear(m); \
iolink_set((domain), m); \
} \
m; \
})
#define iolinkmask_of_node_iolink(node, link) \
iolinkmask_of_iolink(node_iohub_to_domain((node), (link)))
#define first_unset_iolink(mask) __first_unset_iolink(&(mask))
static inline int __first_unset_iolink(const iolinkmask_t *maskp)
{
	return min_t(int, MAX_NUMIOLINKS,
find_first_zero_bit(maskp->bits, MAX_NUMIOLINKS));
}
#define IOLINK_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMIOLINKS)
#if MAX_NUMIOLINKS <= BITS_PER_LONG
#define IOLINK_MASK_ALL \
((iolinkmask_t) { { \
[BITS_TO_LONGS(MAX_NUMIOLINKS)-1] = IOLINK_MASK_LAST_WORD \
} })
#else
#define IOLINK_MASK_ALL \
((iolinkmask_t) { { \
[0 ... BITS_TO_LONGS(MAX_NUMIOLINKS)-2] = ~0UL, \
[BITS_TO_LONGS(MAX_NUMIOLINKS)-1] = IOLINK_MASK_LAST_WORD \
} })
#endif
#define IOLINK_MASK_NONE \
((iolinkmask_t) { { \
[0 ... BITS_TO_LONGS(MAX_NUMIOLINKS)-1] = 0UL \
} })
#define CHUNKSZ 32
#define nbits_to_hold_value(val) fls(val)
#define BASEDEC 10 /* fancier cpuset lists input in decimal */
/**
* bitmap_scnprintf - convert bitmap to an ASCII hex string.
* @buf: byte buffer into which string is placed
* @buflen: reserved size of @buf, in bytes
* @maskp: pointer to bitmap to convert
* @nmaskbits: size of bitmap, in bits
*
* Exactly @nmaskbits bits are displayed. Hex digits are grouped into
* comma-separated sets of eight digits per set. Returns the number of
* characters which were written to *buf, excluding the trailing \0.
*/
static int bitmap_scnprintf(char *buf, unsigned int buflen,
const unsigned long *maskp, int nmaskbits)
{
int i, word, bit, len = 0;
unsigned long val;
const char *sep = "";
int chunksz;
u32 chunkmask;
chunksz = nmaskbits & (CHUNKSZ - 1);
if (chunksz == 0)
chunksz = CHUNKSZ;
i = ALIGN(nmaskbits, CHUNKSZ) - CHUNKSZ;
for (; i >= 0; i -= CHUNKSZ) {
chunkmask = ((1ULL << chunksz) - 1);
word = i / BITS_PER_LONG;
bit = i % BITS_PER_LONG;
val = (maskp[word] >> bit) & chunkmask;
len += scnprintf(buf+len, buflen-len, "%s%0*lx", sep,
(chunksz+3)/4, val);
chunksz = CHUNKSZ;
sep = ",";
}
return len;
}
#undef CHUNKSZ
/*
* bscnl_emit(buf, buflen, rbot, rtop, bp)
*
* Helper routine for bitmap_scnlistprintf(). Write decimal number
* or range to buf, suppressing output past buf+buflen, with optional
* comma-prefix. Return len of what was written to *buf, excluding the
* trailing \0.
*/
static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len)
{
if (len > 0)
len += scnprintf(buf + len, buflen - len, ",");
if (rbot == rtop)
len += scnprintf(buf + len, buflen - len, "%d", rbot);
else
len += scnprintf(buf + len, buflen - len, "%d-%d", rbot, rtop);
return len;
}
/**
* bitmap_scnlistprintf - convert bitmap to list format ASCII string
* @buf: byte buffer into which string is placed
* @buflen: reserved size of @buf, in bytes
* @maskp: pointer to bitmap to convert
* @nmaskbits: size of bitmap, in bits
*
* Output format is a comma-separated list of decimal numbers and
* ranges. Consecutively set bits are shown as two hyphen-separated
* decimal numbers, the smallest and largest bit numbers set in
* the range. Output format is compatible with the format
* accepted as input by bitmap_parselist().
*
* The return value is the number of characters which were written to *buf
* excluding the trailing '\0', as per ISO C99's scnprintf.
*/
static int bitmap_scnlistprintf(char *buf, unsigned int buflen,
const unsigned long *maskp, int nmaskbits)
{
int len = 0;
/* current bit is 'cur', most recently seen range is [rbot, rtop] */
int cur, rbot, rtop;
if (buflen == 0)
return 0;
buf[0] = 0;
cur = find_first_bit(maskp, nmaskbits);
rbot = cur;
while (cur < nmaskbits) {
rtop = cur;
cur = find_next_bit(maskp, nmaskbits, cur+1);
if (cur >= nmaskbits || cur > rtop + 1) {
len = bscnl_emit(buf, buflen, rbot, rtop, len);
rbot = cur;
}
}
return len;
}
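/*
 * Example of the two output formats: for a 32-bit mask with bits 0, 1, 2
 * and 5 set (value 0x27), bitmap_scnprintf() writes "00000027" while
 * bitmap_scnlistprintf() writes "0-2,5".
 */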
#define iolinks_addr(src) ((src).bits)
#define iolinkmask_scnprintf(buf, len, src) \
__iolinkmask_scnprintf((buf), (len), &(src), MAX_NUMIOLINKS)
static inline int __iolinkmask_scnprintf(char *buf, int len,
const iolinkmask_t *srcp, int nbits)
{
return bitmap_scnprintf(buf, len, srcp->bits, nbits);
}
#define iolinkmask_parse(ubuf, ulen, dst) \
__iolinkmask_parse((ubuf), (ulen), &(dst), MAX_NUMIOLINKS)
static inline int __iolinkmask_parse(const char __user *buf, int len,
iolinkmask_t *dstp, int nbits)
{
return bitmap_parse(buf, len, dstp->bits, nbits);
}
#define iolinklist_scnprintf(buf, len, src) \
__iolinklist_scnprintf((buf), (len), &(src), MAX_NUMIOLINKS)
static inline int __iolinklist_scnprintf(char *buf, int len,
const iolinkmask_t *srcp, int nbits)
{
return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
}
#define iolinklist_parse(buf, dst) __iolinklist_parse((buf), &(dst), MAX_NUMIOLINKS)
static inline int __iolinklist_parse(const char *buf, iolinkmask_t *dstp, int nbits)
{
return bitmap_parselist(buf, dstp->bits, nbits);
}
#if defined(CONFIG_IOHUB_DOMAINS) && MAX_NUMIOLINKS > 1
#define for_each_iolink_mask(domain, mask) \
for ((domain) = first_iolink(mask); \
(domain) < MAX_NUMIOLINKS; \
(domain) = next_iolink((domain), (mask)))
#define for_each_node_iolink_mask(domain, node, link, mask) \
for ((domain) = first_iolink(mask), \
(node) = iolink_domain_to_node((domain)), \
(link) = iolink_domain_to_link((domain)); \
(domain) < MAX_NUMIOLINKS; \
(domain) = next_iolink((domain), (mask)), \
(node) = iolink_domain_to_node((domain)), \
(link) = iolink_domain_to_link((domain)))
#else /* MAX_NUMIOLINKS == 1 */
#define for_each_iolink_mask(domain, mask) \
if (HAS_MACHINE_E2K_IOHUB) \
for ((domain) = 0; (domain) < 1; (domain)++)
#define for_each_node_iolink_mask(domain, node, link, mask) \
if (HAS_MACHINE_E2K_IOHUB) \
for ((domain) = 0, (node) = 0, (link) = 0; \
(domain) < 1; (domain)++)
#endif /* MAX_NUMIOLINKS */
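/*
 * Usage sketch (hypothetical caller): walk every domain set in a mask,
 * e.g. the possible-IOHUB map declared below:
 *
 *	int domain;
 *	for_each_iolink_mask(domain, iolink_iohub_map)
 *		pr_info("IOHUB domain %d present\n", domain);
 */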
/*
* The following particular system iolinkmasks and operations
* on them manage all possible and online iolinks.
*/
#if defined(CONFIG_IOHUB_DOMAINS) && MAX_NUMIOLINKS > 1
extern int iolinks_num;
extern iolinkmask_t iolink_iohub_map;
extern iolinkmask_t iolink_online_iohub_map;
extern int iolink_iohub_num;
extern int iolink_online_iohub_num;
extern iolinkmask_t iolink_rdma_map;
extern iolinkmask_t iolink_online_rdma_map;
extern int iolink_rdma_num;
extern int iolink_online_rdma_num;
#define num_online_iolinks() (num_online_iohubs() + num_online_rdmas())
#define num_possible_iolinks() iolinks_num
#define num_online_iohubs() iolink_online_iohub_num
#define num_possible_iohubs() iolink_iohub_num
#define num_online_rdmas() iolink_online_rdma_num
#define num_possible_rdmas() iolink_rdma_num
#define iolink_online(domain) (iohub_online(domain) || rdma_online(domain))
#define iolink_possible(domain)	(iohub_possible(domain) || \
					rdma_possible(domain))
#define node_iolink_online(node, link) \
iolink_online(node_iolink_to_domain(node, link))
#define node_iolink_possible(node, link) \
iolink_possible(node_iolink_to_domain(node, link))
#define iohub_online(domain) iolink_isset((domain), iolink_online_iohub_map)
#define iohub_possible(domain)	iolink_isset((domain), iolink_iohub_map)
#define node_iohub_online(node, link) \
iohub_online(node_iohub_to_domain(node, link))
#define node_iohub_possible(node, link) \
		iohub_possible(node_iohub_to_domain(node, link))
#define first_iohub_online() first_iolink(iolink_online_iohub_map)
#define rdma_online(domain) iolink_isset((domain), iolink_online_rdma_map)
#define rdma_possible(domain) iolink_isset((domain), iolink_rdma_map)
#define node_rdma_online(node, link) \
rdma_online(node_rdma_to_domain(node, link))
#define node_rdma_possible(node, link) \
rdma_possible(node_rdma_to_domain(node, link))
#else
#define iolinks_num 1
#define iolink_iohub_num 1
#define num_online_iolinks() 1
#define num_possible_iolinks() 1
#define num_online_iohubs() 1
#define num_possible_iohubs() 1
#define num_online_rdmas() 0
#define num_possible_rdmas() 0
#define iolink_online(domain) ((domain) == 0)
#define iolink_possible(domain) ((domain) == 0)
#define node_iolink_online(node, link) \
((node) == 0 && (link) == 0)
#define node_iolink_possible(node, link) \
((node) == 0 && (link) == 0)
#define iohub_online(domain) ((domain) == 0)
#define iohub_possible(domain)	((domain) == 0)
#define node_iohub_online(node, link) \
((node) == 0 && (link) == 0)
#define node_iohub_possible(node, link) \
((node) == 0 && (link) == 0)
#define first_iohub_online() 0
#define rdma_online(domain) 0
#define rdma_possible(domain) 0
#define node_rdma_online(node, link) 0
#define node_rdma_possible(node, link) 0
#endif
#define iohub_set_online(domain) \
set_bit((domain), iolink_online_iohub_map.bits)
#define iohub_set_offline(domain) \
clear_bit((domain), iolink_online_iohub_map.bits)
#define node_iohub_set_online(node, link) \
		iohub_set_online(node_iohub_to_domain((node), (link)))
#define node_iohub_set_offline(node, link) \
		iohub_set_offline(node_iohub_to_domain((node), (link)))
#define rdma_set_online(domain) \
set_bit((domain), iolink_online_rdma_map.bits)
#define rdma_set_offline(domain) \
clear_bit((domain), iolink_online_rdma_map.bits)
#define node_rdma_set_online(node, link) \
		rdma_set_online(node_rdma_to_domain((node), (link)))
#define node_rdma_set_offline(node, link) \
		rdma_set_offline(node_rdma_to_domain((node), (link)))
#define for_each_iohub(domain) \
for_each_iolink_mask((domain), iolink_iohub_map)
#define for_each_online_iohub(domain) \
for_each_iolink_mask((domain), iolink_online_iohub_map)
#define for_each_node_iohub(domain, node, link) \
for_each_node_iolink_mask((domain), (node), (link), \
iolink_iohub_map)
#define for_each_online_node_iohub(domain, node, link) \
for_each_node_iolink_mask((domain), (node), (link), \
iolink_online_iohub_map)
#define for_each_rdma(domain) \
for_each_iolink_mask((domain), iolink_rdma_map)
#define for_each_online_rdma(domain) \
for_each_iolink_mask((domain), iolink_online_rdma_map)
#define for_each_node_rdma(domain, node, link) \
for_each_node_iolink_mask((domain), (node), (link), \
iolink_rdma_map)
#define for_each_online_node_rdma(domain, node, link) \
for_each_node_iolink_mask((domain), (node), (link), \
iolink_online_rdma_map)
#endif /* __ASM_L_IOLINKMASK_H */

@ -0,0 +1,164 @@
#ifndef _ASM_L_IPI_H
#define _ASM_L_IPI_H
#ifdef CONFIG_L_LOCAL_APIC
/*
* Copyright 2004 James Cleverdon, IBM.
* Subject to the GNU Public License, v.2
*
* Generic APIC InterProcessor Interrupt code.
*
* Moved to include file by James Cleverdon from
* arch/x86-64/kernel/smp.c
*
* Copyrights from kernel/smp.c:
*
* (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
* (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
* (c) 2002,2003 Andi Kleen, SuSE Labs.
* Subject to the GNU Public License, v.2
*/
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/smp.h>
/*
* the following functions deal with sending IPIs between CPUs.
*
* We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
*/
static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector,
unsigned int dest)
{
unsigned int icr = shortcut | dest;
switch (vector) {
default:
icr |= APIC_DM_FIXED | vector;
break;
case NMI_VECTOR:
icr |= APIC_DM_NMI;
break;
}
return icr;
}
static inline int __prepare_ICR2(unsigned int mask)
{
return SET_APIC_DEST_FIELD(mask);
}
static inline void __xapic_wait_icr_idle(void)
{
while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
cpu_relax();
}
static inline void
__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
{
/*
* Subtle. In the case of the 'never do double writes' workaround
 * we have to lock out interrupts to be safe. As we don't care
 * about the value read we use an atomic rmw access to avoid costly
* cli/sti. Otherwise we use an even cheaper single atomic write
* to the APIC.
*/
unsigned int cfg;
/*
* Wait for idle.
*/
__xapic_wait_icr_idle();
/*
* No need to touch the target chip field
*/
cfg = __prepare_ICR(shortcut, vector, dest);
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
native_apic_mem_write(APIC_ICR, cfg);
}
/*
* This is used to send an IPI with no shorthand notation (the destination is
* specified in bits 56 to 63 of the ICR).
*/
static inline void
__default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
unsigned long cfg;
/*
* Wait for idle.
*/
if (unlikely(vector == NMI_VECTOR))
safe_apic_wait_icr_idle();
else
__xapic_wait_icr_idle();
/*
* prepare target chip field
*/
cfg = __prepare_ICR2(mask);
native_apic_mem_write(APIC_ICR2, cfg);
/*
* program the ICR
*/
cfg = __prepare_ICR(0, vector, dest);
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
native_apic_mem_write(APIC_ICR, cfg);
}
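/*
 * Note: unlike the shortcut variant above, this routine first programs
 * the destination field via APIC_ICR2 and only then writes APIC_ICR,
 * because the ICR write is what actually fires the IPI.
 */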
extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
int vector);
#if 0
extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
int vector);
#endif
/* Avoid include hell */
#define NMI_VECTOR 0x02
extern int no_broadcast;
static inline void __default_local_send_IPI_allbutself(int vector)
{
if (no_broadcast || vector == NMI_VECTOR)
apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
else
__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical);
}
static inline void __default_local_send_IPI_all(int vector)
{
if (no_broadcast || vector == NMI_VECTOR)
apic->send_IPI_mask(cpu_online_mask, vector);
else
__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical);
}
#ifdef CONFIG_L_X86_32
extern void default_send_IPI_mask_logical(const struct cpumask *mask,
int vector);
extern void default_send_IPI_allbutself(int vector);
extern void default_send_IPI_all(int vector);
extern void default_send_IPI_self(int vector);
#endif
#endif
#endif /* _ASM_L_IPI_H */

@ -0,0 +1,9 @@
#ifndef _ASM_L_IRQ_NUMBERS_H
#define _ASM_L_IRQ_NUMBERS_H
#include <linux/gpio.h>
/* Number of additional (chained) interrupts */
#define I2C_SPI_IRQS_NUM 2
#endif

@ -0,0 +1,6 @@
#ifndef _ASM_L_IRQ_REMAPPING_H
#define _ASM_L_IRQ_REMAPPING_H
#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
#endif /* _ASM_L_IRQ_REMAPPING_H */

@ -0,0 +1,201 @@
#ifndef _ASM_L_IRQ_VECTORS_H
#define _ASM_L_IRQ_VECTORS_H
#include <asm/apicdef.h>
/*
* Linux IRQ vector layout.
*
* There are 256 IDT entries (per CPU - each entry is 8 bytes) which can
* be defined by Linux. They are used as a jump table by the CPU when a
* given vector is triggered - by a CPU-external, CPU-internal or
* software-triggered event.
*
* Linux sets the kernel code address each entry jumps to early during
* bootup, and never changes them. This is the general layout of the
* IDT entries:
*
* Vectors 0 ... 31 : system traps and exceptions - hardcoded events
* Vectors 32 ... 127 : device interrupts
* Vector 128 : legacy int80 syscall interface
* Vectors 129 ... 237 : device interrupts
* Vectors 238 ... 255 : special interrupts
*
* 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
*
* This file enumerates the exact layout of them:
*/
#define NMI_VECTOR 0x02
#define MCE_VECTOR 0x12
/*
* IDT vectors usable for external interrupt sources start
* at 0x20:
*/
#define FIRST_EXTERNAL_VECTOR 0x20
#if 0
/*
* We start allocating at 0x21 to spread out vectors evenly between
* priority levels. (0x80 is the syscall vector)
*/
#define VECTOR_OFFSET_START 1
#else
#define VECTOR_OFFSET_START 0
#endif
#if 0
#ifdef CONFIG_X86_32
# define SYSCALL_VECTOR 0x80
# define IA32_SYSCALL_VECTOR 0x80
#else
# define IA32_SYSCALL_VECTOR 0x80
#endif
#endif
/*
* Reserve the lowest usable priority level 0x20 - 0x2f for triggering
* cleanup after irq migration.
*/
#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
/*
* Vectors 0x30-0x3f are used for ISA interrupts.
*/
#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
/*
* Special IRQ vectors used by the SMP architecture, 0xf0-0xff
*
* some of the following vectors are 'rare', they are merged
* into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
* TLB, reschedule and local APIC vectors are performance-critical.
*/
#define SPURIOUS_APIC_VECTOR 0xff
/*
* Sanity check
*/
#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
# error SPURIOUS_APIC_VECTOR definition error
#endif
#if 0
#define ERROR_APIC_VECTOR 0xfe
#define RESCHEDULE_VECTOR 0xfd
#define CALL_FUNCTION_VECTOR 0xfc
#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
#define THERMAL_APIC_VECTOR 0xfa
#define THRESHOLD_APIC_VECTOR 0xf9
#define REBOOT_VECTOR 0xf8
/* f0-f7 used for spreading out TLB flushes: */
#define INVALIDATE_TLB_VECTOR_END 0xf7
#define INVALIDATE_TLB_VECTOR_START 0xf0
#define NUM_INVALIDATE_TLB_VECTORS 8
/*
* Local APIC timer IRQ vector is on a different priority level,
* to work around the 'lost local interrupt if more than 2 IRQ
* sources per level' errata.
*/
#define LOCAL_TIMER_VECTOR 0xef
/*
* Generic system vector for platform specific use
*/
#define X86_PLATFORM_IPI_VECTOR 0xed
/*
* Performance monitoring pending work vector:
*/
#define LOCAL_PENDING_VECTOR 0xec
#define UV_BAU_MESSAGE 0xea
/*
* Self IPI vector for machine checks
*/
#define MCE_SELF_VECTOR 0xeb
#endif
/*
* First APIC vector available to drivers: (vectors 0x30-0xee) we
* start at 0x31(0x41) to spread out vectors evenly between priority
* levels. (0x80 is the syscall vector)
*/
#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
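/*
 * Worked out from the values above: IRQ0_VECTOR = 0x20 + 0x10 = 0x30, so
 * the ISA range spans vectors 0x30-0x3f and
 * FIRST_DEVICE_VECTOR = 0x3f + 2 = 0x41.
 */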
#ifdef CONFIG_EPIC
#define NR_VECTORS 1024
#else
#define NR_VECTORS 256
#endif
#define NR_VECTORS_APIC 256
#define FPU_IRQ 13
#define FIRST_VM86_IRQ 3
#define LAST_VM86_IRQ 15
#ifndef __ASSEMBLY__
static inline int invalid_vm86_irq(int irq)
{
return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
}
#endif
/*
* Size the maximum number of interrupts.
*
* If the irq_desc[] array has a sparse layout, we can size things
* generously - it scales up linearly with the maximum number of CPUs,
* and the maximum number of IO-APICs, whichever is higher.
*
* In other cases we size more conservatively, to not create too large
* static arrays.
*/
#if 0
#define NR_IRQS_LEGACY 16
#else
#define NR_IRQS_LEGACY 0
#endif
#define CPU_VECTOR_LIMIT		(8 * NR_CPUS)
#define IO_APIC_VECTOR_LIMIT		(32 * MAX_IO_APICS)
#ifdef CONFIG_L_IO_APIC
# ifdef CONFIG_SPARSE_IRQ
# define NR_IRQS \
(CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
(NR_VECTORS + CPU_VECTOR_LIMIT) : \
(NR_VECTORS + IO_APIC_VECTOR_LIMIT))
# else
# if NR_CPUS < MAX_IO_APICS
# define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT)
# else
# define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT)
# endif
# endif
#else /* !CONFIG_L_IO_APIC: */
# define NR_IRQS NR_IRQS_LEGACY
#endif
#endif /* _ASM_L_IRQ_VECTORS_H */

@ -0,0 +1,16 @@
#ifndef _ASM_L_IRQ_WORK_H
#define _ASM_L_IRQ_WORK_H
static inline bool arch_irq_work_has_interrupt(void)
{
//TODO only arm does it this way! (see bug 120742)
#ifdef CONFIG_SMP
return true;
#else
return false;
#endif
}
extern void arch_irq_work_raise(void);
#endif /* _ASM_L_IRQ_WORK_H */

@ -0,0 +1,63 @@
#ifndef _ASM_IRQDOMAIN_H
#define _ASM_IRQDOMAIN_H
#include <linux/irqdomain.h>
#include <asm/hw_irq.h>
#ifdef CONFIG_X86_LOCAL_APIC
enum {
/* Allocate contiguous CPU vectors */
X86_IRQ_ALLOC_CONTIGUOUS_VECTORS = 0x1,
};
extern struct irq_domain *x86_vector_domain;
extern void init_irq_alloc_info(struct irq_alloc_info *info,
const struct cpumask *mask);
extern void copy_irq_alloc_info(struct irq_alloc_info *dst,
struct irq_alloc_info *src);
#endif /* CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_IO_APIC
struct device_node;
struct irq_data;
enum ioapic_domain_type {
IOAPIC_DOMAIN_INVALID,
IOAPIC_DOMAIN_LEGACY,
IOAPIC_DOMAIN_STRICT,
IOAPIC_DOMAIN_DYNAMIC,
};
struct ioapic_domain_cfg {
enum ioapic_domain_type type;
const struct irq_domain_ops *ops;
struct device_node *dev;
};
extern const struct irq_domain_ops mp_ioapic_irqdomain_ops;
extern int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg);
extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs);
extern void mp_irqdomain_activate(struct irq_domain *domain,
struct irq_data *irq_data);
extern void mp_irqdomain_deactivate(struct irq_domain *domain,
struct irq_data *irq_data);
extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);
#endif /* CONFIG_X86_IO_APIC */
#ifdef CONFIG_PCI_MSI
extern void arch_init_msi_domain(struct irq_domain *domain);
#else
static inline void arch_init_msi_domain(struct irq_domain *domain) { }
#endif
#ifdef CONFIG_HT_IRQ
extern void arch_init_htirq_domain(struct irq_domain *domain);
#else
static inline void arch_init_htirq_domain(struct irq_domain *domain) { }
#endif
#endif

@ -0,0 +1,11 @@
#ifndef _L_UNCACHED_H
#define _L_UNCACHED_H
#include <linux/types.h>
struct device;
void *l_alloc_uncached(struct device *dev, size_t size,
phys_addr_t *phys_addr, gfp_t gfp);
void l_free_uncached(struct device *dev, size_t size, void *cpu_addr);
int l_init_uncached_pool(void);
void l_destroy_uncached_pool(void);
#endif /* !(_L_UNCACHED_H) */

@ -0,0 +1,13 @@
#ifndef _L_IDE_H_
#define _L_IDE_H_
#include <linux/ata.h>
static inline void l_init_iops(ide_hwif_t *hwif)
{
}
#define L_FORCE_NATIVE_MODE 1
#define L_DEFAULT_IDE_DMA_MODE	ATA_UDMA5 /* default max UDMA capable */
#endif /*_L_IDE_H_*/

@ -0,0 +1,140 @@
#ifndef __L_ASM_PMC_H__
#define __L_ASM_PMC_H__
#include <linux/thermal.h>
#include <linux/cpu_cooling.h>
#include <linux/i2c.h>
#include <asm/mpspec.h>
#define PMC_L_MAX_IDLE_STATES 4
#define PMC_L_TEMP_RG_CUR_REG_0 0x20
#define PMC_L_TEMP_RG_CUR_REG_1 0x24
#define PMC_L_GPE0_STS_REG 0x28
#define PMC_L_GPE0_STS_CLR 0xf
#define PMC_L_GPE0_EN_REG 0x2c
#define PMC_L_TEMP_RG0_REG 0x30
#define PMC_L_TEMP_RG1_REG 0x34
#define PMC_L_TEMP_RG2_REG 0x38
#define PMC_L_TEMP_RG3_REG 0x3c
#define PMC_L_TEMP_RG_CUR_REG_2 0x40
#define PMC_L_TEMP_RGX_FALL (0x0 << 12)
#define PMC_L_TEMP_RGX_RISE (0x3 << 12)
#define PMC_L_PC_S0_REG 0x100
#define PMC_L_PC_S1_REG 0x104
#define PMC_L_COFVID_3D_STATUS_REG 0x140
#define PMC_L_P_STATE_3D_CNTRL_REG 0x148
#define PMC_L_P_STATE_3D_STATUS_REG 0x14c
#define PMC_L_P_STATE_3D_VALUE_0_REG 0x150
#define PMC_L_P_STATE_3D_VALUE_1_REG 0x154
#define PMC_L_P_STATE_3D_VALUE_2_REG 0x158
#define PMC_L_P_STATE_3D_VALUE_3_REG 0x15c
#define PMC_L_C_STATE_3D_REG 0x160
#define PMC_L_2D_FC_REG 0x164
#define PMC_L_REGS_AREA_SIZE 0x168
/* Bits in PMC registers: */
/* P_State_value_X (RW): */
#define PMC_L_P_STATE_VALUE_VID_MASK 0x0000fe00
#define PMC_L_P_STATE_VALUE_VID_SHIFT 9
#define PMC_L_P_STATE_VALUE_DID_MASK 0x000001f0
#define PMC_L_P_STATE_VALUE_DID_SHIFT 4
#define PMC_L_P_STATE_VALUE_FID_MASK 0x0000000f
#define PMC_L_P_STATE_VALUE_FID_SHIFT 0
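/*
 * Decoding example (illustrative value): for a P_State_value of 0x1234,
 * VID = (0x1234 & 0xfe00) >> 9 = 0x9, DID = (0x1234 & 0x1f0) >> 4 = 0x3
 * and FID = 0x1234 & 0xf = 0x4.
 */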
/* P_State_Cntrl (RW): */
#define PMC_L_P_STATE_CNTRL_MASK 0x3
#define PMC_L_P_STATE_CNTRL_SHIFT 0
#define PMC_L_P_STATE_CNTRL_P0_VAL 0x0
#define PMC_L_P_STATE_CNTRL_P1_VAL 0x1
#define PMC_L_P_STATE_CNTRL_P2_VAL 0x2
#define PMC_L_P_STATE_CNTRL_P3_VAL 0x3
/* P_State_status (RO): */
#define PMC_L_P_STATE_STATUS_MASK 0x3
#define PMC_L_P_STATE_STATUS_SHIFT 0
/* P_State_3D_Cntrl (RW): */
#define PMC_L_P_STATE_3D_CNTRL_MASK 0x3
#define PMC_L_P_STATE_3D_CNTRL_SHIFT 0
#define PMC_L_P_STATE_3D_CNTRL_P0_VAL 0x0
#define PMC_L_P_STATE_3D_CNTRL_P1_VAL 0x1
#define PMC_L_P_STATE_3D_CNTRL_P2_VAL 0x2
#define PMC_L_P_STATE_3D_CNTRL_P3_VAL 0x3
/* COVFID_status (contains RW, Status, RM, RO bits): */
#define PMC_L_COVFID_STATUS_PMCEN_VAL 0x0000000000000001 /* RW - 0 Bit */
#define PMC_L_COVFID_STATUS_RMWEN_VAL 0x4000000000000000 /* RM - 62 Bit */
#define PMC_L_COVFID_STATUS_VMAX_MASK 0x3f80000000000000 /* RM - 61:55 Bits */
#define PMC_L_COVFID_STATUS_VMAX_SHIFT 55
#define PMC_L_COVFID_STATUS_VMIN_MASK 0x007f000000000000 /* RM - 54:48 Bits */
#define PMC_L_COVFID_STATUS_VMIN_SHIFT 48
#define PMC_L_COVFID_STATUS_FMAX_MASK	0x0000ff0000000000 /* RM - 47:40 Bits */
#define PMC_L_COVFID_STATUS_FMAX_SHIFT 40
#define PMC_L_COVFID_STATUS_TRANS_VAL 0x0000000000000002 /* RO - 1 Bit */
#define PMC_L_COVFID_STATUS_PNUM_MASK 0x000000000000000c /* RO - 3:2 Bits */
#define PMC_L_COVFID_STATUS_PNUM_SHIFT 2
#define PMC_L_COVFID_STATUS_VID_MASK 0x000000000003f000 /* RO - 18:12 Bits */
#define PMC_L_COVFID_STATUS_VID_SHIFT 12
#define PMC_L_COVFID_STATUS_FID_MASK 0x0000000000000ff0 /* RO - 11:4 Bits */
#define PMC_L_COVFID_STATUS_FID_SHIFT 4
#define PMC_L_COVFID_RM_MASK (PMC_L_COVFID_STATUS_VMAX_MASK | \
PMC_L_COVFID_STATUS_VMIN_MASK | \
PMC_L_COVFID_STATUS_FMAX_MASK)
#define PMC_L_MAX_PSTATES 4
#define PMC_L_PRECISION 10
#define MAX_NUM_PMCS 1
#define SPMC_TEMP_BAD_VALUE -1000
/* The driver supports 1 passive trip point and 1 critical trip point */
enum l_pmc_thermal_trip {
LPMC_TRIP_PASSIVE,
LPMC_TRIP_CRITICAL,
LPMC_TRIP_NUM,
};
#define LPMC_TRIP_POINTS_MSK ((1 << LPMC_TRIP_NUM) - 1)
struct l_pmc {
unsigned char type;
unsigned char version;
void __iomem *cntrl_base;
void __iomem *data_base;
unsigned long vrange; /* VMAX, VMIN, FMAX */
unsigned int data_size;
unsigned int p_state[PMC_L_MAX_PSTATES]; /* VID,
* DID,
* FID
*/
unsigned int freq; /* Frequency in KHz */
struct pci_dev *pdev;
struct platform_device *i2c_chan;
struct thermal_zone_device *thermal;
enum thermal_device_mode thermal_mode;
int trip_temp[LPMC_TRIP_NUM];
int trip_hyst[LPMC_TRIP_NUM];
raw_spinlock_t thermal_lock;
struct thermal_cooling_device *cdev;
struct cpufreq_policy *policy;
};
extern struct l_pmc l_pmc[MAX_NUM_PMCS];
#if defined(CONFIG_L_PMC) || defined(CONFIG_S2_PMC)
extern int spmc_get_temp_cur0(void);
int pmc_l_gpufreq_set_scale(unsigned char scale);
int pmc_l_gpufreq_get_scale(void);
int pmc_l_gpufreq_get_frequency(void);
extern unsigned int load_threshold;
#else
static inline int spmc_get_temp_cur0(void) { return SPMC_TEMP_BAD_VALUE; }
#endif /* CONFIG_L_PMC || CONFIG_S2_PMC */
#endif /* __L_ASM_PMC_H__ */

@ -0,0 +1,14 @@
#ifndef __L_ASM_SPMC_H__
#define __L_ASM_SPMC_H__
#ifdef CONFIG_ACPI_L_SPMC
extern void do_spmc_halt(void);
#else
static inline void do_spmc_halt(void) {
printk(KERN_ERR "Board does not use KPI-2: SPMC is not present.\n");
return;
}
#endif
#endif /* __L_ASM_SPMC_H__ */

@ -0,0 +1,103 @@
#ifndef _L_ASM_L_TIMER_H
#define _L_ASM_L_TIMER_H
#include <linux/types.h>
/*
* Elbrus timer
*/
extern struct clock_event_device *global_clock_event;
extern int get_lt_timer(void);
extern u32 lt_read(void);
extern struct clocksource lt_cs;
/* New timer registers */
#define PIT_COUNTER_LIMIT 0x00
#define PIT_COUNTER_START_VALUE 0x04
#define PIT_COUNTER 0x08
#define PIT_COUNTER_CONTROL 0x0c
#define PIT_WD_COUNTER 0x10
#define PIT_WD_COUNTER_LOW PIT_WD_COUNTER
#define PIT_WD_COUNTER_HIGH (PIT_WD_COUNTER_LOW + 0x04)
#define PIT_WD_LIMIT 0x18
#define PIT_POWER_COUNTER 0x1c
#define PIT_POWER_COUNTER_LOW PIT_POWER_COUNTER
#define PIT_POWER_COUNTER_HIGH (PIT_POWER_COUNTER_LOW + 0x04)
#define PIT_WD_CONTROL 0x24
#define PIT_RESET_COUNTER 0x28
#define PIT_RESET_COUNTER_LOW PIT_RESET_COUNTER
#define PIT_RESET_COUNTER_HIGH (PIT_RESET_COUNTER_LOW + 0x04)
typedef struct lt_regs {
u32 counter_limit; /* timer counter limit value */
u32 counter_start; /* start value of counter */
u32 counter; /* timer counter */
u32 counter_cntr; /* timer control register */
u32 wd_counter; /* watchdog counter */
u32 wd_prescaler; /* watchdog prescaler */
u32 wd_limit; /* watchdog limit */
u32 power_counter_lo; /* power counter low bits */
u32 power_counter_hi; /* power counter high bits */
u32 wd_control; /* watchdog control register */
u32 reset_counter_lo; /* reset counter low bits */
	u32 reset_counter_hi;	/* reset counter high bits */
} lt_regs_t;
extern unsigned long long lt_phys_base;
extern lt_regs_t *lt_regs;
extern void setup_lt_timer(void);
extern int __init init_lt_clocksource(void);
/* counters registers structure */
#define LT_COUNTER_SHIFT 9 /* [30: 9] counters value */
#define LT_COUNTER_LIMIT_SHIFT 31 /* [31] Limit bit */
#define LT_COUNTER_LIMIT_BIT (1 << LT_COUNTER_LIMIT_SHIFT)
#define LT_WRITE_COUNTER_VALUE(count) ((count) << LT_COUNTER_SHIFT)
#define LT_READ_COUNTER_VALUE(count) ((count) >> LT_COUNTER_SHIFT)
#define LT_NSEC_PER_COUNTER_INCR 100	/* 10 MHz == 100 nanoseconds */
/* counter control register structure */
#define LT_COUNTER_CNTR_START 0x00000001 /* start/stop timer */
#define LT_COUNTER_CNTR_INVERTL 0x00000002 /* invert limit bit */
#define LT_COUNTER_CNTR_LINIT 0x00000004 /* Limit bit initial state */
/* 1 - limit bit set to 1 */
#define LT_COUNTER_CNTR_LAUNCH (LT_COUNTER_CNTR_START)
#define LT_INVERT_COUNTER_CNTR_LAUNCH (LT_COUNTER_CNTR_LAUNCH | \
LT_COUNTER_CNTR_INVERTL | \
LT_COUNTER_CNTR_LINIT)
#define LT_COUNTER_CNTR_STOP (0)
#define WD_CLOCK_TICK_RATE 10000000L
#define WD_LATCH(tick_rate) (((tick_rate) + HZ/2) / HZ)
#define WD_LIMIT_SHIFT 12
#define WD_WRITE_COUNTER_VALUE(count) (count)
#define WD_READ_COUNTER_VALUE(count) ((count) << WD_LIMIT_SHIFT)
#define WD_SET_COUNTER_VAL(sec) \
		(WD_WRITE_COUNTER_VALUE(WD_CLOCK_TICK_RATE * (sec)))
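/*
 * Example: with WD_CLOCK_TICK_RATE at 10 MHz, WD_SET_COUNTER_VAL(2)
 * programs 20,000,000 ticks, i.e. a two-second watchdog interval.
 */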
#define WD_INTR_MODE 0x1
#define WD_ENABLE 0x2
#define WD_EVENT 0x4
#define WD_COUNTER_BASE 0x10
/* System timer Registers (structure see asm/l_timer_regs.h) */
#define COUNTER_LIMIT 0x00
#define COUNTER_START_VALUE 0x04
#define L_COUNTER 0x08
#define COUNTER_CONTROL 0x0c
#define WD_COUNTER_L 0x10
#define WD_COUNTER_H 0x14
#define WD_LIMIT 0x18
#define POWER_COUNTER_L 0x1c
#define POWER_COUNTER_H 0x20
#define WD_CONTROL 0x24
#define RESET_COUNTER_L 0x28
#define RESET_COUNTER_H 0x2c
#endif /* _L_ASM_L_TIMER_H */

@ -0,0 +1,119 @@
#ifndef _L_ASM_L_TIMER_REGS_H
#define _L_ASM_L_TIMER_REGS_H
#include <linux/types.h>
/*
* Elbrus System timer Registers
*/
#define COUNTER_LIMIT 0x00
typedef struct counter_limit_fields {
u32 unused : 9; /* [8:0] */
u32 c_l : 22; /* [30:9] */
u32 l : 1; /* [31] */
} counter_limit_fields_t;
typedef union counter_limit {
u32 word;
counter_limit_fields_t fields;
} counter_limit_t;
#define COUNTER_START_VALUE 0x04
typedef struct counter_st_v_fields {
u32 unused : 9; /* [8:0] */
u32 c_st_v : 22; /* [30:9] */
u32 l : 1; /* [31] */
} counter_st_v_fields_t;
typedef union counter_st_v {
u32 word;
counter_st_v_fields_t fields;
} counter_st_v_t;
#define COUNTER 0x08
typedef struct counter_fields {
u32 unused : 9; /* [8:0] */
u32 c : 22; /* [30:9] */
u32 l : 1; /* [31] */
} counter_fields_t;
typedef union counter {
u32 word;
counter_fields_t fields;
} counter_t;
#define COUNTER_CONTROL 0x0c
typedef struct counter_control_fields {
u32 s_s : 1; /* [0] */
u32 inv_l : 1; /* [1] */
u32 l_ini : 1; /* [2] */
u32 unused : 29; /* [31:3] */
} counter_control_fields_t;
typedef union counter_control {
u32 word;
counter_control_fields_t fields;
} counter_control_t;
#define WD_COUNTER_L 0x10
typedef struct wd_counter_l_fields {
u32 wd_c : 32; /* [31:0] */
} wd_counter_l_fields_t;
typedef union wd_counter_l {
u32 word;
wd_counter_l_fields_t fields;
} wd_counter_l_t;
#define WD_COUNTER_H 0x14
typedef struct wd_counter_h_fields {
u32 wd_c : 32; /* [31:0] */
} wd_counter_h_fields_t;
typedef union wd_counter_h {
u32 word;
wd_counter_h_fields_t fields;
} wd_counter_h_t;
#define WD_LIMIT 0x18
typedef struct wd_limit_fields {
u32 wd_l : 32; /* [31:0] */
} wd_limit_fields_t;
typedef union wd_limit {
u32 word;
wd_limit_fields_t fields;
} wd_limit_t;
#define POWER_COUNTER_L 0x1c
typedef struct power_counter_l_fields {
u32 pw_c : 32; /* [31:0] */
} power_counter_l_fields_t;
typedef union power_counter_l {
u32 word;
power_counter_l_fields_t fields;
} power_counter_l_t;
#define POWER_COUNTER_H 0x20
typedef struct power_counter_h_fields {
u32 pw_c : 32; /* [31:0] */
} power_counter_h_fields_t;
typedef union power_counter_h {
u32 word;
power_counter_h_fields_t fields;
} power_counter_h_t;
#define WD_CONTROL 0x24
typedef struct wd_control_fields {
u32 w_m : 1; /* [0] */
u32 w_out_e : 1; /* [1] */
u32 w_evn : 1; /* [2] */
u32 unused : 29; /* [31:3] */
} wd_control_fields_t;
typedef union wd_control {
u32 word;
wd_control_fields_t fields;
} wd_control_t;
#define RESET_COUNTER_L 0x28
typedef struct reset_counter_l_fields {
u32 rst : 32; /* [31:0] */
} reset_counter_l_fields_t;
typedef union reset_counter_l {
u32 word;
reset_counter_l_fields_t fields;
} reset_counter_l_t;
#define RESET_COUNTER_H 0x2c
typedef struct reset_counter_h_fields {
u32 rst : 32; /* [31:0] */
} reset_counter_h_fields_t;
typedef union reset_counter_h {
u32 word;
reset_counter_h_fields_t fields;
} reset_counter_h_t;
#endif /* _L_ASM_L_TIMER_REGS_H */

@ -0,0 +1,634 @@
#ifndef __L_ASM_MPSPEC_H
#define __L_ASM_MPSPEC_H
/*
* Structure definitions for SMP machines following the
* Intel Multiprocessing Specification 1.1 and 1.4.
*/
#ifndef __ASSEMBLY__
#include <linux/init.h>
#include <linux/cpumask.h>
#include <asm/bootinfo.h>
#include <asm/apicdef.h>
#ifdef CONFIG_E2K
#include <asm/e2k.h>
#endif
/*
* This tag identifies where the SMP configuration
* information is.
*/
#ifdef __LITTLE_ENDIAN
#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_')
#elif __BIG_ENDIAN
#define SMP_MAGIC_IDENT ('_'|('P'<<8)|('M'<<16)|('_'<<24))
#else
#error byte order not defined
#endif /*__BIG_ENDIAN*/
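/*
 * Either way the four bytes spell "_MP_" in memory order; e.g. on a
 * little-endian machine the value above stores '_', 'M', 'P', '_' from
 * the lowest byte up.
 */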
/*
 * A maximum of NR_CPUS local APICs is possible with the current APIC ID
 * architecture. The maximum number of IO-APICs is the sum over both
 * sources: each IO link can have an IOHUB with an IO-APIC, and each
 * node can have an embedded IO-APIC.
 */
#define MAX_LOCAL_APICS	(NR_CPUS * 2)	/* APIC numbering may have holes */
#define MAX_IO_APICS (MAX_NUMIOLINKS + MAX_NUMNODES)
#define MAX_APICS MAX_LOCAL_APICS
#define SMP_FLOATING_TABLE_LEN sizeof(struct intel_mp_floating)
struct intel_mp_floating
{
char mpf_signature[4]; /* "_MP_" */
unsigned long mpf_physptr; /* Configuration table address */
unsigned char mpf_length; /* Our length (paragraphs) */
unsigned char mpf_specification;/* Specification version */
unsigned char mpf_checksum; /* Checksum (makes sum 0) */
unsigned char mpf_feature1; /* Standard or configuration ? */
unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */
unsigned char mpf_feature3; /* Unused (0) */
unsigned char mpf_feature4; /* Unused (0) */
unsigned char mpf_feature5; /* Unused (0) */
};
#define MPF_64_BIT_SPECIFICATION 8	/* MPF specification describes a */
					/* new MP table compatible */
					/* with 64-bit arches */
#define MP_SPEC_ADDR_ALIGN 4 /* addresses can be */
/* word-aligned */
#define MP_NEW_ADDR_ALIGN 8 /* all addresses should be */
/* double-word aligned */
#define ALIGN_BYTES_DOWN(addr, bytes) (((addr) / (bytes)) * (bytes))
#define ALIGN_BYTES_UP(addr, bytes) ((((addr) + (bytes)-1) / (bytes)) * \
(bytes))
#define MP_ALIGN_BYTES(addr, bytes) ALIGN_BYTES_UP(addr, bytes)
#define IS_64_BIT_MP_SPECS() \
(boot_mpf_found->mpf_specification == MPF_64_BIT_SPECIFICATION)
#define MP_ADDR_ALIGN(addr) \
(unsigned char *)(MP_ALIGN_BYTES((unsigned long long)(addr), \
(IS_64_BIT_MP_SPECS()) ? MP_NEW_ADDR_ALIGN : \
MP_SPEC_ADDR_ALIGN))
#define MP_SIZE_ALIGN(addr) \
MP_ALIGN_BYTES((unsigned long long)(addr), \
(IS_64_BIT_MP_SPECS()) ? MP_NEW_ADDR_ALIGN : \
MP_SPEC_ADDR_ALIGN)
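/*
 * Worked example: ALIGN_BYTES_UP(0x1009, 4) = 0x100c while
 * ALIGN_BYTES_UP(0x1009, 8) = 0x1010, so MP_ADDR_ALIGN() applies the
 * stricter double-word rounding only when IS_64_BIT_MP_SPECS() holds.
 */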
#define enable_update_mptable 0
struct mpc_table
{
char mpc_signature[4];
#define MPC_SIGNATURE "PCMP"
unsigned short mpc_length; /* Size of table */
char mpc_spec; /* 0x01 */
char mpc_checksum;
char mpc_oem[8];
char mpc_productid[12];
unsigned int mpc_oemptr; /* 0 if not present */
unsigned short mpc_oemsize; /* 0 if not present */
unsigned short mpc_oemcount;
unsigned int mpc_lapic; /* APIC address */
unsigned short mpe_length; /* Extended Table size */
unsigned char mpe_checksum; /* Extended Table checksum */
unsigned char reserved;
};
/* Followed by entries */
#define MP_PROCESSOR 0
#define MP_BUS 1
#define MP_IOAPIC 2
#define MP_INTSRC 3
#define MP_LINTSRC 4
#define MP_TIMER 5
#define MP_I2C_SPI 6
#define MP_IOLINK 7
#define MP_PMC 8
#define MP_BDEV 9
#define MP_GPIO_ACT 10
#define MP_IOEPIC 11
struct mpc_config_processor
{
unsigned char mpc_type; /* MP_PROCESSOR */
unsigned char mpc_apicid; /* Local APIC number */
unsigned char mpc_apicver; /* Its versions */
unsigned char mpc_cpuflag;
#define CPU_ENABLED 1 /* Processor is available */
#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */
unsigned int mpc_cpufeature;
#define CPU_STEPPING_MASK 0x0F
#define CPU_MODEL_MASK 0xF0
#define CPU_FAMILY_MASK 0xF00
unsigned int mpc_featureflag; /* CPUID feature value */
unsigned int mpc_cepictimerfreq; /* Frequency of CEPIC timer */
unsigned int mpc_reserved;
};
struct mpc_config_bus
{
unsigned char mpc_type; /* MP_BUS */
unsigned char mpc_busid;
unsigned char mpc_bustype[6];
};
/* List of Bus Type string values, Intel MP Spec. */
#define BUSTYPE_EISA "EISA"
#define BUSTYPE_ISA "ISA"
#define BUSTYPE_INTERN "INTERN" /* Internal BUS */
#define BUSTYPE_MCA "MCA"
#define BUSTYPE_VL "VL" /* Local bus */
#define BUSTYPE_PCI "PCI"
#define BUSTYPE_PCMCIA "PCMCIA"
#define BUSTYPE_CBUS "CBUS"
#define BUSTYPE_CBUSII "CBUSII"
#define BUSTYPE_FUTURE "FUTURE"
#define BUSTYPE_MBI "MBI"
#define BUSTYPE_MBII "MBII"
#define BUSTYPE_MPI "MPI"
#define BUSTYPE_MPSA "MPSA"
#define BUSTYPE_NUBUS "NUBUS"
#define BUSTYPE_TC "TC"
#define BUSTYPE_VME "VME"
#define BUSTYPE_XPRESS "XPRESS"
struct mpc_ioapic
{
unsigned char type; /* MP_IOAPIC */
unsigned char apicid;
unsigned char apicver;
unsigned char flags;
#define MPC_APIC_USABLE 0x01
unsigned long apicaddr;
};
struct mpc_ioepic {
unsigned char type; /* MP_IOEPIC */
unsigned char epicver;
unsigned short epicid;
unsigned short nodeid;
unsigned char reserved[2];
unsigned long epicaddr;
} __packed;
#define MPC_IOIRQFLAG_PO_BS 0x0 /* Bus specific */
#define MPC_IOIRQFLAG_PO_AH 0x1 /* Active high */
#define MPC_IOIRQFLAG_PO_RES 0x2 /* Reserved */
#define MPC_IOIRQFLAG_PO_AL 0x3 /* Active low */
#define MPC_IOIRQFLAG_EL_BS 0x0 /* Bus specific */
#define MPC_IOIRQFLAG_EL_FS 0x4 /* Trigger by front */
#define MPC_IOIRQFLAG_EL_RES 0x8 /* Reserved */
#define MPC_IOIRQFLAG_EL_LS 0xC /* Trigger by level */
struct mpc_intsrc
{
unsigned char type; /* MP_INTSRC */
unsigned char irqtype;
unsigned short irqflag;
unsigned char srcbus;
unsigned char srcbusirq;
unsigned char dstapic;
unsigned char dstirq;
};
enum mp_irq_source_types {
mp_INT = 0,
mp_NMI = 1,
mp_SMI = 2,
mp_ExtINT = 3,
mp_FixINT = 4 /* fixed interrupt pin for PCI */
};
#define MP_IRQDIR_DEFAULT 0
#define MP_IRQDIR_HIGH 1
#define MP_IRQDIR_LOW 3
#ifdef CONFIG_BIOS
#define MP_IRQ_POLARITY_DEFAULT 0x0
#define MP_IRQ_POLARITY_HIGH 0x1
#define MP_IRQ_POLARITY_LOW 0x3
#define MP_IRQ_POLARITY_MASK 0x3
#define MP_IRQ_TRIGGER_DEFAULT 0x0
#define MP_IRQ_TRIGGER_EDGE 0x4
#define MP_IRQ_TRIGGER_LEVEL 0xc
#define MP_IRQ_TRIGGER_MASK 0xc
#endif /* CONFIG_BIOS */
struct mpc_config_lintsrc
{
unsigned char mpc_type; /* MP_LINTSRC */
unsigned char mpc_irqtype;
unsigned short mpc_irqflag;
unsigned char mpc_srcbusid;
unsigned char mpc_srcbusirq;
unsigned char mpc_destapic;
#define MP_APIC_ALL 0xFF
unsigned char mpc_destapiclint;
};
/*
* Default configurations
*
* 1 2 CPU ISA 82489DX
* 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining
* 3 2 CPU EISA 82489DX
* 4 2 CPU MCA 82489DX
* 5 2 CPU ISA+PCI
* 6 2 CPU EISA+PCI
* 7 2 CPU MCA+PCI
*/
#define MAX_IRQ_SOURCES (128 * MAX_NUMIOHUBS)
/* (32 * nodes) for PCI, and one number is a special case */
#define MAX_MP_BUSSES 256
enum mp_bustype {
MP_BUS_ISA = 1,
MP_BUS_EISA,
MP_BUS_PCI,
MP_BUS_MCA
};
/*
* IO link configurations
*/
#define MAX_NUMIOLINKS MACH_MAX_NUMIOLINKS
#define MAX_NUMIOHUBS MAX_NUMIOLINKS
#define NODE_NUMIOLINKS MACH_NODE_NUMIOLINKS
typedef struct mpc_config_iolink {
unsigned char mpc_type; /* type is MP_IOLINK */
unsigned char mpc_iolink_type; /* type of IO link: IOHUB or RDMA */
unsigned short mpc_iolink_ver; /* version of IOHUB or RDMA */
unsigned int mpc_reserved; /* reserved */
	int node;		/* number of node: 0 - 3 */
int link; /* local number of link on node: 0-1 */
short bus_min; /* number of root bus on IOHUB */
short bus_max; /* number of max bus on IOHUB */
short apicid; /* IO-APIC id connected to the */
/* IOHUB */
short mpc_reserv16; /* reserved 16-bits value */
unsigned long pci_mem_start; /* PCI mem area for IOMMU v6 */
unsigned long pci_mem_end;
} mpc_config_iolink_t;
enum mp_iolink_type {
MP_IOLINK_IOHUB = 1, /* IO link is IOHUB */
MP_IOLINK_RDMA /* IO link is RDMA controller */
};
enum mp_iolink_ver {
MP_IOHUB_FPGA_VER = 0x10, /* IOHUB implemented on FPGA (Altera) */
};
#define MAX_MP_TIMERS 4
typedef struct mpc_config_timer {
unsigned char mpc_type; /* MP_TIMER */
unsigned char mpc_timertype;
unsigned char mpc_timerver;
unsigned char mpc_timerflags;
unsigned long mpc_timeraddr;
} mpc_config_timer_t;
enum mp_timertype {
	MP_PIT_TYPE,	/* programmable interval timer */
	MP_LT_TYPE,	/* Elbrus iohub timer */
	MP_HPET_TYPE,	/* high precision event timer */
	MP_RTC_TYPE,	/* real time clock */
	MP_PM_TYPE	/* power management timer */
};
#define MP_LT_VERSION 1
#define MP_LT_FLAGS 0
#define MP_RTC_VER_CY14B101P 2
#define MP_RTC_FLAG_SYNCINTR 0x01
typedef struct mpc_config_i2c {
unsigned char mpc_type; /* MP_I2C_SPI */
unsigned char mpc_max_channel;
unsigned char mpc_i2c_irq;
unsigned char mpc_revision;
unsigned long mpc_i2ccntrladdr;
unsigned long mpc_i2cdataaddr;
} mpc_config_i2c_t;
typedef struct mpc_config_pmc {
unsigned char mpc_type; /* MP_PMC */
unsigned char mpc_pmc_type; /* Izumrud or Processor-2 */
unsigned char mpc_pmc_version;
unsigned char mpc_pmc_vmax; /* VMAX: bits 40:34 in l_pmc.vrange */
unsigned char mpc_pmc_vmin; /* VMIN: bits 33:27 in l_pmc.vrange */
unsigned char mpc_pmc_fmax; /* FMAX: bits 26:20 in l_pmc.vrange */
unsigned char reserved[2];
unsigned long mpc_pmc_cntrl_addr; /* base of pmc regs */
unsigned long mpc_pmc_data_addr;
unsigned int mpc_pmc_data_size;
unsigned int mpc_pmc_p_state[4]; /* VID 15:9, DID 8:4, FID 3:0 */
unsigned int mpc_pmc_freq; /* Frequency in KHz */
} mpc_config_pmc_t;
typedef struct mpc_bdev {
unsigned char mpc_type; /* MP_BDEV */
unsigned char mpc_bustype; /* I2C or SPI */
unsigned char mpc_nodeid;
unsigned char mpc_linkid;
unsigned char mpc_busid;
unsigned char mpc_baddr;
unsigned char mpc_bdev_name[16];
} mpc_bdev_t;
#define MPC_BDEV_DTYPE_I2C 1
#define MPC_BDEV_DTYPE_SPI 2
typedef struct mpc_gpio_act {
unsigned char mpc_type; /* MP_GPIO_ACT */
unsigned char mpc_nodeid;
unsigned char mpc_linkid;
unsigned char mpc_busid;
unsigned char mpc_gpio_pin;
unsigned char mpc_pin_direction;
unsigned char mpc_gpio_act_name[16];
} mpc_gpio_act_t;
#define MP_GPIO_ACT_DIRECTION_IN 1
#define MP_GPIO_ACT_DIRECTION_OUT 2
#ifdef __KERNEL__
struct iohub_sysdata;
void mp_pci_add_resources(struct list_head *resources,
struct iohub_sysdata *sd);
#ifdef CONFIG_IOHUB_DOMAINS
struct iohub_sysdata;
extern int mp_find_iolink_root_busnum(int node, int link);
extern int mp_find_iolink_io_apicid(int node, int link);
extern int mp_fix_io_apicid(unsigned int src_apicid, unsigned int new_apicid);
void mp_pci_add_resources(struct list_head *resources,
struct iohub_sysdata *sd);
#else
static inline int mp_fix_io_apicid(unsigned int src_apicid,
unsigned int new_apicid)
{
return 0;
}
#endif /* CONFIG_IOHUB_DOMAINS */
extern int get_bus_to_io_apicid(int busnum);
#if defined(CONFIG_MCA) || defined(CONFIG_EISA)
extern int mp_bus_id_to_type [MAX_MP_BUSSES];
#endif
extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
extern unsigned int boot_cpu_physical_apicid;
extern int smp_found_config;
extern void find_smp_config(boot_info_t *bblock);
extern void get_smp_config(void);
extern int nr_ioapics;
extern int apic_version[MAX_LOCAL_APICS];
extern int mp_irq_entries;
extern struct mpc_intsrc mp_irqs [];
extern int mpc_default_type;
extern unsigned long mp_lapic_addr;
extern int pic_mode;
extern int using_apic_timer;
extern mpc_config_timer_t mp_timers[MAX_MP_TIMERS];
extern int nr_timers;
extern int rtc_model;
extern int rtc_syncintr;
#define early_iohub_online(node, link) mach_early_iohub_online((node), (link))
#define early_sic_init() mach_early_sic_init()
#endif /* __KERNEL__ */
#ifdef CONFIG_ENABLE_BIOS_MPTABLE
#define MPE_SYSTEM_ADDRESS_SPACE 0x80
#define MPE_BUS_HIERARCHY 0x81
#define MPE_COMPATIBILITY_ADDRESS_SPACE 0x82
struct mp_exten_config {
unsigned char mpe_type;
unsigned char mpe_length;
};
typedef struct mp_exten_config *mpe_t;
struct mp_exten_system_address_space {
unsigned char mpe_type;
unsigned char mpe_length;
unsigned char mpe_busid;
unsigned char mpe_address_type;
#define ADDRESS_TYPE_IO 0
#define ADDRESS_TYPE_MEM 1
#define ADDRESS_TYPE_PREFETCH 2
unsigned int mpe_address_base_low;
unsigned int mpe_address_base_high;
unsigned int mpe_address_length_low;
unsigned int mpe_address_length_high;
};
struct mp_exten_bus_hierarchy {
unsigned char mpe_type;
unsigned char mpe_length;
unsigned char mpe_busid;
unsigned char mpe_bus_info;
#define BUS_SUBTRACTIVE_DECODE 1
unsigned char mpe_parent_busid;
unsigned char reserved[3];
};
struct mp_exten_compatibility_address_space {
unsigned char mpe_type;
unsigned char mpe_length;
unsigned char mpe_busid;
unsigned char mpe_address_modifier;
#define ADDRESS_RANGE_SUBTRACT 1
#define ADDRESS_RANGE_ADD 0
unsigned int mpe_range_list;
#define RANGE_LIST_IO_ISA 0
/* X100 - X3FF
* X500 - X7FF
* X900 - XBFF
* XD00 - XFFF
*/
#define RANGE_LIST_IO_VGA 1
/* X3B0 - X3BB
* X3C0 - X3DF
* X7B0 - X7BB
* X7C0 - X7DF
* XBB0 - XBBB
* XBC0 - XBDF
* XFB0 - XFBB
 * XFC0 - XFDF
*/
};
/* Default local apic addr */
#define LAPIC_ADDR 0xFEE00000
#ifdef __KERNEL__
void *smp_next_mpc_entry(struct mpc_table *mc);
void *smp_next_mpe_entry(struct mpc_table *mc);
void smp_write_processor(struct mpc_table *mc,
unsigned char apicid, unsigned char apicver,
unsigned char cpuflag, unsigned int cpufeature,
unsigned int featureflag, unsigned int cepictimerfreq);
void smp_write_processors(struct mpc_table *mc,
unsigned int phys_cpu_num);
void smp_write_bus(struct mpc_table *mc,
unsigned char id, unsigned char *bustype);
void smp_write_ioapic(struct mpc_table *mc,
unsigned char id, unsigned char ver,
unsigned long apicaddr);
void smp_write_ioepic(struct mpc_table *mc,
unsigned short id, unsigned short nodeid,
unsigned char ver, unsigned long epicaddr);
void smp_write_iolink(struct mpc_table *mc,
int node, int link,
short bus_min, short bus_max,
short picid,
unsigned long pci_mem_start, unsigned long pci_mem_end);
void smp_write_intsrc(struct mpc_table *mc,
unsigned char irqtype, unsigned short irqflag,
unsigned char srcbus, unsigned char srcbusirq,
unsigned char dstapic, unsigned char dstirq);
void smp_write_lintsrc(struct mpc_table *mc,
unsigned char irqtype, unsigned short irqflag,
unsigned char srcbusid, unsigned char srcbusirq,
unsigned char destapic, unsigned char destapiclint);
void smp_write_address_space(struct mpc_table *mc,
unsigned char busid, unsigned char address_type,
unsigned int address_base_low, unsigned int address_base_high,
unsigned int address_length_low, unsigned int address_length_high);
void smp_write_bus_hierarchy(struct mpc_table *mc,
unsigned char busid, unsigned char bus_info,
unsigned char parent_busid);
void smp_write_compatibility_address_space(struct mpc_table *mc,
unsigned char busid, unsigned char address_modifier,
unsigned int range_list);
unsigned char smp_compute_checksum(void *v, int len);
void smp_write_floating_table(struct intel_mp_floating *mpf);
unsigned int write_smp_table(struct intel_mp_floating *mpf, unsigned int phys_cpu_num);
void smp_i2c_spi_timer(struct mpc_table *mc,
unsigned char timertype, unsigned char timerver,
unsigned char timerflags, unsigned long timeraddr);
void smp_i2c_spi_dev(struct mpc_table *mc, unsigned char max_channel,
unsigned char irq, unsigned long i2cdevaddr);
//#define MAX_CPUS 16 /* 16 way CPU system */
#endif /* __KERNEL__ */
/* A table (per mainboard) listing the initial apicid of each cpu. */
//extern unsigned int initial_apicid[MAX_CPUS];
#endif /* CONFIG_ENABLE_BIOS_MPTABLE */
int generic_processor_info(int apicid, int version);
#ifdef __KERNEL__
extern void print_bootblock(bootblock_struct_t *bootblock);
#endif /* __KERNEL__ */
#ifdef CONFIG_ACPI
extern void mp_register_ioapic(int id, unsigned long address, u32 gsi_base);
extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
u32 gsi);
extern void mp_config_acpi_legacy_irqs(void);
struct device;
extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level,
int active_high_low);
extern int acpi_probe_gsi(void);
#ifdef CONFIG_L_IO_APIC
extern int mp_find_ioapic(u32 gsi);
extern int mp_find_ioapic_pin(int ioapic, u32 gsi);
#endif
#else /* !CONFIG_ACPI: */
static inline int acpi_probe_gsi(void)
{
return 0;
}
#endif /* CONFIG_ACPI */
/* physid definitions */
/*
 * On e2k and sparc the number of local APICs equals the number of CPUs.
 * The number of IO-APICs is bounded by MAX_IO_APICS, and IO-APIC IDs may
 * be placed above the local APIC IDs or in holes among them, so
 * physid_t cannot be a synonym for cpumask_t.
*/
#include <linux/bitmap.h>
#define MAX_PHYSID_NUM (NR_CPUS + MAX_IO_APICS)
typedef struct physid_mask {
DECLARE_BITMAP(bits, MAX_PHYSID_NUM);
} physid_mask_t;
#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_PHYSID_NUM)
#define physid_set(physid, map) set_bit((physid), (map).bits)
#define physid_clear(physid, map) clear_bit((physid), (map).bits)
#define physid_isset(physid, map) test_bit((physid), (map).bits)
#define physid_test_and_set(physid, map) test_and_set_bit((physid), (map).bits)
#define physids_and(dst, src1, src2) \
	bitmap_and((dst).bits, (src1).bits, (src2).bits, MAX_PHYSID_NUM)
#define physids_or(dst, src1, src2) \
bitmap_or((dst).bits, (src1).bits, (src2).bits, MAX_PHYSID_NUM)
#define physids_clear(map) \
bitmap_zero((map).bits, MAX_PHYSID_NUM)
#define physids_complement(dst, src) \
bitmap_complement((dst).bits, (src).bits, MAX_PHYSID_NUM)
#define physids_empty(map) \
bitmap_empty((map).bits, MAX_PHYSID_NUM)
#define physids_equal(map1, map2) \
bitmap_equal((map1).bits, (map2).bits, MAX_PHYSID_NUM)
#define physids_weight(map) \
bitmap_weight((map).bits, MAX_PHYSID_NUM)
#define physids_shift_left(dst, src, n) \
bitmap_shift_left((dst).bits, (src).bits, (n), MAX_PHYSID_NUM)
static inline unsigned long physids_coerce(physid_mask_t *map)
{
return map->bits[0];
}
static inline void physids_promote(unsigned long physids, physid_mask_t *map)
{
physids_clear(*map);
map->bits[0] = physids;
}
static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
{
physids_clear(*map);
physid_set(physid, *map);
}
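/*
 * Examples (illustrative): physid_set_mask_of_physid(3, &map) leaves
 * exactly bit 3 set; physids_promote(0x5, &map) sets bits 0 and 2 in
 * the lowest word.
 */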
#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
extern physid_mask_t phys_cpu_present_map;
#endif /* __ASSEMBLY__ */
#endif /* __L_ASM_MPSPEC_H */

@ -0,0 +1,53 @@
#ifndef _ASM_L_MSIDEF_H
#define _ASM_L_MSIDEF_H
/*
* Constants for Intel APIC based MSI messages.
*/
/*
* Shifts for MSI data
*/
#define MSI_DATA_VECTOR_SHIFT 0
#define MSI_DATA_VECTOR_MASK 0x000000ff
#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & \
MSI_DATA_VECTOR_MASK)
#define MSI_DATA_DELIVERY_MODE_SHIFT 8
#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
#define MSI_DATA_LEVEL_SHIFT 14
#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
#define MSI_DATA_TRIGGER_SHIFT 15
#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
/*
* Shift/mask fields for msi address
*/
#define MSI_ADDR_DEST_MODE_SHIFT 2
#define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT)
#define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT)
#define MSI_ADDR_REDIRECTION_SHIFT 3
#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
/* dedicated cpu */
#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
/* lowest priority */
#define MSI_ADDR_DEST_ID_SHIFT 12
#define MSI_ADDR_DEST_ID_MASK 0x00ffff0
#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
MSI_ADDR_DEST_ID_MASK)
#define MSI_ADDR_EXT_DEST_ID(dest) ((dest) & 0xffffff00)
#define MSI_ADDR_IR_EXT_INT (1 << 4)
#define MSI_ADDR_IR_SHV (1 << 3)
#define MSI_ADDR_IR_INDEX1(index) ((index & 0x8000) >> 13)
#define MSI_ADDR_IR_INDEX2(index) ((index & 0x7fff) << 5)
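/*
 * Illustrative split: for remapping index 0x1234,
 * MSI_ADDR_IR_INDEX1(0x1234) = (0x1234 & 0x8000) >> 13 = 0 and
 * MSI_ADDR_IR_INDEX2(0x1234) = (0x1234 & 0x7fff) << 5 = 0x24680.
 */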
#endif /* _ASM_L_MSIDEF_H */

@ -0,0 +1,122 @@
/* Generic MTRR (Memory Type Range Register) ioctls.
Copyright (C) 1997-1999 Richard Gooch
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with this library; if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Richard Gooch may be reached by email at rgooch@atnf.csiro.au
The postal address is:
Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
*/
#ifndef _LINUX_MTRR_H
#define _LINUX_MTRR_H
#include <linux/ioctl.h>
#define MTRR_IOCTL_BASE 'M'
struct mtrr_sentry
{
unsigned long base; /* Base address */
unsigned long size; /* Size of region */
unsigned int type; /* Type of region */
};
struct mtrr_gentry
{
unsigned int regnum; /* Register number */
unsigned long base; /* Base address */
unsigned long size; /* Size of region */
unsigned int type; /* Type of region */
};
/* These are the various ioctls */
#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
/* These are the region types */
#define MTRR_TYPE_UNCACHABLE 0
#define MTRR_TYPE_WRCOMB 1
/*#define MTRR_TYPE_ 2*/
/*#define MTRR_TYPE_ 3*/
#define MTRR_TYPE_WRTHROUGH 4
#define MTRR_TYPE_WRPROT 5
#define MTRR_TYPE_WRBACK 6
#define MTRR_NUM_TYPES 7
#ifdef MTRR_NEED_STRINGS
static char *mtrr_strings[MTRR_NUM_TYPES] =
{
"uncachable", /* 0 */
"write-combining", /* 1 */
"?", /* 2 */
"?", /* 3 */
"write-through", /* 4 */
"write-protect", /* 5 */
"write-back", /* 6 */
};
#endif
#ifdef __KERNEL__
/* The following functions are for use by other drivers */
# ifdef CONFIG_MTRR
extern int mtrr_add (unsigned long base, unsigned long size,
unsigned int type, char increment);
extern int mtrr_add_page (unsigned long base, unsigned long size,
unsigned int type, char increment);
extern int mtrr_del (int reg, unsigned long base, unsigned long size);
extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
# else
static __inline__ int mtrr_add (unsigned long base, unsigned long size,
unsigned int type, char increment)
{
return -ENODEV;
}
static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
unsigned int type, char increment)
{
return -ENODEV;
}
static __inline__ int mtrr_del (int reg, unsigned long base,
unsigned long size)
{
return -ENODEV;
}
static __inline__ int mtrr_del_page (int reg, unsigned long base,
unsigned long size)
{
return -ENODEV;
}
# endif
/* The following functions are for initialisation: don't use them! */
extern int mtrr_init (void);
# if defined(CONFIG_SMP) && defined(CONFIG_MTRR)
extern void mtrr_init_boot_cpu (void);
extern void mtrr_init_secondary_cpu (void);
# endif
#endif
#endif /* _LINUX_MTRR_H */

@ -0,0 +1,22 @@
#ifndef _ASM_L_NMI_H
#define _ASM_L_NMI_H
#include <asm/irq.h>
#include <asm/io.h>
#ifdef ARCH_HAS_NMI_WATCHDOG
extern unsigned int nmi_watchdog;
#define NMI_NONE 0
#define NMI_IO_APIC 1
#define NMI_LOCAL_APIC 2
#define NMI_INVALID 3
#endif
void lapic_watchdog_stop(void);
int lapic_watchdog_init(unsigned nmi_hz);
int lapic_wd_event(unsigned nmi_hz);
unsigned lapic_adjust_nmi_hz(unsigned hz);
void stop_nmi(void);
void restart_nmi(void);
#endif /* _ASM_L_NMI_H */

@ -0,0 +1,47 @@
#ifndef _ASM_L_OF_DEVICE_H
#define _ASM_L_OF_DEVICE_H
#ifdef __KERNEL__
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <asm/sbus.h>
#include <linux/of.h>
/*
* The of_device is a kind of "base class" that is a superset of
* struct device for use by devices attached to an OF node and
* probed using OF properties.
*/
struct of_device;
struct of_device
{
char name[32];
struct of_device *parent;
struct device dev;
struct device_node *node;
struct resource resource[PROMREG_MAX];
unsigned int irqs[PROMINTR_MAX];
int num_irqs;
struct proc_dir_entry *pde; /* this node's proc directory */
int registered;
// void *sysdata;
int p2s_id;
// int slot;
// int portid;
int clock_freq;
};
extern void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name);
extern void of_iounmap(struct resource *res, void __iomem *base, unsigned long size);
extern struct device_node **l_allnodes;
#if 0
extern int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus);
extern void of_unregister_driver(struct of_platform_driver *drv);
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_L_OF_DEVICE_H */

@ -0,0 +1,159 @@
#ifndef _L_PCI_H
#define _L_PCI_H
#if !defined ___ASM_SPARC_PCI_H && !defined _E2K_PCI_H
# error Do not include "asm-l/pci.h" directly, use "linux/pci.h" instead
#endif
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#ifdef __KERNEL__
#define PCI_PROBE_BIOS 0x0001
#define PCI_PROBE_CONF1 0x0002
#define PCI_PROBE_CONF2 0x0004
#define PCI_PROBE_MMCONF 0x0008
#define PCI_PROBE_L 0x0010
#define PCI_PROBE_MASK 0x001f
#define PCI_NO_SORT 0x0100
#define PCI_BIOS_SORT 0x0200
#define PCI_NO_CHECKS 0x0400
#define PCI_USE_PIRQ_MASK 0x0800
#define PCI_ASSIGN_ROMS 0x1000
#define PCI_BIOS_IRQ_SCAN 0x2000
#define PCI_ASSIGN_ALL_BUSSES 0x4000
#undef CONFIG_CMD
#define CONFIG_CMD(bus, devfn, where) \
		((((bus) & 0xFF) << 20) | (((devfn) & 0xFF) << 12) | ((where) & 0xFFF))
#define L_IOHUB_ROOT_BUS_NUM 0x00
#define L_IOHUB_ROOT_SLOT 0x00 /* BSP IOHUB start slot (devfn) */
/* on root bus 0 */
#define SLOTS_PER_L_IOHUB 4 /* number of slots reserved per */
/* each IOHUB */
#ifndef L_IOHUB_SLOTS_NUM
#define L_IOHUB_SLOTS_NUM 2 /* number of slots (devfns) for */
/* each IOHUB on root bus */
#endif
extern int IOHUB_revision;
static inline int is_prototype(void)
{
return IOHUB_revision >= 0xf0;
}
extern unsigned long pirq_table_addr;
struct e2k_iommu;
struct pci_dev;
struct pci_bus;
enum pci_mmap_state;
struct pci_ops;
typedef struct iohub_sysdata {
#ifdef CONFIG_IOHUB_DOMAINS
int domain; /* IOHUB (PCI) domain */
int node; /* NUMA node */
int link; /* local number of IO link on the node */
#endif /* CONFIG_IOHUB_DOMAINS */
u32 pci_msi_addr_lo; /* MSI transaction address */
u32 pci_msi_addr_hi; /* MSI transaction upper address */
u8 revision; /* IOHUB revision */
u8 generation; /* IOHUB generation */
struct resource mem_space;
void *l_iommu;
} iohub_sysdata_t;
#define iohub_revision(pdev) ({ \
struct iohub_sysdata *sd = pdev->bus->sysdata; \
(sd->revision >> 1); \
})
#define iohub_generation(pdev) ({ \
struct iohub_sysdata *sd = pdev->bus->sysdata; \
sd->generation; \
})
#ifdef CONFIG_IOHUB_DOMAINS
#define pci_domain_nr(bus) ({ \
struct iohub_sysdata *sd = bus->sysdata; \
sd->domain; \
})
#define pci_proc_domain(bus) pci_domain_nr(bus)
static inline int pci_iohub_domain_to_slot(const int domain)
{
return L_IOHUB_ROOT_SLOT + domain * SLOTS_PER_L_IOHUB;
}
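/*
 * Example: with SLOTS_PER_L_IOHUB = 4, domain 2 maps to slot
 * L_IOHUB_ROOT_SLOT + 2 * 4 = 8 on the root bus.
 */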
/* Returns the node based on pci bus */
#define __pcibus_to_node(bus) ({ \
const struct iohub_sysdata *sd = bus->sysdata; \
sd->node; \
})
#define __pcibus_to_link(bus) ({ \
const struct iohub_sysdata *sd = bus->sysdata; \
sd->link; \
})
#else /* ! CONFIG_IOHUB_DOMAINS */
#define __pcibus_to_node(bus) 0 /* only one IOHUB on node #0 */
#define __pcibus_to_link(bus) 0
#endif /* CONFIG_IOHUB_DOMAINS */
/* Can be used to override the logic in pci_scan_bus for skipping
already-configured bus numbers - to be used for buggy BIOSes
or architectures with incomplete PCI setup by the loader */
#ifdef CONFIG_PCI
extern unsigned int pcibios_assign_all_busses(void);
#else
#define pcibios_assign_all_busses() 0
#endif
#define pcibios_scan_all_fns(a, b) 0
/* The next function lives in drivers/pci/probe.c and was updated only */
/* to support common root bus domains */
unsigned int pci_scan_root_child_bus(struct pci_bus *bus);
struct pci_bus * pcibios_scan_root(int bus);
/* scan a bus after allocating a iohub_sysdata for it */
extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops,
int node);
void __init pcibios_fixup_resources(struct pci_bus *pbus);
int pcibios_enable_resources(struct pci_dev *, int);
void pcibios_set_master(struct pci_dev *dev);
void pcibios_penalize_isa_irq(int irq, int active);
int l_pci_direct_init(void);
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
extern void (*pcibios_disable_irq)(struct pci_dev *dev);
extern raw_spinlock_t pci_config_lock;
extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
size_t count);
extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val,
size_t count);
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state);
#ifndef L_IOPORT_RESOURCE_OFFSET
#define L_IOPORT_RESOURCE_OFFSET 0UL
#endif
#ifndef L_IOMEM_RESOURCE_OFFSET
#define L_IOMEM_RESOURCE_OFFSET 0UL
#endif
#endif /* __KERNEL__ */
#endif /* _L_PCI_H */

@ -0,0 +1,7 @@
#ifndef _L_PCI_L_H
#define _L_PCI_L_H
extern unsigned int pci_probe;
#endif

@ -0,0 +1,34 @@
#ifndef _ASM_L_PCIE_FIXUP_H_
#define _ASM_L_PCIE_FIXUP_H_
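/*
 * Note: these byte-at-a-time replacements issue a readb() after every
 * writeb() in memset_io()/memcpy_toio(), forcing each posted write out
 * to the device before the next access.
 */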
#undef memset_io
#define memset_io(a,b,c) \
({ \
u64 i; \
for (i = 0; i != (c); i++) { \
writeb((b), (u8 *)(a) + i); \
readb((u8 *)(a) + i); \
} \
})
#undef memcpy_fromio
#define memcpy_fromio(a,b,c) \
({ \
u64 i; \
for (i = 0; i != (c); i++) { \
u8 t = readb((u8 *)(b) + i); \
*((u8 *)(a) + i) = t; \
} \
})
#undef memcpy_toio
#define memcpy_toio(a,b,c) \
({ \
u64 i; \
for (i = 0; i != (c); i++) { \
writeb(*((u8 *)(b) + i), (u8 *)(a) + i); \
readb((u8 *)(a) + i); \
} \
})
#endif /*_ASM_L_PCIE_FIXUP_H_*/
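/*
 * The overrides above replace the bulk MMIO helpers with strictly byte-wise
 * accesses, reading each byte back after every write (a common way to force
 * posted writes out to the device). A minimal usage sketch (hypothetical
 * helper; assumes a valid __iomem window mapped elsewhere):
 */
static inline void clear_device_window(void __iomem *win, u64 len)
{
	memset_io(win, 0, len);	/* expands to writeb() + readb() per byte */
}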

View File

@ -0,0 +1,67 @@
#ifndef _ASM_L_PERCPU_H_
#define _ASM_L_PERCPU_H_
#ifdef CONFIG_SMP
/*
* Define the "EARLY_PER_CPU" macros. These are used for some per_cpu
* variables that are initialized and accessed before there are per_cpu
* areas allocated.
*/
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
DEFINE_PER_CPU(_type, _name) = _initvalue; \
__typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
{ [0 ... NR_CPUS-1] = _initvalue }; \
__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue; \
__typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
{ [0 ... NR_CPUS-1] = _initvalue }; \
__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
EXPORT_PER_CPU_SYMBOL(_name)
#define DECLARE_EARLY_PER_CPU(_type, _name) \
DECLARE_PER_CPU(_type, _name); \
extern __typeof__(_type) *_name##_early_ptr; \
extern __typeof__(_type) _name##_early_map[]
#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
DECLARE_PER_CPU_READ_MOSTLY(_type, _name); \
extern __typeof__(_type) *_name##_early_ptr; \
extern __typeof__(_type) _name##_early_map[]
#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu) \
*(early_per_cpu_ptr(_name) ? \
&early_per_cpu_ptr(_name)[_cpu] : \
&per_cpu(_name, _cpu))
#else /* !CONFIG_SMP */
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
DEFINE_PER_CPU(_type, _name) = _initvalue
#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue
#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
EXPORT_PER_CPU_SYMBOL(_name)
#define DECLARE_EARLY_PER_CPU(_type, _name) \
DECLARE_PER_CPU(_type, _name)
#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
DECLARE_PER_CPU_READ_MOSTLY(_type, _name)
#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */
#endif /* !CONFIG_SMP */
#endif /* _ASM_L_PERCPU_H_ */
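/*
 * Usage sketch (hypothetical variable name, not part of the original
 * header): define the variable once, then read it through early_per_cpu(),
 * which uses the __initdata map while _early_ptr is still set and the real
 * per-cpu variable afterwards.
 */
DEFINE_EARLY_PER_CPU(int, cpu_to_node_map, 0);

static inline int node_of(int cpu)
{
	return early_per_cpu(cpu_to_node_map, cpu);
}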

View File

@ -0,0 +1,343 @@
#ifndef __ASM_L_PIC_H
#define __ASM_L_PIC_H
/*
* Choose between PICs in arch/l. If CONFIG_EPIC=n, APIC is chosen statically.
* If CONFIG_EPIC=y (only on e2k), the PIC is chosen dynamically based on CPU_FEAT_EPIC
*/
extern int first_system_vector;
extern int apic_get_vector(void);
#ifdef CONFIG_EPIC
#include <asm/apic.h>
#include <asm/epic.h>
#include <asm/machdep.h>
static inline unsigned int read_pic_id(void)
{
if (cpu_has_epic())
return read_epic_id();
else
return read_apic_id();
}
extern void epic_processor_info(int epicid, int version,
unsigned int cepic_freq);
extern int generic_processor_info(int apicid, int version);
static inline void pic_processor_info(int picid, int picver, unsigned int freq)
{
if (cpu_has_epic())
epic_processor_info(picid, picver, freq);
else
generic_processor_info(picid, picver);
}
extern int get_cepic_timer_frequency(void);
static inline int get_pic_timer_frequency(void)
{
if (cpu_has_epic())
return get_cepic_timer_frequency();
else
return -1; /* standard constant value */
}
/* IO-APIC definitions */
struct irq_data;
extern void ioapic_ack_epic_edge(struct irq_data *data);
extern void ack_apic_edge(struct irq_data *data);
static inline void ioapic_ack_pic_edge(struct irq_data *data)
{
if (cpu_has_epic())
ioapic_ack_epic_edge(data);
else
ack_apic_edge(data);
}
extern void ioapic_ack_epic_level(struct irq_data *data);
extern void ack_apic_level(struct irq_data *data);
static inline void ioapic_ack_pic_level(struct irq_data *data)
{
if (cpu_has_epic())
ioapic_ack_epic_level(data);
else
ack_apic_level(data);
}
struct irq_chip;
extern struct irq_chip ioepic_to_apic_chip;
static inline bool irqchip_is_ioepic_to_apic(struct irq_chip *chip)
{
return chip == &ioepic_to_apic_chip;
}
/* IRQ definitions */
#ifdef CONFIG_IRQ_WORK
extern void epic_irq_work_raise(void);
extern void apic_irq_work_raise(void);
static inline void pic_irq_work_raise(void)
{
if (cpu_has_epic())
epic_irq_work_raise();
else
apic_irq_work_raise();
}
#endif
#ifdef CONFIG_SMP
extern void epic_send_call_function_ipi_mask(const struct cpumask *mask);
extern void apic_send_call_function_ipi_mask(const struct cpumask *mask);
static inline void pic_send_call_function_ipi_mask(const struct cpumask *mask)
{
if (cpu_has_epic())
epic_send_call_function_ipi_mask(mask);
else
apic_send_call_function_ipi_mask(mask);
}
extern void epic_send_call_function_single_ipi(int cpu);
extern void apic_send_call_function_single_ipi(int cpu);
static inline void pic_send_call_function_single_ipi(int cpu)
{
if (cpu_has_epic())
epic_send_call_function_single_ipi(cpu);
else
apic_send_call_function_single_ipi(cpu);
}
extern void epic_smp_send_reschedule(int cpu);
extern void apic_smp_send_reschedule(int cpu);
static inline void pic_send_reschedule(int cpu)
{
if (cpu_has_epic())
epic_smp_send_reschedule(cpu);
else
apic_smp_send_reschedule(cpu);
}
#endif
struct pt_regs;
extern noinline notrace void epic_do_nmi(struct pt_regs *regs);
extern noinline notrace void apic_do_nmi(struct pt_regs *regs);
static inline void pic_do_nmi(struct pt_regs *regs)
{
if (cpu_has_epic())
epic_do_nmi(regs);
else
apic_do_nmi(regs);
}
static inline void ack_pic_irq(void)
{
if (cpu_has_epic())
ack_epic_irq();
else
ack_APIC_irq();
}
/* For do_postpone_tick() */
extern void cepic_timer_interrupt(void);
extern void local_apic_timer_interrupt(void);
static inline void local_pic_timer_interrupt(void)
{
if (cpu_has_epic())
cepic_timer_interrupt();
else
local_apic_timer_interrupt();
}
extern int print_local_APICs(bool force);
extern int print_epics(bool force);
static inline int print_local_pics(bool force)
{
if (cpu_has_epic())
return print_epics(force);
else
return print_local_APICs(force);
}
struct pci_dev;
extern int native_setup_msi_irqs_epic(struct pci_dev *dev, int nvec, int type);
extern int native_setup_msi_irqs_apic(struct pci_dev *dev, int nvec, int type);
static inline int setup_msi_irqs_pic(struct pci_dev *dev, int nvec, int type)
{
if (cpu_has_epic())
return native_setup_msi_irqs_epic(dev, nvec, type);
else
return native_setup_msi_irqs_apic(dev, nvec, type);
}
extern void native_teardown_msi_irq_epic(unsigned int irq);
extern void native_teardown_msi_irq_apic(unsigned int irq);
static inline void teardown_msi_irq_pic(unsigned int irq)
{
if (cpu_has_epic())
native_teardown_msi_irq_epic(irq);
else
native_teardown_msi_irq_apic(irq);
}
extern void __init_recv setup_secondary_epic_clock(void);
extern void setup_secondary_APIC_clock(void);
static inline void __init_recv setup_secondary_pic_clock(void)
{
if (cpu_has_epic())
setup_secondary_epic_clock();
else
setup_secondary_APIC_clock();
}
extern int epic_get_vector(void);
static inline int pic_get_vector(void)
{
if (cpu_has_epic())
return epic_get_vector();
else
return apic_get_vector();
}
extern int ioepic_pin_to_irq_num(unsigned int pin, struct pci_dev *dev);
extern int ioepic_pin_to_msi_ioapic_irq(unsigned int pin, struct pci_dev *dev);
static inline int ioepic_pin_to_irq_pic(unsigned int pin, struct pci_dev *dev)
{
if (cpu_has_epic())
return ioepic_pin_to_irq_num(pin, dev);
else
return ioepic_pin_to_msi_ioapic_irq(pin, dev);
}
static inline void __init setup_boot_pic_clock(void)
{
if (cpu_has_epic())
setup_boot_epic_clock();
else
setup_boot_APIC_clock();
}
extern void __init init_apic_mappings(void);
static inline void __init init_pic_mappings(void)
{
if (!cpu_has_epic())
return init_apic_mappings();
}
extern void setup_cepic(void);
#else /* !(CONFIG_EPIC) */
#include <asm/apic.h>
static inline unsigned int read_pic_id(void)
{
return read_apic_id();
}
extern int generic_processor_info(int apicid, int version);
static inline void pic_processor_info(int picid, int picver, unsigned int freq)
{
generic_processor_info(picid, picver);
}
static inline int get_pic_timer_frequency(void)
{
return -1; /* standard constant value */
}
/* IO-APIC definitions */
struct irq_data;
extern void ack_apic_edge(struct irq_data *data);
static inline void ioapic_ack_pic_edge(struct irq_data *data)
{
ack_apic_edge(data);
}
extern void ack_apic_level(struct irq_data *data);
static inline void ioapic_ack_pic_level(struct irq_data *data)
{
ack_apic_level(data);
}
struct irq_chip;
static inline bool irqchip_is_ioepic_to_apic(struct irq_chip *chip)
{
return 0;
}
/* IRQ definitions */
extern void apic_irq_work_raise(void);
static inline void pic_irq_work_raise(void)
{
apic_irq_work_raise();
}
extern void apic_send_call_function_ipi_mask(const struct cpumask *mask);
static inline void pic_send_call_function_ipi_mask(const struct cpumask *mask)
{
apic_send_call_function_ipi_mask(mask);
}
extern void apic_send_call_function_single_ipi(int cpu);
static inline void pic_send_call_function_single_ipi(int cpu)
{
apic_send_call_function_single_ipi(cpu);
}
extern void apic_smp_send_reschedule(int cpu);
static inline void pic_send_reschedule(int cpu)
{
apic_smp_send_reschedule(cpu);
}
struct pt_regs;
extern noinline notrace void apic_do_nmi(struct pt_regs *regs);
static inline void pic_do_nmi(struct pt_regs *regs)
{
apic_do_nmi(regs);
}
static inline void ack_pic_irq(void)
{
ack_APIC_irq();
}
/* For do_postpone_tick() */
extern void local_apic_timer_interrupt(void);
static inline void local_pic_timer_interrupt(void)
{
local_apic_timer_interrupt();
}
extern int print_local_APICs(bool force);
static inline int print_local_pics(bool force)
{
return print_local_APICs(force);
}
struct pci_dev;
extern int native_setup_msi_irqs_apic(struct pci_dev *dev, int nvec, int type);
static inline int setup_msi_irqs_pic(struct pci_dev *dev, int nvec, int type)
{
return native_setup_msi_irqs_apic(dev, nvec, type);
}
extern void native_teardown_msi_irq_apic(unsigned int irq);
static inline void teardown_msi_irq_pic(unsigned int irq)
{
native_teardown_msi_irq_apic(irq);
}
static inline void __init setup_boot_pic_clock(void)
{
setup_boot_APIC_clock();
}
extern void __init init_apic_mappings(void);
static inline void __init init_pic_mappings(void)
{
return init_apic_mappings();
}
#endif /* !(CONFIG_EPIC) */
#endif /* __ASM_L_PIC_H */
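/*
 * Every wrapper above follows the same dispatch shape, so extending the
 * interface is mechanical. Sketch of a hypothetical new wrapper
 * (pic_frobnicate and its two backends are made up for illustration):
 */
extern void epic_frobnicate(void);	/* hypothetical */
extern void apic_frobnicate(void);	/* hypothetical */
static inline void pic_frobnicate(void)
{
#ifdef CONFIG_EPIC
	if (cpu_has_epic())
		epic_frobnicate();
	else
#endif
		apic_frobnicate();
}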

View File

@ -0,0 +1,460 @@
/*
* include/asm-l/serial.h
*/
#ifndef _L_SERIAL_H
#define _L_SERIAL_H
/*
* This assumes you have a 1.8432 MHz clock for your UART.
*
* It'd be nice if someone built a serial card with a 24.576 MHz
* clock, since the 16550A is capable of handling a top speed of 1.5
* megabits/second; but this requires the faster clock.
*/
#define BASE_BAUD ( 1843200 / 16 )
/* Standard COM flags (except for COM4, because of the 8514 problem) */
#ifdef CONFIG_SERIAL_DETECT_IRQ
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
#else
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
#endif
#ifdef CONFIG_SERIAL_MANY_PORTS
#define FOURPORT_FLAGS ASYNC_FOURPORT
#define ACCENT_FLAGS 0
#define BOCA_FLAGS 0
#define HUB6_FLAGS 0
#define RS_TABLE_SIZE 64
#else
#define RS_TABLE_SIZE
#endif
#define NS16550_SERIAL_PORT_0 0x3f8
#define NS16550_SERIAL_PORT_1 0x2f8
#define NS16550_SERIAL_PORT_2 0x3e8
#define NS16550_SERIAL_PORT_3 0x2e8
#ifdef CONFIG_E2K
#define SERIAL_PORT_DFNS \
/* UART CLK PORT IRQ FLAGS */ \
{ 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
{ 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
{ 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
{ 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
#endif
#define AM85C30_RES_Tx_P 0x28
#define AM85C30_EXT_INT_ENAB 0x01
#define AM85C30_TxINT_ENAB 0x02
#define AM85C30_RxINT_MASK 0x18
/* AM85C30 WRITE Registers */
#define AM85C30_WR0 0x00
#define AM85C30_WR1 0x01
#define AM85C30_WR2 0x02
#define AM85C30_WR3 0x03
#define AM85C30_WR4 0x04
#define AM85C30_WR5 0x05
#define AM85C30_WR6 0x06
#define AM85C30_WR7 0x07
#define AM85C30_WR8 0x08
#define AM85C30_WR9 0x09
#define AM85C30_WR10 0x0a
#define AM85C30_WR11 0x0b
#define AM85C30_WR12 0x0c
#define AM85C30_WR13 0x0d
#define AM85C30_WR14 0x0e
#define AM85C30_WR15 0x0f
/* READ (Status) Registers */
#define AM85C30_RR0 0x00
#define AM85C30_RR1 0x01
#define AM85C30_RR2 0x02
#define AM85C30_RR3 0x03
#define AM85C30_RR8 0x08
#define AM85C30_RR10 0x0a
#define AM85C30_RR12 0x0c
#define AM85C30_RR13 0x0d
#define AM85C30_D0 (0x01 << 0)
#define AM85C30_D1 (0x01 << 1)
#define AM85C30_D2 (0x01 << 2)
#define AM85C30_D3 (0x01 << 3)
#define AM85C30_D4 (0x01 << 4)
#define AM85C30_D5 (0x01 << 5)
#define AM85C30_D6 (0x01 << 6)
#define AM85C30_D7 (0x01 << 7)
/* WR0 */
/* D2,D1,D0
* Register Access Pointer
*
* 000 - N0, [N8]*
* 001 - N1, [N9]*
* 010 - N2, [N10]*
* 011 - N3, [N11]*
* 100 - N4, [N12]*
* 101 - N5, [N13]*
* 110 - N6, [N14]*
* 111 - N7, [N15]*
*
* if Point High Register Group = 1
*
* D5,D4,D3
*
* SCC Command
*
* 000 - Null Code
* 001 - Point High Register Group
* 010 - Reset Ext/Status Interrupts
* 011 - Send Abort
* 100 - Enable Int. on Next Rx Character
* 101 - Reset Tx Int. Pending
* 110 - Error Reset
* 111 - Reset Highest IUS
*
* D7,D6
* SCC Command
*
* 00 - Null Code
* 01 - Reset Rx CRC Checker
* 10 - Reset Tx CRC Generator
* 11 - Reset Tx Underrun/EOM Latch
*/
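/*
 * Illustrative sketch (not part of the original header): WR0 acts as a
 * register pointer, so accessing any other WRn/RRn is a two-step operation
 * on the control port. The helper names and the __iomem argument are
 * hypothetical.
 */
static inline void am85c30_write_reg(void __iomem *ctrl, u8 reg, u8 val)
{
	writeb(reg, ctrl);	/* step 1: select the register via WR0 */
	writeb(val, ctrl);	/* step 2: write the selected register */
}

static inline u8 am85c30_read_reg(void __iomem *ctrl, u8 reg)
{
	writeb(reg, ctrl);	/* select the register via WR0 */
	return readb(ctrl);	/* read the selected register */
}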
/* WR1 */
/* D0
* Ext. Int. Enable
* D1
* Tx Int. Enable
* D2
* Parity is Special Condition
* D4,D3
* Rx Int Mode
*
* 00 - Rx Int Disable
* 01 - Rx Int on First Char. or Special Condition
* 10 - Int on All Rx Char. or Special Condition
* 11 - Rx Int. on Special Condition Only
* D5
* Wait/DMA Request on Receive/Transmit
* D6
* Wait/DMA Request Function
* D7
* Wait/DMA Request Enable
*/
/* WR2 */
/* D7 - D0
* Interrupt Vector
*/
/* WR3 */
/* D0
* Rx Enable
* D1
* Sync Character Load Inhibit
* D2
* Address Search Mode (SDLC)
* D3
* Rx CRC Enable
* D4
* Enter Hunt Mode
* D5
* Auto Enable
* D7,D6
*
* 00 - Rx 5 Bits / Character
* 01 - Rx 6 Bits / Character
* 10 - Rx 7 Bits / Character
* 11 - Rx 8 Bits / Character
*/
/* WR4 */
/* D0
* ParityEnable
* D1
* Parity Even(0) / Odd(1)
* D3,D2
*
* 00 - Sync Modes Enable
* 01 - 1 Stop Bit / Character
* 10 - 1.5 Stop Bits / Character
* 11 - 2 Stop Bits / Character
* D5,D4
*
* 00 - 8-Bit Sync Character
* 01 - 16-Bit Sync Character
* 10 - SDLC Mode
* 11 - External Sync Mode
* D7,D6
*
* 00 - X1 Clock Mode
* 01 - X16 Clock Mode
* 10 - X32 Clock Mode
* 11 - X64 Clock Mode
*/
/* WR5 */
/* D0
* Tx CRC Enable
* D1
* RTS
* D2
* SDLC-/CRC-16
* D3
* Tx Enable
* D4
* Send Break
* D6,D5
*
* 00 - Tx 5 Bits / Character
* 01 - Tx 6 Bits / Character
* 10 - Tx 7 Bits / Character
* 11 - Tx 8 Bits / Character
* D7
* DTR
*/
/* WR6 */
/* D5-D0
* xN constant
* D7,D6
* Reserved (not used in asynchronous mode)
*/
/* WR7 */
/* D6-D0
* Reserved (not used in asynchronous mode)
* D7
* xN Mode Enable
*/
/* WR8 */
/* D7-D0
* Transmit Buffer
*/
/* WR9 */
/* D0
* Vector Includes Status
* D1
* No Vector
* D2
* Disable Lower Chain
* D3
* Master Interrupt Enable
* D4
* Status High/Low_
* D5
* Interrupt Masking Without INTACK_
* D7-D6
*
* 00 - No Reset
* 01 - Channel B Reset
* 10 - Channel A Reset
* 11 - Force Hardware Reset
*/
/* WR10 */
/* D0
* 6 bit / 8 bit SYNC
* D1
* Loop Mode
* D2
* Abort/Flag on Underrun
* D3
* Mark/Flag Idle
* D4
* Go Active on Poll
* D6-D5
*
* 00 - NRZ
* 01 - NRZI
* 10 - FM1 (Transition = 1)
* 11 - FM0 (Transition = 0)
* D7
* CRC Preset '1' or '0'
*/
/* WR11 */
/* D1-D0
*
* 00 - TRxC Out = XTAL output
* 01 - TRxC Out = Transmit Clock
* 10 - TRxC Out = BRG output
* 11 - TRxC Out = DPLL output
* D2
* TRxC O/I
* D4-D3
*
* 00 - Transmit Clock = RTxC pin
* 01 - Transmit Clock = TRxC pin
* 10 - Transmit Clock = BRG output
* 11 - Transmit Clock = DPLL output
* D6-D5
*
* 00 - Receive Clock = RTxC pin
* 01 - Receive Clock = TRxC pin
* 10 - Receive Clock = BRG output
* 11 - Receive Clock = DPLL output
* D7
* RTxC XTAL / NO XTAL
*/
/* WR12 */
/* D7-D0
* Lower Byte of Time Constant
*/
/* WR13 */
/* D7-D0
* Upper Byte of Time Constant
*/
/* WR14 */
/* D0
* BRG Enable
* D1
* BRG Source
* D2
* DTR / REQUESTt Function
* D3
* Auto Echo
* D4
* Local Loopback
* D7-D5
*
* 000 - Null Command
* 001 - Enter Search Mode
* 010 - Reset Missing Clock
* 011 - Disable DPLL
* 100 - Set Source = BR Generator
* 101 - Set Source = RTxC_
* 110 - Set FM Mode
* 111 - Set NRZI Mode
*/
/* WR15 */
/* D0
* SDLC/HDLC Enhancement Enable
* D1
* Zero Count IE (Interrupt Enable)
* D2
* 10 x 19-bit Frame Status FIFO Enable
* D3
* DCD IE
* D4
* Sync/Hunt IE
* D5
* CTS IE
* D6
* Tx Underrun / EOM IE
* D7
* Break/Abort IE
*/
/* RR0 */
/* D0
* Rx Character Available
* D1
* Zero Count
* D2
* Tx Buffer Empty
* D3
* DCD
* D4
* Sync/Hunt
* D5
* CTS
* D6
* Tx Underrun / EOM
* D7
* Break/Abort
*/
/* RR1 */
/* D0
* All Sent
* D1
* Residue Code 2
* D2
* Residue Code 1
* D3
* Residue Code 0
* D4
* Parity Error
* D5
* Rx Overrun Error
* D6
* CRC / Framing Error
* D7
* End of Frame (SDLC)
*/
/* RR2 */
/* D7-D0
* Interrupt Vector
*
* Channel A RR2 = WR2
* Channel B RR2 = Interrupt Vector Modified*
*
* *
* D3 D2 D1 Status High/Low = 0
* D4 D5 D6 Status High/Low = 1
*
* 0 0 0 Ch B Transmit Buffer Empty
* 0 0 1 Ch B External/Status Change
* 0 1 0 Ch B Receive Char. Available
* 0 1 1 Ch B Special Receive Condition
* 1 0 0 Ch A Transmit Buffer Empty
* 1 0 1 Ch A External/Status Change
* 1 1 0 Ch A Receive Char. Available
* 1 1 1 Ch A Special Receive Condition
*/
/* RR3 */
/* D0
* Channel B Ext/Status IP (Interrupt Pending)
* D1
* Channel B Tx IP
* D2
* Channel B Rx IP
* D3
* Channel A Ext/Status IP
* D4
* Channel A Tx IP
* D5
* Channel A Rx IP
* D7-D6
* Always 00
*/
/* RR8 */
/* D7-D0
* Receive Buffer
*/
/* RR10 */
/* D7-D0
* Reserved (not used in asynchronous mode)
*/
/* RR12 */
/* D7-D0
* Lower Byte of Time Constant
*/
/* RR13 */
/* D7-D0
* Upper Byte of Time Constant
*/
#endif /* ! _L_SERIAL_H */

View File

@ -0,0 +1,12 @@
#ifndef _L_SETUP_H
#define _L_SETUP_H
#include <linux/pci.h>
int l_set_ethernet_macaddr(struct pci_dev *pdev, char *macaddr);
extern int (*l_set_boot_mode)(int);
int l_setup_arch(void);
void l_setup_vga(void);
unsigned long measure_cpu_freq(int cpu);
#endif /* _L_SETUP_H */

View File

@ -0,0 +1,332 @@
#ifndef _L_SIC_REGS_H_
#define _L_SIC_REGS_H_
#ifdef __KERNEL__
#include <linux/topology.h>
#include <asm/types.h>
#include <asm/sic_regs.h>
#undef DEBUG_EARLY_NBSR_MODE
#undef DebugENBSR
#define DEBUG_EARLY_NBSR_MODE 0 /* early NBSR access */
#ifndef CONFIG_BOOT_E2K
#define DebugENBSR(fmt, args...) \
({ if (DEBUG_EARLY_NBSR_MODE) \
printk(fmt, ##args); })
#else /* CONFIG_BOOT_E2K */
#define DebugENBSR(fmt, args...) \
({ if (DEBUG_EARLY_NBSR_MODE) \
rom_printk(fmt, ##args); })
#endif /* ! CONFIG_BOOT_E2K */
#undef DEBUG_NBSR_MODE
#undef DebugNBSR
#define DEBUG_NBSR_MODE 0 /* NBSR access */
#define DebugNBSR(fmt, args...) \
({ if (DEBUG_NBSR_MODE) \
printk(fmt, ##args); })
#ifndef __ASSEMBLY__
static inline unsigned int
early_sic_read_node_nbsr_reg(int node_id, int reg_offset)
{
unsigned char *node_nbsr;
unsigned char *addr;
unsigned int reg_value;
node_nbsr = THE_NODE_NBSR_PHYS_BASE(node_id);
addr = node_nbsr + reg_offset;
reg_value = nbsr_early_read(addr);
DebugENBSR("early_sic_read_node_nbsr_reg() node %d reg 0x%x read 0x%x "
"from 0x%px\n",
node_id, reg_offset, reg_value, addr);
return reg_value;
}
static inline void
early_sic_write_node_nbsr_reg(int node_id, int reg_offset, unsigned int reg_val)
{
unsigned char *node_nbsr;
unsigned char *addr;
node_nbsr = THE_NODE_NBSR_PHYS_BASE(node_id);
DebugENBSR("early_sic_write_node_nbsr_reg() node NBSR is %px\n",
node_nbsr);
addr = node_nbsr + reg_offset;
nbsr_early_write(reg_val, addr);
DebugENBSR("early_sic_write_node_nbsr_reg() node %d reg 0x%x write "
"0x%x to 0x%px\n",
node_id, reg_offset, reg_val, addr);
}
static inline unsigned int
early_sic_read_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset)
{
unsigned int reg_value;
#ifndef CONFIG_BOOT_E2K
if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) {
printk(KERN_ERR "sic_read_node_iolink_nbsr_reg() bad IO link "
"# %d (< 0 or >= max %d)\n",
io_link, MACH_NODE_NUMIOLINKS);
return (unsigned int)-1;
}
#endif /* ! CONFIG_BOOT_E2K */
reg_value = early_sic_read_node_nbsr_reg(node_id,
SIC_io_reg_offset(io_link, reg_offset));
return reg_value;
}
static inline void
early_sic_write_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset,
unsigned int reg_value)
{
#ifndef CONFIG_BOOT_E2K
if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) {
printk(KERN_ERR "early_sic_write_node_iolink_nbsr_reg() bad "
"IO link # %d (< 0 or >= max %d)\n",
io_link, MACH_NODE_NUMIOLINKS);
return;
}
#endif /* ! CONFIG_BOOT_E2K */
early_sic_write_node_nbsr_reg(node_id,
SIC_io_reg_offset(io_link, reg_offset), reg_value);
}
static inline unsigned int
sic_read_node_nbsr_reg(int node_id, int reg_offset)
{
unsigned char *node_nbsr;
unsigned int reg_value;
node_nbsr = sic_get_node_nbsr_base(node_id);
if (node_nbsr == NULL) {
panic("sic_read_node_nbsr_reg() node #%d has not mapping "
"to SIC(NBSR) registers\n", node_id);
}
reg_value = nbsr_read(&node_nbsr[reg_offset]);
DebugNBSR("sic_read_node_nbsr_reg() node %d reg 0x%x read 0x%x "
"from 0x%px\n",
node_id, reg_offset, reg_value,
&node_nbsr[reg_offset]);
return reg_value;
}
static inline unsigned long
sic_readll_node_nbsr_reg(int node_id, int reg_offset)
{
unsigned char *node_nbsr;
unsigned long reg_value;
node_nbsr = sic_get_node_nbsr_base(node_id);
if (node_nbsr == NULL) {
panic("sic_readll_node_nbsr_reg() node #%d has not mapping "
"to SIC(NBSR) registers\n", node_id);
}
reg_value = nbsr_readll(&node_nbsr[reg_offset]);
DebugNBSR("sic_readll_node_nbsr_reg() node %d reg 0x%x read 0x%lx "
"from 0x%px\n",
node_id, reg_offset, reg_value,
&node_nbsr[reg_offset]);
return reg_value;
}
static inline u16
sic_readw_node_nbsr_reg(int node_id, int reg_offset)
{
unsigned char *node_nbsr;
u16 reg_value;
node_nbsr = sic_get_node_nbsr_base(node_id);
if (node_nbsr == NULL) {
panic("sic_readw_node_nbsr_reg() node #%d has not mapping "
"to SIC(NBSR) registers\n", node_id);
}
reg_value = nbsr_readw(&node_nbsr[reg_offset]);
DebugNBSR("sic_readw_node_nbsr_reg() node %d reg 0x%x read 0x%x "
"from 0x%px\n",
node_id, reg_offset, reg_value,
&node_nbsr[reg_offset]);
return reg_value;
}
static inline unsigned int
sic_read_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset)
{
unsigned int reg_value;
if (!HAS_MACHINE_L_SIC) {
printk(KERN_ERR "sic_read_node_iolink_nbsr_reg() machine has "
"not SIC\n");
return (unsigned int)-1;
}
if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) {
printk(KERN_ERR "sic_read_node_iolink_nbsr_reg() bad IO link "
"# %d (< 0 or >= max %d)\n",
io_link, MACH_NODE_NUMIOLINKS);
return (unsigned int)-1;
}
reg_value = sic_read_node_nbsr_reg(node_id,
SIC_io_reg_offset(io_link, reg_offset));
return reg_value;
}
static inline unsigned long
sic_readll_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset)
{
unsigned long reg_value;
if (!HAS_MACHINE_L_SIC) {
printk(KERN_ERR "sic_readll_node_iolink_nbsr_reg() machine has "
"not SIC\n");
return (unsigned int)-1;
}
if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) {
printk(KERN_ERR "sic_readll_node_iolink_nbsr_reg() bad IO link "
"# %d (< 0 or >= max %d)\n",
io_link, MACH_NODE_NUMIOLINKS);
return (unsigned long)-1;
}
reg_value = sic_readll_node_nbsr_reg(node_id,
SIC_io_reg_offset(io_link, reg_offset));
return reg_value;
}
static inline void
sic_write_node_nbsr_reg(int node_id, int reg_offset, unsigned int reg_value)
{
unsigned char *node_nbsr;
node_nbsr = sic_get_node_nbsr_base(node_id);
if (node_nbsr == NULL) {
panic("sic_write_node_nbsr_reg() node #%d has not mapping "
"to SIC(NBSR) registers\n", node_id);
}
nbsr_write(reg_value, &node_nbsr[reg_offset]);
DebugNBSR("sic_write_node_nbsr_reg() node %d reg 0x%x writenn 0x%x to "
"0x%px\n",
node_id, reg_offset, reg_value, &node_nbsr[reg_offset]);
}
static inline void sic_write_node_nbsr_reg_relaxed(int node_id, int reg_offset,
unsigned int reg_value)
{
unsigned char *node_nbsr;
node_nbsr = sic_get_node_nbsr_base(node_id);
if (node_nbsr == NULL) {
panic("sic_write_node_nbsr_reg() node #%d has not mapping "
"to SIC(NBSR) registers\n", node_id);
}
nbsr_write_relaxed(reg_value, &node_nbsr[reg_offset]);
DebugNBSR("sic_write_node_nbsr_reg() node %d reg 0x%x writenn 0x%x to "
"0x%px\n",
node_id, reg_offset, reg_value, &node_nbsr[reg_offset]);
}
static inline void
sic_writell_node_nbsr_reg(int node_id, int reg_offset, unsigned long reg_value)
{
unsigned char *node_nbsr;
node_nbsr = sic_get_node_nbsr_base(node_id);
if (node_nbsr == NULL) {
panic("sic_writell_node_nbsr_reg() node #%d has not mapping "
"to SIC(NBSR) registers\n", node_id);
}
nbsr_writell(reg_value, &node_nbsr[reg_offset]);
DebugNBSR("sic_writell_node_nbsr_reg() node %d reg 0x%x written 0x%lx to "
"0x%px\n",
node_id, reg_offset, reg_value, &node_nbsr[reg_offset]);
}
static inline void
sic_writew_node_nbsr_reg(int node_id, int reg_offset, u16 reg_value)
{
unsigned char *node_nbsr;
node_nbsr = sic_get_node_nbsr_base(node_id);
if (node_nbsr == NULL) {
panic("sic_writew_node_nbsr_reg() node #%d has not mapping "
"to SIC(NBSR) registers\n", node_id);
}
nbsr_writew(reg_value, &node_nbsr[reg_offset]);
DebugNBSR("sic_writew_node_nbsr_reg() node %d reg 0x%x written 0x%x to "
"0x%px\n",
node_id, reg_offset, reg_value, &node_nbsr[reg_offset]);
}
static inline void
sic_write_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset,
unsigned int reg_value)
{
if (!HAS_MACHINE_L_SIC) {
printk(KERN_ERR "sic_write_node_iolink_nbsr_reg() machine has "
"not SIC\n");
return;
}
if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) {
printk(KERN_ERR "sic_write_node_iolink_nbsr_reg() bad IO link "
"# %d (< 0 or >= max %d)\n",
io_link, MACH_NODE_NUMIOLINKS);
return;
}
sic_write_node_nbsr_reg(node_id,
SIC_io_reg_offset(io_link, reg_offset), reg_value);
}
static inline void
sic_writell_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset,
unsigned long reg_value)
{
if (!HAS_MACHINE_L_SIC) {
printk(KERN_ERR "sic_writell_node_iolink_nbsr_reg() machine has "
"not SIC\n");
return;
}
if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) {
printk(KERN_ERR "sic_writell_node_iolink_nbsr_reg() bad IO link "
"# %d (< 0 or >= max %d)\n",
io_link, MACH_NODE_NUMIOLINKS);
return;
}
sic_writell_node_nbsr_reg(node_id,
SIC_io_reg_offset(io_link, reg_offset), reg_value);
}
static inline unsigned int
sic_read_nbsr_reg(int reg_offset)
{
return sic_read_node_nbsr_reg(numa_node_id(), reg_offset);
}
static inline unsigned int
sic_read_iolink_nbsr_reg(int io_link, int reg_offset)
{
return sic_read_node_iolink_nbsr_reg(numa_node_id(), io_link,
reg_offset);
}
static inline void
sic_write_nbsr_reg(int reg_offset, unsigned int reg_value)
{
sic_write_node_nbsr_reg(numa_node_id(), reg_offset, reg_value);
}
static inline void
sic_write_iolink_nbsr_reg(int io_link, int reg_offset, unsigned int reg_value)
{
sic_write_node_iolink_nbsr_reg(numa_node_id(), io_link, reg_offset,
reg_value);
}
#endif /* ! __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _L_SIC_REGS_H_ */
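/*
 * Usage sketch (hypothetical helper, not part of the original header):
 * mirror one 32-bit NBSR register from node 0 to the current node. The
 * node-less wrappers operate on numa_node_id() internally.
 */
static inline void copy_nbsr_reg_from_node0(int reg_offset)
{
	unsigned int v = sic_read_node_nbsr_reg(0, reg_offset);

	sic_write_nbsr_reg(reg_offset, v);
}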

View File

@ -0,0 +1,8 @@
#ifndef _ASM_L_SMP_H
#define _ASM_L_SMP_H
extern unsigned int mp_num_processors;
extern unsigned int num_processors;
extern unsigned int disabled_cpus;
#endif /* _ASM_L_SMP_H */

View File

@ -0,0 +1,3 @@
#pragma once
extern int l_use_swiotlb;

View File

@ -0,0 +1,42 @@
#ifndef __TREE_ENTRY_H
#define __TREE_ENTRY_H
#define MAX_PROPERTY 8
#define ATTRIB_NAME 0
struct prom_property {
const char *name;
void *value;
int size;
};
struct tree_entry {
struct tree_entry *sibling;
struct tree_entry *child;
int node;
struct prom_property prop[MAX_PROPERTY]; /* property 0 must be the name */
};
extern struct tree_entry *sbus_root_node;
extern void scan_sbus(struct tree_entry *root, unsigned long start_addr,
int slot_len, int slot_num);
extern void init_known_nodes(struct tree_entry *root);
extern struct tree_entry *get_te_by_node(int node);
extern struct tree_entry *copy_sbus_dev(struct tree_entry *dev);
extern void free_sbus_dev(struct tree_entry *dev);
extern int prom_getchild(int node);
extern int prom_getproperty(int node, const char *prop, char *buffer, int bufsize);
extern int prom_node_has_property(int node, char *prop);
extern int prom_getproplen(int node, const char *prop);
extern int prom_setprop(int node, const char *pname, char *value, int size);
extern char *prom_firstprop(int node, char *buffer);
extern char *prom_nextprop(int node, char *oprop, char *buffer);
extern int prom_searchsiblings(int node_start, char *nodename);
extern int prom_getsibling(int node);
extern int prom_getint(int node, char *prop);
extern int prom_getbool(int node, char *prop);
extern int prom_getintdefault(int node, char *property, int deflt);
extern void prom_getstring(int node, char *prop, char *user_buf, int ubuf_size);
#endif /* __TREE_ENTRY_H */
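/*
 * Usage sketch for the PROM accessors above (hypothetical walker, not part
 * of the original header; it assumes, as on other PROM trees, that
 * prom_getchild()/prom_getsibling() return 0 when no such node exists).
 */
static void walk_prom_tree(int node, int depth)
{
	char name[64];

	for (; node != 0; node = prom_getsibling(node)) {
		if (prom_getproperty(node, "name", name, sizeof(name)) > 0)
			printk("%*s%s\n", 2 * depth, "", name);
		walk_prom_tree(prom_getchild(node), depth + 1);
	}
}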

113
arch/e2k/include/asm/3p.h Normal file
View File

@ -0,0 +1,113 @@
#ifndef _E2K_3P_H_
#define _E2K_3P_H_
#ifdef __KERNEL__
#include <asm/mmu_types.h>
#include <asm/tags.h>
#include <asm/prot_loader.h>
struct vm_area_struct;
struct pt_regs;
struct file;
extern int do_global_sp(struct pt_regs *regs, trap_cellar_t *tcellar);
extern int lw_global_sp(struct pt_regs *regs);
extern void free_global_sp(void);
extern int delete_records(unsigned int psl_from);
extern void mark_all_global_sp(struct pt_regs *regs, pid_t pid);
extern int interpreted_ap_code(struct pt_regs *regs,
struct vm_area_struct **vma, e2k_addr_t *address);
struct syscall_attrs {
u32 mask; /* for coding specs see systable.c */
/* The next 6 fields specify the minimum allowed argument size
* when the argument is a descriptor.
* A negative value means the size is defined by the corresponding
* argument, e.g. the value (-3) means the size is given by argument #3.
*/
short size1; /* min allowed size of arg1 of particular system call */
short size2; /* minimum allowed size of arg2 */
short size3; /* minimum allowed size of arg3 */
short size4; /* minimum allowed size of arg4 */
u16 size5; /* minimum allowed size of arg5 */
u16 size6; /* minimum allowed size of arg6 */
} __aligned(16) /* For faster address calculation */;
extern const struct syscall_attrs sys_protcall_args[];
extern const char *sys_call_ID_to_name[];
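/*
 * A minimal sketch (hypothetical checker, not part of the kernel sources)
 * of how the sizeN convention above can be interpreted: a non-negative
 * value is a literal minimum size, while a negative value -N redirects
 * the check to the runtime value of argument #N.
 */
static inline long protcall_min_size(short coded, const long args[6])
{
	return (coded >= 0) ? coded : args[-coded - 1];
}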
/*
* Definition of ttable entry number used for protected system calls.
* This is under agreement with protected mode compiler/plib team.
*/
#define PMODE_NEW_SYSCALL_TRAPNUM 10
/*
* List of protected mode system calls supported.
* For the moment it covers all the calls implemented in plib library.
*/
#define __NR_P_get_mem 500
#define __NR_P_free_mem 501
#define __NR_P_dump_umem 507
/*
* Here are some stuff that belongs to LOCAL->GLOBAL operation support
*/
typedef struct global_store_trace_record global_store_t;
typedef enum {
TYPE_GLOBAL = 0,
TYPE_BOUND,
TYPE_INIT,
} type_global_type_t;
struct global_store_trace_record {
global_store_t *prev; /* prev/next act as a struct list_head */
global_store_t *next;
type_global_type_t type;
unsigned int lcl_psl;
unsigned int orig_psr_lw; /* to keep track */
e2k_addr_t global_p;
pid_t pid;
e2k_addr_t new_address;
e2k_addr_t old_address;
unsigned long word1; /*the first word of SAP */
unsigned long word2; /*the second word of SAP */
e2k_addr_t sbr;
/*
* kept just in case the LOCAL value ever needs to be stored here
* as a backup.
*/
};
#define IS_SAP_LO(addr) \
({ \
e2k_rwsap_lo_struct_t *sap_lo; \
sap_lo = (e2k_rwsap_lo_struct_t *) addr; \
(AS_SAP_STRUCT((*sap_lo)).itag == E2K_SAP_ITAG ? \
(NATIVE_LOAD_TAGD(addr) == E2K_SAP_LO_ETAG ? 1 : 0) : 0); \
})
#define IS_SAP_HI(addr) \
({ \
(NATIVE_LOAD_TAGD(addr) == E2K_SAP_HI_ETAG ? 1 : 0); \
})
#define IS_AP_LO(addr) \
({ \
e2k_rwap_lo_struct_t *ap_lo; \
ap_lo = (e2k_rwap_lo_struct_t *) addr; \
(AS_AP_STRUCT((*ap_lo)).itag == E2K_AP_ITAG ? \
(NATIVE_LOAD_TAGD(addr) == E2K_AP_LO_ETAG ? 1 : 0) : 0); \
})
#define IS_AP_HI(addr) \
({ \
(NATIVE_LOAD_TAGD(addr) == E2K_AP_HI_ETAG ? 1 : 0); \
})
#endif /* __KERNEL__ */
#endif /* _E2K_3P_H_ */

View File

@ -0,0 +1,15 @@
### generic
generic-y += bugs.h
generic-y += div64.h
generic-y += errno.h
generic-y += emergency-restart.h
generic-y += irq_regs.h
generic-y += kmap_types.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += preempt.h
generic-y += qrwlock.h
generic-y += xor.h
generic-y += mmiowb.h

View File

@ -0,0 +1,28 @@
#ifndef __E2K_A_OUT_H__
#define __E2K_A_OUT_H__
#ifndef __ASSEMBLY__
struct exec
{
unsigned long a_info; /* Use macros N_MAGIC, etc for access */
unsigned int a_text; /* length of text, in bytes */
unsigned int a_data; /* length of data, in bytes */
unsigned int a_bss; /* length of uninitialized data area for file, in bytes */
unsigned int a_syms; /* length of symbol table data in file, in bytes */
unsigned int a_entry; /* start address */
unsigned int a_trsize; /* length of relocation info for text, in bytes */
unsigned int a_drsize; /* length of relocation info for data, in bytes */
};
#endif /* __ASSEMBLY__ */
#define N_TRSIZE(a) ((a).a_trsize)
#define N_DRSIZE(a) ((a).a_drsize)
#define N_SYMSIZE(a) ((a).a_syms)
#ifdef __KERNEL__
#endif
#endif /* __E2K_A_OUT_H__ */

View File

@ -0,0 +1,248 @@
/*
* aau_context.h - saving/loading AAU context.
*
* In this file you can see various lists of similar operations. All
* of these operations access the AAU. The reason is the following:
* AAU registers can be read only through the LDAA operation, with the
* index hardcoded into the AAU syllable, so the index cannot be
* supplied as a variable. As a consequence the accesses cannot be
* packed into a loop and are forced to be written out as lists.
*/
#ifndef _E2K_AAU_CONTEXT_H_
#define _E2K_AAU_CONTEXT_H_
#include <asm/aau_regs.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <asm/e2k_syswork.h>
/******************************* DEBUG DEFINES ********************************/
#undef DEBUG_AAU_CHECK
#define DEBUG_AAU_CHECK 0
#define DbgChk if (DEBUG_AAU_CHECK) printk
/******************************************************************************/
typedef union e2k_fapb_aps {
union {
struct {
u64 abs : 5; /* [4:0] area base */
u64 asz : 3; /* [7:5] area size */
u64 ind : 4; /* [11:8] initial index (si == 0) */
u64 incr : 3; /* [14:12] AAINCR number (si == 0) */
u64 d : 5; /* [19:15] AAD number */
u64 mrng : 5; /* [24:20] element size */
u64 fmt : 3; /* [27:25] format */
u64 dcd : 2; /* [29:28] data cache disabled */
u64 si : 1; /* [30] secondary index access */
u64 ct : 1; /* [31] control transfer (left ch.) */
u64 disp : 32;
};
struct {
u64 __x1 : 8;
u64 area : 5; /* [12:8] APB area index (si == 1) */
u64 am : 1; /* [13] (si == 1) */
u64 be : 1; /* [14] big endian (si == 1) */
u64 __x2 : 16;
u64 dpl : 1; /* [31] duplicate (right channel) */
u64 __x3 : 32;
};
} fields;
u64 word;
} e2k_fapb_instr_t;
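/*
 * Illustrative only (not part of the original header): thanks to the
 * anonymous bitfield structs above, FAPB fields can be picked straight
 * out of a raw 64-bit syllable.
 */
static inline unsigned int fapb_fmt(e2k_fapb_instr_t fapb)
{
	return fapb.fields.fmt;	/* [27:25] element format */
}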
/* constants to pick LSR register fields up */
#define LSR_LCNT_MASK 0xFFFFFFFF
#define LSR_LDMC_MASK 0x1
#define LSR_LDMC_SHIFT 39
#define LSR_ECNT_MASK 0x1f
#define LSR_ECNT_SHIFT 32
#define LSR_PCNT_MASK 0xf
#define LSR_PCNT_SHIFT 48
#define LSR_VLC_MASK 0x1
#define LSR_VLC_SHIFT 37
#define get_lcnt(reg) (reg & LSR_LCNT_MASK)
#define get_ldmc(reg) ((reg >> LSR_LDMC_SHIFT) & LSR_LDMC_MASK)
#define get_ecnt(reg) ((reg >> LSR_ECNT_SHIFT) & LSR_ECNT_MASK)
#define get_pcnt(reg) ((reg >> LSR_PCNT_SHIFT) & LSR_PCNT_MASK)
#define get_vlc(reg) ((reg >> LSR_VLC_SHIFT) & LSR_VLC_MASK)
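/*
 * Usage sketch (illustrative only; the field meanings are inferred from
 * the mask names above): unpack a saved %lsr value with the helpers.
 */
static inline void lsr_decode(u64 lsr, u64 *lcnt, u64 *ecnt, u64 *pcnt)
{
	*lcnt = get_lcnt(lsr);	/* loop counter */
	*ecnt = get_ecnt(lsr);	/* epilogue counter */
	*pcnt = get_pcnt(lsr);	/* prologue counter */
}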
static inline void
native_get_array_descriptors_v2(e2k_aau_t *context)
{
NATIVE_GET_ARRAY_DESCRIPTORS_V2(context);
}
static inline void
native_get_array_descriptors_v5(e2k_aau_t *context)
{
NATIVE_GET_ARRAY_DESCRIPTORS_V5(context);
}
static __always_inline void
native_set_array_descriptors(const e2k_aau_t *context)
{
NATIVE_SET_ARRAY_DESCRIPTORS(context);
}
static inline void
native_get_synchronous_part_v2(e2k_aau_t *context)
{
NATIVE_GET_SYNCHRONOUS_PART_V2(context);
}
static inline void
native_get_synchronous_part_v5(e2k_aau_t *context)
{
NATIVE_GET_SYNCHRONOUS_PART_V5(context);
}
static __always_inline void
native_set_synchronous_part(const e2k_aau_t *context)
{
NATIVE_SET_SYNCHRONOUS_PART(context);
}
static inline void
native_set_all_aaldis(const u64 aaldis[])
{
NATIVE_SET_ALL_AALDIS(aaldis);
}
static inline void
native_set_all_aaldas(const e2k_aalda_t aaldas_p[])
{
#ifndef __LITTLE_ENDIAN
# error These loads must be little endian so as not to mix aaldas up (and the same goes for SAVE_AALDA)
#endif
NATIVE_SET_ALL_AALDAS(aaldas_p);
}
/* set current array prefetch buffer indices values */
static __always_inline void native_set_aau_aaldis_aaldas(
const struct thread_info *ti, const e2k_aau_t *aau_regs)
{
native_set_all_aaldis(aau_regs->aaldi);
native_set_all_aaldas(ti->aalda);
}
/*
* It is assumed that aasr was read earlier (by the get_aau_context
* caller) and that the aasr.iab check has already been done.
*/
static inline void
native_get_aau_context_v2(e2k_aau_t *context)
{
NATIVE_GET_AAU_CONTEXT_V2(context);
}
static inline void
native_get_aau_context_v5(e2k_aau_t *context)
{
NATIVE_GET_AAU_CONTEXT_V5(context);
}
/*
* It is assumed that the aasr.iab check has already been done and
* that aasr will be set later.
*/
static __always_inline void
native_set_aau_context(e2k_aau_t *context)
{
NATIVE_SET_AAU_CONTEXT(context);
}
#ifdef CONFIG_KVM_GUEST_KERNEL
/* It is pure guest kernel without paravirtualization */
#include <asm/kvm/guest/aau_context.h>
#elif defined(CONFIG_PARAVIRT_GUEST)
/* It is paravirtualized host and guest kernel */
#include <asm/paravirt/aau_context.h>
#else /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */
/* native kernel without virtualization */
/* or native host kernel with virtualization support */
#define GET_ARRAY_DESCRIPTORS_V2(aau_context) \
({ \
native_get_array_descriptors_v2(aau_context); \
})
#define GET_ARRAY_DESCRIPTORS_V5(aau_context) \
({ \
native_get_array_descriptors_v5(aau_context); \
})
#define GET_SYNCHRONOUS_PART_V2(aau_context) \
({ \
native_get_synchronous_part_v2(aau_context); \
})
#define GET_SYNCHRONOUS_PART_V5(aau_context) \
({ \
native_get_synchronous_part_v5(aau_context); \
})
#define GET_AAU_CONTEXT_V2(cntx) native_get_aau_context_v2(cntx)
#define GET_AAU_CONTEXT_V5(cntx) native_get_aau_context_v5(cntx)
#define SAVE_AAU_MASK_REGS(aau_context, aasr) \
NATIVE_SAVE_AAU_MASK_REGS(aau_context, aasr)
#define RESTORE_AAU_MASK_REGS(aau_context) \
NATIVE_RESTORE_AAU_MASK_REGS(aau_context)
#define SAVE_AADS(aau_regs) \
NATIVE_SAVE_AADS(aau_regs)
#define RESTORE_AADS(aau_regs) \
NATIVE_RESTORE_AADS(aau_regs)
#define SAVE_AALDIS_V2(regs) NATIVE_SAVE_AALDIS_V2(regs)
#define SAVE_AALDIS_V5(regs) NATIVE_SAVE_AALDIS_V5(regs)
#define SAVE_AALDA(aaldas) \
({ \
register u32 aalda0, aalda4, aalda8, aalda12, \
aalda16, aalda20, aalda24, aalda28, \
aalda32, aalda36, aalda40, aalda44, \
aalda48, aalda52, aalda56, aalda60; \
\
NATIVE_GET_AAU_AALDA(aalda0, aalda32, aalda0); \
NATIVE_GET_AAU_AALDA(aalda4, aalda36, aalda4); \
NATIVE_GET_AAU_AALDA(aalda8, aalda40, aalda8); \
NATIVE_GET_AAU_AALDA(aalda12, aalda44, aalda12); \
NATIVE_GET_AAU_AALDA(aalda16, aalda48, aalda16); \
NATIVE_GET_AAU_AALDA(aalda20, aalda52, aalda20); \
NATIVE_GET_AAU_AALDA(aalda24, aalda56, aalda24); \
NATIVE_GET_AAU_AALDA(aalda28, aalda60, aalda28); \
*(u32 *) (&aaldas[0]) = aalda0; \
*(u32 *) (&aaldas[4]) = aalda4; \
*(u32 *) (&aaldas[8]) = aalda8; \
*(u32 *) (&aaldas[12]) = aalda12; \
*(u32 *) (&aaldas[16]) = aalda16; \
*(u32 *) (&aaldas[20]) = aalda20; \
*(u32 *) (&aaldas[24]) = aalda24; \
*(u32 *) (&aaldas[28]) = aalda28; \
*(u32 *) (&aaldas[32]) = aalda32; \
*(u32 *) (&aaldas[36]) = aalda36; \
*(u32 *) (&aaldas[40]) = aalda40; \
*(u32 *) (&aaldas[44]) = aalda44; \
*(u32 *) (&aaldas[48]) = aalda48; \
*(u32 *) (&aaldas[52]) = aalda52; \
*(u32 *) (&aaldas[56]) = aalda56; \
*(u32 *) (&aaldas[60]) = aalda60; \
})
#define SAVE_AAFSTR(regs) \
({ \
regs = native_read_aafstr_reg_value(); \
})
#endif /* CONFIG_KVM_GUEST_KERNEL */
/*
* for code optimization
*/
static inline int aau_working(e2k_aau_t *context)
{
e2k_aasr_t aasr = context->aasr;
return unlikely(AW(aasr) & (AAU_AASR_IAB | AAU_AASR_STB));
}
#endif /* _E2K_AAU_CONTEXT_H_ */

View File

@ -0,0 +1,24 @@
/*
* AAU registers description, macros for load/store of AAU context
*
* array access descriptors (AAD0, ... , AAD31);
* initial indices (AAIND0, ... , AAIND15);
* indices increment values (AAINCR0, ... , AAINCR7);
* current values of "prefetch" indices (AALDI0, ... , AALDI63);
* array prefetch initialization mask (AALDV);
* prefetch attributes (AALDA0, ... , AALDA63);
* array prefetch advance mask (AALDM);
* array access status register (AASR);
* array access fault status register (AAFSTR);
* current values of "store" indices (AASTI0, ... , AASTI15);
* store attributes (AASTA0, ... , AASTA15);
*/
#ifndef _E2K_AAU_H_
#define _E2K_AAU_H_
#include <linux/types.h>
#include <asm/aau_regs_types.h>
#include <asm/aau_regs_access.h>
#endif /* _E2K_AAU_H_ */

View File

@ -0,0 +1,683 @@
/*
* AAU registers description, macros for load/store of AAU context
*
* array access descriptors (AAD0, ... , AAD31);
* initial indices (AAIND0, ... , AAIND15);
* indices increment values (AAINCR0, ... , AAINCR7);
* current values of "prefetch" indices (AALDI0, ... , AALDI63);
* array prefetch initialization mask (AALDV);
* prefetch attributes (AALDA0, ... , AALDA63);
* array prefetch advance mask (AALDM);
* array access status register (AASR);
* array access fault status register (AAFSTR);
* current values of "store" indices (AASTI0, ... , AASTI15);
* store attributes (AASTA0, ... , AASTA15);
*/
#ifndef _E2K_AAU_REGS_ACCESS_H_
#define _E2K_AAU_REGS_ACCESS_H_
#include <linux/types.h>
#include <asm/aau_regs_types.h>
#include <asm/native_aau_regs_access.h>
/*
* see the comment about PREFIX_ at the top of arch/e2k/include/regs_state.h
* + one additional parameter:
* the pv_type argument in these macros is the same as the prefix but in
* lower case, and can be:
* native - native kernel with or without virtualization support
* kvm - guest kernel (can be run only as a paravirtualized
* guest kernel)
* pv - paravirtualized kernel (can be run as host and as guest
* paravirtualized kernels)
*/
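/*
 * For example (illustrative expansion only, not generated code):
 *   NATIVE_SAVE_AADS(regs)
 *     -> PREFIX_SAVE_AADS(NATIVE, native, regs)
 *     -> native_read_aads_4_reg(0, &(regs)->aads[0]); ...and so on.
 */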
#define PREFIX_SAVE_AAU_MASK_REGS(PV_TYPE, pv_type, aau_context, aasr) \
({ \
if (unlikely(AAU_ACTIVE(aasr))) { \
/* As it turns out AAU can be in ACTIVE state \
* in interrupt handler (bug 53227 comment 28 \
* and bug 53227 comment 36). \
* The hardware stops AAU automatically but \
* the value to be written should be corrected \
* to "stopped" so that the "DONE" instruction \
* works as expected. \
*/ \
AS(aasr).lds = AASR_STOPPED; \
} \
(aau_context)->aasr = aasr; \
if (unlikely(AAU_STOPPED(aasr))) { \
pv_type##_read_aaldv_reg(&(aau_context)->aaldv); \
pv_type##_read_aaldm_reg(&(aau_context)->aaldm); \
} else { \
AW((aau_context)->aaldv) = 0; \
AW((aau_context)->aaldm) = 0; \
} \
})
#define NATIVE_SAVE_AAU_MASK_REGS(aau_context, aasr) \
PREFIX_SAVE_AAU_MASK_REGS(NATIVE, native, aau_context, aasr)
#define PREFIX_RESTORE_AAU_MASK_REGS(PV_TYPE, pv_type, aau_context) \
({ \
pv_type##_write_aafstr_reg_value(0); \
pv_type##_write_aaldm_reg(&(aau_context)->aaldm); \
pv_type##_write_aaldv_reg(&(aau_context)->aaldv); \
/* aasr can be in 'ACTIVE' state, so we set it last */ \
pv_type##_write_aasr_reg((aau_context)->aasr); \
})
#define NATIVE_RESTORE_AAU_MASK_REGS(aau_context) \
PREFIX_RESTORE_AAU_MASK_REGS(NATIVE, native, aau_context)
#define PREFIX_SAVE_AADS(PV_TYPE, pv_type, aau_regs) \
({ \
register e2k_aadj_t *aads = (aau_regs)->aads; \
\
pv_type##_read_aads_4_reg(0, &aads[0]); \
pv_type##_read_aads_4_reg(4, &aads[4]); \
pv_type##_read_aads_4_reg(8, &aads[8]); \
pv_type##_read_aads_4_reg(12, &aads[12]); \
pv_type##_read_aads_4_reg(16, &aads[16]); \
pv_type##_read_aads_4_reg(20, &aads[20]); \
pv_type##_read_aads_4_reg(24, &aads[24]); \
pv_type##_read_aads_4_reg(28, &aads[28]); \
})
#define NATIVE_SAVE_AADS(aau_regs) \
PREFIX_SAVE_AADS(NATIVE, native, aau_regs)
#define PREFIX_RESTORE_AADS(PV_TYPE, pv_type, aau_regs) \
({ \
register e2k_aadj_t *aads = (aau_regs)->aads; \
\
pv_type##_write_aads_4_reg(0, &aads[0]); \
pv_type##_write_aads_4_reg(4, &aads[4]); \
pv_type##_write_aads_4_reg(8, &aads[8]); \
pv_type##_write_aads_4_reg(12, &aads[12]); \
pv_type##_write_aads_4_reg(16, &aads[16]); \
pv_type##_write_aads_4_reg(20, &aads[20]); \
pv_type##_write_aads_4_reg(24, &aads[24]); \
pv_type##_write_aads_4_reg(28, &aads[28]); \
})
#define NATIVE_RESTORE_AADS(aau_regs) \
PREFIX_RESTORE_AADS(NATIVE, native, aau_regs)
#define PREFIX_SAVE_AALDIS(PV_TYPE, pv_type, ISET, iset, regs) \
({ \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(0, regs[0], regs[32]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(1, regs[1], regs[33]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(2, regs[2], regs[34]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(3, regs[3], regs[35]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(4, regs[4], regs[36]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(5, regs[5], regs[37]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(6, regs[6], regs[38]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(7, regs[7], regs[39]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(8, regs[8], regs[40]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(9, regs[9], regs[41]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(10, regs[10], regs[42]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(11, regs[11], regs[43]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(12, regs[12], regs[44]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(13, regs[13], regs[45]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(14, regs[14], regs[46]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(15, regs[15], regs[47]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(16, regs[16], regs[48]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(17, regs[17], regs[49]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(18, regs[18], regs[50]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(19, regs[19], regs[51]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(20, regs[20], regs[52]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(21, regs[21], regs[53]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(22, regs[22], regs[54]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(23, regs[23], regs[55]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(24, regs[24], regs[56]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(25, regs[25], regs[57]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(26, regs[26], regs[58]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(27, regs[27], regs[59]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(28, regs[28], regs[60]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(29, regs[29], regs[61]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(30, regs[30], regs[62]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(31, regs[31], regs[63]); \
})
#define PREFIX_SAVE_AALDIS_V2(PV_TYPE, pv_type, regs) \
PREFIX_SAVE_AALDIS(PV_TYPE, pv_type, V2, v2, regs)
#define PREFIX_SAVE_AALDIS_V5(PV_TYPE, pv_type, regs) \
PREFIX_SAVE_AALDIS(PV_TYPE, pv_type, V5, v5, regs)
#define NATIVE_SAVE_AALDIS_V2(regs) \
PREFIX_SAVE_AALDIS_V2(NATIVE, native, regs)
#define NATIVE_SAVE_AALDIS_V5(regs) \
PREFIX_SAVE_AALDIS_V5(NATIVE, native, regs)
#define NATIVE_SAVE_AALDIS(regs) \
({ \
if (IS_AAU_ISET_V5()) { \
NATIVE_SAVE_AALDIS_V5(regs); \
} else if (IS_AAU_ISET_V2()) { \
NATIVE_SAVE_AALDIS_V2(regs); \
} else if (IS_AAU_ISET_GENERIC()) { \
machine.save_aaldi(regs); \
} else { \
BUILD_BUG_ON(true); \
} \
})
#define PREFIX_GET_ARRAY_DESCRIPTORS_V2(PV_TYPE, pv_type, aau_context) \
({ \
u64 *const aainds = (aau_context)->aainds; \
u64 *const aaincrs = (aau_context)->aaincrs; \
\
/* \
* get AAINDs, omit the AAIND0 saving since it has predefined 0 \
* value \
*/ \
{ \
register u32 ind1, ind2, ind3, ind4, \
ind5, ind6, ind7, ind8, \
ind9, ind10, ind11, ind12, \
ind13, ind14, ind15; \
register u32 tags; \
\
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(1, ind1, ind2); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(3, ind3, ind4); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(5, ind5, ind6); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(7, ind7, ind8); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(9, ind9, ind10); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(11, ind11, ind12); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(13, ind13, ind14); \
PV_TYPE##_READ_AAIND_REG15_AND_TAGS_VALUE_V2(ind15, tags); \
aainds[0] = 0; \
aainds[1] = ind1; \
aainds[2] = ind2; \
aainds[3] = ind3; \
aainds[4] = ind4; \
aainds[5] = ind5; \
aainds[6] = ind6; \
aainds[7] = ind7; \
aainds[8] = ind8; \
aainds[9] = ind9; \
aainds[10] = ind10; \
aainds[11] = ind11; \
aainds[12] = ind12; \
aainds[13] = ind13; \
aainds[14] = ind14; \
aainds[15] = ind15; \
context->aaind_tags = tags; \
} \
\
/* \
* get AAINCRs, omit the AAINCR0 saving since it has predefined 1 \
* value \
*/ \
{ \
register u32 incr1, incr2, incr3, incr4, \
incr5, incr6, incr7; \
register u32 tags; \
\
PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V2(1, incr1, incr2); \
PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V2(3, incr3, incr4); \
PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V2(5, incr5, incr6); \
PV_TYPE##_READ_AAINCR_REG7_AND_TAGS_VALUE_V2(incr7, tags); \
aaincrs[0] = 1; \
aaincrs[1] = (s64) (s32) incr1; \
aaincrs[2] = (s64) (s32) incr2; \
aaincrs[3] = (s64) (s32) incr3; \
aaincrs[4] = (s64) (s32) incr4; \
aaincrs[5] = (s64) (s32) incr5; \
aaincrs[6] = (s64) (s32) incr6; \
aaincrs[7] = (s64) (s32) incr7; \
context->aaincr_tags = tags; \
} \
})
#define NATIVE_GET_ARRAY_DESCRIPTORS_V2(aau_context) \
PREFIX_GET_ARRAY_DESCRIPTORS_V2(NATIVE, native, aau_context)
#define PREFIX_GET_ARRAY_DESCRIPTORS_V5(PV_TYPE, pv_type, aau_context) \
({ \
u64 *const aainds = (aau_context)->aainds; \
u64 *const aaincrs = (aau_context)->aaincrs; \
\
/* \
* get AAINDs, omit the AAIND0 saving since it has predefined 0 \
* value \
*/ \
{ \
register u64 ind1, ind2, ind3, ind4, \
ind5, ind6, ind7, ind8, \
ind9, ind10, ind11, ind12, \
ind13, ind14, ind15; \
register u32 tags; \
\
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(1, ind1, ind2); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(3, ind3, ind4); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(5, ind5, ind6); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(7, ind7, ind8); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(9, ind9, ind10); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(11, ind11, ind12); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(13, ind13, ind14); \
PV_TYPE##_READ_AAIND_REG15_AND_TAGS_VALUE_V5(ind15, tags); \
aainds[0] = 0; \
aainds[1] = ind1; \
aainds[2] = ind2; \
aainds[3] = ind3; \
aainds[4] = ind4; \
aainds[5] = ind5; \
aainds[6] = ind6; \
aainds[7] = ind7; \
aainds[8] = ind8; \
aainds[9] = ind9; \
aainds[10] = ind10; \
aainds[11] = ind11; \
aainds[12] = ind12; \
aainds[13] = ind13; \
aainds[14] = ind14; \
aainds[15] = ind15; \
context->aaind_tags = tags; \
} \
\
/* \
* get AAINCRs, omit the AAINCR0 saving since it has predefined 1 \
* value \
*/ \
{ \
register u64 incr1, incr2, incr3, incr4, \
incr5, incr6, incr7; \
register u32 tags; \
\
PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V5(1, incr1, incr2); \
PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V5(3, incr3, incr4); \
PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V5(5, incr5, incr6); \
PV_TYPE##_READ_AAINCR_REG7_AND_TAGS_VALUE_V5(incr7, tags); \
aaincrs[0] = 1; \
aaincrs[1] = incr1; \
aaincrs[2] = incr2; \
aaincrs[3] = incr3; \
aaincrs[4] = incr4; \
aaincrs[5] = incr5; \
aaincrs[6] = incr6; \
aaincrs[7] = incr7; \
context->aaincr_tags = tags; \
} \
})
#define NATIVE_GET_ARRAY_DESCRIPTORS_V5(aau_context) \
PREFIX_GET_ARRAY_DESCRIPTORS_V5(NATIVE, native, aau_context)
#define PREFIX_SET_ARRAY_DESCRIPTORS(PV_TYPE, pv_type, aau_context) \
({ \
const e2k_aau_t *const aau = (aau_context); \
const u64 *const aainds = aau->aainds; \
const u64 *const aaincrs = aau->aaincrs; \
\
/* \
* set AAINDs, omit the AAIND0 restoring since \
* it has predefined 0 value. \
*/ \
pv_type##_write_aainds_pair_value(1, aainds[1], aainds[2]); \
pv_type##_write_aainds_pair_value(3, aainds[3], aainds[4]); \
pv_type##_write_aainds_pair_value(5, aainds[5], aainds[6]); \
pv_type##_write_aainds_pair_value(7, aainds[7], aainds[8]); \
pv_type##_write_aainds_pair_value(9, aainds[9], aainds[10]); \
pv_type##_write_aainds_pair_value(11, aainds[11], aainds[12]); \
pv_type##_write_aainds_pair_value(13, aainds[13], aainds[14]); \
pv_type##_write_aaind_reg_value(15, aainds[15]); \
\
/* \
* set AAINCRs, omit the AAINCR0 restoring since \
* it has predefined 1 value. \
*/ \
pv_type##_write_aaincrs_pair_value(1, aaincrs[1], aaincrs[2]); \
pv_type##_write_aaincrs_pair_value(3, aaincrs[3], aaincrs[4]); \
pv_type##_write_aaincrs_pair_value(5, aaincrs[5], aaincrs[6]); \
pv_type##_write_aaincr_reg_value(7, aaincrs[7]); \
\
/* Set TAGS */ \
PV_TYPE##_SET_AAU_AAIND_AAINCR_TAGS(aau->aaind_tags, aau->aaincr_tags); \
})
#define NATIVE_SET_ARRAY_DESCRIPTORS(aau_context) \
PREFIX_SET_ARRAY_DESCRIPTORS(NATIVE, native, aau_context)
#define PREFIX_GET_SYNCHRONOUS_PART_V2(PV_TYPE, pv_type, aau_context) \
({ \
u64 *const aastis = (aau_context)->aastis; \
register u32 sti0, sti1, sti2, sti3, \
sti4, sti5, sti6, sti7, \
sti8, sti9, sti10, sti11, \
sti12, sti13, sti14, sti15; \
\
/* get AASTIs */ \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(0, sti0, sti1); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(2, sti2, sti3); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(4, sti4, sti5); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(6, sti6, sti7); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(8, sti8, sti9); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(10, sti10, sti11); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(12, sti12, sti13); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(14, sti14, sti15); \
\
aastis[0] = sti0; \
aastis[1] = sti1; \
aastis[2] = sti2; \
aastis[3] = sti3; \
aastis[4] = sti4; \
aastis[5] = sti5; \
aastis[6] = sti6; \
aastis[7] = sti7; \
aastis[8] = sti8; \
aastis[9] = sti9; \
aastis[10] = sti10; \
aastis[11] = sti11; \
aastis[12] = sti12; \
aastis[13] = sti13; \
aastis[14] = sti14; \
aastis[15] = sti15; \
(aau_context)->aasti_tags = \
pv_type##_read_aasti_tags_reg_value(); \
})
#define PREFIX_GET_SYNCHRONOUS_PART_V5(PV_TYPE, pv_type, aau_context) \
({ \
u64 *const aastis = (aau_context)->aastis; \
register u64 sti0, sti1, sti2, sti3, \
sti4, sti5, sti6, sti7, \
sti8, sti9, sti10, sti11, \
sti12, sti13, sti14, sti15; \
\
/* get AASTIs */ \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(0, sti0, sti1); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(2, sti2, sti3); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(4, sti4, sti5); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(6, sti6, sti7); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(8, sti8, sti9); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(10, sti10, sti11); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(12, sti12, sti13); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(14, sti14, sti15); \
\
aastis[0] = sti0; \
aastis[1] = sti1; \
aastis[2] = sti2; \
aastis[3] = sti3; \
aastis[4] = sti4; \
aastis[5] = sti5; \
aastis[6] = sti6; \
aastis[7] = sti7; \
aastis[8] = sti8; \
aastis[9] = sti9; \
aastis[10] = sti10; \
aastis[11] = sti11; \
aastis[12] = sti12; \
aastis[13] = sti13; \
aastis[14] = sti14; \
aastis[15] = sti15; \
(aau_context)->aasti_tags = \
pv_type##_read_aasti_tags_reg_value(); \
})
#define NATIVE_GET_SYNCHRONOUS_PART_V2(aau_context) \
PREFIX_GET_SYNCHRONOUS_PART_V2(NATIVE, native, aau_context)
#define NATIVE_GET_SYNCHRONOUS_PART_V5(aau_context) \
PREFIX_GET_SYNCHRONOUS_PART_V5(NATIVE, native, aau_context)
#define PREFIX_SET_SYNCHRONOUS_PART(PV_TYPE, pv_type, aau_context) \
({ \
const u64 *const aastis = (aau_context)->aastis; \
\
/* set AASTIs */ \
pv_type##_write_aastis_pair_value(0, aastis[0], aastis[1]); \
pv_type##_write_aastis_pair_value(2, aastis[2], aastis[3]); \
pv_type##_write_aastis_pair_value(4, aastis[4], aastis[5]); \
pv_type##_write_aastis_pair_value(6, aastis[6], aastis[7]); \
pv_type##_write_aastis_pair_value(8, aastis[8], aastis[9]); \
pv_type##_write_aastis_pair_value(10, aastis[10], aastis[11]); \
pv_type##_write_aastis_pair_value(12, aastis[12], aastis[13]); \
pv_type##_write_aastis_pair_value(14, aastis[14], aastis[15]); \
pv_type##_write_aasti_tags_reg_value((aau_context)->aasti_tags); \
})
#define NATIVE_SET_SYNCHRONOUS_PART(aau_context) \
PREFIX_SET_SYNCHRONOUS_PART(NATIVE, native, aau_context)
#define PREFIX_SET_ALL_AALDIS(PV_TYPE, pv_type, aaldis) \
({ \
pv_type##_write_aaldi_reg_value(0, aaldis[0], aaldis[32]); \
pv_type##_write_aaldi_reg_value(1, aaldis[1], aaldis[33]); \
pv_type##_write_aaldi_reg_value(2, aaldis[2], aaldis[34]); \
pv_type##_write_aaldi_reg_value(3, aaldis[3], aaldis[35]); \
pv_type##_write_aaldi_reg_value(4, aaldis[4], aaldis[36]); \
pv_type##_write_aaldi_reg_value(5, aaldis[5], aaldis[37]); \
pv_type##_write_aaldi_reg_value(6, aaldis[6], aaldis[38]); \
pv_type##_write_aaldi_reg_value(7, aaldis[7], aaldis[39]); \
pv_type##_write_aaldi_reg_value(8, aaldis[8], aaldis[40]); \
pv_type##_write_aaldi_reg_value(9, aaldis[9], aaldis[41]); \
pv_type##_write_aaldi_reg_value(10, aaldis[10], aaldis[42]); \
pv_type##_write_aaldi_reg_value(11, aaldis[11], aaldis[43]); \
pv_type##_write_aaldi_reg_value(12, aaldis[12], aaldis[44]); \
pv_type##_write_aaldi_reg_value(13, aaldis[13], aaldis[45]); \
pv_type##_write_aaldi_reg_value(14, aaldis[14], aaldis[46]); \
pv_type##_write_aaldi_reg_value(15, aaldis[15], aaldis[47]); \
pv_type##_write_aaldi_reg_value(16, aaldis[16], aaldis[48]); \
pv_type##_write_aaldi_reg_value(17, aaldis[17], aaldis[49]); \
pv_type##_write_aaldi_reg_value(18, aaldis[18], aaldis[50]); \
pv_type##_write_aaldi_reg_value(19, aaldis[19], aaldis[51]); \
pv_type##_write_aaldi_reg_value(20, aaldis[20], aaldis[52]); \
pv_type##_write_aaldi_reg_value(21, aaldis[21], aaldis[53]); \
pv_type##_write_aaldi_reg_value(22, aaldis[22], aaldis[54]); \
pv_type##_write_aaldi_reg_value(23, aaldis[23], aaldis[55]); \
pv_type##_write_aaldi_reg_value(24, aaldis[24], aaldis[56]); \
pv_type##_write_aaldi_reg_value(25, aaldis[25], aaldis[57]); \
pv_type##_write_aaldi_reg_value(26, aaldis[26], aaldis[58]); \
pv_type##_write_aaldi_reg_value(27, aaldis[27], aaldis[59]); \
pv_type##_write_aaldi_reg_value(28, aaldis[28], aaldis[60]); \
pv_type##_write_aaldi_reg_value(29, aaldis[29], aaldis[61]); \
pv_type##_write_aaldi_reg_value(30, aaldis[30], aaldis[62]); \
pv_type##_write_aaldi_reg_value(31, aaldis[31], aaldis[63]); \
})
#define NATIVE_SET_ALL_AALDIS(aaldis) \
PREFIX_SET_ALL_AALDIS(NATIVE, native, aaldis)
#define PREFIX_SET_ALL_AALDAS(PV_TYPE, pv_type, aaldas_p) \
({ \
register u32 *aaldas = (u32 *)(aaldas_p); \
\
pv_type##_write_aaldas_reg_value(0, aaldas[0], aaldas[8]); \
pv_type##_write_aaldas_reg_value(4, aaldas[1], aaldas[9]); \
pv_type##_write_aaldas_reg_value(8, aaldas[2], aaldas[10]); \
pv_type##_write_aaldas_reg_value(12, aaldas[3], aaldas[11]); \
pv_type##_write_aaldas_reg_value(16, aaldas[4], aaldas[12]); \
pv_type##_write_aaldas_reg_value(20, aaldas[5], aaldas[13]); \
pv_type##_write_aaldas_reg_value(24, aaldas[6], aaldas[14]); \
pv_type##_write_aaldas_reg_value(28, aaldas[7], aaldas[15]); \
})
/*
 * It is assumed that aasr was read earlier (by the get_aau_context
 * caller) and that the check of aasr.iab has already been done.
*/
#define PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, ISET, iset, aau_context) \
({ \
/* get registers, which describe arrays in APB operations */ \
e2k_aasr_t aasr = (aau_context)->aasr; \
\
/* get descriptors & auxiliary registers */ \
if (AS(aasr).iab) \
PV_TYPE##_GET_ARRAY_DESCRIPTORS_##ISET(aau_context); \
\
/* get synchronous part of APB */ \
if (AS(aasr).stb) \
PV_TYPE##_GET_SYNCHRONOUS_PART_##ISET(aau_context); \
})
#define PREFIX_GET_AAU_CONTEXT_V2(PV_TYPE, pv_type, aau_context) \
PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, V2, v2, aau_context)
#define PREFIX_GET_AAU_CONTEXT_V5(PV_TYPE, pv_type, aau_context) \
PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, V5, v5, aau_context)
#define NATIVE_GET_AAU_CONTEXT_V2(aau_context) \
PREFIX_GET_AAU_CONTEXT_V2(NATIVE, native, aau_context)
#define NATIVE_GET_AAU_CONTEXT_V5(aau_context) \
PREFIX_GET_AAU_CONTEXT_V5(NATIVE, native, aau_context)
#define NATIVE_GET_AAU_CONTEXT(aau_context) \
({ \
if (IS_AAU_ISET_V5()) { \
NATIVE_GET_AAU_CONTEXT_V5(aau_context); \
} else if (IS_AAU_ISET_V2()) { \
NATIVE_GET_AAU_CONTEXT_V2(aau_context); \
} else if (IS_AAU_ISET_GENERIC()) { \
machine.get_aau_context(aau_context); \
} else { \
BUILD_BUG_ON(true); \
} \
})
/*
 * It is assumed that the check of aasr.iab has already been done and
 * that aasr itself will be written later.
*/
#define PREFIX_SET_AAU_CONTEXT(PV_TYPE, pv_type, aau_context) \
do { \
const e2k_aau_t *const aau = (aau_context); \
/* retrieve common APB status register */\
e2k_aasr_t aasr = aau->aasr; \
\
/* prefetch data to restore */ \
if (AS(aasr).stb) \
prefetchw_range(aau->aastis, sizeof(aau->aastis) + \
sizeof(aau->aasti_tags)); \
if (AS(aasr).iab) \
prefetchw_range(aau->aainds, sizeof(aau->aainds) + \
sizeof(aau->aaind_tags) + sizeof(aau->aaincrs) + \
sizeof(aau->aaincr_tags) + sizeof(aau->aads)); \
if (AAU_STOPPED(aasr)) \
prefetchw_range(aau->aaldi, sizeof(aau->aaldi)); \
\
/* Make sure prefetches are issued */ \
barrier(); \
\
/* set synchronous part of APB */ \
if (AS(aasr).stb) \
pv_type##_set_synchronous_part(aau); \
\
/* set descriptors & auxiliary registers */ \
if (AS(aasr).iab) \
pv_type##_set_array_descriptors(aau); \
} while (0)
#define NATIVE_SET_AAU_CONTEXT(aau_context) \
PREFIX_SET_AAU_CONTEXT(NATIVE, native, aau_context)
#define PREFIX_SAVE_AALDAS(PV_TYPE, pv_type, aaldas_p) \
({ \
register u32 *aaldas = (u32 *)aaldas_p; \
\
pv_type##_read_aaldas_reg_value(0, &aaldas[0], &aaldas[8]); \
pv_type##_read_aaldas_reg_value(4, &aaldas[1], &aaldas[9]); \
pv_type##_read_aaldas_reg_value(8, &aaldas[2], &aaldas[10]); \
pv_type##_read_aaldas_reg_value(12, &aaldas[3], &aaldas[11]); \
pv_type##_read_aaldas_reg_value(16, &aaldas[4], &aaldas[12]); \
pv_type##_read_aaldas_reg_value(20, &aaldas[5], &aaldas[13]); \
pv_type##_read_aaldas_reg_value(24, &aaldas[6], &aaldas[14]); \
pv_type##_read_aaldas_reg_value(28, &aaldas[7], &aaldas[15]); \
})
#define NATIVE_SAVE_AALDAS(aaldas_p) \
PREFIX_SAVE_AALDAS(NATIVE, native, aaldas_p)
#define PREFIX_SAVE_AAFSTR(PV_TYPE, pv_type, aau_context) \
({ \
(aau_context)->aafstr = pv_type##_read_aafstr_reg_value(); \
})
#define NATIVE_SAVE_AAFSTR(aau_context) \
PREFIX_SAVE_AAFSTR(NATIVE, native, aau_context)
#define PREFIX_SAVE_AAU_REGS_FOR_PTRACE(PV_TYPE, pv_type, pt_regs, ti) \
({ \
e2k_aau_t *__aau_context = (pt_regs)->aau_context; \
if (__aau_context) { \
if (machine.native_iset_ver < E2K_ISET_V6) \
PV_TYPE##_SAVE_AALDIS(__aau_context->aaldi); \
		PV_TYPE##_SAVE_AALDAS((ti)->aalda); \
PV_TYPE##_SAVE_AAFSTR(__aau_context); \
} \
})
#define NATIVE_SAVE_AAU_REGS_FOR_PTRACE(pt_regs, ti) \
PREFIX_SAVE_AAU_REGS_FOR_PTRACE(NATIVE, native, pt_regs, ti)
#ifdef CONFIG_KVM_GUEST_KERNEL
/* It is pure guest kernel without paravirtualization */
#include <asm/kvm/aau_regs_access.h>
#elif defined(CONFIG_PARAVIRT_GUEST)
/* It is paravirtualized host and guest kernel */
#include <asm/paravirt/aau_regs_access.h>
#else /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */
/* native kernel without virtualization */
/* or native host kernel with virtualization support */
static __always_inline u32 read_aasr_reg_value(void)
{
return native_read_aasr_reg_value();
}
static __always_inline void write_aasr_reg_value(u32 reg_value)
{
native_write_aasr_reg_value(reg_value);
}
static inline u32 read_aafstr_reg_value(void)
{
return native_read_aafstr_reg_value();
}
static inline void write_aafstr_reg_value(u32 reg_value)
{
native_write_aafstr_reg_value(reg_value);
}
static __always_inline e2k_aasr_t read_aasr_reg(void)
{
return native_read_aasr_reg();
}
static __always_inline void write_aasr_reg(e2k_aasr_t aasr)
{
native_write_aasr_reg(aasr);
}
static inline void read_aaldm_reg(e2k_aaldm_t *aaldm)
{
native_read_aaldm_reg(aaldm);
}
static inline void write_aaldm_reg(e2k_aaldm_t *aaldm)
{
native_write_aaldm_reg(aaldm);
}
static inline void read_aaldv_reg(e2k_aaldv_t *aaldv)
{
native_read_aaldv_reg(aaldv);
}
static inline void write_aaldv_reg(e2k_aaldv_t *aaldv)
{
native_write_aaldv_reg(aaldv);
}
#ifdef CONFIG_USE_AAU
# define SAVE_AAU_REGS_FOR_PTRACE(__regs, ti) \
NATIVE_SAVE_AAU_REGS_FOR_PTRACE(__regs, ti)
#else
# define SAVE_AAU_REGS_FOR_PTRACE(__regs, ti)
#endif
#endif /* CONFIG_KVM_GUEST_KERNEL */
#define SWITCH_GUEST_AAU_AASR(aasr, aau_context, do_switch) \
({ \
if (do_switch) { \
e2k_aasr_t aasr_worst_case; \
AW(aasr_worst_case) = 0; \
AS(aasr_worst_case).stb = 1; \
AS(aasr_worst_case).iab = 1; \
AS(aasr_worst_case).lds = AASR_STOPPED; \
(aau_context)->guest_aasr = *(aasr); \
*(aasr) = aasr_worst_case; \
} \
})
#define RESTORE_GUEST_AAU_AASR(aau_context, do_restore) \
({ \
if (do_restore) { \
(aau_context)->aasr = (aau_context)->guest_aasr; \
} \
})
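/*
 * A usage sketch of the guest AASR switch pair above (the pairing is
 * assumed for illustration, this is not verbatim host code): before a
 * guest's AAU context is saved, the host substitutes the worst-case
 * AASR so that the following save preserves everything; afterwards the
 * real value is put back:
 *
 *	SWITCH_GUEST_AAU_AASR(&aasr, aau_context, is_guest);
 *	... full AAU context save ...
 *	RESTORE_GUEST_AAU_AASR(aau_context, is_guest);
 */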
#endif /* _E2K_AAU_REGS_ACCESS_H_ */


@ -0,0 +1,178 @@
/*
* AAU registers structures description
*
* array access descriptors (AAD0, ... , AAD31);
 * initial indices (AAIND0, ... , AAIND15);
* indices increment values (AAINCR0, ... , AAINCR7);
* current values of "prefetch" indices (AALDI0, ... , AALDI63);
* array prefetch initialization mask (AALDV);
* prefetch attributes (AALDA0, ... , AALDA63);
* array prefetch advance mask (AALDM);
* array access status register (AASR);
* array access fault status register (AAFSTR);
* current values of "store" indices (AASTI0, ... , AASTI15);
* store attributes (AASTA0, ... , AASTA15);
*/
#ifndef _E2K_AAU_REGS_TYPES_H_
#define _E2K_AAU_REGS_TYPES_H_
#include <asm/types.h>
#include <asm/cpu_regs_types.h>
#if CONFIG_CPU_ISET >= 5
# define IS_AAU_ISET_V5() true
# define IS_AAU_ISET_V2() false
# define IS_AAU_ISET_GENERIC() false
#elif CONFIG_CPU_ISET >= 1
# define IS_AAU_ISET_V2() true
# define IS_AAU_ISET_V5() false
# define IS_AAU_ISET_GENERIC() false
#elif CONFIG_CPU_ISET == 0
# define IS_AAU_ISET_GENERIC() true
# define IS_AAU_ISET_V2() false
# define IS_AAU_ISET_V5() false
#else /* CONFIG_CPU_ISET undefined or negative */
# warning "Undefined CPU ISET VERSION #, IS_AAU_ISET_Vx is defined dinamicaly"
# define IS_AAU_ISET_GENERIC() true
# define IS_AAU_ISET_V2() false
# define IS_AAU_ISET_V5() false
#endif /* CONFIG_CPU_ISET 0-6 */
/* Values for AASR.lds */
enum {
AASR_NULL = 0,
AASR_READY = 1,
AASR_ACTIVE = 3,
AASR_STOPPED = 5
};
#define AAU_AASR_STB 0x20
#define AAU_AASR_IAB 0x40
typedef struct e2k_aasr_fields {
u32 reserved : 5; /* [4:0] */
u32 stb : 1; /* [5:5] */
u32 iab : 1; /* [6:6] */
u32 lds : 3; /* [9:7] */
} e2k_aasr_fields_t;
typedef union e2k_aasr { /* aasr word */
e2k_aasr_fields_t fields;
u32 word;
} e2k_aasr_t;
/* Check up AAU state */
#define AAU_NULL(aasr) (AS(aasr).lds == AASR_NULL)
#define AAU_READY(aasr) (AS(aasr).lds == AASR_READY)
#define AAU_ACTIVE(aasr) (AS(aasr).lds == AASR_ACTIVE)
#define AAU_STOPPED(aasr) (AS(aasr).lds == AASR_STOPPED)
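/*
 * Illustrative sketch (read_aasr_reg() is declared in
 * aau_regs_access.h, not here): a caller typically reads AASR once and
 * uses the checks above to decide whether the AAU context must be
 * saved at all:
 *
 *	e2k_aasr_t aasr = read_aasr_reg();
 *	if (!AAU_NULL(aasr)) {
 *		// AAU holds state (ready, active or stopped) - save it
 *	}
 */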
typedef u32 e2k_aafstr_t;
/* Values for AAD.tag */
enum {
AAD_AAUNV = 0,
AAD_AAUDT = 1,
AAD_AAUET = 2,
AAD_AAUAP = 4,
AAD_AAUSAP = 5,
AAD_AAUDS = 6
};
/* The AAD SAP format is not used here,
 * so it is not described in the structure */
typedef union e2k_aadj_lo_fields {
struct {
u64 ap_base : E2K_VA_SIZE; /* [E2K_VA_MSB:0] */
u64 unused1 : 53 - E2K_VA_MSB; /* [53:48] */
u64 tag : 3; /* [56:54] */
u64 mb : 1; /* [57] */
u64 ed : 1; /* [58] */
u64 rw : 2; /* [60:59] */
		u64 unused2 : 3; /* [63:61] */
};
struct {
u64 sap_base : 32;
u64 psl : 16;
u64 __pad : 16;
};
} e2k_aadj_lo_fields_t;
typedef struct e2k_aadj_hi_fields {
u64 unused : 32;
u64 size : 32; /* [63:32] */
} e2k_aadj_hi_fields_t;
typedef union e2k_aadj { /* aadj quad-word */
struct {
e2k_aadj_lo_fields_t lo;
e2k_aadj_hi_fields_t hi;
} fields;
struct {
u64 lo;
u64 hi;
} word;
} e2k_aadj_t;
/* Possible values for aalda.exc field */
enum {
AALDA_EIO = 1,
AALDA_EPM = 2,
AALDA_EPMSI = 3
};
union e2k_u64_struct { /* aaldv,aaldm,aasta_restore dword */
struct {
u32 lo; /* read/write on left channel */
u32 hi; /* read/write on right channel */
};
u64 word;
};
typedef union e2k_u64_struct e2k_aaldv_t;
typedef union e2k_u64_struct e2k_aaldm_t;
typedef struct e2k_aalda_fields {
u8 exc: 2;
u8 cincr: 1;
u8 unused1: 1;
u8 root: 1;
u8 unused2: 3;
} e2k_aalda_fields_t;
typedef union e2k_aalda_struct {
e2k_aalda_fields_t fields;
u8 word;
} e2k_aalda_t;
#define AASTIS_REGS_NUM 16
#define AASTIS_TAG_no AASTIS_REGS_NUM
#define AAINDS_REGS_NUM 16
#define AAINDS_TAG_no AAINDS_REGS_NUM
#define AAINCRS_REGS_NUM 8
#define AAINCRS_TAG_no AAINCRS_REGS_NUM
#define AADS_REGS_NUM 32
#define AALDIS_REGS_NUM 64
#define AALDAS_REGS_NUM 64
/*
 * For virtualization, aasr might be switched to the worst-case scenario
 * (lds = AAU_STOPPED, iab = 1, stb = 1). In that case the real aasr is
 * saved in guest_aasr.
*/
typedef struct e2k_aau_context {
e2k_aasr_t aasr;
e2k_aasr_t guest_aasr;
e2k_aafstr_t aafstr;
e2k_aaldm_t aaldm;
e2k_aaldv_t aaldv;
/* Synchronous part */
u64 aastis[AASTIS_REGS_NUM];
u32 aasti_tags;
/* Asynchronous part */
u64 aainds[AAINDS_REGS_NUM];
u32 aaind_tags;
u64 aaincrs[AAINCRS_REGS_NUM];
u32 aaincr_tags;
e2k_aadj_t aads[AADS_REGS_NUM];
	/* %aaldi [synonym for %aaldsi] must be saved since iset v6 */
u64 aaldi[AALDIS_REGS_NUM];
} e2k_aau_t;
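/*
 * A minimal save/restore sketch tying this structure to the macros in
 * aau_regs_access.h (illustrative only, the real kernel paths do more):
 *
 *	e2k_aau_t ctx;
 *	ctx.aasr = read_aasr_reg();		// common APB status first
 *	if (!AAU_NULL(ctx.aasr))
 *		NATIVE_GET_AAU_CONTEXT(&ctx);	// save sync/async parts
 *	...
 *	NATIVE_SET_AAU_CONTEXT(&ctx);		// restore what aasr says
 */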
#endif /* _E2K_AAU_REGS_TYPES_H_ */


@ -0,0 +1,10 @@
#ifndef _ASM_E2K_ACENV_H_
#define _ASM_E2K_ACENV_H_
#include <asm/mmu_regs_access.h>
#define ACPI_FLUSH_CPU_CACHE() write_back_CACHE_L12()
#include <asm-l/acenv.h>
#endif /* _ASM_E2K_ACENV_H_ */


@ -0,0 +1,6 @@
#ifndef __ASM_ACPI_H
#define __ASM_ACPI_H
#include <asm-l/acpi.h>
#endif


@ -0,0 +1,193 @@
#ifndef _ASM_E2K_ALTERNATIVE_ASM_H
#define _ASM_E2K_ALTERNATIVE_ASM_H
#ifdef __ASSEMBLY__
/*
 * Check the length of an instruction sequence: it must be a multiple of 8.
*/
.macro alt_len_check start,end
.if ( \end - \start ) % 8
.error "cpu alternatives instructions length is not divisible by 8\n"
.endif
.endm
/*
* Issue one struct alt_instr descriptor entry (need to put it into
* the section .altinstructions, see below). This entry contains
* enough information for the alternatives patching code to patch an
* instruction. See apply_alternatives().
*/
.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
.align 4
.word \orig_start - .
.word \alt_start - .
.short \orig_end - \orig_start
.short \alt_end - \alt_start
.short \feature
.endm
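/*
 * The five fields emitted above map one to one onto 'struct alt_instr'
 * from asm/alternative.h: two self-relative 32-bit offsets (to the
 * original and the replacement code), two 16-bit lengths and the 16-bit
 * facility number.
 */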
.macro alt_pad_64bytes bytes, check
.if ( \bytes >= \check )
.fill 1, 4, 0x00000070
.fill 15, 4, 0
.endif
.endm
/*
* Fill up @bytes with nops.
*/
.macro alt_pad bytes
.if ( \bytes >= 576 )
ibranch . + \bytes
alt_pad_fill \bytes - 16
.else
alt_pad_64bytes \bytes, 512
alt_pad_64bytes \bytes, 448
alt_pad_64bytes \bytes, 384
alt_pad_64bytes \bytes, 320
alt_pad_64bytes \bytes, 256
alt_pad_64bytes \bytes, 192
alt_pad_64bytes \bytes, 128
alt_pad_64bytes \bytes, 64
.if ( \bytes % 64 ) == 56
.fill 1, 4, 0x00000060
.fill 13, 4, 0
.endif
.if ( \bytes % 64 ) == 48
.fill 1, 4, 0x00000050
.fill 11, 4, 0
.endif
.if ( \bytes % 64 ) == 40
.fill 1, 4, 0x00000040
.fill 9, 4, 0
.endif
.if ( \bytes % 64 ) == 32
.fill 1, 4, 0x00000030
.fill 7, 4, 0
.endif
.if ( \bytes % 64 ) == 24
.fill 1, 4, 0x00000020
.fill 5, 4, 0
.endif
.if ( \bytes % 64 ) == 16
.fill 1, 4, 0x00000010
.fill 3, 4, 0
.endif
.if ( \bytes % 64 ) == 8
.fill 2, 4, 0
.endif
.endif
.endm
/*
* Define an alternative between two instructions. If @feature is
* present, early code in apply_alternatives() replaces @oldinstr with
* @newinstr.
*/
.macro ALTERNATIVE oldinstr, newinstr, feature
.pushsection .altinstr_replacement,"ax"
770: \newinstr
771: .popsection
772: \oldinstr
773: alt_len_check 770b, 771b
alt_len_check 772b, 773b
alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) )
774: .pushsection .altinstructions,"a"
alt_entry 772b, 774b, 770b, 771b, \feature
.popsection
.endm
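/*
 * Usage sketch (CPU_FEAT_EXAMPLE is a placeholder facility number, not
 * a real feature define; both sequences must be multiples of 8 bytes):
 *
 *	ALTERNATIVE "<old instruction>", "<new instruction>", CPU_FEAT_EXAMPLE
 */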
/*
* Define an alternative between three instructions.
*/
.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
.pushsection .altinstr_replacement,"ax"
770: \newinstr1
771: \newinstr2
772: .popsection
773: \oldinstr
774: alt_len_check 770b, 771b
alt_len_check 771b, 772b
alt_len_check 773b, 774b
.if ( 771b - 770b > 772b - 771b )
alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) )
.else
alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) )
.endif
775: .pushsection .altinstructions,"a"
alt_entry 773b, 775b, 770b, 771b,\feature1
alt_entry 773b, 775b, 771b, 772b,\feature2
.popsection
.endm
/*
* bug 110687: we cannot pass e2k wide instructions to GNU assembler .macro
 * as a parameter in a sane way, so use the following in complex cases.
* How to use:
*
* 1) There is one alternative
*
* ALTERNATIVE_1_ALTINSTR
* < alt. instruction >
* ALTERNATIVE_2_OLDINSTR
* < initial instruction >
* ALTERNATIVE_3_FEATURE
*
* 2) There are two alternatives
*
* ALTERNATIVE_1_ALTINSTR
* "< first alt. instruction >"
* ALTERNATIVE_2_ALTINSTR2
* "< second alt. instruction >"
* ALTERNATIVE_3_OLDINSTR2
* "< initial instruction >"
* ALTERNATIVE_4_FEATURE2(feature1, feature2)
*/
#define ALTERNATIVE_1_ALTINSTR \
.pushsection .altinstr_replacement,"ax" ; \
770:
#define ALTERNATIVE_2_OLDINSTR \
771: ; \
.popsection ; \
772:
#define ALTERNATIVE_3_FEATURE(feature) \
773: ; \
alt_len_check 770b, 771b ; \
alt_len_check 772b, 773b ; \
alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) ) ; \
774: ; \
.pushsection .altinstructions,"a" ; \
alt_entry 772b, 774b, 770b, 771b, feature ; \
.popsection
#define ALTERNATIVE_2_ALTINSTR2 \
771:
#define ALTERNATIVE_3_OLDINSTR2 \
772: ; \
.popsection ; \
773:
#define ALTERNATIVE_4_FEATURE2(feature1, feature2) \
774: ; \
alt_len_check 770b, 771b ; \
alt_len_check 771b, 772b ; \
alt_len_check 773b, 774b ; \
.if ( 771b - 770b > 772b - 771b ) ; \
alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) ) ; \
.else ; \
alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) ) ; \
.endif ; \
775: ; \
.pushsection .altinstructions,"a" ; \
alt_entry 773b, 775b, 770b, 771b, feature1 ; \
alt_entry 773b, 775b, 771b, 772b, feature2 ; \
.popsection
#endif /* __ASSEMBLY__ */
#endif /* _ASM_E2K_ALTERNATIVE_ASM_H */


@ -0,0 +1,260 @@
#ifndef _ASM_E2K_ALTERNATIVE_H
#define _ASM_E2K_ALTERNATIVE_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
struct alt_instr {
s32 instr_offset; /* original instruction */
s32 repl_offset; /* offset to replacement instruction */
u16 instrlen; /* length of original instruction */
u16 replacementlen; /* length of new instruction */
u16 facility; /* facility bit set for replacement */
} __aligned(4);
void apply_alternative_instructions(void);
void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
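/*
 * The offsets in 'struct alt_instr' are self-relative, so no relocations
 * are needed. A sketch of the address recovery apply_alternatives() has
 * to perform for each entry 'a' (assumed shape, not the verbatim code):
 *
 *	u8 *instr = (u8 *) &a->instr_offset + a->instr_offset;
 *	u8 *repl  = (u8 *) &a->repl_offset + a->repl_offset;
 *	// if the 'facility' bit is set, copy 'replacementlen' bytes
 *	// from repl over instr and pad the rest of 'instrlen' with nops
 */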
/*
 * An example where the first alternative instruction is the biggest
 * and the original instruction is the smallest.
 *
 * The original instruction is padded statically at compile time,
 * while alternative instructions are padded if necessary at run
 * time when patching them in.
*
* |661: |662: |663:
* +-----------+---------+-----------------+
* | oldinstr | oldinstr_padding |
* | +---------+-----------------+
* | | ibranch if length >= 576 |
* | | 64-bytes NOPs otherwise |
* +-----------+---------+-----------------+
* ^^^^^^ static padding ^^^^^
*
* .altinstr_replacement section
* +-----------+---------+-----------------+
* |6641: |6651:
* | alternative instr 1 |
* +-----------+---------+- - - - - - - - -+
* |6642: |6652: |
* | alternative instr 2 | padding |
* +-----------+---------+- - - - - - - - -+
* ^runtime padding^
*
*
* 'struct alt_instr' holds details about how and when
* instructions must be replaced:
*
* .altinstructions section
* +----------------------------+
* | alt_instr entries for each |
* | alternative instruction |
* +----------------------------+
*/
#define b_altinstr(num) "664"#num
#define e_altinstr(num) "665"#num
#define e_oldinstr_pad_end "663"
#define oldinstr_len "662b-661b"
#define oldinstr_total_len e_oldinstr_pad_end"b-661b"
#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b"
#define oldinstr_pad_len(num) \
"-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
"((" altinstr_len(num) ")-(" oldinstr_len "))"
#define INSTR_LEN_SANITY_CHECK(len) \
".if (" len ") %% 8\n" \
"\t.error \"cpu alternatives instructions length is not divisible by 8\"\n" \
".endif\n"
#define OLDINSTR_PAD_64_BYTES(num, check) \
".if " oldinstr_pad_len(num) " >= " __stringify(check) "\n" \
"\t.fill 1, 4, 0x00000070\n" \
"\t.fill 15, 4, 0\n" \
".endif\n"
#define OLDINSTR_PADDING(oldinstr, num) \
".if " oldinstr_pad_len(num) " >= 576\n" \
"\tibranch " e_oldinstr_pad_end "f\n" \
"6620:\n" \
"\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 8, 8, 0\n" \
".else\n" \
OLDINSTR_PAD_64_BYTES(num, 512) \
OLDINSTR_PAD_64_BYTES(num, 448) \
OLDINSTR_PAD_64_BYTES(num, 384) \
OLDINSTR_PAD_64_BYTES(num, 320) \
OLDINSTR_PAD_64_BYTES(num, 256) \
OLDINSTR_PAD_64_BYTES(num, 192) \
OLDINSTR_PAD_64_BYTES(num, 128) \
OLDINSTR_PAD_64_BYTES(num, 64) \
".if ( " oldinstr_pad_len(num) " %% 64 ) == 56\n" \
"\t.fill 1, 4, 0x00000060\n" \
"\t.fill 13, 4, 0\n" \
".endif\n" \
".if ( " oldinstr_pad_len(num) " %% 64 ) == 48\n" \
"\t.fill 1, 4, 0x00000050\n" \
"\t.fill 11, 4, 0\n" \
".endif\n" \
".if ( " oldinstr_pad_len(num) " %% 64 ) == 40\n" \
"\t.fill 1, 4, 0x00000040\n" \
"\t.fill 9, 4, 0\n" \
".endif\n" \
".if ( " oldinstr_pad_len(num) " %% 64 ) == 32\n" \
"\t.fill 1, 4, 0x00000030\n" \
"\t.fill 7, 4, 0\n" \
".endif\n" \
".if ( " oldinstr_pad_len(num) " %% 64 ) == 24\n" \
"\t.fill 1, 4, 0x00000020\n" \
"\t.fill 5, 4, 0\n" \
".endif\n" \
".if ( " oldinstr_pad_len(num) " %% 64 ) == 16\n" \
"\t.fill 1, 4, 0x00000010\n" \
"\t.fill 3, 4, 0\n" \
".endif\n" \
".if ( " oldinstr_pad_len(num) " %% 64 ) == 8\n" \
"\t.fill 2, 4, 0\n" \
".endif\n" \
".endif\n"
#define OLDINSTR(oldinstr, num) \
"661:\n\t" oldinstr "\n662:\n" \
OLDINSTR_PADDING(oldinstr, num) \
e_oldinstr_pad_end ":\n" \
INSTR_LEN_SANITY_CHECK(oldinstr_len)
#define OLDINSTR_2(oldinstr, num1, num2) \
"661:\n\t" oldinstr "\n662:\n" \
".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \
OLDINSTR_PADDING(oldinstr, num2) \
".else\n" \
OLDINSTR_PADDING(oldinstr, num1) \
".endif\n" \
e_oldinstr_pad_end ":\n" \
INSTR_LEN_SANITY_CHECK(oldinstr_len)
#define ALTINSTR_ENTRY(facility, num) \
"\t.align 4\n" \
"\t.word 661b - .\n" /* old instruction */ \
"\t.word " b_altinstr(num)"b - .\n" /* alt instruction */ \
"\t.short " oldinstr_total_len "\n" /* source len */ \
"\t.short " altinstr_len(num) "\n" /* alt instruction len */ \
"\t.short " __stringify(facility) "\n" /* facility bit */
#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \
b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \
INSTR_LEN_SANITY_CHECK(altinstr_len(num))
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, altinstr, facility) \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(altinstr, 1) \
".popsection\n" \
OLDINSTR(oldinstr, 1) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(facility, 1) \
".popsection\n"
#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(altinstr1, 1) \
ALTINSTR_REPLACEMENT(altinstr2, 2) \
".popsection\n" \
OLDINSTR_2(oldinstr, 1, 2) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(facility1, 1) \
ALTINSTR_ENTRY(facility2, 2) \
".popsection\n"
/*
* Alternative instructions for different CPU types or capabilities.
*
 * This makes it possible to use optimized instructions even on generic
 * kernel binaries.
*
* oldinstr is padded with jump and nops at compile time if altinstr is
* longer. altinstr is padded with jump and nops at run-time during patching.
*/
#define alternative(oldinstr, altinstr, facility, clobbers...) \
asm volatile (ALTERNATIVE(oldinstr, altinstr, facility) \
::: clobbers)
#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, \
		      facility2, clobbers...) \
	asm volatile (ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
				    altinstr2, facility2) \
		      ::: clobbers)
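/*
 * Usage sketch (CPU_FEAT_EXAMPLE is a placeholder facility number, not
 * a real feature define):
 *
 *	alternative("<old instruction>", "<new instruction>",
 *		    CPU_FEAT_EXAMPLE, "memory");
 */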
/*
* How to use:
*
* 1) There is one alternative
*
* asm volatile (
* ALTERNATIVE_1_ALTINSTR
* "< alt. instruction >"
* ALTERNATIVE_2_OLDINSTR
* "< initial instruction >"
* ALTERNATIVE_3_FEATURE(feature)
* )
*
* 2) There are two alternatives
*
* asm volatile (
* ALTERNATIVE_1_ALTINSTR
* "< first alt. instruction >"
* ALTERNATIVE_2_ALTINSTR2
* "< second alt. instruction >"
* ALTERNATIVE_3_OLDINSTR2
* "< initial instruction >"
* ALTERNATIVE_4_FEATURE2(feature1, feature2)
* )
*/
#define ALTERNATIVE_1_ALTINSTR \
".pushsection .altinstr_replacement, \"ax\"\n" \
b_altinstr(1)":\n"
#define ALTERNATIVE_2_OLDINSTR \
"\n" e_altinstr(1) ":\n" \
INSTR_LEN_SANITY_CHECK(altinstr_len(1)) \
".popsection\n" \
"661:\n"
#define ALTERNATIVE_3_FEATURE(facility) \
"\n662:\n" \
OLDINSTR_PADDING(oldinstr, 1) \
e_oldinstr_pad_end ":\n" \
INSTR_LEN_SANITY_CHECK(oldinstr_len) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(facility, 1) \
".popsection\n"
#define ALTERNATIVE_2_ALTINSTR2 \
"\n" e_altinstr(1) ":\n" \
INSTR_LEN_SANITY_CHECK(altinstr_len(1)) \
b_altinstr(2)":\n"
#define ALTERNATIVE_3_OLDINSTR2 \
"\n" e_altinstr(2) ":\n" \
INSTR_LEN_SANITY_CHECK(altinstr_len(2)) \
".popsection\n" \
"661:\n"
#define ALTERNATIVE_4_FEATURE2(facility1, facility2) \
"\n662:\n" \
".if " altinstr_len(1) " < " altinstr_len(2) "\n" \
OLDINSTR_PADDING(oldinstr, 2) \
".else\n" \
OLDINSTR_PADDING(oldinstr, 1) \
".endif\n" \
e_oldinstr_pad_end ":\n" \
INSTR_LEN_SANITY_CHECK(oldinstr_len) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(facility1, 1) \
ALTINSTR_ENTRY(facility2, 2) \
".popsection\n"
#endif /* __ASSEMBLY__ */
#endif /* _ASM_E2K_ALTERNATIVE_H */


@ -0,0 +1,46 @@
#ifndef __ASM_E2K_APIC_H
#define __ASM_E2K_APIC_H
#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/e2k_api.h>
#include <asm/irq.h>
#include <asm/io.h>
#ifndef __ASSEMBLY__
/*
* Basic functions accessing APICs.
*/
static inline void arch_apic_write(unsigned int reg, unsigned int v)
{
boot_writel(v, (void __iomem *) (APIC_DEFAULT_PHYS_BASE + reg));
}
static inline unsigned int arch_apic_read(unsigned int reg)
{
return boot_readl((void __iomem *) (APIC_DEFAULT_PHYS_BASE + reg));
}
static inline void boot_arch_apic_write(unsigned int reg, unsigned int v)
{
arch_apic_write(reg, v);
}
static inline unsigned int boot_arch_apic_read(unsigned int reg)
{
return arch_apic_read(reg);
}
#if IS_ENABLED(CONFIG_RDMA) || IS_ENABLED(CONFIG_RDMA_SIC) || \
IS_ENABLED(CONFIG_RDMA_NET)
extern int rdma_apic_init;
extern int rdma_node[];
#endif
#endif /* !(__ASSEMBLY__) */
#include <asm-l/apic.h>
#endif /* __KERNEL__ */
#endif /* __ASM_E2K_APIC_H */


@ -0,0 +1,276 @@
#ifndef __ASM_APIC_REGS_H
#define __ASM_APIC_REGS_H
#ifndef __ASSEMBLY__
/*
* the local APIC register structure, memory mapped. Not terribly well
* tested, but we might eventually use this one in the future - the
 * reason we cannot use it right now is the P5 APIC: it has an erratum
 * whereby it cannot take 8-bit reads and writes, only 32-bit ones ...
*/
#define u32 unsigned int
struct local_apic {
/*000*/ struct { u32 __reserved[4]; } __reserved_01;
/*010*/ struct { u32 __reserved_1 : 8,
boot_strap : 1,
__reserved_2 : 2,
apic_enable : 1,
__reserved_3 : 20;
u32 __reserved[3];
} bsp;
/*020*/ struct { /* APIC ID Register */
u32 __reserved_1 : 24,
phys_apic_id : 4,
__reserved_2 : 4;
u32 __reserved[3];
} id;
/*030*/ const
struct { /* APIC Version Register */
u32 version : 8,
__reserved_1 : 8,
max_lvt : 8,
__reserved_2 : 8;
u32 __reserved[3];
} version;
/*040*/ struct { u32 __reserved[4]; } __reserved_03;
/*050*/ struct { u32 __reserved[4]; } __reserved_04;
/*060*/ struct { u32 __reserved[4]; } __reserved_05;
/*070*/ struct { u32 __reserved[4]; } __reserved_06;
/*080*/ struct { /* Task Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} tpr;
/*090*/ const
struct { /* Arbitration Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} apr;
/*0A0*/ const
struct { /* Processor Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} ppr;
/*0B0*/ struct { /* End Of Interrupt Register */
u32 eoi;
u32 __reserved[3];
} eoi;
/*0C0*/ struct { u32 __reserved[4]; } __reserved_07;
/*0D0*/ struct { /* Logical Destination Register */
u32 __reserved_1 : 24,
logical_dest : 8;
u32 __reserved_2[3];
} ldr;
/*0E0*/ struct { /* Destination Format Register */
u32 __reserved_1 : 28,
model : 4;
u32 __reserved_2[3];
} dfr;
/*0F0*/ struct { /* Spurious Interrupt Vector Register */
u32 spurious_vector : 8,
apic_enabled : 1,
focus_cpu : 1,
__reserved_2 : 22;
u32 __reserved_3[3];
} svr;
/*100*/ struct { /* In Service Register */
/*170*/ u32 bitfield;
u32 __reserved[3];
} isr [8];
/*180*/ struct { /* Trigger Mode Register */
/*1F0*/ u32 bitfield;
u32 __reserved[3];
} tmr [8];
/*200*/ struct { /* Interrupt Request Register */
/*270*/ u32 bitfield;
u32 __reserved[3];
} irr [8];
/*280*/ union { /* Error Status Register */
struct {
u32 send_cs_error : 1,
receive_cs_error : 1,
send_accept_error : 1,
receive_accept_error : 1,
__reserved_1 : 1,
send_illegal_vector : 1,
receive_illegal_vector : 1,
illegal_register_address : 1,
__reserved_2 : 24;
u32 __reserved_3[3];
} error_bits;
struct {
u32 errors;
u32 __reserved_3[3];
} all_errors;
} esr;
/*290*/ struct { u32 __reserved[4]; } __reserved_08;
/*2A0*/ struct { u32 __reserved[4]; } __reserved_09;
/*2B0*/ struct { u32 __reserved[4]; } __reserved_10;
/*2C0*/ struct { u32 __reserved[4]; } __reserved_11;
/*2D0*/ struct { u32 __reserved[4]; } __reserved_12;
/*2E0*/ struct { u32 __reserved[4]; } __reserved_13;
/*2F0*/ struct { u32 __reserved[4]; } __reserved_14;
/*300*/ struct { /* Interrupt Command Register 1 */
u32 vector : 8,
delivery_mode : 3,
destination_mode : 1,
delivery_status : 1,
__reserved_1 : 1,
level : 1,
trigger : 1,
__reserved_2 : 2,
shorthand : 2,
__reserved_3 : 12;
u32 __reserved_4[3];
} icr1;
/*310*/ struct { /* Interrupt Command Register 2 */
union {
u32 __reserved_1 : 24,
phys_dest : 4,
__reserved_2 : 4;
u32 __reserved_3 : 24,
logical_dest : 8;
} dest;
u32 __reserved_4[3];
} icr2;
/*320*/ struct { /* LVT - Timer */
u32 vector : 8,
__reserved_1 : 4,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
timer_mode : 1,
__reserved_3 : 14;
u32 __reserved_4[3];
} lvt_timer;
/*330*/ struct { u32 __reserved[4]; } __reserved_15;
/*340*/ struct { /* LVT - Performance Counter */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_pc;
/*350*/ struct { /* LVT - LINT0 */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
polarity : 1,
remote_irr : 1,
trigger : 1,
mask : 1,
__reserved_2 : 15;
u32 __reserved_3[3];
} lvt_lint0;
/*360*/ struct { /* LVT - LINT1 */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
polarity : 1,
remote_irr : 1,
trigger : 1,
mask : 1,
__reserved_2 : 15;
u32 __reserved_3[3];
} lvt_lint1;
/*370*/ struct { /* LVT - Error */
u32 vector : 8,
__reserved_1 : 4,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_error;
/*380*/ struct { /* Timer Initial Count Register */
u32 initial_count;
u32 __reserved_2[3];
} timer_icr;
/*390*/ const
struct { /* Timer Current Count Register */
u32 curr_count;
u32 __reserved_2[3];
} timer_ccr;
/*3A0*/ struct { u32 __reserved[4]; } __reserved_16;
/*3B0*/ struct { u32 __reserved[4]; } __reserved_17;
/*3C0*/ struct { u32 __reserved[4]; } __reserved_18;
/*3D0*/ struct { u32 __reserved[4]; } __reserved_19;
/*3E0*/ struct { /* Timer Divide Configuration Register */
u32 divisor : 4,
__reserved_1 : 28;
u32 __reserved_2[3];
} timer_dcr;
/*3F0*/ struct { u32 __reserved[4]; } __reserved_20;
#if 0
/*3F0*/ struct { u32 __reserved[764]; } __reserved_20;
/*FE0*/ struct { /* Vector from PIC or APIC in nmi */
u32 nm_vector : 8,
__reserved : 24;
u32 __reserved[3];
} nm_vect;
/*FF0*/ struct { /* Vector */
u32 vector : 8,
__reserved_1 : 24;
u32 __reserved[3];
} vect;
#endif
} __attribute__ ((packed));
#undef u32
#endif /* !(__ASSEMBLY__) */
#endif /* __ASM_APIC_REGS_H */


@ -0,0 +1,9 @@
#ifndef __ASM_E2K_APICDEF_H
#define __ASM_E2K_APICDEF_H
#ifdef __KERNEL__
#include <asm/apic_regs.h>
#include <asm-l/apicdef.h>
#endif
#endif /* __ASM_E2K_APICDEF_H */


@ -0,0 +1,380 @@
#ifndef _E2K_ATOMIC_
#define _E2K_ATOMIC_
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/atomic_api.h>
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic64_read(v) READ_ONCE((v)->counter)
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i))
static inline void atomic_and(int incr, atomic_t *val)
{
__api_atomic_op(incr, &val->counter, w, "ands", RELAXED_MB);
}
static inline void atomic64_and(__s64 incr, atomic64_t *val)
{
__api_atomic_op(incr, &val->counter, d, "andd", RELAXED_MB);
}
#define atomic_andnot atomic_andnot
static inline void atomic_andnot(int incr, atomic_t *val)
{
__api_atomic_op(incr, &val->counter, w, "andns", RELAXED_MB);
}
#define atomic64_andnot atomic64_andnot
static inline void atomic64_andnot(__s64 incr, atomic64_t *val)
{
__api_atomic_op(incr, &val->counter, d, "andnd", RELAXED_MB);
}
static inline void atomic_or(int incr, atomic_t *val)
{
__api_atomic_op(incr, &val->counter, w, "ors", RELAXED_MB);
}
static inline void atomic64_or(__s64 incr, atomic64_t *val)
{
__api_atomic_op(incr, &val->counter, d, "ord", RELAXED_MB);
}
static inline void atomic_xor(int incr, atomic_t *val)
{
__api_atomic_op(incr, &val->counter, w, "xors", RELAXED_MB);
}
static inline void atomic64_xor(__s64 incr, atomic64_t *val)
{
__api_atomic_op(incr, &val->counter, d, "xord", RELAXED_MB);
}
static inline void atomic_add(int incr, atomic_t *val)
{
__api_atomic_op(incr, &val->counter, w, "adds", RELAXED_MB);
}
static inline void atomic64_add(__s64 incr, atomic64_t *val)
{
__api_atomic_op(incr, &val->counter, d, "addd", RELAXED_MB);
}
static inline void atomic_sub(int incr, atomic_t *val)
{
__api_atomic_op(incr, &val->counter, w, "subs", RELAXED_MB);
}
static inline void atomic64_sub(__s64 incr, atomic64_t *val)
{
__api_atomic_op(incr, &val->counter, d, "subd", RELAXED_MB);
}
#define __atomic_add_return(v, p, mem_model) \
__api_atomic_op((int) (v), &(p)->counter, w, "adds", mem_model)
#define atomic_add_return_relaxed(v, p) __atomic_add_return((v), (p), RELAXED_MB)
#define atomic_add_return_acquire(v, p) __atomic_add_return((v), (p), ACQUIRE_MB)
#define atomic_add_return_release(v, p) __atomic_add_return((v), (p), RELEASE_MB)
#define atomic_add_return(v, p) __atomic_add_return((v), (p), STRONG_MB)
#define atomic_add_return_lock(v, p) __atomic_add_return((v), (p), LOCK_MB)
#define __atomic64_add_return(v, p, mem_model) \
__api_atomic_op((__s64) (v), &(p)->counter, d, "addd", mem_model)
#define atomic64_add_return_relaxed(v, p) __atomic64_add_return((v), (p), RELAXED_MB)
#define atomic64_add_return_acquire(v, p) __atomic64_add_return((v), (p), ACQUIRE_MB)
#define atomic64_add_return_release(v, p) __atomic64_add_return((v), (p), RELEASE_MB)
#define atomic64_add_return(v, p) __atomic64_add_return((v), (p), STRONG_MB)
#define __atomic_sub_return(v, p, mem_model) \
__api_atomic_op((int) (v), &(p)->counter, w, "subs", mem_model)
#define atomic_sub_return_relaxed(v, p) __atomic_sub_return((v), (p), RELAXED_MB)
#define atomic_sub_return_acquire(v, p) __atomic_sub_return((v), (p), ACQUIRE_MB)
#define atomic_sub_return_release(v, p) __atomic_sub_return((v), (p), RELEASE_MB)
#define atomic_sub_return(v, p) __atomic_sub_return((v), (p), STRONG_MB)
#define __atomic64_sub_return(v, p, mem_model) \
__api_atomic_op((__s64) (v), &(p)->counter, d, "subd", mem_model)
#define atomic64_sub_return_relaxed(v, p) __atomic64_sub_return((v), (p), RELAXED_MB)
#define atomic64_sub_return_acquire(v, p) __atomic64_sub_return((v), (p), ACQUIRE_MB)
#define atomic64_sub_return_release(v, p) __atomic64_sub_return((v), (p), RELEASE_MB)
#define atomic64_sub_return(v, p) __atomic64_sub_return((v), (p), STRONG_MB)
#define __atomic_fetch_add(v, p, mem_model) \
__api_atomic_fetch_op((int) (v), &(p)->counter, w, "adds", mem_model)
#define atomic_fetch_add_relaxed(v, p) __atomic_fetch_add((v), (p), RELAXED_MB)
#define atomic_fetch_add_acquire(v, p) __atomic_fetch_add((v), (p), ACQUIRE_MB)
#define atomic_fetch_add_release(v, p) __atomic_fetch_add((v), (p), RELEASE_MB)
#define atomic_fetch_add(v, p) __atomic_fetch_add((v), (p), STRONG_MB)
#define __atomic64_fetch_add(v, p, mem_model) \
__api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "addd", mem_model)
#define atomic64_fetch_add_relaxed(v, p) __atomic64_fetch_add((v), (p), RELAXED_MB)
#define atomic64_fetch_add_acquire(v, p) __atomic64_fetch_add((v), (p), ACQUIRE_MB)
#define atomic64_fetch_add_release(v, p) __atomic64_fetch_add((v), (p), RELEASE_MB)
#define atomic64_fetch_add(v, p) __atomic64_fetch_add((v), (p), STRONG_MB)
#define __atomic_fetch_sub(v, p, mem_model) \
__api_atomic_fetch_op((int) (v), &(p)->counter, w, "subs", mem_model)
#define atomic_fetch_sub_relaxed(v, p) __atomic_fetch_sub((v), (p), RELAXED_MB)
#define atomic_fetch_sub_acquire(v, p) __atomic_fetch_sub((v), (p), ACQUIRE_MB)
#define atomic_fetch_sub_release(v, p) __atomic_fetch_sub((v), (p), RELEASE_MB)
#define atomic_fetch_sub(v, p) __atomic_fetch_sub((v), (p), STRONG_MB)
#define __atomic64_fetch_sub(v, p, mem_model) \
__api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "subd", mem_model)
#define atomic64_fetch_sub_relaxed(v, p) __atomic64_fetch_sub((v), (p), RELAXED_MB)
#define atomic64_fetch_sub_acquire(v, p) __atomic64_fetch_sub((v), (p), ACQUIRE_MB)
#define atomic64_fetch_sub_release(v, p) __atomic64_fetch_sub((v), (p), RELEASE_MB)
#define atomic64_fetch_sub(v, p) __atomic64_fetch_sub((v), (p), STRONG_MB)
#define __atomic_fetch_or(v, p, mem_model) \
__api_atomic_fetch_op((int) (v), &(p)->counter, w, "ors", mem_model)
#define atomic_fetch_or_relaxed(v, p) __atomic_fetch_or((v), (p), RELAXED_MB)
#define atomic_fetch_or_acquire(v, p) __atomic_fetch_or((v), (p), ACQUIRE_MB)
#define atomic_fetch_or_release(v, p) __atomic_fetch_or((v), (p), RELEASE_MB)
#define atomic_fetch_or(v, p) __atomic_fetch_or((v), (p), STRONG_MB)
#define __atomic64_fetch_or(v, p, mem_model) \
__api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "ord", mem_model)
#define atomic64_fetch_or_relaxed(v, p) __atomic64_fetch_or((v), (p), RELAXED_MB)
#define atomic64_fetch_or_acquire(v, p) __atomic64_fetch_or((v), (p), ACQUIRE_MB)
#define atomic64_fetch_or_release(v, p) __atomic64_fetch_or((v), (p), RELEASE_MB)
#define atomic64_fetch_or(v, p) __atomic64_fetch_or((v), (p), STRONG_MB)
#define __atomic_fetch_and(v, p, mem_model) \
__api_atomic_fetch_op((int) (v), &(p)->counter, w, "ands", mem_model)
#define atomic_fetch_and_relaxed(v, p) __atomic_fetch_and((v), (p), RELAXED_MB)
#define atomic_fetch_and_acquire(v, p) __atomic_fetch_and((v), (p), ACQUIRE_MB)
#define atomic_fetch_and_release(v, p) __atomic_fetch_and((v), (p), RELEASE_MB)
#define atomic_fetch_and(v, p) __atomic_fetch_and((v), (p), STRONG_MB)
#define __atomic64_fetch_and(v, p, mem_model) \
__api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "andd", mem_model)
#define atomic64_fetch_and_relaxed(v, p) __atomic64_fetch_and((v), (p), RELAXED_MB)
#define atomic64_fetch_and_acquire(v, p) __atomic64_fetch_and((v), (p), ACQUIRE_MB)
#define atomic64_fetch_and_release(v, p) __atomic64_fetch_and((v), (p), RELEASE_MB)
#define atomic64_fetch_and(v, p) __atomic64_fetch_and((v), (p), STRONG_MB)
#define __atomic_fetch_andnot(v, p, mem_model) \
__api_atomic_fetch_op((int) (v), &(p)->counter, w, "andns", mem_model)
#define atomic_fetch_andnot_relaxed(v, p) __atomic_fetch_andnot((v), (p), RELAXED_MB)
#define atomic_fetch_andnot_acquire(v, p) __atomic_fetch_andnot((v), (p), ACQUIRE_MB)
#define atomic_fetch_andnot_release(v, p) __atomic_fetch_andnot((v), (p), RELEASE_MB)
#define atomic_fetch_andnot(v, p) __atomic_fetch_andnot((v), (p), STRONG_MB)
#define __atomic64_fetch_andnot(v, p, mem_model) \
__api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "andnd", mem_model)
#define atomic64_fetch_andnot_relaxed(v, p) __atomic64_fetch_andnot((v), (p), RELAXED_MB)
#define atomic64_fetch_andnot_acquire(v, p) __atomic64_fetch_andnot((v), (p), ACQUIRE_MB)
#define atomic64_fetch_andnot_release(v, p) __atomic64_fetch_andnot((v), (p), RELEASE_MB)
#define atomic64_fetch_andnot(v, p) __atomic64_fetch_andnot((v), (p), STRONG_MB)
#define __atomic_fetch_xor(v, p, mem_model) \
__api_atomic_fetch_op((int) (v), &(p)->counter, w, "xors", mem_model)
#define atomic_fetch_xor_relaxed(v, p) __atomic_fetch_xor((v), (p), RELAXED_MB)
#define atomic_fetch_xor_acquire(v, p) __atomic_fetch_xor((v), (p), ACQUIRE_MB)
#define atomic_fetch_xor_release(v, p) __atomic_fetch_xor((v), (p), RELEASE_MB)
#define atomic_fetch_xor(v, p) __atomic_fetch_xor((v), (p), STRONG_MB)
#define __atomic64_fetch_xor(v, p, mem_model) \
__api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "xord", mem_model)
#define atomic64_fetch_xor_relaxed(v, p) __atomic64_fetch_xor((v), (p), RELAXED_MB)
#define atomic64_fetch_xor_acquire(v, p) __atomic64_fetch_xor((v), (p), ACQUIRE_MB)
#define atomic64_fetch_xor_release(v, p) __atomic64_fetch_xor((v), (p), RELEASE_MB)
#define atomic64_fetch_xor(v, p) __atomic64_fetch_xor((v), (p), STRONG_MB)
#define __atomic_xchg(p, v, mem_model) \
(int)__api_xchg_return((int) (v), &(p)->counter, w, mem_model)
#define atomic_xchg_relaxed(p, v) __atomic_xchg((p), (v), RELAXED_MB)
#define atomic_xchg_acquire(p, v) __atomic_xchg((p), (v), ACQUIRE_MB)
#define atomic_xchg_release(p, v) __atomic_xchg((p), (v), RELEASE_MB)
#define atomic_xchg(p, v) __atomic_xchg((p), (v), STRONG_MB)
#define __atomic64_xchg(p, v, mem_model) \
__api_xchg_return((__s64) (v), &(p)->counter, d, mem_model)
#define atomic64_xchg_relaxed(p, v) __atomic64_xchg((p), (v), RELAXED_MB)
#define atomic64_xchg_acquire(p, v) __atomic64_xchg((p), (v), ACQUIRE_MB)
#define atomic64_xchg_release(p, v) __atomic64_xchg((p), (v), RELEASE_MB)
#define atomic64_xchg(p, v) __atomic64_xchg((p), (v), STRONG_MB)
#define __atomic_cmpxchg(p, o, n, mem_model) \
(int)__api_cmpxchg_word_return((int) (o), (int) (n), \
&(p)->counter, mem_model)
#define atomic_cmpxchg_relaxed(p, o, n) __atomic_cmpxchg((p), (o), (n), RELAXED_MB)
#define atomic_cmpxchg_acquire(p, o, n) __atomic_cmpxchg((p), (o), (n), ACQUIRE_MB)
#define atomic_cmpxchg_release(p, o, n) __atomic_cmpxchg((p), (o), (n), RELEASE_MB)
#define atomic_cmpxchg(p, o, n) __atomic_cmpxchg((p), (o), (n), STRONG_MB)
#define atomic_cmpxchg_lock(p, o, n) __atomic_cmpxchg((p), (o), (n), LOCK_MB)
#define __atomic64_cmpxchg(p, o, n, mem_model) \
__api_cmpxchg_dword_return((__s64) (o), (__s64) (n), \
&(p)->counter, mem_model)
#define atomic64_cmpxchg_relaxed(p, o, n) __atomic64_cmpxchg((p), (o), (n), RELAXED_MB)
#define atomic64_cmpxchg_acquire(p, o, n) __atomic64_cmpxchg((p), (o), (n), ACQUIRE_MB)
#define atomic64_cmpxchg_release(p, o, n) __atomic64_cmpxchg((p), (o), (n), RELEASE_MB)
#define atomic64_cmpxchg(p, o, n) __atomic64_cmpxchg((p), (o), (n), STRONG_MB)
#define atomic64_cmpxchg_lock(p, o, n) __atomic64_cmpxchg((p), (o), (n), LOCK_MB)
#define atomic_long_cmpxchg_lock(p, o, n) atomic64_cmpxchg_lock((p), (o), (n))
#define atomic_inc_unless_negative atomic_inc_unless_negative
static inline bool atomic_inc_unless_negative(atomic_t *p)
{
return __api_atomic32_fetch_inc_unless_negative(&p->counter) >= 0;
}
#define atomic64_inc_unless_negative atomic64_inc_unless_negative
static inline bool atomic64_inc_unless_negative(atomic64_t *p)
{
return __api_atomic64_fetch_inc_unless_negative(&p->counter) >= 0;
}
#define atomic_dec_unless_positive atomic_dec_unless_positive
static inline bool atomic_dec_unless_positive(atomic_t *p)
{
return __api_atomic32_fetch_dec_unless_positive(&p->counter) <= 0;
}
#define atomic64_dec_unless_positive atomic64_dec_unless_positive
static inline bool atomic64_dec_unless_positive(atomic64_t *p)
{
return __api_atomic64_fetch_dec_unless_positive(&p->counter) <= 0;
}
/**
* atomic_dec_if_positive - decrement by 1 if old value positive
* @p: pointer of type atomic_t
*
 * The function returns the old value of *p minus 1, even if
 * the atomic variable was not decremented.
*/
#define atomic_dec_if_positive atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *p)
{
return __api_atomic32_fetch_dec_if_positive(&p->counter) - 1;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
static inline s64 atomic64_dec_if_positive(atomic64_t *p)
{
return __api_atomic64_fetch_dec_if_positive(&p->counter) - 1;
}
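/*
 * Usage sketch: drop a reference only while the counter is positive
 * ('refs' is a hypothetical counter, not a name from this file):
 *
 *	if (atomic_dec_if_positive(&refs) < 0)
 *		;	// was already <= 0, nothing was decremented
 */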
/**
* atomic_fetch_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns original value of @v
*/
#define atomic_fetch_add_unless atomic_fetch_add_unless
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
return __api_atomic32_fetch_add_unless(a, &v->counter, u);
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
return __api_atomic64_fetch_add_unless(a, &v->counter, u);
}
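/*
 * Usage sketch: the classic "get unless zero" pattern built on
 * atomic_fetch_add_unless() (an illustrative helper, not part of this
 * API):
 *
 *	static inline bool example_get_unless_zero(atomic_t *refs)
 *	{
 *		return atomic_fetch_add_unless(refs, 1, 0) != 0;
 *	}
 */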
#define atomic_try_cmpxchg atomic_try_cmpxchg
static inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
int r, o = *old;
r = atomic_cmpxchg(v, o, new);
*old = r;
return likely(r == o);
}
#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
static inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
int r, o = *old;
r = atomic_cmpxchg_acquire(v, o, new);
*old = r;
return likely(r == o);
}
#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
static inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
int r, o = *old;
r = atomic_cmpxchg_release(v, o, new);
*old = r;
return likely(r == o);
}
#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
static inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
int r, o = *old;
r = atomic_cmpxchg_relaxed(v, o, new);
*old = r;
return likely(r == o);
}
static __always_inline bool atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
int r, o = *old;
r = atomic_cmpxchg_lock(v, o, new);
*old = r;
return likely(r == o);
}
#define atomic64_try_cmpxchg atomic64_try_cmpxchg
static inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
r = atomic64_cmpxchg(v, o, new);
*old = r;
return likely(r == o);
}
#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
static inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
r = atomic64_cmpxchg_acquire(v, o, new);
*old = r;
return likely(r == o);
}
#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
static inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
r = atomic64_cmpxchg_release(v, o, new);
*old = r;
return likely(r == o);
}
#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
static inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
r = atomic64_cmpxchg_relaxed(v, o, new);
*old = r;
return likely(r == o);
}
static inline bool atomic64_try_cmpxchg_lock(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
r = atomic64_cmpxchg_lock(v, o, new);
*old = r;
return likely(r == o);
}
#define atomic_long_try_cmpxchg_lock(p, o, n) atomic64_try_cmpxchg_lock((p), (s64 *) (o), (n))
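/*
 * Usage sketch for the try_cmpxchg family above: on failure '*old' is
 * updated with the current value, which makes retry loops cheap
 * (illustrative code, 'limit' is hypothetical):
 *
 *	int old = atomic_read(v);
 *	do {
 *		if (old >= limit)
 *			break;
 *	} while (!atomic_try_cmpxchg(v, &old, old + 1));
 */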
#endif /* _E2K_ATOMIC_ */


@ -0,0 +1,892 @@
#ifndef _ASM_E2K_ATOMIC_API_H_
#define _ASM_E2K_ATOMIC_API_H_
#include <linux/types.h>
#include <asm/e2k_api.h>
#include <asm/native_cpu_regs_access.h>
#include <asm/native_dcache_regs_access.h>
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
/*
 * A special page that is readable by every user process is used
 * as a workaround for hardware bug #89242.
*/
#define NATIVE_HWBUG_WRITE_MEMORY_BARRIER_ADDRESS 0xff6000000000UL
#if !defined(CONFIG_BOOT_E2K) && !defined(E2K_P2V)
# define NATIVE_HWBUG_AFTER_LD_ACQ_ADDRESS \
NATIVE_HWBUG_WRITE_MEMORY_BARRIER_ADDRESS
# define NATIVE_HAS_HWBUG_AFTER_LD_ACQ_ADDRESS \
virt_cpu_has(CPU_HWBUG_WRITE_MEMORY_BARRIER)
# ifdef E2K_FAST_SYSCALL
# define NATIVE_HWBUG_AFTER_LD_ACQ_CPU NATIVE_GET_DSREG_OPEN(clkr)
# else
# ifndef __ASSEMBLY__
# include <asm/glob_regs.h>
register unsigned long long __cpu_reg DO_ASM_GET_GREG_MEMONIC(SMP_CPU_ID_GREG);
# endif
# define NATIVE_HWBUG_AFTER_LD_ACQ_CPU __cpu_reg
# endif
#elif defined(E2K_P2V)
# define NATIVE_HWBUG_AFTER_LD_ACQ_ADDRESS \
(NATIVE_NV_READ_IP_REG_VALUE() & ~0x3fUL)
# define NATIVE_HWBUG_AFTER_LD_ACQ_CPU 0
# if !defined(CONFIG_E2K_MACHINE) || defined(CONFIG_E2K_E8C)
# define NATIVE_HAS_HWBUG_AFTER_LD_ACQ_ADDRESS 1
# else
# define NATIVE_HAS_HWBUG_AFTER_LD_ACQ_ADDRESS 0
# endif
#else /* CONFIG_BOOT_E2K */
# define NATIVE_HWBUG_AFTER_LD_ACQ_ADDRESS \
(NATIVE_NV_READ_IP_REG_VALUE() & ~0x3fUL)
# define NATIVE_HAS_HWBUG_AFTER_LD_ACQ_ADDRESS 0
# define NATIVE_HWBUG_AFTER_LD_ACQ_CPU 0
#endif
#if !defined CONFIG_E2K_MACHINE || defined CONFIG_E2K_E8C
/* Define these here to avoid include hell... */
# define _UPSR_IE 0x20U
# define _UPSR_NMIE 0x80U
# define NATIVE_HWBUG_AFTER_LD_ACQ() \
do { \
unsigned long long __reg1, __reg2; \
if (NATIVE_HAS_HWBUG_AFTER_LD_ACQ_ADDRESS) { \
unsigned long __hwbug_cpu = NATIVE_HWBUG_AFTER_LD_ACQ_CPU; \
unsigned long __hwbug_address = \
NATIVE_HWBUG_AFTER_LD_ACQ_ADDRESS + \
(__hwbug_cpu & 0x3) * 4096; \
unsigned long __hwbug_atomic_flags; \
__hwbug_atomic_flags = NATIVE_NV_READ_UPSR_REG_VALUE(); \
NATIVE_SET_UPSR_IRQ_BARRIER( \
__hwbug_atomic_flags & ~(_UPSR_IE | _UPSR_NMIE)); \
NATIVE_CLEAN_LD_ACQ_ADDRESS(__reg1, __reg2, __hwbug_address); \
NATIVE_WRITE_MAS_D(__hwbug_address + 0 * 4096 + 0 * 64, 0UL, \
MAS_DCACHE_LINE_FLUSH); \
NATIVE_WRITE_MAS_D(__hwbug_address + 0 * 4096 + 4 * 64, 0UL, \
MAS_DCACHE_LINE_FLUSH); \
NATIVE_WRITE_MAS_D(__hwbug_address + 8 * 4096 + 1 * 64, 0UL, \
MAS_DCACHE_LINE_FLUSH); \
NATIVE_WRITE_MAS_D(__hwbug_address + 8 * 4096 + 5 * 64, 0UL, \
MAS_DCACHE_LINE_FLUSH); \
NATIVE_WRITE_MAS_D(__hwbug_address + 16 * 4096 + 2 * 64, 0UL, \
MAS_DCACHE_LINE_FLUSH); \
NATIVE_WRITE_MAS_D(__hwbug_address + 16 * 4096 + 6 * 64, 0UL, \
MAS_DCACHE_LINE_FLUSH); \
NATIVE_WRITE_MAS_D(__hwbug_address + 24 * 4096 + 3 * 64, 0UL, \
MAS_DCACHE_LINE_FLUSH); \
NATIVE_WRITE_MAS_D(__hwbug_address + 24 * 4096 + 7 * 64, 0UL, \
MAS_DCACHE_LINE_FLUSH); \
__E2K_WAIT(_fl_c); \
NATIVE_SET_UPSR_IRQ_BARRIER(__hwbug_atomic_flags); \
} \
} while (0)
#else
# define NATIVE_HWBUG_AFTER_LD_ACQ() do { } while (0)
#endif
/* FIXME: only hardware bug workaround macros will be paravirtualized */
/* here, but in the general guest case these bugs can be worked around */
/* only on the host, and the guest should call appropriate hypercalls */
/* to perform the whole atomic sequence on the host, because it */
/* contains privileged actions */
#if defined(CONFIG_PARAVIRT_GUEST)
/* it is paravirtualized host and guest kernel */
#include <asm/paravirt/atomic_api.h>
#elif defined(CONFIG_KVM_GUEST_KERNEL)
/* it is a pure guest kernel (not paravirtualized based on pv_ops) */
#include <asm/kvm/guest/atomic_api.h>
#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
/* it is native kernel with or without virtualization support */
/* examine bare hardware bugs */
#define virt_cpu_has(hwbug) cpu_has(hwbug)
#define VIRT_HWBUG_AFTER_LD_ACQ() NATIVE_HWBUG_AFTER_LD_ACQ()
#endif /* CONFIG_PARAVIRT_GUEST */
#define VIRT_HWBUG_AFTER_LD_ACQ_STRONG_MB VIRT_HWBUG_AFTER_LD_ACQ
#define VIRT_HWBUG_AFTER_LD_ACQ_LOCK_MB VIRT_HWBUG_AFTER_LD_ACQ
#define VIRT_HWBUG_AFTER_LD_ACQ_ACQUIRE_MB VIRT_HWBUG_AFTER_LD_ACQ
#define VIRT_HWBUG_AFTER_LD_ACQ_RELEASE_MB()
#define VIRT_HWBUG_AFTER_LD_ACQ_RELAXED_MB()
#define virt_api_atomic32_add_if_not_negative(val, addr, mem_model) \
({ \
register int rval; \
NATIVE_ATOMIC32_ADD_IF_NOT_NEGATIVE(val, addr, rval, mem_model); \
VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
rval; \
})
#define virt_api_atomic64_add_if_not_negative(val, addr, mem_model) \
({ \
register long long rval; \
NATIVE_ATOMIC64_ADD_IF_NOT_NEGATIVE(val, addr, rval, mem_model); \
VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
rval; \
})
/* Atomically add to the 16 low bits and return the new 32-bit value */
#define virt_api_atomic16_add_return32_lock(val, addr) \
({ \
register int rval, tmp; \
NATIVE_ATOMIC16_ADD_RETURN32_LOCK(val, addr, rval, tmp); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
rval; \
})
/* Atomically add two 32-bit values packed into one 64-bit value */
/* and return the new 64-bit value */
#define virt_api_atomic32_pair_add_return64_lock(val_lo, val_hi, addr) \
({ \
register long rval, tmp1, tmp2, tmp3; \
NATIVE_ATOMIC32_PAIR_ADD_RETURN64_LOCK(val_lo, val_hi, addr, rval, \
tmp1, tmp2, tmp3); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
rval; \
})
/* Atomically subtract two 32-bit values packed into one 64-bit value */
/* and return the new 64-bit value */
#define virt_api_atomic32_pair_sub_return64_lock(val_lo, val_hi, addr) \
({ \
register long rval, tmp1, tmp2, tmp3; \
NATIVE_ATOMIC32_PAIR_SUB_RETURN64_LOCK(val_lo, val_hi, addr, rval, \
tmp1, tmp2, tmp3); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
rval; \
})
#define virt_api_atomic_ticket_trylock(spinlock, tail_shift) \
({ \
register int __rval; \
register int __val; \
register int __head; \
register int __tail; \
NATIVE_ATOMIC_TICKET_TRYLOCK(spinlock, tail_shift, \
__val, __head, __tail, __rval); \
VIRT_HWBUG_AFTER_LD_ACQ_LOCK_MB(); \
__rval; \
})
/*
 * Atomic support for the new read/write spinlock mechanism.
 * Locking is ordered: later readers cannot outrun earlier writers.
 * The locking order is based on coupons (tickets) received on the first
 * attempt to take the lock, if the lock is already taken by somebody else.
 *
 * The initial state of a read/write spinlock allows 2^32 active readers
 * and only one active writer. But the coupon discipline allows only 2^16
 * simultaneously registered users of the lock: active + waiters.
*/
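/*
 * Lock word layout implied by the C equivalents below (a sketch only,
 * the real arch_rwlock_t definition lives elsewhere): one 64-bit word
 * holding a u16 'ticket' (the next coupon to hand out), a u16 'head'
 * (the coupon currently being served) and an s32 'count' (negative
 * while readers are active, +1 for the single active writer).
 */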
/*
 * Test whether the read/write lock can be taken by a reader right now.
 * The macro returns the source state of the read/write lock and sets the
 * output boolean value 'success' - locking can be successful.
*
* C equivalent:
*
static rwlock_val_t
atomic_can_lock_reader(arch_rwlock_t *rw, bool success // bypassed)
{
arch_rwlock_t src_lock;
u16 ticket;
u16 head;
s32 count;
src_lock.lock = rw->lock;
ticket = src_lock.ticket;
head = src_lock.head;
count = src_lock.count;
	// can lock: no waiters and no active writers
success = (ticket == head) && (count-1 < 0);
return src_lock.lock;
}
*/
#define virt_api_atomic_can_lock_reader(__rw_addr, __success) \
({ \
register unsigned int __head; \
register unsigned int __ticket; \
register int __count; \
register unsigned long __src; \
\
NATIVE_ATOMIC_CAN_LOCK_READER(__rw_addr, __success, \
__head, __ticket, __count, __src); \
__src; \
})
/*
 * Test whether the read/write lock can be taken by a writer right now.
 * The macro returns the source state of the read/write lock and sets the
 * output boolean value 'success' - locking can be successful.
*
* C equivalent:
*
static rwlock_val_t
atomic_can_lock_writer(arch_rwlock_t *rw, bool success // bypassed)
{
arch_rwlock_t src_lock;
u16 ticket;
u16 head;
s32 count;
src_lock.lock = rw->lock;
ticket = src_lock.ticket;
head = src_lock.head;
count = src_lock.count;
	// can lock: no waiters and no active readers or writers
success = (ticket == head) && (count == 0);
return src_lock.lock;
}
*/
#define virt_api_atomic_can_lock_writer(__rw_addr, __success) \
({ \
register unsigned int __head; \
register unsigned int __ticket; \
register int __count; \
register unsigned long __src; \
\
NATIVE_ATOMIC_CAN_LOCK_WRITER(__rw_addr, __success, \
__head, __ticket, __count, __src); \
__src; \
})
/*
 * The first attempt to take the read spinlock.
 * Successful locking increments the ticket number and the head and
 * decrements the active readers counter (a negative counter).
 * The macro returns the source state of the read/write lock and sets the
 * output boolean value 'success' - locking is successful; otherwise the
 * reader receives a coupon and should be queued as a waiter, similar to
 * the mutex implementation.
*
* C equivalent:
*
static rwlock_val_t
atomic_add_new_reader(arch_rwlock_t *rw, bool success // bypassed)
{
arch_rwlock_t src_lock;
arch_rwlock_t dst_lock;
u16 ticket;
u16 head;
s32 count;
src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
ticket = src_lock.ticket;
head = src_lock.head;
count = src_lock.count;
	// can lock: no waiters and no active writers
success = (ticket == head) && (count-1 < 0);
dst_lock.ticket = ticket + 1;
if (success) {
// take lock: increment readers (negative value),
// increment head to enable follow readers
count = count - 1;
head = head + 1;
}
dst_lock.count = count;
dst_lock.head = head;
E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
return src_lock.lock;
}
*/
#define virt_api_atomic_add_new_reader(__rw_addr, __success) \
({ \
register unsigned int __head; \
register unsigned int __ticket; \
register int __count; \
register unsigned long __tmp; \
register unsigned long __src; \
register unsigned long __dst; \
\
NATIVE_ATOMIC_ADD_NEW_READER(__rw_addr, __success, \
__head, __ticket, __count, __src, __dst, __tmp); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
__src; \
})
/*
 * Only try to take the read spinlock.
 * Successful locking increments the ticket number and the head and
 * decrements the active readers counter (a negative counter).
 * The macro returns the source state of the read/write lock and sets the
 * output boolean value 'success' - locking is successful; otherwise
 * 'success' is false and nothing is changed.
*
* C equivalent:
*
static rwlock_val_t
atomic_try_add_new_reader(arch_rwlock_t *rw, bool success // bypassed)
{
arch_rwlock_t src_lock;
arch_rwlock_t dst_lock;
u16 ticket;
u16 head;
s32 count;
src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
ticket = src_lock.ticket;
head = src_lock.head;
count = src_lock.count;
	// can lock: no waiters and no active writers
success = (ticket == head) && (count-1 < 0);
if (success) {
// take lock: increment readers (negative value),
// increment head to enable follow readers
// increment ticket number for next users
dst_lock.ticket = ticket + 1;
dst_lock.count = count - 1;
dst_lock.head = head + 1;
} else {
dst_lock.lock = src_lock.lock;
}
E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
return src_lock.lock;
}
*/
#define virt_api_atomic_try_add_new_reader(__rw_addr, __success) \
({ \
register unsigned int __head; \
register unsigned int __ticket; \
register int __count; \
register unsigned long __tmp; \
register unsigned long __src; \
register unsigned long __dst; \
\
NATIVE_ATOMIC_TRY_ADD_NEW_READER(__rw_addr, __success, \
__head, __ticket, __count, __src, __dst, __tmp); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
__src; \
})
/*
 * Slow attempt to take the read spinlock using the earlier received coupon
 * (ticket) number.
 * Successful locking increments the head number and decrements the active
 * readers counter (a negative counter).
 * The macro returns the current updated state of the read/write lock and
 * sets the output boolean argument 'success' - locking succeeded; otherwise
 * the reader should be queued again.
*
* C equivalent:
*
static rwlock_val_t
atomic_add_slow_reader(arch_rwlock_t *rw, u16 ticket, bool success)
{
arch_rwlock_t dst_lock;
u16 head;
s32 count;
dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
	head = dst_lock.head;
	count = dst_lock.count;
	// can lock: no waiters and no active writers
	success = (ticket == head) && (count-1 < 0);
	if (success) {
		// take lock: increment readers (negative counter),
		// increment head to enable the following readers
		count = count - 1;
		head = head + 1;
dst_lock.count = count;
dst_lock.head = head;
}
E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
return dst_lock.lock;
}
*/
#define virt_api_atomic_add_slow_reader(__rw_addr, __ticket, __success) \
({ \
register unsigned int __head; \
register int __count; \
register unsigned long __tmp; \
register unsigned long __dst; \
\
NATIVE_ATOMIC_ADD_SLOW_READER(__rw_addr, __success, \
__head, __ticket, __count, __dst, __tmp); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
__dst; \
})
/*
 * Unlock the read spinlock.
 * Only the active readers counter (a negative counter) needs to be
 * incremented.
 * The macro returns the current updated state of the read/write lock.
*
* C equivalent:
*
static rwlock_val_t
atomic_free_lock_reader(arch_rwlock_t *rw)
{
arch_rwlock_t dst_lock;
dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
dst_lock.count++;
E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
return dst_lock.lock;
}
*/
#define virt_api_atomic_free_lock_reader(__rw_addr) \
({ \
register unsigned long __dst; \
\
NATIVE_ATOMIC_FREE_LOCK_READER(__rw_addr, __dst); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
__dst; \
})
/*
 * First attempt to take the write spinlock.
 * Successful locking increments the ticket number and the active writers
 * counter (a positive value - there can be only one active writer, so the
 * counter is set to 1).
 * The macro returns the source state of the read/write lock and sets the
 * output boolean argument 'success' - locking succeeded; otherwise the
 * writer receives a coupon (ticket) and should be queued as a waiter,
 * similar to the mutex implementation.
*
* C equivalent:
*
static rwlock_val_t
atomic_add_new_writer(arch_rwlock_t *rw, bool success // bypassed)
{
arch_rwlock_t src_lock;
arch_rwlock_t dst_lock;
u16 ticket;
u16 head;
s32 count;
src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
ticket = src_lock.ticket;
head = src_lock.head;
count = src_lock.count;
	// can lock: no waiters and no active readers or writers
success = (ticket == head) && (count == 0);
dst_lock.head = head;
dst_lock.ticket = ticket + 1;
if (success) {
		// take lock: increment writers counter
count = count + 1;
}
dst_lock.count = count;
E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
return src_lock.lock;
}
*/
#define virt_api_atomic_add_new_writer(__rw_addr, __success) \
({ \
register unsigned int __head; \
register unsigned int __ticket; \
register int __count; \
register unsigned long __tmp; \
register unsigned long __src; \
register unsigned long __dst; \
\
NATIVE_ATOMIC_ADD_NEW_WRITER(__rw_addr, __success, \
__head, __ticket, __count, __src, __dst, __tmp); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
__src; \
})
/*
 * Only try to take the write spinlock.
 * Successful locking increments the ticket number and the active writers
 * counter (a positive value - there can be only one active writer, so the
 * counter is set to 1).
 * The macro returns the source state of the read/write lock and sets the
 * output boolean argument 'success' - locking succeeded; otherwise
 * 'success' is set to false and nothing is changed.
*
* C equivalent:
*
static rwlock_val_t
atomic_try_add_new_writer(arch_rwlock_t *rw, bool success // bypassed)
{
arch_rwlock_t src_lock;
arch_rwlock_t dst_lock;
u16 ticket;
u16 head;
s32 count;
src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
ticket = src_lock.ticket;
head = src_lock.head;
count = src_lock.count;
	// can lock: no waiters and no active readers or writers
	success = (ticket == head) && (count == 0);
	if (success) {
		// take lock: increment writers counter,
		// increment ticket number for next readers/writers
		dst_lock.head = head;
		dst_lock.ticket = ticket + 1;
		dst_lock.count = count + 1;
	} else {
		// write back the unchanged value (as in the reader variant
		// above) so that dst_lock is never used uninitialized
		dst_lock.lock = src_lock.lock;
	}
E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
return src_lock.lock;
}
*/
#define virt_api_atomic_try_add_new_writer(__rw_addr, __success) \
({ \
register unsigned int __head; \
register unsigned int __ticket; \
register int __count; \
register unsigned long __tmp; \
register unsigned long __src; \
register unsigned long __dst; \
\
NATIVE_ATOMIC_TRY_ADD_NEW_WRITER(__rw_addr, __success, \
__head, __ticket, __count, __src, __dst, __tmp); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
__src; \
})
/*
 * Slow attempt to take the write spinlock using the earlier received coupon
 * (ticket) number.
 * Successful locking increments the active writers counter
 * (a positive counter - there can be only one active writer, so the counter
 * is set to 1).
 * The macro returns the current updated state of the read/write lock and
 * sets the output boolean argument 'success' - locking succeeded; otherwise
 * the writer should be queued again.
*
* C equivalent:
*
static rwlock_val_t
atomic_add_slow_writer(arch_rwlock_t *rw, u16 ticket, bool success)
{
arch_rwlock_t dst_lock;
u16 head;
s32 count;
dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
	head = dst_lock.head;
	count = dst_lock.count;
	// can lock: no waiters and no active readers or writers
	success = (ticket == head) && (count == 0);
	if (success) {
		// take lock: increment writers counter,
count = count + 1;
dst_lock.count = count;
}
E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
return dst_lock.lock;
}
*/
#define virt_api_atomic_add_slow_writer(__rw_addr, __ticket, __success) \
({ \
register unsigned int __head; \
register int __count; \
register unsigned long __tmp; \
register unsigned long __dst; \
\
NATIVE_ATOMIC_ADD_SLOW_WRITER(__rw_addr, __success, \
__head, __ticket, __count, __dst, __tmp); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
__dst; \
})
/*
 * Unlock the write spinlock.
 * Only the queue head number needs to be incremented and the active writers
 * counter decremented (a positive counter - there can be only one writer,
 * so the counter is set back to 0).
 * The macro returns the current updated state of the read/write lock.
*
* C equivalent:
*
static rwlock_val_t
atomic_free_lock_writer(arch_rwlock_t *rw)
{
arch_rwlock_t dst_lock;
dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
	dst_lock.count--;	// the single writer held the lock: 1 -> 0
dst_lock.head++;
E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
return dst_lock.lock;
}
*/
#define virt_api_atomic_free_lock_writer(__rw_addr) \
({ \
register unsigned long __dst; \
register unsigned int __head; \
register int __count; \
register unsigned long __tmp; \
\
NATIVE_ATOMIC_FREE_LOCK_WRITER(__rw_addr, \
__head, __count, __dst, __tmp); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
__dst; \
})
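/*
 * A minimal sketch (hypothetical caller, not part of this header) of how the
 * reader-side primitives above compose into a full read_lock(); here
 * queue_as_waiter() stands in for whatever wait/yield mechanism is used:
 *
static void example_read_lock(arch_rwlock_t *rw)
{
	arch_rwlock_t src;
	bool success;

	// fast path: draw a ticket and try to take the lock in one
	// atomic operation
	src.lock = virt_api_atomic_add_new_reader(&rw->lock, success);
	if (success)
		return;
	// slow path: the drawn coupon (ticket) stays valid; retry until
	// the queue head reaches it
	do {
		queue_as_waiter(rw);	// hypothetical wait/yield helper
		virt_api_atomic_add_slow_reader(&rw->lock, src.ticket,
						success);
	} while (!success);
}
 */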
#define virt_api_atomic_op(val, addr, size_letter, op, mem_model) \
({ \
typeof(val) rval; \
NATIVE_ATOMIC_OP(val, addr, rval, size_letter, op, mem_model); \
VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
rval; \
})
#define virt_api_atomic_fetch_op(val, addr, size_letter, op, mem_model) \
({ \
typeof(val) rval, stored_val; \
NATIVE_ATOMIC_FETCH_OP(val, addr, rval, stored_val, \
size_letter, op, mem_model); \
VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
rval; \
})
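/*
 * The two generic templates above are instantiated with a size letter
 * ('w' or 'd'), an instruction mnemonic and a memory model. For example,
 * a 32-bit atomic add returning the old value can be expressed as (a
 * sketch, mirroring the __api_atomic32_add_oldval definition further below):
 *
 *	#define example_atomic32_fetch_add(val, addr) \
 *		virt_api_atomic_fetch_op(val, addr, w, "adds", STRONG_MB)
 */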
/*
* Atomic operations with return value and acquire/release semantics
*/
#define virt_api_atomic32_fetch_inc_unless_negative(addr) \
({ \
register int rval, tmp; \
NATIVE_ATOMIC_FETCH_OP_UNLESS(1, addr, 0, tmp, rval, \
w, "adds", "~ ", "adds", "", "cmplsb", STRONG_MB); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
rval; \
})
#define virt_api_atomic64_fetch_inc_unless_negative(addr) \
({ \
register long long rval, tmp; \
NATIVE_ATOMIC_FETCH_OP_UNLESS(1ull, addr, 0ull, tmp, rval, \
d, "addd", "~ ", "addd", "", "cmpldb", STRONG_MB); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
rval; \
})
#define virt_api_atomic32_fetch_dec_unless_positive(addr) \
({ \
register int rval, tmp; \
NATIVE_ATOMIC_FETCH_OP_UNLESS(1, addr, 0, tmp, rval, \
w, "subs", "", "adds", "~ ", "cmplesb", STRONG_MB); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
rval; \
})
#define virt_api_atomic64_fetch_dec_unless_positive(addr) \
({ \
register long long rval, tmp; \
NATIVE_ATOMIC_FETCH_OP_UNLESS(1ull, addr, 0ull, tmp, rval, \
d, "subd", "", "addd", "~ ", "cmpledb", STRONG_MB); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
rval; \
})
#define virt_api_atomic32_fetch_dec_if_positive(addr) \
({ \
register int rval, tmp; \
NATIVE_ATOMIC_FETCH_OP_UNLESS(1, addr, 0, tmp, rval, \
w, "subs", "~ ", "adds", "", "cmplesb", STRONG_MB); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
rval; \
})
#define virt_api_atomic64_fetch_dec_if_positive(addr) \
({ \
register long long rval, tmp; \
NATIVE_ATOMIC_FETCH_OP_UNLESS(1ull, addr, 0ull, tmp, rval, \
d, "subd", "~ ", "addd", "", "cmpledb", STRONG_MB); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
rval; \
})
#define virt_api_atomic32_fetch_add_unless(val, addr, unless) \
({ \
register int rval, tmp; \
NATIVE_ATOMIC_FETCH_OP_UNLESS(val, addr, unless, tmp, rval, \
w, "adds", "~ ", "adds", "", "cmpesb", STRONG_MB); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
rval; \
})
#define virt_api_atomic64_fetch_add_unless(val, addr, unless) \
({ \
register long long rval, tmp; \
NATIVE_ATOMIC_FETCH_OP_UNLESS(val, addr, unless, tmp, rval, \
d, "addd", "~ ", "addd", "", "cmpedb", STRONG_MB); \
VIRT_HWBUG_AFTER_LD_ACQ(); \
rval; \
})
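/*
 * C equivalent of the *_fetch_add_unless() macros above (a sketch in the
 * style of the earlier "C equivalent" comments; the hardware version
 * performs the load, compare and store as one atomic sequence):
 *
static int atomic32_fetch_add_unless(int val, int *addr, int unless)
{
	int old = *addr;		// atomically:
	if (old != unless)		//   add only when old != unless
		*addr = old + val;
	return old;			// the old value is returned either way
}
 */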
#define __api_atomic64_fetch_xchg_if_below(val, addr, mem_model) \
({ \
register long long rval, tmp; \
NATIVE_ATOMIC_FETCH_XCHG_UNLESS(val, addr, tmp, rval, d, \
"merged", "cmpbdb", mem_model); \
VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
rval; \
})
#define virt_api_xchg_return(val, addr, size_letter, mem_model) \
({ \
register long rval; \
NATIVE_ATOMIC_XCHG_RETURN(val, addr, rval, size_letter, mem_model); \
VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
rval; \
})
#define virt_api_cmpxchg_return(old, new, addr, size_letter, \
sxt_size, mem_model) \
({ \
register long rval; \
register long stored_val; \
NATIVE_ATOMIC_CMPXCHG_RETURN(old, new, addr, stored_val, rval, \
size_letter, sxt_size, mem_model); \
VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
rval; \
})
#define virt_api_cmpxchg_word_return(old, new, addr, mem_model) \
({ \
int rval, stored_val; \
NATIVE_ATOMIC_CMPXCHG_WORD_RETURN(old, new, addr, \
stored_val, rval, mem_model); \
VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
rval; \
})
#define virt_api_cmpxchg_dword_return(old, new, addr, mem_model) \
({ \
long long rval, stored_val; \
NATIVE_ATOMIC_CMPXCHG_DWORD_RETURN(old, new, addr, stored_val, \
rval, mem_model); \
VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
rval; \
})
/*
 * Implementation of cmpxchg_double for 64-bit pairs;
 * enables the logic required by the SLUB allocator.
*
* C equivalent:
*
static int
atomic_cmpxchg_double(struct page *page, void *freelist_old,
unsigned long counters_old,
void *freelist_new, unsigned long counters_new)
{
unsigned long flags;
local_irq_save(flags);
slab_lock(page);
if (page->freelist == freelist_old &&
page->counters == counters_old) {
page->freelist = freelist_new;
set_page_slub_counters(page, counters_new);
slab_unlock(page);
local_irq_restore(flags);
return true;
}
slab_unlock(page);
local_irq_restore(flags);
return false;
}
*/
#define virt_api_cmpxchg_double(addr1, addr2, old1, old2, new1, new2, \
mem_model) \
({ \
register long rval; \
NATIVE_ATOMIC_CMPXCHG_DWORD_PAIRS(addr1, old1, old2, new1, new2,\
rval, mem_model); \
VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
rval; \
})
#define __api_cmpxchg_double(addr1, addr2, old1, old2, new1, new2) \
virt_api_cmpxchg_double(addr1, addr2, old1, old2, \
new1, new2, STRONG_MB)
#define __api_futex_atomic32_op(insn, oparg, uaddr) \
virt_api_atomic_fetch_op(oparg, uaddr, w, insn, STRONG_MB)
#define __api_atomic32_add_if_not_negative \
virt_api_atomic32_add_if_not_negative
#define __api_atomic64_add_if_not_negative \
virt_api_atomic64_add_if_not_negative
/* Atomically add and return the old value */
#define __api_atomic32_add_oldval(val, addr) \
virt_api_atomic_fetch_op(val, addr, w, "adds", STRONG_MB)
#define __api_atomic32_add_oldval_lock(val, addr) \
virt_api_atomic_fetch_op(val, addr, w, "adds", LOCK_MB)
/* Atomically add to 16 low bits and return the new 32 bits value */
#define __api_atomic16_add_return32_lock(val, addr) \
virt_api_atomic16_add_return32_lock(val, addr)
/* Atomically add two 32 bits values packed into one 64 bits value */
/* and return the new 64 bits value */
#define __api_atomic32_pair_add_return64_lock(val_lo, val_hi, addr) \
virt_api_atomic32_pair_add_return64_lock(val_lo, val_hi, addr)
/* Atomically sub two 32 bits values packed into one 64 bits value */
/* and return the new 64 bits value */
#define __api_atomic32_pair_sub_return64_lock(val_lo, val_hi, addr) \
virt_api_atomic32_pair_sub_return64_lock(val_lo, val_hi, addr)
#define __api_atomic_ticket_trylock(spinlock, tail_shift) \
virt_api_atomic_ticket_trylock(spinlock, tail_shift)
#define __api_atomic_can_lock_reader(__rw_addr, __success) \
virt_api_atomic_can_lock_reader(__rw_addr, __success)
#define __api_atomic_can_lock_writer(__rw_addr, __success) \
virt_api_atomic_can_lock_writer(__rw_addr, __success)
#define __api_atomic_add_new_reader(__rw_addr, __success) \
virt_api_atomic_add_new_reader(__rw_addr, __success)
#define __api_atomic_try_add_new_reader(__rw_addr, __success) \
virt_api_atomic_try_add_new_reader(__rw_addr, __success)
#define __api_atomic_add_slow_reader(__rw_addr, __ticket, __success) \
virt_api_atomic_add_slow_reader(__rw_addr, __ticket, __success)
#define __api_atomic_free_lock_reader(__rw_addr) \
virt_api_atomic_free_lock_reader(__rw_addr)
#define __api_atomic_add_new_writer(__rw_addr, __success) \
virt_api_atomic_add_new_writer(__rw_addr, __success)
#define __api_atomic_try_add_new_writer(__rw_addr, __success) \
virt_api_atomic_try_add_new_writer(__rw_addr, __success)
#define __api_atomic_add_slow_writer(__rw_addr, __ticket, __success) \
virt_api_atomic_add_slow_writer(__rw_addr, __ticket, \
__success)
#define __api_atomic_free_lock_writer(__rw_addr) \
virt_api_atomic_free_lock_writer(__rw_addr)
#define __api_atomic_op virt_api_atomic_op
#define __api_atomic_fetch_op virt_api_atomic_fetch_op
/*
* Atomic operations with return value and acquire/release semantics
*/
#define __api_atomic32_fetch_add_unless(val, addr, unless) \
virt_api_atomic32_fetch_add_unless(val, addr, unless)
#define __api_atomic64_fetch_add_unless(val, addr, unless) \
virt_api_atomic64_fetch_add_unless(val, addr, unless)
#define __api_atomic32_fetch_dec_if_positive virt_api_atomic32_fetch_dec_if_positive
#define __api_atomic64_fetch_dec_if_positive virt_api_atomic64_fetch_dec_if_positive
#define __api_atomic32_fetch_dec_unless_positive virt_api_atomic32_fetch_dec_unless_positive
#define __api_atomic64_fetch_dec_unless_positive virt_api_atomic64_fetch_dec_unless_positive
#define __api_atomic32_fetch_inc_unless_negative virt_api_atomic32_fetch_inc_unless_negative
#define __api_atomic64_fetch_inc_unless_negative virt_api_atomic64_fetch_inc_unless_negative
#define __api_xchg_return virt_api_xchg_return
#define __api_cmpxchg_return virt_api_cmpxchg_return
#define __api_cmpxchg_word_return virt_api_cmpxchg_word_return
#define __api_cmpxchg_dword_return virt_api_cmpxchg_dword_return
#endif /* ! __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_E2K_ATOMIC_API_H_ */

View File

@ -0,0 +1,12 @@
#ifndef _E2K_AUXVEC_H
#define _E2K_AUXVEC_H
#define AT_FAST_SYSCALLS 32
/* Skip 33 as it is assumed to be AT_SYSINFO_EHDR in Linux */
#define AT_SYSTEM_INFO 34
#ifdef __KERNEL__
# define AT_VECTOR_SIZE_ARCH 2
#endif
#endif /* _E2K_AUXVEC_H */

View File

@ -0,0 +1,165 @@
#ifndef _ASM_E2K_BARRIER_H
#define _ASM_E2K_BARRIER_H
#include <linux/compiler.h>
#include <asm/e2k_api.h>
#include <asm/atomic_api.h>
#if CONFIG_CPU_ISET >= 6
/* Cannot use this on V5 because of load-after-store dependencies -
* compiled kernel won't honour them */
# define mb() E2K_WAIT_V6(_st_c | _ld_c | _sas | _sal | _las | _lal)
#else
# define mb() E2K_WAIT(_st_c | _ld_c)
#endif
#define wmb() E2K_WAIT_ST_C_SAS()
#define rmb() E2K_WAIT_LD_C_LAL()
/*
* For smp_* variants add _mt modifier
*/
#if CONFIG_CPU_ISET >= 6
/* Cannot use this on V5 because of load-after-store dependencies -
* compiled kernel won't honour them */
# define __smp_mb() E2K_WAIT_V6(_st_c | _ld_c | _sas | _sal | _las | _lal | _mt)
#else
# define __smp_mb() E2K_WAIT(_st_c | _ld_c)
#endif
#define __smp_wmb() E2K_WAIT_ST_C_SAS_MT()
#define __smp_rmb() E2K_WAIT_LD_C_LAL_MT()
#define dma_rmb() __smp_rmb()
#define dma_wmb() __smp_wmb()
#define __smp_read_barrier_depends() NATIVE_HWBUG_AFTER_LD_ACQ()
#if CONFIG_CPU_ISET >= 5
# define __smp_mb__after_atomic() barrier()
# define __smp_mb__before_atomic() E2K_WAIT_ST_C_SAS_LD_C_SAL_MT()
#elif CONFIG_CPU_ISET >= 3
/* Atomic operations are serializing since e2s */
# define __smp_mb__after_atomic() \
do { \
barrier(); \
NATIVE_HWBUG_AFTER_LD_ACQ(); \
} while (0)
# define __smp_mb__before_atomic() barrier()
#else
# define __smp_mb__after_atomic() E2K_WAIT(_st_c)
# define __smp_mb__before_atomic() barrier()
#endif
extern int __smp_store_release_bad(void) __attribute__((noreturn));
#if CONFIG_CPU_ISET >= 6
# define __smp_store_release(p, v) \
do { \
__typeof__(*(p)) __ssr_v = (v); \
switch (sizeof(*p)) { \
case 1: STORE_NV_MAS((p), __ssr_v, MAS_STORE_RELEASE_V6(MAS_MT_1), b, "memory"); break; \
case 2: STORE_NV_MAS((p), __ssr_v, MAS_STORE_RELEASE_V6(MAS_MT_1), h, "memory"); break; \
case 4: STORE_NV_MAS((p), __ssr_v, MAS_STORE_RELEASE_V6(MAS_MT_1), w, "memory"); break; \
case 8: STORE_NV_MAS((p), __ssr_v, MAS_STORE_RELEASE_V6(MAS_MT_1), d, "memory"); break; \
default: __smp_store_release_bad(); break; \
} \
} while (0)
#else
# define __smp_store_release(p, v) \
do { \
compiletime_assert(sizeof(*p) == 1 || sizeof(*p) == 2 || \
sizeof(*p) == 4 || sizeof(*p) == 8, \
"Need native word sized stores/loads for atomicity."); \
E2K_WAIT_ST_C_SAS_LD_C_SAL_MT(); \
WRITE_ONCE(*(p), (v)); \
} while (0)
#endif /* CONFIG_CPU_ISET >= 6 */
/*
* store_release() - same as __smp_store_release but acts on device accesses too
*/
#define store_release_v2 __smp_store_release
#define store_release_v6(p, v) \
do { \
__typeof__(*(p)) __sr6_v = (v); \
switch (sizeof(*p)) { \
case 1: STORE_NV_MAS((p), __sr6_v, MAS_STORE_RELEASE_V6(MAS_MT_0), b, "memory"); break; \
case 2: STORE_NV_MAS((p), __sr6_v, MAS_STORE_RELEASE_V6(MAS_MT_0), h, "memory"); break; \
case 4: STORE_NV_MAS((p), __sr6_v, MAS_STORE_RELEASE_V6(MAS_MT_0), w, "memory"); break; \
case 8: STORE_NV_MAS((p), __sr6_v, MAS_STORE_RELEASE_V6(MAS_MT_0), d, "memory"); break; \
default: __smp_store_release_bad(); break; \
} \
} while (0)
#define store_release(p, v) \
do { \
if (cpu_has(CPU_FEAT_ISET_V6)) \
store_release_v6((p), (v)); \
else \
store_release_v2((p), (v)); \
} while (0)
#if CONFIG_CPU_ISET >= 6
extern int __smp_load_acquire_bad(void) __attribute__((noreturn));
# define __smp_load_acquire(p) \
({ \
union { typeof(*(p)) __ret_la; char __c[1]; } __u; \
switch (sizeof(*p)) { \
case 1: LOAD_NV_MAS((p), (*(__u8 *)__u.__c), MAS_LOAD_ACQUIRE_V6(MAS_MT_1), b, "memory"); \
break; \
case 2: LOAD_NV_MAS((p), (*(__u16 *)__u.__c), MAS_LOAD_ACQUIRE_V6(MAS_MT_1), h, "memory"); \
break; \
case 4: LOAD_NV_MAS((p), (*(__u32 *)__u.__c), MAS_LOAD_ACQUIRE_V6(MAS_MT_1), w, "memory"); \
break; \
case 8: LOAD_NV_MAS((p), (*(__u64 *)__u.__c), MAS_LOAD_ACQUIRE_V6(MAS_MT_1), d, "memory"); \
break; \
default: __smp_load_acquire_bad(); break; \
} \
__u.__ret_la; \
})
#else
# define __smp_load_acquire(p) \
({ \
typeof(*(p)) ___p1 = READ_ONCE(*(p)); \
compiletime_assert(sizeof(*p) == 1 || sizeof(*p) == 2 || \
sizeof(*p) == 4 || sizeof(*p) == 8, \
"Need native word sized stores/loads for atomicity."); \
E2K_RF_WAIT_LOAD(___p1); \
___p1; \
})
#endif
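/*
 * A classic producer/consumer sketch (hypothetical code, assuming shared
 * variables 'data' and 'ready') of the guarantee the pair above provides:
 * a consumer that observes ready == 1 also observes data == 42.
 *
 *	// producer			// consumer
 *	data = 42;			while (!smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);		cpu_relax();
 *					use(data);	// sees 42
 */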
/*
 * e2k is an in-order architecture, thus loads are not speculated by hardware
* and we only have to protect against compiler optimizations
*/
#define smp_acquire__after_ctrl_dep() barrier()
/**
* array_index_mask_nospec - hide 'index' from compiler so that
* it does not try to load array speculatively across this point
*
* On e2k there is no hardware speculation, only software, so the
* trick with mask is not needed.
*/
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long index,
unsigned long size)
{
OPTIMIZER_HIDE_VAR(index);
return -1UL;
}
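/*
 * For reference, the generic pattern this hook participates in (sketch of
 * a hypothetical caller going through array_index_nospec() from
 * <linux/nospec.h>):
 *
 *	if (index < size) {
 *		index = array_index_nospec(index, size);
 *		val = array[index];	// on e2k the mask is simply -1UL
 *	}
 */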
/*
 * Follow the example of RISC-V and forbid IO from crossing a scheduling
 * boundary by using mb() instead of smp_mb(). This should not have
* any measurable performance impact on e2k. The bad case is when
* task is preempted after writeX() and migrated to another CPU fast
* enough so that the CPU it was preempted on has not called any
* spin_unlock()'s yet.
*/
#define smp_mb__after_spinlock() mb()
#include <asm-generic/barrier.h>
#endif /* _ASM_E2K_BARRIER_H */

View File

@ -0,0 +1,116 @@
/*
* $Id: bios_map.h,v 1.1 2009/01/15 13:47:21 kostin Exp $
 * BIOS CMOS map layout.
*/
#ifndef _E2K_BIOS_MAP_H_
#define _E2K_BIOS_MAP_H_
#ifdef __KERNEL__
#define ECMOS_PORT(ext) (0x70 + (ext))
/*
 * All currently supported machines access the RTC index register via
 * an ISA port access, but the way to access the data register differs ...
*/
#define ECMOS_READ(addr, ext) ({ \
outb_p((addr),ECMOS_PORT(ext + 0)); \
inb_p(ECMOS_PORT(ext + 1)); \
})
#define ECMOS_WRITE(val, addr, ext) ({ \
outb_p((addr),ECMOS_PORT(ext + 0)); \
outb_p((val),ECMOS_PORT(ext + 1)); \
})
static inline unsigned char bios_read(int addr)
{
	unsigned char byte;
if (addr & 0x80) byte = ECMOS_READ(addr - 0x80, 2);
else byte = ECMOS_READ(addr, 0);
return byte;
}
static inline void bios_write(unsigned char val, int addr)
{
if (addr & 0x80) ECMOS_WRITE(val, addr - 0x80, 2);
else ECMOS_WRITE(val, addr, 0);
}
#endif /* __KERNEL__ */
//#define bios_read(addr) ECMOS_READ(addr)
//#define bios_write(val, addr) ECMOS_WRITE(val, addr)
#define BIOS_UNSET_ONE -1
#define name_length 15
#define cmdline_length 127
#define CMOS_BASE		(128 + 64)
#define CMOS_SIZE 64
#define CMOS_FILE_LENGTH 15
#define BIOS_PROC_MASK		(CMOS_BASE + 0)
#define BIOS_DEV_NUM		(CMOS_BASE + 3)	/* device number (0 - 3) */
#define BIOS_AUTOBOOT_TIMER	(CMOS_BASE + 4)	/* boot waiting seconds */
#define BIOS_BOOT_ITEM		(CMOS_BASE + 5)	/* boot item: kernel, lintel,
						   tests - 'Ñ','Ë','Ô' */
#define BIOS_BOOT_KNAME		(CMOS_BASE + 6)	/* kernel name */
#define BIOS_TEST_FLAG 0x6c
#define BIOS_TEST_FLAG2 0x6d
#define BIOS_SERIAL_RATE 0x6e /* 3 - 38400 other - 115200 */
#define BIOS_MACHINE_TYPE	(CMOS_BASE + 28)	/* architecture type */
#define BIOS_PASSWD_FLAG	(CMOS_BASE + 29)
#define BIOS_PASSWD_FLAG2	(CMOS_BASE + 30)
#define BIOS_PASSWD1		(CMOS_BASE + 31)
#define BIOS_PASSWD2		(CMOS_BASE + 32)
#define BIOS_PASSWD3		(CMOS_BASE + 33)
#define BIOS_PASSWD4		(CMOS_BASE + 34)
#define BIOS_PASSWD5		(CMOS_BASE + 35)
#define BIOS_PASSWD6		(CMOS_BASE + 36)
#define BIOS_PASSWD7		(CMOS_BASE + 37)
#define BIOS_PASSWD8		(CMOS_BASE + 38)
#define BIOS_PASSWD9		(CMOS_BASE + 39)
#define BIOS_PASSWD10		(CMOS_BASE + 40)
#define BIOS_CSUM		(CMOS_BASE + 61)	/* checksum lsb */
#define BIOS_CSUM2		(CMOS_BASE + 62)	/* checksum msb */
typedef struct e2k_bios_param {
char kernel_name[name_length + 1];
char command_line[cmdline_length + 1];
int booting_item;
int dev_num;
int serial_rate;
int autoboot_timer;
int machine_type;
} e2k_bios_param_t;
#ifdef __KERNEL__
static inline unsigned int _bios_csum(unsigned int counter, unsigned int len)
{
unsigned int csum = 0;
len = len + counter;
while(counter < len) {
csum += bios_read(counter);
counter++;
}
return csum;
}
static inline unsigned int _bios_checksum(void)
{
unsigned int csum = 0;
	csum = _bios_csum(106, 6);
	csum += _bios_csum(192, 21);
	csum += _bios_csum(220, 12);
return csum;
}
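/*
 * A sketch (hypothetical helper, not part of this header) of matching the
 * computed checksum against the lsb/msb bytes stored at BIOS_CSUM and
 * BIOS_CSUM2:
 *
static inline int bios_checksum_ok(void)
{
	unsigned int csum = _bios_checksum();

	return (csum & 0xff) == bios_read(BIOS_CSUM) &&
		((csum >> 8) & 0xff) == bios_read(BIOS_CSUM2);
}
 */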
#endif /* __KERNEL__ */
#endif /*_E2K_BIOS_MAP_H_ */

View File

@ -0,0 +1,63 @@
#ifndef _E2K_BITOPS_H_
#define _E2K_BITOPS_H_
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <asm/barrier.h>
#include <asm/e2k_api.h>
/* This is better than the generic definition */
static inline int fls(unsigned int x)
{
return 8 * sizeof(int) - E2K_LZCNTS(x);
}
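/*
 * For example (assuming E2K_LZCNTS() returns the number of leading zero
 * bits of its 32-bit argument): fls(0x10) == 5, fls(1) == 1, fls(0) == 0.
 */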
static inline unsigned int __arch_hweight32(unsigned int w)
{
return E2K_POPCNTS(w);
}
static inline unsigned int __arch_hweight16(unsigned int w)
{
return E2K_POPCNTS(w & 0xffff);
}
static inline unsigned int __arch_hweight8(unsigned int w)
{
return E2K_POPCNTS(w & 0xff);
}
static inline unsigned long __arch_hweight64(unsigned long w)
{
return E2K_POPCNTD(w);
}
#include <asm-generic/bitops/builtin-__ffs.h>
#include <asm-generic/bitops/builtin-__fls.h>
#include <asm-generic/bitops/builtin-ffs.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/non-atomic.h>
#if defined E2K_P2V && !defined CONFIG_BOOT_E2K
extern unsigned long boot_find_next_bit(const unsigned long *addr,
unsigned long size, unsigned long offset);
extern unsigned long boot_find_next_zero_bit(const unsigned long *addr,
unsigned long size, unsigned long offset);
# define find_next_bit boot_find_next_bit
# define find_next_zero_bit boot_find_next_zero_bit
#endif
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#endif /* _E2K_BITOPS_H_ */

View File

@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_BITREV_H
#define __ASM_BITREV_H
static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
{
return __builtin_e2k_bitrevs(x);
}
static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x)
{
return __builtin_e2k_bitrevs((u32) x) >> 16;
}
static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x)
{
return __builtin_e2k_bitrevs((u32) x) >> 24;
}
#endif

View File

@ -0,0 +1,8 @@
#ifndef __ASM_E2K_BITSPERLONG_H
#define __ASM_E2K_BITSPERLONG_H
#define __BITS_PER_LONG 64
#include <asm-generic/bitsperlong.h>
#endif /* __ASM_E2K_BITSPERLONG_H */

View File

@ -0,0 +1,123 @@
/*
* E2K boot info flags support.
*/
#ifndef _E2K_BOOT_FLAGS_H
#define _E2K_BOOT_FLAGS_H
#include <linux/types.h>
#include <asm/e2k_api.h>
#include <asm/bootinfo.h>
#include <asm/mas.h>
#include <asm/e2k.h>
/*
* bootblock manipulations (read/write/set/reset) in virtual kernel mode
* on physical level:
* write through and uncachable access on physical address
* bootblock virtual address can be only read
*/
#define DO_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field, mas) \
({ \
u64 field_value; \
switch (sizeof((bootblock_p)->blk_field)) { \
case 1: \
field_value = \
NATIVE_READ_MAS_B(&((bootblock_p)->blk_field), \
mas); \
break; \
case 2: \
field_value = \
NATIVE_READ_MAS_H(&((bootblock_p)->blk_field), \
mas); \
break; \
case 4: \
field_value = \
NATIVE_READ_MAS_W(&((bootblock_p)->blk_field), \
mas); \
break; \
case 8: \
field_value = \
NATIVE_READ_MAS_D(&((bootblock_p)->blk_field), \
mas); \
break; \
default: \
BUG(); \
} \
(field_value); \
})
#define DO_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value, mas) \
({ \
switch (sizeof((bootblock_p)->blk_field)) { \
case 1: \
NATIVE_WRITE_MAS_B(&((bootblock_p)->blk_field), \
(field_value), mas); \
break; \
case 2: \
NATIVE_WRITE_MAS_H(&((bootblock_p)->blk_field), \
(field_value), mas); \
break; \
case 4: \
NATIVE_WRITE_MAS_W(&((bootblock_p)->blk_field), \
(field_value), mas); \
break; \
case 8: \
NATIVE_WRITE_MAS_D(&((bootblock_p)->blk_field), \
(field_value), mas); \
break; \
default: \
BUG(); \
} \
})
#define NATIVE_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \
DO_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field, MAS_IOADDR)
#define NATIVE_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \
DO_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \
field_value, MAS_IOADDR)
#ifdef CONFIG_KVM_GUEST_KERNEL
/* it is native guest kernel */
#include <asm/kvm/guest/boot_flags.h>
#elif defined(CONFIG_PARAVIRT_GUEST)
/* it is paravirtualized host and guest kernel */
#include <asm/paravirt/boot_flags.h>
#else /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */
/* it is native kernel without virtualization support */
/* or host kernel with virtualization support */
#define READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \
NATIVE_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field)
#define WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \
NATIVE_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \
field_value)
#endif /* ! CONFIG_KVM_GUEST_KERNEL */
static inline u64
read_bootblock_flags(bootblock_struct_t *bootblock)
{
return READ_BOOTBLOCK_FIELD(bootblock, kernel_flags);
}
static inline void
write_bootblock_flags(bootblock_struct_t *bootblock, u64 new_flags)
{
WRITE_BOOTBLOCK_FIELD(bootblock, boot_flags, new_flags);
WRITE_BOOTBLOCK_FIELD(bootblock, kernel_flags, new_flags);
}
static inline void
set_bootblock_flags(bootblock_struct_t *bootblock, u64 new_flags)
{
u64 cur_flags = read_bootblock_flags(bootblock);
write_bootblock_flags(bootblock, cur_flags | new_flags);
}
static inline void
reset_bootblock_flags(bootblock_struct_t *bootblock, u64 new_flags)
{
u64 cur_flags = read_bootblock_flags(bootblock);
write_bootblock_flags(bootblock, cur_flags & ~new_flags);
}
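/*
 * Typical usage sketch (the flag name is hypothetical): set a flag for the
 * boot loader to see on the next reboot, then clear it once it has been
 * handled:
 *
 *	set_bootblock_flags(bootblock, EXAMPLE_BB_FLAG);
 *	...
 *	reset_bootblock_flags(bootblock, EXAMPLE_BB_FLAG);
 */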
#endif /* _E2K_BOOT_FLAGS_H */

View File

@ -0,0 +1,34 @@
#ifndef _ASM_E2K_BOOT_PROFILING_H
#define _ASM_E2K_BOOT_PROFILING_H
#include <linux/time.h>
#include <asm-l/boot_profiling.h>
#ifdef CONFIG_BOOT_TRACE
extern void notrace boot_add_boot_trace_event(char *name);
/* EARLY_BOOT_TRACEPOINT should be used if virtual memory
* is not working yet. It does not support formatted strings. */
# define EARLY_BOOT_TRACEPOINT(name) \
boot_add_boot_trace_event(name)
#ifdef CONFIG_RECOVERY
/* Clears boot trace data (needed to trace recovery times). */
void reinitialize_boot_trace_data(void);
#endif /* CONFIG_RECOVERY */
#define boot_trace_get_cycles get_cycles
/* Convert boot counter cycles to ms */
static inline u64 boot_cycles_to_ms(u64 cycles)
{
u64 cpu_hz = cpu_data[0].proc_freq;
return MSEC_PER_SEC * cycles / cpu_hz;
}
#else /* !CONFIG_BOOT_TRACE */
# define EARLY_BOOT_TRACEPOINT(name)
#endif /* CONFIG_BOOT_TRACE */
#endif /* _ASM_E2K_BOOT_PROFILING_H */

View File

@ -0,0 +1,42 @@
/* $Id: boot_recovery.h,v 1.12 2009/06/29 11:52:31 atic Exp $
*
* boot-time recovery of kernel from control point.
*/
#ifndef _E2K_BOOT_RECOVERY_H
#define _E2K_BOOT_RECOVERY_H
#include <asm/types.h>
#include <asm/console.h>
#include <asm/cpu_regs_types.h>
/* To use stgd upon kernel entry task_struct must be aligned
* (since %gd_lo.base points to it) */
struct aligned_task {
struct task_struct t;
} __aligned(E2K_ALIGN_GLOBALS_SZ);
extern struct aligned_task task_to_restart[];
extern struct task_struct *task_to_recover;
/*
* Forwards of boot-time functions to recover system state
*/
extern void boot_recovery(bootblock_struct_t *bootblock);
extern void recover_kernel(void);
extern int restart_system(void (*restart_func)(void *), void *arg);
#define full_phys_mem nodes_phys_mem
#define START_KERNEL_SYSCALL 12
extern inline void
scall2(bootblock_struct_t *bootblock)
{
(void) E2K_SYSCALL(START_KERNEL_SYSCALL, /* Trap number */
0, /* empty sysnum */
1, /* single argument */
(long) bootblock); /* the argument */
}
#endif /* _E2K_BOOT_RECOVERY_H */

View File

@ -0,0 +1,10 @@
#ifndef _E2K_BOOTINFO_H_
#define _E2K_BOOTINFO_H_
#ifdef __KERNEL__
#include <asm-l/bootinfo.h>
#endif
#include <uapi/asm/bootinfo.h>
#endif /* _E2K_BOOTINFO_H_ */

View File

@ -0,0 +1,20 @@
#ifndef _E2K_BUG_H
#define _E2K_BUG_H
#ifdef CONFIG_BUG
# include <asm/e2k_api.h>
# define BUG() \
do { \
__EMIT_BUG(0); \
unreachable(); \
} while (0)
# define __WARN_FLAGS(flags) __EMIT_BUG(BUGFLAG_WARNING|(flags));
# define HAVE_ARCH_BUG
#endif /* CONFIG_BUG */
#include <asm-generic/bug.h>
#endif /* _E2K_BUG_H */

View File

@ -0,0 +1,10 @@
#ifndef _E2K_BYTEORDER_H_
#define _E2K_BYTEORDER_H_
#include <asm/types.h>
#define __BYTEORDER_HAS_U64__
#include <linux/byteorder/little_endian.h>
#endif /* _E2K_BYTEORDER_H_ */

View File

@ -0,0 +1,81 @@
#ifndef _E2K_CACHE_H_
#define _E2K_CACHE_H_
#include <asm/es2.h>
#include <asm/e2s.h>
#include <asm/e8c.h>
#include <asm/e8c2.h>
#include <asm/e1cp.h>
#include <asm/e16c.h>
#include <asm/e12c.h>
#include <asm/e2c3.h>
#define _max_(a, b) ((a) > (b) ? (a) : (b))
#define _max3_(a, b, c) _max_((a), _max_((b), (c)))
#ifdef CONFIG_E2K_MACHINE
# if defined(CONFIG_E2K_ES2_DSP) || defined(CONFIG_E2K_ES2_RU)
# define L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT
# elif defined(CONFIG_E2K_E2S)
# define L1_CACHE_SHIFT E2S_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT E2S_L2_CACHE_SHIFT
# elif defined(CONFIG_E2K_E8C)
# define L1_CACHE_SHIFT E8C_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT E8C_L2_CACHE_SHIFT
# define L3_CACHE_SHIFT E8C_L3_CACHE_SHIFT
# elif defined(CONFIG_E2K_E1CP)
# define L1_CACHE_SHIFT E1CP_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT E1CP_L2_CACHE_SHIFT
# elif defined(CONFIG_E2K_E8C2)
# define L1_CACHE_SHIFT E8C2_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT E8C2_L2_CACHE_SHIFT
# define L3_CACHE_SHIFT E8C2_L3_CACHE_SHIFT
# elif defined(CONFIG_E2K_E12C)
# define L1_CACHE_SHIFT E12C_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT E12C_L2_CACHE_SHIFT
# define L3_CACHE_SHIFT E12C_L3_CACHE_SHIFT
# elif defined(CONFIG_E2K_E16C)
# define L1_CACHE_SHIFT E16C_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT E16C_L2_CACHE_SHIFT
# define L3_CACHE_SHIFT E16C_L3_CACHE_SHIFT
# elif defined(CONFIG_E2K_E2C3)
# define L1_CACHE_SHIFT E2C3_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT E2C3_L2_CACHE_SHIFT
# else
#  error "E2K MACHINE type is not defined"
# endif
# ifndef L3_CACHE_SHIFT
# define L3_CACHE_SHIFT 0
# endif
#else /* ! CONFIG_E2K_MACHINE */
/*
 * FIXME: Keep this in mind when adding a new cpu type
*/
# define L1_CACHE_SHIFT_MAX ES2_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT_MAX ES2_L2_CACHE_SHIFT
# define L3_CACHE_SHIFT_MAX E8C_L3_CACHE_SHIFT
# define L1_CACHE_SHIFT L1_CACHE_SHIFT_MAX
# define L2_CACHE_SHIFT L2_CACHE_SHIFT_MAX
# define L3_CACHE_SHIFT L3_CACHE_SHIFT_MAX
#endif /* CONFIG_E2K_MACHINE */
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT)
#define L3_CACHE_BYTES (L3_CACHE_SHIFT ? (1 << L3_CACHE_SHIFT) : 0)
/* Stores pass through L1$, so we should use the biggest size. */
#define SMP_CACHE_BYTES _max3_(L1_CACHE_BYTES, L2_CACHE_BYTES, \
L3_CACHE_BYTES)
#define INTERNODE_CACHE_SHIFT _max3_(L1_CACHE_SHIFT, L2_CACHE_SHIFT, \
L3_CACHE_SHIFT)
#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
#define cache_line_size() _max3_(L1_CACHE_BYTES, L2_CACHE_BYTES, \
L3_CACHE_BYTES)
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#endif /* _E2K_CACHE_H_ */

View File

@ -0,0 +1,230 @@
/*
* pgalloc.h: the functions and defines necessary to allocate
* page tables.
*
* Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru)
*/
#ifndef _E2K_CACHEFLUSH_H
#define _E2K_CACHEFLUSH_H
#include <asm/debug_print.h>
#include <asm/mmu_regs.h>
#include <asm/machdep.h>
#include <uapi/asm/e2k_syswork.h>
#include <asm/mmu_regs_access.h>
#undef DEBUG_MR_MODE
#undef DebugMR
#define DEBUG_MR_MODE 0 /* MMU registers access */
#define DebugMR(...) DebugPrint(DEBUG_MR_MODE, ##__VA_ARGS__)
/*
 * Cache flushing routines. This is the kind of stuff that can be very
 * expensive, so we should try to avoid it whenever possible.
*/
/*
* Caches aren't brain-dead on the E2K
*/
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(mm, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
/*
 * Invalidate all ICACHES of the host processor
*/
typedef struct icache_range_array {
icache_range_t *ranges;
int count;
struct mm_struct *mm;
} icache_range_array_t;
extern void __flush_icache_all(void);
extern void native_flush_icache_range(e2k_addr_t start, e2k_addr_t end);
extern void __flush_icache_range_array(
icache_range_array_t *icache_range_arr);
extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page);
#ifndef CONFIG_SMP
#define flush_icache_all() __flush_icache_all()
#define flush_icache_range(start, end) __flush_icache_range(start, end)
#define flush_icache_range_array __flush_icache_range_array
#define flush_icache_page(vma, page) __flush_icache_page(vma, page)
#define native_smp_flush_icache_range(start, end)
#define native_smp_flush_icache_range_array(icache_range_arr)
#define native_smp_flush_icache_page(vma, page)
#define native_smp_flush_icache_all()
#define native_smp_flush_icache_kernel_line(addr)
#else /* CONFIG_SMP */
extern void native_smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end);
extern void native_smp_flush_icache_range_array(
icache_range_array_t *icache_range_arr);
extern void native_smp_flush_icache_page(struct vm_area_struct *vma,
struct page *page);
extern void native_smp_flush_icache_all(void);
extern void native_smp_flush_icache_kernel_line(e2k_addr_t addr);
#define flush_icache_all() smp_flush_icache_all()
#define flush_icache_range(start, end) \
({ \
if (cpu_has(CPU_FEAT_FLUSH_DC_IC)) \
__flush_icache_range(start, end); \
else \
smp_flush_icache_range(start, end); \
})
#define flush_icache_range_array smp_flush_icache_range_array
#define flush_icache_page(vma, page) \
({ \
if (cpu_has(CPU_FEAT_FLUSH_DC_IC)) \
__flush_icache_page(vma, page); \
else \
smp_flush_icache_page(vma, page); \
})
#endif /* ! (CONFIG_SMP) */
/*
 * Some useful routines to flush caches
*/
/*
* Write Back and Invalidate all caches (instruction and data).
* "local_" versions work on the calling CPU only.
*/
extern void local_write_back_cache_all(void);
extern void local_write_back_cache_range(unsigned long start, size_t size);
extern void write_back_cache_all(void);
extern void write_back_cache_range(unsigned long start, size_t size);
/*
* Flush multiple DCACHE lines
*/
static inline void
native_flush_DCACHE_range(void *addr, size_t len)
{
char *cp, *end;
unsigned long stride;
	DebugMR("Flush DCACHE range: virtual addr 0x%px, len 0x%zx\n", addr, len);
/* Although L1 cache line is 32 bytes, coherency works
* with 64 bytes granularity. So a single flush_dc_line
* can flush _two_ lines from L1 */
stride = SMP_CACHE_BYTES;
end = PTR_ALIGN(addr + len, SMP_CACHE_BYTES);
E2K_WAIT_ST;
for (cp = addr; cp < end; cp += stride)
flush_DCACHE_line((unsigned long) cp);
E2K_WAIT_FLUSH;
}
/*
* Clear multiple DCACHE L1 lines
*/
static inline void
native_clear_DCACHE_L1_range(void *virt_addr, size_t len)
{
unsigned long cp;
unsigned long end = (unsigned long) virt_addr + len;
unsigned long stride;
stride = cacheinfo_get_l1d_line_size();
for (cp = (u64) virt_addr; cp < end; cp += stride)
clear_DCACHE_L1_line(cp);
}
#if defined(CONFIG_PARAVIRT_GUEST)
/* it is paravirtualized host and guest kernel */
#include <asm/paravirt/cacheflush.h>
#elif defined(CONFIG_KVM_GUEST_KERNEL)
/* it is native guest kernel */
#include <asm/kvm/guest/cacheflush.h>
#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
/* it is native kernel without virtualization support */
/* or native kernel with virtualization support */
static inline void
smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end)
{
native_smp_flush_icache_range(start, end);
}
static inline void
smp_flush_icache_range_array(icache_range_array_t *icache_range_arr)
{
native_smp_flush_icache_range_array(icache_range_arr);
}
static inline void
smp_flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
native_smp_flush_icache_page(vma, page);
}
static inline void
smp_flush_icache_all(void)
{
native_smp_flush_icache_all();
}
static inline void
smp_flush_icache_kernel_line(e2k_addr_t addr)
{
native_smp_flush_icache_kernel_line(addr);
}
static inline void
__flush_icache_range(e2k_addr_t start, e2k_addr_t end)
{
native_flush_icache_range(start, end);
}
static inline void
flush_DCACHE_range(void *addr, size_t len)
{
native_flush_DCACHE_range(addr, len);
}
static inline void
clear_DCACHE_L1_range(void *virt_addr, size_t len)
{
native_clear_DCACHE_L1_range(virt_addr, len);
}
#endif /* CONFIG_PARAVIRT_GUEST */
static inline void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst,
const void *src, unsigned long len)
{
if (IS_ALIGNED((unsigned long) dst, 8) &&
IS_ALIGNED((unsigned long) src, 8) && IS_ALIGNED(len, 8)) {
tagged_memcpy_8(dst, src, len);
} else {
memcpy(dst, src, len);
}
flush_icache_range((unsigned long) dst, (unsigned long) dst + len);
}
static inline void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst,
const void *src, size_t len)
{
if (IS_ALIGNED((unsigned long) dst, 8) &&
IS_ALIGNED((unsigned long) src, 8) && IS_ALIGNED(len, 8)) {
tagged_memcpy_8(dst, src, len);
} else {
memcpy(dst, src, len);
}
}
#endif /* _E2K_CACHEFLUSH_H */

View File

@ -0,0 +1,141 @@
#ifndef _E2K_CHECKSUM_H_
#define _E2K_CHECKSUM_H_
#include <linux/prefetch.h>
#include <linux/in6.h>
extern unsigned int __pure e2k_do_csum(const unsigned char *buff, int len);
/*
* Fold a partial checksum
*/
#define csum_fold csum_fold
static inline __sum16 csum_fold(__wsum csum)
{
u32 sum = (__force u32)csum;
return (__force __sum16) ((~sum - __builtin_e2k_scls(sum, 16)) >> 16);
}
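/*
 * Note on the expression above (assuming __builtin_e2k_scls() is a 32-bit
 * cyclic shift left): ~sum - rotl32(sum, 16) == ~(sum + rotl32(sum, 16)),
 * and the upper half of that value is the one's-complement sum of the two
 * 16-bit halves of 'sum', including the carry between them. E.g. for
 * sum = 0x11118888 the result is (~(0x1111 + 0x8888)) & 0xffff == 0x6666.
 */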
static inline u32 from64to32(u64 x)
{
x += __builtin_e2k_scld(x, 32);
return (u32) (x >> 32);
}
/*
 * ihl is always 5 or greater, almost always 5,
* and iph is word aligned the majority of the time.
*/
static inline __wsum ip_fast_csum_nofold_maybe_unaligned(const void *iph, unsigned int ihl)
{
const u32 *iph32 = iph;
size_t i;
u64 sum;
sum = (u64) iph32[0] + (u64) iph32[1] + (u64) iph32[2] +
(u64) iph32[3] + (u64) iph32[4];
if (unlikely(ihl > 5)) {
for (i = 5; i < ihl; i++)
sum += (u64) iph32[i];
}
return (__force __wsum) from64to32(sum);
}
#define ip_fast_csum ip_fast_csum
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
if (cpu_has(CPU_HWBUG_UNALIGNED_LOADS) &&
!IS_ALIGNED((unsigned long) iph, 4))
return (__force __sum16) ~e2k_do_csum(iph, ihl*4);
else
return csum_fold(ip_fast_csum_nofold_maybe_unaligned(iph, ihl));
}
static inline u32 add32_with_carry(u32 a, u32 b)
{
u64 arg1 = ((u64) a << 32ULL) | (u64) b;
u64 arg2 = ((u64) b << 32ULL) | (u64) a;
return (arg1 + arg2) >> 32ULL;
}
#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
return (__force __wsum) add32_with_carry((__force u32) csum,
(__force u32) addend);
}
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
__wsum __csum_partial(const void *buff, int len, __wsum sum);
static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0 &&
!cpu_has(CPU_HWBUG_UNALIGNED_LOADS)) {
u64 sum_64 = (__force u32) sum;
if (len == 2)
sum_64 += *(const u16 *) buff;
if (len >= 4)
sum_64 += *(const u32 *) buff;
if (len == 6)
sum_64 += *(const u16 *) (buff + 4);
if (len >= 8)
sum_64 += *(const u32 *) (buff + 4);
if (len == 10)
sum_64 += *(const u16 *) (buff + 8);
if (len >= 12)
sum_64 += *(const u32 *) (buff + 8);
if (len == 14)
sum_64 += *(const u16 *) (buff + 12);
if (len >= 16)
sum_64 += *(const u32 *) (buff + 12);
sum = from64to32(sum_64);
} else if (__builtin_constant_p(len) && (len & 3) == 0 &&
!cpu_has(CPU_HWBUG_UNALIGNED_LOADS)) {
sum = csum_add(sum, ip_fast_csum_nofold_maybe_unaligned(buff, len >> 2));
} else {
E2K_PREFETCH_L1((__force void *) buff);
sum = __csum_partial(buff, len, sum);
}
return sum;
}
#define csum_tcpudp_nofold csum_tcpudp_nofold
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
__u32 len, __u8 proto, __wsum sum)
{
u64 s = (__force u32) sum;
s += (__force u32) saddr;
s += (__force u32) daddr;
s += (proto + len) << 8;
return (__force __wsum) from64to32(s);
}
#define _HAVE_ARCH_IPV6_CSUM
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, __u8 proto, __wsum csum);
#include <asm-generic/checksum.h>
#endif /* _E2K_CHECKSUM_H_ */

View File

@ -0,0 +1,22 @@
#ifndef _ASM_E2K_CLKR_H
#define _ASM_E2K_CLKR_H
#include <asm/cpu.h>
#include <asm-l/clkr.h>
extern __interrupt u64 fast_syscall_read_clkr(void);
extern u64 last_clkr;
DECLARE_PER_CPU(u64, clkr_offset);
#if defined(CONFIG_PARAVIRT_GUEST)
/* it is paravirtualized guest and host kernel */
#include <asm/paravirt/clkr.h>
#elif defined(CONFIG_KVM_GUEST_KERNEL)
/* it is pure guest kernel (not paravirtualized based on pv_ops) */
#include <asm/kvm/guest/clkr.h>
#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
/* native kernel with or without virtualization support */
#endif /* CONFIG_PARAVIRT_GUEST */
#endif /* _ASM_E2K_CLKR_H */

View File

@ -0,0 +1,109 @@
/*
* Kernel performance measuring tool and support
*/
#ifndef _E2K_CLOCK_INFO_H
#define _E2K_CLOCK_INFO_H
#include <linux/types.h>
#ifndef __ASSEMBLY__
#include <asm/types.h>
#endif /* __ASSEMBLY__ */
#ifndef __ASSEMBLY__
typedef u64 e2k_clock_t;
typedef enum {
SYSTEM_CALL_TT = 1, /* system calls */
TRAP_TT /* traps */
} times_type_t;
typedef struct {
int syscall_num; /* # of system call */
int signals_num; /* number of handled signals */
e2k_clock_t start; /* start clock of system call */
e2k_clock_t end; /* end clock */
e2k_clock_t pt_regs_set; /* pt_regs structure is set */
e2k_clock_t save_stack_regs;
e2k_clock_t save_sys_regs;
e2k_clock_t save_stacks_state;
e2k_clock_t save_thread_state;
e2k_clock_t scall_switch;
e2k_clock_t scall_done;
e2k_clock_t restore_thread_state;
e2k_clock_t check_pt_regs;
e2k_clock_t do_signal_start;
e2k_clock_t do_signal_done;
e2k_clock_t restore_start;
e2k_clock_t restore_user_regs;
e2k_pshtp_t pshtp;
u64 psp_ind;
e2k_pshtp_t pshtp_to_done;
u64 psp_ind_to_done;
} scall_times_t;
typedef struct {
e2k_clock_t start; /* start clock of system call */
e2k_clock_t end; /* end clock */
e2k_clock_t pt_regs_set; /* pt_regs structure is set */
e2k_clock_t signal_done;
int nr_TIRs;
e2k_tir_t TIRs[TIR_NUM];
e2k_psp_hi_t psp_hi;
e2k_pshtp_t pshtp;
u64 psp_ind;
e2k_pcsp_hi_t pcsp_hi;
u64 ctpr1;
u64 ctpr2;
u64 ctpr3;
u8 ps_bounds;
u8 pcs_bounds;
int trap_num;
e2k_psp_hi_t psp_hi_to_done;
e2k_pshtp_t pshtp_to_done;
e2k_pcsp_hi_t pcsp_hi_to_done;
u64 ctpr1_to_done;
u64 ctpr2_to_done;
u64 ctpr3_to_done;
} trap_times_t;
typedef struct kernel_times {
times_type_t type;
union {
scall_times_t syscall; /* system calls */
trap_times_t trap; /* traps */
} of;
} kernel_times_t;
#ifdef CONFIG_MAX_KERNEL_TIMES_NUM
#define MAX_KERNEL_TIMES_NUM CONFIG_MAX_KERNEL_TIMES_NUM
#else
#define MAX_KERNEL_TIMES_NUM 20
#endif /* CONFIG_MAX_KERNEL_TIMES_NUM */
#define INCR_KERNEL_TIMES_COUNT(ti) { \
	(ti)->times_index++; \
	(ti)->times_num++; \
if ((ti)->times_index >= MAX_KERNEL_TIMES_NUM) \
(ti)->times_index = 0; \
}
#define GET_DECR_KERNEL_TIMES_COUNT(ti, count) { \
(count) = (ti)->times_index; \
if ((ti)->times_num == 0) \
(ti)->times_num = 1; \
	(count)--; \
if ((count) < 0) \
(count) = MAX_KERNEL_TIMES_NUM - 1; \
}
#define E2K_SAVE_CLOCK_REG(clock) { \
(clock) = E2K_GET_DSREG(clkr); \
}
#define CALCULATE_CLOCK_TIME(start_clock, end_clock) \
((end_clock) - (start_clock))
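/*
 * Minimal usage sketch of the timing helpers above (hypothetical code):
 *
 *	e2k_clock_t start, end, total;
 *	E2K_SAVE_CLOCK_REG(start);
 *	do_measured_work();		// hypothetical workload
 *	E2K_SAVE_CLOCK_REG(end);
 *	total = CALCULATE_CLOCK_TIME(start, end);
 */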
extern void sys_e2k_print_kernel_times(struct task_struct *task,
kernel_times_t *times, long times_num, int times_index);
#endif /* __ASSEMBLY__ */
#endif /* _E2K_CLOCK_INFO_H */

View File

@ -0,0 +1,37 @@
#ifndef _ASM_CMOS_H
#define _ASM_CMOS_H
#include <asm/io.h>
#include <asm/e2k.h>
#ifndef RTC_PORT
#define RTC_PORT(x) (0x70 + (x))
#define RTC_ALWAYS_BCD	1	/* RTC register values are in BCD */
#endif
static inline char mc146818_cmos_read(char addr)
{
if (HAS_MACHINE_E2K_IOHUB) {
WARN_ONCE(1, "Warning: CMOS_READ attempted on a machine without a functioning CMOS\n");
return 0;
}
outb_p((addr),RTC_PORT(0));
return inb_p(RTC_PORT(1));
}
static inline void mc146818_cmos_write(char val, char addr)
{
if (HAS_MACHINE_E2K_IOHUB) {
WARN_ONCE(1, "Warning: CMOS_WRITE attempted on a machine without a functioning CMOS\n");
return;
}
outb_p(addr, RTC_PORT(0));
outb_p(val, RTC_PORT(1));
}
#define CMOS_READ(addr) mc146818_cmos_read(addr)
#define CMOS_WRITE(val, addr) mc146818_cmos_write(val, addr)
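/*
 * A sketch of reading the RTC seconds register (register 0x00; BCD
 * encoding assumed, as implied by RTC_ALWAYS_BCD):
 *
 *	unsigned char raw = CMOS_READ(0x00);
 *	unsigned int seconds = (raw & 0x0f) + ((raw >> 4) & 0x0f) * 10;
 */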
#endif

View File

@ -0,0 +1,102 @@
#ifndef ASM_E2K_CMPXCHG_H
#define ASM_E2K_CMPXCHG_H
#include <linux/compiler.h>
#include <asm/machdep.h>
#include <asm/e2k_api.h>
#include <asm/atomic_api.h>
/*
 * Non-existent functions to indicate usage errors at link time
 * (or at compile time, if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
__compiletime_error("Bad argument size for cmpxchg");
#define __xchg(ptr, val, mem_model) \
({ \
volatile void *__x_ptr = (volatile void *) (ptr); \
u64 __x_ret, __x_val = (u64) (val); \
switch (sizeof(*(ptr))) { \
case 1: \
__x_ret = __api_xchg_return(__x_val, (volatile u8 *) __x_ptr, \
b, mem_model); \
break; \
case 2: \
__x_ret = __api_xchg_return(__x_val, (volatile u16 *) __x_ptr, \
h, mem_model); \
break; \
case 4: \
__x_ret = __api_xchg_return(__x_val, (volatile u32 *) __x_ptr, \
w, mem_model); \
break; \
case 8: \
__x_ret = __api_xchg_return(__x_val, (volatile u64 *) __x_ptr, \
d, mem_model); \
break; \
default: \
__x_ret = 0; \
__xchg_wrong_size(); \
break; \
} \
(__typeof__(*(ptr))) __x_ret; \
})
#define xchg_relaxed(ptr, v) __xchg((ptr), (v), RELAXED_MB)
#define xchg_acquire(ptr, v) __xchg((ptr), (v), ACQUIRE_MB)
#define xchg_release(ptr, v) __xchg((ptr), (v), RELEASE_MB)
#define xchg(ptr, v) __xchg((ptr), (v), STRONG_MB)
#define __cmpxchg(ptr, old, new, mem_model) \
({ \
volatile void *__x_ptr = (volatile void *) (ptr); \
u64 __x_ret, __x_old = (u64) (old), __x_new = (u64) (new); \
switch (sizeof(*(ptr))) { \
case 1: \
__x_ret = __api_cmpxchg_return(__x_old, __x_new, \
(volatile u8 *) __x_ptr, b, 0x4, mem_model); \
break; \
case 2: \
__x_ret = __api_cmpxchg_return(__x_old, __x_new, \
(volatile u16 *) __x_ptr, h, 0x5, mem_model); \
break; \
case 4: \
__x_ret = __api_cmpxchg_word_return(__x_old, __x_new, \
(volatile u32 *) __x_ptr, mem_model); \
break; \
case 8: \
__x_ret = __api_cmpxchg_dword_return(__x_old, __x_new, \
(volatile u64 *) __x_ptr, mem_model); \
break; \
default: \
__x_ret = 0; \
__cmpxchg_wrong_size(); \
break; \
} \
(__typeof__(*(ptr))) __x_ret; \
})
#define cmpxchg_relaxed(ptr, o, n) __cmpxchg((ptr), (o), (n), RELAXED_MB)
#define cmpxchg_acquire(ptr, o, n) __cmpxchg((ptr), (o), (n), ACQUIRE_MB)
#define cmpxchg_release(ptr, o, n) __cmpxchg((ptr), (o), (n), RELEASE_MB)
#define cmpxchg(ptr, o, n) __cmpxchg((ptr), (o), (n), STRONG_MB)
#define cmpxchg_lock(ptr, o, n) __cmpxchg((ptr), (o), (n), LOCK_MB)
#define __cmpxchg64(ptr, o, n, mem_model) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
(u64) __cmpxchg((ptr), (o), (n), mem_model); \
})
#define cmpxchg64_relaxed(ptr, o, n) __cmpxchg64((ptr), (o), (n), RELAXED_MB)
#define cmpxchg64_acquire(ptr, o, n) __cmpxchg64((ptr), (o), (n), ACQUIRE_MB)
#define cmpxchg64_release(ptr, o, n) __cmpxchg64((ptr), (o), (n), RELEASE_MB)
#define cmpxchg64(ptr, o, n) __cmpxchg64((ptr), (o), (n), STRONG_MB)
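/*
 * The usual cmpxchg() retry loop looks like this (sketch of a hypothetical
 * caller implementing a saturating 32-bit increment):
 *
static inline u32 example_inc_saturating(u32 *p)
{
	u32 old, new;

	do {
		old = READ_ONCE(*p);
		new = (old == U32_MAX) ? old : old + 1;
	} while (cmpxchg(p, old, new) != old);

	return new;
}
 */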
#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
__api_cmpxchg_double(p1, p2, o1, o2, n1, n2)
#define cmpxchg_local(ptr, o, n)	cmpxchg((ptr), (o), (n))
#define cmpxchg64_local(ptr, o, n) cmpxchg64((ptr), (o), (n))
#define system_has_cmpxchg_double() 1
#endif /* ASM_E2K_CMPXCHG_H */

View File

@ -0,0 +1,359 @@
/* $Id: cnt_point.h,v 1.3 2009/06/29 11:51:48 atic Exp $
*
 * Recovery of the system from a control point.
*/
#ifndef _E2K_CNT_POINT_H
#define _E2K_CNT_POINT_H
#include <asm/types.h>
#include <asm/boot_recovery.h>
#include <asm/boot_flags.h>
/*
* Core dump header on the disk
* Total size of header should be one page of memory = one block on disk
* Note that the first kilobyte is reserved for boot loader or
* disk label stuff...
* The following first bytes should contain signature and the last bytes
* of header - magic value to indicate dump header itegrety
* Other structures are aligned to have constant offset in the header
* by adding zip areas in the structure end.
*/
#define TOTAL_DUMP_HEADER_SIZE PAGE_SIZE
#define BOOTBITS_DUMP_HEADER_SIZE 0x400 /* offset 0x000 */
#define DUMP_INFO_HEADER_SIZE 0x100 /* offset 0x400 */
#define CORE_DUMP_HEADER_SIZE 0x500 /* offset 0x500 */
/* offset 0xa00 - gap */
/* offset 0xff8 - magic */
/*
* Dump device and common dump state info
* Dump file space layout:
* block 0 dump file header
* block 1 core dump area start
* ---------------------------------
* | header | core dump area |
* ---------------------------------
* 0 block
* 1 block
*/
#define CORE_DUMP_AREA_OFFSET 1
#define DEFAULT_CORE_AREA_MAX_SIZE (16 * 1024L) /* 16 Gb */
typedef struct dump_desc {
u64 signature; /* signature to indicate dump */
/* header structure start */
/* should be first bytes of useful */
/* part of the header */
u8 cntp_valid; /* control points header of file */
/* is created and valid */
u8 core_valid; /* system core dump header of file */
/* is created and valid */
u64 file_size; /* total size of dump file */
/* in pages */
/* (page size = block size) */
u64 cntp_offset; /* offset (in blocks = page) */
/* of control points area in */
/* the dump file */
u64 cntp_size; /* size of control points area */
/* in blocks */
u64 core_offset; /* offset (in blocks = page) */
/* of core dump area in */
/* the dump file */
u64 core_size; /* size of core dump area */
/* in blocks */
} dump_desc_t;
/*
* System core dump state info
*/
typedef struct core_dump {
} core_dump_t;
/*
* Dump header on the disk structure
*/
typedef struct dump_header {
/* Space for disklabel etc. */
u8 bootbits[BOOTBITS_DUMP_HEADER_SIZE];
dump_desc_t info; /* Device & dump state common info */
u8 zip1[DUMP_INFO_HEADER_SIZE - sizeof (dump_desc_t)];
core_dump_t core; /* System core dump header stuff */
u8 zip3[CORE_DUMP_HEADER_SIZE - sizeof (core_dump_t)];
/* zip area to make size of */
/* header - constant == PAGE_SIZE */
u8 gap[ TOTAL_DUMP_HEADER_SIZE -
BOOTBITS_DUMP_HEADER_SIZE -
DUMP_INFO_HEADER_SIZE -
CORE_DUMP_HEADER_SIZE -
8]; /* u64 : magic */
u64 magic; /* magic value to indicate control */
/* point header structure */
/* should be last bytes of the */
/* header */
} dump_header_t;
#define DUMP_HEADER_SIGNATURE 0xe2c0c0e226143210
#define DUMP_HEADER_MAGIC 0xe2c0c0e22614cdef
#define DUMP_BLOCK_TO_SECTOR(block) ((block) * (PAGE_SIZE >> 9))
#define CORE_BLOCK_TO_SECTOR(block) DUMP_BLOCK_TO_SECTOR(block)
/*
* Forwards of some functions to recover system state
*/
extern struct vm_area_struct *cntp_find_vma(struct task_struct *ts,
unsigned long addr);
extern void dump_prepare(u16 dump_dev, u64 dump_sector);
extern void start_emergency_dump(void);
extern int create_dump_point(void);
extern void init_dump_analyze_mode(void);
extern void start_dump_analyze(void);
extern e2k_addr_t cntp_kernel_address_to_phys(e2k_addr_t address);
extern e2k_addr_t cntp_user_address_to_phys(struct task_struct *tsk,
e2k_addr_t address);
extern int map_memory_region(e2k_addr_t mem_base, e2k_addr_t mem_end,
int *just_mapped_point);
extern int run_init_process(const char *init_filename);
#if defined(CONFIG_EMERGENCY_DUMP)
extern unsigned int nr_swapfiles;
extern struct swap_info_struct *swap_info[MAX_SWAPFILES];
#endif
extern e2k_addr_t cntp_kernel_base;
extern int cur_cnt_point;
extern int cntp_small_kern_mem_div;
extern int dump_analyze_mode;
extern int dump_analyze_opt;
extern char dump_analyze_cmd[];
#define boot_cur_cnt_point \
boot_get_vo_value(cur_cnt_point)
#define boot_cntp_small_kern_mem_div \
boot_get_vo_value(cntp_small_kern_mem_div)
#define boot_dump_analyze_mode \
boot_get_vo_value(dump_analyze_mode)
#define boot_dump_analyze_opt \
boot_get_vo_value(dump_analyze_opt)
#define boot_dump_analyze_cmd \
boot_vp_to_pp((char *)dump_analyze_cmd)
extern inline e2k_size_t
get_dump_analyze_bank_size(e2k_phys_bank_t *phys_bank, int cntp_num)
{
e2k_addr_t base, new_base;
e2k_size_t size, new_size;
BUG_ON(cntp_num == 0 || cntp_num == 1);
size = phys_bank->pages_num * PAGE_SIZE;
base = phys_bank->base_addr;
new_base = LARGE_PAGE_ALIGN_DOWN(base);
new_size = size - (new_base - base);
return LARGE_PAGE_ALIGN_UP(new_size / cntp_num);
}
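/*
* Worked example (illustrative; note that in these headers
* LARGE_PAGE_ALIGN_DOWN() rounds an address *up* to the next large-page
* boundary and the usable size shrinks accordingly - the same idiom as
* in boot_get_dump_analyze_kernel_base() below): for a 24 Mb bank at
* base 0x600000 with a hypothetical 4 Mb large page and cntp_num == 4,
* new_base == 0x800000, new_size == 22 Mb, and every control point
* slice gets LARGE_PAGE_ALIGN_UP(22 Mb / 4) == 4 Mb.
*/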
extern inline e2k_size_t
get_dump_analyze_memory_len(e2k_phys_bank_t *phys_bank, int cntp, int cntp_num)
{
e2k_size_t size = get_dump_analyze_bank_size(phys_bank, cntp_num);
e2k_size_t len = size;
e2k_addr_t base;
e2k_addr_t new_base;
BUG_ON(cntp_num == 0 || cntp_num == 1);
BUG_ON(cntp != cntp_num - 1);
base = phys_bank->base_addr;
new_base = LARGE_PAGE_ALIGN_DOWN(base);
len += phys_bank->pages_num * PAGE_SIZE -
((new_base - base) + size * cntp_num);
return len;
}
extern inline e2k_addr_t
get_dump_analyze_memory_offset(e2k_phys_bank_t *phys_bank, int cntp,
int cntp_num)
{
e2k_size_t size;
e2k_addr_t offset = 0;
e2k_addr_t base;
e2k_addr_t new_base;
BUG_ON(cntp_num == 0 || cntp_num == 1);
BUG_ON(cntp != cntp_num - 1);
size = get_dump_analyze_bank_size(phys_bank, cntp_num);
base = phys_bank->base_addr;
new_base = LARGE_PAGE_ALIGN_DOWN(base);
offset = (new_base - base) + size * cntp;
return offset;
}
extern inline e2k_addr_t
get_dump_analyze_memory_base(e2k_phys_bank_t *phys_bank, int cntp, int cntp_num)
{
e2k_addr_t offset = get_dump_analyze_memory_offset(
phys_bank, cntp, cntp_num);
e2k_addr_t base = phys_bank->base_addr;
base += offset;
return base;
}
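/*
* Illustrative relationship between the helpers above: only the last
* slice is allowed by the BUG_ON()s, and its offset is the alignment
* gap plus size * cntp. With the numbers from the example above
* (gap 2 Mb, slice 4 Mb, cntp == 3) the slice starts at bank base
* + 14 Mb, and get_dump_analyze_memory_len() extends it by the tail
* of the bank left past the cntp_num equal slices (here 6 Mb more).
*/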
extern inline e2k_addr_t
boot_get_dump_analyze_kernel_base(void)
{
e2k_phys_bank_t *phys_bank;
e2k_addr_t base;
e2k_addr_t new_base;
e2k_size_t cntp_size;
int node;
int bank;
for (node = 0; node < L_MAX_MEM_NUMNODES; node++) {
phys_bank = full_phys_mem[node].banks;
if (phys_bank->pages_num == 0)
continue; /* node has no memory */
for (bank = 0; bank < L_MAX_NODE_PHYS_BANKS; bank++) {
if (phys_bank->pages_num == 0)
break;
cntp_size = get_dump_analyze_memory_len(
phys_bank,
boot_cntp_small_kern_mem_div - 1,
boot_cntp_small_kern_mem_div);
if (cntp_size < boot_kernel_image_size)
goto next_bank;
base = get_dump_analyze_memory_base(
phys_bank,
boot_cntp_small_kern_mem_div - 1,
boot_cntp_small_kern_mem_div);
new_base = _PAGE_ALIGN_DOWN(base, E2K_KERNEL_PAGE_SIZE);
if (new_base - base + boot_kernel_image_size <=
cntp_size)
return new_base;
next_bank:
phys_bank++;
}
}
/*
* TODO: avoid this
*/
BUG();
return -1;
}
/*
* bootblock manipulations (read/write/set/reset) in virtual kernel mode
* are done at the physical level:
* write-through and uncacheable access to the physical address;
* the bootblock virtual address can only be read
*/
static inline u64
read_bootblock_cur_cnt_point(bootblock_struct_t *bootblock)
{
return READ_BOOTBLOCK_FIELD(bootblock, cur_cnt_point);
}
extern inline void
write_bootblock_cur_cnt_point(bootblock_struct_t *bootblock, u64 new_cnt_point)
{
WRITE_BOOTBLOCK_FIELD(bootblock, cur_cnt_point, new_cnt_point);
}
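/*
* Usage sketch (illustrative): switching the active control point is a
* read-modify-write of a single bootblock field through the accessors
* above ('points' is a hypothetical count of configured control points):
*
*	u64 cur = read_bootblock_cur_cnt_point(bootblock);
*	write_bootblock_cur_cnt_point(bootblock, (cur + 1) % points);
*/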
extern inline void
write_bootblock_mem_cnt_points(bootblock_struct_t *bootblock, u64 new_mem_points)
{
WRITE_BOOTBLOCK_FIELD(bootblock, mem_cnt_points, new_mem_points);
}
extern inline void
write_bootblock_disk_cnt_points(bootblock_struct_t *bootblock,
u64 new_disk_points)
{
WRITE_BOOTBLOCK_FIELD(bootblock, disk_cnt_points, new_disk_points);
}
extern inline void
write_bootblock_kernel_base(bootblock_struct_t *bootblock,
u64 new_kernel_base)
{
WRITE_BOOTBLOCK_FIELD(bootblock, info.kernel_base, new_kernel_base);
}
extern inline u64
read_bootblock_cntp_kernel_base(bootblock_struct_t *bootblock, int cntp)
{
return READ_BOOTBLOCK_FIELD(bootblock,
info.cntp_info[cntp].kernel_base);
}
extern inline void
write_bootblock_cntp_kernel_base(bootblock_struct_t *bootblock, int cntp,
u64 kernel_base)
{
WRITE_BOOTBLOCK_FIELD(bootblock, info.cntp_info[cntp].kernel_base,
kernel_base);
}
extern inline void
set_bootblock_cntp_created(bootblock_struct_t *bootblock)
{
WRITE_BOOTBLOCK_FIELD(bootblock, cnt_points_created, 1);
}
/*
* Convert virtual address of kernel item in a control point context
* to the consistent physical address.
*/
#define cntp_va_to_pa(virt_addr, cntp_kernel_phys_base, ts) \
({ \
e2k_addr_t phys = 0; \
e2k_addr_t virt = (e2k_addr_t)virt_addr; \
\
if (virt > 0 && virt < PAGE_OFFSET) \
phys = cntp_user_address_to_phys(ts, virt); \
else if (virt >= PAGE_OFFSET && virt < PAGE_OFFSET + MAX_PM_SIZE) \
phys = __pa(virt); \
else if (virt >= KERNEL_BASE && virt <= KERNEL_END) \
phys = virt - KERNEL_BASE + cntp_kernel_phys_base; \
else if (virt != 0) \
phys = cntp_kernel_address_to_phys(virt); \
\
phys; \
})
#define cntp_va(virt_addr, ts) \
({ \
void *virt = (void*)0; \
if ((e2k_addr_t)virt_addr != 0) { \
virt = (void *) cntp_va_to_pa(virt_addr, cntp_kernel_base, ts);\
if (((unsigned long) virt) != -1) \
virt = __va(virt); \
} \
virt; \
})
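/*
* Usage sketch (illustrative): dump analysis code follows pointers saved
* in a control point image through cntp_va(); e.g. for a saved kernel
* task_struct pointer
*
*	struct task_struct *ts = cntp_va(saved_ts, NULL);
*
* passing NULL for the task argument is fine as long as the address is
* not a user-space one (only the first branch of cntp_va_to_pa() needs
* the task to walk user page tables).
*/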
#endif /* _E2K_CNT_POINT_H */

arch/e2k/include/asm/compat.h Normal file

@ -0,0 +1,214 @@
#ifndef _ASM_E2K_COMPAT_H
#define _ASM_E2K_COMPAT_H
/*
* Architecture specific compatibility types
*/
#include <linux/types.h>
#include <asm/regs_state.h>
#include <asm/traps.h>
#include <asm/debug_print.h>
#include <asm-generic/compat.h>
#define COMPAT_USER_HZ 100
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_clock_t;
typedef s32 compat_pid_t;
typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
typedef s32 compat_off_t;
typedef s64 compat_loff_t;
typedef s16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
typedef s32 compat_key_t;
typedef s32 compat_timer_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
typedef u32 compat_uptr_t;
typedef u64 compat_u64;
typedef s64 compat_s64;
struct compat_stat {
compat_dev_t st_dev;
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_nlink_t st_nlink;
__compat_uid_t st_uid;
__compat_gid_t st_gid;
compat_dev_t st_rdev;
compat_off_t st_size;
compat_time_t st_atime;
compat_ulong_t st_atime_nsec;
compat_time_t st_mtime;
compat_ulong_t st_mtime_nsec;
compat_time_t st_ctime;
compat_ulong_t st_ctime_nsec;
compat_off_t st_blksize;
compat_off_t st_blocks;
u32 __unused4[2];
};
struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
compat_pid_t l_pid;
short __unused;
};
#define F_GETLK64 12
#define F_SETLK64 13
#define F_SETLKW64 14
struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
short __unused;
};
struct compat_statfs {
int f_type;
int f_bsize;
int f_blocks;
int f_bfree;
int f_bavail;
int f_files;
int f_ffree;
compat_fsid_t f_fsid;
int f_namelen;
int f_frsize;
int f_flags;
int f_spare[4];
};
#define COMPAT_RLIM_INFINITY 0x7fffffff
typedef u32 compat_old_sigset_t;
#undef DebugUS
#define DEBUG_US 0 /* Allocate User Space */
#define DebugUS(...) DebugPrint(DEBUG_US ,##__VA_ARGS__)
#define _COMPAT_NSIG 64
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
typedef struct sigevent32 {
sigval_t sigev_value;
int sigev_signo;
int sigev_notify;
union {
int _pad[SIGEV_PAD_SIZE32];
struct {
u32 _function;
u32 _attribute; /* really pthread_attr_t */
} _sigev_thread;
} _sigev_un;
} sigevent_t32;
#define COMPAT_OFF_T_MAX 0x7fffffff
/*
* The type of struct elf_prstatus.pr_reg in compatible core dumps.
*/
typedef struct user_regs_struct compat_elf_gregset_t;
static inline void __user *compat_ptr(compat_uptr_t uptr)
{
return (void __user *)(unsigned long)uptr;
}
static inline compat_uptr_t ptr_to_compat(void __user *uptr)
{
return (u32)(unsigned long)uptr;
}
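/*
* Example (illustrative): a 32-bit pointer arriving as a compat_uptr_t
* syscall argument is zero-extended before being dereferenced and
* truncated on the way back, so the round trip is lossless:
*
*	void __user *p = compat_ptr(arg32);
*	compat_uptr_t back = ptr_to_compat(p);	(back == arg32)
*/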
extern void __user *arch_compat_alloc_user_space(unsigned long len);
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
__compat_gid32_t gid;
__compat_uid32_t cuid;
__compat_gid32_t cgid;
unsigned short __pad1;
compat_mode_t mode;
unsigned short __pad2;
unsigned short seq;
unsigned long __unused1; /* yes they really are 64bit pads */
unsigned long __unused2;
};
struct compat_semid64_ds {
struct compat_ipc64_perm sem_perm;
compat_ulong_t __unused1;
compat_ulong_t sem_otime;
compat_ulong_t sem_otime_high;
compat_ulong_t sem_ctime;
compat_ulong_t sem_nsems;
compat_ulong_t sem_ctime_high;
compat_ulong_t __unused2;
};
struct compat_msqid64_ds {
struct compat_ipc64_perm msg_perm;
compat_ulong_t __unused1;
compat_ulong_t msg_stime;
compat_ulong_t msg_stime_high;
compat_ulong_t msg_rtime;
compat_ulong_t msg_rtime_high;
compat_ulong_t msg_ctime;
compat_ulong_t msg_cbytes;
compat_ulong_t msg_qnum;
compat_ulong_t msg_qbytes;
compat_pid_t msg_lspid;
compat_pid_t msg_lrpid;
compat_ulong_t msg_ctime_high;
compat_ulong_t __unused2;
};
struct compat_shmid64_ds {
struct compat_ipc64_perm shm_perm;
compat_ulong_t __unused1;
compat_ulong_t shm_atime;
compat_ulong_t shm_atime_high;
compat_ulong_t shm_dtime;
compat_ulong_t shm_dtime_high;
compat_ulong_t shm_ctime;
compat_size_t shm_segsz;
compat_pid_t shm_cpid;
compat_pid_t shm_lpid;
compat_ulong_t shm_nattch;
compat_ulong_t shm_ctime_high;
compat_ulong_t __unused2;
};
static inline int is_compat_task(void)
{
return current->thread.flags & E2K_FLAG_32BIT;
}
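/*
* Usage note: is_compat_task() keys off the E2K_FLAG_32BIT thread flag,
* so syscall code can pick the 32-bit layouts above at run time, e.g.
* copy out a struct compat_stat instead of the native struct stat when
* it returns nonzero.
*/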
#endif /* _ASM_E2K_COMPAT_H */

arch/e2k/include/asm/compiler.h Normal file

@ -0,0 +1,70 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_COMPILER_H
#define _ASM_COMPILER_H
#include <asm/glob_regs.h>
#undef barrier
#undef barrier_data
#undef RELOC_HIDE
#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
#if GCC_VERSION >= 40400
/* builtin version has better throughput but worse latency */
#undef __HAVE_BUILTIN_BSWAP32__
#endif
#endif
#define __PREEMPTION_CLOBBERS_1(cpu_greg, offset_greg) \
"g" #cpu_greg, "g" #offset_greg
#define __PREEMPTION_CLOBBERS(cpu_greg, offset_greg) \
__PREEMPTION_CLOBBERS_1(cpu_greg, offset_greg)
/* If a compiler barrier is used in a loop, these clobbers will
* force the compiler to always access the *current* per-cpu area
* instead of moving its address calculation out of the loop.
*
* The same goes for preemption-disabled sections: these clobbers
* forbid the compiler from moving the per-cpu area address calculation
* out of them. Since disabling interrupts also disables preemption,
* we also need these clobbers when writing PSR/UPSR. */
#define PREEMPTION_CLOBBERS __PREEMPTION_CLOBBERS(SMP_CPU_ID_GREG, MY_CPU_OFFSET_GREG)
#ifdef CONFIG_DEBUG_LCC_VOLATILE_ATOMIC
#define NOT_VOLATILE volatile
#else
#define NOT_VOLATILE
#endif
/* See bug #89623, bug #94946 */
#define barrier() \
do { \
int unused; \
__asm__ NOT_VOLATILE("" : "=r" (unused) : : "memory", PREEMPTION_CLOBBERS);\
} while (0)
/* See comment before PREEMPTION_CLOBBERS */
#define barrier_preemption() \
do { \
int unused; \
__asm__ NOT_VOLATILE("" : "=r" (unused) : : PREEMPTION_CLOBBERS);\
} while (0)
#define barrier_data(ptr) \
do { \
__asm__ NOT_VOLATILE("" : : "r"(ptr) : "memory", PREEMPTION_CLOBBERS); \
} while (0)
#define RELOC_HIDE(ptr, off) \
({ \
unsigned long __ptr; \
__asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
(typeof(ptr)) (__ptr + (off)); \
})
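/*
* Usage sketch (illustrative): RELOC_HIDE() adds an offset to a pointer
* while the empty asm keeps the compiler from reasoning about the
* result; generic code uses this idiom for per-cpu address arithmetic:
*
*	struct foo *p = RELOC_HIDE(&foo_template, my_cpu_offset);
*
* ('foo_template' and 'my_cpu_offset' are hypothetical names.)
*/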
#if defined(__LCC__) && (__LCC__ > 125 || __LCC__ == 125 && __LCC_MINOR__ >= 9)
# define builtin_expect_wrapper(x, val) __builtin_expect_with_probability((x), (val), 0.9999)
#else
# define builtin_expect_wrapper(x, val) __builtin_expect((x), (val))
#endif
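/*
* Usage sketch (illustrative): likely()-style hints can be built on this
* wrapper; on lcc >= 1.25.9 the branch gets an explicit 0.9999
* probability, elsewhere it degrades to plain __builtin_expect():
*
*	if (builtin_expect_wrapper(ret != 0, 0))
*		goto error;
*/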
#endif /* _ASM_COMPILER_H */

arch/e2k/include/asm/console.h Normal file

@ -0,0 +1,47 @@
#ifndef _E2K_CONSOLE_H_
#define _E2K_CONSOLE_H_
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <linux/init.h>
#include <asm/types.h>
#include <stdarg.h>
#include <asm/machdep.h>
#include <asm-l/console.h>
#include <asm/kvm/guest/console.h>
static inline void
native_virt_console_dump_putc(char c)
{
#ifdef CONFIG_EARLY_VIRTIO_CONSOLE
if (IS_HV_GM()) {
/* virtio console is only relevant in guest mode */
kvm_virt_console_dump_putc(c);
}
#endif /* CONFIG_EARLY_VIRTIO_CONSOLE */
}
extern void init_bug(const char *fmt_v, ...);
extern void init_warning(const char *fmt_v, ...);
#if defined(CONFIG_PARAVIRT_GUEST)
/* it is paravirtualized guest and host kernel */
#include <asm/paravirt/console.h>
#elif defined(CONFIG_KVM_GUEST_KERNEL)
/* it is pure guest kernel (not paravirtualized based on pv_ops) */
#include <asm/kvm/guest/console.h>
#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
/* native kernel or native kernel with virtualization support */
static inline void
virt_console_dump_putc(char c)
{
native_virt_console_dump_putc(c);
}
#endif /* CONFIG_PARAVIRT_GUEST */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _E2K_CONSOLE_H_ */

arch/e2k/include/asm/convert_array.h Normal file

@ -0,0 +1,114 @@
/*
* convert_array.h - Linux syscall interfaces (arch-specific)
*
* Copyright (c) 2019 MCST
*
* This file is released under the GPLv2.
* See the file COPYING for more details.
*/
#ifndef _ASM_E2K_UAPI_CONVERT_ARRAY_H
#define _ASM_E2K_UAPI_CONVERT_ARRAY_H
#ifdef CONFIG_PROTECTED_MODE
#define convert_array(prot_array, new_array, max_prot_array_size, fields, \
items, mask_type, mask_align) \
convert_array_3(prot_array, new_array, max_prot_array_size, fields, \
items, mask_type, mask_align, 0, 0)
extern int convert_array_3(long __user *prot_array, long *new_array,
const int max_prot_array_size, const int fields,
const int items, const long mask_type,
const long mask_align, const long mask_rw,
const int rval_mode);
/*
* Converts the given array of structures, which can contain
* protected user pointers to memory, function descriptors, and int values.
* prot_array - pointer to the original (user-space) array
* new_array - pointer to area where to put converted array
* max_prot_array_size - the maximum size, which prot_array can occupy
* fields - number of entries in each element
* items - number of identical elements in the array to convert
* mask_type - mask for encoding of field type in each element:
* 2 bits per each entry:
* --- 00 (0x0) - int
* --- 01 (0x1) - long
* --- 10 (0x2) - pointer to function
* --- 11 (0x3) - pointer to memory (descriptor)
* mask_align - mask for encoding of alignment of the NEXT! field
* 2 bits per each entry:
* --- 00 (0x0) - next field aligned as int (to 4 bytes)
* --- 01 (0x1) - next field aligned as long (to 8 bytes)
* --- 10 (0x2) - not used yet
* --- 11 (0x3) - next field aligned as pointer (to 16 bytes)
* mask_rw - mask for encoding access type of the structure elements
* 2 bits per each entry:
* --- 01 (0x1) - the field's content gets read by syscall (READ-able)
* --- 10 (0x2) - the field's content gets updated by syscall (WRITE-able)
* --- 11 (0x3) - the field is both READ-able and WRITE-able
* --- 00 (0x0) - default type; the same as (READ-able)
* rval_mode - error (return value) reporting mode mask:
* 0 - report only critical problems in prot_array structure;
* 1 - return with -EFAULT if wrong tag in 'int' field;
* 2 - --'-- --'-- 'long' field;
* 4 - --'-- --'-- 'func' field;
* 8 - --'-- --'-- 'descr' field;
* 16 - ignore errors in 'int' field;
* 32 - --'-- --'-- 'long' field;
* 64 - --'-- --'-- 'func' field;
* 128 - --'-- --'-- 'descr' field.
* Returns: 0 - if converted OK;
* error number - otherwise.
*/
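/*
* Worked example (illustrative, assuming mask entries are packed from
* the least significant bits upwards, like the tags described for
* check_args_array() below): for elements of the form
* { int op; long len; void *buf; } pass fields == 3 with
*
*	mask_type == 0x0 | (0x1 << 2) | (0x3 << 4) == 0x34
*	mask_align == 0x1 | (0x3 << 2) | (0x3 << 4) == 0x3d
*
* i.e. the long after the int is 8-byte aligned and the descriptor after
* the long is 16-byte aligned; the last mask_align entry is taken here
* to pad the element back to descriptor alignment (an assumption, not a
* documented rule).
*/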
#define CONV_ARR_WRONG_INT_FLD 1
#define CONV_ARR_WRONG_LONG_FLD 2
#define CONV_ARR_WRONG_FUNC_FLD 4
#define CONV_ARR_WRONG_DSCR_FLD 8
#define CONV_ARR_WRONG_ANY_FLD 15 /* error if any field appeared bad */
#define CONV_ARR_IGNORE_INT_FLD_ERR 16
#define CONV_ARR_IGNORE_LONG_FLD_ERR 32
#define CONV_ARR_IGNORE_FUNC_FLD_ERR 64
#define CONV_ARR_IGNORE_DSCR_FLD_ERR 128
extern int check_args_array(const long __user *args_array,
const long tags,
const int arg_num,
const long mask_type,
const int rval_mode,
const char *ErrMsgHeader);
/*
* This function checks protected syscall arguments on correspondence with
* the given mask:
* args_array - pointer to argument array
* tags - argument tags (4 bits per arg; lower to higher bits ordered)
* arg_num - number of arguments
* mask_type - mask for encoding of field type in each element
* 2 bits per each entry:
* --- 00 (0x0) - int
* --- 01 (0x1) - long
* --- 10 (0x2) - pointer to function
* --- 11 (0x3) - pointer to memory.
* rval_mode - error (return value) reporting mode mask:
* 0 - report only critical problems;
* 1 - return with -EFAULT if wrong tag in 'int' field;
* 2 - --'-- --'-- 'long' field;
* 4 - --'-- --'-- 'func' field;
* 8 - --'-- --'-- 'descr' field;
* 16 - ignore errors in 'int' field;
* 32 - --'-- --'-- 'long' field;
* 64 - --'-- --'-- 'func' field;
* 128 - --'-- --'-- 'descr' field.
* Returns: 0 - if converted OK;
* error number - otherwise.
*/
#else
# define convert_array(...) 0
#endif /* CONFIG_PROTECTED_MODE */
#endif /* _ASM_E2K_UAPI_CONVERT_ARRAY_H */

arch/e2k/include/asm/coredump.h Normal file

@ -0,0 +1,10 @@
#ifndef _E2K_COREDUMP_H
#define _E2K_COREDUMP_H
/*
* For coredump
*/
extern void clear_delayed_free_hw_stacks(struct mm_struct *mm);
extern void create_delayed_free_hw_stacks(void);
#endif /* _E2K_COREDUMP_H */

arch/e2k/include/asm/cpu.h Normal file

@ -0,0 +1,36 @@
#ifndef _ASM_E2K_CPU_H_
#define _ASM_E2K_CPU_H_
#include <linux/cpu.h>
extern int arch_register_cpu(int num);
#ifdef CONFIG_HOTPLUG_CPU
extern void arch_unregister_cpu(int);
#endif
static inline unsigned long
native_get_cpu_running_cycles(void)
{
/* native kernel is always running */
return get_cycles();
}
extern void store_cpu_info(int cpuid);
#if defined(CONFIG_PARAVIRT_GUEST)
/* it is paravirtualized guest and host kernel */
#include <asm/paravirt/cpu.h>
#elif defined(CONFIG_KVM_GUEST_KERNEL)
/* it is pure guest kernel (not paravirtualized based on pv_ops) */
#include <asm/kvm/guest/cpu.h>
#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
/* native kernel or native kernel with virtualization support */
static inline unsigned long
get_cpu_running_cycles(void)
{
return native_get_cpu_running_cycles();
}
#endif /* CONFIG_PARAVIRT_GUEST */
#endif /* _ASM_E2K_CPU_H_ */

File diff suppressed because it is too large

arch/e2k/include/asm/cpu_regs_access.h Normal file

@ -0,0 +1,549 @@
#ifndef _E2K_CPU_REGS_ACCESS_H_
#define _E2K_CPU_REGS_ACCESS_H_
#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/bitops.h>
#include <asm/cpu_regs_types.h>
#include <asm/native_cpu_regs_access.h>
#ifndef __ASSEMBLY__
#if CONFIG_CPU_ISET >= 3
# define native_read_CORE_MODE_reg_value() \
NATIVE_READ_CORE_MODE_REG_VALUE()
# define native_write_CORE_MODE_reg_value(modes) \
NATIVE_WRITE_CORE_MODE_REG_VALUE((modes))
#else
# define native_read_CORE_MODE_reg_value() \
(machine.rrd(E2K_REG_CORE_MODE))
# define native_write_CORE_MODE_reg_value(modes) \
(machine.rwd(E2K_REG_CORE_MODE, modes))
#endif
#define native_read_OSCUTD_reg_value() \
(machine.rrd(E2K_REG_OSCUTD))
#define native_write_OSCUTD_reg_value(modes) \
(machine.rwd(E2K_REG_OSCUTD, modes))
#define native_read_OSCUIR_reg_value() \
(machine.rrd(E2K_REG_OSCUIR))
#define native_write_OSCUIR_reg_value(v) \
(machine.rwd(E2K_REG_OSCUIR, (v)))
#define boot_native_read_CORE_MODE_reg_value() \
({ \
typeof(boot_machine.boot_rrd) func; \
func = boot_native_vp_to_pp(boot_machine.boot_rrd); \
func(E2K_REG_CORE_MODE); \
})
#define boot_native_write_CORE_MODE_reg_value(modes) \
({ \
typeof(boot_machine.boot_rwd) func; \
func = boot_native_vp_to_pp(boot_machine.boot_rwd); \
func(E2K_REG_CORE_MODE, modes); \
})
#define boot_native_read_OSCUTD_reg_value() \
({ \
typeof(boot_machine.boot_rrd) func; \
func = boot_native_vp_to_pp(boot_machine.boot_rrd); \
func(E2K_REG_OSCUTD); \
})
#define boot_native_write_OSCUTD_reg_value(v) \
({ \
typeof(boot_machine.boot_rwd) func; \
func = boot_native_vp_to_pp(boot_machine.boot_rwd); \
func(E2K_REG_OSCUTD, (v)); \
})
#define boot_native_read_OSCUIR_reg_value() \
({ \
typeof(boot_machine.boot_rrd) func; \
func = boot_native_vp_to_pp(boot_machine.boot_rrd); \
func(E2K_REG_OSCUIR); \
})
#define boot_native_write_OSCUIR_reg_value(v) \
({ \
typeof(boot_machine.boot_rwd) func; \
func = boot_native_vp_to_pp(boot_machine.boot_rwd); \
func(E2K_REG_OSCUIR, v); \
})
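/*
* Note on the boot_* variants above: early boot code runs with physical
* addressing, so the machine vector's boot_rrd()/boot_rwd() function
* pointers, which are stored as virtual addresses, are first converted
* with boot_native_vp_to_pp() before being called.
*/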
#ifdef CONFIG_KVM_GUEST_KERNEL
/* it is native guest kernel (not paravirtualized based on pv_ops) */
#include <asm/kvm/cpu_regs_access.h>
#elif defined(CONFIG_PARAVIRT_GUEST)
/* it is paravirtualized host and guest kernel */
#include <asm/paravirt/cpu_regs_access.h>
#else /* native kernel */
/* it is native kernel without any virtualization */
/* or host kernel with virtualization support */
/*
* Set flags of updated VCPU registers
*/
#define PUT_UPDATED_CPU_REGS_FLAGS(flags)
/*
* Read/write word Procedure Stack Hardware Top Pointer (PSHTP)
*/
#define READ_PSHTP_REG_VALUE() NATIVE_NV_READ_PSHTP_REG_VALUE()
#define WRITE_PSHTP_REG_VALUE(PSHTP_value) \
NATIVE_WRITE_PSHTP_REG_VALUE(PSHTP_value)
/*
* Read/write word Procedure Chain Stack Hardware Top Pointer (PCSHTP)
*/
#define READ_PCSHTP_REG_SVALUE() NATIVE_READ_PCSHTP_REG_SVALUE()
#define WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue) \
NATIVE_WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue)
/*
* Read/write low/high double-word OS Compilation Unit Descriptor (OSCUD)
*/
#define READ_OSCUD_LO_REG_VALUE() NATIVE_READ_OSCUD_LO_REG_VALUE()
#define READ_OSCUD_HI_REG_VALUE() NATIVE_READ_OSCUD_HI_REG_VALUE()
#define BOOT_READ_OSCUD_LO_REG_VALUE() NATIVE_READ_OSCUD_LO_REG_VALUE()
#define BOOT_READ_OSCUD_HI_REG_VALUE() NATIVE_READ_OSCUD_HI_REG_VALUE()
#define WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \
NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value)
#define WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \
NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value)
#define BOOT_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \
NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value)
#define BOOT_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \
NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value)
/*
* Read/write low/high double-word OS Globals Register (OSGD)
*/
#define READ_OSGD_LO_REG_VALUE() NATIVE_READ_OSGD_LO_REG_VALUE()
#define READ_OSGD_HI_REG_VALUE() NATIVE_READ_OSGD_HI_REG_VALUE()
#define BOOT_READ_OSGD_LO_REG_VALUE() NATIVE_READ_OSGD_LO_REG_VALUE()
#define BOOT_READ_OSGD_HI_REG_VALUE() NATIVE_READ_OSGD_HI_REG_VALUE()
#define WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \
NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value)
#define WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \
NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value)
#define BOOT_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \
NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value)
#define BOOT_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \
NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value)
/*
* Read/write low/high double-word Compilation Unit Register (CUD)
*/
#define READ_CUD_LO_REG_VALUE() NATIVE_READ_CUD_LO_REG_VALUE()
#define READ_CUD_HI_REG_VALUE() NATIVE_READ_CUD_HI_REG_VALUE()
#define BOOT_READ_CUD_LO_REG_VALUE() NATIVE_READ_CUD_LO_REG_VALUE()
#define BOOT_READ_CUD_HI_REG_VALUE() NATIVE_READ_CUD_HI_REG_VALUE()
#define WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \
NATIVE_WRITE_CUD_LO_REG_VALUE(CUD_lo_value)
#define WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \
NATIVE_WRITE_CUD_HI_REG_VALUE(CUD_hi_value)
#define BOOT_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \
NATIVE_WRITE_CUD_LO_REG_VALUE(CUD_lo_value)
#define BOOT_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \
NATIVE_WRITE_CUD_HI_REG_VALUE(CUD_hi_value)
/*
* Read/write low/high double-word Globals Register (GD)
*/
#define READ_GD_LO_REG_VALUE() NATIVE_READ_GD_LO_REG_VALUE()
#define READ_GD_HI_REG_VALUE() NATIVE_READ_GD_HI_REG_VALUE()
#define BOOT_READ_GD_LO_REG_VALUE() NATIVE_READ_GD_LO_REG_VALUE()
#define BOOT_READ_GD_HI_REG_VALUE() NATIVE_READ_GD_HI_REG_VALUE()
#define WRITE_GD_LO_REG_VALUE(GD_lo_value) \
NATIVE_WRITE_GD_LO_REG_VALUE(GD_lo_value)
#define WRITE_GD_HI_REG_VALUE(GD_hi_value) \
NATIVE_WRITE_GD_HI_REG_VALUE(GD_hi_value)
#define BOOT_WRITE_GD_LO_REG_VALUE(GD_lo_value) \
NATIVE_WRITE_GD_LO_REG_VALUE(GD_lo_value)
#define BOOT_WRITE_GD_HI_REG_VALUE(GD_hi_value) \
NATIVE_WRITE_GD_HI_REG_VALUE(GD_hi_value)
/*
* Read/write low/high quad-word Procedure Stack Pointer Register (PSP)
*/
#define READ_PSP_LO_REG_VALUE() NATIVE_NV_READ_PSP_LO_REG_VALUE()
#define READ_PSP_HI_REG_VALUE() NATIVE_NV_READ_PSP_HI_REG_VALUE()
#define BOOT_READ_PSP_LO_REG_VALUE() NATIVE_NV_READ_PSP_LO_REG_VALUE()
#define BOOT_READ_PSP_HI_REG_VALUE() NATIVE_NV_READ_PSP_HI_REG_VALUE()
#define WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \
NATIVE_NV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value)
#define WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \
NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(PSP_hi_value)
#define BOOT_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \
NATIVE_NV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value)
#define BOOT_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \
NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(PSP_hi_value)
/*
* Read/write low/high quad-word Procedure Chain Stack Pointer Register (PCSP)
*/
#define READ_PCSP_LO_REG_VALUE() NATIVE_NV_READ_PCSP_LO_REG_VALUE()
#define READ_PCSP_HI_REG_VALUE() NATIVE_NV_READ_PCSP_HI_REG_VALUE()
#define BOOT_READ_PCSP_LO_REG_VALUE() NATIVE_NV_READ_PCSP_LO_REG_VALUE()
#define BOOT_READ_PCSP_HI_REG_VALUE() NATIVE_NV_READ_PCSP_HI_REG_VALUE()
#define WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \
NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value)
#define WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \
NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value)
#define BOOT_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \
NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value)
#define BOOT_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \
NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value)
/*
* Read/write low/high quad-word Current Chain Register (CR0/CR1)
*/
#define READ_CR0_LO_REG_VALUE() NATIVE_NV_READ_CR0_LO_REG_VALUE()
#define READ_CR0_HI_REG_VALUE() NATIVE_NV_READ_CR0_HI_REG_VALUE()
#define READ_CR1_LO_REG_VALUE() NATIVE_NV_READ_CR1_LO_REG_VALUE()
#define READ_CR1_HI_REG_VALUE() NATIVE_NV_READ_CR1_HI_REG_VALUE()
#define WRITE_CR0_LO_REG_VALUE(CR0_lo_value) \
NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG_VALUE(CR0_lo_value)
#define WRITE_CR0_HI_REG_VALUE(CR0_hi_value) \
NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG_VALUE(CR0_hi_value)
#define WRITE_CR1_LO_REG_VALUE(CR1_lo_value) \
NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG_VALUE(CR1_lo_value)
#define WRITE_CR1_HI_REG_VALUE(CR1_hi_value) \
NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG_VALUE(CR1_hi_value)
/*
* Read/write double-word Control Transfer Preparation Registers
* (CTPR1/CTPR2/CTPR3)
*/
#define READ_CTPR_REG_VALUE(reg_no) NATIVE_NV_READ_CTPR_REG_VALUE(reg_no)
#define WRITE_CTPR_REG_VALUE(reg_no, CTPR_value) \
NATIVE_WRITE_CTPR_REG_VALUE(reg_no, CTPR_value)
/*
* Read/write low/high double-word Non-Protected User Stack Descriptor
* Register (USD)
*/
#define READ_USD_LO_REG_VALUE() NATIVE_NV_READ_USD_LO_REG_VALUE()
#define READ_USD_HI_REG_VALUE() NATIVE_NV_READ_USD_HI_REG_VALUE()
#define BOOT_READ_USD_LO_REG_VALUE() NATIVE_NV_READ_USD_LO_REG_VALUE()
#define BOOT_READ_USD_HI_REG_VALUE() NATIVE_NV_READ_USD_HI_REG_VALUE()
#define WRITE_USD_LO_REG_VALUE(USD_lo_value) \
NATIVE_NV_WRITE_USD_LO_REG_VALUE(USD_lo_value)
#define WRITE_USD_HI_REG_VALUE(USD_hi_value) \
NATIVE_NV_WRITE_USD_HI_REG_VALUE(USD_hi_value)
#define BOOT_WRITE_USD_LO_REG_VALUE(USD_lo_value) \
NATIVE_NV_WRITE_USD_LO_REG_VALUE(USD_lo_value)
#define BOOT_WRITE_USD_HI_REG_VALUE(USD_hi_value) \
NATIVE_NV_WRITE_USD_HI_REG_VALUE(USD_hi_value)
/*
* Read/write low/high double-word Protected User Stack Descriptor
* Register (PUSD)
*/
#define READ_PUSD_LO_REG_VALUE() NATIVE_NV_READ_PUSD_LO_REG_VALUE()
#define READ_PUSD_HI_REG_VALUE() NATIVE_NV_READ_PUSD_HI_REG_VALUE()
#define WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value) \
NATIVE_NV_WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value)
#define WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value) \
NATIVE_NV_WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value)
/*
* Read/write double-word User Stacks Base Register (USBR)
*/
#define READ_USBR_REG_VALUE() NATIVE_NV_READ_USBR_REG_VALUE()
#define READ_SBR_REG_VALUE() NATIVE_NV_READ_SBR_REG_VALUE()
#define BOOT_READ_USBR_REG_VALUE() NATIVE_NV_READ_USBR_REG_VALUE()
#define BOOT_READ_SBR_REG_VALUE() NATIVE_NV_READ_SBR_REG_VALUE()
#define WRITE_USBR_REG_VALUE(USBR_value) \
NATIVE_NV_WRITE_USBR_REG_VALUE(USBR_value)
#define WRITE_SBR_REG_VALUE(SBR_value) \
NATIVE_NV_WRITE_SBR_REG_VALUE(SBR_value)
#define BOOT_WRITE_USBR_REG_VALUE(USBR_value) \
NATIVE_NV_WRITE_USBR_REG_VALUE(USBR_value)
#define BOOT_WRITE_SBR_REG_VALUE(SBR_value) \
NATIVE_NV_WRITE_SBR_REG_VALUE(SBR_value)
/*
* Read/write double-word Window Descriptor Register (WD)
*/
#define READ_WD_REG_VALUE() NATIVE_READ_WD_REG_VALUE()
#define WRITE_WD_REG_VALUE(WD_value) \
NATIVE_WRITE_WD_REG_VALUE(WD_value)
#ifdef NEED_PARAVIRT_LOOP_REGISTERS
/*
* Read/write double-word Loop Status Register (LSR)
*/
#define READ_LSR_REG_VALUE() NATIVE_READ_LSR_REG_VALUE()
#define WRITE_LSR_REG_VALUE(LSR_value) \
NATIVE_WRITE_LSR_REG_VALUE(LSR_value)
/*
* Read/write double-word Initial Loop Counters Register (ILCR)
*/
#define READ_ILCR_REG_VALUE() NATIVE_READ_ILCR_REG_VALUE()
#define WRITE_ILCR_REG_VALUE(ILCR_value) \
NATIVE_WRITE_ILCR_REG_VALUE(ILCR_value)
/*
* Write double-word LSR/ILCR registers in complex
*/
#define WRITE_LSR_LSR1_ILCR_ILCR1_REGS_VALUE(lsr, lsr1, ilcr, ilcr1) \
NATIVE_WRITE_LSR_LSR1_ILCR_ILCR1_REGS_VALUE(lsr, lsr1, \
ilcr, ilcr1)
#endif /* NEED_PARAVIRT_LOOP_REGISTERS */
/*
* Read/write OS register which points to the current process thread info
* structure (OSR0)
*/
#define READ_CURRENT_REG_VALUE() NATIVE_NV_READ_OSR0_REG_VALUE()
#define BOOT_READ_CURRENT_REG_VALUE() NATIVE_NV_READ_OSR0_REG_VALUE()
#define WRITE_CURRENT_REG_VALUE(osr0_value) \
NATIVE_NV_WRITE_OSR0_REG_VALUE(osr0_value)
#define BOOT_WRITE_CURRENT_REG_VALUE(osr0_value) \
NATIVE_NV_WRITE_OSR0_REG_VALUE(osr0_value)
/*
* Read/write OS Entries Mask (OSEM)
*/
#define READ_OSEM_REG_VALUE() NATIVE_READ_OSEM_REG_VALUE()
#define WRITE_OSEM_REG_VALUE(osem_value) \
NATIVE_WRITE_OSEM_REG_VALUE(osem_value)
/*
* Read/write word Base Global Register (BGR)
*/
#define READ_BGR_REG_VALUE() NATIVE_READ_BGR_REG_VALUE()
#define BOOT_READ_BGR_REG_VALUE() NATIVE_READ_BGR_REG_VALUE()
#define WRITE_BGR_REG_VALUE(BGR_value) \
NATIVE_WRITE_BGR_REG_VALUE(BGR_value)
#define BOOT_WRITE_BGR_REG_VALUE(BGR_value) \
NATIVE_WRITE_BGR_REG_VALUE(BGR_value)
/*
* Read CPU current clock register (CLKR)
*/
#define READ_CLKR_REG_VALUE() NATIVE_READ_CLKR_REG_VALUE()
/*
* Read/Write system clock registers (SCLKR/SCLKM1/SCLKM2/SCLKM3)
*/
#define READ_SCLKR_REG_VALUE() NATIVE_READ_SCLKR_REG_VALUE()
#define READ_SCLKM1_REG_VALUE() NATIVE_READ_SCLKM1_REG_VALUE()
#define READ_SCLKM2_REG_VALUE() NATIVE_READ_SCLKM2_REG_VALUE()
#define READ_SCLKM3_REG_VALUE() NATIVE_READ_SCLKM3_REG_VALUE()
#define WRITE_SCLKR_REG_VALUE(reg_value) \
NATIVE_WRITE_SCLKR_REG_VALUE(reg_value)
#define WRITE_SCLKM1_REG_VALUE(reg_value) \
NATIVE_WRITE_SCLKM1_REG_VALUE(reg_value)
#define WRITE_SCLKM2_REG_VALUE(reg_value) \
NATIVE_WRITE_SCLKM2_REG_VALUE(reg_value)
#define WRITE_SCLKM3_REG_VALUE(reg_value) \
NATIVE_WRITE_SCLKM3_REG_VALUE(reg_value)
/*
* Read/Write Control Unit HardWare registers (CU_HW0/CU_HW1)
*/
#define READ_CU_HW0_REG_VALUE() NATIVE_READ_CU_HW0_REG_VALUE()
#define READ_CU_HW1_REG_VALUE() NATIVE_READ_CU_HW1_REG_VALUE()
#define WRITE_CU_HW0_REG_VALUE(reg) NATIVE_WRITE_CU_HW0_REG_VALUE(reg)
#define WRITE_CU_HW1_REG_VALUE(reg) NATIVE_WRITE_CU_HW1_REG_VALUE(reg)
/*
* Read/write low/high double-word Recovery point register (RPR)
*/
#define READ_RPR_LO_REG_VALUE() NATIVE_READ_RPR_LO_REG_VALUE()
#define READ_RPR_HI_REG_VALUE() NATIVE_READ_RPR_HI_REG_VALUE()
#define READ_SBBP_REG_VALUE() NATIVE_READ_SBBP_REG_VALUE()
#define WRITE_RPR_LO_REG_VALUE(RPR_lo_value) \
NATIVE_WRITE_RPR_LO_REG_VALUE(RPR_lo_value)
#define WRITE_RPR_HI_REG_VALUE(RPR_hi_value) \
NATIVE_WRITE_RPR_HI_REG_VALUE(RPR_hi_value)
/*
* Read double-word CPU current Instruction Pointer register (IP)
*/
#define READ_IP_REG_VALUE() NATIVE_NV_READ_IP_REG_VALUE()
/*
* Read debug and monitor registers
*/
#define READ_DIBCR_REG_VALUE() NATIVE_READ_DIBCR_REG_VALUE()
#define READ_DIBSR_REG_VALUE() NATIVE_READ_DIBSR_REG_VALUE()
#define READ_DIMCR_REG_VALUE() NATIVE_READ_DIMCR_REG_VALUE()
#define READ_DIBAR0_REG_VALUE() NATIVE_READ_DIBAR0_REG_VALUE()
#define READ_DIBAR1_REG_VALUE() NATIVE_READ_DIBAR1_REG_VALUE()
#define READ_DIBAR2_REG_VALUE() NATIVE_READ_DIBAR2_REG_VALUE()
#define READ_DIBAR3_REG_VALUE() NATIVE_READ_DIBAR3_REG_VALUE()
#define READ_DIMAR0_REG_VALUE() NATIVE_READ_DIMAR0_REG_VALUE()
#define READ_DIMAR1_REG_VALUE() NATIVE_READ_DIMAR1_REG_VALUE()
#define WRITE_DIBCR_REG_VALUE(DIBCR_value) \
NATIVE_WRITE_DIBCR_REG_VALUE(DIBCR_value)
#define WRITE_DIBSR_REG_VALUE(DIBSR_value) \
NATIVE_WRITE_DIBSR_REG_VALUE(DIBSR_value)
#define WRITE_DIMCR_REG_VALUE(DIMCR_value) \
NATIVE_WRITE_DIMCR_REG_VALUE(DIMCR_value)
#define WRITE_DIBAR0_REG_VALUE(DIBAR0_value) \
NATIVE_WRITE_DIBAR0_REG_VALUE(DIBAR0_value)
#define WRITE_DIBAR1_REG_VALUE(DIBAR1_value) \
NATIVE_WRITE_DIBAR1_REG_VALUE(DIBAR1_value)
#define WRITE_DIBAR2_REG_VALUE(DIBAR2_value) \
NATIVE_WRITE_DIBAR2_REG_VALUE(DIBAR2_value)
#define WRITE_DIBAR3_REG_VALUE(DIBAR3_value) \
NATIVE_WRITE_DIBAR3_REG_VALUE(DIBAR3_value)
#define WRITE_DIMAR0_REG_VALUE(DIMAR0_value) \
NATIVE_WRITE_DIMAR0_REG_VALUE(DIMAR0_value)
#define WRITE_DIMAR1_REG_VALUE(DIMAR1_value) \
NATIVE_WRITE_DIMAR1_REG_VALUE(DIMAR1_value)
/*
* Read/write double-word Compilation Unit Table Register (CUTD)
*/
#define READ_CUTD_REG_VALUE() NATIVE_NV_READ_CUTD_REG_VALUE()
#define WRITE_CUTD_REG_VALUE(CUTD_value) \
NATIVE_NV_NOIRQ_WRITE_CUTD_REG_VALUE(CUTD_value)
/*
* Read word Compilation Unit Index Register (CUIR)
*/
#define READ_CUIR_REG_VALUE() NATIVE_READ_CUIR_REG_VALUE()
/*
* Read/write word Processor State Register (PSR)
*/
#define READ_PSR_REG_VALUE() NATIVE_NV_READ_PSR_REG_VALUE()
#define BOOT_READ_PSR_REG_VALUE() NATIVE_NV_READ_PSR_REG_VALUE()
#define WRITE_PSR_REG_VALUE(PSR_value) \
NATIVE_WRITE_PSR_REG_VALUE(PSR_value)
#define BOOT_WRITE_PSR_REG_VALUE(PSR_value) \
NATIVE_WRITE_PSR_REG_VALUE(PSR_value)
#define WRITE_PSR_IRQ_BARRIER(PSR_value) \
NATIVE_WRITE_PSR_IRQ_BARRIER(PSR_value)
/*
* Read/write word User Processor State Register (UPSR)
*/
#define READ_UPSR_REG_VALUE() NATIVE_NV_READ_UPSR_REG_VALUE()
#define BOOT_READ_UPSR_REG_VALUE() NATIVE_NV_READ_UPSR_REG_VALUE()
#define WRITE_UPSR_REG_VALUE(UPSR_value) \
NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value)
#define BOOT_WRITE_UPSR_REG_VALUE(UPSR_value) \
NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value)
#define WRITE_UPSR_IRQ_BARRIER(UPSR_value) \
NATIVE_WRITE_UPSR_IRQ_BARRIER(UPSR_value)
/*
* Read/write word floating point control registers (PFPFR/FPCR/FPSR)
*/
#define READ_PFPFR_REG_VALUE() NATIVE_NV_READ_PFPFR_REG_VALUE()
#define READ_FPCR_REG_VALUE() NATIVE_NV_READ_FPCR_REG_VALUE()
#define READ_FPSR_REG_VALUE() NATIVE_NV_READ_FPSR_REG_VALUE()
#define WRITE_PFPFR_REG_VALUE(PFPFR_value) \
NATIVE_NV_WRITE_PFPFR_REG_VALUE(PFPFR_value)
#define WRITE_FPCR_REG_VALUE(FPCR_value) \
NATIVE_NV_WRITE_FPCR_REG_VALUE(FPCR_value)
#define WRITE_FPSR_REG_VALUE(FPSR_value) \
NATIVE_NV_WRITE_FPSR_REG_VALUE(FPSR_value)
/*
* Read/write low/high double-word Intel segment registers (xS)
*/
#define READ_CS_LO_REG_VALUE() NATIVE_READ_CS_LO_REG_VALUE()
#define READ_CS_HI_REG_VALUE() NATIVE_READ_CS_HI_REG_VALUE()
#define READ_DS_LO_REG_VALUE() NATIVE_READ_DS_LO_REG_VALUE()
#define READ_DS_HI_REG_VALUE() NATIVE_READ_DS_HI_REG_VALUE()
#define READ_ES_LO_REG_VALUE() NATIVE_READ_ES_LO_REG_VALUE()
#define READ_ES_HI_REG_VALUE() NATIVE_READ_ES_HI_REG_VALUE()
#define READ_FS_LO_REG_VALUE() NATIVE_READ_FS_LO_REG_VALUE()
#define READ_FS_HI_REG_VALUE() NATIVE_READ_FS_HI_REG_VALUE()
#define READ_GS_LO_REG_VALUE() NATIVE_READ_GS_LO_REG_VALUE()
#define READ_GS_HI_REG_VALUE() NATIVE_READ_GS_HI_REG_VALUE()
#define READ_SS_LO_REG_VALUE() NATIVE_READ_SS_LO_REG_VALUE()
#define READ_SS_HI_REG_VALUE() NATIVE_READ_SS_HI_REG_VALUE()
#define WRITE_CS_LO_REG_VALUE(sd) NATIVE_CL_WRITE_CS_LO_REG_VALUE(sd)
#define WRITE_CS_HI_REG_VALUE(sd) NATIVE_CL_WRITE_CS_HI_REG_VALUE(sd)
#define WRITE_DS_LO_REG_VALUE(sd) NATIVE_CL_WRITE_DS_LO_REG_VALUE(sd)
#define WRITE_DS_HI_REG_VALUE(sd) NATIVE_CL_WRITE_DS_HI_REG_VALUE(sd)
#define WRITE_ES_LO_REG_VALUE(sd) NATIVE_CL_WRITE_ES_LO_REG_VALUE(sd)
#define WRITE_ES_HI_REG_VALUE(sd) NATIVE_CL_WRITE_ES_HI_REG_VALUE(sd)
#define WRITE_FS_LO_REG_VALUE(sd) NATIVE_CL_WRITE_FS_LO_REG_VALUE(sd)
#define WRITE_FS_HI_REG_VALUE(sd) NATIVE_CL_WRITE_FS_HI_REG_VALUE(sd)
#define WRITE_GS_LO_REG_VALUE(sd) NATIVE_CL_WRITE_GS_LO_REG_VALUE(sd)
#define WRITE_GS_HI_REG_VALUE(sd) NATIVE_CL_WRITE_GS_HI_REG_VALUE(sd)
#define WRITE_SS_LO_REG_VALUE(sd) NATIVE_CL_WRITE_SS_LO_REG_VALUE(sd)
#define WRITE_SS_HI_REG_VALUE(sd) NATIVE_CL_WRITE_SS_HI_REG_VALUE(sd)
/*
* Read double-word Processor Identification Register (IDR)
*/
#define READ_IDR_REG_VALUE() NATIVE_READ_IDR_REG_VALUE()
#define BOOT_READ_IDR_REG_VALUE() NATIVE_READ_IDR_REG_VALUE()
/*
* Processor Core Mode Register (CORE_MODE)
*/
#define READ_CORE_MODE_REG_VALUE() native_read_CORE_MODE_reg_value()
#define BOOT_READ_CORE_MODE_REG_VALUE() boot_native_read_CORE_MODE_reg_value()
#define WRITE_CORE_MODE_REG_VALUE(modes) \
native_write_CORE_MODE_reg_value(modes)
#define BOOT_WRITE_CORE_MODE_REG_VALUE(modes) \
boot_native_write_CORE_MODE_reg_value(modes)
/*
* OS Compilation Unit Table Descriptor Register (OSCUTD)
*/
#define READ_OSCUTD_REG_VALUE() native_read_OSCUTD_reg_value()
#define BOOT_READ_OSCUTD_REG_VALUE() boot_native_read_OSCUTD_reg_value()
#define WRITE_OSCUTD_REG_VALUE(desc) \
native_write_OSCUTD_reg_value(desc)
#define BOOT_WRITE_OSCUTD_REG_VALUE(desc) \
boot_native_write_OSCUTD_reg_value((desc))
/*
* OS Compilation Unit Index Register (OSCUIR)
*/
#define READ_OSCUIR_REG_VALUE() native_read_OSCUIR_reg_value()
#define WRITE_OSCUIR_REG_VALUE(v) native_write_OSCUIR_reg_value((v))
#define BOOT_READ_OSCUIR_REG_VALUE() boot_native_read_OSCUIR_reg_value()
#define BOOT_WRITE_OSCUIR_REG_VALUE(v) boot_native_write_OSCUIR_reg_value((v))
#endif /* CONFIG_KVM_GUEST_KERNEL */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _E2K_CPU_REGS_ACCESS_H_ */

Some files were not shown because too many files have changed in this diff