linux-headers-5.4.0-3.9

This commit is contained in:
Alibek Omarov 2021-11-14 18:13:25 +03:00
parent 6609503008
commit 000d6cbdce
298 changed files with 3085 additions and 3642 deletions

View File

@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 91
EXTRAVERSION = -3.6
SUBLEVEL = 143
EXTRAVERSION = -3.9
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
@ -794,16 +794,16 @@ KBUILD_CFLAGS += -Wno-tautological-compare
KBUILD_CFLAGS += -mno-global-merge
else
# These warnings generated too much noise in a regular build.
# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
KBUILD_CFLAGS += -Wno-unused-but-set-variable
# Warn about unmarked fall-throughs in switch statement.
# Disabled for clang while comment to attribute conversion happens and
# https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
endif
# These warnings generated too much noise in a regular build.
# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
@ -954,12 +954,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
# change __FILE__ to the relative path from the srctree
KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
# ensure -fcf-protection is disabled when using retpoline as it is
# incompatible with -mindirect-branch=thunk-extern
ifdef CONFIG_RETPOLINE
KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
endif
include scripts/Makefile.kasan
include scripts/Makefile.extrawarn
include scripts/Makefile.ubsan
@ -977,7 +971,7 @@ LDFLAGS_vmlinux += $(call ld-option, -X,)
endif
ifeq ($(CONFIG_RELR),y)
LDFLAGS_vmlinux += --pack-dyn-relocs=relr
LDFLAGS_vmlinux += --pack-dyn-relocs=relr --use-android-relr-tags
endif
# make the checker run with the right architecture
@ -1215,11 +1209,19 @@ define filechk_utsrelease.h
endef
define filechk_version.h
echo \#define LINUX_VERSION_CODE $(shell \
expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \
echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))'
if [ $(SUBLEVEL) -gt 255 ]; then \
echo \#define LINUX_VERSION_CODE $(shell \
expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + 255); \
else \
echo \#define LINUX_VERSION_CODE $(shell \
expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + $(SUBLEVEL)); \
fi; \
echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + \
((c) > 255 ? 255 : (c)))'
endef
$(version_h): PATCHLEVEL := $(if $(PATCHLEVEL), $(PATCHLEVEL), 0)
$(version_h): SUBLEVEL := $(if $(SUBLEVEL), $(SUBLEVEL), 0)
$(version_h): FORCE
$(call filechk,version.h)
$(Q)rm -f $(old_version_h)
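
The version.h hunk above exists because LTS sublevels can exceed 255 while LINUX_VERSION_CODE packs the sublevel into a single byte; both the generated constant and the KERNEL_VERSION() macro now clamp the third component at 255. A minimal user-space sketch of the resulting arithmetic (the macro body is copied from the hunk; the sample versions are illustrative):

#include <stdio.h>

/* same clamping the Makefile now writes into include/generated/version.h */
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))

int main(void)
{
	/* 5.4.143 fits in the 8-bit sublevel field and is packed as-is */
	printf("5.4.143 -> 0x%06x\n", KERNEL_VERSION(5, 4, 143));
	/* a hypothetical 5.4.300 is clamped to 5.4.255, matching the shell branch above */
	printf("5.4.300 -> 0x%06x\n", KERNEL_VERSION(5, 4, 300));
	return 0;
}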

View File

@ -8,8 +8,8 @@
* Ingo Molnar <mingo@redhat.com>, 1999, 2000
*/
#define IO_APIC_DEFAULT_PHYS_BASE 0xfec00000
#define APIC_DEFAULT_PHYS_BASE 0xfee00000
#define IO_APIC_DEFAULT_PHYS_BASE 0xfec00000UL
#define APIC_DEFAULT_PHYS_BASE 0xfee00000UL
/*
* This is the IO-APIC register space as specified

View File

@ -16,28 +16,13 @@
/* Standard COM flags (except for COM4, because of the 8514 problem) */
#ifdef CONFIG_SERIAL_DETECT_IRQ
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ)
#define STD_COM4_FLAGS (UPF_BOOT_AUTOCONF | UPF_AUTO_IRQ)
#else
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST)
#define STD_COM4_FLAGS UPF_BOOT_AUTOCONF
#endif
#ifdef CONFIG_SERIAL_MANY_PORTS
#define FOURPORT_FLAGS ASYNC_FOURPORT
#define ACCENT_FLAGS 0
#define BOCA_FLAGS 0
#define HUB6_FLAGS 0
#define RS_TABLE_SIZE 64
#else
#define RS_TABLE_SIZE
#endif
#define NS16550_SERIAL_PORT_0 0x3f8
#define NS16550_SERIAL_PORT_1 0x2f8
#define NS16550_SERIAL_PORT_2 0x3e8
#define NS16550_SERIAL_PORT_3 0x2e8
#ifdef CONFIG_E2K
#define SERIAL_PORT_DFNS \
/* UART CLK PORT IRQ FLAGS */ \
@ -91,370 +76,4 @@
#define AM85C30_D6 (0x01 << 6)
#define AM85C30_D7 (0x01 << 7)
/* WR0 */
/* D2,D1,D0
* Register Access Pointer
*
* 000 - N0, [N8]*
* 001 - N1, [N9]*
* 010 - N2, [N10]*
* 011 - N3, [N11]*
* 100 - N4, [N12]*
* 101 - N5, [N13]*
* 110 - N6, [N14]*
* 111 - N7, [N15]*
*
* if Point High Register Group = 1
*
* D5,D4,D3
*
* SCC Command
*
* 000 - Null Code
* 001 - Point High Register Group
* 010 - Reset Ext/Status Interrupts
* 011 - Send Abort
* 100 - Enable Int. on Next Rx Character
* 101 - Reset Tx Int. Pending
* 110 - Error Reset
* 111 - Reset Highest IUS
*
* D7,D6
* SCC Command
*
* 00 - Null Code
* 01 - Reset Rx CRC Checker
* 10 - Reset Tx CRC Generator
* 11 - Reset Tx Underrun/EOM Latch
*/
/* WR1 */
/* D0
* Ext. Int. Enable
* D1
* Tx Int. Enable
* D2
* Parity is Special Condition
* D4,D3
* Rx Int Mode
*
* 00 - Rx Int Disable
* 01 - Rx Int on First Char. or Special Condition
* 10 - Int on All Rx Char. or Special Condition
* 11 - Rx Int. on Special Condition Only
* D5
* Wait/DMA Request on Receive/Transmit
* D6
* Wait/DMA Request Function
* D7
* Wait/DMA Request Enable
*/
/* WR2 */
/* D7 - D0
* Interrupt Vector
*/
/* WR3 */
/* D0
* Rx Enable
* D1
* Sync Character Load Inhibit
* D2
* Address Search Mode (SDLC)
* D3
* Rx CRC Enable
* D4
* Enter Hunt Mode
* D5
* Auto Enable
* D7,D6
*
* 00 - Rx 5 Bits / Character
* 01 - Rx 6 Bits / Character
* 10 - Rx 7 Bits / Character
* 11 - Rx 8 Bits / Character
*/
/* WR4 */
/* D0
* Parity Enable
* D1
* Parity Even(0) / Odd(1)
* D3,D2
*
* 00 - Sync Modes Enable
* 01 - 1 Stop Bit / Character
* 10 - 1.5 Stop Bits / Character
* 11 - 2 Stop Bits / Character
* D5,D4
*
* 00 - 8-Bit Sync Character
* 01 - 16-Bit Sync Character
* 10 - SDLC Mode
* 11 - External Sync Mode
* D7,D6
*
* 00 - X1 Clock Mode
* 01 - X16 Clock Mode
* 10 - X32 Clock Mode
* 11 - X64 Clock Mode
*/
/* WR5 */
/* D0
* Tx CRC Enable
* D1
* RTS
* D2
* SDLC-/CRC-16
* D3
* Tx Enable
* D4
* Send Break
* D6,D5
*
* 00 - Tx 5 Bits / Character
* 01 - Tx 6 Bits / Character
* 10 - Tx 7 Bits / Character
* 11 - Tx 8 Bits / Character
* D7
* DTR
*/
/* WR6 */
/* D5-D0
* xN constant
* D7,D6
* Reserved (not used in asynchronous mode)
*/
/* WR7 */
/* D6-D0
* Reserved (not used in asynchronous mode)
* D7
* xN Mode Enable
*/
/* WR8 */
/* D7-D0
* Transmit Buffer
*/
/* WR9 */
/* D0
* Vector Includes Status
* D1
* No Vector
* D2
* Disable Lower Chain
* D3
* Master Interrupt Enable
* D4
* Status High/Low_
* D5
* Interrupt Masking Without INTACK_
* D7-D6
*
* 00 - No Reset
* 01 - Channel B Reset
* 10 - Channel A Reset
* 11 - Force Hardware Reset
*/
/* WR10 */
/* D0
* 6 bit / 8 bit SYNC
* D1
* Loop Mode
* D2
* Abort/Flag on Underrun
* D3
* Mark/Flag Idle
* D4
* Go Active on Poll
* D6-D5
*
* 00 - NRZ
* 01 - NRZI
* 10 - FM1 (Transition = 1)
* 11 - FM0 (Transition = 0)
* D7
* CRC Preset '1' or '0'
*/
/* WR11 */
/* D1-D0
*
* 00 - TRxC Out = XTAL output
* 01 - TRxC Out = Transmit Clock
* 10 - TRxC Out = BRG output
* 11 - TRxC Out = DPLL output
* D2
* TRxC O/I
* D4-D3
*
* 00 - Transmit Clock = RTxC pin
* 01 - Transmit Clock = TRxC pin
* 10 - Transmit Clock = BRG output
* 11 - Transmit Clock = DPLL output
* D6-D5
*
* 00 - Receive Clock = RTxC pin
* 01 - Receive Clock = TRxC pin
* 10 - Receive Clock = BRG output
* 11 - Receive Clock = DPLL output
* D7
* RTxC XTAL / NO XTAL
*/
/* WR12 */
/* D7-D0
* Lower Byte of Time Constant
*/
/* WR13 */
/* D7-D0
* Upper Byte of Time Constant
*/
/* WR14 */
/* D0
* BRG Enable
* D1
* BRG Source
* D2
* DTR / REQUEST Function
* D3
* Auto Echo
* D4
* Local Loopback
* D7-D5
*
* 000 - Null Command
* 001 - Enter Search Mode
* 010 - Reset Missing Clock
* 011 - Disable DPLL
* 100 - Set Source = BR Generator
* 101 - Set Source = RTxC_
* 110 - Set FM Mode
* 111 - Set NRZI Mode
*/
/* WR15 */
/* D0
* SDLC/HDLC Enhancement Enable
* D1
* Zero Count IE (Interrupt Enable)
* D2
* 10 x 19-bit Frame Status FIFO Enable
* D3
* DCD IE
* D4
* Sync/Hunt IE
* D5
* CTS IE
* D6
* Tx Underrun / EOM IE
* D7
* Break/Abort IE
*/
/* RR0 */
/* D0
* Rx Character Available
* D1
* Zero Count
* D2
* Tx Buffer Empty
* D3
* DCD
* D4
* Sync/Hunt
* D5
* CTS
* D6
* Tx Underrun / EOM
* D7
* Break/Abort
*/
/* RR1 */
/* D0
* All Sent
* D1
* Residue Code 2
* D2
* Residue Code 1
* D3
* Residue Code 0
* D4
* Parity Error
* D5
* Rx Overrun Error
* D6
* CRC / Framing Error
* D7
* End of Frame (SDLC)
*/
/* RR2 */
/* D7-D0
* Interrupt Vector
*
* Channel A RR2 = WR2
* Channel B RR2 = Interrupt Vector Modified*
*
* *
* D3 D2 D1 Status High/Low = 0
* D4 D5 D6 Status High/Low = 1
*
* 0 0 0 Ch B Transmit Buffer Empty
* 0 0 1 Ch B External/Status Change
* 0 1 0 Ch B Receive Char. Available
* 0 1 1 Ch B Special Receive Condition
* 1 0 0 Ch A Transmit Buffer Empty
* 1 0 1 Ch A External/Status Change
* 1 1 0 Ch A Receive Char. Available
* 1 1 1 Ch A Special Receive Condition
*/
/* RR3 */
/* D0
* Channel B Ext/Status IP (Interrupt Pending)
* D1
* Channel B Tx IP
* D2
* Channel B Rx IP
* D3
* Channel A Ext/Status IP
* D4
* Channel A Tx IP
* D5
* Channel A Rx IP
* D7-D6
* Always 00
*/
/* RR8 */
/* D7-D0
* Receive Buffer
*/
/* RR10 */
/* D7-D0
* Reserved (not used in asynchronous mode)
*/
/* RR12 */
/* D7-D0
* Lower Byte of Time Constant
*/
/* RR13 */
/* D7-D0
* Upper Byte of Time Constant
*/
#endif /* ! _L_SERIAL_H */

View File

@ -4,12 +4,15 @@ generic-y += bugs.h
generic-y += div64.h
generic-y += errno.h
generic-y += emergency-restart.h
generic-y += ioctl.h
generic-y += irq_regs.h
generic-y += kmap_types.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += preempt.h
generic-y += param.h
generic-y += qrwlock.h
generic-y += statfs.h
generic-y += termios.h
generic-y += xor.h
generic-y += mmiowb.h

View File

@ -119,10 +119,10 @@ native_set_all_aaldas(const e2k_aalda_t aaldas_p[])
/* set current array prefetch buffer indices values */
static __always_inline void native_set_aau_aaldis_aaldas(
const struct thread_info *ti, const e2k_aau_t *aau_regs)
const e2k_aalda_t *aaldas, const e2k_aau_t *aau_regs)
{
native_set_all_aaldis(aau_regs->aaldi);
native_set_all_aaldas(ti->aalda);
native_set_all_aaldas(aaldas);
}
/*
@ -130,24 +130,24 @@ static __always_inline void native_set_aau_aaldis_aaldas(
* and comparison with aasr.iab was taken.
*/
static inline void
native_get_aau_context_v2(e2k_aau_t *context)
native_get_aau_context_v2(e2k_aau_t *context, e2k_aasr_t aasr)
{
NATIVE_GET_AAU_CONTEXT_V2(context);
NATIVE_GET_AAU_CONTEXT_V2(context, aasr);
}
static inline void
native_get_aau_context_v5(e2k_aau_t *context)
native_get_aau_context_v5(e2k_aau_t *context, e2k_aasr_t aasr)
{
NATIVE_GET_AAU_CONTEXT_V5(context);
NATIVE_GET_AAU_CONTEXT_V5(context, aasr);
}
/*
* It is assumed that the comparison with aasr.iab was already done and aasr
* will be set later.
*/
static __always_inline void
native_set_aau_context(e2k_aau_t *context)
static __always_inline void native_set_aau_context(const e2k_aau_t *context,
const e2k_aalda_t *aalda, e2k_aasr_t aasr)
{
NATIVE_SET_AAU_CONTEXT(context);
NATIVE_SET_AAU_CONTEXT(context, aalda, aasr);
}
#ifdef CONFIG_KVM_GUEST_KERNEL
@ -177,14 +177,14 @@ native_set_aau_context(e2k_aau_t *context)
native_get_synchronous_part_v5(aau_context); \
})
#define GET_AAU_CONTEXT_V2(cntx) native_get_aau_context_v2(cntx)
#define GET_AAU_CONTEXT_V5(cntx) native_get_aau_context_v5(cntx)
#define GET_AAU_CONTEXT_V2(cntx, aasr) native_get_aau_context_v2(cntx, aasr)
#define GET_AAU_CONTEXT_V5(cntx, aasr) native_get_aau_context_v5(cntx, aasr)
#define SAVE_AAU_MASK_REGS(aau_context, aasr) \
NATIVE_SAVE_AAU_MASK_REGS(aau_context, aasr)
#define RESTORE_AAU_MASK_REGS(aau_context) \
NATIVE_RESTORE_AAU_MASK_REGS(aau_context)
#define RESTORE_AAU_MASK_REGS(aau_context, aaldv, aasr) \
NATIVE_RESTORE_AAU_MASK_REGS(aau_context, aaldv, aasr)
#define SAVE_AADS(aau_regs) \
NATIVE_SAVE_AADS(aau_regs)
@ -234,10 +234,10 @@ native_set_aau_context(e2k_aau_t *context)
regs = native_read_aafstr_reg_value(); \
})
static __always_inline void
set_aau_context(e2k_aau_t *context)
static __always_inline void set_aau_context(e2k_aau_t *context,
const e2k_aalda_t *aalda, e2k_aasr_t aasr)
{
native_set_aau_context(context);
native_set_aau_context(context, aalda, aasr);
}
#endif /* CONFIG_KVM_GUEST_KERNEL */
@ -245,10 +245,8 @@ set_aau_context(e2k_aau_t *context)
/*
* for code optimization
*/
static inline int aau_working(e2k_aau_t *context)
static inline int aau_working(e2k_aasr_t aasr)
{
e2k_aasr_t aasr = context->aasr;
return unlikely(AW(aasr) & (AAU_AASR_IAB | AAU_AASR_STB));
}

View File

@ -35,42 +35,49 @@
*/
#define PREFIX_SAVE_AAU_MASK_REGS(PV_TYPE, pv_type, aau_context, aasr) \
({ \
if (unlikely(AAU_ACTIVE(aasr))) { \
/* As it turns out AAU can be in ACTIVE state \
* in interrupt handler (bug 53227 comment 28 \
* and bug 53227 comment 36). \
* The hardware stops AAU automatically but \
* the value to be written should be corrected \
* to "stopped" so that the "DONE" instruction \
* works as expected. \
*/ \
AS(aasr).lds = AASR_STOPPED; \
do { \
if (aau_context) { \
if (unlikely(AAU_STOPPED(aasr))) { \
pv_type##_read_aaldv_reg(&(aau_context)->aaldv); \
pv_type##_read_aaldm_reg(&(aau_context)->aaldm); \
} else { \
AW((aau_context)->aaldv) = 0; \
AW((aau_context)->aaldm) = 0; \
} \
} \
(aau_context)->aasr = aasr; \
if (unlikely(AAU_STOPPED(aasr))) { \
pv_type##_read_aaldv_reg(&(aau_context)->aaldv); \
pv_type##_read_aaldm_reg(&(aau_context)->aaldm); \
} else { \
AW((aau_context)->aaldv) = 0; \
AW((aau_context)->aaldm) = 0; \
} \
})
} while (0)
#define NATIVE_SAVE_AAU_MASK_REGS(aau_context, aasr) \
#define NATIVE_SAVE_AAU_MASK_REGS(aau_context, aasr) \
PREFIX_SAVE_AAU_MASK_REGS(NATIVE, native, aau_context, aasr)
#define PREFIX_RESTORE_AAU_MASK_REGS(PV_TYPE, pv_type, aau_context) \
static inline e2k_aasr_t aasr_parse(e2k_aasr_t aasr)
{
if (unlikely(AAU_ACTIVE(aasr))) {
/* As it turns out AAU can be in ACTIVE state
* in interrupt handler (bug 53227 comment 28
* and bug 53227 comment 36).
* The hardware stops AAU automatically but
* the value to be written should be corrected
* to "stopped" so that the "DONE" instruction
* works as expected.
*/
aasr.lds = AASR_STOPPED;
}
return aasr;
}
#define PREFIX_RESTORE_AAU_MASK_REGS(PV_TYPE, pv_type, aaldm, aaldv, aasr) \
({ \
pv_type##_write_aafstr_reg_value(0); \
pv_type##_write_aaldm_reg(&(aau_context)->aaldm); \
pv_type##_write_aaldv_reg(&(aau_context)->aaldv); \
pv_type##_write_aaldm_reg(aaldm); \
pv_type##_write_aaldv_reg(aaldv); \
/* aasr can be in 'ACTIVE' state, so we set it last */ \
pv_type##_write_aasr_reg((aau_context)->aasr); \
pv_type##_write_aasr_reg(aasr); \
})
#define NATIVE_RESTORE_AAU_MASK_REGS(aau_context) \
PREFIX_RESTORE_AAU_MASK_REGS(NATIVE, native, aau_context)
#define NATIVE_RESTORE_AAU_MASK_REGS(aaldm, aaldv, aasr) \
PREFIX_RESTORE_AAU_MASK_REGS(NATIVE, native, aaldm, aaldv, aasr)
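
The rework above moves the ACTIVE-to-STOPPED correction out of the save macro into a standalone aasr_parse() helper, so callers read %aasr once, normalize it, and then pass the value into the save/restore macros. A simplified, self-contained sketch of that flow (the AASR_* values and the register-read stub are assumptions for illustration):

#include <stdio.h>

enum { AASR_NULL, AASR_READY, AASR_ACTIVE, AASR_STOPPED };	/* illustrative encoding */

typedef struct { unsigned int lds; } e2k_aasr_t;		/* reduced to the state field */

static e2k_aasr_t read_aasr_reg(void)				/* hypothetical register-read stub */
{
	return (e2k_aasr_t) { .lds = AASR_ACTIVE };
}

/* mirrors aasr_parse() from the hunk: an AAU caught in the ACTIVE state by an
 * interrupt handler is recorded as STOPPED so a later "DONE" works as expected */
static e2k_aasr_t aasr_parse(e2k_aasr_t aasr)
{
	if (aasr.lds == AASR_ACTIVE)
		aasr.lds = AASR_STOPPED;
	return aasr;
}

int main(void)
{
	e2k_aasr_t aasr = aasr_parse(read_aasr_reg());
	printf("state written back: %u (AASR_STOPPED = %u)\n", aasr.lds, AASR_STOPPED);
	return 0;
}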
#define PREFIX_SAVE_AADS(PV_TYPE, pv_type, aau_regs) \
({ \
@ -491,74 +498,68 @@
* It is assumed that aasr was read earlier (by the get_aau_context caller)
* and that the comparison with aasr.iab was already done.
*/
#define PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, ISET, iset, aau_context) \
#define PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, ISET, iset, aau_context, aasr) \
({ \
/* get registers, which describe arrays in APB operations */ \
e2k_aasr_t aasr = (aau_context)->aasr; \
\
/* get descriptors & auxiliary registers */ \
if (AS(aasr).iab) \
PV_TYPE##_GET_ARRAY_DESCRIPTORS_##ISET(aau_context); \
if (aasr.iab) \
PV_TYPE##_GET_ARRAY_DESCRIPTORS_##ISET(aau_context); \
\
/* get synchronous part of APB */ \
if (AS(aasr).stb) \
if (aasr.stb) \
PV_TYPE##_GET_SYNCHRONOUS_PART_##ISET(aau_context); \
})
#define PREFIX_GET_AAU_CONTEXT_V2(PV_TYPE, pv_type, aau_context) \
PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, V2, v2, aau_context)
#define PREFIX_GET_AAU_CONTEXT_V5(PV_TYPE, pv_type, aau_context) \
PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, V5, v5, aau_context)
#define NATIVE_GET_AAU_CONTEXT_V2(aau_context) \
PREFIX_GET_AAU_CONTEXT_V2(NATIVE, native, aau_context)
#define NATIVE_GET_AAU_CONTEXT_V5(aau_context) \
PREFIX_GET_AAU_CONTEXT_V5(NATIVE, native, aau_context)
#define NATIVE_GET_AAU_CONTEXT(aau_context) \
({ \
#define PREFIX_GET_AAU_CONTEXT_V2(PV_TYPE, pv_type, aau_context, aasr) \
PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, V2, v2, aau_context, aasr)
#define PREFIX_GET_AAU_CONTEXT_V5(PV_TYPE, pv_type, aau_context, aasr) \
PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, V5, v5, aau_context, aasr)
#define NATIVE_GET_AAU_CONTEXT_V2(aau_context, aasr) \
PREFIX_GET_AAU_CONTEXT_V2(NATIVE, native, aau_context, aasr)
#define NATIVE_GET_AAU_CONTEXT_V5(aau_context, aasr) \
PREFIX_GET_AAU_CONTEXT_V5(NATIVE, native, aau_context, aasr)
#define NATIVE_GET_AAU_CONTEXT(aau_context, aasr) \
do { \
if (IS_AAU_ISET_V5()) { \
NATIVE_GET_AAU_CONTEXT_V5(aau_context); \
NATIVE_GET_AAU_CONTEXT_V5(aau_context, aasr); \
} else if (IS_AAU_ISET_V2()) { \
NATIVE_GET_AAU_CONTEXT_V2(aau_context); \
NATIVE_GET_AAU_CONTEXT_V2(aau_context, aasr); \
} else if (IS_AAU_ISET_GENERIC()) { \
machine.get_aau_context(aau_context); \
machine.get_aau_context(aau_context, aasr); \
} else { \
BUILD_BUG_ON(true); \
} \
})
} while (0)
/*
* It is assumed that the comparison with aasr.iab was already done and aasr
* will be set later.
*/
#define PREFIX_SET_AAU_CONTEXT(PV_TYPE, pv_type, aau_context) \
#define PREFIX_SET_AAU_CONTEXT(PV_TYPE, pv_type, aau_context, aalda, aasr) \
do { \
const e2k_aau_t *const aau = (aau_context); \
/* retrieve common APB status register */\
e2k_aasr_t aasr = aau->aasr; \
\
/* prefetch data to restore */ \
if (AS(aasr).stb) \
if (aasr.stb) \
prefetch_nospec_range(aau->aastis, sizeof(aau->aastis) + \
sizeof(aau->aasti_tags)); \
if (AS(aasr).iab) \
if (aasr.iab) \
prefetch_nospec_range(aau->aainds, sizeof(aau->aainds) + \
sizeof(aau->aaind_tags) + sizeof(aau->aaincrs) + \
sizeof(aau->aaincr_tags) + sizeof(aau->aads)); \
if (AAU_STOPPED(aasr)) \
if (AAU_STOPPED(aasr)) { \
prefetch_nospec_range(aau->aaldi, sizeof(aau->aaldi)); \
if (!cpu_has(CPU_FEAT_ISET_V6)) \
prefetch_nospec_range(aalda, sizeof(e2k_aalda_t) * AALDAS_REGS_NUM); \
} \
\
/* Make sure prefetches are issued */ \
barrier(); \
\
/* set synchronous part of APB */ \
if (AS(aasr).stb) \
if (aasr.stb) \
pv_type##_set_synchronous_part(aau); \
\
/* set descriptors & auxiliary registers */ \
if (AS(aasr).iab) \
if (aasr.iab) \
pv_type##_set_array_descriptors(aau); \
} while (0)
#define NATIVE_SET_AAU_CONTEXT(aau_context) \
PREFIX_SET_AAU_CONTEXT(NATIVE, native, aau_context)
#define NATIVE_SET_AAU_CONTEXT(aau_context, aalda, aasr) \
PREFIX_SET_AAU_CONTEXT(NATIVE, native, (aau_context), (aalda), (aasr))
#define PREFIX_SAVE_AALDAS(PV_TYPE, pv_type, aaldas_p) \
({ \
@ -638,7 +639,7 @@ static inline void read_aaldm_reg(e2k_aaldm_t *aaldm)
{
native_read_aaldm_reg(aaldm);
}
static inline void write_aaldm_reg(e2k_aaldm_t *aaldm)
static inline void write_aaldm_reg(e2k_aaldm_t aaldm)
{
native_write_aaldm_reg(aaldm);
}
@ -646,7 +647,7 @@ static inline void read_aaldv_reg(e2k_aaldv_t *aaldv)
{
native_read_aaldv_reg(aaldv);
}
static inline void write_aaldv_reg(e2k_aaldv_t *aaldv)
static inline void write_aaldv_reg(e2k_aaldv_t aaldv)
{
native_write_aaldv_reg(aaldv);
}
@ -662,24 +663,4 @@ static inline void write_aaldv_reg(e2k_aaldv_t *aaldv)
#endif /* CONFIG_KVM_GUEST_KERNEL */
#define SWITCH_GUEST_AAU_AASR(aasr, aau_context, do_switch) \
({ \
if (do_switch) { \
e2k_aasr_t aasr_worst_case; \
AW(aasr_worst_case) = 0; \
AS(aasr_worst_case).stb = 1; \
AS(aasr_worst_case).iab = 1; \
AS(aasr_worst_case).lds = AASR_STOPPED; \
(aau_context)->guest_aasr = *(aasr); \
*(aasr) = aasr_worst_case; \
} \
})
#define RESTORE_GUEST_AAU_AASR(aau_context, do_restore) \
({ \
if (do_restore) { \
(aau_context)->aasr = (aau_context)->guest_aasr; \
} \
})
#endif /* _E2K_AAU_REGS_ACCESS_H_ */

View File

@ -48,22 +48,25 @@ enum {
};
#define AAU_AASR_STB 0x20
#define AAU_AASR_IAB 0x40
typedef struct e2k_aasr_fields {
u32 reserved : 5; /* [4:0] */
u32 stb : 1; /* [5:5] */
u32 iab : 1; /* [6:6] */
u32 lds : 3; /* [9:7] */
} e2k_aasr_fields_t;
typedef union e2k_aasr { /* aadj quad-word */
e2k_aasr_fields_t fields;
struct {
u32 reserved : 5; /* [4:0] */
u32 stb : 1; /* [5:5] */
u32 iab : 1; /* [6:6] */
u32 lds : 3; /* [9:7] */
};
u32 word;
} e2k_aasr_t;
#define E2K_FULL_AASR ((e2k_aasr_t) { .stb = 1, .iab = 1, .lds = AASR_STOPPED })
/* Check up AAU state */
#define AAU_NULL(aasr) (AS(aasr).lds == AASR_NULL)
#define AAU_READY(aasr) (AS(aasr).lds == AASR_READY)
#define AAU_ACTIVE(aasr) (AS(aasr).lds == AASR_ACTIVE)
#define AAU_STOPPED(aasr) (AS(aasr).lds == AASR_STOPPED)
#define AAU_NULL(aasr) (aasr.lds == AASR_NULL)
#define AAU_READY(aasr) (aasr.lds == AASR_READY)
#define AAU_ACTIVE(aasr) (aasr.lds == AASR_ACTIVE)
#define AAU_STOPPED(aasr) (aasr.lds == AASR_STOPPED)
#define aau_has_state(aasr) (!AAU_NULL(aasr) || aasr.iab || aasr.stb)
typedef u32 e2k_aafstr_t;
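
With the bitfield struct now anonymous inside the union, the state checks read aasr.lds (and aasr.iab, aasr.stb) directly instead of going through AS(). A reduced stand-alone illustration of the pattern, with the bit layout copied from the hunk and the AASR_* values assumed for the example:

#include <stdio.h>
#include <stdint.h>

enum { AASR_NULL, AASR_READY, AASR_ACTIVE, AASR_STOPPED };	/* assumed encoding */

typedef union {
	struct {				/* anonymous: fields reachable as aasr.lds etc. */
		uint32_t reserved : 5;		/* [4:0] */
		uint32_t stb : 1;		/* [5:5] */
		uint32_t iab : 1;		/* [6:6] */
		uint32_t lds : 3;		/* [9:7] */
	};
	uint32_t word;
} e2k_aasr_t;

#define AAU_STOPPED(aasr)	((aasr).lds == AASR_STOPPED)
#define aau_has_state(aasr)	((aasr).lds != AASR_NULL || (aasr).iab || (aasr).stb)

int main(void)
{
	/* same shape as E2K_FULL_AASR in the hunk */
	e2k_aasr_t aasr = { .stb = 1, .iab = 1, .lds = AASR_STOPPED };

	printf("stopped=%d has_state=%d word=0x%x\n",
	       AAU_STOPPED(aasr), aau_has_state(aasr), aasr.word);
	return 0;
}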
@ -150,13 +153,7 @@ typedef union e2k_aalda_struct {
#define AALDIS_REGS_NUM 64
#define AALDAS_REGS_NUM 64
/*
* For virtualization, aasr might be switched to worst-case scenario (lds = AAU_STOPPED,
* iab = 1, stb = 1). In that case, the real aasr will be saved to guest_aasr
*/
typedef struct e2k_aau_context {
e2k_aasr_t aasr;
e2k_aasr_t guest_aasr;
e2k_aafstr_t aafstr;
e2k_aaldm_t aaldm;
e2k_aaldv_t aaldv;

View File

@ -27,9 +27,9 @@
# else
# ifndef __ASSEMBLY__
# include <asm/glob_regs.h>
register unsigned long long __cpu_reg DO_ASM_GET_GREG_MEMONIC(SMP_CPU_ID_GREG);
register unsigned long long __cpu_preempt_reg DO_ASM_GET_GREG_MEMONIC(SMP_CPU_ID_GREG);
# endif
# define NATIVE_HWBUG_AFTER_LD_ACQ_CPU __cpu_reg
# define NATIVE_HWBUG_AFTER_LD_ACQ_CPU ((unsigned int) __cpu_preempt_reg)
# endif
#elif defined(E2K_P2V)

View File

@ -4,7 +4,6 @@
#include <linux/compiler.h>
#include <asm/e2k_api.h>
#include <asm/alternative.h>
#include <asm/atomic_api.h>
#if CONFIG_CPU_ISET >= 6

View File

@ -26,7 +26,10 @@
* The same goes for preemption-disabled sections: these clobbers
* will forbid the compiler from moving per-cpu area address calculation out
* from them. Since disabling interrupts also disables preemption,
* we also need these clobbers when writing PSR/UPSR. */
* we also need these clobbers when writing PSR/UPSR.
*
* And of course operations on preempt_count must not be moved
* out of/into preemption disabled sections. */
#define PREEMPTION_CLOBBERS __PREEMPTION_CLOBBERS(SMP_CPU_ID_GREG, MY_CPU_OFFSET_GREG)
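
The comment above is about compiler ordering, not hardware ordering: the clobbers keep per-cpu address computation and preempt_count updates from being hoisted across the points where interrupts or preemption are toggled. A generic sketch of the same idea using an empty asm with a "memory" clobber as a compiler-only barrier (a portable analogue, not the E2K clobber list itself):

#include <stdio.h>

static int preempt_count;			/* stand-in for the per-cpu counter */

#define barrier()	asm volatile("" ::: "memory")	/* compiler-only barrier */

static void preempt_disable(void)
{
	preempt_count++;
	barrier();	/* accesses below must not be moved above this point */
}

static void preempt_enable(void)
{
	barrier();	/* accesses above must not be moved below this point */
	preempt_count--;
}

int main(void)
{
	preempt_disable();
	puts("inside the preemption-disabled section");
	preempt_enable();
	return 0;
}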
#ifdef CONFIG_DEBUG_LCC_VOLATILE_ATOMIC

View File

@ -280,12 +280,9 @@ static __always_inline s64 get_ps_copy_size(u64 cur_window_q, s64 u_pshtp_size)
return u_pshtp_size - (E2K_MAXSR - cur_window_q) * EXT_4_NR_SZ;
}
#ifdef CONFIG_CPU_HAS_FILL_INSTRUCTION
# define E2K_CF_MAX_FILL (E2K_CF_MAX_FILL_FILLC_q * 0x10)
#else
extern int cf_max_fill_return;
# define E2K_CF_MAX_FILL cf_max_fill_return
#endif
#define E2K_CF_MAX_FILL (cpu_has(CPU_FEAT_FILLC) ? \
(E2K_CF_MAX_FILL_FILLC_q * 0x10) : cf_max_fill_return)
static __always_inline s64 get_pcs_copy_size(s64 u_pcshtp_size)
{

View File

@ -0,0 +1,64 @@
#ifndef _E2K_ASM_CPU_FEATURES_H
#define _E2K_ASM_CPU_FEATURES_H
#ifndef __ASSEMBLY__
enum {
/* Hardware bugs */
CPU_HWBUG_LARGE_PAGES,
CPU_HWBUG_LAPIC_TIMER,
CPU_HWBUG_PIO_READS,
CPU_HWBUG_ATOMIC,
CPU_HWBUG_CLW,
CPU_HWBUG_PAGE_A,
CPU_HWBUG_SPURIOUS_EXC_ILL_INSTR_ADDR,
CPU_HWBUG_UNALIGNED_LOADS,
CPU_HWBUG_CANNOT_DO_DMA_IN_NEIGHBOUR_NODE,
CPU_HWBUG_DMA_AT_APIC_ADDR,
CPU_HWBUG_KERNEL_DATA_MONITOR,
CPU_HWBUG_WRITE_MEMORY_BARRIER,
CPU_HWBUG_BAD_RESET,
CPU_HWBUG_BREAKPOINT_INSTR,
CPU_HWBUG_E8C_WATCHDOG,
CPU_HWBUG_IOMMU,
CPU_HWBUG_WC_DAM,
CPU_HWBUG_TRAP_CELLAR_S_F,
CPU_HWBUG_SS,
CPU_HWBUG_AAU_AALDV,
CPU_HWBUG_LEVEL_EOI,
CPU_HWBUG_FALSE_SS,
CPU_HWBUG_SPURIOUS_EXC_DATA_DEBUG,
CPU_HWBUG_TLB_FLUSH_L1D,
CPU_HWBUG_GUEST_ASYNC_PM,
CPU_HWBUG_E16C_SLEEP,
CPU_HWBUG_L1I_STOPS_WORKING,
CPU_HWBUG_CLW_STALE_L1_ENTRY,
CPU_HWBUG_PIPELINE_FREEZE_MONITORS,
CPU_HWBUG_C3_WAIT_MA_C,
CPU_HWBUG_VIRT_SCLKM3_INTC,
CPU_HWBUG_VIRT_PUSD_PSL,
CPU_HWBUG_USD_ALIGNMENT,
CPU_HWBUG_VIRT_PSIZE_INTERCEPTION,
CPU_NO_HWBUG_SOFT_WAIT,
CPU_HWBUG_SOFT_WAIT_E8C2,
CPU_HWBUG_C3,
/* Features, not bugs */
CPU_FEAT_WC_PCI_PREFETCH,
CPU_FEAT_FLUSH_DC_IC,
CPU_FEAT_EPIC,
CPU_FEAT_TRAP_V5,
CPU_FEAT_TRAP_V6,
CPU_FEAT_QPREG,
CPU_FEAT_HW_PREFETCHER,
CPU_FEAT_SEPARATE_TLU_CACHE,
CPU_FEAT_FILLR,
CPU_FEAT_FILLC,
CPU_FEAT_ISET_V3,
CPU_FEAT_ISET_V5,
CPU_FEAT_ISET_V6,
NR_CPU_FEATURES
};
#endif
#endif
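
The new header only enumerates the bug and feature identifiers; other files in this commit query them through cpu_has() (see E2K_CF_MAX_FILL and NATIVE_FILL_HARDWARE_STACKS elsewhere in the diff). A minimal sketch of how such a query could be backed by a per-machine bitmap — the storage and initialization here are assumptions for illustration, not the kernel's actual implementation:

#include <stdio.h>
#include <stdint.h>

/* small subset of the enum above, for the example only */
enum { CPU_FEAT_EPIC, CPU_FEAT_FILLC, CPU_FEAT_ISET_V6, NR_CPU_FEATURES };

static uint64_t machine_features;		/* hypothetical per-machine bitmap */

#define cpu_has(feat)	(!!(machine_features & (1ULL << (feat))))

int main(void)
{
	machine_features = 1ULL << CPU_FEAT_FILLC;	/* pretend only FILLC is present */

	printf("FILLC:   %s\n", cpu_has(CPU_FEAT_FILLC) ? "present" : "absent");
	printf("ISET_V6: %s\n", cpu_has(CPU_FEAT_ISET_V6) ? "present" : "absent");
	return 0;
}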

View File

@ -4,7 +4,7 @@
#ifdef __KERNEL__
#include <asm/types.h>
#include <linux/types.h>
#ifndef __ASSEMBLY__
@ -619,47 +619,65 @@ typedef e2k_rwap_struct_t psp_struct_t;
* describes the full procedure chain stack memory as well as the current
* pointer to the top of a procedure chain stack memory part.
*/
typedef union {
struct {
u64 base : E2K_VA_SIZE;
u64 : 58 - E2K_VA_SIZE;
u64 p : 1;
u64 rw : 2;
u64 : 3;
};
e2k_rwap_lo_fields_t fields;
u64 word;
} e2k_pcsp_lo_t;
#define _PCSP_lo_rw rw
#define E2K_PCSR_RW_PROTECTIONS E2_RWAR_RW_ENABLE;
#define PCSP_lo_base base
#define PCSP_lo_half word
/*
* Structure of lower word
* access PCSP.lo.PCSP_lo_xxx or PCSP -> lo.PCSP_lo_xxx
* or PCSP_lo.PCSP_lo_xxx or PCSP_lo -> PCSP_lo_xxx
*/
typedef e2k_rwap_lo_struct_t e2k_pcsp_lo_t;
#define _PCSP_lo_rw E2K_RWAP_lo_rw /* [60:59] - read/write flags */
/* should be "RW" */
#define E2K_PCSR_RW_PROTECTIONS E2_RWAR_RW_ENABLE;
#define PCSP_lo_base E2K_RWAP_lo_base /* [47: 0] - base address */
#define PCSP_lo_half E2K_RWAP_lo_half /* [63: 0] - entire lower */
/* double-word of register */
/*
* Structure of high word
* access PCSP.hi.PCSP_hi_xxx or PCSP -> hi.PCSP_hi_xxx
* or PCSP_hi.PCSP_hi_xxx or PCSP_hi -> PCSP_hi_xxx
*/
typedef e2k_rwap_hi_struct_t e2k_pcsp_hi_t;
#define PCSP_hi_size E2K_RPSP_hi_size /* [63:32] - size */
#define PCSP_hi_ind E2K_RPSP_hi_ind /* [31: 0] - index for SPILL */
/* and FILL */
#define PCSP_hi_half E2K_RPSP_hi_half /* [63: 0] - entire high */
typedef union {
struct {
u64 ind : 32;
u64 size : 32;
};
e2k_rpsp_hi_fields_t fields;
u64 word;
} e2k_pcsp_hi_t;
#define PCSP_hi_size size
#define PCSP_hi_ind ind
#define PCSP_hi_half word
/*
* Structure of quad-word register
* access PCSP.PCSP_xxx or PCSP -> PCSP_xxx
*/
typedef e2k_rwap_struct_t pcsp_struct_t;
#define _PCSP_rw E2K_RWAP_rw /* [60:59] - read/write flags */
/* should be "RW" */
#define PCSP_base E2K_RWAP_base /* [47: 0] - base address */
#define PCSP_size E2K_RPSP_size /* [63:32] - size */
#define PCSP_ind E2K_RPSP_ind /* [31: 0] - index for SPILL */
/* and FILL */
#define PCSP_lo_reg E2K_RWAP_lo_reg /* [63: 0] - entire lower */
/* double-word of register */
#define PCSP_hi_reg E2K_RPSP_hi_reg /* [63: 0] - entire high */
/* double-word of register */
#define PCSP_lo_struct E2K_RWAP_lo_struct /* low register structure */
#define PCSP_hi_struct E2K_RPSP_hi_struct /* high register structure */
typedef struct {
union {
struct {
u64 base : E2K_VA_SIZE;
u64 : 58 - E2K_VA_SIZE;
u64 p : 1;
u64 rw : 2;
u64 : 3;
};
e2k_pcsp_lo_t lo;
};
union {
struct {
u64 ind : 32;
u64 size : 32;
};
e2k_pcsp_hi_t hi;
};
} pcsp_struct_t;
#define PCSP_rw rw
#define PCSP_base base
#define PCSP_size size
#define PCSP_ind ind
#define PCSP_lo_reg lo.word
#define PCSP_hi_reg hi.word
#define PCSP_lo_struct lo
#define PCSP_hi_struct hi
#endif /* !(__ASSEMBLY__) */
#define E2K_ALIGN_PCSTACK 12 /* Procedure chain stack */
@ -2398,33 +2416,19 @@ typedef union {
u32 btf : 1;
u32 gm : 1;
};
struct {
u32 v0 : 1;
u32 t0 : 1;
u32 v1 : 1;
u32 t1 : 1;
u32 v2 : 1;
u32 t2 : 1;
u32 v3 : 1;
u32 t3 : 1;
u32 bt : 1;
u32 stop : 1;
u32 btf : 1;
u32 gm : 1;
} fields;
u32 word;
} e2k_dibcr_t;
#define DIBCR_reg word
#define E2K_DIBCR_MASK(cp_num) (0x3ULL << ((cp_num) * 2))
typedef union {
typedef union e2k_dimtp {
struct {
struct {
u64 base : E2K_VA_SIZE;
u64 __pad1 : 59 - E2K_VA_SIZE;
u64 rw : 2;
u64 __pad2 : 3;
u64 base : E2K_VA_SIZE;
u64 : 59 - E2K_VA_SIZE;
u64 rw : 2;
u64 : 3;
};
struct {
u64 ind : 32;

View File

@ -19,9 +19,6 @@ extern void setup_APIC_vector_handler(int vector,
void (*handler)(struct pt_regs *), bool system, char *name);
#endif
#define E12C_CPU_VENDOR ES2_CPU_VENDOR
#define E12C_CPU_FAMILY E16C_CPU_FAMILY
#define E12C_NR_NODE_CPUS 12
#define E12C_MAX_NR_NODE_CPUS 16

View File

@ -19,9 +19,6 @@ extern void setup_APIC_vector_handler(int vector,
void (*handler)(struct pt_regs *), bool system, char *name);
#endif
#define E16C_CPU_VENDOR ES2_CPU_VENDOR
#define E16C_CPU_FAMILY 6
#define E16C_NR_NODE_CPUS 16
#define E16C_MAX_NR_NODE_CPUS 16

View File

@ -13,9 +13,6 @@ extern void boot_e1cp_setup_arch(void);
extern void e1cp_setup_machine(void);
#endif
#define E1CP_CPU_VENDOR ES2_CPU_VENDOR
#define E1CP_CPU_FAMILY ES2_CPU_FAMILY
#define E1CP_NR_NODE_CPUS 1
#define E1CP_MAX_NR_NODE_CPUS E1CP_NR_NODE_CPUS

View File

@ -19,9 +19,6 @@ extern void setup_APIC_vector_handler(int vector,
void (*handler)(struct pt_regs *), bool system, char *name);
#endif
#define E2C3_CPU_VENDOR ES2_CPU_VENDOR
#define E2C3_CPU_FAMILY E16C_CPU_FAMILY
#define E2C3_NR_NODE_CPUS 2
#define E2C3_MAX_NR_NODE_CPUS 16

View File

@ -419,4 +419,6 @@ static inline void set_mach_type_id(void)
extern unsigned long machine_serial_num;
#define ELBRUS_CPU_VENDOR "Elbrus-MCST"
#endif /* _ASM_E2K_H_ */

View File

@ -2,7 +2,11 @@
#define _E2K_API_H_
#include <linux/stringify.h>
#include <asm/alternative.h>
#include <asm/cpu_features.h>
#include <asm/cpu_regs_types.h> /* For instr_cs1_t */
#include <asm/mas.h>
#include <uapi/asm/e2k_api.h>
@ -4602,76 +4606,6 @@ do { \
__res; \
})
#if !defined CONFIG_E2K_MACHINE || \
defined CONFIG_E2K_ES2_DSP || defined CONFIG_E2K_ES2_RU || \
(defined CONFIG_E2K_E2S && defined CONFIG_NUMA)
# define WORKAROUND_WAIT_HWBUG(num) (((num) & (_st_c | _all_c | _sas)) ? \
((num) | _ma_c) : (num))
#else
# define WORKAROUND_WAIT_HWBUG(num) num
#endif
#define __E2K_WAIT(_num) \
do { \
int unused, num = WORKAROUND_WAIT_HWBUG(_num); \
instr_cs1_t cs1 = { \
.opc = CS1_OPC_WAIT, \
.param = num \
}; \
\
/* Use "asm volatile" around tricky barriers such as _ma_c, _fl_c, etc */ \
if ((_num) & ~(_st_c | _ld_c | _sas | _sal | _las | _lal | _mt)) \
asm volatile ("" ::: "memory"); \
\
/* CPU_NO_HWBUG_SOFT_WAIT: use faster workaround for "lal" barriers */ \
if ((_num) == (_ld_c | _lal) || (_num) == (_ld_c | _lal | _mt)) { \
_Pragma("no_asm_inline") \
asm NOT_VOLATILE (ALTERNATIVE( \
/* Default version - add nop 5 */ \
".word 0x00008281\n" \
".word %[cs1]\n", \
/* CPU_NO_HWBUG_SOFT_WAIT version */ \
".word 0x00008001\n" \
".word %[cs1]\n", \
%[facility]) \
: "=r" (unused) \
: [cs1] "i" (cs1.word), \
[facility] "i" (CPU_NO_HWBUG_SOFT_WAIT) \
: "memory"); \
} else { \
instr_cs1_t cs1_no_soft_barriers = { \
.opc = CS1_OPC_WAIT, \
.param = num & ~(_lal | _las | _sal | _sas) \
}; \
/* #79245 - use .word to encode relaxed barriers */ \
_Pragma("no_asm_inline") \
asm NOT_VOLATILE (ALTERNATIVE( \
/* Default version */ \
".word 0x00008001\n" \
".word %[cs1_no_soft_barriers]\n", \
/* CPU_NO_HWBUG_SOFT_WAIT version - use soft barriers */ \
".word 0x00008001\n" \
".word %[cs1]\n", \
%[facility]) \
: "=r" (unused) \
: [cs1] "i" (cs1.word), \
[cs1_no_soft_barriers] "i" (cs1_no_soft_barriers.word), \
[facility] "i" (CPU_NO_HWBUG_SOFT_WAIT) \
: "memory"); \
} \
\
/* Use "asm volatile" around tricky barriers such as _ma_c */ \
if ((_num) & ~(_st_c | _ld_c | _sas | _sal | _las | _lal | _mt)) \
asm volatile ("" ::: "memory"); \
} while (0)
#define E2K_WAIT(num) \
do { \
__E2K_WAIT(num); \
if (num & (_st_c | _ld_c | _all_c | _ma_c)) \
NATIVE_HWBUG_AFTER_LD_ACQ(); \
} while (0)
#define _mem_mod 0x2000 /* watch for modification */
#define _int 0x1000 /* stop the conveyor until interrupt */
#define _mt 0x800
@ -4688,6 +4622,92 @@ do { \
#define _all_e 0x2 /* stop until prev. operations issue all exceptions */
#define _all_c 0x1 /* stop until prev. operations complete */
#if !defined CONFIG_E2K_MACHINE || \
defined CONFIG_E2K_ES2_DSP || defined CONFIG_E2K_ES2_RU || \
(defined CONFIG_E2K_E2S && defined CONFIG_NUMA)
# define WORKAROUND_WAIT_HWBUG(num) (((num) & (_st_c | _all_c | _sas)) ? \
((num) | _ma_c) : (num))
#else
# define WORKAROUND_WAIT_HWBUG(num) num
#endif
#ifndef __ASSEMBLY__
/* We use a static inline function instead of a macro
* because otherwise the preprocessed files' size would
* increase tenfold, making compile times much worse. */
__attribute__((__always_inline__))
static inline void __E2K_WAIT(int _num)
{
int unused, num = WORKAROUND_WAIT_HWBUG(_num);
instr_cs1_t cs1 = {
.opc = CS1_OPC_WAIT,
.param = num
};
/* Use "asm volatile" around tricky barriers such as _ma_c, _fl_c, etc */
if (_num & ~(_st_c | _ld_c | _sas | _sal | _las | _lal | _mt))
asm volatile ("" ::: "memory");
/* Header dependency hell, cannot use here:
* cpu_has(CPU_HWBUG_SOFT_WAIT_E8C2)
* so just check directly for E8C2 */
if (IS_ENABLED(CONFIG_CPU_E8C2) && (num & (_sas | _sal)))
asm ("{nop}" ::: "memory");
/* CPU_NO_HWBUG_SOFT_WAIT: use faster workaround for "lal" barriers */
if (_num == (_ld_c | _lal) || _num == (_ld_c | _lal | _mt)) {
#pragma no_asm_inline
asm NOT_VOLATILE (ALTERNATIVE(
/* Default version - add "nop 5" after and a separate
* wide instruction before the barrier. */
"{nop}"
".word 0x00008281\n"
".word %[cs1]\n",
/* CPU_NO_HWBUG_SOFT_WAIT version */
".word 0x00008011\n"
".word %[cs1]\n"
".word 0x0\n"
".word 0x0\n",
%[facility])
: "=r" (unused)
: [cs1] "i" (cs1.word),
[facility] "i" (CPU_NO_HWBUG_SOFT_WAIT)
: "memory");
} else {
instr_cs1_t cs1_no_soft_barriers = {
.opc = CS1_OPC_WAIT,
.param = num & ~(_lal | _las | _sal | _sas)
};
/* #79245 - use .word to encode relaxed barriers */
#pragma no_asm_inline
asm NOT_VOLATILE (ALTERNATIVE(
/* Default version */
".word 0x00008001\n"
".word %[cs1_no_soft_barriers]\n",
/* CPU_NO_HWBUG_SOFT_WAIT version - use soft barriers */
".word 0x00008001\n"
".word %[cs1]\n",
%[facility])
: "=r" (unused)
: [cs1] "i" (cs1.word),
[cs1_no_soft_barriers] "i" (cs1_no_soft_barriers.word),
[facility] "i" (CPU_NO_HWBUG_SOFT_WAIT)
: "memory");
}
/* Use "asm volatile" around tricky barriers such as _ma_c, _fl_c, etc */
if (_num & ~(_st_c | _ld_c | _sas | _sal | _las | _lal | _mt))
asm volatile ("" ::: "memory");
}
#endif
#define E2K_WAIT(num) \
do { \
__E2K_WAIT(num); \
if (num & (_st_c | _ld_c | _all_c | _ma_c)) \
NATIVE_HWBUG_AFTER_LD_ACQ(); \
} while (0)
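
The comment at the top of the new __E2K_WAIT() explains the motivation for the macro-to-function conversion: a macro body is textually pasted at every call site, while a static always-inline function appears once per translation unit and the compiler still inlines every call. A small illustration of the pattern with a placeholder body (nothing here is E2K-specific):

/* macro version: the whole body is expanded by the preprocessor at each use */
#define WAIT_MACRO(num)					\
do {							\
	if ((num) & 1)					\
		asm volatile("" ::: "memory");		\
} while (0)

/* function version: one body per translation unit, still fully inlined */
__attribute__((__always_inline__))
static inline void wait_inline(int num)
{
	if (num & 1)
		asm volatile("" ::: "memory");
}

int main(void)
{
	WAIT_MACRO(1);		/* expands the macro body here */
	wait_inline(1);		/* only a call in the preprocessed output */
	return 0;
}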
/*
* IMPORTANT NOTE!!!
* Do not add 'sas' and 'sal' here, as they are modifiers
@ -5060,15 +5080,19 @@ static inline void E2K_SET_USER_STACK(int x)
#ifdef CONFIG_SMP
# define SMP_ONLY(...) __VA_ARGS__
# define NOT_SMP_ONLY(...)
#else
# define SMP_ONLY(...)
# define NOT_SMP_ONLY(...) __VA_ARGS__
#endif
#ifdef CONFIG_CPU_HAS_FILL_INSTRUCTION
# define NATIVE_FILL_HARDWARE_STACKS() \
asm volatile ("{fillc; fillr}" ::: "memory")
#else
# define NATIVE_FILL_HARDWARE_STACKS() \
#define NATIVE_FILL_HARDWARE_STACKS__HW() \
do { \
/* "{fillc; fillr}" */ \
_Pragma("no_asm_inline") \
asm volatile (".word 0x00008001; .word 0x7000000c" ::: "memory"); \
} while (0)
#define NATIVE_FILL_HARDWARE_STACKS__SW() \
do { \
asm volatile ( \
"{\n" \
@ -5106,8 +5130,14 @@ do { \
offsetof(struct task_struct, thread_info)),) \
[task_ti_offset] "i" (offsetof(struct task_struct, thread_info)) \
: "ctpr1", "ctpr3", "memory"); \
/* If the CPU supports only FILLC but not FILLR, then we use the return \
* trick above to fill the RF and the FILLC instruction to fill the CF. */ \
if (cpu_has(CPU_FEAT_FILLC)) { \
/* "{fillc}" */ \
_Pragma("no_asm_inline") \
asm volatile (".word 0x00008001; .word 0x70000008" ::: "memory"); \
} \
} while (0)
#endif
#ifndef __ASSEMBLY__
@ -5354,10 +5384,18 @@ do { \
__E2K_JUMP_FUNC_WITH_ARGUMENTS_8(FUNC_TO_NAME(func), \
arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)
#ifdef CONFIG_CPU_HWBUG_IBRANCH
# define WORKAROUND_IBRANCH_HWBUG "{nop} {nop} \n"
#else
# define WORKAROUND_IBRANCH_HWBUG
#endif
#define E2K_GOTO_ARG0(func) \
do { \
_Pragma("no_asm_inline") \
asm volatile ("ibranch " #func "\n" :: ); \
asm volatile ("{ibranch " #func "}\n" \
WORKAROUND_IBRANCH_HWBUG \
:: ); \
} while (0)
#define E2K_GOTO_ARG1(label, arg1) \
do { \
@ -5367,6 +5405,7 @@ _Pragma("no_asm_inline") \
"addd \t 0, %0, %%dr0\n" \
"ibranch \t" #label "\n" \
"}\n" \
WORKAROUND_IBRANCH_HWBUG \
: \
: "ri" ((__e2k_u64_t) (arg1)) \
); \
@ -5380,6 +5419,7 @@ _Pragma("no_asm_inline") \
"addd \t 0, %1, %%dr1\n" \
"ibranch \t" #label "\n" \
"}\n" \
WORKAROUND_IBRANCH_HWBUG \
: \
: "ri" ((__e2k_u64_t) (arg1)), \
"ri" ((__e2k_u64_t) (arg2)) \
@ -5395,6 +5435,7 @@ _Pragma("no_asm_inline") \
"addd \t 0, %2, %%dr2\n" \
"ibranch \t" #label "\n" \
"}\n" \
WORKAROUND_IBRANCH_HWBUG \
: \
: "ri" ((__e2k_u64_t) (arg1)), \
"ri" ((__e2k_u64_t) (arg2)), \
@ -5412,6 +5453,7 @@ _Pragma("no_asm_inline") \
"addd \t 0, %3, %%dr3\n" \
"ibranch \t" #label "\n" \
"}\n" \
WORKAROUND_IBRANCH_HWBUG \
: \
: "ri" ((__e2k_u64_t) (arg1)), \
"ri" ((__e2k_u64_t) (arg2)), \
@ -5419,10 +5461,9 @@ _Pragma("no_asm_inline") \
"ri" ((__e2k_u64_t) (arg4)) \
); \
} while (false)
#define E2K_GOTO_AND_RETURN_ARG6(label, \
arg1, arg2, arg3, arg4, arg5, arg6) \
#define E2K_GOTO_ARG7(label, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
do { \
_Pragma("no_asm_inline") \
_Pragma("no_asm_inline") \
asm volatile ("\n" \
"{\n" \
"addd \t 0, %0, %%dr0\n" \
@ -5433,8 +5474,8 @@ _Pragma("no_asm_inline") \
"addd \t 0, %5, %%dr5\n" \
"}\n" \
"{\n" \
"rrd \t %%nip, %%dr6\n" \
"ibranch \t" #label \
"addd \t 0, %6, %%dr6\n" \
"ibranch \t" #label "\n" \
"}\n" \
: \
: "ri" ((__e2k_u64_t) (arg1)), \
@ -5442,7 +5483,107 @@ _Pragma("no_asm_inline") \
"ri" ((__e2k_u64_t) (arg3)), \
"ri" ((__e2k_u64_t) (arg4)), \
"ri" ((__e2k_u64_t) (arg5)), \
"ri" ((__e2k_u64_t) (arg6)) \
"ri" ((__e2k_u64_t) (arg6)), \
"ri" ((__e2k_u64_t) (arg7)) \
: "r0", "r1", "r2", "r3", "r4", "r5", "r6" \
); \
} while (false)
#define E2K_SCALL_ARG7(trap_num, ret, sys_num, arg1, arg2, arg3, \
arg4, arg5, arg6) \
do { \
_Pragma("no_asm_inline") \
asm volatile ("\n" \
"{\n" \
"addd \t 0, %[_sys_num], %%db[0]\n" \
"addd \t 0, %[_arg1], %%db[1]\n" \
"addd \t 0, %[_arg2], %%db[2]\n" \
"addd \t 0, %[_arg3], %%db[3]\n" \
"addd \t 0, %[_arg4], %%db[4]\n" \
"addd \t 0, %[_arg5], %%db[5]\n" \
"}\n" \
"{\n" \
"addd \t 0, %[_arg6], %%db[6]\n" \
"sdisp \t %%ctpr1, 0x"#trap_num"\n" \
"}\n" \
"{\n" \
"call %%ctpr1, wbs = %#\n" \
"}\n" \
"{\n" \
"addd,0,sm 0x0, %%db[0], %[_ret]\n" \
"}\n" \
: [_ret] "=r" (ret) \
: [_sys_num] "ri" ((__e2k_u64_t) (sys_num)), \
[_arg1] "ri" ((__e2k_u64_t) (arg1)), \
[_arg2] "ri" ((__e2k_u64_t) (arg2)), \
[_arg3] "ri" ((__e2k_u64_t) (arg3)), \
[_arg4] "ri" ((__e2k_u64_t) (arg4)), \
[_arg5] "ri" ((__e2k_u64_t) (arg5)), \
[_arg6] "ri" ((__e2k_u64_t) (arg6)) \
: "b[0]", "b[1]", "b[2]", "b[3]", "b[4]", "b[5]", \
"b[6]", "ctpr1" \
); \
} while (false)
#define E2K_GOTO_AND_RETURN_ARG6(label, \
arg1, arg2, arg3, arg4, arg5, arg6) \
do { \
_Pragma("no_asm_inline") \
asm volatile ("\n" \
"{\n" \
"addd \t 0, %1, %%dr1\n" \
"addd \t 0, %2, %%dr2\n" \
"addd \t 0, %3, %%dr3\n" \
"addd \t 0, %4, %%dr4\n" \
"addd \t 0, %5, %%dr5\n" \
"addd \t 0, %6, %%dr6\n" \
"}\n" \
"{\n" \
"addd \t 0, %0, %%dr0\n" \
"ibranch \t" #label "\n" \
"}\n" \
WORKAROUND_IBRANCH_HWBUG \
: \
: "i" ((__e2k_u64_t) (arg1)), \
"ri" ((__e2k_u64_t) (arg2)), \
"ri" ((__e2k_u64_t) (arg3)), \
"ri" ((__e2k_u64_t) (arg4)), \
"ri" ((__e2k_u64_t) (arg5)), \
"ri" ((__e2k_u64_t) (arg6)), \
"ri" ((__e2k_u64_t) (arg7)) \
); \
} while (false)
#define E2K_SCALL_ARG7(trap_num, ret, sys_num, arg1, arg2, arg3, \
arg4, arg5, arg6) \
do { \
_Pragma("no_asm_inline") \
asm volatile ("\n" \
"{\n" \
"addd \t 0, %[_sys_num], %%db[0]\n" \
"addd \t 0, %[_arg1], %%db[1]\n" \
"addd \t 0, %[_arg2], %%db[2]\n" \
"addd \t 0, %[_arg3], %%db[3]\n" \
"addd \t 0, %[_arg4], %%db[4]\n" \
"addd \t 0, %[_arg5], %%db[5]\n" \
"}\n" \
"{\n" \
"addd \t 0, %[_arg6], %%db[6]\n" \
"sdisp \t %%ctpr1, 0x"#trap_num"\n" \
"}\n" \
"{\n" \
"call %%ctpr1, wbs = %#\n" \
"}\n" \
"{\n" \
"addd,0,sm 0x0, %%db[0], %[_ret]\n" \
"}\n" \
: [_ret] "=r" (ret) \
: [_sys_num] "ri" ((__e2k_u64_t) (sys_num)), \
[_arg1] "ri" ((__e2k_u64_t) (arg1)), \
[_arg2] "ri" ((__e2k_u64_t) (arg2)), \
[_arg3] "ri" ((__e2k_u64_t) (arg3)), \
[_arg4] "ri" ((__e2k_u64_t) (arg4)), \
[_arg5] "ri" ((__e2k_u64_t) (arg5)), \
[_arg6] "ri" ((__e2k_u64_t) (arg6)) \
: "b[0]", "b[1]", "b[2]", "b[3]", "b[4]", "b[5]", \
"b[6]", "ctpr1" \
); \
} while (false)
#define E2K_COND_GOTO(label, cond, pred_no) \
@ -5453,6 +5594,7 @@ _Pragma("no_asm_inline") \
"\n{" \
"\nibranch \t" #label " ? ~%%pred" #pred_no \
"\n}" \
WORKAROUND_IBRANCH_HWBUG \
: \
: "ri" ((__e2k_u32_t) (cond)) \
: "pred" #pred_no \
@ -5467,6 +5609,7 @@ _Pragma("no_asm_inline") \
"\naddd \t 0, %1, %%dr0 ? ~%%pred" #pred_no \
"\nibranch \t" #label " ? ~%%pred" #pred_no \
"\n}" \
WORKAROUND_IBRANCH_HWBUG \
: \
: "ri" ((__e2k_u32_t) (cond)), \
"ri" ((__e2k_u64_t) (arg1)) \
@ -5483,6 +5626,7 @@ _Pragma("no_asm_inline") \
"\naddd \t 0, %2, %%dr1 ? ~%%pred" #pred_no \
"\nibranch \t" #label " ? ~%%pred" #pred_no \
"\n}" \
WORKAROUND_IBRANCH_HWBUG \
: \
: "ri" ((__e2k_u32_t) (cond)), \
"ri" ((__e2k_u64_t) (arg1)), \
@ -6875,6 +7019,62 @@ do { \
"{call %%ctpr2, wbs=%#}\n" \
::: "call"); \
} while (0)
/*
* Arithmetic operations that are atomic with regard to interrupts.
* I.e. an interrupt can arrive only before or after the operation.
*/
#define E2K_INSFD_ATOMIC(src1, src2, src3_dst) \
do { \
_Pragma("no_asm_inline") \
asm ("insfd %[new_value], %[insf_params], %[reg], %[reg]" \
: [reg] "+r" (src3_dst) \
: [insf_params] "i" (src2), \
[new_value] "ir" (src1)); \
} while (0)
#define E2K_ADDD_ATOMIC(src1_dst, src2) \
do { \
_Pragma("no_asm_inline") \
asm ("addd %[reg], %[val], %[reg]" \
: [reg] "+r" (src1_dst) \
: [val] "ir" (src2)); \
} while (0)
#define E2K_SUBD_ATOMIC(src1_dst, src2) \
do { \
_Pragma("no_asm_inline") \
asm ("subd %[reg], %[val], %[reg]" \
: [reg] "+r" (src1_dst) \
: [val] "ir" (src2)); \
} while (0)
#define E2K_SUBD_ATOMIC__SHRD32(src1_dst, src2, _old) \
do { \
asm ("{subd %[reg], %[val], %[reg]\n" \
" shrd %[reg], 32, %[old]}" \
: [reg] "+r" (src1_dst), \
[old] "=r" (_old) \
: [val] "i" (src2)); \
} while (0)
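
These wrappers exist because a plain counter update can be compiled into separate load, add and store instructions, and an interrupt landing between them would work with a stale value; forcing a single insfd/addd/subd means the interrupt is delivered either before or after the whole read-modify-write. A compiler-agnostic sketch of the failure mode they rule out (pure C, no E2K asm, names are illustrative):

#include <stdio.h>

static volatile unsigned long counter;

/* what the wrappers forbid: an open-coded read-modify-write that the compiler
 * may split into load / add / store; an interrupt handler that also updates
 * 'counter' can run between those steps and its change is lost when the stale
 * sum is stored back */
static void unsafe_add(unsigned long val)
{
	unsigned long tmp = counter;	/* load */
	tmp += val;			/* modify */
	counter = tmp;			/* store (possibly stale) */
}

/* E2K_ADDD_ATOMIC instead emits one "addd" on the register holding the value,
 * so there is no window in which an interrupt can observe a half-done update */

int main(void)
{
	unsafe_add(1);
	printf("counter = %lu\n", counter);
	return 0;
}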
#endif /* __ASSEMBLY__ */
#define DECOMPRESSOR_READ_CORE_MODE() \
({ \
e2k_core_mode_t __core_mode; \
register u64 __value asm("b[0]"); \
\
/* Cannot use "rrd %core_mode" here since the file is compiled \
* for generic architecture so use hard-coded read */ \
_Pragma("no_asm_inline") \
asm (".word 0x04100011\n" \
".word 0x3f04c000\n" \
".word 0x01c00000\n" \
".word 0x00000000\n" \
: "=r" (__value) ::); \
__core_mode.word = __value; \
__core_mode; \
})
#endif /* _E2K_API_H_ */

View File

@ -82,8 +82,8 @@ typedef int (*parse_chain_fn_t)(e2k_mem_crs_t *crs,
int flags, void *arg);
#define PCS_USER 0x1
#define PCS_OPEN_IRQS 0x2
extern notrace int parse_chain_stack(int flags, struct task_struct *p,
parse_chain_fn_t func, void *arg);
extern notrace long parse_chain_stack(int flags, struct task_struct *p,
parse_chain_fn_t func, void *arg);
extern void *kernel_symtab;
@ -198,8 +198,6 @@ extern void print_chain_stack(struct stack_regs *regs,
int show_reg_window);
extern void copy_stack_regs(struct task_struct *task,
const struct pt_regs *limit_regs, struct stack_regs *regs);
extern int parse_chain_stack(int flags, struct task_struct *p,
parse_chain_fn_t func, void *arg);
extern struct stack_regs stack_regs_cache[NR_CPUS];
extern int debug_userstack;
@ -607,7 +605,7 @@ static inline int set_hardware_data_breakpoint(u64 addr, u64 size,
e2k_dibcr_t dibcr;
dibcr = READ_DIBCR_REG();
AS(dibcr).stop = 1;
dibcr.stop = 1;
WRITE_DIBCR_REG(dibcr);
}
@ -746,17 +744,16 @@ print_aau_regs(char *str, e2k_aau_t *context, struct pt_regs *regs,
"ctpr2 = 0x%llx\n"
"lsr = 0x%llx\n"
"ilcr = 0x%llx\n",
AW(context->aasr),
AAU_NULL(context->aasr) ? "NULL" :
AAU_READY(context->aasr) ? "READY" :
AAU_ACTIVE(context->aasr) ? "ACTIVE" :
AAU_STOPPED(context->aasr) ? "STOPPED":
AW(regs->aasr),
AAU_NULL(regs->aasr) ? "NULL" :
AAU_READY(regs->aasr) ? "READY" :
AAU_ACTIVE(regs->aasr) ? "ACTIVE" :
AAU_STOPPED(regs->aasr) ? "STOPPED" :
"undefined",
AS(context->aasr).iab,
AS(context->aasr).stb,
regs->aasr.iab, regs->aasr.stb,
AW(regs->ctpr2), regs->lsr, regs->ilcr);
if (AAU_STOPPED(context->aasr)) {
if (AAU_STOPPED(regs->aasr)) {
pr_info("aaldv = 0x%llx\n"
"aaldm = 0x%llx\n",
AW(context->aaldv), AW(context->aaldm));
@ -767,7 +764,7 @@ print_aau_regs(char *str, e2k_aau_t *context, struct pt_regs *regs,
"AALDM will not be printed\n");
}
if (AS(context->aasr).iab) {
if (regs->aasr.iab) {
for (i = 0; i < 32; i++) {
pr_info("aad[%d].hi = 0x%llx ", i,
AW(context->aads[i]).hi);
@ -794,7 +791,7 @@ print_aau_regs(char *str, e2k_aau_t *context, struct pt_regs *regs,
"AAINCR, AAINCR_TAGS\n");
}
if (AS(context->aasr).stb) {
if (regs->aasr.stb) {
for (i = 0; i < 16; i++) {
pr_info("aasti[%d] = 0x%llx\n", i, (old_iset) ?
(u64) (u32) context->aastis[i] :

View File

@ -172,7 +172,7 @@ typedef union { /* Common array pointer */
long lo;
long hi;
} word;
} e2k_ptr_t;
} __aligned(16) e2k_ptr_t;
#define R_ENABLE 0x1
#define W_ENABLE 0x2

View File

@ -20,9 +20,6 @@ extern void setup_APIC_vector_handler(int vector,
extern void sic_error_interrupt(struct pt_regs *regs);
#endif
#define E2S_CPU_VENDOR ES2_CPU_VENDOR
#define E2S_CPU_FAMILY ES2_CPU_FAMILY
#define E2S_NR_NODE_CPUS 4
#define E2S_MAX_NR_NODE_CPUS E2S_NR_NODE_CPUS

View File

@ -20,9 +20,6 @@ extern void setup_APIC_vector_handler(int vector,
extern void sic_error_interrupt(struct pt_regs *regs);
#endif
#define E8C_CPU_VENDOR ES2_CPU_VENDOR
#define E8C_CPU_FAMILY ES2_CPU_FAMILY
#define E8C_NR_NODE_CPUS 8
#define E8C_MAX_NR_NODE_CPUS 16

View File

@ -20,9 +20,6 @@ extern void setup_APIC_vector_handler(int vector,
extern void sic_error_interrupt(struct pt_regs *regs);
#endif
#define E8C2_CPU_VENDOR ES2_CPU_VENDOR
#define E8C2_CPU_FAMILY 5
#define E8C2_NR_NODE_CPUS E8C_NR_NODE_CPUS
#define E8C2_MAX_NR_NODE_CPUS E8C_MAX_NR_NODE_CPUS

View File

@ -5,8 +5,8 @@
* Constants for e2k EPICs (CEPIC, IOEPIC)
*/
#define IO_EPIC_DEFAULT_PHYS_BASE 0xfec00000
#define EPIC_DEFAULT_PHYS_BASE 0xfee00000
#define IO_EPIC_DEFAULT_PHYS_BASE 0xfec00000UL
#define EPIC_DEFAULT_PHYS_BASE 0xfee00000UL
#include <asm-l/epicdef.h>

View File

@ -22,9 +22,6 @@ extern void setup_APIC_vector_handler(int vector,
extern void eldsp_interrupt(struct pt_regs *regs);
#endif
#define ES2_CPU_VENDOR "Elbrus-MCST"
#define ES2_CPU_FAMILY 4
#define ES2_NR_NODE_CPUS 2
#define ES2_MAX_NR_NODE_CPUS 4

View File

@ -9,6 +9,9 @@
#include <asm/sclkr.h>
#include <asm/clkr.h>
#include <asm/trap_table.h>
#include <asm/gregs.h>
#include <asm/hw_stacks.h>
#include <uapi/asm/ucontext.h>
struct fast_syscalls_data {
struct timekeeper *tk;
@ -41,6 +44,7 @@ int native_do_fast_clock_gettime(const clockid_t which_clock,
int native_fast_sys_clock_gettime(const clockid_t which_clock,
struct timespec __user *tp);
int native_do_fast_gettimeofday(struct timeval *tv);
int native_do_fast_sys_set_return(u64 ip, int flags);
int native_fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize);
#ifdef CONFIG_KVM_GUEST_KERNEL
@ -58,22 +62,103 @@ do_fast_clock_gettime(const clockid_t which_clock, struct timespec *tp)
return native_do_fast_clock_gettime(which_clock, tp);
}
static inline int
fast_sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
{
return native_fast_sys_clock_gettime(which_clock, tp);
}
static inline int
do_fast_gettimeofday(struct timeval *tv)
{
return native_do_fast_gettimeofday(tv);
}
static inline int
fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize)
do_fast_sys_set_return(u64 ip, int flags)
{
return native_fast_sys_siggetmask(oset, sigsetsize);
return native_do_fast_sys_set_return(ip, flags);
}
#ifdef CONFIG_KVM_HOST_MODE
extern long ret_from_fast_sys_call(void);
static inline long kvm_return_from_fast_syscall(thread_info_t *ti, long arg1)
{
/* Restore vcpu state reg old value (guest user) */
HOST_VCPU_STATE_REG_RESTORE(ti);
/* TODO: Cleanup guest kernel's pgds in shadow page table */
/* Get current parameters of top chain stack frame */
e2k_cr0_lo_t cr0_lo = READ_CR0_LO_REG();
e2k_cr0_hi_t cr0_hi = READ_CR0_HI_REG();
e2k_cr1_lo_t cr1_lo = READ_CR1_LO_REG();
e2k_cr1_hi_t cr1_hi = READ_CR1_HI_REG();
/*
* Correct ip in current chain stack frame to return to guest user
* through special trap function ret_from_fast_syscall_trampoline
*/
AS(cr0_lo).pf = -1ULL;
AS(cr0_hi).ip = ((u64)ret_from_fast_sys_call) >> 3;
AS(cr1_lo).psr = AW(E2K_KERNEL_PSR_DISABLED);
AS(cr1_lo).cui = KERNEL_CODES_INDEX;
WRITE_CR0_LO_REG(cr0_lo);
WRITE_CR0_HI_REG(cr0_hi);
WRITE_CR1_LO_REG(cr1_lo);
WRITE_CR1_HI_REG(cr1_hi);
return arg1;
}
static inline long kvm_set_return_user_ip(thread_info_t *gti, u64 ip, int flags)
{
e2k_pcsp_lo_t pcsp_lo;
e2k_pcsp_hi_t pcsp_hi;
e2k_cr0_hi_t cr0_hi;
e2k_mem_crs_t *frame, *base;
u64 prev_ip;
E2K_FLUSHC;
if (unlikely(flags))
return -EINVAL;
if (unlikely(ip >= USER_DS.seg))
return -EFAULT;
pcsp_hi = READ_PCSP_HI_REG(); /* We don't use %pcsp_hi.size */
pcsp_lo = READ_PCSP_LO_REG();
base = (e2k_mem_crs_t *) GET_PCS_BASE(&gti->u_hw_stack);
frame = (e2k_mem_crs_t *) (AS(pcsp_lo).base + AS(pcsp_hi).ind);
do {
--frame;
cr0_hi = frame->cr0_hi;
prev_ip = AS(cr0_hi).ip << 3;
} while (unlikely(prev_ip >= GUEST_TASK_SIZE && frame > base));
/* No user frames above? */
if (unlikely(prev_ip >= GUEST_TASK_SIZE))
return -EPERM;
/* Modify stack */
AS(cr0_hi).ip = ip >> 3;
frame->cr0_hi = cr0_hi;
return 0;
}
#endif /* CONFIG_KVM_HOST_MODE */
/* trap table entry started by direct branch (it is closer to fast system */
/* call wirthout switch and use user local data stack */
#define goto_ttable_entry1_args3(sys_num, arg1, arg2, ret) \
E2K_GOTO_ARG7(native_ttable_entry1, sys_num, arg1, arg2, 0, 0, 0, 0)
#define goto_ttable_entry1_args4(sys_num, arg1, arg2, arg3, ret) \
E2K_GOTO_ARG7(native_ttable_entry1, sys_num, arg1, arg2, arg3, 0, 0, 0)
#define goto_ttable_entry3_args3(sys_num, arg1, arg2, ret) \
E2K_GOTO_ARG7(native_ttable_entry3, sys_num, arg1, arg2, 0, 0, 0, 0)
#define goto_ttable_entry3_args4(sys_num, arg1, arg2, arg3, ret) \
E2K_GOTO_ARG7(native_ttable_entry3, sys_num, arg1, arg2, arg3, 0, 0, 0)
#endif /* ! CONFIG_KVM_GUEST_KERNEL */
/*
@ -209,29 +294,18 @@ typedef long (*ttable_entry_args4)(int sys_num, u64 arg1, u64 arg2, u64 arg3);
#define ttable_entry3_args4(sys_num, arg1, arg2) \
((ttable_entry_args4)(get_ttable_entry3))(sys_num, arg1, arg2, arg3)
/* trap table entry started by a direct branch (it is closer to a fast system */
/* call without a switch and uses the user local data stack) */
#define goto_ttable_entry_args3(entry_label, sys_num, arg1, arg2) \
E2K_GOTO_ARG3(entry_label, sys_num, arg1, arg2)
#define goto_ttable_entry_args4(entry_label, sys_num, arg1, arg2, arg3) \
E2K_GOTO_ARG4(entry_label, sys_num, arg1, arg2, arg3)
#define goto_ttable_entry3_args3(sys_num, arg1, arg2) \
goto_ttable_entry_args3(ttable_entry3, sys_num, arg1, arg2)
#define goto_ttable_entry3_args4(sys_num, arg1, arg2, arg3) \
goto_ttable_entry_args4(ttable_entry3, sys_num, arg1, arg2, arg3)
#define ttable_entry_clock_gettime(which, time) \
/* ibranch */ goto_ttable_entry3_args3(__NR_clock_gettime, which, time)
#define ttable_entry_clock_gettime(which, time, ret) \
/* ibranch */ goto_ttable_entry3_args3(__NR_clock_gettime, which, time, ret)
/* call ttable_entry3_args3(__NR_clock_gettime, which, time) */
#define ttable_entry_gettimeofday(tv, tz) \
/* ibranch */ goto_ttable_entry3_args3(__NR_gettimeofday, tv, tz)
#define ttable_entry_gettimeofday(tv, tz, ret) \
/* ibranch */ goto_ttable_entry3_args3(__NR_gettimeofday, tv, tz, ret)
/* ttable_entry3_args3(__NR_gettimeofday, tv, tz) */
#define ttable_entry_sigprocmask(how, nset, oset) \
/* ibranch */ goto_ttable_entry3_args4(__NR_sigprocmask, how, nset, oset)
#define ttable_entry_sigprocmask(how, nset, oset, ret) \
/* ibranch */ goto_ttable_entry3_args4(__NR_sigprocmask, how, \
nset, oset, ret)
/* ttable_entry3_args4(__NR_sigprocmask, how, nset, oset) */
#define ttable_entry_getcpu(cpup, nodep, unused) \
/* ibranch */ goto_ttable_entry3_args4(__NR_getcpu, cpup, nodep, unused)
#define ttable_entry_getcpu(cpup, nodep, unused, ret) \
/* ibranch */ goto_ttable_entry3_args4(__NR_getcpu, cpup, nodep, unused, ret)
/* ttable_entry3_args4(__NR_getcpu, cpup, nodep, unused) */
static inline int
@ -242,18 +316,13 @@ FAST_SYS_CLOCK_GETTIME(const clockid_t which_clock, struct timespec __user *tp)
prefetch_nospec(&fsys_data);
#ifdef CONFIG_KVM_HOST_MODE
if (unlikely(test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE)))
ttable_entry_clock_gettime((u64) which_clock, (u64) tp);
#endif
tp = (typeof(tp)) ((u64) tp & E2K_VA_MASK);
if (unlikely((u64) tp + sizeof(struct timespec) > ti->addr_limit.seg))
return -EFAULT;
r = do_fast_clock_gettime(which_clock, tp);
if (unlikely(r))
ttable_entry_clock_gettime((u64) which_clock, (u64) tp);
ttable_entry_clock_gettime(which_clock, tp, r);
return r;
}
@ -278,9 +347,6 @@ FAST_SYS_SIGGETMASK(u64 __user *oset, size_t sigsetsize)
{
struct thread_info *const ti = READ_CURRENT_REG();
struct task_struct *task = thread_info_task(ti);
#ifdef CONFIG_KVM_HOST_MODE
bool guest = test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE);
#endif
u64 set;
set = task->blocked.sig[0];
@ -288,11 +354,6 @@ FAST_SYS_SIGGETMASK(u64 __user *oset, size_t sigsetsize)
if (unlikely(sigsetsize != 8))
return -EINVAL;
#ifdef CONFIG_KVM_HOST_MODE
if (unlikely(guest))
ttable_entry_sigprocmask((u64) 0, (u64) NULL, (u64) oset);
#endif
oset = (typeof(oset)) ((u64) oset & E2K_VA_MASK);
if (unlikely((u64) oset + sizeof(sigset_t) > ti->addr_limit.seg))
return -EFAULT;
@ -310,9 +371,7 @@ struct getcpu_cache;
int fast_sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
struct getcpu_cache __user *unused);
int fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize);
struct ucontext;
int fast_sys_getcontext(struct ucontext __user *ucp, size_t sigsetsize);
int fast_sys_set_return(u64 ip, int flags);
struct compat_timespec;
int compat_fast_sys_clock_gettime(const clockid_t which_clock,
@ -321,7 +380,6 @@ struct compat_timeval;
int compat_fast_sys_gettimeofday(struct compat_timeval __user *tv,
struct timezone __user *tz);
int compat_fast_sys_siggetmask(u32 __user *oset, size_t sigsetsize);
struct ucontext_32;
int compat_fast_sys_getcontext(struct ucontext_32 __user *ucp,
size_t sigsetsize);
int compat_fast_sys_set_return(u32 ip, int flags);
@ -333,5 +391,97 @@ int protected_fast_sys_gettimeofday(u32 tags,
int protected_fast_sys_getcpu(u32 tags, u64 arg2, u64 arg3, u64 arg4, u64 arg5);
int protected_fast_sys_siggetmask(u32 tags, u64 arg2, u64 arg3, size_t sigsetsize);
int protected_fast_sys_getcontext(u32 tags, u64 arg2, u64 arg3, size_t sigsetsize);
#endif /* _ASM_E2K_FAST_SYSCALLS_H */
/* Inlined handlers for fast syscalls */
notrace __interrupt __section(.entry_handlers)
static inline int _fast_sys_gettimeofday(struct timeval __user *__restrict tv,
struct timezone __user *__restrict tz)
{
struct thread_info *const ti = READ_CURRENT_REG();
int ret;
prefetch_nospec(&fsys_data);
tv = (typeof(tv)) ((u64) tv & E2K_VA_MASK);
tz = (typeof(tz)) ((u64) tz & E2K_VA_MASK);
if (unlikely((u64) tv + sizeof(struct timeval) > ti->addr_limit.seg
|| (u64) tz + sizeof(struct timezone)
> ti->addr_limit.seg))
return -EFAULT;
if (likely(tv)) {
ret = do_fast_gettimeofday(tv);
if (unlikely(ret))
ttable_entry_gettimeofday((u64) tv, (u64) tz, ret);
} else {
ret = 0;
}
if (tz) {
tz->tz_minuteswest = sys_tz.tz_minuteswest;
tz->tz_dsttime = sys_tz.tz_dsttime;
}
return ret;
}
notrace __interrupt __section(.entry_handlers)
static inline int _fast_sys_clock_gettime(const clockid_t which_clock,
struct timespec __user *tp)
{
return FAST_SYS_CLOCK_GETTIME(which_clock, tp);
}
notrace __interrupt __section(.entry_handlers)
static inline int _fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize)
{
return FAST_SYS_SIGGETMASK(oset, sigsetsize);
}
notrace __interrupt __section(.entry_handlers)
static inline int _fast_sys_getcontext(struct ucontext __user *ucp,
size_t sigsetsize)
{
struct thread_info *const ti = READ_CURRENT_REG();
struct task_struct *task = thread_info_task(ti);
register u64 pcsp_lo, pcsp_hi;
register u32 fpcr, fpsr, pfpfr;
u64 set, key;
BUILD_BUG_ON(sizeof(task->blocked.sig[0]) != 8);
set = task->blocked.sig[0];
if (unlikely(sigsetsize != 8))
return -EINVAL;
ucp = (typeof(ucp)) ((u64) ucp & E2K_VA_MASK);
if (unlikely((u64) ucp + sizeof(struct ucontext) > ti->addr_limit.seg))
return -EFAULT;
key = context_ti_key_fast_syscall(ti);
E2K_GETCONTEXT(fpcr, fpsr, pfpfr, pcsp_lo, pcsp_hi);
/* We want stack to point to user frame that called us */
pcsp_hi -= SZ_OF_CR;
*((u64 *) &ucp->uc_sigmask) = set;
ucp->uc_mcontext.sbr = key;
ucp->uc_mcontext.pcsp_lo = pcsp_lo;
ucp->uc_mcontext.pcsp_hi = pcsp_hi;
ucp->uc_extra.fpcr = fpcr;
ucp->uc_extra.fpsr = fpsr;
ucp->uc_extra.pfpfr = pfpfr;
return 0;
}
notrace __interrupt __section(.entry_handlers)
static inline int fast_sys_set_return(u64 ip, int flags)
{
return do_fast_sys_set_return(ip, flags);
}
#endif /* _ASM_E2K_FAST_SYSCALLS_H */

View File

@ -1 +0,0 @@
#include <asm-generic/fcntl.h>

View File

@ -9,8 +9,6 @@
#ifdef __KERNEL__
#include <linux/types.h>
/*
 * Map of global registers used for user purposes
*/

View File

@ -1 +0,0 @@
#include <asm-generic/ioctl.h>

View File

@ -1,17 +0,0 @@
#ifndef _E2K_IOCTLS_H_
#define _E2K_IOCTLS_H_
/*
* We are too far from real ioctl handling and it is difficult to predict
 * any errors now, so the i386/ia64 ioctl definitions are taken as the basis.
*/
#include <asm/ioctl.h>
#include <asm-generic/ioctls.h>
#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
#define TIOCGDB 0x547F /* enable GDB stub mode on this tty */
#endif /* _E2K_IOCTLS_H_ */

View File

@ -1,28 +0,0 @@
#ifndef _E2K_IPCBUF_H_
#define _E2K_IPCBUF_H_
/*
* The ipc64_perm structure for E2K architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 32-bit seq
* - 2 miscellaneous 64-bit values
*/
struct ipc64_perm
{
__kernel_key_t key;
__kernel_uid_t uid;
__kernel_gid_t gid;
__kernel_uid_t cuid;
__kernel_gid_t cgid;
__kernel_mode_t mode;
unsigned short seq;
unsigned short __pad1;
unsigned long __unused1;
unsigned long __unused2;
};
#endif /* _E2K_IPCBUF_H_ */

View File

@ -11,12 +11,11 @@ enum die_val {
DIE_BREAKPOINT
};
extern void printk_address(unsigned long address, int reliable);
extern void printk_address(unsigned long address, int reliable) __cold;
extern void show_trace(struct task_struct *t, struct pt_regs *regs,
unsigned long *sp, unsigned long bp);
extern void __show_regs(struct pt_regs *regs, int all);
extern void show_regs(struct pt_regs *regs);
extern unsigned long oops_begin(void);
extern void oops_end(unsigned long, struct pt_regs *, int signr);
unsigned long *sp, unsigned long bp) __cold;
extern void __show_regs(struct pt_regs *regs, int all) __cold;
extern void show_regs(struct pt_regs *regs) __cold;
extern void die(const char *str, struct pt_regs *regs, long err) __cold;
#endif /* _ASM_E2K_KDEBUG_H */

View File

@ -192,8 +192,7 @@
})
#define KVM_GET_AAU_AASTI_TAG() \
GUEST_GET_AAU_SREG(aasti_tags)
#define KVM_GET_AAU_AASR() \
GUEST_GET_AAU_SREG(aasr)
#define KVM_GET_AAU_AASR() GUEST_GET_CPU_SREG(AASR)
#define KVM_GET_AAU_AAFSTR() \
GUEST_GET_AAU_SREG(aafstr)
#define KVM_GET_AAU_AALDI(AALDI_no, lval, rval) \
@ -289,8 +288,7 @@ do { \
})
#define KVM_SET_AAU_AASTI_TAG(val) \
GUEST_SET_AAU_SREG(aasti_tags, val)
#define KVM_SET_AAU_AASR(val) \
GUEST_SET_AAU_SREG(aasr, val)
#define KVM_SET_AAU_AASR(val) GUEST_SET_CPU_SREG(AASR, val)
#define KVM_SET_AAU_AAFSTR(val) \
GUEST_SET_AAU_SREG(aafstr, val)
#define KVM_SET_AAU_AALDI(AALDI_no, lval, rval) \
@ -559,9 +557,9 @@ kvm_read_aaldm_reg(e2k_aaldm_t *aaldm)
kvm_read_aaldm_reg_value(&aaldm->lo, &aaldm->hi);
}
static inline void
kvm_write_aaldm_reg(e2k_aaldm_t *aaldm)
kvm_write_aaldm_reg(e2k_aaldm_t aaldm)
{
kvm_write_aaldm_reg_value(aaldm->lo, aaldm->hi);
kvm_write_aaldm_reg_value(aaldm.lo, aaldm.hi);
}
static inline void
kvm_read_aaldv_reg_value(u32 *lo_value, u32 *hi_value)
@ -583,9 +581,9 @@ kvm_read_aaldv_reg(e2k_aaldv_t *aaldv)
kvm_read_aaldv_reg_value(&aaldv->lo, &aaldv->hi);
}
static inline void
kvm_write_aaldv_reg(e2k_aaldv_t *aaldv)
kvm_write_aaldv_reg(e2k_aaldv_t aaldv)
{
kvm_write_aaldm_reg_value(aaldv->lo, aaldv->hi);
kvm_write_aaldm_reg_value(aaldv.lo, aaldv.hi);
}
static inline void
@ -647,17 +645,17 @@ static inline void read_aaldm_reg(e2k_aaldm_t *aaldm)
{
kvm_read_aaldm_reg_value(&aaldm->lo, &aaldm->hi);
}
static inline void write_aaldm_reg(e2k_aaldm_t *aaldm)
static inline void write_aaldm_reg(e2k_aaldm_t aaldm)
{
kvm_write_aaldm_reg_value(aaldm->lo, aaldm->hi);
kvm_write_aaldm_reg_value(aaldm.lo, aaldm.hi);
}
static inline void read_aaldv_reg(e2k_aaldv_t *aaldv)
{
kvm_read_aaldv_reg_value(&aaldv->lo, &aaldv->hi);
}
static inline void write_aaldv_reg(e2k_aaldv_t *aaldv)
static inline void write_aaldv_reg(e2k_aaldv_t aaldv)
{
kvm_write_aaldm_reg_value(aaldv->lo, aaldv->hi);
kvm_write_aaldm_reg_value(aaldv.lo, aaldv.hi);
}
#define clear_apb() kvm_clear_apb()

View File

@ -398,22 +398,12 @@ pv_vcpu_user_hw_stacks_prepare(struct kvm_vcpu *vcpu, pt_regs_t *regs,
"set to 0x%x\n",
stacks->pcshtp);
} else if (!syscall && pcshtp == 0 && guest_user) {
e2k_pcsp_hi_t k_pcsp_hi;
unsigned long flags;
/* in this case the trampoline frame is added into the guest */
/* kernel chain stack */
/* set the flag for unconditional injection so as not to copy */
/* from guest user space */
regs->need_inject = true;
/* reserve one bottom frame for the trampoline; */
/* the guest handler replaces the guest user trapped frame */
raw_all_irq_save(flags);
NATIVE_FLUSHC;
k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG();
BUG_ON(k_pcsp_hi.PCSP_hi_ind);
k_pcsp_hi.PCSP_hi_ind += 1 * SZ_OF_CR;
NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG(k_pcsp_hi);
raw_all_irq_restore(flags);
}
/*

View File

@ -108,6 +108,27 @@
ONLY_RESTORE_KERNEL_GREGS(task__, cpu_id__, cpu_off__); \
})
#define HOST_VCPU_STATE_REG_SWITCH_TO_GUEST(vcpu) \
({ \
machine.save_kernel_gregs(&vcpu->arch.host_ctxt.k_gregs); \
\
u64 guest_vs = GET_GUEST_VCPU_STATE_POINTER(vcpu); \
E2K_SET_DGREG(GUEST_VCPU_STATE_GREG, guest_vs); \
})
#define HOST_VCPU_STATE_REG_RESTORE(host_ti) \
({ \
struct kvm_vcpu *vcpu = host_ti->vcpu; \
\
struct kernel_gregs h_gregs; \
machine.save_kernel_gregs(&h_gregs); \
\
NATIVE_RESTORE_KERNEL_GREGS(&vcpu->arch.host_ctxt.k_gregs); \
machine.save_kernel_gregs(&host_ti->k_gregs_light); \
\
NATIVE_RESTORE_KERNEL_GREGS(&h_gregs); \
})
#define HOST_RESTORE_KERNEL_GREGS_AS_LIGHT(_ti) \
HOST_RESTORE_HOST_GREGS_FROM(&(_ti)->k_gregs_light, false)

View File

@ -66,6 +66,7 @@ typedef struct kvm_cpu_regs {
u64 CPU_SBBP[SBBP_ENTRIES_NUM];
e2k_wd_t CPU_WD; /* Window Descriptor Register */
e2k_bgr_t CPU_BGR; /* Base Global Register */
e2k_aasr_t CPU_AASR;
e2k_lsr_t CPU_LSR; /* Loop Status Register */
e2k_lsr_t CPU_LSR1; /* */
e2k_ilcr_t CPU_ILCR; /* Initial Loop Counters Register */

View File

@ -29,13 +29,13 @@
} \
})
#define KVM_RESTORE_AAU_MASK_REGS(aau_context) \
#define KVM_RESTORE_AAU_MASK_REGS(aaldm, aaldv, aau_context) \
({ \
if (IS_HV_GM()) { \
E2K_CMD_SEPARATOR; \
NATIVE_RESTORE_AAU_MASK_REGS(aau_context); \
NATIVE_RESTORE_AAU_MASK_REGS(aaldm, aaldv, aau_context); \
} else { \
PREFIX_RESTORE_AAU_MASK_REGS(KVM, kvm, aau_context); \
PREFIX_RESTORE_AAU_MASK_REGS(KVM, kvm, aaldm, aaldv, aau_context); \
} \
})
@ -155,30 +155,30 @@
} \
})
#define KVM_GET_AAU_CONTEXT(context) \
({ \
#define KVM_GET_AAU_CONTEXT(context, aasr) \
do { \
if (IS_HV_GM()) { \
NATIVE_GET_AAU_CONTEXT(context); \
NATIVE_GET_AAU_CONTEXT(context, aasr); \
} else { \
PREFIX_GET_AAU_CONTEXT(KVM, kvm, V5, v5, context); \
PREFIX_GET_AAU_CONTEXT(KVM, kvm, V5, v5, context, aasr); \
} \
})
#define KVM_GET_AAU_CONTEXT_V2(context) \
({ \
} while (0)
#define KVM_GET_AAU_CONTEXT_V2(context, aasr) \
do { \
if (IS_HV_GM()) { \
NATIVE_GET_AAU_CONTEXT_V2(context); \
NATIVE_GET_AAU_CONTEXT_V2(context, aasr); \
} else { \
PREFIX_GET_AAU_CONTEXT(KVM, kvm, V5, v5, context); \
PREFIX_GET_AAU_CONTEXT(KVM, kvm, V5, v5, context, aasr); \
} \
})
#define KVM_GET_AAU_CONTEXT_V5(context) \
({ \
} while (0)
#define KVM_GET_AAU_CONTEXT_V5(context, aasr) \
do { \
if (IS_HV_GM()) { \
NATIVE_GET_AAU_CONTEXT_V5(context); \
NATIVE_GET_AAU_CONTEXT_V5(context, aasr); \
} else { \
PREFIX_GET_AAU_CONTEXT(KVM, kvm, V5, v5, context); \
PREFIX_GET_AAU_CONTEXT(KVM, kvm, V5, v5, context, aasr); \
} \
})
} while (0)
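The hunk above switches these multi-statement macros from GCC statement expressions to the conventional do { ... } while (0) form. As a generic reminder of why that idiom is the safe shape for multi-statement macros (step_one(), step_two(), other() and cond are placeholders, not names from this header):

#define TWO_STEPS()	do { step_one(); step_two(); } while (0)

	if (cond)
		TWO_STEPS();	/* the trailing ';' ends the do-while cleanly */
	else
		other();	/* a bare { ... } block here would break the if/else pairing */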
static inline void
kvm_save_aaldi(u64 *aaldis)
@ -238,24 +238,13 @@ kvm_get_synchronous_part_v5(e2k_aau_t *context)
 * It is assumed that aasr was read earlier (by the get_aau_context caller)
 * and that the comparison with aasr.iab has already been done.
*/
static inline void
kvm_get_aau_context(e2k_aau_t *context)
static inline void kvm_get_aau_context(e2k_aau_t *context, e2k_aasr_t aasr)
{
KVM_GET_AAU_CONTEXT(context);
}
static inline void
kvm_get_aau_context_v2(e2k_aau_t *context)
{
KVM_GET_AAU_CONTEXT_V2(context);
}
static inline void
kvm_get_aau_context_v5(e2k_aau_t *context)
{
KVM_GET_AAU_CONTEXT_V5(context);
KVM_GET_AAU_CONTEXT(context, aasr);
}
static __always_inline void
kvm_set_aau_context(e2k_aau_t *context)
static __always_inline void kvm_set_aau_context(e2k_aau_t *context,
const e2k_aalda_t *aalda, e2k_aasr_t aasr)
{
/* AAU context should be restored by the host */
}
@ -266,8 +255,8 @@ kvm_set_aau_context(e2k_aau_t *context)
#define SAVE_AAU_MASK_REGS(aau_context, aasr) \
KVM_SAVE_AAU_MASK_REGS(aau_context, aasr)
#define RESTORE_AAU_MASK_REGS(aau_context) \
KVM_RESTORE_AAU_MASK_REGS(aau_context)
#define RESTORE_AAU_MASK_REGS(aaldm, aaldv, aau_context) \
KVM_RESTORE_AAU_MASK_REGS(aaldm, aaldv, aau_context)
#define SAVE_AADS(aau_regs) \
KVM_SAVE_AADS(aau_regs)
@ -295,8 +284,8 @@ kvm_set_aau_context(e2k_aau_t *context)
#define GET_SYNCHRONOUS_PART_V5(context) \
KVM_GET_SYNCHRONOUS_PART_V5(context)
#define GET_AAU_CONTEXT_V2(context) KVM_GET_AAU_CONTEXT_V2(context)
#define GET_AAU_CONTEXT_V5(context) KVM_GET_AAU_CONTEXT_V5(context)
#define GET_AAU_CONTEXT_V2(context, aasr) KVM_GET_AAU_CONTEXT_V2(context, aasr)
#define GET_AAU_CONTEXT_V5(context, aasr) KVM_GET_AAU_CONTEXT_V5(context, aasr)
static inline void
save_aaldi(u64 *aaldis)
@ -309,15 +298,15 @@ set_array_descriptors(e2k_aau_t *context)
kvm_set_array_descriptors(context);
}
static inline void
get_aau_context(e2k_aau_t *context)
get_aau_context(e2k_aau_t *context, e2k_aasr_t aasr)
{
kvm_get_aau_context(context);
kvm_get_aau_context(context, aasr);
}
static __always_inline void
set_aau_context(e2k_aau_t *context)
static __always_inline void set_aau_context(e2k_aau_t *context,
const e2k_aalda_t *aalda, e2k_aasr_t aasr)
{
kvm_set_aau_context(context);
kvm_set_aau_context(context, aalda, aasr);
}
#endif /* CONFIG_KVM_GUEST_KERNEL */

View File

@ -4,7 +4,6 @@
#include <asm/mas.h>
#include <asm/kvm/guest/boot.h>
#define E2K_VIRT_CPU_VENDOR "Elbrus-MCST"
#define E2K_VIRT_CPU_FAMILY 0xff
#define E2K_VIRT_CPU_MODEL IDR_E2K_VIRT_MDL
#define E2K_VIRT_CPU_REVISION 16 /* 2016 year */

View File

@ -11,32 +11,43 @@ int kvm_fast_sys_clock_gettime(const clockid_t which_clock,
struct timespec __user *tp);
int kvm_do_fast_gettimeofday(struct timeval *tv);
int kvm_fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize);
int kvm_do_fast_sys_set_return(u64 ip, int flags);
#ifdef CONFIG_KVM_GUEST_KERNEL
#define __NR_fast_gettimeofday 0
#define __NR_fast_clock_gettime 1
#define __NR_fast_getcpu 2
#define __NR_fast_siggetmask 3
#define __NR_fast_getcontext 4
#define __NR_fast_set_return 5
#define goto_ttable_entry1_args3(sys_num, arg1, arg2, ret) \
E2K_SCALL_ARG7(1, ret, sys_num, arg1, arg2, 0, 0, 0, 0)
#define goto_ttable_entry1_args4(sys_num, arg1, arg2, arg3, ret) \
E2K_SCALL_ARG7(1, ret, sys_num, arg1, arg2, arg3, 0, 0, 0)
#define goto_ttable_entry3_args3(sys_num, arg1, arg2, ret) \
E2K_SCALL_ARG7(3, ret, sys_num, arg1, arg2, 0, 0, 0, 0)
#define goto_ttable_entry3_args4(sys_num, arg1, arg2, arg3, ret) \
E2K_SCALL_ARG7(3, ret, sys_num, arg1, arg2, arg3, 0, 0, 0)
/* it is native guest kernel (not paravirtualized based on pv_ops) */
static inline int
do_fast_clock_gettime(const clockid_t which_clock, struct timespec *tp)
{
return kvm_do_fast_clock_gettime(which_clock, tp);
}
static inline int
fast_sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
{
return kvm_fast_sys_clock_gettime(which_clock, tp);
}
static inline int
do_fast_gettimeofday(struct timeval *tv)
{
return kvm_do_fast_gettimeofday(tv);
}
static inline int
fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize)
do_fast_sys_set_return(u64 ip, int flags)
{
return kvm_fast_sys_siggetmask(oset, sigsetsize);
return kvm_do_fast_sys_set_return(ip, flags);
}
#endif /* ! CONFIG_KVM_GUEST_KERNEL */
#endif /* _ASM_E2K_KVM_GUEST_FAST_SYSCALLS_H */

View File

@ -3,15 +3,15 @@
#ifdef __KERNEL__
extern void kvm_get_mm_notifier_locked(struct mm_struct *mm);
extern int kvm_get_mm_notifier_locked(struct mm_struct *mm);
#ifdef CONFIG_KVM_GUEST_KERNEL
/* it is pure guest kernel (not paravirtualized based on pv_ops) */
static inline void
static inline int
get_mm_notifier_locked(struct mm_struct *mm)
{
/* create mm notifier to trace some events over mm */
kvm_get_mm_notifier_locked(mm);
return kvm_get_mm_notifier_locked(mm);
}
#endif /* CONFIG_KVM_GUEST_KERNEL */

View File

@ -7,7 +7,8 @@
extern void kvm_activate_mm(struct mm_struct *active_mm,
struct mm_struct *mm);
extern void kvm_get_mm_notifier_locked(struct mm_struct *mm);
extern int kvm_get_mm_notifier(struct mm_struct *mm);
extern int kvm_get_mm_notifier_locked(struct mm_struct *mm);
#ifdef CONFIG_KVM_GUEST_KERNEL
/* it is pure guest kernel (not paravirtualized based on pv_ops) */

View File

@ -113,19 +113,18 @@ kvm_preserve_user_hw_stacks_to_copy(e2k_stacks_t *u_stacks,
static __always_inline void
kvm_jump_to_ttable_entry(struct pt_regs *regs, enum restore_caller from)
{
if (from & FROM_SYSCALL_N_PROT) {
if (from & (FROM_SYSCALL_N_PROT | FROM_SIGRETURN | FROM_RET_FROM_FORK)) {
switch (regs->kernel_entry) {
case 1:
case 3:
case 4:
KVM_WRITE_UPSR_REG(E2K_KERNEL_UPSR_ENABLED);
regs->stack_regs_saved = true;
__E2K_JUMP_WITH_ARGUMENTS_8(handle_sys_call,
regs->sys_func,
regs->args[1], regs->args[2],
regs->args[3], regs->args[4],
regs->args[5], regs->args[6],
regs);
/*
 * Unconditional return to the host with the guest's return value,
 * because only the host can recover the initial state of the stacks
 * and other register state needed to restart the system call
*/
E2K_SYSCALL_RETURN(regs->sys_rval);
default:
BUG();
}

View File

@ -81,6 +81,11 @@ static inline bool kvm_guest_syscall_enter(struct pt_regs *regs)
return false; /* it is not nested guest system call */
}
static inline void kvm_pv_vcpu_syscall_intc(thread_info_t *ti, pt_regs_t *regs)
{
/* Do nothing in guest mode */
}
#ifdef CONFIG_KVM_GUEST_KERNEL
/* it is pure guest kernel (not paravirtualized) */
@ -148,6 +153,11 @@ static inline bool guest_syscall_enter(struct pt_regs *regs,
return kvm_guest_syscall_enter(regs);
}
static inline void pv_vcpu_syscall_intc(thread_info_t *ti, pt_regs_t *regs)
{
kvm_pv_vcpu_syscall_intc(ti, regs);
}
static inline void guest_exit_intc(struct pt_regs *regs,
bool intc_emul_flag) { }
static inline void guest_syscall_exit_trap(struct pt_regs *regs,

View File

@ -17,145 +17,85 @@
extern void kvm_pv_flush_tlb_all(void);
extern void kvm_pv_flush_tlb_mm(struct mm_struct *mm);
extern void kvm_pv_flush_tlb_page(struct mm_struct *mm, e2k_addr_t addr);
extern void kvm_pv_flush_tlb_range(struct mm_struct *mm, e2k_addr_t start,
e2k_addr_t end);
extern void kvm_pv_flush_tlb_kernel_range(e2k_addr_t start, e2k_addr_t end);
extern void kvm_pv_flush_pmd_tlb_range(struct mm_struct *mm, e2k_addr_t start,
e2k_addr_t end);
extern void kvm_pv_flush_tlb_range_and_pgtables(struct mm_struct *mm,
e2k_addr_t start,
e2k_addr_t end);
#ifndef CONFIG_SMP
static inline void
kvm_flush_tlb_all(void)
e2k_addr_t start, e2k_addr_t end);
static inline void kvm_flush_tlb_all(void)
{
if (IS_HV_GM())
__flush_tlb_all();
native_flush_tlb_all();
else
kvm_pv_flush_tlb_all();
}
static inline void
kvm_flush_tlb_mm(struct mm_struct *mm)
static inline void kvm_flush_tlb_mm(struct mm_struct *mm)
{
if (IS_HV_GM())
__flush_tlb_mm(mm);
native_flush_tlb_mm(mm);
else
kvm_pv_flush_tlb_mm(mm);
}
static inline void
kvm_flush_tlb_page(struct vm_area_struct *vma, e2k_addr_t addr)
static inline void kvm_flush_tlb_page(struct mm_struct *mm, e2k_addr_t addr)
{
if (IS_HV_GM())
__flush_tlb_page(vma->vm_mm, addr);
native_flush_tlb_page(mm, addr);
else
kvm_pv_flush_tlb_page(vma->vm_mm, addr);
kvm_pv_flush_tlb_page(mm, addr);
}
static inline void
kvm_flush_tlb_range(struct mm_struct *mm, e2k_addr_t start, e2k_addr_t end)
static inline void kvm_flush_tlb_range(struct mm_struct *mm,
e2k_addr_t start, e2k_addr_t end)
{
if (IS_HV_GM())
__flush_tlb_range(mm, start, end);
else
kvm_pv_flush_tlb_range(mm, start, end);
}
static inline void
kvm_flush_tlb_kernel_range(e2k_addr_t start, e2k_addr_t end)
{
if (IS_HV_GM())
__flush_tlb_all();
else
kvm_pv_flush_tlb_kernel_range(start, end);
}
static inline void
kvm_flush_pmd_tlb_range(struct mm_struct *mm, e2k_addr_t start,
e2k_addr_t end)
{
if (IS_HV_GM())
__flush_pmd_tlb_range(mm, start, end);
else
kvm_pv_flush_pmd_tlb_range(mm, start, end);
}
static inline void
kvm_flush_tlb_range_and_pgtables(struct mm_struct *mm,
e2k_addr_t start, e2k_addr_t end)
{
if (IS_HV_GM())
__flush_tlb_range_and_pgtables(mm, start, end);
native_flush_tlb_range(mm, start, end);
else
kvm_pv_flush_tlb_range_and_pgtables(mm, start, end);
}
#else /* CONFIG_SMP */
extern void kvm_pv_smp_flush_tlb_mm(struct mm_struct *const mm);
extern void kvm_pv_smp_flush_tlb_all(void);
extern void kvm_pv_smp_flush_tlb_page(struct vm_area_struct *const vma,
const e2k_addr_t addr);
extern void kvm_pv_smp_flush_tlb_range(struct mm_struct *const mm,
const e2k_addr_t start, const e2k_addr_t end);
extern void kvm_pv_smp_flush_pmd_tlb_range(struct mm_struct *const mm,
const e2k_addr_t start, const e2k_addr_t end);
extern void kvm_pv_smp_flush_tlb_range_and_pgtables(struct mm_struct *const mm,
const e2k_addr_t start, const e2k_addr_t end);
extern void kvm_pv_smp_flush_tlb_kernel_range(e2k_addr_t start, e2k_addr_t end);
static inline void
kvm_flush_tlb_all(void)
static inline void kvm_flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long stride, u32 levels_mask)
{
if (IS_HV_GM())
native_smp_flush_tlb_all();
native_flush_tlb_mm_range(mm, start, end, stride, levels_mask);
else
kvm_pv_smp_flush_tlb_all();
kvm_pv_flush_tlb_range_and_pgtables(mm, start, end);
}
static inline void
kvm_flush_tlb_mm(struct mm_struct *mm)
static inline void kvm_flush_tlb_kernel_range(e2k_addr_t start, e2k_addr_t end)
{
if (IS_HV_GM())
native_smp_flush_tlb_mm(mm);
native_flush_tlb_kernel_range(start, end);
else
kvm_pv_smp_flush_tlb_mm(mm);
kvm_pv_flush_tlb_kernel_range(start, end);
}
static inline void
kvm_flush_tlb_page(struct vm_area_struct *vma, e2k_addr_t addr)
static inline void kvm_flush_pmd_tlb_range(struct mm_struct *mm,
e2k_addr_t start, e2k_addr_t end)
{
if (IS_HV_GM())
native_smp_flush_tlb_page(vma, addr);
native_flush_pmd_tlb_range(mm, start, end);
else
kvm_pv_smp_flush_tlb_page(vma, addr);
kvm_pv_flush_pmd_tlb_range(mm, start, end);
}
static inline void
kvm_flush_tlb_range(struct mm_struct *mm, e2k_addr_t start, e2k_addr_t end)
{
if (IS_HV_GM())
native_smp_flush_tlb_range(mm, start, end);
else
kvm_pv_smp_flush_tlb_range(mm, start, end);
}
static inline void
kvm_flush_tlb_kernel_range(e2k_addr_t start, e2k_addr_t end)
{
if (IS_HV_GM())
native_smp_flush_tlb_all();
else
kvm_pv_smp_flush_tlb_kernel_range(start, end);
}
static inline void
kvm_flush_pmd_tlb_range(struct mm_struct *mm, e2k_addr_t start,
e2k_addr_t end)
{
if (IS_HV_GM())
native_smp_flush_pmd_tlb_range(mm, start, end);
else
kvm_pv_smp_flush_pmd_tlb_range(mm, start, end);
}
static inline void
kvm_flush_tlb_range_and_pgtables(struct mm_struct *mm,
static inline void kvm_flush_tlb_range_and_pgtables(struct mm_struct *mm,
e2k_addr_t start, e2k_addr_t end)
{
if (IS_HV_GM())
native_smp_flush_tlb_range_and_pgtables(mm, start, end);
native_flush_tlb_range_and_pgtables(mm, start, end);
else
kvm_pv_smp_flush_tlb_range_and_pgtables(mm, start, end);
kvm_pv_flush_tlb_range_and_pgtables(mm, start, end);
}
static inline void kvm_flush_tlb_page_and_pgtables(struct mm_struct *mm,
unsigned long uaddr)
{
kvm_flush_tlb_range_and_pgtables(mm, uaddr, uaddr + 1);
}
#endif /* CONFIG_SMP */
/* it is native KVM guest kernel (not paravirtualized) */
/* guest kernel does not support other virtual machines and guests */

View File

@ -164,10 +164,19 @@ kvm_do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address,
ttable_entry; \
})
#define FILL_HARDWARE_STACKS() \
#define FILL_HARDWARE_STACKS__HW() \
do { \
if (IS_HV_GM()) { \
NATIVE_FILL_HARDWARE_STACKS(); \
NATIVE_FILL_HARDWARE_STACKS__HW(); \
} else { \
KVM_FILL_HARDWARE_STACKS(); \
} \
} while (false)
#define FILL_HARDWARE_STACKS__SW() \
do { \
if (IS_HV_GM()) { \
NATIVE_FILL_HARDWARE_STACKS__SW(); \
} else { \
KVM_FILL_HARDWARE_STACKS(); \
} \

View File

@ -40,6 +40,8 @@ extern int kvm_host_apply_psp_delta_to_signal_stack(unsigned long base,
extern int kvm_host_apply_pcsp_delta_to_signal_stack(unsigned long base,
unsigned long size, unsigned long start,
unsigned long end, unsigned long delta);
extern int kvm_host_apply_usd_delta_to_signal_stack(unsigned long top,
unsigned long delta, bool incr);
static inline unsigned long
kvm_mmio_page_fault(struct pt_regs *regs, trap_cellar_t *tcellar)
@ -108,6 +110,16 @@ static inline int host_apply_pcsp_delta_to_signal_stack(unsigned long base,
start, end, delta);
}
static inline int host_apply_usd_delta_to_signal_stack(unsigned long top,
unsigned long delta, bool incr)
{
if (IS_HV_GM()) {
return native_host_apply_usd_delta_to_signal_stack(top, delta,
incr);
}
return kvm_host_apply_usd_delta_to_signal_stack(top, delta, incr);
}
static inline void
handle_interrupt(struct pt_regs *regs)
{

View File

@ -0,0 +1,180 @@
#ifndef GVA_CACHE_E2K_H
#define GVA_CACHE_E2K_H
#include <linux/kvm_types.h>
#include <asm/page.h>
#include <asm/kvm/mmu_exc.h>
/* Format of address record in gva cache */
typedef union {
struct {
u64 addr : 64 - PAGE_SHIFT;
u64 flags : PAGE_SHIFT;
} fields;
u64 word;
} cache_addr_t;
#define ADDR_VALID_MASK (1 << 0)
#define GVA_ADDR gva.fields.addr
#define GVA_FLAGS gva.fields.flags
#define GVA_WHOLE gva.word
/* Cache "cell" with gva -> gpa,hva translation */
typedef struct gva_cache_cell {
cache_addr_t gva;
gfn_t gfn;
u32 pte_access;
u32 level;
u64 replace_data;
} gva_cache_cell_t;
/* gva -> gpa cache size */
#define KVM_GVA_CACHE_SZ PAGE_SIZE
/* 2 ^ KVM_GVA_CACHE_BUCKET_BITS buckets in cache */
#define KVM_GVA_CACHE_BUCKET_BITS 5
#define KVM_GVA_CACHE_BUCKETS (1 << KVM_GVA_CACHE_BUCKET_BITS)
#define KVM_GVA_CACHE_BUCKET_SZ \
(KVM_GVA_CACHE_SZ / KVM_GVA_CACHE_BUCKETS)
#define KVM_GVA_CACHE_BUCKET_LEN \
(KVM_GVA_CACHE_BUCKET_SZ / sizeof(gva_cache_cell_t))
#define KVM_GVA_CACHE_LEN \
(KVM_GVA_CACHE_SZ / sizeof(gva_cache_cell_t))
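With the usual 4 KiB PAGE_SIZE and the 32-byte gva_cache_cell_t above (assuming the bitfields and fields pack with no padding), the sizing macros work out to:

	KVM_GVA_CACHE_SZ         = 4096 bytes
	KVM_GVA_CACHE_BUCKETS    = 1 << 5    = 32 buckets
	KVM_GVA_CACHE_BUCKET_SZ  = 4096 / 32 = 128 bytes per bucket
	KVM_GVA_CACHE_BUCKET_LEN = 128 / 32  = 4 cells per bucket
	KVM_GVA_CACHE_LEN        = 4096 / 32 = 128 cells in total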
typedef enum REPLACE_POLICY {
LRU = 0,
RAND = 1
} replace_policy_t;
/*
 * The gva cache stores gva -> gpa and gva -> hva translations to avoid
 * guest page table walks and kvm memory slot lookups.
*/
typedef struct gva_cache {
spinlock_t bucket_locks[KVM_GVA_CACHE_BUCKETS];
gva_cache_cell_t *data;
replace_policy_t replace_policy;
} gva_cache_t;
typedef struct gva_translation {
gva_t gva;
gpa_t gpa;
u32 pte_access;
u32 level;
} gva_translation_t;
typedef struct gva_cache_query {
gva_t gva;
u32 access;
} gva_cache_query_t;
typedef gpa_t (*gva_tranlslator_t)(struct kvm_vcpu *, gva_t,
u32, struct kvm_arch_exception*);
gpa_t gva_cache_translate(gva_cache_t *cache, gva_t gva, u32 access,
struct kvm_vcpu *vcpu, kvm_arch_exception_t *exc,
gva_tranlslator_t gva_translate);
void gva_cache_fetch_addr(gva_cache_t *cache, gva_t gva, gpa_t gpa,
u32 access);
void gva_cache_flush_addr(gva_cache_t *cache, gva_t gva);
gva_cache_t *gva_cache_init(void);
void gva_cache_erase(gva_cache_t *cache);
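A hypothetical caller, to show how the lookup API fits together; kvm_mmu_gva_to_gpa() is an assumed slow-path translator matching the gva_tranlslator_t signature above, not a function declared here:

static gpa_t lookup_write_gpa(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, gva_t gva)
{
	kvm_arch_exception_t exc;
	gpa_t gpa = gva_cache_translate(gmm->gva_cache, gva, ACC_WRITE_MASK,
					vcpu, &exc, kvm_mmu_gva_to_gpa);

	if (arch_is_error_gpa(gpa)) {
		/* cache miss and failed walk: exc carries the PFERR_* details */
	}
	return gpa;
}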
#ifdef CONFIG_KVM_GVA_CACHE_STAT
int gva_cache_stat_dev_init(void);
/* Statistics of access to gva->gpa cache */
typedef struct gva_caches_stat {
u64 accesses;
u64 hits;
u64 misses;
u64 sum_hit_time;
u64 sum_miss_pen;
u64 conflict_misses;
u64 cold_misses;
u64 flushes;
u64 fetches;
} gva_caches_stat_t;
extern gva_caches_stat_t caches_stat;
#define gva_cache_stat_lookup_start(start) \
({ \
caches_stat.accesses++; \
start = ktime_get_ns(); \
})
#define gva_cache_stat_lookup_hit_end(start, stop) \
({ \
stop = ktime_get_ns(); \
caches_stat.hits++; \
caches_stat.sum_hit_time += (stop - start); \
})
#define gva_cache_stat_lookup_miss_start(start) \
({ \
caches_stat.misses++; \
start = ktime_get_ns(); \
})
#define gva_cache_stat_lookup_miss_stop(start, stop) \
({ \
stop = ktime_get_ns(); \
caches_stat.sum_miss_pen += (stop - start); \
})
#define gva_cache_stat_lookup_miss_conflict(is_conflict) \
({ \
if (is_conflict) \
caches_stat.conflict_misses++; \
else \
caches_stat.cold_misses++; \
})
#define gva_cache_stat_replace_conflict(is_conflict, conflict) \
({ \
if (is_conflict) \
*is_conflict = conflict; \
})
#define gva_cache_stat_fetch() \
({ \
caches_stat.accesses++; \
caches_stat.fetches++; \
})
#define gva_cache_stat_flush() \
({ \
caches_stat.accesses++; \
caches_stat.flushes++; \
})
#else /* CONFIG_KVM_GVA_CACHE_STAT */
#define gva_cache_stat_lookup_start(start)
#define gva_cache_stat_lookup_hit_end(start, stop)
#define gva_cache_stat_lookup_miss_start(start)
#define gva_cache_stat_lookup_miss_stop(start, stop)
#define gva_cache_stat_lookup_miss_conflict(is_conflict)
#define gva_cache_stat_replace_conflict(is_conflict, conflict)
#define gva_cache_stat_fetch()
#define gva_cache_stat_flush()
#endif /* CONFIG_KVM_GVA_CACHE_STAT */
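A sketch of how a lookup path could be instrumented with the statistics macros above; hit and bucket_full are stand-in flags from the surrounding lookup code, not symbols defined here:

	u64 start, stop;

	gva_cache_stat_lookup_start(start);
	if (hit) {
		gva_cache_stat_lookup_hit_end(start, stop);
	} else {
		gva_cache_stat_lookup_miss_start(start);
		/* ... fall back to the slow guest page-table walk ... */
		gva_cache_stat_lookup_miss_stop(start, stop);
		gva_cache_stat_lookup_miss_conflict(bucket_full);
	}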
#endif /* GVA_CACHE_E2K_H */

View File

@ -246,6 +246,10 @@ static inline unsigned long generic_hypercall6(unsigned long nr,
#define KVM_HCALL_SWITCH_TO_EXPANDED_PROC_STACK 31
/* notify host kernel about switch to updated procedure chain stack on guest */
#define KVM_HCALL_SWITCH_TO_EXPANDED_CHAIN_STACK 32
/* return to guest user from the fast syscall handler */
#define KVM_HCALL_RETURN_FROM_FAST_SYSCALL 33
/* change return ip in user stack */
#define KVM_HCALL_SET_RETURN_USER_IP 34
typedef struct kvm_hw_stacks_flush {
unsigned long psp_lo;
@ -472,6 +476,19 @@ HYPERVISOR_switch_to_expanded_guest_chain_stack(long delta_size,
delta_size, delta_offset, (unsigned long)decr_gk_pcs);
}
static inline unsigned long
HYPERVISOR_return_from_fast_syscall(long ret_val)
{
return light_hypercall1(KVM_HCALL_RETURN_FROM_FAST_SYSCALL, ret_val);
}
static inline unsigned long
HYPERVISOR_set_return_user_ip(u64 gti, u64 ip, int flags)
{
return light_hypercall3(KVM_HCALL_SET_RETURN_USER_IP, gti,
ip, flags);
}
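A hypothetical guest-side call site for the second wrapper; treating gti as the guest thread pointer expected by the host is an assumption, as the argument is not documented here:

	unsigned long rc = HYPERVISOR_set_return_user_ip((u64) gti, new_ip, 0);

	if (rc != 0) {
		/* the host rejected the new return IP */
	}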
/*
* KVM hypervisor (host) <-> guest generic hypercalls list
*/
@ -487,6 +504,9 @@ HYPERVISOR_switch_to_expanded_guest_chain_stack(long delta_size,
#define KVM_HCALL_COMPLETE_LONG_JUMP 12 /* long jump completion */
#define KVM_HCALL_LAUNCH_SIG_HANDLER 14 /* launch guest user signal */
/* handler */
#define KVM_HCALL_APPLY_USD_BOUNDS 15 /* update user data */
/* stack pointers after stack */
/* bounds handling */
#define KVM_HCALL_SWITCH_TO_VIRT_MODE 16 /* switch from physical to */
/* virtual addresses mode */
/* (enable paging, TLB, TLU) */
@ -895,6 +915,14 @@ HYPERVISOR_apply_pcsp_bounds(unsigned long base, unsigned long size,
return generic_hypercall5(KVM_HCALL_APPLY_PCSP_BOUNDS,
base, size, start, end, delta);
}
static inline unsigned long
HYPERVISOR_apply_usd_bounds(unsigned long top, unsigned long delta, bool incr)
{
return generic_hypercall3(KVM_HCALL_APPLY_USD_BOUNDS,
top, delta, incr);
}
static inline unsigned long
HYPERVISOR_correct_trap_return_ip(unsigned long return_ip)
{

View File

@ -7,6 +7,7 @@
#include <linux/kvm.h>
#include <asm/kvm/nid.h>
#include <asm/kvm/gva_cache.h>
#define GMMID_MAX_LIMIT (GPID_MAX_LIMIT)
#define RESERVED_GMMIDS 1 /* 0 is reserved for init_mm */
@ -52,6 +53,7 @@ typedef struct gmm_struct {
/* the guest mm */
cpumask_t cpu_vm_mask; /* mask of CPUs where the mm is */
/* in use or was some early */
gva_cache_t *gva_cache; /* gva -> gpa,hva cache */
} gmm_struct_t;
/* same as accessor for struct mm_struct's cpu_vm_mask but for guest mm */

View File

@ -15,10 +15,11 @@
#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL)
/* it is native kernel without any virtualization */
/* it is native host kernel with virtualization support */
static inline void
static inline int
get_mm_notifier_locked(struct mm_struct *mm)
{
/* Do not need mmu notifier in native mode */
return 0;
}
#elif defined(CONFIG_PARAVIRT_GUEST)
/* it is paravirtualized host and guest kernel */

View File

@ -0,0 +1,20 @@
#ifndef MMU_GVA_TRANSLATION_H
#define MMU_GVA_TRANSLATION_H
#include <linux/types.h>
#undef E2K_INVALID_PAGE
#define E2K_INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)
#define arch_is_error_gpa(gpa) ((gpa_t)(gpa) == UNMAPPED_GVA)
typedef struct kvm_arch_exception {
bool error_code_valid; /* PFERR_* flags are valid */
u32 error_code; /* PFERR_* flags */
u64 address; /* page fault gpa */
u64 ip; /* IP to inject trap */
} kvm_arch_exception_t;
#endif /* MMU_GVA_TRANSLATION_H */

View File

@ -0,0 +1,58 @@
#ifndef MMU_PTE_H
#define MMU_PTE_H
/* uwx (u - user mode, w - writable, x - executable) */
#define ACC_EXEC_MASK 0x1
#define ACC_WRITE_MASK 0x2
#define ACC_USER_MASK 0x4
#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
/* page table directories are always privileged & not executable */
#define ACC_PT_DIR (ACC_WRITE_MASK)
#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_NOT_PRESENT_BIT 5
#define PFERR_PT_FAULT_BIT 6
#define PFERR_INSTR_FAULT_BIT 7
#define PFERR_INSTR_PROT_BIT 8
#define PFERR_FORCED_BIT 9
#define PFERR_WAIT_LOCK_BIT 10
#define PFERR_GPTE_CHANGED_BIT 11
#define PFERR_MMIO_BIT 12
#define PFERR_ONLY_VALID_BIT 13
#define PFERR_READ_PROT_BIT 14
#define PFERR_IS_UNMAPPED_BIT 15
#define PFERR_FAPB_BIT 16
#define PFERR_ACCESS_SIZE_BIT 24
#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_NOT_PRESENT_MASK (1U << PFERR_NOT_PRESENT_BIT)
#define PFERR_PT_FAULT_MASK (1U << PFERR_PT_FAULT_BIT)
#define PFERR_INSTR_FAULT_MASK (1U << PFERR_INSTR_FAULT_BIT)
#define PFERR_INSTR_PROT_MASK (1U << PFERR_INSTR_PROT_BIT)
#define PFERR_FORCED_MASK (1U << PFERR_FORCED_BIT)
#define PFERR_WAIT_LOCK_MASK (1U << PFERR_WAIT_LOCK_BIT)
#define PFERR_GPTE_CHANGED_MASK (1U << PFERR_GPTE_CHANGED_BIT)
#define PFERR_MMIO_MASK (1U << PFERR_MMIO_BIT)
#define PFERR_ONLY_VALID_MASK (1U << PFERR_ONLY_VALID_BIT)
#define PFERR_READ_PROT_MASK (1U << PFERR_READ_PROT_BIT)
#define PFERR_IS_UNMAPPED_MASK (1U << PFERR_IS_UNMAPPED_BIT)
#define PFERR_FAPB_MASK (1U << PFERR_FAPB_BIT)
#define PFERR_ACCESS_SIZE_MASK (~0U << PFERR_ACCESS_SIZE_BIT)
#define PFRES_GET_ACCESS_SIZE(pfres) \
(((pfres) & PFERR_ACCESS_SIZE_MASK) >> PFERR_ACCESS_SIZE_BIT)
#define PFRES_SET_ACCESS_SIZE(pfres, size) \
(((pfres) & ~PFERR_ACCESS_SIZE_MASK) | \
((size) << PFERR_ACCESS_SIZE_BIT))
#endif /* MMU_PTE_H */
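A worked example of the access-size encoding above; the values follow directly from the mask definitions:

	u32 pfres = PFRES_SET_ACCESS_SIZE(PFERR_WRITE_MASK, 8);

	/* pfres == 0x08000002: write fault (bit 1) with access size 8 in bits 31:24 */
	/* PFRES_GET_ACCESS_SIZE(pfres) == 8 */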

View File

@ -177,7 +177,7 @@ static inline void BOOT_KVM_WRITE_MMU_PID_REG(mmu_reg_t reg_val)
/* all hardware MMU registers, but it is not so now, */
/* for example PT roots and context registers are controlled */
/* by hypervisor as for paravirtualized kernels */
NATIVE_FLUSH_TLB_ALL(_flush_op_tlb_all);
NATIVE_FLUSH_TLB_ALL(flush_op_tlb_all);
}
}
static inline unsigned long BOOT_KVM_READ_MMU_PID_REG(void)
@ -246,11 +246,11 @@ static inline void
KVM_FLUSH_TLB_ENTRY(flush_op_t flush_op, flush_addr_t flush_addr)
{
if (unlikely(flush_addr_get_pid(flush_addr) == E2K_KERNEL_CONTEXT)) {
pr_warn("%s(): CPU #%d try to flush %s addr 0x%lx pid 0x%03lx\n",
pr_warn("%s(): CPU #%d try to flush %s addr 0x%llx pid 0x%03llx\n",
__func__, smp_processor_id(),
(flush_op_get_type(flush_op) == flush_op_tlb_page_sys) ?
(flush_op_get_type(flush_op) == FLUSH_TLB_PAGE_OP) ?
"TLB page" : "???",
flush_addr_get_va(flush_addr),
FLUSH_VADDR_TO_VA(flush_addr),
flush_addr_get_pid(flush_addr));
}
}

View File

@ -127,7 +127,6 @@ extern void insert_pv_vcpu_sigreturn(struct kvm_vcpu *vcpu,
extern void kvm_emulate_pv_vcpu_intc(struct thread_info *ti, pt_regs_t *regs,
trap_pt_regs_t *trap);
extern void return_from_pv_vcpu_intc(struct thread_info *ti, pt_regs_t *regs);
extern bool pv_vcpu_syscall_intc(thread_info_t *ti, pt_regs_t *regs);
static inline bool kvm_vcpu_in_hypercall(struct kvm_vcpu *vcpu)
{

View File

@ -99,6 +99,12 @@ native_guest_syscall_enter(struct pt_regs *regs)
return false; /* it is not guest system call */
}
static inline void
native_pv_vcpu_syscall_intc(thread_info_t *ti, pt_regs_t *regs)
{
/* Nothing to do in native mode */
}
#ifdef CONFIG_VIRTUALIZATION
/*
@ -278,106 +284,6 @@ static inline void kvm_switch_to_host_mmu_pid(struct kvm_vcpu *vcpu,
raw_all_irq_restore(flags);
}
static inline void kvm_switch_debug_regs(struct kvm_sw_cpu_context *sw_ctxt,
int is_active)
{
u64 b_dimar0, b_dimar1, b_ddmar0, b_ddmar1, b_dibar0, b_dibar1,
b_dibar2, b_dibar3, b_ddbar0, b_ddbar1, b_ddbar2, b_ddbar3,
a_dimar0, a_dimar1, a_ddmar0, a_ddmar1, a_dibar0, a_dibar1,
a_dibar2, a_dibar3, a_ddbar0, a_ddbar1, a_ddbar2, a_ddbar3;
e2k_dimcr_t b_dimcr, a_dimcr;
e2k_ddmcr_t b_ddmcr, a_ddmcr;
e2k_dibcr_t b_dibcr, a_dibcr;
e2k_dibsr_t b_dibsr, a_dibsr;
e2k_ddbcr_t b_ddbcr, a_ddbcr;
e2k_ddbsr_t b_ddbsr, a_ddbsr;
b_dibcr = sw_ctxt->dibcr;
b_ddbcr = sw_ctxt->ddbcr;
b_dibsr = sw_ctxt->dibsr;
b_ddbsr = sw_ctxt->ddbsr;
b_dimcr = sw_ctxt->dimcr;
b_ddmcr = sw_ctxt->ddmcr;
b_dibar0 = sw_ctxt->dibar0;
b_dibar1 = sw_ctxt->dibar1;
b_dibar2 = sw_ctxt->dibar2;
b_dibar3 = sw_ctxt->dibar3;
b_ddbar0 = sw_ctxt->ddbar0;
b_ddbar1 = sw_ctxt->ddbar1;
b_ddbar2 = sw_ctxt->ddbar2;
b_ddbar3 = sw_ctxt->ddbar3;
b_dimar0 = sw_ctxt->dimar0;
b_dimar1 = sw_ctxt->dimar1;
b_ddmar0 = sw_ctxt->ddmar0;
b_ddmar1 = sw_ctxt->ddmar1;
a_dibcr = NATIVE_READ_DIBCR_REG();
a_ddbcr = NATIVE_READ_DDBCR_REG();
a_dibsr = NATIVE_READ_DIBSR_REG();
a_ddbsr = NATIVE_READ_DDBSR_REG();
a_dimcr = NATIVE_READ_DIMCR_REG();
a_ddmcr = NATIVE_READ_DDMCR_REG();
a_dibar0 = NATIVE_READ_DIBAR0_REG_VALUE();
a_dibar1 = NATIVE_READ_DIBAR1_REG_VALUE();
a_dibar2 = NATIVE_READ_DIBAR2_REG_VALUE();
a_dibar3 = NATIVE_READ_DIBAR3_REG_VALUE();
a_ddbar0 = NATIVE_READ_DDBAR0_REG_VALUE();
a_ddbar1 = NATIVE_READ_DDBAR1_REG_VALUE();
a_ddbar2 = NATIVE_READ_DDBAR2_REG_VALUE();
a_ddbar3 = NATIVE_READ_DDBAR3_REG_VALUE();
a_ddmar0 = NATIVE_READ_DDMAR0_REG_VALUE();
a_ddmar1 = NATIVE_READ_DDMAR1_REG_VALUE();
a_dimar0 = NATIVE_READ_DIMAR0_REG_VALUE();
a_dimar1 = NATIVE_READ_DIMAR1_REG_VALUE();
if (is_active) {
/* These two must be written first to disable monitoring */
NATIVE_WRITE_DIBCR_REG(b_dibcr);
NATIVE_WRITE_DDBCR_REG(b_ddbcr);
}
NATIVE_WRITE_DIBAR0_REG_VALUE(b_dibar0);
NATIVE_WRITE_DIBAR1_REG_VALUE(b_dibar1);
NATIVE_WRITE_DIBAR2_REG_VALUE(b_dibar2);
NATIVE_WRITE_DIBAR3_REG_VALUE(b_dibar3);
NATIVE_WRITE_DDBAR0_REG_VALUE(b_ddbar0);
NATIVE_WRITE_DDBAR1_REG_VALUE(b_ddbar1);
NATIVE_WRITE_DDBAR2_REG_VALUE(b_ddbar2);
NATIVE_WRITE_DDBAR3_REG_VALUE(b_ddbar3);
NATIVE_WRITE_DDMAR0_REG_VALUE(b_ddmar0);
NATIVE_WRITE_DDMAR1_REG_VALUE(b_ddmar1);
NATIVE_WRITE_DIMAR0_REG_VALUE(b_dimar0);
NATIVE_WRITE_DIMAR1_REG_VALUE(b_dimar1);
NATIVE_WRITE_DIBSR_REG(b_dibsr);
NATIVE_WRITE_DDBSR_REG(b_ddbsr);
NATIVE_WRITE_DIMCR_REG(b_dimcr);
NATIVE_WRITE_DDMCR_REG(b_ddmcr);
if (!is_active) {
/* These two must be written last to enable monitoring */
NATIVE_WRITE_DIBCR_REG(b_dibcr);
NATIVE_WRITE_DDBCR_REG(b_ddbcr);
}
sw_ctxt->dibcr = a_dibcr;
sw_ctxt->ddbcr = a_ddbcr;
sw_ctxt->dibsr = a_dibsr;
sw_ctxt->ddbsr = a_ddbsr;
sw_ctxt->dimcr = a_dimcr;
sw_ctxt->ddmcr = a_ddmcr;
sw_ctxt->dibar0 = a_dibar0;
sw_ctxt->dibar1 = a_dibar1;
sw_ctxt->dibar2 = a_dibar2;
sw_ctxt->dibar3 = a_dibar3;
sw_ctxt->ddbar0 = a_ddbar0;
sw_ctxt->ddbar1 = a_ddbar1;
sw_ctxt->ddbar2 = a_ddbar2;
sw_ctxt->ddbar3 = a_ddbar3;
sw_ctxt->ddmar0 = a_ddmar0;
sw_ctxt->ddmar1 = a_ddmar1;
sw_ctxt->dimar0 = a_dimar0;
sw_ctxt->dimar1 = a_dimar1;
}
#ifdef CONFIG_CLW_ENABLE
static inline void kvm_switch_clw_regs(struct kvm_sw_cpu_context *sw_ctxt, bool guest_enter)
{
@ -421,6 +327,8 @@ switch_ctxt_trap_enable_mask(struct kvm_sw_cpu_context *sw_ctxt)
sw_ctxt->osem = osem;
}
extern void kvm_switch_debug_regs(struct kvm_sw_cpu_context *sw_ctxt, int is_active);
static inline void host_guest_enter(struct thread_info *ti,
struct kvm_vcpu_arch *vcpu, unsigned flags)
{
@ -444,6 +352,10 @@ static inline void host_guest_enter(struct thread_info *ti,
}
}
/* This makes a call so switch it before AAU */
if (flags & DEBUG_REGS_SWITCH)
kvm_switch_debug_regs(sw_ctxt, true);
if (flags & FROM_HYPERCALL_SWITCH) {
/*
* Hypercalls - both hardware and software virtualization
@ -470,7 +382,8 @@ static inline void host_guest_enter(struct thread_info *ti,
*/
#ifdef CONFIG_USE_AAU
if (!(flags & DONT_AAU_CONTEXT_SWITCH))
machine.calculate_aau_aaldis_aaldas(NULL, ti, &sw_ctxt->aau_context);
machine.calculate_aau_aaldis_aaldas(NULL, ti->aalda,
&sw_ctxt->aau_context);
#endif
if (machine.flushts)
@ -501,7 +414,7 @@ static inline void host_guest_enter(struct thread_info *ti,
* before restoring %aasr, so we must restore all AAU registers.
*/
native_clear_apb();
native_set_aau_context(&sw_ctxt->aau_context);
native_set_aau_context(&sw_ctxt->aau_context, ti->aalda, E2K_FULL_AASR);
/*
* It's important to restore AAD after all return operations.
@ -519,9 +432,6 @@ static inline void host_guest_enter(struct thread_info *ti,
kvm_switch_mmu_regs(sw_ctxt, false);
}
if (flags & DEBUG_REGS_SWITCH)
kvm_switch_debug_regs(sw_ctxt, true);
KVM_BUG_ON(vcpu->is_hv && !NATIVE_READ_MMU_US_CL_D());
/* Switch data stack after all function calls */
@ -617,19 +527,16 @@ static inline void host_guest_exit(struct thread_info *ti,
*/
#ifdef CONFIG_USE_AAU
if (!(flags & DONT_AAU_CONTEXT_SWITCH)) {
e2k_aasr_t aasr;
/*
* We cannot rely on %aasr value since interception could have
* happened in guest user before "bap" or in guest trap handler
* before restoring %aasr, so we must save all AAU registers.
 * Several macros use %aasr to determine which registers to
* save/restore, so pass worst-case %aasr to them in
* sw_ctxt->aau_context, and save the actual guest value to
* sw_ctxt->aasr
* save/restore, so pass worst-case %aasr to them directly
* while saving the actual guest value to sw_ctxt->aasr
*/
aasr = native_read_aasr_reg();
SWITCH_GUEST_AAU_AASR(&aasr, &sw_ctxt->aau_context, 1);
sw_ctxt->aasr = aasr_parse(native_read_aasr_reg());
/*
* This is placed before saving intc cellar since it is done
* with 'mmurr' instruction which requires AAU to be stopped.
@ -637,7 +544,7 @@ static inline void host_guest_exit(struct thread_info *ti,
* Do this before saving %sbbp as it uses 'alc'
* and thus zeroes %aaldm.
*/
NATIVE_SAVE_AAU_MASK_REGS(&sw_ctxt->aau_context, aasr);
NATIVE_SAVE_AAU_MASK_REGS(&sw_ctxt->aau_context, E2K_FULL_AASR);
/* It's important to save AAD before all call operations. */
NATIVE_SAVE_AADS(&sw_ctxt->aau_context);
@ -650,7 +557,7 @@ static inline void host_guest_exit(struct thread_info *ti,
/* Since iset v6 %aaldi must be saved too */
NATIVE_SAVE_AALDIS(sw_ctxt->aau_context.aaldi);
machine.get_aau_context(&sw_ctxt->aau_context);
machine.get_aau_context(&sw_ctxt->aau_context, E2K_FULL_AASR);
native_clear_apb();
}
@ -687,6 +594,7 @@ static inline void host_guest_exit(struct thread_info *ti,
kvm_switch_mmu_regs(sw_ctxt, false);
}
/* This makes a call so switch it after AAU */
if (flags & DEBUG_REGS_SWITCH)
kvm_switch_debug_regs(sw_ctxt, false);
}
@ -1183,18 +1091,32 @@ host_trap_guest_exit(struct thread_info *ti, struct pt_regs *regs,
host_switch_trap_enable_mask(ti, regs, false);
}
static inline void __guest_exit(struct thread_info *ti,
struct kvm_vcpu_arch *vcpu, unsigned flags);
/*
* The function should return bool 'is the system call from guest?'
*/
static inline bool host_guest_syscall_enter(struct pt_regs *regs,
bool ts_host_at_vcpu_mode)
{
struct kvm_vcpu *vcpu;
if (likely(!ts_host_at_vcpu_mode))
return false; /* it is not guest system call */
clear_ts_flag(TS_HOST_AT_VCPU_MODE);
return pv_vcpu_syscall_intc(current_thread_info(), regs);
vcpu = current_thread_info()->vcpu;
__guest_exit(current_thread_info(), &vcpu->arch, 0);
/* return to hypervisor MMU context to emulate intercept */
kvm_switch_to_host_mmu_pid(vcpu, current->mm);
kvm_set_intc_emul_flag(regs);
return true;
}
extern void host_pv_vcpu_syscall_intc(thread_info_t *ti, pt_regs_t *regs);
#endif /* CONFIG_VIRTUALIZATION */
#if defined(CONFIG_PARAVIRT_GUEST)
@ -1278,6 +1200,10 @@ static inline bool guest_syscall_enter(struct pt_regs *regs,
{
return native_guest_syscall_enter(regs);
}
static inline void pv_vcpu_syscall_intc(thread_info_t *ti, pt_regs_t *regs)
{
native_pv_vcpu_syscall_intc(ti, regs);
}
static inline void guest_exit_intc(struct pt_regs *regs,
bool intc_emul_flag) { }
static inline void guest_syscall_exit_trap(struct pt_regs *regs,
@ -1365,6 +1291,11 @@ static inline bool guest_syscall_enter(struct pt_regs *regs,
return host_guest_syscall_enter(regs, ts_host_at_vcpu_mode);
}
static inline void pv_vcpu_syscall_intc(thread_info_t *ti, pt_regs_t *regs)
{
host_pv_vcpu_syscall_intc(ti, regs);
}
static inline void guest_exit_intc(struct pt_regs *regs, bool intc_emul_flag)
{
if (unlikely(intc_emul_flag)) {

View File

@ -18,31 +18,10 @@
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(mm, start, end) flushes a range of pages
* - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*/
extern void mmu_pv_flush_tlb_address(struct kvm_vcpu *vcpu, gmm_struct_t *gmm,
e2k_addr_t addr);
extern void mmu_pv_flush_tlb_address_pgtables(struct kvm_vcpu *vcpu,
gmm_struct_t *gmm,
e2k_addr_t addr);
extern void mmu_pv_flush_tlb_page(struct kvm_vcpu *vcpu, gmm_struct_t *gmm,
e2k_addr_t addr);
extern void mmu_pv_flush_tlb_mm(struct kvm_vcpu *vcpu, gmm_struct_t *gmm);
extern void mmu_pv_flush_tlb_range(struct kvm_vcpu *vcpu, gmm_struct_t *gmm,
const e2k_addr_t start, const e2k_addr_t end);
extern void mmu_pv_flush_pmd_tlb_range(struct kvm_vcpu *vcpu, gmm_struct_t *gmm,
unsigned long start, unsigned long end);
extern void mmu_pv_flush_tlb_kernel_range(struct kvm_vcpu *vcpu, gmm_struct_t *gmm,
const e2k_addr_t start, const e2k_addr_t end);
extern void mmu_pv_flush_tlb_pgtables(struct kvm_vcpu *vcpu, gmm_struct_t *gmm,
e2k_addr_t start, e2k_addr_t end);
extern void mmu_pv_flush_tlb_range_and_pgtables(struct kvm_vcpu *vcpu,
gmm_struct_t *gmm,
e2k_addr_t start, e2k_addr_t end);
extern void mmu_pv_flush_tlb_page_and_pgtables(struct kvm_vcpu *vcpu,
gmm_struct_t *gmm,
unsigned long address);
extern void mmu_pv_flush_cpu_root_pt_mm(struct kvm_vcpu *vcpu, gmm_struct_t *gmm);
extern void mmu_pv_flush_cpu_root_pt(struct kvm_vcpu *vcpu);
@ -51,56 +30,28 @@ extern long kvm_pv_sync_and_flush_tlb(struct kvm_vcpu *vcpu,
extern long kvm_pv_sync_addr_range(struct kvm_vcpu *vcpu,
gva_t start_gva, gva_t end_gva);
extern void mmu_pv_smp_flush_tlb_mm(struct kvm_vcpu *vcpu, gmm_struct_t *gmm);
extern void mmu_pv_smp_flush_tlb_page(struct kvm_vcpu *vcpu, gmm_struct_t *gmm,
e2k_addr_t addr);
extern void mmu_pv_smp_flush_tlb_range(struct kvm_vcpu *vcpu, gmm_struct_t *gmm,
e2k_addr_t start, e2k_addr_t end);
extern void mmu_pv_smp_flush_pmd_tlb_range(struct kvm_vcpu *vcpu, gmm_struct_t *gmm,
e2k_addr_t start, e2k_addr_t end);
extern void mmu_pv_smp_flush_tlb_kernel_range(struct kvm_vcpu *vcpu,
gmm_struct_t *gmm,
e2k_addr_t start, e2k_addr_t end);
extern void mmu_pv_smp_flush_tlb_range_and_pgtables(struct kvm_vcpu *vcpu,
gmm_struct_t *gmm,
e2k_addr_t start, e2k_addr_t end);
extern void host_flush_shadow_pt_tlb_range(struct kvm_vcpu *vcpu,
gva_t start, gva_t end, pgprot_t spte, int level);
extern void host_flush_shadow_pt_level_tlb(struct kvm *kvm, gmm_struct_t *gmm,
gva_t gva, int level, pgprot_t new_spte, pgprot_t old_spte);
#ifndef CONFIG_SMP
#define host_flush_tlb_mm(vcpu, gmm) \
mmu_pv_flush_tlb_mm(vcpu, gmm)
#define host_flush_tlb_page(vcpu, gmm, addr) \
mmu_pv_flush_tlb_page(vcpu, gmm, addr)
#define host_flush_tlb_range(vcpu, gmm, start, end) \
mmu_pv_flush_tlb_range(vcpu, gmm, start, end)
#define host_flush_pmd_tlb_range(vcpu, gmm, start, end) \
mmu_pv_flush_pmd_tlb_range(vcpu, gmm, start, end)
#define host_flush_tlb_mm_range(vcpu, gmm, start, end) \
mmu_pv_flush_tlb_range(vcpu, gmm, start, end)
#define host_flush_tlb_kernel_range(vcpu, gmm, start, end) \
mmu_pv_flush_tlb_kernel_range(vcpu, gmm, start, end)
#define host_flush_tlb_range_and_pgtables(vcpu, gmm, start, end) \
mmu_pv_flush_tlb_range_and_pgtables(vcpu, gmm, start, end)
#else /* CONFIG_SMP */
#define host_flush_tlb_mm(vcpu, gmm) \
mmu_pv_smp_flush_tlb_mm(vcpu, gmm)
#define host_flush_tlb_page(vcpu, gmm, addr) \
mmu_pv_smp_flush_tlb_page(vcpu, gmm, addr)
#define host_flush_tlb_range(vcpu, gmm, start, end) \
mmu_pv_smp_flush_tlb_range(vcpu, gmm, start, end)
#define host_flush_pmd_tlb_range(vcpu, gmm, start, end) \
mmu_pv_smp_flush_pmd_tlb_range(vcpu, gmm, start, end)
#define host_flush_tlb_kernel_range(vcpu, gmm, start, end) \
mmu_pv_smp_flush_tlb_kernel_range(vcpu, gmm, start, end)
#define host_flush_tlb_mm_range(vcpu, gmm, start, end) \
mmu_pv_smp_flush_tlb_range(vcpu, gmm, start, end)
#define host_flush_tlb_range_and_pgtables(vcpu, gmm, start, end) \
mmu_pv_smp_flush_tlb_range_and_pgtables(vcpu, gmm, start, end)
#endif /* !CONFIG_SMP */
/*
* Shadow PT TLB flushing (same as flush_tlb_*() but for gmm)
 * Real switching to a new gmm context (PID) happens a little later,
 * on return from the hypercall to guest mode (host_guest_enter());
 * only then does the PID become active, but for now it is still passive.
*/
extern void host_flush_tlb_mm(gmm_struct_t *gmm);
extern void host_flush_tlb_page(gmm_struct_t *gmm, unsigned long addr);
extern void host_flush_tlb_range(gmm_struct_t *gmm,
unsigned long start, unsigned long end);
extern void host_flush_tlb_kernel_range(gmm_struct_t *gmm,
unsigned long start, unsigned long end);
extern void host_flush_tlb_range_and_pgtables(gmm_struct_t *gmm,
unsigned long start, unsigned long end);
extern void host_flush_pmd_tlb_range(gmm_struct_t *gmm,
unsigned long start, unsigned long end);
#endif /* _E2K_KVM_TLBFLUSH_H */

View File

@ -796,9 +796,10 @@ TRACE_EVENT(
TRACE_EVENT(
intc_aau,
TP_PROTO(const e2k_aau_t *aau_ctxt, u64 lsr, u64 lsr1, u64 ilcr, u64 ilcr1),
TP_PROTO(const e2k_aau_t *aau_ctxt, e2k_aasr_t aasr,
u64 lsr, u64 lsr1, u64 ilcr, u64 ilcr1),
TP_ARGS(aau_ctxt, lsr, lsr1, ilcr, ilcr1),
TP_ARGS(aau_ctxt, aasr, lsr, lsr1, ilcr, ilcr1),
TP_STRUCT__entry(
__field( u32, aasr )
@ -829,7 +830,7 @@ TRACE_EVENT(
TP_fast_assign(
int i;
__entry->aasr = AW(aau_ctxt->guest_aasr);
__entry->aasr = AW(aasr);
__entry->lsr = lsr;
__entry->lsr1 = lsr1;
__entry->ilcr = ilcr;

View File

@ -295,6 +295,13 @@ TRACE_EVENT(
__field( u64, pcsp_hi )
__field( u64, pshtp )
__field( unsigned int, pcshtp )
/* Backup stacks */
__dynamic_array(u64, frames, hw_ctxt->bu_pcsp_hi.ind / SZ_OF_CR)
__field(size_t, frames_len)
__field( u64, bu_psp_lo )
__field( u64, bu_psp_hi )
__field( u64, bu_pcsp_lo )
__field( u64, bu_pcsp_hi )
/* CRs */
__field( u64, cr0_lo )
__field( u64, cr0_hi )
@ -303,6 +310,15 @@ TRACE_EVENT(
),
TP_fast_assign(
u64 *frames = __get_dynamic_array(frames);
e2k_mem_crs_t *chain_stack = (e2k_mem_crs_t *) hw_ctxt->bu_pcsp_lo.base;
size_t len = hw_ctxt->bu_pcsp_hi.ind / SZ_OF_CR;
unsigned long i;
__entry->frames_len = len;
for (i = 0; i < len; i++)
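/* cr0_hi.ip holds the return address in 8-byte units, hence the << 3 */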
frames[i] = chain_stack[i].cr0_hi.ip << 3;
__entry->sbr = AW(sw_ctxt->sbr);
__entry->usd_lo = AW(sw_ctxt->usd_lo);
__entry->usd_hi = AW(sw_ctxt->usd_hi);
@ -316,17 +332,28 @@ TRACE_EVENT(
__entry->cr0_hi = AW(crs->cr0_hi);
__entry->cr1_lo = AW(crs->cr1_lo);
__entry->cr1_hi = AW(crs->cr1_hi);
__entry->bu_psp_lo = AW(hw_ctxt->bu_psp_lo);
__entry->bu_psp_hi = AW(hw_ctxt->bu_psp_hi);
__entry->bu_pcsp_lo = AW(hw_ctxt->bu_pcsp_lo);
__entry->bu_pcsp_hi = AW(hw_ctxt->bu_pcsp_hi);
),
TP_printk("sbr 0x%llx, usd_lo 0x%llx, usd_hi 0x%llx\n"
"sh_psp_lo 0x%llx, sh_psp_hi 0x%llx, sh_pcsp_lo 0x%llx, sh_pcsp_hi 0x%llx\n"
"sh_pshtp 0x%llx, sh_pcshtp 0x%x\n"
"sh_psp_lo 0x%llx, sh_psp_hi 0x%llx, sh_pshtp 0x%llx\n"
"sh_pcsp_lo 0x%llx, sh_pcsp_hi 0x%llx, sh_pcshtp 0x%x\n"
"cr0_lo 0x%llx, cr0_hi 0x%llx, cr1_lo 0x%llx, cr1_hi 0x%llx\n"
"bu_psp_lo 0x%llx, bu_psp_hi 0x%llx\n"
"bu_pcsp_lo 0x%llx, bu_pcsp_hi 0x%llx\n"
"backup chain stack IPs: %s\n"
,
__entry->sbr, __entry->usd_lo, __entry->usd_hi,
__entry->psp_lo, __entry->psp_hi, __entry->pcsp_lo, __entry->pcsp_hi,
__entry->pshtp, __entry->pcshtp,
__entry->cr0_lo, __entry->cr0_hi, __entry->cr1_lo, __entry->cr1_hi)
__entry->psp_lo, __entry->psp_hi, __entry->pshtp,
__entry->pcsp_lo, __entry->pcsp_hi, __entry->pcshtp,
__entry->cr0_lo, __entry->cr0_hi, __entry->cr1_lo, __entry->cr1_hi,
__entry->bu_psp_lo, __entry->bu_psp_hi,
__entry->bu_pcsp_lo, __entry->bu_pcsp_hi,
__print_array(__get_dynamic_array(frames),
__entry->frames_len, sizeof(u64)))
);
@ -544,6 +571,7 @@ TRACE_EVENT(
__entry->us_cl_m0, __entry->us_cl_m1, __entry->us_cl_m2, __entry->us_cl_m3)
);
#endif /* _TRACE_KVM_HV_H */
/* This part must be outside protection */

View File

@ -123,7 +123,7 @@
shld %dr0, 32, %dr0 ? \ptmp0;
}
{
SMP_ONLY(shld,1 GCPUID, 3, GCPUOFFSET ? \ptmp0)
SMP_ONLY(shld,1 GCPUID_PREEMPT, 3, GCPUOFFSET ? \ptmp0)
puttagd %dr3, 0, %dr3 ? \ptmp0;
puttagd %dr4, 0, %dr4 ? \ptmp0;
ord %dr0, \entry_num, %dr0 ? \ptmp0;
@ -141,6 +141,9 @@
puttagd %dr7, 0, %dr7 ? \ptmp0;
ibranch goto_guest_kernel_ttable_C ? \ptmp0;
}
#ifdef CONFIG_CPU_HWBUG_IBRANCH
{nop} {nop}
#endif
.endm /* GOTO_GUEST_KERNEL_TTABLE */
# ifdef CONFIG_PARAVIRT_GUEST

View File

@ -106,12 +106,6 @@ kvm_is_hw_pv_vm_available(void)
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
#undef E2K_INVALID_PAGE
#define E2K_INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)
#define arch_is_error_gpa(gpa) ((gpa_t)(gpa) == UNMAPPED_GVA)
/*
* See include/linux/kvm_host.h
* For the normal pfn, the highest 12 bits should be zero,
@ -711,6 +705,7 @@ typedef struct kvm_sw_cpu_context {
mmu_reg_t trap_count;
bool no_switch_pt; /* do not switch PT registers */
/* Monitors and breakpoints */
e2k_dibcr_t dibcr;
e2k_ddbcr_t ddbcr;
e2k_dibsr_t dibsr;
@ -729,9 +724,11 @@ typedef struct kvm_sw_cpu_context {
u64 ddbar1;
u64 ddbar2;
u64 ddbar3;
e2k_dimtp_t dimtp;
#ifdef CONFIG_USE_AAU
e2k_aau_t aau_context;
e2k_aasr_t aasr;
#endif
u64 cs_lo;
@ -911,6 +908,8 @@ typedef struct kvm_host_context {
unsigned osem; /* OSEM register state */
/* the host kernel's signal/trap stack of contexts */
kvm_signal_context_t signal;
/* kgregs of host kernel */
struct kernel_gregs k_gregs;
} kvm_host_context_t;
#ifdef CONFIG_KVM_ASYNC_PF

View File

@ -6,65 +6,13 @@
#include <uapi/asm/iset_ver.h>
#include <asm/sections.h>
#include <asm/aau_regs_types.h>
#include <asm/cpu_features.h>
#include <asm/sections.h>
#include <asm/mmu_types.h>
#include <asm/mlt.h>
#ifdef __KERNEL__
enum {
/* Hardware bugs */
CPU_HWBUG_LARGE_PAGES,
CPU_HWBUG_LAPIC_TIMER,
CPU_HWBUG_PIO_READS,
CPU_HWBUG_ATOMIC,
CPU_HWBUG_CLW,
CPU_HWBUG_PAGE_A,
CPU_HWBUG_SPURIOUS_EXC_ILL_INSTR_ADDR,
CPU_HWBUG_UNALIGNED_LOADS,
CPU_HWBUG_CANNOT_DO_DMA_IN_NEIGHBOUR_NODE,
CPU_HWBUG_DMA_AT_APIC_ADDR,
CPU_HWBUG_KERNEL_DATA_MONITOR,
CPU_HWBUG_WRITE_MEMORY_BARRIER,
CPU_HWBUG_BAD_RESET,
CPU_HWBUG_BREAKPOINT_INSTR,
CPU_HWBUG_E8C_WATCHDOG,
CPU_HWBUG_IOMMU,
CPU_HWBUG_WC_DAM,
CPU_HWBUG_TRAP_CELLAR_S_F,
CPU_HWBUG_SS,
CPU_HWBUG_AAU_AALDV,
CPU_HWBUG_LEVEL_EOI,
CPU_HWBUG_FALSE_SS,
CPU_HWBUG_SPURIOUS_EXC_DATA_DEBUG,
CPU_HWBUG_TLB_FLUSH_L1D,
CPU_HWBUG_GUEST_ASYNC_PM,
CPU_HWBUG_E16C_SLEEP,
CPU_HWBUG_L1I_STOPS_WORKING,
CPU_HWBUG_CLW_STALE_L1_ENTRY,
CPU_HWBUG_PIPELINE_FREEZE_MONITORS,
CPU_HWBUG_C3_WAIT_MA_C,
CPU_HWBUG_VIRT_SCLKM3_INTC,
CPU_HWBUG_VIRT_PUSD_PSL,
CPU_HWBUG_USD_ALIGNMENT,
CPU_HWBUG_VIRT_PSIZE_INTERCEPTION,
CPU_NO_HWBUG_SOFT_WAIT,
/* Features, not bugs */
CPU_FEAT_WC_PCI_PREFETCH,
CPU_FEAT_FLUSH_DC_IC,
CPU_FEAT_EPIC,
CPU_FEAT_TRAP_V5,
CPU_FEAT_TRAP_V6,
CPU_FEAT_QPREG,
CPU_FEAT_ISET_V3,
CPU_FEAT_ISET_V5,
CPU_FEAT_ISET_V6,
NR_CPU_FEATURES
};
struct cpuinfo_e2k;
struct pt_regs;
struct seq_file;
@ -72,9 +20,10 @@ struct global_regs;
struct kernel_gregs;
struct local_gregs;
struct e2k_aau_context;
struct e2k_mlt;
struct kvm_vcpu_arch;
struct e2k_dimtp;
struct thread_info;
union e2k_dimtp;
#include <asm/kvm/machdep.h> /* virtualization support */
@ -139,17 +88,17 @@ typedef struct machdep {
void (*restore_gregs_on_mask)(struct global_regs *, bool dirty_bgr,
unsigned long not_restore_gregs_mask);
void (*save_dimtp)(e2k_dimtp_t *);
void (*restore_dimtp)(const e2k_dimtp_t *);
void (*save_dimtp)(union e2k_dimtp *);
void (*restore_dimtp)(const union e2k_dimtp *);
void (*save_kvm_context)(struct kvm_vcpu_arch *);
void (*restore_kvm_context)(const struct kvm_vcpu_arch *);
void (*calculate_aau_aaldis_aaldas)(const struct pt_regs *regs,
struct thread_info *ti, struct e2k_aau_context *context);
e2k_aalda_t *aaldas, struct e2k_aau_context *context);
void (*do_aau_fault)(int aa_field, struct pt_regs *regs);
void (*save_aaldi)(u64 *aaldis);
void (*get_aau_context)(struct e2k_aau_context *);
void (*get_aau_context)(struct e2k_aau_context *, e2k_aasr_t);
unsigned long (*rrd)(int reg);
void (*rwd)(int reg, unsigned long value);
@ -161,7 +110,7 @@ typedef struct machdep {
#ifdef CONFIG_MLT_STORAGE
void (*invalidate_MLT)(void);
void (*get_and_invalidate_MLT_context)(e2k_mlt_t *mlt_state);
void (*get_and_invalidate_MLT_context)(struct e2k_mlt *mlt_state);
#endif
void (*flushts)(void);
@ -196,7 +145,7 @@ typedef struct machdep {
* being executed on.
*/
typedef void (*cpuhas_initcall_t)(int cpu, int revision, int iset_ver,
int guest_cpu, struct machdep *machine);
int guest_cpu, bool is_hardware_guest, struct machdep *machine);
extern cpuhas_initcall_t __cpuhas_initcalls[], __cpuhas_initcalls_end[];
/*
@ -217,7 +166,7 @@ extern cpuhas_initcall_t __cpuhas_initcalls[], __cpuhas_initcalls_end[];
__init \
static void feat##_initializer(const int cpu, const int revision, \
const int iset_ver, const int guest_cpu, \
struct machdep *const machine) { \
bool is_hardware_guest, struct machdep *const machine) { \
bool is_static = (_is_static); \
if (is_static && (static_cond) || !is_static && (dynamic_cond)) \
set_bit(feat, (machine)->cpu_features); \
@ -465,13 +414,17 @@ CPUHAS(CPU_HWBUG_CLW_STALE_L1_ENTRY,
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || cpu == IDR_E8C2_MDL ||
cpu == IDR_E16C_MDL && revision == 0);
/* #125405 - CPU pipeline freeze feature conflicts with performance monitoring.
* Workaround - disable pipeline freeze when monitoring is enabled. */
* Workaround - disable pipeline freeze when monitoring is enabled.
*
* Note (#132311): disable workaround on e16c.rev0/e2c3.rev0 since it conflicts
* with #134929 workaround. */
CPUHAS(CPU_HWBUG_PIPELINE_FREEZE_MONITORS,
IS_ENABLED(CONFIG_E2K_MACHINE),
IS_ENABLED(CONFIG_CPU_E8C2) || IS_ENABLED(CONFIG_CPU_E16C) ||
IS_ENABLED(CONFIG_CPU_E2C3) || IS_ENABLED(CONFIG_CPU_E12C),
cpu == IDR_E8C2_MDL || cpu == IDR_E16C_MDL ||
cpu == IDR_E2C3_MDL || cpu == IDR_E12C_MDL);
IS_ENABLED(CONFIG_E2K_MACHINE) && !IS_ENABLED(CONFIG_CPU_E16C) &&
!IS_ENABLED(CONFIG_CPU_E2C3),
IS_ENABLED(CONFIG_CPU_E8C2) || IS_ENABLED(CONFIG_CPU_E12C),
cpu == IDR_E8C2_MDL || cpu == IDR_E12C_MDL ||
cpu == IDR_E16C_MDL && revision > 0 ||
cpu == IDR_E2C3_MDL && revision > 0);
/* #126587 - "wait ma_c=1" does not wait for all L2$ writebacks to complete
* when disabling CPU core with "wait trap=1" algorithm.
* Workaround - manually insert 66 NOPs before "wait trap=1" */
@ -518,12 +471,31 @@ CPUHAS(CPU_HWBUG_VIRT_PSIZE_INTERCEPTION,
cpu == IDR_E2C3_MDL && revision == 0);
/* #130066, #134351 - L1/L2 do not respect "lal"/"las"/"sas"/"st_rel" barriers.
* Workaround - do not use "las"/"sas"/"st_rel", and add 5 nops after "lal". */
* Workaround - do not use "las"/"sas"/"st_rel", and add 5 nops after "lal".
* #133605 - "lal"/"las"/"sas"/"sal" barriers do not work in certain conditions.
* Workaround - add {nop} before them.
*
* Note that #133605 workaround is split into two parts:
* CPU_NO_HWBUG_SOFT_WAIT - for e16c/e2c3
* CPU_HWBUG_SOFT_WAIT_E8C2 - for e8c2
* This is done because it is very convenient to merge #130066, #134351
* and #133605 bugs workarounds together for e16c/e2c3. */
CPUHAS(CPU_NO_HWBUG_SOFT_WAIT,
!IS_ENABLED(CONFIG_CPU_E16C) && !IS_ENABLED(CONFIG_CPU_E2C3),
true,
!(cpu == IDR_E16C_MDL && revision == 0 ||
cpu == IDR_E2C3_MDL && revision == 0));
CPUHAS(CPU_HWBUG_SOFT_WAIT_E8C2,
IS_ENABLED(CONFIG_E2K_MACHINE),
IS_ENABLED(CONFIG_CPU_E8C2),
cpu == IDR_E8C2_MDL);
/* #132693 - C3 idle state does not work.
* Workaround - do not use it. */
CPUHAS(CPU_HWBUG_C3,
!IS_ENABLED(CONFIG_CPU_E16C),
false,
cpu == IDR_E16C_MDL && revision == 0);
/*
* Not bugs but features go here
@ -568,6 +540,56 @@ CPUHAS(CPU_FEAT_QPREG,
CONFIG_CPU_ISET != 0,
CONFIG_CPU_ISET >= 5,
iset_ver >= E2K_ISET_V5);
/* Hardware prefetcher that resides in L2 and works on phys. addresses */
CPUHAS(CPU_FEAT_HW_PREFETCHER,
IS_ENABLED(CONFIG_E2K_MACHINE),
!IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) &&
!IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) &&
!IS_ENABLED(CONFIG_CPU_E8C2) && !IS_ENABLED(CONFIG_CPU_E16C) &&
!IS_ENABLED(CONFIG_CPU_E2C3),
cpu != IDR_ES2_DSP_MDL && cpu != IDR_ES2_RU_MDL &&
cpu != IDR_E2S_MDL && cpu != IDR_E8C_MDL &&
cpu != IDR_E1CP_MDL && cpu != IDR_E8C2_MDL &&
cpu != IDR_E16C_MDL && cpu != IDR_E2C3_MDL);
/* When flushing high order page table entries we must also flush
* all links below it. E.g. when flushing PMD also flush PMD->PTE
* link (i.e. DTLB entry for address 0xff8000000000|(address >> 9)).
*
* Otherwise the following can happen:
* 1) High-order page is allocated.
* 2) Someone accesses the PMD->PTE link (e.g. half-spec. load) and
* creates invalid entry in DTLB.
* 3) High-order page is split into 4 Kb pages.
* 4) Someone accesses the PMD->PTE link address (e.g. DTLB entry
* probe) and reads the invalid entry created earlier.
*
* Since v6 we have separate TLBs for intermediate page table levels
* (TLU_CACHE.PWC) and for last level and invalid records (TLB).
 * So the invalid entry created in 2) goes into the TLB, while the access
 * in 4) searches TLU_CACHE.PWC, rendering this flush unneeded. */
CPUHAS(CPU_FEAT_SEPARATE_TLU_CACHE,
CONFIG_CPU_ISET != 0,
CONFIG_CPU_ISET >= 6,
iset_ver >= E2K_ISET_V6);
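/* Illustrative sketch (not part of the original header): a flush routine
 * honouring the note above might drop the PMD->PTE link explicitly on
 * pre-V6 machines.  The helper name and its use of __flush_TLB_page() are
 * assumptions made for illustration only. */
static inline void sketch_flush_pmd_with_link(e2k_addr_t addr,
		unsigned long context)
{
	__flush_TLB_page(addr, context);
	if (!cpu_has(CPU_FEAT_SEPARATE_TLU_CACHE))
		/* Pre-V6: the PMD->PTE link shares the TLB with last-level
		 * entries, so flush it too (address formula taken from the
		 * comment above). */
		__flush_TLB_page(0xff8000000000UL | (addr >> 9), context);
}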
/* Set if FILLR instruction is supported.
*
* #135233 - FILLR does not work in hardware guests.
* Workaround - do not use it in hardware guests. */
CPUHAS(CPU_FEAT_FILLR,
!IS_ENABLED(CONFIG_CPU_E16C) && !IS_ENABLED(CONFIG_CPU_E12C) &&
!IS_ENABLED(CONFIG_CPU_E2C3),
CONFIG_CPU_ISET >= 6,
iset_ver >= E2K_ISET_V6 &&
!((cpu == IDR_E16C_MDL || cpu == IDR_E12C_MDL ||
cpu == IDR_E2C3_MDL) && is_hardware_guest));
/* Set if FILLC instruction is supported.
*
* #135233 - software emulation of FILLC does not work in hardware guests.
* Workaround - use FILLC in hardware guests. */
CPUHAS(CPU_FEAT_FILLC,
CONFIG_CPU_ISET != 0,
CONFIG_CPU_ISET >= 6,
iset_ver >= E2K_ISET_V6);
/* Optimized version of machine.iset check */
CPUHAS(CPU_FEAT_ISET_V3,
CONFIG_CPU_ISET != 0,
@ -599,10 +621,12 @@ static inline unsigned long test_feature_dynamic(struct machdep *machine, int fe
test_feature(machine_p, feature)
#define boot_cpu_has(feature) boot_machine_has(&boot_machine, feature)
#ifndef E2K_P2V
#ifdef CONFIG_BOOT_E2K
# define cpu_has(feature) test_feature(&machine, feature)
#else
#elif defined(E2K_P2V)
# define cpu_has(feature) boot_cpu_has(feature)
#else
# define cpu_has(feature) test_feature(&machine, feature)
#endif
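/* Hedged usage sketch (not from the original header): whichever definition
 * of cpu_has() is selected above, callers pass the symbolic feature name.
 * The wrapper below is illustrative only. */
static inline bool sketch_have_qpreg(void)
{
	return cpu_has(CPU_FEAT_QPREG);
}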
/* Normally cpu_has() is passed the symbolic name of a feature (e.g. CPU_FEAT_*),
@ -670,25 +694,25 @@ extern void restore_gregs_on_mask_v2(struct global_regs *, bool dirty_bgr,
unsigned long mask_not_restore);
extern void restore_gregs_on_mask_v5(struct global_regs *, bool dirty_bgr,
unsigned long mask_not_restore);
extern void save_dimtp_v6(e2k_dimtp_t *);
extern void restore_dimtp_v6(const e2k_dimtp_t *);
extern void save_dimtp_v6(union e2k_dimtp *);
extern void restore_dimtp_v6(const union e2k_dimtp *);
extern void save_kvm_context_v6(struct kvm_vcpu_arch *);
extern void restore_kvm_context_v6(const struct kvm_vcpu_arch *);
extern void qpswitchd_sm(int);
extern void calculate_aau_aaldis_aaldas_v2(const struct pt_regs *regs,
struct thread_info *ti, struct e2k_aau_context *context);
e2k_aalda_t *aaldas, struct e2k_aau_context *context);
extern void calculate_aau_aaldis_aaldas_v5(const struct pt_regs *regs,
struct thread_info *ti, struct e2k_aau_context *context);
e2k_aalda_t *aaldas, struct e2k_aau_context *context);
extern void calculate_aau_aaldis_aaldas_v6(const struct pt_regs *regs,
struct thread_info *ti, struct e2k_aau_context *context);
e2k_aalda_t *aaldas, struct e2k_aau_context *context);
extern void do_aau_fault_v2(int aa_field, struct pt_regs *regs);
extern void do_aau_fault_v5(int aa_field, struct pt_regs *regs);
extern void do_aau_fault_v6(int aa_field, struct pt_regs *regs);
extern void save_aaldi_v2(u64 *aaldis);
extern void save_aaldi_v5(u64 *aaldis);
extern void get_aau_context_v2(struct e2k_aau_context *);
extern void get_aau_context_v5(struct e2k_aau_context *);
extern void get_aau_context_v2(struct e2k_aau_context *, e2k_aasr_t);
extern void get_aau_context_v5(struct e2k_aau_context *, e2k_aasr_t);
extern void flushts_v3(void);
@ -723,9 +747,9 @@ void native_set_cu_hw1_v5(u64);
void invalidate_MLT_v2(void);
void invalidate_MLT_v3(void);
void get_and_invalidate_MLT_context_v2(e2k_mlt_t *mlt_state);
void get_and_invalidate_MLT_context_v3(e2k_mlt_t *mlt_state);
void get_and_invalidate_MLT_context_v6(e2k_mlt_t *mlt_state);
void get_and_invalidate_MLT_context_v2(struct e2k_mlt *mlt_state);
void get_and_invalidate_MLT_context_v3(struct e2k_mlt *mlt_state);
void get_and_invalidate_MLT_context_v6(struct e2k_mlt *mlt_state);
#ifdef CONFIG_SMP
void native_clock_off_v3(void);

View File

@ -1,7 +1,7 @@
#ifndef _E2K_MAS_H_
#define _E2K_MAS_H_
#include <asm/types.h>
#include <linux/types.h>
#include <uapi/asm/mas.h>

View File

@ -8,6 +8,9 @@
#include <asm/kvm/mm_hooks.h>
extern int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
extern void arch_exit_mmap(struct mm_struct *mm);
static inline void arch_unmap(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
@ -16,7 +19,12 @@ static inline void arch_unmap(struct mm_struct *mm,
static inline void arch_bprm_mm_init(struct mm_struct *mm,
struct vm_area_struct *vma)
{
get_mm_notifier_locked(mm);
}
static inline int arch_bprm_mm_init_locked(struct mm_struct *mm,
struct vm_area_struct *vma)
{
return get_mm_notifier_locked(mm);
}
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,

View File

@ -80,7 +80,7 @@ static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
page_prot = vm_flags & VM_CUI;
if (vm_flags & VM_PRIVILEGED)
_PAGE_SET_PRIV(page_prot);
page_prot = _PAGE_SET_PRIV(page_prot);
return __pgprot(page_prot);
}
@ -121,7 +121,7 @@ enum exec_mmu_ret {
};
extern enum exec_mmu_ret execute_mmu_operations(trap_cellar_t *tcellar,
trap_cellar_t *next_tcellar, struct pt_regs *regs,
int rg, int zeroing, e2k_addr_t *addr,
int zeroing, e2k_addr_t *addr,
bool (*is_spill_fill_recovery)(tc_cond_t cond,
e2k_addr_t address, bool s_f,
struct pt_regs *regs),

View File

@ -7,6 +7,11 @@
#ifndef _ASM_E2K_MMU_REGS_TYPES_V2_H
#define _ASM_E2K_MMU_REGS_TYPES_V2_H
/* Avoid header dependency loop of probe_entry_t and DTLB_ENTRY_PH_BOUND_V2 */
#ifndef _E2K_TLB_REGS_TYPES_H_
# error Do not include <asm/mmu-regs-types-v2.h> directly, use <asm/tlb_regs_types.h> instead
#endif
/*
* This file contains the functions and defines necessary to modify and
* use the E2K ISET V2-V5 page tables.

View File

@ -7,6 +7,11 @@
#ifndef _ASM_E2K_MMU_REGS_TYPES_V6_H
#define _ASM_E2K_MMU_REGS_TYPES_V6_H
/* Avoid header dependency loop of probe_entry_t and DTLB_ENTRY_PH_BOUND_V6 */
#ifndef _E2K_TLB_REGS_TYPES_H_
# error Do not include <asm/mmu-regs-types-v6.h> directly, use <asm/tlb_regs_types.h> instead
#endif
/*
* This file contains the functions and defines necessary to modify and
* use the E2K ISET V6 page tables.

View File

@ -58,9 +58,6 @@ extern unsigned long mmu_last_context;
#define my_cpu_last_context1(num_cpu) mmu_last_context
#endif /* CONFIG_SMP */
extern int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
extern void arch_exit_mmap(struct mm_struct *mm);
static inline void
reload_context_mask(unsigned long mask)
{

View File

@ -448,30 +448,34 @@ read_MMU_US_CL_D(void)
static inline void
____flush_TLB_page(flush_op_t flush_op, flush_addr_t flush_addr)
{
unsigned long flags;
bool fl_c_needed = cpu_has(CPU_HWBUG_TLB_FLUSH_L1D);
DebugTLB("Flush TLB page : op 0x%lx extended virtual addr 0x%lx\n",
flush_op_val(flush_op), flush_addr_val(flush_addr));
flush_op, flush_addr);
raw_all_irq_save(flags);
FLUSH_TLB_ENTRY(flush_op_val(flush_op), flush_addr_val(flush_addr));
if (fl_c_needed)
FLUSH_TLB_ENTRY(flush_op, flush_addr);
if (cpu_has(CPU_HWBUG_TLB_FLUSH_L1D))
__E2K_WAIT(_fl_c);
raw_all_irq_restore(flags);
}
#define flush_TLB_page_begin()
#define flush_TLB_page_end() \
do { \
__E2K_WAIT(_fl_c | _ma_c); \
__E2K_WAIT(_fl_c); \
} while (0)
static inline void __flush_TLB_page_tlu_cache(unsigned long virt_addr,
unsigned long context, u64 type)
{
u64 va_tag = (virt_addr >> (9 * type + 12));
____flush_TLB_page(FLUSH_TLB_PAGE_TLU_CACHE_OP,
(va_tag << 21) | (context << 50) | type);
}
static inline void
__flush_TLB_page(e2k_addr_t virt_addr, unsigned long context)
{
____flush_TLB_page(flush_op_tlb_page_sys,
flush_addr_make_sys(virt_addr, context));
____flush_TLB_page(FLUSH_TLB_PAGE_OP, flush_addr_make_sys(virt_addr, context));
}
static inline void
@ -499,8 +503,7 @@ flush_TLB_kernel_page(e2k_addr_t virt_addr)
static inline void
__flush_TLB_ss_page(e2k_addr_t virt_addr, unsigned long context)
{
____flush_TLB_page(flush_op_tlb_page_sys,
flush_addr_make_ss(virt_addr, context));
____flush_TLB_page(FLUSH_TLB_PAGE_OP, flush_addr_make_ss(virt_addr, context));
}
static inline void
@ -628,8 +631,8 @@ static inline void
__flush_ICACHE_line(flush_op_t flush_op, flush_addr_t flush_addr)
{
DebugMR("Flush ICACHE line : op 0x%lx extended virtual addr 0x%lx\n",
flush_op_val(flush_op), flush_addr_val(flush_addr));
FLUSH_ICACHE_LINE(flush_op_val(flush_op), flush_addr_val(flush_addr));
flush_op, flush_addr);
FLUSH_ICACHE_LINE(flush_op, flush_addr);
}
#define flush_ICACHE_line_begin()
@ -641,8 +644,7 @@ do { \
static inline void
__flush_ICACHE_line_user(e2k_addr_t virt_addr)
{
__flush_ICACHE_line(flush_op_icache_line_user,
flush_addr_make_user(virt_addr));
__flush_ICACHE_line(FLUSH_ICACHE_LINE_USER_OP, flush_addr_make_user(virt_addr));
}
static inline void
@ -656,8 +658,7 @@ flush_ICACHE_line_user(e2k_addr_t virt_addr)
static inline void
__flush_ICACHE_line_sys(e2k_addr_t virt_addr, unsigned long context)
{
__flush_ICACHE_line(flush_op_icache_line_sys,
flush_addr_make_sys(virt_addr, context));
__flush_ICACHE_line(FLUSH_ICACHE_LINE_SYS_OP, flush_addr_make_sys(virt_addr, context));
}
static inline void
@ -691,9 +692,9 @@ boot_native_invalidate_CACHE_L12(void)
raw_all_irq_save(flags);
E2K_WAIT_MA;
if (invalidate_supported)
NATIVE_FLUSH_CACHE_L12(_flush_op_invalidate_cache_L12);
NATIVE_FLUSH_CACHE_L12(flush_op_invalidate_cache_L12);
else
NATIVE_FLUSH_CACHE_L12(_flush_op_write_back_cache_L12);
NATIVE_FLUSH_CACHE_L12(flush_op_write_back_cache_L12);
E2K_WAIT_FLUSH;
raw_all_irq_restore(flags);
}
@ -708,7 +709,7 @@ static inline void
native_raw_write_back_CACHE_L12(void)
{
__E2K_WAIT(_ma_c);
NATIVE_FLUSH_CACHE_L12(_flush_op_write_back_cache_L12);
NATIVE_FLUSH_CACHE_L12(flush_op_write_back_cache_L12);
__E2K_WAIT(_fl_c | _ma_c);
}
@ -716,8 +717,8 @@ static inline void
write_back_CACHE_L12(void)
{
DebugMR("Flush : Write back all CACHEs (op 0x%lx)\n",
_flush_op_write_back_cache_L12);
FLUSH_CACHE_L12(_flush_op_write_back_cache_L12);
flush_op_write_back_cache_L12);
FLUSH_CACHE_L12(flush_op_write_back_cache_L12);
}
/*
@ -728,15 +729,15 @@ static inline void
native_raw_flush_TLB_all(void)
{
__E2K_WAIT(_st_c);
NATIVE_FLUSH_TLB_ALL(_flush_op_tlb_all);
NATIVE_FLUSH_TLB_ALL(flush_op_tlb_all);
__E2K_WAIT(_fl_c | _ma_c);
}
static inline void
flush_TLB_all(void)
{
DebugMR("Flush all TLBs (op 0x%lx)\n", _flush_op_tlb_all);
FLUSH_TLB_ALL(_flush_op_tlb_all);
DebugMR("Flush all TLBs (op 0x%lx)\n", flush_op_tlb_all);
FLUSH_TLB_ALL(flush_op_tlb_all);
}
/*
@ -745,8 +746,8 @@ flush_TLB_all(void)
static inline void
flush_ICACHE_all(void)
{
DebugMR("Flush all ICACHE op 0x%lx\n", _flush_op_icache_all);
FLUSH_ICACHE_ALL(_flush_op_icache_all);
DebugMR("Flush all ICACHE op 0x%lx\n", flush_op_icache_all);
FLUSH_ICACHE_ALL(flush_op_icache_all);
}
/*

View File

@ -536,33 +536,15 @@ typedef dcache_addr_t dcache_l2_addr_t;
typedef e2k_addr_t flush_op_t;
#endif /* ! __ASSEMBLY__ */
#define flush_op_val(flush_op) (flush_op)
#define FLUSH_OP_TYPE ULL(7) /* type of operation */
#define FLUSH_ICACHE_LINE_USER_OP ULL(0)
#define FLUSH_TLB_PAGE_OP ULL(1)
#define FLUSH_ICACHE_LINE_SYS_OP ULL(2)
#define FLUSH_TLB_PAGE_TLU_CACHE_OP ULL(3)
#define __flush_op(flush_op_val) (flush_op_val)
#define _FLUSH_OP_TYPE 0x0000000000000007 /* type of */
/* operation */
#define _FLUSH_ICACHE_LINE_USER_OP 0x0000000000000000
#define _FLUSH_TLB_PAGE_SYS_OP 0x0000000000000001
#define _FLUSH_ICACHE_LINE_SYS_OP 0x0000000000000002
#define flush_op_get_type(flush_op) \
(flush_op_val(flush_op) & _FLUSH_OP_TYPE)
#define flush_op_get_type(flush_op) ((flush_op) & FLUSH_OP_TYPE)
#define flush_op_set_type(flush_op, type) \
(__flush_op((flush_op_val(flush_op) & ~_FLUSH_OP_TYPE) | \
((type) & _FLUSH_OP_TYPE)))
#define flush_op_set_icache_line_user(flush_op) \
flush_op_set_type(flush_op, _FLUSH_ICACHE_LINE_USER_OP)
#define flush_op_set_icache_line_sys(flush_op) \
flush_op_set_type(flush_op, _FLUSH_ICACHE_LINE_SYS_OP)
#define flush_op_set_tlb_page_sys(flush_op) \
flush_op_set_type(flush_op, _FLUSH_TLB_PAGE_SYS_OP)
#define _flush_op_icache_line_user ((long)_FLUSH_ICACHE_LINE_USER_OP)
#define _flush_op_icache_line_sys ((long)_FLUSH_ICACHE_LINE_SYS_OP)
#define _flush_op_tlb_page_sys ((long)_FLUSH_TLB_PAGE_SYS_OP)
#define flush_op_icache_line_user __flush_op(_flush_op_icache_line_user)
#define flush_op_icache_line_sys __flush_op(_flush_op_icache_line_sys)
#define flush_op_tlb_page_sys __flush_op(_flush_op_tlb_page_sys)
(((flush_op) & ~FLUSH_OP_TYPE) | ((type) & FLUSH_OP_TYPE))
/* ICACHE/DTLB/ITLB line flush extended virtual address structure */
@ -570,60 +552,30 @@ typedef e2k_addr_t flush_op_t;
typedef e2k_addr_t flush_addr_t;
#endif /* ! __ASSEMBLY__ */
#define flush_addr_val(flush_addr) (flush_addr)
#define FLUSH_ADDR_CONTEXT_SHIFT ULL(50) /* [61:50] */
#define FLUSH_ADDR_VA ULL(0x0000ffffffffffff) /* virtual address */
#define FLUSH_ADDR_CONTEXT ULL(0x3ffc000000000000) /* context # */
#define FLUSH_ADDR_ROOT ULL(0x4000000000000000) /* should be 0 */
#define FLUSH_ADDR_PHYS ULL(0x8000000000000000) /* should be 0 */
#define __flush_addr(flush_addr_val) (flush_addr_val)
#define _FLUSH_ADDR_CONTEXT_SHIFT 50 /* [61:50] */
#define _FLUSH_ADDR_VA 0x0000ffffffffffff /* virtual address */
/* [47: 0] */
#define _FLUSH_ADDR_CONTEXT 0x3ffc000000000000 /* context # */
#define _FLUSH_ADDR_ROOT 0x4000000000000000 /* should be 0 */
#define _FLUSH_ADDR_PHYS 0x8000000000000000 /* should be 0 */
#define FLUSH_VADDR_TO_VA(virt_addr) ((virt_addr) & _FLUSH_ADDR_VA)
#define _FLUSH_ADDR_KERNEL(virt_addr) (FLUSH_VADDR_TO_VA(virt_addr) | \
((long)E2K_KERNEL_CONTEXT << _FLUSH_ADDR_CONTEXT_SHIFT))
#define FLUSH_ADDR_KERNEL(virt_addr) \
__flush_addr(_FLUSH_ADDR_KERNEL(virt_addr))
#define flush_addr_get_va(flush_addr) \
(flush_addr_val(flush_addr) & _FLUSH_ADDR_VA)
#define flush_addr_set_va(flush_addr, virt_addr) \
(__flush_addr((flush_addr_val(flush_addr) & ~_FLUSH_ADDR_VA) | \
((va_page) & _FLUSH_ADDR_VA)))
#define FLUSH_VADDR_TO_VA(virt_addr) ((virt_addr) & FLUSH_ADDR_VA)
#define flush_addr_get_pid(flush_addr) \
((flush_addr_val(flush_addr) & _FLUSH_ADDR_CONTEXT) >> \
_FLUSH_ADDR_CONTEXT_SHIFT)
#define flush_addr_get_context(flush_addr) \
(flush_addr_val(flush_addr) & _FLUSH_ADDR_CONTEXT)
#define flush_addr_set_context(flush_addr, context) \
(__flush_addr((flush_addr_val(flush_addr) & \
~_FLUSH_ADDR_CONTEXT) | \
((long)(context) << _FLUSH_ADDR_CONTEXT_SHIFT) & \
_FLUSH_ADDR_CONTEXT))
(((flush_addr) & FLUSH_ADDR_CONTEXT) >> FLUSH_ADDR_CONTEXT_SHIFT)
#define _flush_addr_make_sys(virt_addr, context, root) \
({ \
e2k_addr_t __addr_val = FLUSH_VADDR_TO_VA(virt_addr); \
__addr_val |= (((long)(context) << \
_FLUSH_ADDR_CONTEXT_SHIFT) & \
_FLUSH_ADDR_CONTEXT); \
__addr_val |= (((long)(context) << FLUSH_ADDR_CONTEXT_SHIFT) & \
FLUSH_ADDR_CONTEXT); \
if (root) \
__addr_val |= _FLUSH_ADDR_ROOT; \
__addr_val |= FLUSH_ADDR_ROOT; \
__addr_val; \
})
#define _flush_addr_make_user(virt_addr) \
FLUSH_VADDR_TO_VA(virt_addr)
#define flush_addr_make_sys(virt_addr, context) \
__flush_addr(_flush_addr_make_sys(virt_addr, context, 0))
#define flush_addr_make_user(virt_addr) \
__flush_addr(_flush_addr_make_user(virt_addr))
_flush_addr_make_sys((virt_addr), (context), 0)
#define flush_addr_make_user(virt_addr) FLUSH_VADDR_TO_VA(virt_addr)
#define flush_addr_make_ss(virt_addr, context) \
__flush_addr(_flush_addr_make_sys(virt_addr, context, 1))
_flush_addr_make_sys((virt_addr), (context), 1)
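/* Worked example (illustrative, derived from the masks above): for
 * virt_addr = 0x1000 and context = 5, flush_addr_make_sys() yields
 * 0x1000 | (5UL << FLUSH_ADDR_CONTEXT_SHIFT), i.e. the VA stays in
 * bits [47:0] and the context lands in bits [61:50], while
 * flush_addr_make_ss() additionally sets the FLUSH_ADDR_ROOT bit. */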
/*
* CACHE(s) flush operations
@ -644,14 +596,8 @@ typedef e2k_addr_t flush_addr_t;
flush_op_set_type(flush_op, _FLUSH_WRITE_BACK_CACHE_L12_OP)
#define flush_op_set_cache_all(flush_op) \
flush_op_set_write_back_cache_L12(flush_op)
#define _flush_op_invalidate_cache_L12 ((long)_FLUSH_INVALIDATE_CACHE_L12_OP)
#define _flush_op_write_back_cache_L12 ((long)_FLUSH_WRITE_BACK_CACHE_L12_OP)
#define _flush_op_cache_all _flush_op_write_back_cache_L12
#define flush_op_invalidate_cache_L12 \
__flush_op(_flush_op_invalidate_cache_L12)
#define flush_op_write_back_cache_L12 \
__flush_op(_flush_op_write_back_cache_L12)
#define flush_op_cache_all flush_op_write_back_cache_L12
#define flush_op_invalidate_cache_L12 ((long)_FLUSH_INVALIDATE_CACHE_L12_OP)
#define flush_op_write_back_cache_L12 ((long)_FLUSH_WRITE_BACK_CACHE_L12_OP)
/*
* ICACHE/TLB flush operations
@ -666,10 +612,8 @@ typedef e2k_addr_t flush_addr_t;
flush_op_set_type(flush_op, _FLUSH_ICACHE_ALL_OP)
#define flush_op_set_tlb_all(flush_op) \
flush_op_set_type(flush_op, _FLUSH_TLB_ALL_OP)
#define _flush_op_icache_all ((long)_FLUSH_ICACHE_ALL_OP)
#define _flush_op_tlb_all ((long)_FLUSH_TLB_ALL_OP)
#define flush_op_icache_all __flush_op(_flush_op_icache_all)
#define flush_op_tlb_all __flush_op(_flush_op_tlb_all)
#define flush_op_icache_all ((long)_FLUSH_ICACHE_ALL_OP)
#define flush_op_tlb_all ((long)_FLUSH_TLB_ALL_OP)
/*
@ -839,7 +783,7 @@ typedef union {
u64 sf0 : 1;
u64 hw0 : 1;
u64 t0 : 1;
u64 __x0 : 1;
u64 : 1;
u64 v1 : 1;
u64 root1 : 1;
u64 rw1 : 2;
@ -850,7 +794,7 @@ typedef union {
u64 sf1 : 1;
u64 hw1 : 1;
u64 t1 : 1;
u64 __x1 : 1;
u64 : 1;
u64 v2 : 1;
u64 root2 : 1;
u64 rw2 : 2;
@ -861,7 +805,7 @@ typedef union {
u64 sf2 : 1;
u64 hw2 : 1;
u64 t2 : 1;
u64 __x2 : 1;
u64 : 1;
u64 v3 : 1;
u64 root3 : 1;
u64 rw3 : 2;
@ -872,56 +816,9 @@ typedef union {
u64 sf3 : 1;
u64 hw3 : 1;
u64 t3 : 1;
u64 __x3 : 1;
u64 : 1;
u64 gm : 1;
};
struct {
u64 v0 : 1;
u64 root0 : 1;
u64 rw0 : 2;
u64 lng0 : 3;
u64 sync0 : 1;
u64 spec0 : 1;
u64 ap0 : 1;
u64 sf0 : 1;
u64 hw0 : 1;
u64 t0 : 1;
u64 __x0 : 1;
u64 v1 : 1;
u64 root1 : 1;
u64 rw1 : 2;
u64 lng1 : 3;
u64 sync1 : 1;
u64 spec1 : 1;
u64 ap1 : 1;
u64 sf1 : 1;
u64 hw1 : 1;
u64 t1 : 1;
u64 __x1 : 1;
u64 v2 : 1;
u64 root2 : 1;
u64 rw2 : 2;
u64 lng2 : 3;
u64 sync2 : 1;
u64 spec2 : 1;
u64 ap2 : 1;
u64 sf2 : 1;
u64 hw2 : 1;
u64 t2 : 1;
u64 __x2 : 1;
u64 v3 : 1;
u64 root3 : 1;
u64 rw3 : 2;
u64 lng3 : 3;
u64 sync3 : 1;
u64 spec3 : 1;
u64 ap3 : 1;
u64 sf3 : 1;
u64 hw3 : 1;
u64 t3 : 1;
u64 __x3 : 1;
u64 gm : 1;
} fields;
u64 word;
} e2k_ddbcr_t;
#define DDBCR_reg word

View File

@ -1,8 +1,9 @@
#ifndef _E2K_MMU_TYPES_H_
#define _E2K_MMU_TYPES_H_
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/mas.h>
#include <uapi/asm/iset_ver.h>
#ifndef __ASSEMBLY__
@ -132,6 +133,12 @@ typedef struct { pgprotval_t pgprot; } pgprot_t;
#define E2K_PUD_LEVEL_NUM 3 /* level number of native pud */
#define E2K_PGD_LEVEL_NUM 4 /* level number of native pgd */
#define E2K_PAGES_LEVEL_MASK (1 << E2K_PAGES_LEVEL_NUM)
#define E2K_PTE_LEVEL_MASK (1 << E2K_PTE_LEVEL_NUM)
#define E2K_PMD_LEVEL_MASK (1 << E2K_PMD_LEVEL_NUM)
#define E2K_PUD_LEVEL_MASK (1 << E2K_PUD_LEVEL_NUM)
#define E2K_PGD_LEVEL_MASK (1 << E2K_PGD_LEVEL_NUM)
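/* Illustrative note (not in the original header): these masks are meant to
 * be OR-ed into a bit set describing which page table levels an operation
 * touches, e.g. a PMD-mapped range could be described as
 * (E2K_PTE_LEVEL_MASK | E2K_PMD_LEVEL_MASK); tying this to the levels_mask
 * argument of flush_tlb_mm_range() seen elsewhere in this patch is an
 * assumption. */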
/* max level # on which can map huge pages: pmd, pud */
#define MAX_HUGE_PAGES_LEVEL E2K_PUD_LEVEL_NUM
@ -501,18 +508,68 @@ typedef union mmu_tc_cond_dword {
} mmu_tc_cond_dword_t;
typedef union {
struct {
u64 dst :10; // [0-9]
u64 opcode :6; // [10-15]
u64 r0 :1; // [16]
u64 store :1; // [17]
u64 mode_80 :1; // [18]
u64 s_f :1; // [19]
u64 mas :7; // [20-26]
u64 root :1; // [27]
u64 scal :1; // [28]
u64 sru :1; // [29]
u64 spec :1; // [30]
u64 pm :1; // [31]
u64 chan :2; // [32-33]
u64 r1 :1; // [34]
u64 fault_type :13; // [35-47]
u64 miss_lvl :2; // [48-49]
u64 num_align :1; // [50]
u64 empt :1; // [51]
u64 clw :1; // [52]
u64 dst_rcv :10; // [53-62]
u64 rcv :1; // [63]
};
struct {
u64 address :8; // [0-7]
u64 vr :1; // [8]
u64 vl :1; // [9]
u64 fmt :3; // [10-12]
/* Be careful: npsp=1 => access is not protected,
* but npsp=0 does not mean that access is protected. */
u64 npsp :1; // [13]
u64 fmtc :2; // [14-15]
u64 ___x1 :19; // [34-16]
u64 global_sp :1; /* [35] */
u64 page_bound :1; /* [36] */
u64 exc_mem_lock :1; /* [37] */
u64 ph_pr_page :1; /* [38] */
u64 io_page :1; /* [39] */
u64 isys_page :1; /* [40] */
u64 prot_page :1; /* [41] */
u64 priv_page :1; /* [42] */
u64 illegal_page :1; /* [43] */
u64 nwrite_page :1; /* [44] */
u64 page_miss :1; /* [45] */
u64 ph_bound :1; /* [46] */
u64 intl_res_bits :1; /* [47] */
u64 ___x0 :5; /* [52:48] */
u64 dst_ind :8; /* [60:53] */
u64 ___x2 :3; /* [63-61] */
};
u64 word;
union mmu_tc_cond_dword fields;
} tc_cond_t;
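/* Added note (interpretation, not in the original source): both anonymous
 * structs overlay the same 64-bit word - the first gives the raw layout
 * (dst, opcode, mas, fault_type [35-47], ...), while the second names the
 * individual fault_type bits (global_sp ... intl_res_bits in [35]-[47])
 * and re-interprets the low bits as address/vr/vl/fmt/npsp/fmtc. */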
#define TC_COND_FMT_FULL(cond) (AS(cond).fmt | (AS(cond).fmtc << 3))
#define TC_COND_FMT_FULL(cond) (cond.fmt | (cond.fmtc << 3))
static inline bool tc_cond_is_special_mmu_aau(tc_cond_t cond)
{
unsigned int mas = AS(cond).mas;
int chan = AS(cond).chan;
int store = AS(cond).store;
int spec_mode = AS(cond).spec;
unsigned int mas = cond.mas;
int chan = cond.chan;
int store = cond.store;
int spec_mode = cond.spec;
if (unlikely(is_mas_special_mmu_aau(mas) && (store ||
!store && !spec_mode && (chan == 1 || chan == 3))))
@ -523,36 +580,36 @@ static inline bool tc_cond_is_special_mmu_aau(tc_cond_t cond)
static inline bool tc_cond_is_check_ld(tc_cond_t cond)
{
unsigned int mas = AS(cond).mas;
int store = AS(cond).store;
int spec_mode = AS(cond).spec;
unsigned int mas = cond.mas;
int store = cond.store;
int spec_mode = cond.spec;
return is_mas_check(mas) && !spec_mode && !store;
}
static inline bool tc_cond_is_check_unlock_ld(tc_cond_t cond)
{
unsigned int mas = AS(cond).mas;
int store = AS(cond).store;
int spec_mode = AS(cond).spec;
unsigned int mas = cond.mas;
int store = cond.store;
int spec_mode = cond.spec;
return is_mas_check_unlock(mas) && !spec_mode && !store;
}
static inline bool tc_cond_is_lock_check_ld(tc_cond_t cond)
{
unsigned int mas = AS(cond).mas;
int store = AS(cond).store;
int spec_mode = AS(cond).spec;
unsigned int mas = cond.mas;
int store = cond.store;
int spec_mode = cond.spec;
return is_mas_lock_check(mas) && spec_mode && !store;
}
static inline bool tc_cond_is_spec_lock_check_ld(tc_cond_t cond)
{
unsigned int mas = AS(cond).mas;
int store = AS(cond).store;
int spec_mode = AS(cond).spec;
unsigned int mas = cond.mas;
int store = cond.store;
int spec_mode = cond.spec;
return is_mas_spec_lock_check(mas) && spec_mode && !store;
}
@ -569,7 +626,7 @@ static inline int tc_cond_to_size(tc_cond_t cond)
if (fmt == LDST_QP_FMT || fmt == TC_FMT_QPWORD_Q) {
size = 16;
} else if (fmt == LDST_QWORD_FMT || fmt == TC_FMT_QWORD_QP) {
if (AS(cond).chan == 0 || AS(cond).chan == 2)
if (cond.chan == 0 || cond.chan == 2)
size = 16;
else
size = 8;
@ -621,11 +678,11 @@ ldst_chan_num_to_chan_opc(int chan_opc)
static inline bool
tc_cond_load_has_store_semantics(tc_cond_t condition, unsigned iset_ver)
{
const unsigned mas = AS(condition).mas;
const unsigned mas = condition.mas;
const unsigned mod = (mas & MAS_MOD_MASK) >> MAS_MOD_SHIFT;
const unsigned chan = AS(condition).chan;
const bool root = AS(condition).root;
const bool spec = AS(condition).spec;
const unsigned chan = condition.chan;
const bool root = condition.root;
const bool spec = condition.spec;
if (chan != 0)
return false;
@ -646,9 +703,9 @@ tc_cond_load_has_store_semantics(tc_cond_t condition, unsigned iset_ver)
static inline bool
tc_cond_is_store(tc_cond_t condition, unsigned iset_ver)
{
const unsigned mas = AS(condition).mas;
const unsigned mas = condition.mas;
if (AS(condition).store && (mas != MAS_DCACHE_LINE_FLUSH))
if (condition.store && (mas != MAS_DCACHE_LINE_FLUSH))
return true;
return tc_cond_load_has_store_semantics(condition, iset_ver);
}
@ -706,7 +763,7 @@ typedef struct {
static inline bool is_record_asynchronous(tc_cond_t cond)
{
/* We use bitwise OR for performance */
return AS(cond).mode_80 | AS(cond).s_f | AS(cond).sru | AS(cond).clw;
return cond.mode_80 | cond.s_f | cond.sru | cond.clw;
}
/**

View File

@ -1,27 +0,0 @@
#ifndef _E2K_MSGBUF_H_
#define _E2K_MSGBUF_H_
/*
* The msqid64_ds structure for E2K architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 2 miscellaneous 64-bit values
*/
struct msqid64_ds {
struct ipc64_perm msg_perm;
__kernel_time_t msg_stime; /* last msgsnd time */
__kernel_time_t msg_rtime; /* last msgrcv time */
__kernel_time_t msg_ctime; /* last change time */
unsigned long msg_cbytes; /* current number of bytes on queue */
unsigned long msg_qnum; /* number of messages in queue */
unsigned long msg_qbytes; /* max number of bytes on queue */
__kernel_pid_t msg_lspid; /* pid of last msgsnd */
__kernel_pid_t msg_lrpid; /* last receive pid */
unsigned long __unused1;
unsigned long __unused2;
};
#endif /* _E2K_MSGBUF_H_ */

View File

@ -19,7 +19,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/alternative.h>
#include <asm/cpu_regs_types.h>
#include <asm/e2k_api.h>
#include <asm/aau_regs_types.h>
@ -137,7 +136,7 @@ static __always_inline void native_write_aasr_reg_value(u32 reg_value)
{
NATIVE_SET_AAU_AASR(reg_value);
}
static inline u32 native_read_aafstr_reg_value(void)
static __always_inline u32 native_read_aafstr_reg_value(void)
{
return NATIVE_GET_AAU_AAFSTR();
}
@ -1165,7 +1164,8 @@ native_write_aaldas_reg_value(int AALDAs_no, u32 l_value, u32 r_value)
r_value = 0;
}
}
static inline void native_read_aaldm_reg_value(u32 *lo_value, u32 *hi_value)
static __always_inline void native_read_aaldm_reg_value(u32 *lo_value,
u32 *hi_value)
{
u32 value1, value2;
@ -1174,19 +1174,20 @@ static inline void native_read_aaldm_reg_value(u32 *lo_value, u32 *hi_value)
*hi_value = value2;
}
static __always_inline void native_write_aaldm_reg_value(u32 lo_value,
u32 hi_value)
u32 hi_value)
{
NATIVE_SET_AAU_AALDM(lo_value, hi_value);
}
static inline void native_read_aaldm_reg(e2k_aaldm_t *aaldm)
static __always_inline void native_read_aaldm_reg(e2k_aaldm_t *aaldm)
{
native_read_aaldm_reg_value(&aaldm->lo, &aaldm->hi);
}
static __always_inline void native_write_aaldm_reg(e2k_aaldm_t *aaldm)
static __always_inline void native_write_aaldm_reg(e2k_aaldm_t aaldm)
{
native_write_aaldm_reg_value(aaldm->lo, aaldm->hi);
native_write_aaldm_reg_value(aaldm.lo, aaldm.hi);
}
static inline void native_read_aaldv_reg_value(u32 *lo_value, u32 *hi_value)
static __always_inline void native_read_aaldv_reg_value(u32 *lo_value,
u32 *hi_value)
{
u32 value1, value2;
@ -1195,17 +1196,17 @@ static inline void native_read_aaldv_reg_value(u32 *lo_value, u32 *hi_value)
*hi_value = value2;
}
static __always_inline void native_write_aaldv_reg_value(u32 lo_value,
u32 hi_value)
u32 hi_value)
{
NATIVE_SET_AAU_AALDV(lo_value, hi_value);
}
static inline void native_read_aaldv_reg(e2k_aaldv_t *aaldv)
static __always_inline void native_read_aaldv_reg(e2k_aaldv_t *aaldv)
{
native_read_aaldv_reg_value(&aaldv->lo, &aaldv->hi);
}
static __always_inline void native_write_aaldv_reg(e2k_aaldv_t *aaldv)
static __always_inline void native_write_aaldv_reg(e2k_aaldv_t aaldv)
{
native_write_aaldv_reg_value(aaldv->lo, aaldv->hi);
native_write_aaldv_reg_value(aaldv.lo, aaldv.hi);
}
static inline void native_read_aad_reg(int AAD_no, e2k_aadj_t *mem_p)
@ -1449,7 +1450,7 @@ static __always_inline void native_read_aads_4_reg(int AADs_no, e2k_aadj_t *mem_
}
static __always_inline void native_write_aads_4_reg(int AADs_no,
e2k_aadj_t *mem_p)
e2k_aadj_t *mem_p)
{
switch (AADs_no) {
case 0:

View File

@ -150,7 +150,7 @@ native_invalidate_CACHE_L12(void)
unsigned long flags;
DebugMR("Flush : Invalidate all CACHEs (op 0x%lx)\n",
_flush_op_invalidate_cache_L12);
flush_op_invalidate_cache_L12);
/* Invalidate operation was removed in E2S */
invalidate_supported = NATIVE_IS_MACHINE_ES2;
@ -158,9 +158,9 @@ native_invalidate_CACHE_L12(void)
raw_all_irq_save(flags);
E2K_WAIT_MA;
if (invalidate_supported)
NATIVE_FLUSH_CACHE_L12(_flush_op_invalidate_cache_L12);
NATIVE_FLUSH_CACHE_L12(flush_op_invalidate_cache_L12);
else
NATIVE_FLUSH_CACHE_L12(_flush_op_write_back_cache_L12);
NATIVE_FLUSH_CACHE_L12(flush_op_write_back_cache_L12);
E2K_WAIT_FLUSH;
raw_all_irq_restore(flags);
}
@ -171,10 +171,10 @@ native_write_back_CACHE_L12(void)
unsigned long flags;
DebugMR("Flush : Write back all CACHEs (op 0x%lx)\n",
_flush_op_write_back_cache_L12);
flush_op_write_back_cache_L12);
raw_all_irq_save(flags);
E2K_WAIT_MA;
NATIVE_FLUSH_CACHE_L12(_flush_op_write_back_cache_L12);
NATIVE_FLUSH_CACHE_L12(flush_op_write_back_cache_L12);
E2K_WAIT_FLUSH;
raw_all_irq_restore(flags);
}
@ -191,10 +191,10 @@ native_flush_TLB_all(void)
{
unsigned long flags;
DebugMR("Flush all TLBs (op 0x%lx)\n", _flush_op_tlb_all);
DebugMR("Flush all TLBs (op 0x%lx)\n", flush_op_tlb_all);
raw_all_irq_save(flags);
E2K_WAIT_ST;
NATIVE_FLUSH_TLB_ALL(_flush_op_tlb_all);
NATIVE_FLUSH_TLB_ALL(flush_op_tlb_all);
E2K_WAIT(_fl_c | _ma_c);
raw_all_irq_restore(flags);
}
@ -209,9 +209,9 @@ native_flush_TLB_all(void)
static inline void
native_flush_ICACHE_all(void)
{
DebugMR("Flush all ICACHE op 0x%lx\n", _flush_op_icache_all);
DebugMR("Flush all ICACHE op 0x%lx\n", flush_op_icache_all);
E2K_WAIT_ST;
NATIVE_FLUSH_ICACHE_ALL(_flush_op_icache_all);
NATIVE_FLUSH_ICACHE_ALL(flush_op_icache_all);
E2K_WAIT_FLUSH;
}

View File

@ -46,6 +46,8 @@ extern void e2k_remove_swap(struct swap_info_struct *sis);
extern void restore_tags_for_data(u64 *, u8 *);
extern u32 save_tags_from_data(u64 *, u8 *);
extern void get_page_with_tags(u8 *, u8 *, int *);
extern u8 *alloc_page_with_tags(void);
extern void free_page_with_tags(u8 *);
extern int check_tags(unsigned type, unsigned long beg, unsigned long end);
#endif //_E2K_PAGE_IO_H

View File

@ -1,12 +0,0 @@
/* $Id: param.h,v 1.4 2008/12/19 12:44:14 atic Exp $ */
#ifndef _E2K_PARAM_H_
#define _E2K_PARAM_H_
#include <uapi/asm/param.h>
# define HZ CONFIG_HZ /* Internal kernel timer frequency */
# define USER_HZ HZ /* some user interfaces are in */
/* "ticks" */
# define CLOCKS_PER_SEC (USER_HZ)
#endif /* _E2K_PARAM_H_ */

View File

@ -148,20 +148,20 @@
} \
})
#define PV_GET_AAU_CONTEXT_V2(aau_context) \
#define PV_GET_AAU_CONTEXT_V2(aau_context, aasr) \
({ \
if (!paravirt_enabled()) { \
NATIVE_GET_AAU_CONTEXT_V2(aau_context); \
NATIVE_GET_AAU_CONTEXT_V2(aau_context, aasr); \
} else { \
kvm_get_aau_context(aau_context); \
kvm_get_aau_context(aau_context, aasr); \
} \
})
#define PV_GET_AAU_CONTEXT_V5(aau_context) \
#define PV_GET_AAU_CONTEXT_V5(aau_context, aasr) \
({ \
if (!paravirt_enabled()) { \
NATIVE_GET_AAU_CONTEXT_V5(aau_context); \
NATIVE_GET_AAU_CONTEXT_V5(aau_context, aasr); \
} else { \
kvm_get_aau_context(aau_context); \
kvm_get_aau_context(aau_context, aasr); \
} \
})

View File

@ -370,7 +370,7 @@ pv_read_aaldm_reg(e2k_aaldm_t *aaldm)
kvm_read_aaldm_reg(aaldm);
}
static inline void
pv_write_aaldm_reg(e2k_aaldm_t *aaldm)
pv_write_aaldm_reg(e2k_aaldm_t aaldm)
{
if (!paravirt_enabled())
native_write_aaldm_reg(aaldm);
@ -402,7 +402,7 @@ pv_read_aaldv_reg(e2k_aaldv_t *aaldv)
kvm_read_aaldv_reg(aaldv);
}
static inline void
pv_write_aaldv_reg(e2k_aaldv_t *aaldv)
pv_write_aaldv_reg(e2k_aaldv_t aaldv)
{
if (!paravirt_enabled())
native_write_aaldv_reg(aaldv);
@ -491,9 +491,9 @@ read_aaldm_reg(e2k_aaldm_t *aaldm)
pv_read_aaldm_reg_value(&aaldm->lo, &aaldm->hi);
}
static inline void
write_aaldm_reg(e2k_aaldm_t *aaldm)
write_aaldm_reg(e2k_aaldm_t aaldm)
{
pv_write_aaldm_reg_value(aaldm->lo, aaldm->hi);
pv_write_aaldm_reg_value(aaldm.lo, aaldm.hi);
}
static inline void
read_aaldv_reg(e2k_aaldv_t *aaldv)
@ -501,9 +501,9 @@ read_aaldv_reg(e2k_aaldv_t *aaldv)
pv_read_aaldv_reg_value(&aaldv->lo, &aaldv->hi);
}
static inline void
write_aaldv_reg(e2k_aaldv_t *aaldv)
write_aaldv_reg(e2k_aaldv_t aaldv)
{
pv_write_aaldm_reg_value(aaldv->lo, aaldv->hi);
pv_write_aaldm_reg_value(aaldv.lo, aaldv.hi);
}
#endif /* CONFIG_PARAVIRT_GUEST */

View File

@ -4,9 +4,10 @@
#ifdef __KERNEL__
#ifdef CONFIG_PARAVIRT_GUEST
static inline void
static inline int
get_mm_notifier_locked(struct mm_struct *mm)
{
return 0;
}
#endif /* CONFIG_PARAVIRT_GUEST */

View File

@ -409,16 +409,6 @@ typedef struct pv_cpu_ops {
void (*csd_unlock)(struct __call_single_data *data);
void (*setup_local_pic_virq)(unsigned int cpuid);
void (*startup_local_pic_virq)(unsigned int cpuid);
void (*smp_flush_tlb_all)(void);
void (*smp_flush_tlb_mm)(struct mm_struct *mm);
void (*smp_flush_tlb_page)(struct vm_area_struct *vma,
e2k_addr_t addr);
void (*smp_flush_tlb_range)(struct mm_struct *mm,
e2k_addr_t start, e2k_addr_t end);
void (*smp_flush_pmd_tlb_range)(struct mm_struct *mm,
e2k_addr_t start, e2k_addr_t end);
void (*smp_flush_tlb_range_and_pgtables)(struct mm_struct *mm,
e2k_addr_t start, e2k_addr_t end);
void (*smp_flush_icache_range)(e2k_addr_t start, e2k_addr_t end);
void (*smp_flush_icache_range_array)(
void *icache_range_arr);
@ -427,6 +417,13 @@ typedef struct pv_cpu_ops {
void (*smp_flush_icache_all)(void);
void (*smp_flush_icache_kernel_line)(e2k_addr_t addr);
#endif /* CONFIG_SMP */
void (*flush_tlb_all)(void);
void (*flush_tlb_mm)(struct mm_struct *mm);
void (*flush_tlb_page)(struct vm_area_struct *vma,
e2k_addr_t addr);
void (*flush_tlb_mm_range)(struct mm_struct *mm,
unsigned long start, unsigned long end,
unsigned long stride, u32 levels_mask);
int (*host_printk)(const char *fmt, ...);
void (*arch_spin_lock_slow)(void *lock);

View File

@ -9,77 +9,46 @@
#include <linux/mm.h>
#include <asm/paravirt/pv_ops.h>
#ifdef CONFIG_SMP
static inline void
pv_smp_flush_tlb_all(void)
static inline void pv_flush_tlb_all(void)
{
pv_cpu_ops.smp_flush_tlb_all();
pv_cpu_ops.flush_tlb_all();
}
static inline void pv_flush_tlb_mm(struct mm_struct *mm)
{
pv_cpu_ops.flush_tlb_mm(mm);
}
static inline void
pv_smp_flush_tlb_mm(struct mm_struct *mm)
pv_flush_tlb_page(struct vm_area_struct *vma, e2k_addr_t addr)
{
pv_cpu_ops.smp_flush_tlb_mm(mm);
pv_cpu_ops.flush_tlb_page(vma, addr);
}
static inline void
pv_smp_flush_tlb_page(struct vm_area_struct *vma, e2k_addr_t addr)
static inline void pv_flush_tlb_mm_range(struct mm_struct *mm,
unsigned long start, unsigned long end,
unsigned long stride, u32 levels_mask)
{
pv_cpu_ops.smp_flush_tlb_page(vma, addr);
pv_cpu_ops.flush_tlb_mm_range(mm, start, end, stride, levels_mask);
}
static inline void
pv_smp_flush_tlb_range(struct mm_struct *mm,
e2k_addr_t start, e2k_addr_t end)
{
pv_cpu_ops.smp_flush_tlb_range(mm, start, end);
}
static inline void
pv_smp_flush_pmd_tlb_range(struct mm_struct *mm,
e2k_addr_t start, e2k_addr_t end)
{
pv_cpu_ops.smp_flush_pmd_tlb_range(mm, start, end);
}
static inline void
pv_smp_flush_tlb_range_and_pgtables(struct mm_struct *mm,
e2k_addr_t start, e2k_addr_t end)
{
pv_cpu_ops.smp_flush_tlb_range_and_pgtables(mm, start, end);
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PARAVIRT_GUEST
/* it is paravirtualized host and guest kernel */
#ifdef CONFIG_SMP
static inline void
smp_flush_tlb_all(void)
static inline void flush_tlb_all(void)
{
pv_smp_flush_tlb_all();
pv_flush_tlb_all();
}
static inline void
smp_flush_tlb_mm(struct mm_struct *mm)
static inline void flush_tlb_mm(struct mm_struct *mm)
{
pv_smp_flush_tlb_mm(mm);
pv_flush_tlb_mm(mm);
}
static inline void
smp_flush_tlb_page(struct vm_area_struct *vma, e2k_addr_t addr)
static inline void flush_tlb_page(struct vm_area_struct *vma, e2k_addr_t addr)
{
pv_smp_flush_tlb_page(vma, addr);
pv_flush_tlb_page(vma, addr);
}
static inline void
smp_flush_tlb_range(struct mm_struct *mm, e2k_addr_t start, e2k_addr_t end)
static inline void flush_tlb_mm_range(struct mm_struct *mm,
unsigned long start, unsigned long end,
unsigned long stride, u32 levels_mask)
{
pv_smp_flush_tlb_range(mm, start, end);
pv_flush_tlb_mm_range(mm, start, end, stride, levels_mask);
}
static inline void
smp_flush_pmd_tlb_range(struct mm_struct *mm, e2k_addr_t start, e2k_addr_t end)
{
pv_smp_flush_pmd_tlb_range(mm, start, end);
}
static inline void
smp_flush_tlb_range_and_pgtables(struct mm_struct *mm,
e2k_addr_t start, e2k_addr_t end)
{
pv_smp_flush_tlb_range_and_pgtables(mm, start, end);
}
#endif /* CONFIG_SMP */
#endif /* CONFIG_PARAVIRT_GUEST */
#endif /* _E2K_PARAVIRT_TLBFLUSH_H */

View File

@ -250,7 +250,7 @@ pgd_populate_cpu_root_pt(struct mm_struct *mm, pgd_t *pgd)
}
if (pgd_val(*cpu_pgd) != pgd_val(*pgd)) {
*cpu_pgd = *pgd;
__flush_tlb_page(mm, (e2k_addr_t) cpu_pgd);
local_flush_tlb_page(mm, (e2k_addr_t) cpu_pgd);
}
DebugPT("CPU #%d set kernel root pgd %px to 0x%lx\n",
smp_processor_id(), cpu_pgd, pgd_val(*cpu_pgd));

View File

@ -272,27 +272,27 @@ mmu_clear_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags,
else
return clear_pte_val_v2_flags(pte_val, uni_flags);
}
static inline pteval_t
static __must_check inline pteval_t
fill_pte_val_flags(const uni_pteval_t uni_flags)
{
return mmu_fill_pte_val_flags(uni_flags, MMU_IS_PT_V6());
}
static inline pteval_t
static __must_check inline pteval_t
get_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags)
{
return mmu_get_pte_val_flags(pte_val, uni_flags, MMU_IS_PT_V6());
}
static inline bool
static __must_check inline bool
test_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags)
{
return mmu_test_pte_val_flags(pte_val, uni_flags, MMU_IS_PT_V6());
}
static inline pteval_t
static __must_check inline pteval_t
set_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags)
{
return mmu_set_pte_val_flags(pte_val, uni_flags, MMU_IS_PT_V6());
}
static inline pteval_t
static __must_check inline pteval_t
clear_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags)
{
return mmu_clear_pte_val_flags(pte_val, uni_flags, MMU_IS_PT_V6());

View File

@ -1 +0,0 @@
#include <asm-generic/poll.h>

View File

@ -1,11 +0,0 @@
#ifndef _E2K_POSIX_TYPES_H_
#define _E2K_POSIX_TYPES_H_
/*
* This file is generally used by user-level software, so you need to
* be a little careful about namespace pollution etc.
*/
#include <asm-generic/posix_types.h>
#endif /* _E2K_POSIX_TYPES_H_ */

View File

@ -0,0 +1,101 @@
#pragma once
#include <linux/thread_info.h>
register unsigned long long __cpu_preempt_reg DO_ASM_GET_GREG_MEMONIC(
SMP_CPU_ID_GREG);
#define PREEMPT_COUNTER_SHIFT 33ull
#define PREEMPT_NEED_RESCHED 0x100000000ull
#define PREEMPT_ENABLED (0)
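/* Implied layout of the per-cpu global register (a reading of the constants
 * above and of raw_smp_processor_id()/set_smp_processor_id() elsewhere in
 * this patch; not spelled out in the original header):
 *
 *   bits [31:0]  - this CPU's id
 *   bit  [32]    - PREEMPT_NEED_RESCHED
 *   bits [63:33] - the preempt count itself (hence PREEMPT_COUNTER_SHIFT)
 */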
static __always_inline int preempt_count(void)
{
return __cpu_preempt_reg >> PREEMPT_COUNTER_SHIFT;
}
/*
* must be macros to avoid header recursion hell
*/
#define init_task_preempt_count(p) do { } while (0)
#define init_idle_preempt_count(p, cpu) do { \
__cpu_preempt_reg = (u64) (u32) __cpu_preempt_reg; \
} while (0)
static __always_inline void set_preempt_need_resched(void)
{
__cpu_preempt_reg |= PREEMPT_NEED_RESCHED;
}
static __always_inline void clear_preempt_need_resched(void)
{
__cpu_preempt_reg &= ~PREEMPT_NEED_RESCHED;
}
static __always_inline bool test_preempt_need_resched(void)
{
return __cpu_preempt_reg & PREEMPT_NEED_RESCHED;
}
/*
* The various preempt_count set/add/sub methods
*
* Careful here: an interrupt can arrive at any moment and set or clear
* the PREEMPT_NEED_RESCHED flag. We want to change preempt_count
* in a safe way so that the flag set in interrupt is not lost.
*/
static __always_inline void preempt_count_set(int pc)
{
E2K_INSFD_ATOMIC(pc,
31 /*shift*/ | (33 /*size*/ << 6) | (1 /*me1hi*/ << 13),
__cpu_preempt_reg);
}
static __always_inline void __preempt_count_add(int val)
{
E2K_ADDD_ATOMIC(__cpu_preempt_reg, (u64) (u32) val << PREEMPT_COUNTER_SHIFT);
}
static __always_inline void __preempt_count_sub(int val)
{
E2K_SUBD_ATOMIC(__cpu_preempt_reg, (u64) (u32) val << PREEMPT_COUNTER_SHIFT);
}
static __always_inline bool __preempt_count_dec_and_test(void)
{
u64 old;
E2K_SUBD_ATOMIC__SHRD32(__cpu_preempt_reg, 1ull << PREEMPT_COUNTER_SHIFT, old);
return unlikely(old == 1);
}
static __always_inline void init_preempt_count_resched(int pc, int resched)
{
__cpu_preempt_reg = (u64) (u32) __cpu_preempt_reg;
preempt_count_set(pc);
if (resched)
set_preempt_need_resched();
}
/*
* Returns true when we need to resched and can (barring IRQ state).
*/
static __always_inline bool should_resched(int preempt_offset)
{
return unlikely((__cpu_preempt_reg >> 32ull) ==
(((u64) (u32) preempt_offset << 1) | 1));
}
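/* Added note (interpretation, not from the original source): the single
 * comparison above folds two checks into one - the preempt count in bits
 * [63:33] must equal preempt_offset and the PREEMPT_NEED_RESCHED bit [32]
 * must be set, since (reg >> 32) == ((preempt_offset << 1) | 1) holds
 * exactly in that case. */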
#ifdef CONFIG_PREEMPTION
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPTION */

View File

@ -68,8 +68,7 @@ extern cpuinfo_e2k_t cpu_data[NR_CPUS];
#define INVALID_IO_BITMAP_OFFSET 0x8000
typedef struct thread_struct {
#ifndef CONFIG_CPU_HAS_FILL_INSTRUCTION
/* Used as a temporary area */
/* Used as a temporary area for !CPU_FEAT_FILL_INSTRUCTION case */
struct {
e2k_cr0_hi_t cr0_hi;
e2k_cr1_lo_t cr1_lo;
@ -84,7 +83,7 @@ typedef struct thread_struct {
bool from_paravirt_guest;
# endif
} fill;
#endif
u32 context; /* context of running process */
struct sw_regs sw_regs; /* switch regs */

View File

@ -149,6 +149,7 @@ typedef struct pt_regs {
int sys_num; /* to restart sys_call */
int kernel_entry;
union pt_regs_flags flags;
e2k_aasr_t aasr;
e2k_ctpr_t ctpr1; /* CTPRj for control transfer */
e2k_ctpr_t ctpr2;
e2k_ctpr_t ctpr3;
@ -231,23 +232,6 @@ pt_regs_to_trap_regs(struct pt_regs *regs)
return PTR_ALIGN((void *) regs + sizeof(*regs), 8);
}
#ifdef CONFIG_USE_AAU
static inline e2k_aau_t *
pt_regs_to_aau_regs(struct pt_regs *regs)
{
struct trap_pt_regs *trap;
trap = pt_regs_to_trap_regs(regs);
return PTR_ALIGN((void *) trap + sizeof(*trap), 8);
}
#else /* ! CONFIG_USE_AAU */
static inline e2k_aau_t *
pt_regs_to_aau_regs(struct pt_regs *regs)
{
return NULL;
}
#endif
static inline bool
is_sys_call_pt_regs(struct pt_regs *regs)
{

View File

@ -1201,6 +1201,7 @@ NATIVE_SWITCH_TO_KERNEL_STACK(e2k_addr_t ps_base, e2k_size_t ps_size,
e2k_addr_t pcs_base, e2k_size_t pcs_size,
e2k_addr_t ds_base, e2k_size_t ds_size)
{
pcsp_struct_t pcsp = {{ 0 }};
e2k_rwap_lo_struct_t reg_lo;
e2k_rwap_hi_struct_t reg_hi;
e2k_rwap_lo_struct_t stack_reg_lo;
@ -1219,13 +1220,10 @@ NATIVE_SWITCH_TO_KERNEL_STACK(e2k_addr_t ps_base, e2k_size_t ps_size,
reg_hi.PSP_hi_size = ps_size;
reg_hi.PSP_hi_ind = 0;
NATIVE_NV_WRITE_PSP_REG(reg_hi, reg_lo);
reg_lo.PCSP_lo_half = 0;
reg_lo.PCSP_lo_base = pcs_base;
reg_lo._PCSP_lo_rw = E2K_PCSR_RW_PROTECTIONS;
reg_hi.PCSP_hi_half = 0;
reg_hi.PCSP_hi_size = pcs_size;
reg_hi.PCSP_hi_ind = 0;
NATIVE_NV_WRITE_PCSP_REG(reg_hi, reg_lo);
pcsp.base = pcs_base;
pcsp.size = pcs_size;
pcsp.rw = E2K_PCSR_RW_PROTECTIONS;
NATIVE_NV_WRITE_PCSP_REG(pcsp.hi, pcsp.lo);
/*

View File

@ -1,22 +0,0 @@
#ifndef _E2K_SEMBUF_H_
#define _E2K_SEMBUF_H_
/*
* The semid64_ds structure for E2K architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 2 miscellaneous 64-bit values
*/
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
__kernel_time_t sem_otime; /* last semop time */
__kernel_time_t sem_ctime; /* last change time */
unsigned long sem_nsems; /* no. of semaphores in array */
unsigned long __unused1;
unsigned long __unused2;
};
#endif /* _E2K_SEMBUF_H_ */

View File

@ -1,455 +1,2 @@
/*
* include/asm-i386/serial.h
*/
/*
* This assumes you have a 1.8432 MHz clock for your UART.
*
* It'd be nice if someone built a serial card with a 24.576 MHz
* clock, since the 16550A is capable of handling a top speed of 1.5
* megabits/second; but this requires the faster clock.
*/
#define BASE_BAUD ( 1843200 / 16 )
/* Standard COM flags (except for COM4, because of the 8514 problem) */
#ifdef CONFIG_SERIAL_DETECT_IRQ
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
#else
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
#endif
#ifdef CONFIG_SERIAL_MANY_PORTS
#define FOURPORT_FLAGS ASYNC_FOURPORT
#define ACCENT_FLAGS 0
#define BOCA_FLAGS 0
#define HUB6_FLAGS 0
#define RS_TABLE_SIZE 64
#else
#define RS_TABLE_SIZE
#endif
#define NS16550_SERIAL_PORT_0 0x3f8
#define NS16550_SERIAL_PORT_1 0x2f8
#define NS16550_SERIAL_PORT_2 0x3e8
#define NS16550_SERIAL_PORT_3 0x2e8
#define SERIAL_PORT_DFNS \
/* UART CLK PORT IRQ FLAGS */ \
{ 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
{ 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
{ 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
{ 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
#define AM85C30_RES_Tx_P 0x28
#define AM85C30_EXT_INT_ENAB 0x01
#define AM85C30_TxINT_ENAB 0x02
#define AM85C30_RxINT_MASK 0x18
/* AM85C30 WRITE Registers */
#define AM85C30_WR0 0x00
#define AM85C30_WR1 0x01
#define AM85C30_WR2 0x02
#define AM85C30_WR3 0x03
#define AM85C30_WR4 0x04
#define AM85C30_WR5 0x05
#define AM85C30_WR6 0x06
#define AM85C30_WR7 0x07
#define AM85C30_WR8 0x08
#define AM85C30_WR9 0x09
#define AM85C30_WR10 0x0a
#define AM85C30_WR11 0x0b
#define AM85C30_WR12 0x0c
#define AM85C30_WR13 0x0d
#define AM85C30_WR14 0x0e
#define AM85C30_WR15 0x0f
/* READ (Status) Registers */
#define AM85C30_RR0 0x00
#define AM85C30_RR1 0x01
#define AM85C30_RR2 0x02
#define AM85C30_RR3 0x03
#define AM85C30_RR8 0x08
#define AM85C30_RR10 0x0a
#define AM85C30_RR12 0x0c
#define AM85C30_RR13 0x0d
#define AM85C30_D0 (0x01 << 0)
#define AM85C30_D1 (0x01 << 1)
#define AM85C30_D2 (0x01 << 2)
#define AM85C30_D3 (0x01 << 3)
#define AM85C30_D4 (0x01 << 4)
#define AM85C30_D5 (0x01 << 5)
#define AM85C30_D6 (0x01 << 6)
#define AM85C30_D7 (0x01 << 7)
/* WR0 */
/* D2,D1,D0
* Register Access Pointer
*
* 000 - N0, [N8]*
* 001 - N1, [N9]*
* 010 - N2, [N10]*
* 011 - N3, [N11]*
* 100 - N4, [N12]*
* 101 - N5, [N13]*
* 110 - N6, [N14]*
* 111 - N7, [N15]*
*
* if Point High Register Group = 1
*
* D5,D4,D3
*
* SCC Command
*
* 000 - Null Code
* 001 - Point High Register Group
* 010 - Reset Ext/Status Interrupts
* 011 - Send Abort
* 100 - Enable Int. on Next Rx Character
* 101 - Reset Tx Int. Pending
* 110 - Error Reset
* 111 - Reset Highest IUS
*
* D7,D6
* SCC Command
*
* 00 - Null Code
* 01 - Reset Rx CRC Checker
* 10 - Reset Tx CRC Generator
* 11 - Reset Tx Underrun/EOM Latch
*/
/* WR1 */
/* D0
* Ext. Int. Enable
* D1
* Tx Int. Enable
* D2
* Parity is Special Condition
* D4,D3
* Rx Int Mode
*
* 00 - Rx Int Disable
* 01 - Rx Int on First Char. or Special Condition
* 10 - Int on All Rx Char. or Special Condition
* 11 - Rx Int. on Special Condition Only
* D5
* Wait/DMA Request on Receive/Transmit
* D6
* Wait/DMA Request Function
* D7
* Wait/DMA Request Enable
*/
/* WR2 */
/* D7 - D0
* Interrupt Vector
*/
/* WR3 */
/* D0
* Rx Enable
* D1
* Sync Character Load Inhibit
* D2
* Address Search Mode (SDLC)
* D3
* Rx CRC Enable
* D4
* Enter Hunt Mode
* D5
* Auto Enable
* D7,D6
*
* 00 - Rx 5 Bits / Character
* 01 - Rx 6 Bits / Character
* 10 - Rx 7 Bits / Character
* 11 - Rx 8 Bits / Character
*/
/* WR4 */
/* D0
* ParityEnable
* D1
* Parity Even(0) / Odd(1)
* D3,D2
*
* 00 - Sync Modes Enable
* 01 - 1 Stop Bit / Character
* 10 - 1.5 Stop Bits / Character
* 11 - 2 Stop Bits / Character
* D5,D4
*
* 00 - 8-Bit Sync Character
* 01 - 16-Bit Sync Character
* 10 - SDLC Mode
* 11 - External Sync Mode
* D7,D6
*
* 00 - X1 Clock Mode
* 01 - X16 Clock Mode
* 10 - X32 Clock Mode
* 11 - X64 Clock Mode
*/
/* WR5 */
/* D0
* Tx CRC Enable
* D1
* RTS
* D2
* SDLC-/CRC-16
* D3
* Tx Enable
* D4
* Send Break
* D6,D5
*
* 00 - Tx 5 Bits / Character
* 01 - Tx 6 Bits / Character
* 10 - Tx 7 Bits / Character
* 11 - Tx 8 Bits / Character
* D7
* DTR
*/
/* WR6 */
/* D5-D0
* xN constant
* D7,D6
* Reserved (not used in asynchronous mode)
*/
/* WR7 */
/* D6-D0
* Reserved (not used in asynchronous mode)
* D7
* xN Mode Enable
*/
/* WR8 */
/* D7-D0
* Transmit Buffer
*/
/* WR9 */
/* D0
* Vector Includes Status
* D1
* No Vector
* D2
* Disable Lower Chain
* D3
* Master Interrupt Enable
* D4
* Status High/Low_
* D5
* Interrupt Masking Without INTACK_
* D7-D6
*
* 00 - No Reset
* 01 - Channel B Reset
* 10 - Channel A Reset
* 11 - Force Hardware Reset
*/
/* WR10 */
/* D0
* 6 bit / 8 bit SYNC
* D1
* Loop Mode
* D2
* Abort/Flag on Underrun
* D3
* Mark/Flag Idle
* D4
* Go Active on Poll
* D6-D5
*
* 00 - NRZ
* 01 - NRZI
* 10 - FM1 (Transition = 1)
* 11 - FM0 (Transition = 0)
* D7
* CRC Preset '1' or '0'
*/
/* WR11 */
/* D1-D0
*
* 00 - TRxC Out = XTAL output
* 01 - TRxC Out = Transmit Clock
* 10 - TRxC Out = BRG output
* 11 - TRxC Out = DPLL output
* D2
* TRxC O/I
* D4-D3
*
* 00 - Transmit Clock = RTxC pin
* 01 - Transmit Clock = TRxC pin
* 10 - Transmit Clock = BRG output
* 11 - Transmit Clock = DPLL output
* D6-D5
*
* 00 - Receive Clock = RTxC pin
* 01 - Receive Clock = TRxC pin
* 10 - Receive Clock = BRG output
* 11 - Receive Clock = DPLL output
* D7
* RTxC XTAL / NO XTAL
*/
/* WR12 */
/* D7-D0
* Lower Byte of Time Constant
*/
/* WR13 */
/* D7-D0
* Upper Byte of Time Constant
*/
/* WR14 */
/* D0
* BRG Enable
* D1
* BRG Source
* D2
* DTR / REQUESTt Function
* D3
* Auto Echo
* D4
* Local Loopback
* D7-D5
*
* 000 - Null Command
* 001 - Enter Search Mode
* 010 - Reset Missing Clock
* 011 - Disable DPLL
* 100 - Set Source = BR Generator
* 101 - Set Source = RTxC_
* 110 - Set FM Mode
* 111 - Set NRZI Mode
*/
/* WR15 */
/* D0
* SDLC/HDLC Enhancement Enable
* D1
* Zero Count IE (Interrupt Enable)
* D2
* 10 * 19-bit Frame Status FIFO Enable
* D3
* DCD IE
* D4
* Sync/Hunt IE
* D5
* CTS IE
* D6
* Tx Underrun / EOM IE
* D7
* Break/Abort IE
*/
/* RR0 */
/* D0
* Rx Character Available
* D1
* Zero Count
* D2
* Tx Buffer Empty
* D3
* DCD
* D4
* Sync/Hunt
* D5
* CTS
* D6
* Tx Underrun / EOM
* D7
* Break/Abort
*/
/* RR1 */
/* D0
* All Sent
* D1
* Residue Code 2
* D2
* Residue Code 1
* D3
* Residue Code 0
* D4
* Parity Error
* D5
* Rx Overrun Error
* D6
* CRC / Framing Error
* D7
* End of Frame (SDLC)
*/
/* RR2 */
/* D7-D0
* Interrupt Vector
*
* Channel A RR2 = WR2
* Channel B RR2 = Interrupt Vector Modified*
*
* *
* D3 D2 D1 Status High/Low = 0
* D4 D5 D6 Status High/Low = 1
*
* 0 0 0 Ch B Transmit Buffer Empty
* 0 0 1 Ch B External/Status Change
* 0 1 0 Ch B Receive Char. Available
* 0 1 1 Ch B Special Receive Condition
* 1 0 0 Ch A Transmit Buffer Empty
* 1 0 1 Ch A External/Status Change
* 1 1 0 Ch A Receive Char. Available
* 1 1 1 Ch A Special Receive Condition
*/
/* RR3 */
/* D0
* Channel B Ext/Status IP (Interrupt Pending)
* D1
* Channel B Tx IP
* D2
* Channel B Rx IP
* D3
* Channel A Ext/Status IP
* D4
* Channel A Tx IP
* D5
* Channel A Rx IP
* D7-D6
* Always 00
*/
/* RR8 */
/* D7-D0
* Receive Buffer
*/
/* RR10 */
/* D7-D0
* Reserved (not used in asynchronous mode)
*/
/* RR12 */
/* D7-D0
* Lower Byte of Time Constant
*/
/* RR13 */
/* D7-D0
* Upper Byte of Time Constant
*/
#pragma once
#include <asm-l/serial.h>

View File

@ -106,9 +106,15 @@ extern void smp_cpus_recovery_done(unsigned int max_cpus);
* This function is needed by all SMP systems. It must _always_ be valid
* from the initial startup.
*/
register unsigned long long __cpu_reg DO_ASM_GET_GREG_MEMONIC(
register unsigned long long __cpu_preempt_reg DO_ASM_GET_GREG_MEMONIC(
SMP_CPU_ID_GREG);
#define raw_smp_processor_id() ((unsigned int) __cpu_reg)
#define raw_smp_processor_id() ((unsigned int) __cpu_preempt_reg)
#define set_smp_processor_id(cpu) \
do { \
__cpu_preempt_reg = (__cpu_preempt_reg & 0xffffffff00000000ull) | \
((u64) (u32) (cpu)); \
} while (0)
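/* Added note (not in the original header): the same global register now
 * carries both the CPU id (low 32 bits) and the preempt state (high 32
 * bits, see the new preempt header in this patch), which is why the macro
 * above masks with 0xffffffff00000000 so that rewriting the CPU id leaves
 * the preempt half untouched. */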
#endif /* !ASSEMBLY */

View File

@ -1 +0,0 @@
#include <asm-generic/socket.h>

View File

@ -1 +0,0 @@
#include <asm-generic/sockios.h>

View File

@ -1,6 +0,0 @@
#ifndef _E2K_STATFS_H_
#define _E2K_STATFS_H_
#include <asm-generic/statfs.h>
#endif /* _E2K_STATFS_H_ */

View File

@ -3,7 +3,6 @@
#include <linux/swab.h>
#include <asm/alternative.h>
#include <asm/machdep.h>
#define __HAVE_ARCH_STRNLEN
@ -20,43 +19,59 @@ extern int __memcmp(const void *cs, const void *ct, size_t count) __pure;
#define memcmp(dst, src, n) _memcmp((dst), (src), (n))
static inline int _memcmp(const void *s1, const void *s2, size_t n)
{
if (__builtin_constant_p(n)) {
u64 v1, v2;
if (n == 16) {
v1 = *(u64 *) s1;
v2 = *(u64 *) s2;
#ifdef E2K_P2V
bool manual_inline = !IS_ENABLED(CONFIG_CPU_ES2);
#else
bool manual_inline = !cpu_has(CPU_HWBUG_UNALIGNED_LOADS);
#endif
if (__builtin_constant_p(n) && n < 0x20 && manual_inline) {
/* Inline small memcmp's */
if (n & 0x10) {
u64 v1 = *(u64 *) s1;
u64 v2 = *(u64 *) s2;
u64 v21 = *(u64 *) (s1 + 8);
u64 v22 = *(u64 *) (s2 + 8);
if (v1 != v2)
return (__swab64(v1) > __swab64(v2)) ? 1 : -1;
if (v21 == v22)
return 0;
return (__swab64(v21) > __swab64(v22)) ? 1 : -1;
if (v21 != v22)
return (__swab64(v21) > __swab64(v22)) ? 1 : -1;
s1 += 0x10;
s2 += 0x10;
}
if (n == 8) {
v1 = *(u64 *) s1;
v2 = *(u64 *) s2;
if (v1 == v2)
return 0;
return (__swab64(v1) > __swab64(v2)) ? 1 : -1;
if (n & 0x8) {
u64 v1 = *(u64 *) s1;
u64 v2 = *(u64 *) s2;
if (v1 != v2)
return (__swab64(v1) > __swab64(v2)) ? 1 : -1;
s1 += 0x8;
s2 += 0x8;
}
if (n == 4) {
v1 = *(u32 *) s1;
v2 = *(u32 *) s2;
if (v1 == v2)
return 0;
return (__swab32(v1) > __swab32(v2)) ? 1 : -1;
if (n & 0x4) {
u64 v1 = *(u32 *) s1;
u64 v2 = *(u32 *) s2;
if (v1 != v2)
return (__swab32(v1) > __swab32(v2)) ? 1 : -1;
s1 += 0x4;
s2 += 0x4;
}
if (n == 2) {
v1 = *(u16 *) s1;
v2 = *(u16 *) s2;
return (u32) __swab16(v1) - (u32) __swab16(v2);
if (n & 0x2) {
u64 v1 = *(u16 *) s1;
u64 v2 = *(u16 *) s2;
if (!(n & 0x1) || v1 != v2)
return (u32) __swab16(v1) - (u32) __swab16(v2);
s1 += 0x2;
s2 += 0x2;
}
if (n == 1) {
v1 = *(u8 *) s1;
v2 = *(u8 *) s2;
if (n & 0x1) {
u64 v1 = *(u8 *) s1;
u64 v2 = *(u8 *) s2;
return v1 - v2;
}
return 0;
}
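/* Worked example (illustrative): for a constant n == 12 the branch above
 * reduces to one 8-byte compare (n & 0x8) followed by one 4-byte compare
 * (n & 0x4), with no loop and no call into __memcmp(). */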
E2K_PREFETCH_L1_SPEC(s1);
@ -160,9 +175,17 @@ static inline void *_memcpy(void *__restrict dst,
const void *__restrict src,
size_t n, const unsigned long dst_align)
{
#if defined E2K_P2V || defined CONFIG_BOOT_E2K
bool manual_inline = !IS_ENABLED(CONFIG_CPU_ES2);
#else
bool manual_inline = !cpu_has(CPU_HWBUG_UNALIGNED_LOADS) &&
(dst_align >= 8 || cpu_has(CPU_FEAT_ISET_V6));
#endif
/*
* As measurements show, an unaligned dst causes a 20x slowdown,
* but unaligned src causes only a 2x slowdown.
* As measurements show, an unaligned dst causes a 4x slowdown,
* but unaligned src causes only a 2x slowdown (also note that
* since v6 unaligned accesses do not cause any slowdown at all).
*
* We can manually ensure dst's alignment, but what about src?
*
@ -180,8 +203,8 @@ static inline void *_memcpy(void *__restrict dst,
* alignment and do the copy with 8-bytes words.
*/
if (__builtin_constant_p(n) && dst_align >= 8 && n < 136) {
/* Inline small aligned memcpy's */
if (__builtin_constant_p(n) && n < 136 && manual_inline) {
/* Inline small memcpy's */
const u64 *__restrict l_src = src;
u64 *__restrict l_dst = dst;
@ -433,13 +456,7 @@ static inline void native_tagged_memcpy_8(void *__restrict dst,
*
* All parameters must be 8-bytes aligned.
*/
#ifdef CONFIG_BOOT_E2K
#define tagged_memcpy_8(dst, src, n) \
({ \
native_tagged_memcpy_8(dst, src, n, \
__alignof(*(dst)), __alignof(*(src))); \
})
#elif defined(CONFIG_PARAVIRT_GUEST)
#if defined(CONFIG_PARAVIRT_GUEST)
#include <asm/paravirt/string.h>
#elif defined(CONFIG_KVM_GUEST_KERNEL)
#include <asm/kvm/guest/string.h>
@ -454,23 +471,7 @@ static inline void native_tagged_memcpy_8(void *__restrict dst,
extern void boot_fast_memcpy(void *, const void *, size_t);
extern notrace void boot_fast_memset(void *s_va, long c, size_t count);
#ifdef CONFIG_BOOT_E2K
/* own small bios (boot loader) for kernel */
static inline unsigned long
fast_tagged_memory_copy(void *dst, const void *src, size_t len,
unsigned long strd_opcode, unsigned long ldrd_opcode,
int prefetch)
{
return native_fast_tagged_memory_copy(dst, src, len,
strd_opcode, ldrd_opcode, prefetch);
}
static inline unsigned long
fast_tagged_memory_set(void *addr, u64 val, u64 tag,
size_t len, u64 strd_opcode)
{
return native_fast_tagged_memory_set(addr, val, tag, len, strd_opcode);
}
#elif defined(CONFIG_PARAVIRT_GUEST)
#if defined(CONFIG_PARAVIRT_GUEST)
/* it is paravirtualized host/guest kernel */
#include <asm/paravirt/string.h>
#elif defined(CONFIG_KVM_GUEST_KERNEL)

View File

@ -47,13 +47,6 @@ static inline bool native_sge_is_set(void)
}
#ifdef CONFIG_E2K_PROFILING
#define boot_smp_processor_id_() \
(((e2k_addr_t)current_thread_info() >= TASK_SIZE) ? \
smp_processor_id() \
: \
((long)READ_CURRENT_REG_VALUE()))
typedef struct {
// FIRST ELEMENT
long max_disable_interrupt; // max #ticks of disable_interrupt
@ -87,7 +80,7 @@ extern disable_interrupt_t disable_interrupt[NR_CPUS];
#define add_info_interrupt(n, ticks) \
({ long t; int cpu; \
t = NATIVE_READ_CLKR_REG_VALUE() - ticks; \
cpu = boot_smp_processor_id_(); \
cpu = boot_smp_processor_id(); \
disable_interrupt[cpu].interrupts[n]++; \
disable_interrupt[cpu].interrupts_time[n] += t; \
if (t > disable_interrupt[cpu].max_interrupts_time[n]) { \
@ -148,7 +141,7 @@ extern system_info_t system_info[NR_CPUS];
extern int enable_collect_interrupt_ticks;
#define collect_disable_interrupt_ticks() \
({ int cpu; \
cpu = boot_smp_processor_id_(); \
cpu = boot_smp_processor_id(); \
if (system_info[cpu].max_disabled_interrupt.begin_time >0){ \
store_max_time_in_system_info( \
system_info[cpu].max_disabled_interrupt.begin_time, \
@ -161,13 +154,13 @@ extern int enable_collect_interrupt_ticks;
store_begin_ip_in_system_info(max_disabled_interrupt)
#define store_do_irq_ticks() \
({ int cpu = boot_smp_processor_id_(); \
({ int cpu = boot_smp_processor_id(); \
disable_interrupt[cpu].clk_of_do_irq = NATIVE_READ_CLKR_REG_VALUE(); \
})
#define define_time_of_do_irq(N) \
({ long t; int cpu; \
cpu = boot_smp_processor_id_(); \
cpu = boot_smp_processor_id(); \
t = NATIVE_READ_CLKR_REG_VALUE() - \
disable_interrupt[cpu].clk_of_do_irq; \
disable_interrupt[cpu].do_irq_time[N] += t; \
@ -202,7 +195,7 @@ extern int enable_collect_interrupt_ticks;
({ long t; int cpu; \
register e2k_cr0_hi_t cr0_hi; \
if (enable_collect_interrupt_ticks) { \
cpu = boot_smp_processor_id_(); \
cpu = boot_smp_processor_id(); \
t = NATIVE_READ_CLKR_REG_VALUE(); \
AS_WORD(cr0_hi) = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \
system_info[cpu].FIELD.begin_time = tick; \
@ -215,7 +208,7 @@ extern int enable_collect_interrupt_ticks;
({ \
int cpu; \
register e2k_cr0_hi_t cr0_hi; \
cpu = boot_smp_processor_id_(); \
cpu = boot_smp_processor_id(); \
disable_interrupt[cpu].clk = NATIVE_READ_CLKR_REG_VALUE(); \
cr0_hi = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \
system_info[cpu].FIELD.beg_ip = NATIVE_READ_IP_REG_VALUE(); \
@ -239,7 +232,7 @@ extern int enable_collect_interrupt_ticks;
({ \
int cpu; \
register e2k_cr0_hi_t cr0_hi; \
cpu = boot_smp_processor_id_(); \
cpu = boot_smp_processor_id(); \
cr0_hi = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \
system_info[cpu].FIELD.beg_ip = mutex->ip; \
system_info[cpu].FIELD.beg_parent_ip = mutex->caller; \
@ -252,7 +245,7 @@ extern int enable_collect_interrupt_ticks;
({ \
long t; int cpu; \
register e2k_cr0_hi_t cr0_hi; \
cpu = boot_smp_processor_id_(); \
cpu = boot_smp_processor_id(); \
if (enable_collect_interrupt_ticks) { \
t = NATIVE_READ_CLKR_REG_VALUE()-system_info[cpu]. \
FIELD.begin_time; \
@ -281,7 +274,7 @@ extern long TIME;
({ \
long t; int cpu; \
register e2k_cr0_hi_t cr0_hi; \
cpu = boot_smp_processor_id_(); \
cpu = boot_smp_processor_id(); \
t = NATIVE_READ_CLKR_REG_VALUE()-tick; \
if (enable_collect_interrupt_ticks) { \
system_info[cpu].FIELD.number++; \

View File

@ -1,6 +0,0 @@
#ifndef _E2K_TERMBITS_H_
#define _E2K_TERMBITS_H_
#include <asm-generic/termbits.h>
#endif /* _E2K_TERMBITS_H_ */

View File

@ -1,6 +0,0 @@
#ifndef _E2K_TERMIOS_H_
#define _E2K_TERMIOS_H_
#include <asm-generic/termios.h>
#endif /* _E2K_TERMIOS_H_ */

View File

@ -53,8 +53,6 @@ typedef struct thread_info {
unsigned long status; /* thread synchronous flags */
long long irq_enter_clk; /* CPU clock when irq enter */
/* occurred */
int preempt_count; /* 0 => preemptable, <0 */
/* => BUG */
mm_segment_t addr_limit; /* thread address space */
struct pt_regs *pt_regs; /* head of pt_regs */
/* structure queue: */
@ -396,12 +394,9 @@ void clear_g_list(struct thread_info *thread_info) { }
/*
* Macros/functions for gaining access to the thread information structure.
*
* preempt_count needs to be 1 initially, until the scheduler is functional.
*/
#define INIT_THREAD_INFO(tsk) \
{ \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
.k_usd_lo = (e2k_usd_lo_t) { \
.word = (unsigned long) init_stack + \
@ -429,6 +424,7 @@ extern void clear_thread_info(struct task_struct *task);
extern unsigned long *alloc_thread_stack_node(struct task_struct *, int);
extern void free_thread_stack(struct task_struct *tsk);
extern int free_vm_stack_cache(unsigned int cpu);
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

Some files were not shown because too many files have changed in this diff.