linux-headers-5.4.0-3.13

Alibek Omarov 2022-01-17 14:36:48 +03:00
parent 000d6cbdce
commit 2013552b5f
198 changed files with 4011 additions and 3435 deletions

View File

@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 143
EXTRAVERSION = -3.9
SUBLEVEL = 154
EXTRAVERSION = -3.13
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
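These four fields form the reported kernel release string (standard kbuild: KERNELRELEASE is $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) plus any local version), so this hunk is the actual version bump:
# before: 5.4.143-3.9
# after:  5.4.154-3.13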

View File

@ -44,12 +44,7 @@ CFLAGS += -pipe -D__linux__
KBUILD_CFLAGS += $(CFLAGS)
ifdef CONFIG_SMP_DAM_BUG
KBUILD_CFLAGS += -fno-dam-call
endif
CFLAGS_GENERIC := -march=elbrus-v2
CFLAGS_ES2 := -mtune=elbrus-2c+
CFLAGS_GENERIC := -march=elbrus-v3
CFLAGS_E2S := -mtune=elbrus-4c
CFLAGS_E8C := -mtune=elbrus-8c
CFLAGS_E1CP := -mtune=elbrus-1c+
@ -58,71 +53,56 @@ CFLAGS_E12C := -mtune=elbrus-12c
CFLAGS_E16C := -mtune=elbrus-16c
CFLAGS_E2C3 := -mtune=elbrus-2c3
CFLAGS_ALL_CPUS := $(CFLAGS_ES2) $(CFLAGS_E2S) $(CFLAGS_E8C) $(CFLAGS_E1CP) \
$(CFLAGS_E8C2) $(CFLAGS_E12C) $(CFLAGS_E16C) $(CFLAGS_E2C3)
export CFLAGS_ALL_CPUS
CFLAGS_ALL_CPUS := $(CFLAGS_E2S) $(CFLAGS_E8C) $(CFLAGS_E1CP) $(CFLAGS_E8C2) \
$(CFLAGS_E12C) $(CFLAGS_E16C) $(CFLAGS_E2C3)
CFLAGS_E2K_SIC := $(CFLAGS_ES2)
export CFLAGS_ES2 CFLAGS_E2S CFLAGS_E8C CFLAGS_E1CP CFLAGS_E8C2 CFLAGS_E2C3 \
CFLAGS_E12C CFLAGS_E16C CFLAGS_E2K_SIC
export CFLAGS_E2S CFLAGS_E8C CFLAGS_E1CP CFLAGS_E8C2 CFLAGS_E2C3 CFLAGS_E12C \
CFLAGS_E16C CFLAGS_ALL_CPUS
ifeq ($(CONFIG_E2K_MACHINE),y)
ifeq ($(CONFIG_E2K_ES2_DSP),y)
KBUILD_CFLAGS += $(CFLAGS_ES2)
KBUILD_AFLAGS += $(CFLAGS_ES2)
TARGET_MDL := 04
ifeq ($(CONFIG_E2K_E2S),y)
KBUILD_CFLAGS += $(CFLAGS_E2S)
KBUILD_AFLAGS += $(CFLAGS_E2S)
TARGET_MDL := 03
else
ifeq ($(CONFIG_E2K_ES2_RU),y)
KBUILD_CFLAGS += $(CFLAGS_ES2)
KBUILD_AFLAGS += $(CFLAGS_ES2)
TARGET_MDL := 06
ifeq ($(CONFIG_E2K_E8C),y)
KBUILD_CFLAGS += $(CFLAGS_E8C)
KBUILD_AFLAGS += $(CFLAGS_E8C)
TARGET_MDL := 07
else
ifeq ($(CONFIG_E2K_E2S),y)
KBUILD_CFLAGS += $(CFLAGS_E2S)
KBUILD_AFLAGS += $(CFLAGS_E2S)
TARGET_MDL := 03
ifeq ($(CONFIG_E2K_E1CP),y)
KBUILD_CFLAGS += $(CFLAGS_E1CP)
KBUILD_AFLAGS += $(CFLAGS_E1CP)
TARGET_MDL := 08
else
ifeq ($(CONFIG_E2K_E8C),y)
KBUILD_CFLAGS += $(CFLAGS_E8C)
KBUILD_AFLAGS += $(CFLAGS_E8C)
TARGET_MDL := 07
ifeq ($(CONFIG_E2K_E8C2),y)
KBUILD_CFLAGS += $(CFLAGS_E8C2)
KBUILD_AFLAGS += $(CFLAGS_E8C2)
TARGET_MDL := 09
else
ifeq ($(CONFIG_E2K_E1CP),y)
KBUILD_CFLAGS += $(CFLAGS_E1CP)
KBUILD_AFLAGS += $(CFLAGS_E1CP)
TARGET_MDL := 08
ifeq ($(CONFIG_E2K_E12C),y)
KBUILD_CFLAGS += $(CFLAGS_E12C)
KBUILD_AFLAGS += $(CFLAGS_E12C)
TARGET_MDL := 0a
else
ifeq ($(CONFIG_E2K_E8C2),y)
KBUILD_CFLAGS += $(CFLAGS_E8C2)
KBUILD_AFLAGS += $(CFLAGS_E8C2)
TARGET_MDL := 09
ifeq ($(CONFIG_E2K_E16C),y)
KBUILD_CFLAGS += $(CFLAGS_E16C)
KBUILD_AFLAGS += $(CFLAGS_E16C)
TARGET_MDL := 0b
else
ifeq ($(CONFIG_E2K_E12C),y)
KBUILD_CFLAGS += $(CFLAGS_E12C)
KBUILD_AFLAGS += $(CFLAGS_E12C)
TARGET_MDL := 0a
ifeq ($(CONFIG_E2K_E2C3),y)
KBUILD_CFLAGS += $(CFLAGS_E2C3)
KBUILD_AFLAGS += $(CFLAGS_E2C3)
TARGET_MDL := 0c
else
ifeq ($(CONFIG_E2K_E16C),y)
KBUILD_CFLAGS += $(CFLAGS_E16C)
KBUILD_AFLAGS += $(CFLAGS_E16C)
TARGET_MDL := 0b
else
ifeq ($(CONFIG_E2K_E2C3),y)
KBUILD_CFLAGS += $(CFLAGS_E2C3)
KBUILD_AFLAGS += $(CFLAGS_E2C3)
TARGET_MDL := 0c
else
error "Invalid e2k machine type"
endif # ifeq ($(CONFIG_E2K_E2C3),y)
endif # ifeq ($(CONFIG_E2K_E16C),y)
endif # ifeq ($(CONFIG_E2K_E12C),y)
endif # ifeq ($(CONFIG_E2K_E8C2),y)
endif # ifeq ($(CONFIG_E2K_E1CP),y)
endif # ifeq ($(CONFIG_E2K_E8C),y)
endif # ifeq ($(CONFIG_E2K_E2S),y)
endif # ifeq ($(CONFIG_E2K_ES2_RU),y)
endif # ifeq ($(CONFIG_E2K_ES2_DSP),y)
error "Invalid e2k machine type"
endif # ifeq ($(CONFIG_E2K_E2C3),y)
endif # ifeq ($(CONFIG_E2K_E16C),y)
endif # ifeq ($(CONFIG_E2K_E12C),y)
endif # ifeq ($(CONFIG_E2K_E8C2),y)
endif # ifeq ($(CONFIG_E2K_E1CP),y)
endif # ifeq ($(CONFIG_E2K_E8C),y)
endif # ifeq ($(CONFIG_E2K_E2S),y)
else # ! ifeq ($(CONFIG_E2K_MACHINE),y)
KBUILD_CFLAGS += $(CFLAGS_GENERIC)
KBUILD_AFLAGS += $(CFLAGS_GENERIC)
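To summarize the chain above: with CONFIG_E2K_MACHINE=y exactly one CPU model option pins the tuning flags and the hex TARGET_MDL model id; otherwise the generic ISA flags apply. A sketch of the two outcomes (option names as in the chain above):
# CONFIG_E2K_MACHINE=y, CONFIG_E2K_E8C=y:
#     KBUILD_CFLAGS/KBUILD_AFLAGS += -mtune=elbrus-8c, TARGET_MDL := 07
# CONFIG_E2K_MACHINE unset:
#     KBUILD_CFLAGS/KBUILD_AFLAGS += $(CFLAGS_GENERIC), i.e. -march=elbrus-v3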
@ -153,15 +133,12 @@ core-y += arch/l/
drivers-$(CONFIG_PCI) += arch/l/pci/
boot := arch/e2k/boot
all: es2boot
all: e2sboot
MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
.PHONY: clean archclean archmrproper archdep bootimage image zImage
es2boot: vmlinux
$(Q)$(MAKE) $(build)=$(boot) CONFIG_ES2=y boot
e2sboot: vmlinux
$(Q)$(MAKE) $(build)=$(boot) CONFIG_E2S=y boot
@ -240,7 +217,6 @@ define archhelp
echo ' zImage - Compressed kernel boot image (image.boot)'
echo ' install-headers - Install kernel headers in '
echo ' <basedir>/usr/include'
echo ' es2boot - Build kernel boot image with small embedded boot for es2 simulator'
echo ' e2sboot - Build kernel boot image with small embedded boot for e2s simulator'
echo ' e8cboot - Build kernel boot image with small embedded boot for e8c simulator'
echo ' e1cpboot - Build kernel boot image with small embedded boot for e1cp simulator'

View File

@ -267,7 +267,7 @@ extern bootblock_struct_t *bootblock_virt; /* bootblock structure */
#define SIMULATOR_MACH_FLAG 0x0001 /* system is running on */
/* simulator */
#define PROTOTYPE_MACH_FLAG_DEPRECATED 0x0002 /* machine is prototype */
#define IOHUB_MACH_FLAG 0x0004 /* machine has IOHUB */
#define IOHUB_MACH_FLAG_DEPRECATED 0x0004 /* machine has IOHUB */
#define OLDMGA_MACH_FLAG 0x0008 /* MGA card has old firmware */
#define MULTILINK_MACH_FLAG 0x0010 /* some nodes are connected */
/* by several IP links */

View File

@ -1,6 +0,0 @@
#ifndef _ASM_L_CLKR_H
#define _ASM_L_CLKR_H
extern struct clocksource clocksource_clkr;
#endif

View File

@ -95,7 +95,6 @@ extern __visible void epic_smp_timer_interrupt(struct pt_regs *regs);
extern __visible void epic_smp_spurious_interrupt(struct pt_regs *regs);
extern __visible void epic_smp_error_interrupt(struct pt_regs *regs);
extern __visible void prepic_smp_error_interrupt(struct pt_regs *regs);
extern __visible void epic_smp_irq_move_cleanup_interrupt(struct pt_regs *regs);
extern __visible void epic_smp_irq_work_interrupt(struct pt_regs *regs);
extern __visible void cepic_epic_interrupt(struct pt_regs *regs);
extern __visible void epic_hc_emerg_interrupt(struct pt_regs *regs);
@ -108,6 +107,7 @@ extern __visible void epic_pcs_interrupt(struct pt_regs *regs);
extern __visible void epic_pv_apf_wake(struct pt_regs *regs);
#endif /* CONFIG_KVM_ASYNC_PF */
#ifdef CONFIG_SMP
extern __visible void epic_smp_irq_move_cleanup_interrupt(struct pt_regs *regs);
extern __visible void epic_smp_reschedule_interrupt(struct pt_regs *regs);
extern __visible void epic_smp_call_function_interrupt(struct pt_regs *regs);
extern __visible void epic_smp_call_function_single_interrupt(

View File

@ -28,11 +28,6 @@ typedef struct {
IS_ENABLED(CONFIG_RDMA_NET))
unsigned int irq_rdma_count;
#endif
#ifdef CONFIG_E2K
#if IS_ENABLED(CONFIG_ELDSP)
unsigned int irq_eldsp_count;
#endif
#endif
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

View File

@ -134,7 +134,7 @@ extern void smp_trace_call_function_single_interrupt(struct pt_regs *regs);
extern void do_nmi(struct pt_regs * regs);
extern void l_init_system_handlers_table(void);
extern void epic_init_system_handlers_table(void);
extern void setup_APIC_vector_handler(int vector,
extern void setup_PIC_vector_handler(int vector,
void (*handler)(struct pt_regs *), bool system, char *name);
extern void do_IRQ(struct pt_regs * regs, unsigned int vector);

View File

@ -59,6 +59,16 @@ static inline void __pic_setup_vector_irq(int cpu)
__apic_setup_vector_irq(cpu);
}
extern void fixup_irqs_epic(void);
extern void fixup_irqs_apic(void);
static inline void fixup_irqs_pic(void)
{
if (nr_ioepics)
fixup_irqs_epic();
if (nr_ioapics)
fixup_irqs_apic();
}
extern void print_IO_APICs(void);
extern void print_IO_EPICs(void);
static inline void print_IO_PICs(void)
@ -96,6 +106,12 @@ static inline void __pic_setup_vector_irq(int cpu)
__apic_setup_vector_irq(cpu);
}
extern void fixup_irqs_apic(void);
static inline void fixup_irqs_pic(void)
{
fixup_irqs_apic();
}
extern void print_IO_APICs(void);
static inline void print_IO_PICs(void)
{

View File

@ -487,12 +487,10 @@ static inline int __iolinklist_parse(const char *buf, iolinkmask_t *dstp, int nb
(link) = iolink_domain_to_link((domain)))
#else /* MAX_NUMIOLINKS == 1 */
#define for_each_iolink_mask(domain, mask) \
if (HAS_MACHINE_E2K_IOHUB) \
for ((domain) = 0; (domain) < 1; (domain)++)
for ((domain) = 0; (domain) < 1; (domain)++)
#define for_each_node_iolink_mask(domain, node, link, mask) \
if (HAS_MACHINE_E2K_IOHUB) \
for ((domain) = 0, (node) = 0, (link) = 0; \
(domain) < 1; (domain)++)
for ((domain) = 0, (node) = 0, (link) = 0; \
(domain) < 1; (domain)++)
#endif /* MAX_NUMIOLINKS */
/*

View File

@ -371,6 +371,7 @@ typedef struct mpc_gpio_act {
struct iohub_sysdata;
void mp_pci_add_resources(struct list_head *resources,
struct iohub_sysdata *sd);
extern int __init mp_ioepic_find_bus(int ioepic_id);
#ifdef CONFIG_IOHUB_DOMAINS
struct iohub_sysdata;
extern int mp_find_iolink_root_busnum(int node, int link);

View File

@ -15,6 +15,13 @@ extern int apic_get_vector(void);
#include <asm/epic.h>
#include <asm/machdep.h>
#define pic_printk(v, s, a...) \
do { \
if (cpu_has_epic()) \
epic_printk(s, a); \
else \
apic_printk(v, s, a); \
} while (0)
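Call sites use it exactly like apic_printk(); the verbosity argument only matters on the APIC path. A hypothetical example (APIC_VERBOSE is the usual apic_printk() level constant, assumed here):
/* pic_printk(APIC_VERBOSE, KERN_INFO "IOAPIC/IOEPIC %d mapped\n", idx);
 * -> epic_printk(...) when cpu_has_epic(), apic_printk(APIC_VERBOSE, ...) otherwise */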
static inline unsigned int read_pic_id(void)
{
@ -117,6 +124,17 @@ static inline void pic_send_reschedule(int cpu)
else
apic_smp_send_reschedule(cpu);
}
struct irq_desc;
extern void apic_irq_force_complete_move(struct irq_desc *desc);
extern void epic_irq_force_complete_move(struct irq_desc *desc);
static inline void pic_irq_force_complete_move(struct irq_desc *desc)
{
if (cpu_has_epic())
epic_irq_force_complete_move(desc);
else
apic_irq_force_complete_move(desc);
}
#endif
struct pt_regs;
@ -273,6 +291,7 @@ static inline void pic_irq_work_raise(void)
apic_irq_work_raise();
}
#ifdef CONFIG_SMP
extern void apic_send_call_function_ipi_mask(const struct cpumask *mask);
static inline void pic_send_call_function_ipi_mask(const struct cpumask *mask)
{
@ -291,6 +310,14 @@ static inline void pic_send_reschedule(int cpu)
apic_smp_send_reschedule(cpu);
}
struct irq_desc;
extern void apic_irq_force_complete_move(struct irq_desc *desc);
static inline void pic_irq_force_complete_move(struct irq_desc *desc)
{
apic_irq_force_complete_move(desc);
}
#endif /* CONFIG_SMP */
struct pt_regs;
extern noinline notrace void apic_do_nmi(struct pt_regs *regs);
static inline void pic_do_nmi(struct pt_regs *regs)

View File

@ -7,6 +7,7 @@ generic-y += emergency-restart.h
generic-y += ioctl.h
generic-y += irq_regs.h
generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h

View File

@ -69,9 +69,9 @@ typedef union e2k_fapb_aps {
#define get_vlc(reg) ((reg >> LSR_VLC_SHIFT) & LSR_VLC_MASK)
static inline void
native_get_array_descriptors_v2(e2k_aau_t *context)
native_get_array_descriptors_v3(e2k_aau_t *context)
{
NATIVE_GET_ARRAY_DESCRIPTORS_V2(context);
NATIVE_GET_ARRAY_DESCRIPTORS_V3(context);
}
static inline void
native_get_array_descriptors_v5(e2k_aau_t *context)
@ -86,9 +86,9 @@ native_set_array_descriptors(const e2k_aau_t *context)
}
static inline void
native_get_synchronous_part_v2(e2k_aau_t *context)
native_get_synchronous_part_v3(e2k_aau_t *context)
{
NATIVE_GET_SYNCHRONOUS_PART_V2(context);
NATIVE_GET_SYNCHRONOUS_PART_V3(context);
}
static inline void
native_get_synchronous_part_v5(e2k_aau_t *context)
@ -130,9 +130,9 @@ static __always_inline void native_set_aau_aaldis_aaldas(
* and comparison with aasr.iab was taken.
*/
static inline void
native_get_aau_context_v2(e2k_aau_t *context, e2k_aasr_t aasr)
native_get_aau_context_v3(e2k_aau_t *context, e2k_aasr_t aasr)
{
NATIVE_GET_AAU_CONTEXT_V2(context, aasr);
NATIVE_GET_AAU_CONTEXT_V3(context, aasr);
}
static inline void
native_get_aau_context_v5(e2k_aau_t *context, e2k_aasr_t aasr)
@ -160,24 +160,24 @@ static __always_inline void native_set_aau_context(const e2k_aau_t *context,
/* native kernel without virtualization */
/* or native host kernel with virtualization support */
#define GET_ARRAY_DESCRIPTORS_V2(aau_context) \
#define GET_ARRAY_DESCRIPTORS_V3(aau_context) \
({ \
native_get_array_descriptors_v2(aau_context); \
native_get_array_descriptors_v3(aau_context); \
})
#define GET_ARRAY_DESCRIPTORS_V5(aau_context) \
({ \
native_get_array_descriptors_v5(aau_context); \
})
#define GET_SYNCHRONOUS_PART_V2(aau_context) \
#define GET_SYNCHRONOUS_PART_V3(aau_context) \
({ \
native_get_synchronous_part_v2(aau_context); \
native_get_synchronous_part_v3(aau_context); \
})
#define GET_SYNCHRONOUS_PART_V5(aau_context) \
({ \
native_get_synchronous_part_v5(aau_context); \
})
#define GET_AAU_CONTEXT_V2(cntx, aasr) native_get_aau_context_v2(cntx, aasr)
#define GET_AAU_CONTEXT_V3(cntx, aasr) native_get_aau_context_v3(cntx, aasr)
#define GET_AAU_CONTEXT_V5(cntx, aasr) native_get_aau_context_v5(cntx, aasr)
#define SAVE_AAU_MASK_REGS(aau_context, aasr) \
@ -193,7 +193,7 @@ static __always_inline void native_set_aau_context(const e2k_aau_t *context,
NATIVE_RESTORE_AADS(aau_regs)
#define SAVE_AALDIS_V2(regs) NATIVE_SAVE_AALDIS_V2(regs)
#define SAVE_AALDIS_V3(regs) NATIVE_SAVE_AALDIS_V3(regs)
#define SAVE_AALDIS_V5(regs) NATIVE_SAVE_AALDIS_V5(regs)
#define SAVE_AALDA(aaldas) \

View File

@ -148,21 +148,21 @@ static inline e2k_aasr_t aasr_parse(e2k_aasr_t aasr)
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(30, regs[30], regs[62]); \
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(31, regs[31], regs[63]); \
})
#define PREFIX_SAVE_AALDIS_V2(PV_TYPE, pv_type, regs) \
PREFIX_SAVE_AALDIS(PV_TYPE, pv_type, V2, v2, regs)
#define PREFIX_SAVE_AALDIS_V3(PV_TYPE, pv_type, regs) \
PREFIX_SAVE_AALDIS(PV_TYPE, pv_type, V3, v3, regs)
#define PREFIX_SAVE_AALDIS_V5(PV_TYPE, pv_type, regs) \
PREFIX_SAVE_AALDIS(PV_TYPE, pv_type, V5, v5, regs)
#define NATIVE_SAVE_AALDIS_V2(regs) \
PREFIX_SAVE_AALDIS_V2(NATIVE, native, regs)
#define NATIVE_SAVE_AALDIS_V3(regs) \
PREFIX_SAVE_AALDIS_V3(NATIVE, native, regs)
#define NATIVE_SAVE_AALDIS_V5(regs) \
PREFIX_SAVE_AALDIS_V5(NATIVE, native, regs)
#define NATIVE_SAVE_AALDIS(regs) \
({ \
if (IS_AAU_ISET_V5()) { \
NATIVE_SAVE_AALDIS_V5(regs); \
} else if (IS_AAU_ISET_V2()) { \
NATIVE_SAVE_AALDIS_V2(regs); \
} else if (IS_AAU_ISET_V3()) { \
NATIVE_SAVE_AALDIS_V3(regs); \
} else if (IS_AAU_ISET_GENERIC()) { \
machine.save_aaldi(regs); \
} else { \
@ -170,7 +170,7 @@ static inline e2k_aasr_t aasr_parse(e2k_aasr_t aasr)
} \
})
#define PREFIX_GET_ARRAY_DESCRIPTORS_V2(PV_TYPE, pv_type, aau_context) \
#define PREFIX_GET_ARRAY_DESCRIPTORS_V3(PV_TYPE, pv_type, aau_context) \
({ \
u64 *const aainds = (aau_context)->aainds; \
u64 *const aaincrs = (aau_context)->aaincrs; \
@ -186,14 +186,14 @@ static inline e2k_aasr_t aasr_parse(e2k_aasr_t aasr)
ind13, ind14, ind15; \
register u32 tags; \
\
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(1, ind1, ind2); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(3, ind3, ind4); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(5, ind5, ind6); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(7, ind7, ind8); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(9, ind9, ind10); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(11, ind11, ind12); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(13, ind13, ind14); \
PV_TYPE##_READ_AAIND_REG15_AND_TAGS_VALUE_V2(ind15, tags); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V3(1, ind1, ind2); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V3(3, ind3, ind4); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V3(5, ind5, ind6); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V3(7, ind7, ind8); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V3(9, ind9, ind10); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V3(11, ind11, ind12); \
PV_TYPE##_READ_AAINDS_PAIR_VALUE_V3(13, ind13, ind14); \
PV_TYPE##_READ_AAIND_REG15_AND_TAGS_VALUE_V3(ind15, tags); \
aainds[0] = 0; \
aainds[1] = ind1; \
aainds[2] = ind2; \
@ -222,10 +222,10 @@ static inline e2k_aasr_t aasr_parse(e2k_aasr_t aasr)
incr5, incr6, incr7; \
register u32 tags; \
\
PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V2(1, incr1, incr2); \
PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V2(3, incr3, incr4); \
PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V2(5, incr5, incr6); \
PV_TYPE##_READ_AAINCR_REG7_AND_TAGS_VALUE_V2(incr7, tags); \
PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V3(1, incr1, incr2); \
PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V3(3, incr3, incr4); \
PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V3(5, incr5, incr6); \
PV_TYPE##_READ_AAINCR_REG7_AND_TAGS_VALUE_V3(incr7, tags); \
aaincrs[0] = 1; \
aaincrs[1] = (s64) (s32) incr1; \
aaincrs[2] = (s64) (s32) incr2; \
@ -237,8 +237,8 @@ static inline e2k_aasr_t aasr_parse(e2k_aasr_t aasr)
context->aaincr_tags = tags; \
} \
})
#define NATIVE_GET_ARRAY_DESCRIPTORS_V2(aau_context) \
PREFIX_GET_ARRAY_DESCRIPTORS_V2(NATIVE, native, aau_context)
#define NATIVE_GET_ARRAY_DESCRIPTORS_V3(aau_context) \
PREFIX_GET_ARRAY_DESCRIPTORS_V3(NATIVE, native, aau_context)
#define PREFIX_GET_ARRAY_DESCRIPTORS_V5(PV_TYPE, pv_type, aau_context) \
({ \
@ -344,7 +344,7 @@ static inline e2k_aasr_t aasr_parse(e2k_aasr_t aasr)
#define NATIVE_SET_ARRAY_DESCRIPTORS(aau_context) \
PREFIX_SET_ARRAY_DESCRIPTORS(NATIVE, native, aau_context)
#define PREFIX_GET_SYNCHRONOUS_PART_V2(PV_TYPE, pv_type, aau_context) \
#define PREFIX_GET_SYNCHRONOUS_PART_V3(PV_TYPE, pv_type, aau_context) \
({ \
u64 *const aastis = (aau_context)->aastis; \
register u32 sti0, sti1, sti2, sti3, \
@ -353,14 +353,14 @@ static inline e2k_aasr_t aasr_parse(e2k_aasr_t aasr)
sti12, sti13, sti14, sti15; \
\
/* get AASTIs */ \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(0, sti0, sti1); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(2, sti2, sti3); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(4, sti4, sti5); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(6, sti6, sti7); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(8, sti8, sti9); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(10, sti10, sti11); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(12, sti12, sti13); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(14, sti14, sti15); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V3(0, sti0, sti1); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V3(2, sti2, sti3); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V3(4, sti4, sti5); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V3(6, sti6, sti7); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V3(8, sti8, sti9); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V3(10, sti10, sti11); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V3(12, sti12, sti13); \
PV_TYPE##_READ_AASTIS_PAIR_VALUE_V3(14, sti14, sti15); \
\
aastis[0] = sti0; \
aastis[1] = sti1; \
@ -419,8 +419,8 @@ static inline e2k_aasr_t aasr_parse(e2k_aasr_t aasr)
(aau_context)->aasti_tags = \
pv_type##_read_aasti_tags_reg_value(); \
})
#define NATIVE_GET_SYNCHRONOUS_PART_V2(aau_context) \
PREFIX_GET_SYNCHRONOUS_PART_V2(NATIVE, native, aau_context)
#define NATIVE_GET_SYNCHRONOUS_PART_V3(aau_context) \
PREFIX_GET_SYNCHRONOUS_PART_V3(NATIVE, native, aau_context)
#define NATIVE_GET_SYNCHRONOUS_PART_V5(aau_context) \
PREFIX_GET_SYNCHRONOUS_PART_V5(NATIVE, native, aau_context)
@ -508,20 +508,20 @@ static inline e2k_aasr_t aasr_parse(e2k_aasr_t aasr)
if (aasr.stb) \
PV_TYPE##_GET_SYNCHRONOUS_PART_##ISET(aau_context); \
})
#define PREFIX_GET_AAU_CONTEXT_V2(PV_TYPE, pv_type, aau_context, aasr) \
PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, V2, v2, aau_context, aasr)
#define PREFIX_GET_AAU_CONTEXT_V3(PV_TYPE, pv_type, aau_context, aasr) \
PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, V3, v3, aau_context, aasr)
#define PREFIX_GET_AAU_CONTEXT_V5(PV_TYPE, pv_type, aau_context, aasr) \
PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, V5, v5, aau_context, aasr)
#define NATIVE_GET_AAU_CONTEXT_V2(aau_context, aasr) \
PREFIX_GET_AAU_CONTEXT_V2(NATIVE, native, aau_context, aasr)
#define NATIVE_GET_AAU_CONTEXT_V3(aau_context, aasr) \
PREFIX_GET_AAU_CONTEXT_V3(NATIVE, native, aau_context, aasr)
#define NATIVE_GET_AAU_CONTEXT_V5(aau_context, aasr) \
PREFIX_GET_AAU_CONTEXT_V5(NATIVE, native, aau_context, aasr)
#define NATIVE_GET_AAU_CONTEXT(aau_context, aasr) \
do { \
if (IS_AAU_ISET_V5()) { \
NATIVE_GET_AAU_CONTEXT_V5(aau_context, aasr); \
} else if (IS_AAU_ISET_V2()) { \
NATIVE_GET_AAU_CONTEXT_V2(aau_context, aasr); \
} else if (IS_AAU_ISET_V3()) { \
NATIVE_GET_AAU_CONTEXT_V3(aau_context, aasr); \
} else if (IS_AAU_ISET_GENERIC()) { \
machine.get_aau_context(aau_context, aasr); \
} else { \

View File

@ -22,20 +22,20 @@
#if CONFIG_CPU_ISET >= 5
# define IS_AAU_ISET_V5() true
# define IS_AAU_ISET_V2() false
# define IS_AAU_ISET_V3() false
# define IS_AAU_ISET_GENERIC() false
#elif CONFIG_CPU_ISET >= 1
# define IS_AAU_ISET_V2() true
# define IS_AAU_ISET_V3() true
# define IS_AAU_ISET_V5() false
# define IS_AAU_ISET_GENERIC() false
#elif CONFIG_CPU_ISET == 0
# define IS_AAU_ISET_GENERIC() true
# define IS_AAU_ISET_V2() false
# define IS_AAU_ISET_V3() false
# define IS_AAU_ISET_V5() false
#else /* CONFIG_CPU_ISET undefined or negative */
# warning "Undefined CPU ISET VERSION #, IS_AAU_ISET_Vx is defined dynamically"
# define IS_AAU_ISET_GENERIC() true
# define IS_AAU_ISET_V2() false
# define IS_AAU_ISET_V3() false
# define IS_AAU_ISET_V5() false
#endif /* CONFIG_CPU_ISET 0-6 */
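For a concrete CONFIG_CPU_ISET the predicates collapse to compile-time constants, e.g.:
/* CONFIG_CPU_ISET >= 5 : IS_AAU_ISET_V5() == true, V3/GENERIC == false
 * CONFIG_CPU_ISET 1..4 : IS_AAU_ISET_V3() == true, V5/GENERIC == false
 * CONFIG_CPU_ISET == 0 : IS_AAU_ISET_GENERIC() == true -> dispatch at runtime */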

View File

@ -35,7 +35,7 @@ register unsigned long long __cpu_preempt_reg DO_ASM_GET_GREG_MEMONIC(SMP_CPU_ID
#elif defined(E2K_P2V)
# define NATIVE_HWBUG_AFTER_LD_ACQ_ADDRESS \
(NATIVE_NV_READ_IP_REG_VALUE() & ~0x3fUL)
(NATIVE_READ_IP_REG_VALUE() & ~0x3fUL)
# define NATIVE_HWBUG_AFTER_LD_ACQ_CPU 0
# if !defined(CONFIG_E2K_MACHINE) || defined(CONFIG_E2K_E8C)
# define NATIVE_HAS_HWBUG_AFTER_LD_ACQ_ADDRESS 1
@ -46,7 +46,7 @@ register unsigned long long __cpu_preempt_reg DO_ASM_GET_GREG_MEMONIC(SMP_CPU_ID
#else /* CONFIG_BOOT_E2K */
# define NATIVE_HWBUG_AFTER_LD_ACQ_ADDRESS \
(NATIVE_NV_READ_IP_REG_VALUE() & ~0x3fUL)
(NATIVE_READ_IP_REG_VALUE() & ~0x3fUL)
# define NATIVE_HAS_HWBUG_AFTER_LD_ACQ_ADDRESS 0
# define NATIVE_HWBUG_AFTER_LD_ACQ_CPU 0

View File

@ -78,7 +78,7 @@ do { \
/*
* store_release() - same as __smp_store_release but acts on device accesses too
*/
#define store_release_v2 __smp_store_release
#define store_release_v3 __smp_store_release
#define store_release_v6(p, v) \
do { \
__typeof__(*(p)) __sr6_v = (v); \
@ -95,7 +95,7 @@ do { \
if (cpu_has(CPU_FEAT_ISET_V6)) \
store_release_v6((p), (v)); \
else \
store_release_v2((p), (v)); \
store_release_v3((p), (v)); \
} while (0)
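A minimal usage sketch (names are hypothetical, not from this header): publish a descriptor to a device so the preceding normal stores are ordered before the flag store, even though the flag lives in device memory:
/* desc->len  = len;
 * desc->addr = dma_addr;
 * store_release(&desc->owner, OWNER_DEVICE);  // prior stores stay before this one
 */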
#if CONFIG_CPU_ISET >= 6

View File

@ -1,7 +1,6 @@
#ifndef _E2K_CACHE_H_
#define _E2K_CACHE_H_
#include <asm/es2.h>
#include <asm/e2s.h>
#include <asm/e8c.h>
#include <asm/e8c2.h>
@ -10,54 +9,30 @@
#include <asm/e12c.h>
#include <asm/e2c3.h>
#define _max_(a, b) ((a) > (b) ? (a) : (b))
#define _max_(a, b) ((a) > (b) ? (a) : (b))
#define _max3_(a, b, c) _max_((a), _max_((b), (c)))
#define L1_CACHE_SHIFT 5
#define L2_CACHE_SHIFT 6
#ifdef CONFIG_E2K_MACHINE
# if defined(CONFIG_E2K_ES2_DSP) || defined(CONFIG_E2K_ES2_RU)
# define L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT
# elif defined(CONFIG_E2K_E2S)
# define L1_CACHE_SHIFT E2S_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT E2S_L2_CACHE_SHIFT
# elif defined(CONFIG_E2K_E8C)
# define L1_CACHE_SHIFT E8C_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT E8C_L2_CACHE_SHIFT
# if defined(CONFIG_E2K_E8C)
# define L3_CACHE_SHIFT E8C_L3_CACHE_SHIFT
# elif defined(CONFIG_E2K_E1CP)
# define L1_CACHE_SHIFT E1CP_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT E1CP_L2_CACHE_SHIFT
# elif defined(CONFIG_E2K_E8C2)
# define L1_CACHE_SHIFT E8C2_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT E8C2_L2_CACHE_SHIFT
# define L3_CACHE_SHIFT E8C2_L3_CACHE_SHIFT
# elif defined(CONFIG_E2K_E12C)
# define L1_CACHE_SHIFT E12C_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT E12C_L2_CACHE_SHIFT
# define L3_CACHE_SHIFT E12C_L3_CACHE_SHIFT
# elif defined(CONFIG_E2K_E16C)
# define L1_CACHE_SHIFT E16C_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT E16C_L2_CACHE_SHIFT
# define L3_CACHE_SHIFT E16C_L3_CACHE_SHIFT
# elif defined(CONFIG_E2K_E2C3)
# define L1_CACHE_SHIFT E2C3_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT E2C3_L2_CACHE_SHIFT
# else
# error "E2K MACHINE type is not defined"
# endif
# ifndef L3_CACHE_SHIFT
# define L3_CACHE_SHIFT 0
# define L3_CACHE_SHIFT 0
# endif
#else /* ! CONFIG_E2K_MACHINE */
/*
 * FIXME: Keep this in mind when adding a new CPU type
*/
# define L1_CACHE_SHIFT_MAX ES2_L1_CACHE_SHIFT
# define L2_CACHE_SHIFT_MAX ES2_L2_CACHE_SHIFT
# define L3_CACHE_SHIFT_MAX E8C_L3_CACHE_SHIFT
# define L1_CACHE_SHIFT L1_CACHE_SHIFT_MAX
# define L2_CACHE_SHIFT L2_CACHE_SHIFT_MAX
# define L3_CACHE_SHIFT L3_CACHE_SHIFT_MAX
#endif /* CONFIG_E2K_MACHINE */
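For reference, a line size is 1 << shift, so the values fixed above mean 32-byte L1 lines and 64-byte L2 lines; the *_CACHE_BYTES constants are conventionally derived the same way:
/* L1_CACHE_BYTES = 1 << 5 = 32, L2_CACHE_BYTES = 1 << 6 = 64 */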

View File

@ -64,13 +64,11 @@ extern void native_flush_icache_page(struct vm_area_struct *vma,
#define flush_icache_page(vma, page) __flush_icache_page(vma, page)
#define smp_flush_icache_all()
#define native_smp_flush_icache_range(start, end)
#define native_smp_flush_icache_range_array(icache_range_arr)
#define native_smp_flush_icache_page(vma, page)
#define native_smp_flush_icache_kernel_line(addr)
#else /* CONFIG_SMP */
extern void smp_flush_icache_all(void);
extern void native_smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end);
extern void native_smp_flush_icache_range_array(
icache_range_array_t *icache_range_arr);
extern void native_smp_flush_icache_page(struct vm_area_struct *vma,
@ -78,24 +76,9 @@ extern void native_smp_flush_icache_page(struct vm_area_struct *vma,
extern void native_smp_flush_icache_kernel_line(e2k_addr_t addr);
#define flush_icache_all() smp_flush_icache_all()
#define flush_icache_range(start, end) \
({ \
if (cpu_has(CPU_FEAT_FLUSH_DC_IC)) \
__flush_icache_range(start, end); \
else \
smp_flush_icache_range(start, end); \
})
#define flush_icache_range(start, end) __flush_icache_range(start, end);
#define flush_icache_range_array smp_flush_icache_range_array
#define flush_icache_page(vma, page) \
({ \
if (cpu_has(CPU_FEAT_FLUSH_DC_IC)) \
__flush_icache_page(vma, page); \
else \
smp_flush_icache_page(vma, page); \
})
#define flush_icache_page(vma, page) __flush_icache_page(vma, page);
#endif /* ! (CONFIG_SMP) */
@ -162,11 +145,6 @@ native_clear_DCACHE_L1_range(void *virt_addr, size_t len)
/* it is native kernel without virtualization support */
/* or native kernel with virtualization support */
static inline void
smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end)
{
native_smp_flush_icache_range(start, end);
}
static inline void
smp_flush_icache_range_array(icache_range_array_t *icache_range_arr)
{
native_smp_flush_icache_range_array(icache_range_arr);

View File

@ -46,12 +46,7 @@ static inline __wsum ip_fast_csum_nofold_maybe_unaligned(const void *iph, unsign
#define ip_fast_csum ip_fast_csum
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
if (cpu_has(CPU_HWBUG_UNALIGNED_LOADS) &&
!IS_ALIGNED((unsigned long) iph, 4))
return (__force __sum16) ~e2k_do_csum(iph, ihl*4);
else
return csum_fold(ip_fast_csum_nofold_maybe_unaligned(iph, ihl));
return csum_fold(ip_fast_csum_nofold_maybe_unaligned(iph, ihl));
}
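For readers new to the primitive: ip_fast_csum() returns the 16-bit one's-complement checksum of an IPv4 header that is ihl 32-bit words long. A portable reference version of the same computation (a sketch, not the e2k implementation; assumes 2-byte alignment of iph):

static inline unsigned short ip_csum_ref(const void *iph, unsigned int ihl)
{
	const unsigned short *p = iph;
	unsigned long sum = 0;
	unsigned int i;

	for (i = 0; i < ihl * 2; i++)	/* ihl counts 32-bit words */
		sum += p[i];
	while (sum >> 16)		/* fold carries back into the low 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned short)~sum;	/* one's complement of the folded sum */
}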
static inline u32 add32_with_carry(u32 a, u32 b)
@ -86,8 +81,7 @@ __wsum __csum_partial(const void *buff, int len, __wsum sum);
static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0 &&
!cpu_has(CPU_HWBUG_UNALIGNED_LOADS)) {
if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
u64 sum_64 = (__force u32) sum;
if (len == 2)
@ -108,8 +102,7 @@ static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
sum_64 += *(const u32 *) (buff + 12);
sum = from64to32(sum_64);
} else if (__builtin_constant_p(len) && (len & 3) == 0 &&
!cpu_has(CPU_HWBUG_UNALIGNED_LOADS)) {
} else if (__builtin_constant_p(len) && (len & 3) == 0) {
sum = csum_add(sum, ip_fast_csum_nofold_maybe_unaligned(buff, len >> 2));
} else {
prefetch((__force void *) buff);

View File

@ -2,12 +2,6 @@
#define _ASM_E2K_CLKR_H
#include <asm/cpu.h>
#include <asm-l/clkr.h>
extern __interrupt u64 fast_syscall_read_clkr(void);
extern u64 last_clkr;
DECLARE_PER_CPU(u64, clkr_offset);
#if defined(CONFIG_PARAVIRT_GUEST)
/* it is paravirtualized guest and host kernel */

View File

@ -11,24 +11,13 @@
static inline char mc146818_cmos_read(char addr)
{
if (HAS_MACHINE_E2K_IOHUB) {
WARN_ONCE(1, "Warning: CMOS_READ attempted on a machine without a functioning CMOS\n");
return 0;
}
outb_p((addr),RTC_PORT(0));
return inb_p(RTC_PORT(1));
WARN_ONCE(1, "Warning: CMOS_READ attempted on a machine without a functioning CMOS\n");
return 0;
}
static inline void mc146818_cmos_write(char val, char addr)
{
if (HAS_MACHINE_E2K_IOHUB) {
WARN_ONCE(1, "Warning: CMOS_WRITE attempted on a machine without a functioning CMOS\n");
return;
}
outb_p(addr, RTC_PORT(0));
outb_p(val, RTC_PORT(1));
WARN_ONCE(1, "Warning: CMOS_WRITE attempted on a machine without a functioning CMOS\n");
}
#define CMOS_READ(addr) mc146818_cmos_read(addr)

View File

@ -139,7 +139,7 @@ native_kernel_hw_stack_frames_copy(u64 *dst, const u64 *src, unsigned long size)
} else {
#pragma loop count (5)
for (i = 0; i < size / 128; i++)
E2K_TAGGED_MEMMOVE_128_RF_V2(&dst[16 * i],
E2K_TAGGED_MEMMOVE_128_RF_V3(&dst[16 * i],
&src[16 * i]);
copied = round_down(size, 128);
@ -619,7 +619,7 @@ static inline void collapse_kernel_hw_stacks(struct e2k_stacks *stacks)
* we will have pcshtp = pcsp_hi.ind = 0. But the situation
* with pcsp_hi.ind != 0 and pcshtp = 0 is impossible. */
if (WARN_ON_ONCE(spilled_pc_size < SZ_OF_CR &&
AS(stacks->pcsp_hi).ind != 0))
AS(stacks->pcsp_hi).ind != 0 && !paravirt_enabled()))
do_exit(SIGKILL);
/* Keep the last user frame (see user_hw_stacks_copy_full()) */

View File

@ -4,16 +4,9 @@
#ifndef __ASSEMBLY__
enum {
/* Hardware bugs */
CPU_HWBUG_LARGE_PAGES,
CPU_HWBUG_LAPIC_TIMER,
CPU_HWBUG_PIO_READS,
CPU_HWBUG_ATOMIC,
CPU_HWBUG_CLW,
CPU_HWBUG_PAGE_A,
CPU_HWBUG_SPURIOUS_EXC_ILL_INSTR_ADDR,
CPU_HWBUG_UNALIGNED_LOADS,
CPU_HWBUG_CANNOT_DO_DMA_IN_NEIGHBOUR_NODE,
CPU_HWBUG_DMA_AT_APIC_ADDR,
CPU_HWBUG_KERNEL_DATA_MONITOR,
CPU_HWBUG_WRITE_MEMORY_BARRIER,
CPU_HWBUG_BAD_RESET,
@ -43,8 +36,6 @@ enum {
CPU_HWBUG_C3,
/* Features, not bugs */
CPU_FEAT_WC_PCI_PREFETCH,
CPU_FEAT_FLUSH_DC_IC,
CPU_FEAT_EPIC,
CPU_FEAT_TRAP_V5,
CPU_FEAT_TRAP_V6,
@ -53,7 +44,6 @@ enum {
CPU_FEAT_SEPARATE_TLU_CACHE,
CPU_FEAT_FILLR,
CPU_FEAT_FILLC,
CPU_FEAT_ISET_V3,
CPU_FEAT_ISET_V5,
CPU_FEAT_ISET_V6,

View File

@ -547,11 +547,6 @@ write_OSGD_hi_reg(e2k_osgd_hi_t OSGD_hi)
* from the high & low word structure
*/
#define WRITE_OSGD_REG_VALUE(OSGD_hi_value, OSGD_lo_value) \
({ \
WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value); \
WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value); \
})
#define BOOT_WRITE_OSGD_REG_VALUE(OSGD_hi_value, OSGD_lo_value) \
({ \
BOOT_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value); \

View File

@ -134,6 +134,8 @@
NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value)
#define BOOT_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \
NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value)
#define WRITE_OSGD_REG_VALUE(OSGD_hi_value, OSGD_lo_value) \
NATIVE_WRITE_OSGD_REG_VALUE(OSGD_hi_value, OSGD_lo_value)
/*
* Read/write low/high double-word Compilation Unit Register (CUD)
@ -393,7 +395,7 @@
/*
* Read double-word CPU current Instruction Pointer register (IP)
*/
#define READ_IP_REG_VALUE() NATIVE_NV_READ_IP_REG_VALUE()
#define READ_IP_REG_VALUE() NATIVE_READ_IP_REG_VALUE()
/*
* Read debug and monitor registers

View File

@ -1379,7 +1379,7 @@ typedef union e2k_dst {
#define AS_WORD(x) ((x).word)
#define AS_STRUCT(x) ((x).fields)
#define AS_V2_STRUCT(x) ((x).v2_fields)
#define AS_V3_STRUCT(x) ((x).v3_fields)
#define AS_V6_STRUCT(x) ((x).v6_fields)
#define AS_SAP_STRUCT(x) ((x).sap_fields)
#define AS_AP_STRUCT(x) ((x).ap_fields)
@ -1920,13 +1920,13 @@ typedef union e2k_tsd {
#define CUD_CFLAG_SET 1 /* ISV have passed */
/* Hardware procedure stack memory mapping (one quad-register record, LE) */
/* Instruction sets from V2 to V4 */
typedef struct e2k_mem_ps_v2 {
/* Instruction sets from V3 to V4 */
typedef struct e2k_mem_ps_v3 {
unsigned long word_lo; /* low word value */
unsigned long word_hi; /* high word value */
unsigned long ext_lo; /* extension of low word */
unsigned long ext_hi; /* extension of high word */
} e2k_mem_ps_v2_t;
} e2k_mem_ps_v3_t;
/* Istruction sets from V5 to V6 */
typedef struct e2k_mem_ps_v5 {
unsigned long word_lo; /* low word value */
@ -1935,7 +1935,7 @@ typedef struct e2k_mem_ps_v5 {
unsigned long ext_hi; /* extension of high word */
} e2k_mem_ps_v5_t;
typedef union e2k_mem_ps {
e2k_mem_ps_v2_t v2;
e2k_mem_ps_v3_t v3;
e2k_mem_ps_v5_t v5;
} e2k_mem_ps_t;
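Both variants describe one quad-register record of four 64-bit words, so the union does not grow; a quick sanity sketch (hypothetical checks, assuming 64-bit unsigned long as on e2k):
/* _Static_assert(sizeof(e2k_mem_ps_v3_t) == 32, "one quad-register record");
 * _Static_assert(sizeof(e2k_mem_ps_v5_t) == 32, "one quad-register record");
 * _Static_assert(sizeof(e2k_mem_ps_t)    == 32, "union of equal layouts"); */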
@ -2079,7 +2079,7 @@ typedef struct e2k_upsr_fields {
u32 a20 : 1; /* emulation of 1 Mb memory (only for Intel) */
/* should be 0 for Elbrus */
u32 nmie : 1; /* not masked interrupt enable */
/* next field of register exists only on ES2/E2S/E8C/E1C+ CPUs */
/* next field of register exists only on E2S/E8C/E1C+ CPUs */
u32 fsm : 1; /* floating comparison mode flag */
/* 1 - compatible with x86/x87 */
u32 impt : 1; /* ignore Memory Protection Table flag */
@ -2113,7 +2113,7 @@ typedef union e2k_upsr {
#define UPSR_IE 0x20U
#define UPSR_A20 0x40U
#define UPSR_NMIE 0x80U
/* next field of register exists only on ES2/E2S/E8C/E1C+ CPUs */
/* next field of register exists only on E2S/E8C/E1C+ CPUs */
#define UPSR_FSM 0x100U
#define UPSR_IMPT 0x200U
#define UPSR_IUC 0x400U
@ -2169,10 +2169,7 @@ typedef union e2k_idr {
/* CPU model numbers */
#define IDR_NONE 0x00 /* No such hardware exists */
#define IDR_E2S_MDL 0x03 /* Elbrus-4C (Elbrus-2S) */
#define IDR_ES2_DSP_MDL 0x04 /* Elbrus-2C+ */
#define IDR_E4S_MDL 0x05 /* reserve */
#define IDR_ES2_RU_MDL 0x06 /* Elbrus-2CM (without DSP) */
/* russian MICRON release */
#define IDR_E8C_MDL 0x07 /* Elbrus-8C */
#define IDR_E1CP_MDL 0x08 /* Elbrus-1C+ one processor e2s */
/* + graphic */

View File

@ -15,8 +15,6 @@ struct pt_regs;
extern void boot_e12c_setup_arch(void);
extern void e12c_setup_machine(void);
extern void setup_APIC_vector_handler(int vector,
void (*handler)(struct pt_regs *), bool system, char *name);
#endif
#define E12C_NR_NODE_CPUS 12
@ -24,35 +22,14 @@ extern void setup_APIC_vector_handler(int vector,
#define E12C_NODE_IOLINKS 1
#define E12C_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE
#define E12C_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE
#define E12C_PCICFG_AREA_PHYS_BASE E2S_PCICFG_AREA_PHYS_BASE
#define E12C_PCICFG_AREA_SIZE E2S_PCICFG_AREA_SIZE
#define E12C_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE
#define E12C_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET
#define E12C_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE
#define E12C_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE
#define E12C_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE
#define E12C_MLT_SIZE ES2_MLT_SIZE
#define E12C_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM
#define E12C_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM
#define E12C_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2
#define E12C_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2
#define E12C_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM
#define E12C_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT
#define E12C_NSR_AREA_PHYS_BASE E2S_NSR_AREA_PHYS_BASE
#define E12C_SIC_MC_SIZE E16C_SIC_MC_SIZE
#define E12C_SIC_MC_COUNT 2
#define E12C_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE
#define E12C_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT
#define E12C_L1_CACHE_BYTES ES2_L1_CACHE_BYTES
#define E12C_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT
#define E12C_L2_CACHE_BYTES ES2_L2_CACHE_BYTES
#define E12C_L3_CACHE_SHIFT E8C_L3_CACHE_SHIFT
#define E12C_L3_CACHE_BYTES E8C_L3_CACHE_BYTES

View File

@ -15,8 +15,6 @@ struct pt_regs;
extern void boot_e16c_setup_arch(void);
extern void e16c_setup_machine(void);
extern void setup_APIC_vector_handler(int vector,
void (*handler)(struct pt_regs *), bool system, char *name);
#endif
#define E16C_NR_NODE_CPUS 16
@ -24,35 +22,14 @@ extern void setup_APIC_vector_handler(int vector,
#define E16C_NODE_IOLINKS 1
#define E16C_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE
#define E16C_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE
#define E16C_PCICFG_AREA_PHYS_BASE E2S_PCICFG_AREA_PHYS_BASE
#define E16C_PCICFG_AREA_SIZE E2S_PCICFG_AREA_SIZE
#define E16C_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE
#define E16C_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET
#define E16C_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE
#define E16C_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE
#define E16C_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE
#define E16C_MLT_SIZE ES2_MLT_SIZE
#define E16C_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM
#define E16C_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM
#define E16C_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2
#define E16C_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2
#define E16C_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM
#define E16C_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT
#define E16C_NSR_AREA_PHYS_BASE E2S_NSR_AREA_PHYS_BASE
#define E16C_SIC_MC_SIZE 0x60
#define E16C_SIC_MC_COUNT 8
#define E16C_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE
#define E16C_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT
#define E16C_L1_CACHE_BYTES ES2_L1_CACHE_BYTES
#define E16C_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT
#define E16C_L2_CACHE_BYTES ES2_L2_CACHE_BYTES
#define E16C_L3_CACHE_SHIFT E8C_L3_CACHE_SHIFT
#define E16C_L3_CACHE_BYTES E8C_L3_CACHE_BYTES

View File

@ -21,26 +21,6 @@ extern void e1cp_setup_machine(void);
#define E1CP_PCICFG_AREA_PHYS_BASE 0x000000ff10000000UL
#define E1CP_PCICFG_AREA_SIZE 0x0000000010000000UL
#define E1CP_NBSR_AREA_OFFSET E2S_NBSR_AREA_OFFSET
#define E1CP_NBSR_AREA_SIZE E2S_NBSR_AREA_SIZE
#define E1CP_MLT_SIZE ES2_MLT_SIZE
#define E1CP_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM
#define E1CP_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM
#define E1CP_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2
#define E1CP_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2
#define E1CP_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM
#define E1CP_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT
#define E1CP_SIC_MC_COUNT ES2_SIC_MC_COUNT
#define E1CP_SIC_MC1_ECC E2S_SIC_MC1_ECC
#define E1CP_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE
#define E1CP_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT
#define E1CP_L1_CACHE_BYTES ES2_L1_CACHE_BYTES
#define E1CP_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT
#define E1CP_L2_CACHE_BYTES ES2_L2_CACHE_BYTES
#define E1CP_SIC_MC_COUNT 2
#endif /* _ASM_E1CP_H_ */

View File

@ -15,8 +15,6 @@ struct pt_regs;
extern void boot_e2c3_setup_arch(void);
extern void e2c3_setup_machine(void);
extern void setup_APIC_vector_handler(int vector,
void (*handler)(struct pt_regs *), bool system, char *name);
#endif
#define E2C3_NR_NODE_CPUS 2
@ -24,34 +22,12 @@ extern void setup_APIC_vector_handler(int vector,
#define E2C3_NODE_IOLINKS 1
#define E2C3_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE
#define E2C3_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE
#define E2C3_PCICFG_AREA_PHYS_BASE E2S_PCICFG_AREA_PHYS_BASE
#define E2C3_PCICFG_AREA_SIZE E2S_PCICFG_AREA_SIZE
#define E2C3_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE
#define E2C3_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET
#define E2C3_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE
#define E2C3_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE
#define E2C3_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE
#define E2C3_MLT_SIZE ES2_MLT_SIZE
#define E2C3_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM
#define E2C3_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM
#define E2C3_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2
#define E2C3_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2
#define E2C3_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM
#define E2C3_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT
#define E2C3_NSR_AREA_PHYS_BASE E2S_NSR_AREA_PHYS_BASE
#define E2C3_SIC_MC_SIZE E16C_SIC_MC_SIZE
#define E2C3_SIC_MC_COUNT E12C_SIC_MC_COUNT
#define E2C3_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE
#define E2C3_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT
#define E2C3_L1_CACHE_BYTES ES2_L1_CACHE_BYTES
#define E2C3_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT
#define E2C3_L2_CACHE_BYTES ES2_L2_CACHE_BYTES
#endif /* _ASM_E2C3_H_ */

View File

@ -8,7 +8,6 @@
#include <asm/e2k_api.h>
#include <asm/sections.h>
#include <asm/e2s.h>
#include <asm/es2.h>
#include <asm/e8c.h>
#include <asm/e1cp.h>
#include <asm/e8c2.h>
@ -20,37 +19,26 @@
#define MACHINE_ID_CPU_TYPE_MASK 0x000f
#define MACHINE_ID_SIMUL 0x0010
#define MACHINE_ID_E2K_FULL_SIC 0x0020
#define MACHINE_ID_E2K_IOHUB 0x0040
#define MACHINE_ID_L_IOMMU 0x0080
#define MACHINE_ID_E2K_LEGACY_SIC 0x0100 /* host bridge & legacy NBSR */
#define MACHINE_ID_E2K_VIRT_IO 0x0400 /* machine is virtual and */
#define MACHINE_ID_L_IOMMU 0x0040
#define MACHINE_ID_E2K_LEGACY_SIC 0x0080 /* host bridge & legacy NBSR */
#define MACHINE_ID_E2K_VIRT_IO 0x0100 /* machine is virtual and */
/* IO is simulated at user level */
/* (for example by QEMU) */
#define MACHINE_ID_HW_VIRT 0x4000 /* hardware virtualized VM */
#define MACHINE_ID_VIRT 0x8000 /* soft paravirtualized VM */
#define MACHINE_ID_E2K_IOMMU 0x10000
#define MACHINE_ID_HW_VIRT 0x0200 /* hardware virtualized VM */
#define MACHINE_ID_VIRT 0x0400 /* soft paravirtualized VM */
#define MACHINE_ID_E2K_IOMMU 0x0800
#define MACHINE_ID_ES2_DSP (IDR_ES2_DSP_MDL | \
MACHINE_ID_E2K_FULL_SIC | \
MACHINE_ID_E2K_IOHUB)
#define MACHINE_ID_ES2_RU (IDR_ES2_RU_MDL | \
MACHINE_ID_E2K_FULL_SIC | \
MACHINE_ID_E2K_IOHUB)
#define MACHINE_ID_E2S (IDR_E2S_MDL | \
MACHINE_ID_E2K_FULL_SIC | \
MACHINE_ID_E2K_IOHUB | \
MACHINE_ID_L_IOMMU)
#define MACHINE_ID_E8C (IDR_E8C_MDL | \
MACHINE_ID_E2K_FULL_SIC | \
MACHINE_ID_E2K_IOHUB | \
MACHINE_ID_L_IOMMU)
#define MACHINE_ID_E1CP (IDR_E1CP_MDL | \
MACHINE_ID_E2K_LEGACY_SIC | \
MACHINE_ID_E2K_IOHUB | \
MACHINE_ID_L_IOMMU)
#define MACHINE_ID_E8C2 (IDR_E8C2_MDL | \
MACHINE_ID_E2K_FULL_SIC | \
MACHINE_ID_E2K_IOHUB | \
MACHINE_ID_L_IOMMU)
/*
* IO_* NBSRs are absent in models with EIOHub. Using LEGACY_SIC with FULL_SIC
@ -60,33 +48,25 @@
#define MACHINE_ID_E12C (IDR_E12C_MDL | \
MACHINE_ID_E2K_LEGACY_SIC | \
MACHINE_ID_E2K_FULL_SIC | \
MACHINE_ID_E2K_IOHUB | \
MACHINE_ID_E2K_IOMMU)
#define MACHINE_ID_E16C (IDR_E16C_MDL | \
MACHINE_ID_E2K_LEGACY_SIC | \
MACHINE_ID_E2K_FULL_SIC | \
MACHINE_ID_E2K_IOHUB | \
MACHINE_ID_E2K_IOMMU)
#define MACHINE_ID_E2C3 (IDR_E2C3_MDL | \
MACHINE_ID_E2K_LEGACY_SIC | \
MACHINE_ID_E2K_FULL_SIC | \
MACHINE_ID_E2K_IOHUB | \
MACHINE_ID_E2K_IOMMU)
#define MACHINE_ID_E2K_VIRT (IDR_E2K_VIRT_MDL | \
MACHINE_ID_E2K_FULL_SIC | \
MACHINE_ID_E2K_IOHUB | \
MACHINE_ID_E2K_VIRT_IO)
#define MACHINE_ID_E2K_HW_VIRT (IDR_E2K_VIRT_MDL | \
MACHINE_ID_HW_VIRT | \
MACHINE_ID_E2K_LEGACY_SIC | \
MACHINE_ID_E2K_FULL_SIC | \
MACHINE_ID_E2K_IOHUB | \
MACHINE_ID_E2K_IOMMU | \
MACHINE_ID_E2K_VIRT_IO)
#define MACHINE_ID_ES2_DSP_LMS (MACHINE_ID_ES2_DSP | \
MACHINE_ID_SIMUL)
#define MACHINE_ID_ES2_RU_LMS (MACHINE_ID_ES2_RU | MACHINE_ID_SIMUL)
#define MACHINE_ID_E2S_LMS (MACHINE_ID_E2S | MACHINE_ID_SIMUL)
#define MACHINE_ID_E8C_LMS (MACHINE_ID_E8C | MACHINE_ID_SIMUL)
#define MACHINE_ID_E1CP_LMS (MACHINE_ID_E1CP | MACHINE_ID_SIMUL)
@ -95,8 +75,6 @@
#define MACHINE_ID_E16C_LMS (MACHINE_ID_E16C | MACHINE_ID_SIMUL)
#define MACHINE_ID_E2C3_LMS (MACHINE_ID_E2C3 | MACHINE_ID_SIMUL)
#define MACHINE_ID_VIRT_ES2_DSP (MACHINE_ID_ES2_DSP | MACHINE_ID_VIRT)
#define MACHINE_ID_VIRT_ES2_RU (MACHINE_ID_ES2_RU | MACHINE_ID_VIRT)
#define MACHINE_ID_VIRT_E2S (MACHINE_ID_E2S | MACHINE_ID_VIRT)
#define MACHINE_ID_VIRT_E8C (MACHINE_ID_E8C | MACHINE_ID_VIRT)
#define MACHINE_ID_VIRT_E1CP (MACHINE_ID_E1CP | MACHINE_ID_VIRT)
@ -112,11 +90,7 @@
#endif
#ifdef CONFIG_E2K_MACHINE
#if defined(CONFIG_E2K_ES2_DSP)
#define native_machine_id (MACHINE_ID_ES2_DSP | MACHINE_SIMUL_FLAG)
#elif defined(CONFIG_E2K_ES2_RU)
#define native_machine_id (MACHINE_ID_ES2_RU | MACHINE_SIMUL_FLAG)
#elif defined(CONFIG_E2K_E2S)
#if defined(CONFIG_E2K_E2S)
#define native_machine_id (MACHINE_ID_E2S | MACHINE_SIMUL_FLAG)
#elif defined(CONFIG_E2K_E8C)
#define native_machine_id (MACHINE_ID_E8C | MACHINE_SIMUL_FLAG)
@ -133,8 +107,6 @@
#else
# error "E2K MACHINE type is not defined"
#endif
#elif defined(CONFIG_ES2) /* can be defined only for tiny boot on lms */
#define native_machine_id MACHINE_ID_ES2_DSP_LMS
#elif defined(CONFIG_E2S) /* can be defined only for tiny boot on lms */
#define native_machine_id MACHINE_ID_E2S_LMS
#elif defined(CONFIG_E8C) /* can be defined only for tiny boot on lms */
@ -162,13 +134,6 @@ extern const char *native_get_mach_type_name(void);
extern void e2k_init_IRQ(void);
#define IS_THE_MACHINE_ES2_DSP(mach_id) \
(((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_ES2_DSP_MDL)
#define IS_THE_MACHINE_ES2_RU(mach_id) \
(((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_ES2_RU_MDL)
#define IS_THE_MACHINE_ES2(mach_id) \
((IS_THE_MACHINE_ES2_DSP(mach_id)) || \
(IS_THE_MACHINE_ES2_RU(mach_id)))
#define IS_THE_MACHINE_E2S(mach_id) \
(((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_E2S_MDL)
#define IS_THE_MACHINE_E8C(mach_id) \
@ -189,12 +154,8 @@ extern void e2k_init_IRQ(void);
#define IS_THE_MACHINE_SIM(mach_id) \
(((mach_id) & MACHINE_ID_SIMUL) != 0)
#define HAS_THE_MACHINE_E2K_DSP(mach_id) \
(IS_THE_MACHINE_ES2_DSP(mach_id))
#define HAS_THE_MACHINE_E2K_FULL_SIC(mach_id) \
(((mach_id) & MACHINE_ID_E2K_FULL_SIC) != 0)
#define HAS_THE_MACHINE_E2K_IOHUB(mach_id) \
(((mach_id) & MACHINE_ID_E2K_IOHUB) != 0)
#define HAS_THE_MACHINE_L_IOMMU(mach_id) \
(((mach_id) & MACHINE_ID_L_IOMMU) != 0)
#define HAS_THE_MACHINE_E2K_IOMMU(mach_id) \
@ -205,12 +166,6 @@ extern void e2k_init_IRQ(void);
(HAS_THE_MACHINE_E2K_FULL_SIC(mach_id) || \
HAS_THE_MACHINE_E2K_LEGACY_SIC(mach_id))
#define NATIVE_IS_MACHINE_ES2_DSP \
IS_THE_MACHINE_ES2_DSP(native_machine_id)
#define NATIVE_IS_MACHINE_ES2_RU \
IS_THE_MACHINE_ES2_RU(native_machine_id)
#define NATIVE_IS_MACHINE_ES2 \
IS_THE_MACHINE_ES2(native_machine_id)
#define NATIVE_IS_MACHINE_E2S \
IS_THE_MACHINE_E2S(native_machine_id)
#define NATIVE_IS_MACHINE_E8C \
@ -227,13 +182,6 @@ extern void e2k_init_IRQ(void);
IS_THE_MACHINE_E2C3(native_machine_id)
#define NATIVE_IS_MACHINE_E2K_VIRT (false)
#define BOOT_NATIVE_IS_MACHINE_ES2_DSP \
IS_THE_MACHINE_ES2_DSP(boot_native_machine_id)
#define BOOT_NATIVE_IS_MACHINE_ES2_RU \
IS_THE_MACHINE_ES2_RU(boot_native_machine_id)
#define BOOT_NATIVE_IS_MACHINE_ES2 \
((BOOT_NATIVE_IS_MACHINE_ES2_DSP) || \
(BOOT_NATIVE_IS_MACHINE_ES2_RU))
#define BOOT_NATIVE_IS_MACHINE_E2S \
IS_THE_MACHINE_E2S(boot_native_machine_id)
#define BOOT_NATIVE_IS_MACHINE_E8C \
@ -253,12 +201,8 @@ extern void e2k_init_IRQ(void);
#define NATIVE_IS_MACHINE_SIM \
IS_THE_MACHINE_SIM(native_machine_id)
#define NATIVE_HAS_MACHINE_E2K_DSP \
HAS_THE_MACHINE_E2K_DSP(native_machine_id)
#define NATIVE_HAS_MACHINE_E2K_FULL_SIC \
HAS_THE_MACHINE_E2K_FULL_SIC(native_machine_id)
#define NATIVE_HAS_MACHINE_E2K_IOHUB \
HAS_THE_MACHINE_E2K_IOHUB(native_machine_id)
#define NATIVE_HAS_MACHINE_E2K_IOMMU \
HAS_THE_MACHINE_E2K_IOMMU(native_machine_id)
#define NATIVE_HAS_MACHINE_E2K_LEGACY_SIC \
@ -269,12 +213,8 @@ extern void e2k_init_IRQ(void);
#define BOOT_NATIVE_IS_MACHINE_SIM \
IS_THE_MACHINE_SIM(boot_native_machine_id)
#define BOOT_NATIVE_HAS_MACHINE_E2K_DSP \
HAS_THE_MACHINE_E2K_DSP(boot_native_machine_id)
#define BOOT_NATIVE_HAS_MACHINE_E2K_FULL_SIC \
HAS_THE_MACHINE_E2K_FULL_SIC(boot_native_machine_id)
#define BOOT_NATIVE_HAS_MACHINE_E2K_IOHUB \
HAS_THE_MACHINE_E2K_IOHUB(boot_native_machine_id)
#define BOOT_NATIVE_HAS_MACHINE_E2K_LEGACY_SIC \
HAS_THE_MACHINE_E2K_LEGACY_SIC(boot_native_machine_id)
#define BOOT_NATIVE_HAS_MACHINE_L_SIC \
@ -307,12 +247,6 @@ static inline void set_mach_type_id(void)
#endif /* ! CONFIG_KVM_GUEST_KERNEL */
#define IS_MACHINE_ES2_DSP \
IS_THE_MACHINE_ES2_DSP(get_machine_id())
#define IS_MACHINE_ES2_RU \
IS_THE_MACHINE_ES2_RU(get_machine_id())
#define IS_MACHINE_ES2 \
IS_THE_MACHINE_ES2(get_machine_id())
#define IS_MACHINE_E2S \
IS_THE_MACHINE_E2S(get_machine_id())
#define IS_MACHINE_E8C \
@ -330,12 +264,8 @@ static inline void set_mach_type_id(void)
#define IS_MACHINE_E2K_VIRT \
IS_THE_MACHINE_E2K_VIRT(get_machine_id())
#define HAS_MACHINE_E2K_DSP \
HAS_THE_MACHINE_E2K_DSP(get_machine_id())
#define HAS_MACHINE_E2K_FULL_SIC \
HAS_THE_MACHINE_E2K_FULL_SIC(get_machine_id())
#define HAS_MACHINE_E2K_IOHUB \
HAS_THE_MACHINE_E2K_IOHUB(get_machine_id())
#define HAS_MACHINE_L_IOMMU \
HAS_THE_MACHINE_L_IOMMU(get_machine_id())
#define HAS_MACHINE_E2K_IOMMU \
@ -345,12 +275,6 @@ static inline void set_mach_type_id(void)
#define HAS_MACHINE_L_SIC \
HAS_THE_MACHINE_L_SIC(get_machine_id())
#define BOOT_IS_MACHINE_ES2_DSP \
IS_THE_MACHINE_ES2_DSP(boot_get_machine_id())
#define BOOT_IS_MACHINE_ES2_RU \
IS_THE_MACHINE_ES2_RU(boot_get_machine_id())
#define BOOT_IS_MACHINE_ES2 \
IS_THE_MACHINE_ES2(boot_get_machine_id())
#define BOOT_IS_MACHINE_E2S \
IS_THE_MACHINE_E2S(boot_get_machine_id())
#define BOOT_IS_MACHINE_E8C \
@ -368,12 +292,8 @@ static inline void set_mach_type_id(void)
#define BOOT_IS_MACHINE_VIRT \
IS_THE_MACHINE_VIRT(boot_get_machine_id())
#define BOOT_HAS_MACHINE_E2K_DSP \
HAS_THE_MACHINE_E2K_DSP(boot_get_machine_id())
#define BOOT_HAS_MACHINE_E2K_FULL_SIC \
HAS_THE_MACHINE_E2K_FULL_SIC(boot_get_machine_id())
#define BOOT_HAS_MACHINE_E2K_IOHUB \
HAS_THE_MACHINE_E2K_IOHUB(boot_get_machine_id())
#define BOOT_HAS_MACHINE_L_IOMMU \
HAS_THE_MACHINE_L_IOMMU(boot_get_machine_id())
#define BOOT_HAS_MACHINE_E2K_IOMMU \

View File

@ -56,7 +56,6 @@ typedef void *__e2k_ptr_t;
/*
* If x->e_flags && ELF_E2K_INCOMPAT == 1
* the code can be executed only (mtype==0) - any
* ==2 es2
* ==3 e2s
* ==4 e8c
*/
@ -96,12 +95,8 @@ typedef void *__e2k_ptr_t;
\
switch (mt) { \
case 0: \
if (!IS_INCOMPAT(x) || _iset == ELBRUS_S_ISET) \
_res = 1; \
break; \
case 2: \
if (!IS_INCOMPAT(x) && _iset > ELBRUS_S_ISET \
|| _iset == ELBRUS_S_ISET) \
if (!IS_INCOMPAT(x)) \
_res = 1; \
break; \
case 3: \
@ -460,10 +455,10 @@ do { \
} \
})
#define ASM_SAVE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, iset) \
#define ASM_SAVE_GREG_V3(__addr_lo, __addr_hi, numlo, numhi, iset) \
({ \
u64 reg0, reg1; \
BUILD_BUG_ON(iset != E2K_ISET_V2); \
BUILD_BUG_ON(iset != E2K_ISET_V3); \
\
asm ( \
"strd,2 [ %[addr_lo] + %[opc_0] ], %%dg" #numlo "\n" \
@ -478,10 +473,10 @@ do { \
: "memory"); \
})
#define ASM_RESTORE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, iset) \
#define ASM_RESTORE_GREG_V3(__addr_lo, __addr_hi, numlo, numhi, iset) \
({ \
u64 reg0, reg1, reg2, reg3; \
BUILD_BUG_ON(iset != E2K_ISET_V2); \
BUILD_BUG_ON(iset != E2K_ISET_V3); \
\
asm ( \
"ldrd,2 [ %[addr_lo] + %[opc_0] ], %%dg" #numlo "\n" \
@ -534,13 +529,13 @@ do { \
: "%g" #numlo, "%g" #numhi); \
})
#if __iset__ == 2
#if __iset__ == 3
#define ASM_SAVE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) \
ASM_SAVE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, iset)
ASM_SAVE_GREG_V3(__addr_lo, __addr_hi, numlo, numhi, iset)
#define ASM_RESTORE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) \
ASM_RESTORE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, iset)
ASM_RESTORE_GREG_V3(__addr_lo, __addr_hi, numlo, numhi, iset)
#elif __iset__ == 5
@ -557,21 +552,21 @@ do { \
#define NATIVE_SAVE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) \
ASM_SAVE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset)
#define NATIVE_SAVE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi) \
ASM_SAVE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, E2K_ISET_V2)
#define NATIVE_SAVE_GREG_V3(__addr_lo, __addr_hi, numlo, numhi) \
ASM_SAVE_GREG_V3(__addr_lo, __addr_hi, numlo, numhi, E2K_ISET_V3)
#define NATIVE_SAVE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi) \
ASM_SAVE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi, E2K_ISET_V5)
#define NATIVE_RESTORE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) \
ASM_RESTORE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset)
#define NATIVE_RESTORE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi) \
ASM_RESTORE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, E2K_ISET_V2)
#define NATIVE_RESTORE_GREG_V3(__addr_lo, __addr_hi, numlo, numhi) \
ASM_RESTORE_GREG_V3(__addr_lo, __addr_hi, numlo, numhi, E2K_ISET_V3)
#define NATIVE_RESTORE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi) \
ASM_RESTORE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi, E2K_ISET_V5)
#define ASM_SAVE_THE_KERNEL_GREG(greg_no, _base, ind) \
({ \
u64 reg0, reg1; \
BUILD_BUG_ON(iset != E2K_ISET_V2); \
BUILD_BUG_ON(iset != E2K_ISET_V3); \
\
asm ( \
"strd [ %[base] + %[opc] ], %%dg" #greg_no "\n" \
@ -1012,6 +1007,18 @@ _Pragma("no_asm_inline") \
: "ri" ((__e2k_u64_t) (val))); \
})
#define NATIVE_SET_DSREGS_CLOSED_NOEXC(reg_mnemonic_lo, reg_mnemonic_hi, \
_val_lo, _val_hi, nop) \
({ \
asm volatile ("{rwd %[val_lo], %%" #reg_mnemonic_lo "}" \
"{nop " __stringify(NOP_##nop##_MINUS_4) "\n" \
" rwd %[val_hi], %%" #reg_mnemonic_hi "}" \
"{nop} {nop} {nop} {nop}" \
: \
: [val_lo] "ri" ((u64) (_val_lo)), \
[val_hi] "ri" ((u64) (_val_hi))); \
})
/*
* For some registers (see "Scheduling 1.1.1") there is no requirement
* to avoid deferred and exact exceptions after the long instruction.
@ -2606,7 +2613,7 @@ do { \
: "memory"); \
})
#define E2K_TAGGED_MEMMOVE_128_RF_V2(__dst, __src) \
#define E2K_TAGGED_MEMMOVE_128_RF_V3(__dst, __src) \
({ \
u64 __tmp1, __tmp2, __tmp3, __tmp4, __tmp5, __tmp6, __tmp7, __tmp8; \
asm ( \
@ -3107,43 +3114,13 @@ do { \
MAS_MODE_LOAD_OP_LOCK_CHECK)); \
})
#if !defined(CONFIG_BOOT_E2K) && !defined(E2K_P2V) && defined(CONFIG_CPU_ES2)
# define HWBUG_ATOMIC_BEGIN(addr) \
unsigned long __hwbug_atomic_flags = 0; \
bool __hwbug_atomic_possible = cpu_has(CPU_HWBUG_ATOMIC); \
if (__hwbug_atomic_possible) { \
__hwbug_atomic_flags = NATIVE_NV_READ_UPSR_REG_VALUE(); \
NATIVE_SET_UPSR_IRQ_BARRIER( \
__hwbug_atomic_flags & ~(UPSR_IE | UPSR_NMIE)); \
NATIVE_FLUSH_DCACHE_LINE_UNPRIV((unsigned long) (addr)); \
}
# define HWBUG_ATOMIC_END() \
if (__hwbug_atomic_possible) \
NATIVE_SET_UPSR_IRQ_BARRIER(__hwbug_atomic_flags)
#else
# define HWBUG_ATOMIC_BEGIN(addr)
# define HWBUG_ATOMIC_END()
#endif
/*
* On E2C+ atomic operations have relaxed memory ordering:
* _st_unlock can be reordered with subsequent loads and stores.
* Issue an explicit memory barrier if atomic operation returns a value.
*
* On E4C with multiple nodes and E2C+ atomic operations have fully
* relaxed memory ordering because of a hardware bug, must add "wait ma_c".
* On E4C with multiple nodes atomic operations have fully relaxed memory
* ordering because of a hardware bug, must add "wait ma_c".
*/
#if !defined CONFIG_E2K_MACHINE
# define MB_BEFORE_ATOMIC "{wait st_c=1, ma_c=1}\n"
# define MB_AFTER_ATOMIC "{wait st_c=1, ma_c=1}\n"
# define MB_AFTER_ATOMIC_LOCK_MB /* E2K_WAIT_ST_C_SAS() */ \
".word 0x00008001\n" \
".word 0x30000084\n"
#elif defined CONFIG_E2K_ES2_DSP || defined CONFIG_E2K_ES2_RU
# define MB_BEFORE_ATOMIC "{wait st_c=1, ma_c=1}\n"
# define MB_AFTER_ATOMIC "{wait st_c=1, ma_c=1}\n"
# define MB_AFTER_ATOMIC_LOCK_MB /* E2K_WAIT_ST_C_SAS() */ \
".word 0x00008001\n" \
".word 0x30000084\n"
@ -3219,7 +3196,6 @@ do { \
#define NATIVE_ATOMIC_OP(__val, __addr, __rval, \
size_letter, op, mem_model) \
do { \
HWBUG_ATOMIC_BEGIN(__addr); \
asm NOT_VOLATILE ( \
MB_BEFORE_ATOMIC_##mem_model \
"\n1:" \
@ -3239,13 +3215,11 @@ do { \
: [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \
: [val] "ir" (__val) \
CLOBBERS_##mem_model); \
HWBUG_ATOMIC_END(); \
} while (0)
#define NATIVE_ATOMIC_FETCH_OP(__val, __addr, __rval, __tmp, \
size_letter, op, mem_model) \
do { \
HWBUG_ATOMIC_BEGIN(__addr); \
asm NOT_VOLATILE ( \
MB_BEFORE_ATOMIC_##mem_model \
"\n1:" \
@ -3266,12 +3240,10 @@ do { \
[rval] "=&r" (__rval) \
: [val] "ir" (__val) \
CLOBBERS_##mem_model); \
HWBUG_ATOMIC_END(); \
} while (0)
#define NATIVE_ATOMIC32_ADD_IF_NOT_NEGATIVE(__val, __addr, __rval, mem_model) \
do { \
HWBUG_ATOMIC_BEGIN(__addr); \
asm NOT_VOLATILE ( \
MB_BEFORE_ATOMIC_##mem_model \
"\n1:" \
@ -3295,12 +3267,10 @@ do { \
: [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \
: [val] "ir" (__val) \
CLOBBERS_PRED2_##mem_model); \
HWBUG_ATOMIC_END(); \
} while (0)
#define NATIVE_ATOMIC64_ADD_IF_NOT_NEGATIVE(__val, __addr, __rval, mem_model) \
do { \
HWBUG_ATOMIC_BEGIN(__addr); \
asm NOT_VOLATILE ( \
MB_BEFORE_ATOMIC_##mem_model \
"\n1:" \
@ -3325,13 +3295,11 @@ do { \
: [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \
: [val] "ir" (__val) \
CLOBBERS_PRED2_##mem_model); \
HWBUG_ATOMIC_END(); \
} while (0)
/* Atomically add to the 16 low bits and return the new 32-bit value */
#define NATIVE_ATOMIC16_ADD_RETURN32_LOCK(val, addr, rval, tmp) \
({ \
HWBUG_ATOMIC_BEGIN(addr); \
asm NOT_VOLATILE ( \
"\n1:" \
"\n{"\
@ -3352,7 +3320,6 @@ do { \
: "=&r" (rval), "=&r" (tmp) \
: "i" (val), "r" ((__e2k_ptr_t) (addr)) \
: "memory"); \
HWBUG_ATOMIC_END(); \
})
/* Atomically add two 32-bit values packed into one 64-bit value */
@ -3360,7 +3327,6 @@ do { \
#define NATIVE_ATOMIC32_PAIR_ADD_RETURN64_LOCK(val_lo, val_hi, addr, rval, \
tmp1, tmp2, tmp3) \
({ \
HWBUG_ATOMIC_BEGIN(addr); \
asm NOT_VOLATILE ( \
"\n1:" \
"\n\t{"\
@ -3395,7 +3361,6 @@ do { \
"ri" (val_hi), \
"r" ((__e2k_ptr_t) (addr)) \
: "memory"); \
HWBUG_ATOMIC_END(); \
})
/* Atomically subtract two 32-bit values packed into one 64-bit value */
@ -3403,7 +3368,6 @@ do { \
#define NATIVE_ATOMIC32_PAIR_SUB_RETURN64_LOCK(val_lo, val_hi, addr, rval, \
tmp1, tmp2, tmp3) \
({ \
HWBUG_ATOMIC_BEGIN(addr); \
asm NOT_VOLATILE ( \
"\n1:" \
"\n\t{"\
@ -3438,7 +3402,6 @@ do { \
"ri" (val_hi), \
"r" ((__e2k_ptr_t) (addr)) \
: "memory"); \
HWBUG_ATOMIC_END(); \
})
/*
@ -3457,7 +3420,6 @@ do { \
#define NATIVE_ATOMIC_TICKET_TRYLOCK(spinlock, tail_shift, \
__val, __head, __tail, __rval) \
do { \
HWBUG_ATOMIC_BEGIN(spinlock); \
asm NOT_VOLATILE ( \
"\n1:" \
"\n{"\
@ -3488,7 +3450,6 @@ do { \
[addr] "+m" (*(spinlock)) \
: [incr] "i" (1 << tail_shift) \
: "memory", "pred2"); \
HWBUG_ATOMIC_END(); \
} while (0)
/*
@ -3663,7 +3624,6 @@ atomic_add_new_reader(arch_rwlock_t *rw, bool success // bypassed)
#define NATIVE_ATOMIC_ADD_NEW_READER(__rw_addr, __success, \
__head, __ticket, __count, __src, __dst, __tmp) \
({ \
HWBUG_ATOMIC_BEGIN(__rw_addr); \
asm NOT_VOLATILE ( \
"\n1:" \
"\n\t{" \
@ -3722,7 +3682,6 @@ atomic_add_new_reader(arch_rwlock_t *rw, bool success // bypassed)
[tmp] "=&r" (__tmp), \
[addr] "+m" (*(__rw_addr)) \
:: "memory", "pred2", "pred3"); \
HWBUG_ATOMIC_END(); \
})
/*
@ -3767,7 +3726,6 @@ atomic_try_add_new_reader(arch_rwlock_t *rw, bool success // bypassed)
#define NATIVE_ATOMIC_TRY_ADD_NEW_READER(__rw_addr, __success, \
__head, __ticket, __count, __src, __dst, __tmp) \
({ \
HWBUG_ATOMIC_BEGIN(__rw_addr); \
asm NOT_VOLATILE ( \
"\n1:" \
"\n\t{" \
@ -3827,7 +3785,6 @@ atomic_try_add_new_reader(arch_rwlock_t *rw, bool success // bypassed)
[tmp] "=&r" (__tmp), \
[addr] "+m" (*(__rw_addr)) \
:: "memory", "pred2", "pred3"); \
HWBUG_ATOMIC_END(); \
})
/*
@ -3867,7 +3824,6 @@ atomic_add_slow_reader(arch_rwlock_t *rw, u16 ticket, bool success)
#define NATIVE_ATOMIC_ADD_SLOW_READER(__rw_addr, __success, \
__head, __ticket, __count, __dst, __tmp) \
({ \
HWBUG_ATOMIC_BEGIN(__rw_addr); \
asm NOT_VOLATILE ( \
"\n1:" \
"\n\t{" \
@ -3922,7 +3878,6 @@ atomic_add_slow_reader(arch_rwlock_t *rw, u16 ticket, bool success)
[addr] "+m" (*(__rw_addr)) \
: [ticket] "r" (__ticket) \
: "memory", "pred2", "pred3"); \
HWBUG_ATOMIC_END(); \
})
/*
@ -3945,7 +3900,6 @@ atomic_free_lock_reader(arch_rwlock_t *rw)
*/
#define NATIVE_ATOMIC_FREE_LOCK_READER(__rw_addr, __dst) \
({ \
HWBUG_ATOMIC_BEGIN(__rw_addr); \
asm NOT_VOLATILE ( \
"\n1:" \
"\n\t{" \
@ -3964,7 +3918,6 @@ atomic_free_lock_reader(arch_rwlock_t *rw)
: [dst] "=&r" (__dst), \
[addr] "+m" (*(__rw_addr)) \
:: "memory"); \
HWBUG_ATOMIC_END(); \
})
/*
@ -4006,7 +3959,6 @@ atomic_add_new_writer(arch_rwlock_t *rw, bool success // bypassed)
#define NATIVE_ATOMIC_ADD_NEW_WRITER(__rw_addr, __success, \
__head, __ticket, __count, __src, __dst, __tmp) \
({ \
HWBUG_ATOMIC_BEGIN(__rw_addr); \
asm NOT_VOLATILE ( \
"\n1:" \
"\n\t{" \
@ -4060,7 +4012,6 @@ atomic_add_new_writer(arch_rwlock_t *rw, bool success // bypassed)
[tmp] "=&r" (__tmp), \
[addr] "+m" (*(__rw_addr)) \
:: "memory", "pred2", "pred3"); \
HWBUG_ATOMIC_END(); \
})
/*
@ -4102,7 +4053,6 @@ atomic_try_add_new_writer(arch_rwlock_t *rw, bool success // bypassed)
#define NATIVE_ATOMIC_TRY_ADD_NEW_WRITER(__rw_addr, __success, \
__head, __ticket, __count, __src, __dst, __tmp) \
({ \
HWBUG_ATOMIC_BEGIN(__rw_addr); \
asm NOT_VOLATILE ( \
"\n1:" \
"\n\t{" \
@ -4157,7 +4107,6 @@ atomic_try_add_new_writer(arch_rwlock_t *rw, bool success // bypassed)
[tmp] "=&r" (__tmp), \
[addr] "+m" (*(__rw_addr)) \
:: "memory", "pred2", "pred3"); \
HWBUG_ATOMIC_END(); \
})
/*
@ -4194,7 +4143,6 @@ atomic_add_slow_writer(arch_rwlock_t *rw, u16 ticket, bool success)
#define NATIVE_ATOMIC_ADD_SLOW_WRITER(__rw_addr, __success, \
__head, __ticket, __count, __dst, __tmp) \
({ \
HWBUG_ATOMIC_BEGIN(__rw_addr); \
asm NOT_VOLATILE ( \
"\n1:" \
"\n\t{" \
@ -4243,7 +4191,6 @@ atomic_add_slow_writer(arch_rwlock_t *rw, u16 ticket, bool success)
[addr] "+m" (*(__rw_addr)) \
: [ticket] "r" (__ticket) \
: "memory", "pred2", "pred3"); \
HWBUG_ATOMIC_END(); \
})
/*
@ -4269,7 +4216,6 @@ atomic_free_lock_writer(arch_rwlock_t *rw)
#define NATIVE_ATOMIC_FREE_LOCK_WRITER(__rw_addr, \
__head, __count, __dst, __tmp); \
({ \
HWBUG_ATOMIC_BEGIN(__rw_addr); \
asm NOT_VOLATILE ( \
"\n1:" \
"\n\t{" \
@ -4307,7 +4253,6 @@ atomic_free_lock_writer(arch_rwlock_t *rw)
[tmp] "=&r" (__tmp), \
[addr] "+m" (*(__rw_addr)) \
:: "memory"); \
HWBUG_ATOMIC_END(); \
})
@ -4318,7 +4263,6 @@ atomic_free_lock_writer(arch_rwlock_t *rw)
#define NATIVE_ATOMIC_FETCH_OP_UNLESS(__val, __addr, __unless, __tmp, __rval, \
size_letter, op, op_pred, add_op, add_op_pred, cmp_op, mem_model) \
do { \
HWBUG_ATOMIC_BEGIN(__addr); \
asm NOT_VOLATILE ( \
MB_BEFORE_ATOMIC_##mem_model \
"\n1:" \
@ -4345,13 +4289,11 @@ do { \
[addr] "+m" (*(__addr)) \
: [val] "ir" (__val), [unless] "ir" (__unless) \
CLOBBERS_PRED2_##mem_model); \
HWBUG_ATOMIC_END(); \
} while (0)
#define NATIVE_ATOMIC_FETCH_XCHG_UNLESS(__val, __addr, __tmp, __rval, \
size_letter, merge_op, cmp_op, mem_model) \
do { \
HWBUG_ATOMIC_BEGIN(__addr); \
asm NOT_VOLATILE ( \
MB_BEFORE_ATOMIC_##mem_model \
"\n1:" \
@ -4377,13 +4319,11 @@ do { \
[addr] "+m" (*(__addr)) \
: [val] "ir" (__val) \
CLOBBERS_PRED2_##mem_model); \
HWBUG_ATOMIC_END(); \
} while (0)
#define NATIVE_ATOMIC_XCHG_RETURN(__val, __addr, __rval, \
size_letter, mem_model) \
do { \
HWBUG_ATOMIC_BEGIN(__addr); \
asm NOT_VOLATILE ( \
MB_BEFORE_ATOMIC_##mem_model \
"\n2:" \
@ -4400,7 +4340,6 @@ do { \
: [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \
: [val] "r" (__val) \
CLOBBERS_##mem_model); \
HWBUG_ATOMIC_END(); \
} while (0)
#define CLOBBERS_PRED2_LOCK_MB : "memory", "pred2"
@ -4424,7 +4363,6 @@ do { \
#define NATIVE_ATOMIC_CMPXCHG_RETURN(__old, __new, __addr, __stored_val, \
__rval, size_letter, sxt_size, mem_model) \
do { \
HWBUG_ATOMIC_BEGIN(__addr); \
asm NOT_VOLATILE ( \
MB_BEFORE_ATOMIC_##mem_model \
"\n3:" \
@ -4454,13 +4392,11 @@ do { \
[addr] "+m" (*(__addr)) \
: [new] "ir" (__new), [old] "ir" (__old) \
CLOBBERS_PRED2_##mem_model); \
HWBUG_ATOMIC_END(); \
} while (0)
#define NATIVE_ATOMIC_CMPXCHG_WORD_RETURN(__old, __new, __addr, \
__stored_val, __rval, mem_model) \
do { \
HWBUG_ATOMIC_BEGIN(__addr); \
asm NOT_VOLATILE ( \
MB_BEFORE_ATOMIC_##mem_model \
"\n3:" \
@ -4487,13 +4423,11 @@ do { \
[rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \
: [new] "ir" (__new), [old] "ir" (__old) \
CLOBBERS_PRED2_##mem_model); \
HWBUG_ATOMIC_END(); \
} while (0)
#define NATIVE_ATOMIC_CMPXCHG_DWORD_RETURN(__old, __new, __addr, \
__stored_val, __rval, mem_model) \
do { \
HWBUG_ATOMIC_BEGIN(__addr); \
asm NOT_VOLATILE ( \
MB_BEFORE_ATOMIC_##mem_model \
"\n3:" \
@ -4520,7 +4454,6 @@ do { \
[rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \
: [new] "ir" (__new), [old] "ir" (__old) \
CLOBBERS_PRED2_##mem_model); \
HWBUG_ATOMIC_END(); \
} while (0)
#ifdef CONFIG_HAVE_CMPXCHG_DOUBLE
@ -4623,7 +4556,6 @@ do { \
#define _all_c 0x1 /* stop until prev. operations complete */
#if !defined CONFIG_E2K_MACHINE || \
defined CONFIG_E2K_ES2_DSP || defined CONFIG_E2K_ES2_RU || \
(defined CONFIG_E2K_E2S && defined CONFIG_NUMA)
# define WORKAROUND_WAIT_HWBUG(num) (((num) & (_st_c | _all_c | _sas)) ? \
((num) | _ma_c) : (num))
@ -5092,51 +5024,26 @@ do { \
_Pragma("no_asm_inline") \
asm volatile (".word 0x00008001; .word 0x7000000c" ::: "memory"); \
} while (0)
#define NATIVE_FILL_HARDWARE_STACKS__SW() \
#define NATIVE_FILL_HARDWARE_STACKS__SW(_sw_fill_sequel) \
do { \
asm volatile ( \
"{\n" \
"nop 4\n" \
"return %%ctpr3\n" \
"movtd [ 0f ], %%dg" __stringify(GUEST_VCPU_STATE_GREG) "\n" \
"movtd %[sw_fill_sequel], %%dg" __stringify(GUEST_VCPU_STATE_GREG) "\n" \
"}\n" \
"{\n" \
"rrd %%wd, %%dg" __stringify(CURRENT_TASK_GREG) "\n" \
"}\n" \
"{\n" \
"rrd %%br, %%dg" __stringify(SMP_CPU_ID_GREG) "\n" \
"ct %%ctpr3\n" \
"}\n" \
"0:\n" \
"{\n" \
"rwd %%dg" __stringify(CURRENT_TASK_GREG) ", %%wd\n" \
"}\n" \
"{\n" \
"rwd %%dg" __stringify(SMP_CPU_ID_GREG) ", %%br\n" \
"}\n" \
"{\n" \
"nop 3\n" \
SMP_ONLY("ldw %%dg" __stringify(GUEST_VCPU_STATE_GREG) ", " \
"%[task_ti_cpu_delta], " \
"%%dg" __stringify(SMP_CPU_ID_GREG) "\n") \
"subd %%dg" __stringify(GUEST_VCPU_STATE_GREG) ", " \
"%[task_ti_offset], " \
"%%dg" __stringify(CURRENT_TASK_GREG) "\n" \
"}\n" \
"{\n" \
"nop\n" /* For "rwd %wd" */ \
"}\n" \
:: SMP_ONLY([task_ti_cpu_delta] "i" (offsetof(struct task_struct, cpu) - \
offsetof(struct task_struct, thread_info)),) \
[task_ti_offset] "i" (offsetof(struct task_struct, thread_info)) \
: "ctpr1", "ctpr3", "memory"); \
/* If CPU supports only FILLC but not FILLR, then we use the return \
* trick above to fill RF and FILLC instruction to fill CF. */ \
if (cpu_has(CPU_FEAT_FILLC)) { \
/* "{fillc}" */ \
_Pragma("no_asm_inline") \
asm volatile (".word 0x00008001; .word 0x70000008" ::: "memory"); \
} \
: \
: [sw_fill_sequel] "ir" (_sw_fill_sequel) \
: "ctpr1", "ctpr2", "ctpr3", "memory"); \
} while (0)
#define NATIVE_FILL_CHAIN_STACK__HW() \
do { \
/* "{fillc}" */ \
_Pragma("no_asm_inline") \
asm volatile (".word 0x00008001; .word 0x70000008" ::: "memory"); \
} while (0)
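The comment removed above explains the split between these two fill paths: a CPU with FILLR can refill both hardware stacks itself, while one with only FILLC needs the software return trick for the register file plus the fillc encoding for the chain file. A hypothetical dispatch sketch under that reading; CPU_FEAT_FILLR, NATIVE_FILL_HARDWARE_STACKS__HW() and the label-address argument are assumptions for illustration, not code from this header:

/* Sketch only: picking a hardware-stack fill strategy by CPU feature. */
static void fill_hardware_stacks(void)
{
	if (cpu_has(CPU_FEAT_FILLR)) {		/* assumed feature flag */
		/* CPU fills both register and chain files itself */
		NATIVE_FILL_HARDWARE_STACKS__HW();	/* assumed macro */
	} else {
		/* Software return trick refills the register file,
		 * then resumes at the sequel address we pass in. */
		NATIVE_FILL_HARDWARE_STACKS__SW(&&sw_fill_done);
sw_fill_done:
		if (cpu_has(CPU_FEAT_FILLC))
			NATIVE_FILL_CHAIN_STACK__HW();
	}
}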
#ifndef __ASSEMBLY__
@ -5466,26 +5373,25 @@ do { \
_Pragma("no_asm_inline") \
asm volatile ("\n" \
"{\n" \
"addd \t 0, %0, %%dr0\n" \
"addd \t 0, %1, %%dr1\n" \
"addd \t 0, %2, %%dr2\n" \
"addd \t 0, %3, %%dr3\n" \
"addd \t 0, %4, %%dr4\n" \
"addd \t 0, %5, %%dr5\n" \
"addd \t 0, %6, %%dr6\n" \
"}\n" \
"{\n" \
"addd \t 0, %6, %%dr6\n" \
"addd \t 0, %0, %%dr0\n" \
"ibranch \t" #label "\n" \
"}\n" \
: \
: "ri" ((__e2k_u64_t) (arg1)), \
: "i" ((__e2k_u64_t) (arg1)), \
"ri" ((__e2k_u64_t) (arg2)), \
"ri" ((__e2k_u64_t) (arg3)), \
"ri" ((__e2k_u64_t) (arg4)), \
"ri" ((__e2k_u64_t) (arg5)), \
"ri" ((__e2k_u64_t) (arg6)), \
"ri" ((__e2k_u64_t) (arg7)) \
: "r0", "r1", "r2", "r3", "r4", "r5", "r6" \
); \
} while (false)
#define E2K_SCALL_ARG7(trap_num, ret, sys_num, arg1, arg2, arg3, \
@ -6183,7 +6089,7 @@ do { \
#define SIMPLE_RECOVERY_STORE(_addr, _data, _opc) \
do { \
u32 _fmt = ((ldst_rec_op_t *) &_opc)->fmt; \
u32 _ind = ((ldst_rec_op_t *) &_opc)->index; \
u64 _ind = ((ldst_rec_op_t *) &_opc)->index; \
asm ( \
"{nop 1\n" \
" cmpesb,0 %[fmt], 1, %%pred20\n" \
@ -6204,7 +6110,7 @@ do { \
#define SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, _greg_no, _sm, _mas) \
do { \
u32 _fmt = ((ldst_rec_op_t *) &_opc)->fmt; \
u32 _ind = ((ldst_rec_op_t *) &_opc)->index; \
u64 _ind = ((ldst_rec_op_t *) &_opc)->index; \
asm ( \
"{nop 1\n" \
" cmpesb,0 %[fmt], 1, %%pred20\n" \
@ -6330,7 +6236,7 @@ do { \
do { \
u64 _data; \
u32 _fmt = ((ldst_rec_op_t *) &_opc)->fmt; \
u32 _ind = ((ldst_rec_op_t *) &_opc)->index; \
u64 _ind = ((ldst_rec_op_t *) &_opc)->index; \
asm ( \
"{nop 1\n" \
" cmpesb,0 %[fmt], 1, %%pred20\n" \
@ -6822,7 +6728,6 @@ do { \
# define __arch_pcpu_atomic_xchg(_val, _var, size) \
({ \
typeof(_var) __ret; \
HWBUG_ATOMIC_BEGIN(__my_cpu_offset + &(_var)); \
asm NOT_VOLATILE ( \
MB_BEFORE_ATOMIC_RELAXED_MB \
"\n2:" \
@ -6839,14 +6744,12 @@ do { \
: [ret] "=&r" (__ret) \
: [var] "r" (&(_var)), [val] "r" ((u64) (_val)) \
: "memory"); \
HWBUG_ATOMIC_END(); \
__ret; \
})
# define __arch_pcpu_atomic_cmpxchg(_old, _new, _var, size, sxt_size) \
({ \
typeof(_var) __ret, __stored_val; \
HWBUG_ATOMIC_BEGIN(__my_cpu_offset + &(_var)); \
asm NOT_VOLATILE ( \
MB_BEFORE_ATOMIC_RELAXED_MB \
"\n3:" \
@ -6875,14 +6778,12 @@ do { \
: [ret] "=&r" (__ret), [stored_val] "=&r" (__stored_val) \
: [var] "r" (&(_var)), [new] "ir" (_new), [old] "ir" (_old) \
: "memory", "pred2"); \
HWBUG_ATOMIC_END(); \
__ret; \
})
# define __arch_pcpu_atomic_cmpxchg_word(_old, _new, _var) \
({ \
typeof(_var) __ret, __stored_val; \
HWBUG_ATOMIC_BEGIN(__my_cpu_offset + &(_var)); \
asm NOT_VOLATILE ( \
MB_BEFORE_ATOMIC_RELAXED_MB \
"\n3:" \
@ -6908,14 +6809,12 @@ do { \
: [ret] "=&r" (__ret), [stored_val] "=&r" (__stored_val) \
: [var] "r" (&(_var)), [new] "ir" (_new), [old] "ir" (_old) \
: "memory", "pred2"); \
HWBUG_ATOMIC_END(); \
__ret; \
})
# define __arch_pcpu_atomic_cmpxchg_dword(_old, _new, _var) \
({ \
typeof(_var) __ret, __stored_val; \
HWBUG_ATOMIC_BEGIN(__my_cpu_offset + &(_var)); \
asm NOT_VOLATILE ( \
MB_BEFORE_ATOMIC_RELAXED_MB \
"\n3:" \
@ -6941,14 +6840,12 @@ do { \
: [ret] "=&r" (__ret), [stored_val] "=&r" (__stored_val) \
: [var] "r" (&(_var)), [new] "ir" ((u64) (_new)), [old] "ir" ((u64) (_old)) \
: "memory", "pred2"); \
HWBUG_ATOMIC_END(); \
__ret; \
})
#define __arch_pcpu_atomic_op(_val, _var, size, op) \
({ \
typeof(_var) __ret; \
HWBUG_ATOMIC_BEGIN(__my_cpu_offset + &(_var)); \
asm NOT_VOLATILE ( \
MB_BEFORE_ATOMIC_RELAXED_MB \
"\n1:" \
@ -6966,11 +6863,10 @@ do { \
: [ret] "=&r" (__ret) \
: [var] "r" (&(_var)), [val] "ir" ((u64) (_val)) \
: "memory"); \
HWBUG_ATOMIC_END(); \
__ret; \
})
#endif /* #ifndef CONFIG_CPU_ES2 */
#endif
/* Disable %aalda writes on iset v6 (iset correction v6.107).
* Use alternatives since we cannot do jumps at this point

View File

@ -85,6 +85,20 @@ typedef int (*parse_chain_fn_t)(e2k_mem_crs_t *crs,
extern notrace long parse_chain_stack(int flags, struct task_struct *p,
parse_chain_fn_t func, void *arg);
extern notrace int ____parse_chain_stack(int flags, struct task_struct *p,
parse_chain_fn_t func, void *arg, unsigned long delta_user,
unsigned long top, unsigned long bottom,
bool *interrupts_enabled, unsigned long *irq_flags);
static inline int
native_do_parse_chain_stack(int flags, struct task_struct *p,
parse_chain_fn_t func, void *arg, unsigned long delta_user,
unsigned long top, unsigned long bottom,
bool *interrupts_enabled, unsigned long *irq_flags)
{
return ____parse_chain_stack(flags, p, func, arg, delta_user, top, bottom,
interrupts_enabled, irq_flags);
}
extern void *kernel_symtab;
extern long kernel_symtab_size;
@ -207,6 +221,23 @@ extern int print_window_regs;
extern int debug_datastack;
#endif
#ifndef CONFIG_KVM_GUEST_KERNEL
/* it is native kernel without any virtualization */
/* or it is native host kernel with virtualization support */
/* or it is paravirtualized host and guest kernel */
static inline int
do_parse_chain_stack(int flags, struct task_struct *p,
parse_chain_fn_t func, void *arg, unsigned long delta_user,
unsigned long top, unsigned long bottom,
bool *interrupts_enabled, unsigned long *irq_flags)
{
return native_do_parse_chain_stack(flags, p, func, arg, delta_user,
top, bottom,
interrupts_enabled, irq_flags);
}
#endif /* !CONFIG_KVM_GUEST_KERNEL */
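do_parse_chain_stack() above is the native leg of a walker that invokes a parse_chain_fn_t callback once per chain-stack frame. The typedef's trailing parameters are cut off in the hunk context above, so this usage sketch assumes a plausible callback tail, flags value and return convention; treat every name beyond parse_chain_stack() itself as hypothetical:

/* Sketch only: counting a task's chain-stack frames via the walker. */
static int count_frame(e2k_mem_crs_t *crs, void *arg)	/* tail params assumed */
{
	(void) crs;
	(*(int *) arg)++;
	return 0;	/* assumed: zero continues the walk */
}

static long count_task_frames(struct task_struct *p)
{
	int nr = 0;
	long ret = parse_chain_stack(0 /* flags, assumed */, p,
				     count_frame, &nr);

	return (ret < 0) ? ret : nr;
}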
#ifndef CONFIG_VIRTUALIZATION
/* it is native kernel without any virtualization */
#define GET_PHYS_ADDR(task, addr) NATIVE_GET_PHYS_ADDR(task, addr)
@ -444,12 +475,11 @@ static inline void print_tc_state(const trap_cellar_t *tcellar, int num)
" chan = 0x%x, se = 0x%x, pm = 0x%x\n\n"
" fault_type = 0x%x:\n"
" intl_res_bits = %d MLT_trap = %d\n"
" ph_pr_page = %d page_bound = %d\n"
" ph_pr_page = %d global_sp = %d\n"
" io_page = %d isys_page = %d\n"
" prot_page = %d priv_page = %d\n"
" illegal_page = %d nwrite_page = %d\n"
" page_miss = %d ph_bound = %d\n"
" global_sp = %d\n\n"
" miss_lvl = 0x%x, num_align = 0x%x, empt = 0x%x\n"
" clw = 0x%x, rcv = 0x%x dst_rcv = 0x%x\n"
"----------------------------------------------------"
@ -471,12 +501,11 @@ static inline void print_tc_state(const trap_cellar_t *tcellar, int num)
(u32)AS(tcellar->condition).pm,
(u32)AS(tcellar->condition).fault_type,
(u32)AS(ftype).intl_res_bits, (u32)(AS(ftype).exc_mem_lock),
(u32)AS(ftype).ph_pr_page, (u32)AS(ftype).page_bound,
(u32)AS(ftype).ph_pr_page, (u32)AS(ftype).global_sp,
(u32)AS(ftype).io_page, (u32)AS(ftype).isys_page,
(u32)AS(ftype).prot_page, (u32)AS(ftype).priv_page,
(u32)AS(ftype).illegal_page, (u32)AS(ftype).nwrite_page,
(u32)AS(ftype).page_miss, (u32)AS(ftype).ph_bound,
(u32)AS(ftype).global_sp,
(u32)AS(tcellar->condition).miss_lvl,
(u32)AS(tcellar->condition).num_align,
(u32)AS(tcellar->condition).empt,

View File

@ -15,13 +15,10 @@
#include <asm/tags.h>
/*
 * Tagged value structures
*/
/* Address Pointers */
typedef union { /* High word of all pointers */
@ -323,7 +320,7 @@ typedef struct e2k_pl {
#define PLHI_item hi
#define IS_PL_ITAG(pl_lo_word) (PL_ITAG_GET(pl_lo_word) == E2K_PL_ITAG)
static inline e2k_pl_t DO_MAKE_PL_V2(u64 addr, bool pm)
static inline e2k_pl_t DO_MAKE_PL_V3(u64 addr, bool pm)
{
e2k_pl_t p;
e2k_pl_lo_t pl;
@ -331,7 +328,7 @@ static inline e2k_pl_t DO_MAKE_PL_V2(u64 addr, bool pm)
pl.PL_lo_value = 0;
pl.PL_lo_target = addr;
pl.PL_lo_pm = pm;
pl.PL_lo_itag = E2K_PL_V2_ITAG;
pl.PL_lo_itag = E2K_PL_V3_ITAG;
p.lo = pl;
p.hi.word = 0L;
return p;
@ -341,16 +338,16 @@ static inline e2k_pl_t DO_MAKE_PL_V6(u64 addr, bool pm, unsigned int cui)
{
e2k_pl_t pl;
pl = DO_MAKE_PL_V2(addr, pm);
pl = DO_MAKE_PL_V3(addr, pm);
pl.PL_itag = E2K_PL_ITAG;
pl.PLHI_value = 0;
pl.PL_cui = cui;
return pl;
}
static inline e2k_pl_t MAKE_PL_V2(u64 addr)
static inline e2k_pl_t MAKE_PL_V3(u64 addr)
{
return DO_MAKE_PL_V2(addr, false);
return DO_MAKE_PL_V3(addr, false);
}
static inline e2k_pl_t MAKE_PL_V6(u64 addr, unsigned int cui)

View File

@ -10,29 +10,25 @@
#include <asm/e2k.h>
#include <asm/e2k_api.h>
#include <asm/mas.h>
#include <asm/es2.h>
#include <asm/e2s.h>
#include <asm/e8c.h>
#include <asm/e16c.h>
#include <asm/e12c.h>
#include <asm/e2c3.h>
/*
* NBR area configuration
*/
#define E2K_NSR_AREA_PHYS_BASE (machine.get_nsr_area_phys_base())
#define E2K_NSR_AREA_SIZE (machine.nbsr_area_size)
#define E2K_NBSR_OFFSET (machine.nbsr_area_offset)
#define E2K_NBSR_SIZE (machine.nbsr_area_size)
#define E2K_COPSR_AREA_PHYS_BASE (machine.copsr_area_phys_base)
#define E2K_COPSR_AREA_SIZE (machine.copsr_area_size)
#define E2K_NSR_AREA_SIZE 0x0000000000100000UL
#define E2K_NBSR_OFFSET 0x0000000000000000UL
#define E2K_NBSR_SIZE E2K_NSR_AREA_SIZE
#define BOOT_NSR_AREA_PHYS_BASE (boot_machine.nsr_area_phys_base)
#define BOOT_NSR_AREA_SIZE (boot_machine.nbsr_area_size)
#define BOOT_NBSR_OFFSET (boot_machine.nbsr_area_offset)
#define BOOT_NBSR_SIZE (boot_machine.nbsr_area_size)
#define BOOT_COPSR_AREA_PHYS_BASE (boot_machine.copsr_area_phys_base)
#define BOOT_COPSR_AREA_SIZE (boot_machine.copsr_area_size)
#define BOOT_NSR_AREA_SIZE E2K_NSR_AREA_SIZE
#define BOOT_NBSR_OFFSET E2K_NBSR_OFFSET
#define BOOT_NBSR_SIZE E2K_NBSR_SIZE
/*
* Nodes system registers area - NSR = { NSR0 ... NSRj ... }
@ -63,13 +59,6 @@
((unsigned char *)(BOOT_THE_NODE_NSR_PHYS_BASE(node) + \
BOOT_NODE_NBSR_OFFSET))
/*
* Nodes system coprocessors registers area - COPSR = { COPSR0 ... COPSRj ... }
*/
#define NODE_COPSR_SIZE E2K_COPSR_AREA_SIZE
#define THE_NODE_COPSR_PHYS_BASE(node) \
(E2K_COPSR_AREA_PHYS_BASE + (node * NODE_COPSR_SIZE))
extern unsigned char *nodes_nbsr_base[MAX_NUMNODES];
extern phys_addr_t nodes_nbsr_phys_base[MAX_NUMNODES];

View File

@ -15,8 +15,6 @@ struct pt_regs;
extern void boot_e2s_setup_arch(void);
extern void e2s_setup_machine(void);
extern void setup_APIC_vector_handler(int vector,
void (*handler)(struct pt_regs *), bool system, char *name);
extern void sic_error_interrupt(struct pt_regs *regs);
#endif
@ -25,35 +23,12 @@ extern void sic_error_interrupt(struct pt_regs *regs);
#define E2S_NODE_IOLINKS 1
#define E2S_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE
#define E2S_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE
#define E2S_PCICFG_AREA_PHYS_BASE 0x0000000200000000UL
#define E2S_PCICFG_AREA_SIZE 0x0000000010000000UL
#define E2S_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE
#define E2S_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET
#define E2S_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE
#define E2S_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE
#define E2S_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE
#define E2S_MLT_SIZE ES2_MLT_SIZE
#define E2S_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM
#define E2S_TLB_ADDR_LINE_NUM ES2_TLB_ADDR_LINE_NUM
#define E2S_TLB_ADDR_LINE_NUM2 0x000000001fe00000
#define E2S_TLB_ADDR_LINE_NUM_SHIFT2 21
#define E2S_TLB_ADDR_SET_NUM ES2_TLB_ADDR_SET_NUM
#define E2S_TLB_ADDR_SET_NUM_SHIFT ES2_TLB_ADDR_SET_NUM_SHIFT
#define E2S_NSR_AREA_PHYS_BASE 0x0000000110000000UL
#define E2S_SIC_MC_SIZE 0xa4
#define E2S_SIC_MC_COUNT 3
#define E2S_SIC_MC1_ECC 0x440
#define E2S_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE
#define E2S_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT
#define E2S_L1_CACHE_BYTES ES2_L1_CACHE_BYTES
#define E2S_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT
#define E2S_L2_CACHE_BYTES ES2_L2_CACHE_BYTES
#endif /* _ASM_E2S_H_ */

View File

@ -15,8 +15,6 @@ struct pt_regs;
extern void boot_e8c_setup_arch(void);
extern void e8c_setup_machine(void);
extern void setup_APIC_vector_handler(int vector,
void (*handler)(struct pt_regs *), bool system, char *name);
extern void sic_error_interrupt(struct pt_regs *regs);
#endif
@ -25,36 +23,14 @@ extern void sic_error_interrupt(struct pt_regs *regs);
#define E8C_NODE_IOLINKS 1
#define E8C_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE
#define E8C_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE
#define E8C_PCICFG_AREA_PHYS_BASE E2S_PCICFG_AREA_PHYS_BASE
#define E8C_PCICFG_AREA_SIZE E2S_PCICFG_AREA_SIZE
#define E8C_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE
#define E8C_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET
#define E8C_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE
#define E8C_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE
#define E8C_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE
#define E8C_MLT_SIZE ES2_MLT_SIZE
#define E8C_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM
#define E8C_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM
#define E8C_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2
#define E8C_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2
#define E8C_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM
#define E8C_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT
#define E8C_NSR_AREA_PHYS_BASE E2S_NSR_AREA_PHYS_BASE
#define E8C_SIC_MC_SIZE 0xe4
#define E8C_SIC_MC_COUNT 4
#define E8C_SIC_MC1_ECC E2S_SIC_MC1_ECC
#define E8C_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE
#define E8C_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT
#define E8C_L1_CACHE_BYTES ES2_L1_CACHE_BYTES
#define E8C_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT
#define E8C_L2_CACHE_BYTES ES2_L2_CACHE_BYTES
#define E8C_L3_CACHE_SHIFT 6
#define E8C_L3_CACHE_BYTES (1 << E8C_L3_CACHE_SHIFT)

View File

@ -15,8 +15,6 @@ struct pt_regs;
extern void boot_e8c2_setup_arch(void);
extern void e8c2_setup_machine(void);
extern void setup_APIC_vector_handler(int vector,
void (*handler)(struct pt_regs *), bool system, char *name);
extern void sic_error_interrupt(struct pt_regs *regs);
#endif
@ -25,36 +23,14 @@ extern void sic_error_interrupt(struct pt_regs *regs);
#define E8C2_NODE_IOLINKS E8C_NODE_IOLINKS
#define E8C2_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE
#define E8C2_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE
#define E8C2_PCICFG_AREA_PHYS_BASE E2S_PCICFG_AREA_PHYS_BASE
#define E8C2_PCICFG_AREA_SIZE E2S_PCICFG_AREA_SIZE
#define E8C2_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE
#define E8C2_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET
#define E8C2_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE
#define E8C2_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE
#define E8C2_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE
#define E8C2_MLT_SIZE ES2_MLT_SIZE
#define E8C2_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM
#define E8C2_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM
#define E8C2_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2
#define E8C2_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2
#define E8C2_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM
#define E8C2_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT
#define E8C2_NSR_AREA_PHYS_BASE E2S_NSR_AREA_PHYS_BASE
#define E8C2_SIC_MC_SIZE 0xf4
#define E8C2_SIC_MC_COUNT E8C_SIC_MC_COUNT
#define E8C2_SIC_MC1_ECC E2S_SIC_MC1_ECC
#define E8C2_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE
#define E8C2_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT
#define E8C2_L1_CACHE_BYTES ES2_L1_CACHE_BYTES
#define E8C2_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT
#define E8C2_L2_CACHE_BYTES ES2_L2_CACHE_BYTES
#define E8C2_L3_CACHE_SHIFT E8C_L3_CACHE_SHIFT
#define E8C2_L3_CACHE_BYTES E8C_L3_CACHE_BYTES

View File

@ -15,7 +15,7 @@
/*
* e2k relocation types
*/
#define R_E2K_NONE 0
#define R_E2K_32_ABS 0
#define R_E2K_32_PC 2
#define R_E2K_64_ABS 50 /* Direct 64 bit */
#define R_E2K_64_ABS_LIT 51 /* Direct 64 bit for LTS syllable */

View File

@ -1,60 +0,0 @@
#ifndef _ASM_ES2_H_
#define _ASM_ES2_H_
/*
 * Machine (based on the E2C+ processor) topology:
 * E2C+ is a NUMA system with distributed memory and can have several nodes.
 * Each node can have some memory (faster to access) and up to 4 CPUs (cores),
 * but the real processor chip has only two cores (the other 2 should be
 * considered permanently disabled), so online CPU numbers will be
 * 0, 1, 4, 5, 8, 9, ...
 * The node number is the same as the chip (processor) number.
 * Some nodes (CPUs) can be without memory.
 * The LAPIC cluster number is the same as the node number.
*/
#ifndef __ASSEMBLY__
struct pt_regs;
extern void boot_es2_setup_arch(void);
extern void es2_setup_machine(void);
extern void setup_APIC_vector_handler(int vector,
void (*handler)(struct pt_regs *), bool system, char *name);
extern void eldsp_interrupt(struct pt_regs *regs);
#endif
#define ES2_NR_NODE_CPUS 2
#define ES2_MAX_NR_NODE_CPUS 4
#define ES2_NODE_IOLINKS 2
#define ES2_PCICFG_AREA_PHYS_BASE 0x0000000200000000UL
#define ES2_PCICFG_AREA_SIZE 0x0000000010000000UL
#define ES2_NSR_AREA_PHYS_BASE 0x0000000110000000UL
#define ES2_NBSR_AREA_OFFSET 0x0000000000000000UL
#define ES2_NBSR_AREA_SIZE 0x0000000000100000UL
#define ES2_COPSR_AREA_PHYS_BASE 0x00000001c0000000UL
#define ES2_COPSR_AREA_SIZE 0x0000000001000000UL
#define ES2_MLT_SIZE 16
#define ES2_TLB_LINES_BITS_NUM 8
#define ES2_TLB_ADDR_LINE_NUM 0x00000000000ff000
#define ES2_TLB_ADDR_LINE_NUM2 0x000000003fc00000
#define ES2_TLB_ADDR_LINE_NUM_SHIFT2 22
#define ES2_TLB_ADDR_SET_NUM 0x0000000000000018
#define ES2_TLB_ADDR_SET_NUM_SHIFT 3
#define ES2_SIC_MC_COUNT 2
#define ES2_SIC_MC1_ECC 0x500
#define ES2_CLOCK_TICK_RATE 10000000
#define ES2_L1_CACHE_SHIFT 5
#define ES2_L1_CACHE_BYTES (1 << ES2_L1_CACHE_SHIFT)
#define ES2_L2_CACHE_SHIFT 6
#define ES2_L2_CACHE_BYTES (1 << ES2_L2_CACHE_SHIFT)
#endif /* _ASM_ES2_H_ */
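The topology comment at the top of this removed header implies a fixed CPU-to-node arithmetic: 4 CPU slots per node with only the first 2 populated. A small stand-alone illustration of that numbering, using the two constants defined above (the helper names are ours, not the kernel's):

/* Sketch: ES2 CPU numbering, 4 slots per node, 2 usable cores per chip. */
#include <stdio.h>

#define ES2_NR_NODE_CPUS	2
#define ES2_MAX_NR_NODE_CPUS	4

static int cpu_to_node(int cpu)
{
	return cpu / ES2_MAX_NR_NODE_CPUS;
}

static int cpu_is_online(int cpu)
{
	return cpu % ES2_MAX_NR_NODE_CPUS < ES2_NR_NODE_CPUS;
}

int main(void)
{
	for (int cpu = 0; cpu < 12; cpu++)
		if (cpu_is_online(cpu))
			printf("cpu %2d -> node %d\n", cpu, cpu_to_node(cpu));
	/* prints cpus 0, 1, 4, 5, 8, 9 on nodes 0..2, matching the comment */
	return 0;
}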

View File

@ -11,7 +11,7 @@
#include <asm/trap_table.h>
#include <asm/gregs.h>
#include <asm/hw_stacks.h>
#include <uapi/asm/ucontext.h>
#include <asm/ucontext.h>
struct fast_syscalls_data {
struct timekeeper *tk;
@ -32,6 +32,17 @@ extern const fast_system_call_func fast_sys_calls_table_32[NR_fast_syscalls];
int fast_sys_ni_syscall(void);
/* trap table entry started by direct branch (it is closer to a fast system */
/* call without a stack switch and uses the user local data stack) */
#define ttable_entry1_clock_gettime(which, time, ret) \
goto_ttable_entry1_args3(__NR_clock_gettime, which, time, ret)
#define ttable_entry1_gettimeofday(tv, tz, ret) \
goto_ttable_entry1_args3(__NR_gettimeofday, tv, tz, ret)
#define ttable_entry1_sigprocmask(how, nset, oset, ret) \
goto_ttable_entry1_args4(__NR_sigprocmask, how, \
nset, oset, ret)
#define FAST_SYSTEM_CALL_TBL_ENTRY(sysname) \
(fast_system_call_func) sysname
#define COMPAT_FAST_SYSTEM_CALL_TBL_ENTRY(sysname) \
@ -204,11 +215,6 @@ enum {
__cycles = fast_syscall_read_sclkr(); \
if (__cycles) \
__ret = FAST_SYS_OK; \
} else if (likely(__clock == &clocksource_clkr)) { \
__cycle_last = __tk->tkr_mono.cycle_last; \
__mask = __clock->mask; \
__cycles = fast_syscall_read_clkr(); \
__ret = FAST_SYS_OK; \
} \
} while (unlikely(read_seqcount_retry(&timekeeper_seq, __seq))); \
\
@ -478,10 +484,152 @@ static inline int _fast_sys_getcontext(struct ucontext __user *ucp,
return 0;
}
notrace __interrupt __section(.entry_handlers)
notrace __section(.entry_handlers)
static inline int fast_sys_set_return(u64 ip, int flags)
{
return do_fast_sys_set_return(ip, flags);
}
/* Inlined handlers for compat fast syscalls */
notrace __section(".entry.text")
static inline int _compat_fast_sys_clock_gettime(const clockid_t which_clock,
struct compat_timespec __user *__restrict tp)
{
struct thread_info *const ti = READ_CURRENT_REG();
struct timespec kts;
int ret;
prefetch_nospec(&fsys_data);
if (unlikely((u64) tp + sizeof(struct compat_timespec) >
ti->addr_limit.seg))
return -EFAULT;
ret = do_fast_clock_gettime(which_clock, &kts);
if (likely(!ret)) {
tp->tv_sec = kts.tv_sec;
tp->tv_nsec = kts.tv_nsec;
} else {
ttable_entry1_clock_gettime((u64) which_clock, (u64) tp, ret);
}
return ret;
}
notrace __section(".entry.text")
static inline int _compat_fast_sys_gettimeofday(
struct compat_timeval __user *__restrict tv,
struct timezone __user *__restrict tz)
{
struct thread_info *const ti = READ_CURRENT_REG();
struct timeval ktv;
int ret;
prefetch_nospec(&fsys_data);
if (unlikely((u64) tv + sizeof(struct compat_timeval) >
ti->addr_limit.seg
|| (u64) tz + sizeof(struct timezone) >
ti->addr_limit.seg))
return -EFAULT;
if (likely(tv)) {
ret = do_fast_gettimeofday(&ktv);
if (unlikely(ret))
ttable_entry1_gettimeofday((u64) tv, (u64) tz, ret);
} else {
ret = 0;
}
if (tv) {
tv->tv_sec = ktv.tv_sec;
tv->tv_usec = ktv.tv_usec;
}
if (tz) {
tz->tz_minuteswest = sys_tz.tz_minuteswest;
tz->tz_dsttime = sys_tz.tz_dsttime;
}
return ret;
}
#if _NSIG != 64
# error We read u64 value here...
#endif
notrace __section(".entry.text")
static inline int _compat_fast_sys_siggetmask(u32 __user *oset,
size_t sigsetsize)
{
struct thread_info *const ti = READ_CURRENT_REG();
struct task_struct *task = thread_info_task(ti);
int ret = 0;
union {
u32 word[2];
u64 whole;
} set;
set.whole = task->blocked.sig[0];
if (unlikely(sigsetsize != 8))
return -EINVAL;
if (unlikely((u64) oset + sizeof(sigset_t) > ti->addr_limit.seg))
return -EFAULT;
oset[0] = set.word[0];
oset[1] = set.word[1];
return ret;
}
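The union trick above relies on _NSIG == 64 so that the whole blocked-signal mask fits in sig[0]; the two u32 stores then hand it to the 32-bit userspace view. A stand-alone illustration of the split (the mask value is made up):

/* Sketch: splitting a 64-bit signal mask into two 32-bit words,
 * as _compat_fast_sys_siggetmask does above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	union {
		uint32_t word[2];
		uint64_t whole;
	} set;
	uint32_t oset[2];

	set.whole = 0x00000000deadbeefULL;	/* pretend blocked.sig[0] */
	oset[0] = set.word[0];
	oset[1] = set.word[1];
	/* On a little-endian machine such as e2k:
	 * word[0] = 0xdeadbeef, word[1] = 0 */
	printf("%#x %#x\n", oset[0], oset[1]);
	return 0;
}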
#if _NSIG != 64
# error We read u64 value here...
#endif
notrace __section(".entry.text")
static inline int _compat_fast_sys_getcontext(struct ucontext_32 __user *ucp,
size_t sigsetsize)
{
struct thread_info *const ti = READ_CURRENT_REG();
struct task_struct *task = thread_info_task(ti);
register u64 pcsp_lo, pcsp_hi;
register u32 fpcr, fpsr, pfpfr;
union {
u32 word[2];
u64 whole;
} set;
u64 key;
BUILD_BUG_ON(sizeof(task->blocked.sig[0]) != 8);
set.whole = task->blocked.sig[0];
if (unlikely(sigsetsize != 8))
return -EINVAL;
if (unlikely((u64) ucp + sizeof(struct ucontext_32) >
ti->addr_limit.seg
|| (u64) ucp >= ti->addr_limit.seg))
return -EFAULT;
key = context_ti_key_fast_syscall(ti);
E2K_GETCONTEXT(fpcr, fpsr, pfpfr, pcsp_lo, pcsp_hi);
/* We want stack to point to user frame that called us */
pcsp_hi -= SZ_OF_CR;
((u32 *) &ucp->uc_sigmask)[0] = set.word[0];
((u32 *) &ucp->uc_sigmask)[1] = set.word[1];
ucp->uc_mcontext.sbr = key;
ucp->uc_mcontext.pcsp_lo = pcsp_lo;
ucp->uc_mcontext.pcsp_hi = pcsp_hi;
ucp->uc_extra.fpcr = fpcr;
ucp->uc_extra.fpsr = fpsr;
ucp->uc_extra.pfpfr = pfpfr;
return 0;
}
#endif /* _ASM_E2K_FAST_SYSCALLS_H */

View File

@ -9,8 +9,7 @@
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
unsigned long off)
{
vma->vm_page_prot = (cpu_has(CPU_FEAT_WC_PCI_PREFETCH) &&
vma->vm_flags & VM_WRITECOMBINED) ?
vma->vm_page_prot = (vma->vm_flags & VM_WRITECOMBINED) ?
pgprot_writecombine(vma->vm_page_prot) :
pgprot_noncached(vma->vm_page_prot);
}

View File

@ -49,10 +49,8 @@
/* the small pages */
/* Size of pages where the kernel is loaded */
#define E2K_KERNEL_PAGE_SIZE (cpu_has(CPU_HWBUG_LARGE_PAGES) ? \
E2K_SMALL_PAGE_SIZE : E2K_LARGE_PAGE_SIZE)
#define BOOT_E2K_KERNEL_PAGE_SIZE (boot_cpu_has(CPU_HWBUG_LARGE_PAGES) ? \
E2K_SMALL_PAGE_SIZE : BOOT_E2K_LARGE_PAGE_SIZE)
#define E2K_KERNEL_PAGE_SIZE E2K_LARGE_PAGE_SIZE
#define BOOT_E2K_KERNEL_PAGE_SIZE BOOT_E2K_LARGE_PAGE_SIZE
/* Equal map of phys */
/* to virt addresses */
@ -124,12 +122,10 @@
/* Size of pages to map physical memory */
#define E2K_MAPPED_PHYS_MEM_PAGE_SIZE \
((cpu_has(CPU_HWBUG_LARGE_PAGES) || \
IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) ? \
(IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) ? \
E2K_SMALL_PAGE_SIZE : E2K_LARGE_PAGE_SIZE)
#define BOOT_E2K_MAPPED_PHYS_MEM_PAGE_SIZE \
((boot_cpu_has(CPU_HWBUG_LARGE_PAGES) || \
IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) ? \
(IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) ? \
E2K_SMALL_PAGE_SIZE : BOOT_E2K_LARGE_PAGE_SIZE)
/*

View File

@ -46,8 +46,6 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
ptep_set_wrprotect(mm, addr, ptep);
if (E2K_LARGE_PAGE_SIZE == E2K_4M_PAGE_SIZE)
ptep_set_wrprotect(mm, addr, ++ptep);
}
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
@ -58,8 +56,6 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
int changed = !pte_same(*ptep, pte);
if (changed) {
set_pte_at(vma->vm_mm, addr, ptep, pte);
if (E2K_LARGE_PAGE_SIZE == E2K_4M_PAGE_SIZE)
set_pte_at(vma->vm_mm, addr, ++ptep, pte);
flush_tlb_range(vma, addr, addr + PMD_SIZE);
}
return changed;
@ -75,8 +71,6 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long address,
* Both pte's (pmd's) should be cleared.
*/
pte_clear(mm, address, page_table);
if (E2K_LARGE_PAGE_SIZE == E2K_4M_PAGE_SIZE)
pte_clear(mm, address, (++page_table));
}
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR

View File

@ -18,10 +18,9 @@ extern int __init native_arch_pci_init(void);
#define E2K_X86_IO_AREA_BASE E2K_KERNEL_IO_BIOS_AREAS_BASE
/* Size of pages for the IO area */
#define E2K_X86_IO_PAGE_SIZE (cpu_has(CPU_HWBUG_LARGE_PAGES) ? \
E2K_SMALL_PAGE_SIZE : E2K_LARGE_PAGE_SIZE)
#define X86_IO_AREA_PHYS_BASE (machine.x86_io_area_base)
#define X86_IO_AREA_PHYS_SIZE (machine.x86_io_area_size)
#define E2K_X86_IO_PAGE_SIZE E2K_LARGE_PAGE_SIZE
#define X86_IO_AREA_PHYS_BASE (machine.x86_io_area_base)
#define X86_IO_AREA_PHYS_SIZE (machine.x86_io_area_size)
/*
@ -39,32 +38,24 @@ extern int __init native_arch_pci_init(void);
static inline u8 native_readb_relaxed(const volatile void __iomem *addr)
{
u8 res = *(const volatile u8 __force *) addr;
if (cpu_has(CPU_HWBUG_PIO_READS))
__E2K_WAIT(_ld_c);
return res;
}
static inline u16 native_readw_relaxed(const volatile void __iomem *addr)
{
u16 res = *(const volatile u16 __force *) addr;
if (cpu_has(CPU_HWBUG_PIO_READS))
__E2K_WAIT(_ld_c);
return res;
}
static inline u32 native_readl_relaxed(const volatile void __iomem *addr)
{
u32 res = *(const volatile u32 __force *) addr;
if (cpu_has(CPU_HWBUG_PIO_READS))
__E2K_WAIT(_ld_c);
return res;
}
static inline u64 native_readq_relaxed(const volatile void __iomem *addr)
{
u64 res = *(const volatile u64 __force *) addr;
if (cpu_has(CPU_HWBUG_PIO_READS))
__E2K_WAIT(_ld_c);
return res;
}
@ -198,11 +189,7 @@ static inline void native_writeq(u64 value, volatile void __iomem *addr)
* x86 works and how most of the drivers are tested. */
# define __io_paw() __E2K_WAIT(_st_c | _sas)
#else
# define __io_par() \
do { \
if (cpu_has(CPU_HWBUG_PIO_READS)) \
__E2K_WAIT(_ld_c); \
} while (0)
# define __io_par()
# define __io_pbw()
# define __io_paw()
#endif

View File

@ -370,7 +370,7 @@ kvm_read_aainds_pair_value(int AAINDs_pair, u64 *lo_value, u64 *hi_value)
*lo_value = value1;
*hi_value = value2;
}
#define KVM_READ_AAINDS_PAIR_VALUE_V2(AAINDs_pair, value1, value2) \
#define KVM_READ_AAINDS_PAIR_VALUE_V3(AAINDs_pair, value1, value2) \
KVM_GET_AAU_AAINDS(AAINDs_pair, ((AAINDs_pair) + 1), \
value1, value2)
#define KVM_READ_AAINDS_PAIR_VALUE_V5(AAINDs_pair, value1, value2) \
@ -387,7 +387,7 @@ kvm_write_aainds_pair_value(int AAINDs_pair, u64 lo_value, u64 hi_value)
{
KVM_SET_AAU_AAINDS(AAINDs_pair, (AAINDs_pair + 1), lo_value, hi_value);
}
#define KVM_WRITE_AAINDS_PAIR_VALUE_V2(AAINDs_pair, lo_value, hi_value) \
#define KVM_WRITE_AAINDS_PAIR_VALUE_V3(AAINDs_pair, lo_value, hi_value) \
kvm_write_aainds_pair_value(AAINDs_pair, lo_value, hi_value)
#define KVM_WRITE_AAINDS_PAIR_VALUE_V5(AAINDs_pair, lo_value, hi_value) \
kvm_write_aainds_pair_value(AAINDs_pair, lo_value, hi_value)
@ -432,7 +432,7 @@ kvm_read_aaincrs_pair_value(int AAINCRs_pair, u64 *lo_value, u64 *hi_value)
*lo_value = value1;
*hi_value = value2;
}
#define KVM_READ_AAINCRS_PAIR_VALUE_V2(AAINCRs_pair, value1, value2) \
#define KVM_READ_AAINCRS_PAIR_VALUE_V3(AAINCRs_pair, value1, value2) \
KVM_GET_AAU_AAINCRS(AAINCRs_pair, ((AAINCRs_pair) + 1), \
value1, value2)
#define KVM_READ_AAINCRS_PAIR_VALUE_V5(AAINCRs_pair, value1, value2) \
@ -450,7 +450,7 @@ kvm_write_aaincrs_pair_value(int AAINCRs_pair, u64 lo_value, u64 hi_value)
KVM_SET_AAU_AAINCRS(AAINCRs_pair, (AAINCRs_pair + 1),
lo_value, hi_value);
}
#define KVM_WRITE_AAINCRS_PAIR_VALUE_V2(AAINCRs_pair, lo_value, hi_value) \
#define KVM_WRITE_AAINCRS_PAIR_VALUE_V3(AAINCRs_pair, lo_value, hi_value) \
kvm_write_aaincrs_pair_value(AAINCRs_pair, lo_value, hi_value)
#define KVM_WRITE_AAINCRS_PAIR_VALUE_V5(AAINCRs_pair, lo_value, hi_value) \
kvm_write_aaincrs_pair_value(AAINCRs_pair, lo_value, hi_value)
@ -485,7 +485,7 @@ kvm_read_aastis_pair_value(int AASTIs_pair, u64 *lo_value, u64 *hi_value)
*lo_value = value1;
*hi_value = value2;
}
#define KVM_READ_AASTIS_PAIR_VALUE_V2(AASTIs_pair, value1, value2) \
#define KVM_READ_AASTIS_PAIR_VALUE_V3(AASTIs_pair, value1, value2) \
KVM_GET_AAU_AASTIS(AASTIs_pair, ((AASTIs_pair) + 1), \
value1, value2)
#define KVM_READ_AASTIS_PAIR_VALUE_V5(AASTIs_pair, value1, value2) \
@ -497,7 +497,7 @@ kvm_write_aastis_pair_value(int AASTIs_pair, u64 lo_value, u64 hi_value)
{
KVM_SET_AAU_AASTIS(AASTIs_pair, (AASTIs_pair + 1), lo_value, hi_value);
}
#define KVM_WRITE_AASTIS_PAIR_VALUE_V2(AASTIs_pair, lo_value, hi_value) \
#define KVM_WRITE_AASTIS_PAIR_VALUE_V3(AASTIs_pair, lo_value, hi_value) \
kvm_write_aastis_pair_value(AASTIs_pair, lo_value, hi_value)
#define KVM_WRITE_AASTIS_PAIR_VALUE_V5(AASTIs_pair, lo_value, hi_value) \
kvm_write_aastis_pair_value(AASTIs_pair, lo_value, hi_value)
@ -511,7 +511,7 @@ kvm_read_aaldi_reg_value(int AALDI_no, u64 *l_value, u64 *r_value)
*l_value = value1;
*r_value = value2;
}
#define KVM_READ_AALDI_REG_VALUE_V2(AALDI_no, value1, value2) \
#define KVM_READ_AALDI_REG_VALUE_V3(AALDI_no, value1, value2) \
KVM_GET_AAU_AALDI(AALDI_no, value1, value2)
#define KVM_READ_AALDI_REG_VALUE_V5(AALDI_no, value1, value2) \
KVM_GET_AAU_AALDI(AALDI_no, value1, value2)

View File

@ -28,16 +28,21 @@
#ifndef CONFIG_KVM_GUEST_KERNEL
/* it is native host kernel with virtualization support */
/* or paravirtualized host and guest kernel */
extern void kvm_host_machine_setup_regs_v3(host_machdep_t *);
extern void kvm_host_machine_setup_regs_v6(host_machdep_t *);
static inline void
kvm_host_machine_setup(machdep_t *host_machine)
{
machdep_t *node_mach;
int nid;
for_each_node_has_dup_kernel(nid) {
node_mach = the_node_machine(nid);
if (host_machine->native_iset_ver < E2K_ISET_V5) {
machdep_t *n_machine = the_node_machine(nid);
if (host_machine->native_iset_ver >= E2K_ISET_V6) {
kvm_host_machine_setup_regs_v6(&n_machine->host);
} else {
kvm_host_machine_setup_regs_v3(&n_machine->host);
}
}
}

View File

@ -9,18 +9,17 @@
#include <asm/e2k_api.h>
#include <asm/cpu_regs_types.h>
#include <asm/kvm/cpu_hv_regs_types.h>
#include <asm/machdep.h>
/*
* Virtualization control registers
*/
#define READ_VIRT_CTRL_CU_REG_VALUE() NATIVE_GET_DSREG_CLOSED(virt_ctrl_cu)
#define READ_VIRT_CTRL_CU_REG() \
((virt_ctrl_cu_t) { .word = NATIVE_GET_DSREG_CLOSED(virt_ctrl_cu) })
/* Bug #127239: on some CPUs the "rwd %virt_ctrl_cu" instruction must also
 * contain a NOP. This is already accomplished by using a delay of "5" here. */
#define WRITE_VIRT_CTRL_CU_REG_VALUE(virt_ctrl) \
NATIVE_SET_DSREG_CLOSED_NOEXC(virt_ctrl_cu, virt_ctrl, 5)
#define READ_VIRT_CTRL_CU_REG() read_VIRT_CTRL_CU_reg()
#define WRITE_VIRT_CTRL_CU_REG(virt_ctrl) \
write_VIRT_CTRL_CU_reg(virt_ctrl)
#define WRITE_VIRT_CTRL_CU_REG(v) \
NATIVE_SET_DSREG_CLOSED_NOEXC(virt_ctrl_cu, ((virt_ctrl_cu_t) (v)).word, 5)
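Given the bug #127239 note above, a hedged read-modify-write sketch with these two accessors; the rr_clkr field name is inferred from the mask definitions removed later in this commit, and whether any caller does exactly this sequence is not shown here:

/* Sketch only: flip one VIRT_CTRL_CU flag; the mandatory NOP is
 * already folded into the write macro via its delay of 5. */
static inline void demo_set_rr_clkr(void)
{
	virt_ctrl_cu_t ctrl = READ_VIRT_CTRL_CU_REG();

	ctrl.VIRT_CTRL_CU_rr_clkr = 1;	/* field name assumed */
	WRITE_VIRT_CTRL_CU_REG(ctrl);
}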
/* Shadow CPU registers */
@ -170,6 +169,7 @@
#define WRITE_SH_OSCUTD_REG_VALUE(CUTD_value) \
NATIVE_SET_DSREG_CLOSED_NOEXC(sh_oscutd, CUTD_value, 7)
#define WRITE_SH_OSCUTD_REG(value) WRITE_SH_OSCUTD_REG_VALUE(AW(value))
/*
* Read/write word Compilation Unit Index Register (SH_OSCUIR)
@ -178,6 +178,7 @@
#define WRITE_SH_OSCUIR_REG_VALUE(CUIR_value) \
NATIVE_SET_DSREG_CLOSED_NOEXC(sh_oscuir, CUIR_value, 7)
#define WRITE_SH_OSCUIR_REG(value) WRITE_SH_OSCUIR_REG_VALUE(AW(value))
/*
* Read/Write Processor Core Mode Register (SH_CORE_MODE)
@ -186,232 +187,18 @@
#define WRITE_SH_CORE_MODE_REG_VALUE(modes) \
NATIVE_SET_SREG_CLOSED_NOEXC(sh_core_mode, modes, 5)
extern unsigned long read_VIRT_CTRL_CU_reg_value(void);
extern void write_VIRT_CTRL_CU_reg_value(unsigned long value);
extern unsigned int read_SH_CORE_MODE_reg_value(void);
extern void write_SH_CORE_MODE_reg_value(unsigned int value);
extern unsigned long read_SH_PSP_LO_reg_value(void);
extern unsigned long read_SH_PSP_HI_reg_value(void);
extern void write_SH_PSP_LO_reg_value(unsigned long value);
extern void write_SH_PSP_HI_reg_value(unsigned long value);
extern unsigned long read_BU_PSP_LO_reg_value(void);
extern unsigned long read_BU_PSP_HI_reg_value(void);
extern void write_BU_PSP_LO_reg_value(unsigned long value);
extern void write_BU_PSP_HI_reg_value(unsigned long value);
extern unsigned long read_SH_PSHTP_reg_value(void);
extern void write_SH_PSHTP_reg_value(unsigned long value);
extern unsigned long read_SH_PCSP_LO_reg_value(void);
extern unsigned long read_SH_PCSP_HI_reg_value(void);
extern void write_SH_PCSP_LO_reg_value(unsigned long value);
extern void write_SH_PCSP_HI_reg_value(unsigned long value);
extern unsigned long read_BU_PCSP_LO_reg_value(void);
extern unsigned long read_BU_PCSP_HI_reg_value(void);
extern void write_BU_PCSP_LO_reg_value(unsigned long value);
extern void write_BU_PCSP_HI_reg_value(unsigned long value);
extern int read_SH_PCSHTP_reg_value(void);
extern void write_SH_PCSHTP_reg_value(int value);
extern unsigned long read_SH_WD_reg_value(void);
extern void write_SH_WD_reg_value(unsigned long value);
extern unsigned long read_SH_OSCUD_LO_reg_value(void);
extern unsigned long read_SH_OSCUD_HI_reg_value(void);
extern void write_SH_OSCUD_LO_reg_value(unsigned long value);
extern void write_SH_OSCUD_HI_reg_value(unsigned long value);
extern unsigned long read_SH_OSGD_LO_reg_value(void);
extern unsigned long read_SH_OSGD_HI_reg_value(void);
extern void write_SH_OSGD_LO_reg_value(unsigned long value);
extern void write_SH_OSGD_HI_reg_value(unsigned long value);
extern unsigned long read_SH_OSCUTD_reg_value(void);
extern void write_SH_OSCUTD_reg_value(unsigned long value);
extern unsigned int read_SH_OSCUIR_reg_value(void);
extern void write_SH_OSCUIR_reg_value(unsigned int value);
extern unsigned long read_SH_OSR0_reg_value(void);
extern void write_SH_OSR0_reg_value(unsigned long value);
static inline virt_ctrl_cu_t read_VIRT_CTRL_CU_reg(void)
{
virt_ctrl_cu_t virt_ctrl;
virt_ctrl.VIRT_CTRL_CU_reg = read_VIRT_CTRL_CU_reg_value();
return virt_ctrl;
}
static inline void write_VIRT_CTRL_CU_reg(virt_ctrl_cu_t virt_ctrl)
{
write_VIRT_CTRL_CU_reg_value(virt_ctrl.VIRT_CTRL_CU_reg);
}
static inline e2k_psp_lo_t read_SH_PSP_LO_reg(void)
{
e2k_psp_lo_t psp_lo;
psp_lo.PSP_lo_half = read_SH_PSP_LO_reg_value();
return psp_lo;
}
static inline e2k_psp_hi_t read_SH_PSP_HI_reg(void)
{
e2k_psp_hi_t psp_hi;
psp_hi.PSP_hi_half = read_SH_PSP_HI_reg_value();
return psp_hi;
}
static inline void write_SH_PSP_LO_reg(e2k_psp_lo_t psp_lo)
{
write_SH_PSP_LO_reg_value(psp_lo.PSP_lo_half);
}
static inline void write_SH_PSP_HI_reg(e2k_psp_hi_t psp_hi)
{
write_SH_PSP_HI_reg_value(psp_hi.PSP_hi_half);
}
static inline e2k_pcsp_lo_t read_SH_PCSP_LO_reg(void)
{
e2k_pcsp_lo_t pcsp_lo;
pcsp_lo.PCSP_lo_half = read_SH_PCSP_LO_reg_value();
return pcsp_lo;
}
static inline e2k_pcsp_hi_t read_SH_PCSP_HI_reg(void)
{
e2k_pcsp_hi_t pcsp_hi;
pcsp_hi.PCSP_hi_half = read_SH_PCSP_HI_reg_value();
return pcsp_hi;
}
static inline void write_SH_PCSP_LO_reg(e2k_pcsp_lo_t pcsp_lo)
{
write_SH_PCSP_LO_reg_value(pcsp_lo.PCSP_lo_half);
}
static inline void write_SH_PCSP_HI_reg(e2k_pcsp_hi_t pcsp_hi)
{
write_SH_PCSP_HI_reg_value(pcsp_hi.PCSP_hi_half);
}
static inline e2k_psp_lo_t read_BU_PSP_LO_reg(void)
{
e2k_psp_lo_t psp_lo;
psp_lo.PSP_lo_half = read_BU_PSP_LO_reg_value();
return psp_lo;
}
static inline e2k_psp_hi_t read_BU_PSP_HI_reg(void)
{
e2k_psp_hi_t psp_hi;
psp_hi.PSP_hi_half = read_BU_PSP_HI_reg_value();
return psp_hi;
}
static inline void write_BU_PSP_LO_reg(e2k_psp_lo_t psp_lo)
{
write_BU_PSP_LO_reg_value(psp_lo.PSP_lo_half);
}
static inline void write_BU_PSP_HI_reg(e2k_psp_hi_t psp_hi)
{
write_BU_PSP_HI_reg_value(psp_hi.PSP_hi_half);
}
static inline e2k_pcsp_lo_t read_BU_PCSP_LO_reg(void)
{
e2k_pcsp_lo_t pcsp_lo;
pcsp_lo.PCSP_lo_half = read_BU_PCSP_LO_reg_value();
return pcsp_lo;
}
static inline e2k_pcsp_hi_t read_BU_PCSP_HI_reg(void)
{
e2k_pcsp_hi_t pcsp_hi;
pcsp_hi.PCSP_hi_half = read_BU_PCSP_HI_reg_value();
return pcsp_hi;
}
static inline void write_BU_PCSP_LO_reg(e2k_pcsp_lo_t pcsp_lo)
{
write_BU_PCSP_LO_reg_value(pcsp_lo.PCSP_lo_half);
}
static inline void write_BU_PCSP_HI_reg(e2k_pcsp_hi_t pcsp_hi)
{
write_BU_PCSP_HI_reg_value(pcsp_hi.PCSP_hi_half);
}
static inline e2k_oscud_lo_t read_SH_OSCUD_LO_reg(void)
{
e2k_oscud_lo_t oscud_lo;
oscud_lo.OSCUD_lo_half = read_SH_OSCUD_LO_reg_value();
return oscud_lo;
}
static inline e2k_oscud_hi_t read_SH_OSCUD_HI_reg(void)
{
e2k_oscud_hi_t oscud_hi;
oscud_hi.OSCUD_hi_half = read_SH_OSCUD_HI_reg_value();
return oscud_hi;
}
static inline void write_SH_OSCUD_LO_reg(e2k_oscud_lo_t oscud_lo)
{
write_SH_OSCUD_LO_reg_value(oscud_lo.OSCUD_lo_half);
}
static inline void write_SH_OSCUD_HI_reg(e2k_oscud_hi_t oscud_hi)
{
write_SH_OSCUD_HI_reg_value(oscud_hi.OSCUD_hi_half);
}
static inline e2k_osgd_lo_t read_SH_OSGD_LO_reg(void)
{
e2k_osgd_lo_t osgd_lo;
osgd_lo.OSGD_lo_half = read_SH_OSGD_LO_reg_value();
return osgd_lo;
}
static inline e2k_osgd_hi_t read_SH_OSGD_HI_reg(void)
{
e2k_osgd_hi_t osgd_hi;
osgd_hi.OSGD_hi_half = read_SH_OSGD_HI_reg_value();
return osgd_hi;
}
static inline void write_SH_OSGD_LO_reg(e2k_osgd_lo_t osgd_lo)
{
write_SH_OSGD_LO_reg_value(osgd_lo.OSGD_lo_half);
}
static inline void write_SH_OSGD_HI_reg(e2k_osgd_hi_t osgd_hi)
{
write_SH_OSGD_HI_reg_value(osgd_hi.OSGD_hi_half);
}
static inline e2k_cutd_t read_SH_OSCUTD_reg(void)
{
e2k_cutd_t cutd;
cutd.CUTD_reg = read_SH_OSCUTD_reg_value();
return cutd;
}
static inline void write_SH_OSCUTD_reg(e2k_cutd_t cutd)
{
write_SH_OSCUTD_reg_value(cutd.CUTD_reg);
}
static inline e2k_cuir_t read_SH_OSCUIR_reg(void)
{
e2k_cuir_t cuir;
cuir.CUIR_reg = read_SH_OSCUIR_reg_value();
return cuir;
}
static inline void write_SH_OSCUIR_reg(e2k_cuir_t cuir)
{
write_SH_OSCUIR_reg_value(cuir.CUIR_reg);
}
#ifdef CONFIG_VIRTUALIZATION
static inline e2k_core_mode_t read_SH_CORE_MODE_reg(void)
{
e2k_core_mode_t core_mode;
core_mode.CORE_MODE_reg = read_SH_CORE_MODE_reg_value();
core_mode.CORE_MODE_reg = machine.host.read_SH_CORE_MODE();
return core_mode;
}
static inline void write_SH_CORE_MODE_reg(e2k_core_mode_t core_mode)
{
write_SH_CORE_MODE_reg_value(core_mode.CORE_MODE_reg);
machine.host.write_SH_CORE_MODE(core_mode.CORE_MODE_reg);
}
#endif /* CONFIG_VIRTUALIZATION */
#define READ_G_PREEMPT_TMR_REG() \
((e2k_g_preempt_tmr_t) NATIVE_GET_SREG_CLOSED(g_preempt_tmr))

View File

@ -76,58 +76,6 @@ typedef union virt_ctrl_cu {
/* restore */
#define VIRT_CTRL_CU_reg word /* [63: 0] - entire register */
/* Bits mask of VIRT_CTRL_CU fields and flags */
#define VIRT_CTRL_CU_ENV_C_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_evn_c = -1, }.word)
#define VIRT_CTRL_CU_RR_IDR_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rr_idr = 1, }.word)
#define VIRT_CTRL_CU_RR_CLKR_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rr_clkr = 1, }.word)
#define VIRT_CTRL_CU_RR_SCLKR_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rr_sclkr = 1, }.word)
#define VIRT_CTRL_CU_RR_DBG_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rr_dbg = 1, }.word)
#define VIRT_CTRL_CU_RW_CORE_MODE_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rw_core_mode = 1, }.word)
#define VIRT_CTRL_CU_RW_CLKR_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rw_clkr = 1, }.word)
#define VIRT_CTRL_CU_RW_SCLKR_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rw_sclkr = 1, }.word)
#define VIRT_CTRL_CU_RW_SCLKM3_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rw_sclkm3 = 1, }.word)
#define VIRT_CTRL_CU_RW_DBG_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rw_dbg = 1, }.word)
#define VIRT_CTRL_CU_HCEM_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_hcem = 1, }.word)
#define VIRT_CTRL_CU_VIRT_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_virt = 1, }.word)
#define VIRT_CTRL_CU_STOP_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_stop = 1, }.word)
#define VIRT_CTRL_CU_EXC_C_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_c = -1, }.word)
#define VIRT_CTRL_CU_EXC_INSTR_DEBUG_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_instr_debug = 1, }.word)
#define VIRT_CTRL_CU_EXC_DATA_DEBUG_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_data_debug = 1, }.word)
#define VIRT_CTRL_CU_EXC_INSTR_PAGE_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_instr_page = 1, }.word)
#define VIRT_CTRL_CU_EXC_DATA_PAGE_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_data_page = 1, }.word)
#define VIRT_CTRL_CU_EXC_MOVA_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_mova = 1, }.word)
#define VIRT_CTRL_CU_EXC_INTERRUPT_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_interrupt = 1, }.word)
#define VIRT_CTRL_CU_EXC_NM_INTERRUPT_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_nm_interrupt = 1, }.word)
#define VIRT_CTRL_CU_GLNCH_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_glnch = -1, }.word)
#define VIRT_CTRL_CU_GLNCH_G_TH_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_glnch_g_th = 1, }.word)
#define VIRT_CTRL_CU_GLNCH_TIR_FZ_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_glnch_tir_fz = 1, }.word)
#define VIRT_CTRL_CU_TIR_RST_MASK \
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_tir_rst = 1, }.word)
#endif /* ! __ASSEMBLY__ */
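The mask definitions removed above all rely on one C idiom: build a compound literal of the register union with a single bitfield set, then take its .word to get the mask. A minimal stand-alone version of the trick (the union here is a stub, not the real virt_ctrl_cu layout; anonymous struct members need C11 or GNU C):

/* Sketch: deriving a bit mask from a bitfield position via a
 * compound literal, as the VIRT_CTRL_CU_*_MASK macros did. */
#include <stdint.h>
#include <stdio.h>

typedef union {
	struct {
		uint64_t rr_idr  : 1;	/* stub layout */
		uint64_t rr_clkr : 1;
		uint64_t rest    : 62;
	};
	uint64_t word;
} demo_reg_t;

#define DEMO_RR_CLKR_MASK ((demo_reg_t) { .rr_clkr = 1, }.word)

int main(void)
{
	printf("mask = %#llx\n",
	       (unsigned long long) DEMO_RR_CLKR_MASK);	/* prints 0x2 */
	return 0;
}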
#define INTC_CU_COND_EVENT_NO 0

View File

@ -1377,6 +1377,11 @@ extern void dump_stack(void); \
BOOT_KVM_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value)
#define BOOT_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \
BOOT_KVM_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value)
#define WRITE_OSGD_REG_VALUE(OSGD_hi_value, OSGD_lo_value) \
do { \
KVM_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value); \
KVM_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value); \
} while (0)
/*
* Read/write low/high double-word Compilation Unit Register (CUD)

View File

@ -24,7 +24,9 @@
})
#define GUEST_USER_PTRS_PER_PGD (GUEST_PAGE_OFFSET / PGDIR_SIZE)
#define GUEST_KERNEL_PGD_PTRS_START GUEST_USER_PTRS_PER_PGD
#define GUEST_USER_PGD_PTRS_START 0
#define GUEST_USER_PGD_PTRS_END GUEST_USER_PTRS_PER_PGD
#define GUEST_KERNEL_PGD_PTRS_START GUEST_USER_PGD_PTRS_END
#define GUEST_KERNEL_PGD_PTRS_END (GUEST_KERNEL_MEM_END / PGDIR_SIZE)
#define GUEST_KERNEL_PTRS_PER_PGD (GUEST_KERNEL_PGD_PTRS_END - \
GUEST_KERNEL_PGD_PTRS_START)
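The pgd-index bounds above are plain integer arithmetic on the guest layout. A worked example with assumed toy values (GUEST_PAGE_OFFSET, GUEST_KERNEL_MEM_END and PGDIR_SIZE are not given in this hunk, so the numbers below are illustrative only):

/* Worked example with assumed constants; the real values differ. */
#include <stdio.h>

#define PGDIR_SIZE		(1UL << 39)	/* assumed: 512 GiB per entry */
#define GUEST_PAGE_OFFSET	(1UL << 44)	/* assumed guest kernel base */
#define GUEST_KERNEL_MEM_END	(GUEST_PAGE_OFFSET + (1UL << 42)) /* assumed */

#define GUEST_USER_PTRS_PER_PGD   (GUEST_PAGE_OFFSET / PGDIR_SIZE)
#define GUEST_KERNEL_PGD_PTRS_END (GUEST_KERNEL_MEM_END / PGDIR_SIZE)

int main(void)
{
	/* user pgd entries occupy [0, 32), kernel entries [32, 40) */
	printf("user ptrs: [0, %lu), kernel ptrs: [%lu, %lu)\n",
	       GUEST_USER_PTRS_PER_PGD,
	       GUEST_USER_PTRS_PER_PGD, GUEST_KERNEL_PGD_PTRS_END);
	return 0;
}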
@ -59,14 +61,17 @@ kvm_init_new_context(struct kvm *kvm, gmm_struct_t *gmm)
}
#ifdef CONFIG_KVM_HV_MMU
static inline pgd_t *
kvm_mmu_get_init_gmm_root(struct kvm *kvm)
{
GTI_BUG_ON(pv_mmu_get_init_gmm(kvm) == NULL);
if (!VALID_PAGE(pv_mmu_get_init_gmm(kvm)->root_hpa))
hpa_t root_hpa = kvm_mmu_get_init_gmm_root_hpa(kvm);
if (!VALID_PAGE(root_hpa))
return NULL;
return (pgd_t *)__va(pv_mmu_get_init_gmm(kvm)->root_hpa);
return (pgd_t *)__va(root_hpa);
}
static inline void
kvm_mmu_set_init_gmm_root(struct kvm_vcpu *vcpu, hpa_t root)
{
@ -85,6 +90,7 @@ kvm_mmu_set_init_gmm_root(struct kvm_vcpu *vcpu, hpa_t root)
}
if (VALID_PAGE(root)) {
gmm->root_hpa = root;
gmm->gk_root_hpa = root;
}
if (is_sep_virt_spaces(vcpu)) {
root_gpa = kvm_get_space_type_guest_os_root(vcpu);
@ -108,9 +114,17 @@ kvm_mmu_get_gmm_root(struct gmm_struct *gmm)
return (pgd_t *)__va(gmm->root_hpa);
}
static inline pgd_t *
kvm_mmu_get_gmm_gk_root(struct gmm_struct *gmm)
{
GTI_BUG_ON(gmm == NULL);
if (!VALID_PAGE(gmm->gk_root_hpa))
return NULL;
return (pgd_t *)__va(gmm->gk_root_hpa);
}
static inline pgd_t *
kvm_mmu_load_the_gmm_root(struct kvm_vcpu *vcpu, gmm_struct_t *gmm)
{
pgd_t *root;
pgd_t *root, *gk_root;
bool u_space = gmm != pv_vcpu_get_init_gmm(vcpu);
GTI_BUG_ON(vcpu == NULL);
@ -127,16 +141,20 @@ kvm_mmu_load_the_gmm_root(struct kvm_vcpu *vcpu, gmm_struct_t *gmm)
vcpu->arch.mmu.set_vcpu_os_pptb(vcpu, gmm->u_vptb);
kvm_set_space_type_spt_os_root(vcpu, (hpa_t)__pa(root));
kvm_set_space_type_spt_u_root(vcpu, (hpa_t)__pa(root));
kvm_set_space_type_spt_gk_root(vcpu, (hpa_t)__pa(root));
}
return root;
} else {
vcpu->arch.mmu.set_vcpu_u_pptb(vcpu, gmm->u_pptb);
kvm_set_space_type_spt_u_root(vcpu, (hpa_t)__pa(root));
gk_root = kvm_mmu_get_gmm_gk_root(gmm);
kvm_set_space_type_spt_gk_root(vcpu, (hpa_t)__pa(gk_root));
if (likely(!is_sep_virt_spaces(vcpu))) {
vcpu->arch.mmu.set_vcpu_os_pptb(vcpu, gmm->u_pptb);
kvm_set_space_type_spt_os_root(vcpu, (hpa_t)__pa(root));
}
return gk_root;
}
return root;
}
static inline pgd_t *

View File

@ -6,6 +6,7 @@
#include <asm/machdep.h>
#include <asm/glob_regs.h>
#include <asm/ptrace.h>
#include <asm/e2k_debug.h>
#ifdef CONFIG_VIRTUALIZATION
/* It is native host guest kernel with virtualization support */
@ -113,20 +114,26 @@
machine.save_kernel_gregs(&vcpu->arch.host_ctxt.k_gregs); \
\
u64 guest_vs = GET_GUEST_VCPU_STATE_POINTER(vcpu); \
E2K_SET_DGREG(GUEST_VCPU_STATE_GREG, guest_vs); \
HOST_ONLY_RESTORE_VCPU_STATE_GREG(guest_vs); \
})
#define HOST_VCPU_STATE_REG_RESTORE(host_ti) \
({ \
struct kvm_vcpu *vcpu = host_ti->vcpu; \
\
struct kernel_gregs h_gregs; \
machine.save_kernel_gregs(&h_gregs); \
copy_k_gregs_to_k_gregs(&host_ti->k_gregs_light, \
&vcpu->arch.host_ctxt.k_gregs); \
})
#define HOST_VCPU_STATE_COPY_SWITCH_TO_GUEST(vcpu, __ti) \
({ \
gthread_info_t *_gti = pv_vcpu_get_gti(vcpu); \
kernel_gregs_t *k_gregs = &(__ti)->k_gregs; \
kernel_gregs_t *gk_gregs = &(_gti)->gk_gregs; \
u64 guest_vs; \
\
NATIVE_RESTORE_KERNEL_GREGS(&vcpu->arch.host_ctxt.k_gregs); \
machine.save_kernel_gregs(&host_ti->k_gregs_light); \
\
NATIVE_RESTORE_KERNEL_GREGS(&h_gregs); \
HOST_ONLY_COPY_FROM_VCPU_STATE_GREG(gk_gregs, guest_vs); \
HOST_ONLY_COPY_TO_VCPU_STATE_GREG(k_gregs, guest_vs); \
})
#define HOST_RESTORE_KERNEL_GREGS_AS_LIGHT(_ti) \

View File

@ -65,10 +65,10 @@
PREFIX_SAVE_AALDIS_V5(KVM, kvm, regs); \
} \
})
#define KVM_SAVE_AALDIS_V2(regs) \
#define KVM_SAVE_AALDIS_V3(regs) \
({ \
if (IS_HV_GM()) { \
NATIVE_SAVE_AALDIS_V2(regs); \
NATIVE_SAVE_AALDIS_V3(regs); \
} else { \
PREFIX_SAVE_AALDIS_V5(KVM, kvm, regs); \
} \
@ -111,10 +111,10 @@
#define KVM_GET_ARRAY_DESCRIPTORS(aau_context) \
PREFIX_GET_ARRAY_DESCRIPTORS_V5(KVM, kvm, aau_context)
#define KVM_GET_ARRAY_DESCRIPTORS_V2(aau_context) \
#define KVM_GET_ARRAY_DESCRIPTORS_V3(aau_context) \
({ \
if (IS_HV_GM()) { \
NATIVE_GET_ARRAY_DESCRIPTORS_V2(aau_context); \
NATIVE_GET_ARRAY_DESCRIPTORS_V3(aau_context); \
} else { \
KVM_GET_ARRAY_DESCRIPTORS(aau_context); \
} \
@ -138,10 +138,10 @@
#define KVM_GET_SYNCHRONOUS_PART(context) \
PREFIX_GET_SYNCHRONOUS_PART_V5(KVM, kvm, context)
#define KVM_GET_SYNCHRONOUS_PART_V2(context) \
#define KVM_GET_SYNCHRONOUS_PART_V3(context) \
({ \
if (IS_HV_GM()) { \
NATIVE_GET_SYNCHRONOUS_PART_V2(context); \
NATIVE_GET_SYNCHRONOUS_PART_V3(context); \
} else { \
KVM_GET_SYNCHRONOUS_PART(context); \
} \
@ -163,10 +163,10 @@ do { \
PREFIX_GET_AAU_CONTEXT(KVM, kvm, V5, v5, context, aasr); \
} \
} while (0)
#define KVM_GET_AAU_CONTEXT_V2(context, aasr) \
#define KVM_GET_AAU_CONTEXT_V3(context, aasr) \
do { \
if (IS_HV_GM()) { \
NATIVE_GET_AAU_CONTEXT_V2(context, aasr); \
NATIVE_GET_AAU_CONTEXT_V3(context, aasr); \
} else { \
PREFIX_GET_AAU_CONTEXT(KVM, kvm, V5, v5, context, aasr); \
} \
@ -186,9 +186,9 @@ kvm_save_aaldi(u64 *aaldis)
KVM_SAVE_AALDIS(aaldis);
}
static inline void
kvm_save_aaldi_v2(u64 *aaldis)
kvm_save_aaldi_v3(u64 *aaldis)
{
KVM_SAVE_AALDIS_V2(aaldis);
KVM_SAVE_AALDIS_V3(aaldis);
}
static inline void
kvm_save_aaldi_v5(u64 *aaldis)
@ -202,9 +202,9 @@ kvm_get_array_descriptors(e2k_aau_t *context)
KVM_GET_ARRAY_DESCRIPTORS(context);
}
static inline void
kvm_get_array_descriptors_v2(e2k_aau_t *context)
kvm_get_array_descriptors_v3(e2k_aau_t *context)
{
KVM_GET_ARRAY_DESCRIPTORS_V2(context);
KVM_GET_ARRAY_DESCRIPTORS_V3(context);
}
static inline void
kvm_get_array_descriptors_v5(e2k_aau_t *context)
@ -224,9 +224,9 @@ kvm_get_synchronous_part(e2k_aau_t *context)
KVM_GET_SYNCHRONOUS_PART(context);
}
static inline void
kvm_get_synchronous_part_v2(e2k_aau_t *context)
kvm_get_synchronous_part_v3(e2k_aau_t *context)
{
KVM_GET_SYNCHRONOUS_PART_V2(context);
KVM_GET_SYNCHRONOUS_PART_V3(context);
}
static inline void
kvm_get_synchronous_part_v5(e2k_aau_t *context)
@ -264,7 +264,7 @@ static __always_inline void kvm_set_aau_context(e2k_aau_t *context,
#define RESTORE_AADS(aau_regs) \
KVM_RESTORE_AADS(aau_regs)
#define SAVE_AALDIS_V2(regs) KVM_SAVE_AALDIS_V2(regs)
#define SAVE_AALDIS_V3(regs) KVM_SAVE_AALDIS_V3(regs)
#define SAVE_AALDIS_V5(regs) KVM_SAVE_AALDIS_V5(regs)
#define SAVE_AALDA(aaldas) KVM_SAVE_AALDAS(aaldas)
@ -274,17 +274,17 @@ static __always_inline void kvm_set_aau_context(e2k_aau_t *context,
#define SAVE_AAU_REGS_FOR_PTRACE(regs, ti) \
KVM_SAVE_AAU_REGS_FOR_PTRACE(regs, ti)
#define GET_ARRAY_DESCRIPTORS_V2(context) \
KVM_GET_ARRAY_DESCRIPTORS_V2(context)
#define GET_ARRAY_DESCRIPTORS_V3(context) \
KVM_GET_ARRAY_DESCRIPTORS_V3(context)
#define GET_ARRAY_DESCRIPTORS_V5(context) \
KVM_GET_ARRAY_DESCRIPTORS_V5(context)
#define GET_SYNCHRONOUS_PART_V2(context) \
KVM_GET_SYNCHRONOUS_PART_V2(context)
#define GET_SYNCHRONOUS_PART_V3(context) \
KVM_GET_SYNCHRONOUS_PART_V3(context)
#define GET_SYNCHRONOUS_PART_V5(context) \
KVM_GET_SYNCHRONOUS_PART_V5(context)
#define GET_AAU_CONTEXT_V2(context, aasr) KVM_GET_AAU_CONTEXT_V2(context, aasr)
#define GET_AAU_CONTEXT_V3(context, aasr) KVM_GET_AAU_CONTEXT_V3(context, aasr)
#define GET_AAU_CONTEXT_V5(context, aasr) KVM_GET_AAU_CONTEXT_V5(context, aasr)
static inline void

View File

@ -17,11 +17,6 @@ struct page;
*/
extern void smp_flush_icache_all(void);
static inline void
kvm_smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end)
{
smp_flush_icache_all();
}
static inline void
kvm_smp_flush_icache_range_array(struct icache_range_array *icache_range_arr)
{
@ -40,6 +35,7 @@ kvm_smp_flush_icache_kernel_line(e2k_addr_t addr)
#endif /* CONFIG_SMP */
extern void kvm_flush_dcache_line(e2k_addr_t virt_addr);
extern u64 kvm_read_dcache_l1_fault_reg(void);
extern void kvm_clear_dcache_l1_set(e2k_addr_t virt_addr, unsigned long set);
extern void kvm_flush_dcache_range(void *addr, size_t len);
extern void kvm_clear_dcache_l1_range(void *virt_addr, size_t len);
@ -57,11 +53,6 @@ extern void kvm_flush_icache_page(struct vm_area_struct *vma,
/* it is pure guest kernel (not paravirtualized based on pv_ops) */
#ifdef CONFIG_SMP
static inline void
smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end)
{
kvm_smp_flush_icache_range(start, end);
}
static inline void
smp_flush_icache_range_array(struct icache_range_array *icache_range_arr)
{
kvm_smp_flush_icache_range_array(icache_range_arr);


@ -105,26 +105,14 @@ copy_stack_page_from_kernel(void __user *dst, void *src, e2k_size_t to_copy,
return ret;
}
static __always_inline int
copy_stack_page_to_user(void __user *dst, void *src, e2k_size_t to_copy,
bool is_chain)
static inline struct page *get_user_addr_to_kernel_page(unsigned long addr)
{
struct page *page = NULL;
unsigned long addr = (unsigned long)dst;
void *k_dst;
e2k_size_t offset;
mm_segment_t seg;
unsigned long ts_flag;
int npages;
int ret;
if (to_copy == 0)
return 0;
DebugUST("started to copy %s stack from kernel stack %px to user %px "
"size 0x%lx\n",
(is_chain) ? "chain" : "procedure",
src, dst, to_copy);
seg = get_fs();
set_fs(K_USER_DS);
ts_flag = set_ts_flag(TS_KERNEL_SYSCALL);
@ -142,11 +130,44 @@ copy_stack_page_to_user(void __user *dst, void *src, e2k_size_t to_copy,
}
clear_ts_flag(ts_flag);
set_fs(seg);
goto failed;
return ERR_PTR(ret);
} while (npages != 1);
clear_ts_flag(ts_flag);
set_fs(seg);
return page;
}
static inline void put_user_addr_to_kernel_page(struct page *page)
{
if (likely(!IS_ERR_OR_NULL(page)))
put_page(page);
}
static __always_inline int
copy_stack_page_to_user(void __user *dst, void *src, e2k_size_t to_copy,
bool is_chain)
{
struct page *page;
unsigned long addr = (unsigned long)dst;
void *k_dst;
e2k_size_t offset;
int ret;
if (to_copy == 0)
return 0;
DebugUST("started to copy %s stack from kernel stack %px to user %px "
"size 0x%lx\n",
(is_chain) ? "chain" : "procedure",
src, dst, to_copy);
page = get_user_addr_to_kernel_page(addr);
if (unlikely(IS_ERR_OR_NULL(page))) {
ret = (IS_ERR(page)) ? PTR_ERR(page) : -EINVAL;
goto failed;
}
offset = addr & ~PAGE_MASK;
k_dst = page_address(page) + offset;
DebugUST("copy stack frames from kernel %px to user %px, size 0x%lx\n",
@ -161,7 +182,7 @@ copy_stack_page_to_user(void __user *dst, void *src, e2k_size_t to_copy,
}
failed_copy:
put_page(page);
put_user_addr_to_kernel_page(page);
failed:
return ret;
}
@ -200,23 +221,59 @@ kvm_copy_user_stack_from_kernel(void __user *dst, void *src,
if (trace_guest_va_tlb_state_enabled()) {
trace_guest_va_tlb_state((e2k_addr_t)dst);
}
trace_proc_stack_frames((kernel_mem_ps_t *)(src - copied),
(kernel_mem_ps_t *)(src - copied), copied,
src -= copied;
trace_proc_stack_frames((kernel_mem_ps_t *)(src),
(kernel_mem_ps_t *)(src), copied,
trace_guest_proc_stack_frame);
trace_proc_stack_frames((kernel_mem_ps_t *)(dst - copied),
(kernel_mem_ps_t *)(dst - copied), copied,
dst -= copied;
to_copy = copied;
do {
struct page *page;
void *k_dst;
offset = (unsigned long)dst & ~PAGE_MASK;
len = min(to_copy, PAGE_SIZE - offset);
page = get_user_addr_to_kernel_page((unsigned long)dst);
if (unlikely(IS_ERR_OR_NULL(page))) {
ret = (IS_ERR(page)) ? PTR_ERR(page) : -EINVAL;
goto failed;
}
k_dst = page_address(page) + offset;
trace_proc_stack_frames((kernel_mem_ps_t *)(k_dst),
(kernel_mem_ps_t *)(k_dst), len,
trace_guest_proc_stack_frame);
dst += len;
to_copy -= len;
} while (to_copy > 0);
}
if (is_chain && trace_guest_chain_stack_frame_enabled()) {
if (trace_guest_va_tlb_state_enabled()) {
trace_guest_va_tlb_state((e2k_addr_t)dst);
}
trace_chain_stack_frames((e2k_mem_crs_t *)(src - copied),
(e2k_mem_crs_t *)(src - copied), copied,
src -= copied;
trace_chain_stack_frames((e2k_mem_crs_t *)(src),
(e2k_mem_crs_t *)(src), copied,
trace_guest_chain_stack_frame);
trace_chain_stack_frames((e2k_mem_crs_t *)(dst - copied),
(e2k_mem_crs_t *)(dst - copied), copied,
dst -= copied;
to_copy = copied;
do {
struct page *page;
void *k_dst;
offset = (unsigned long)dst & ~PAGE_MASK;
len = min(to_copy, PAGE_SIZE - offset);
page = get_user_addr_to_kernel_page((unsigned long)dst);
if (unlikely(IS_ERR_OR_NULL(page))) {
ret = (IS_ERR(page)) ? PTR_ERR(page) : -EINVAL;
goto failed;
}
k_dst = page_address(page) + offset;
trace_chain_stack_frames((e2k_mem_crs_t *)(k_dst),
(e2k_mem_crs_t *)(k_dst), len,
trace_guest_chain_stack_frame);
dst += len;
to_copy -= len;
} while (to_copy > 0);
}
return 0;
@ -259,14 +316,33 @@ kvm_user_hw_stacks_copy(pt_regs_t *regs)
stacks = &regs->stacks;
copyed_ps_size = regs->copyed.ps_size;
copyed_pcs_size = regs->copyed.pcs_size;
if (unlikely(copyed_ps_size || copyed_pcs_size)) {
if (unlikely(copyed_ps_size)) {
/* the procedure stack has already been copied */
BUG_ON(copyed_ps_size != GET_PSHTP_MEM_INDEX(stacks->pshtp) &&
GET_PSHTP_MEM_INDEX(stacks->pshtp) != 0);
BUG_ON(copyed_pcs_size != PCSHTP_SIGN_EXTEND(stacks->pcshtp) &&
PCSHTP_SIGN_EXTEND(stacks->pcshtp) != SZ_OF_CR);
return 0;
if (copyed_ps_size != GET_PSHTP_MEM_INDEX(stacks->pshtp) &&
GET_PSHTP_MEM_INDEX(stacks->pshtp) != 0) {
pr_err("%s(): copyed_ps_size 0x%lx != pshtp 0x%llx or "
"pshtp 0x%llx != 0\n",
__func__,
copyed_ps_size, GET_PSHTP_MEM_INDEX(stacks->pshtp),
GET_PSHTP_MEM_INDEX(stacks->pshtp));
WARN_ON(true);
}
}
if (unlikely(copyed_pcs_size)) {
/* the chain stack has already been copied */
if (copyed_pcs_size != PCSHTP_SIGN_EXTEND(stacks->pcshtp) &&
PCSHTP_SIGN_EXTEND(stacks->pcshtp) != SZ_OF_CR) {
pr_err("%s(): copyed_pcs_size 0x%lx != pcshtp 0x%llx or "
"pcshtp 0x%llx != 0x%lx\n",
__func__,
copyed_pcs_size, PCSHTP_SIGN_EXTEND(stacks->pcshtp),
PCSHTP_SIGN_EXTEND(stacks->pcshtp), SZ_OF_CR);
WARN_ON(true);
}
}
if (unlikely(copyed_ps_size && copyed_pcs_size))
/* both stacks have already been copied */
return 0;
ret = HYPERVISOR_copy_stacks_to_memory();
if (ret != 0) {
@ -280,6 +356,10 @@ kvm_user_hw_stacks_copy(pt_regs_t *regs)
pshtp.PSHTP_reg,
pcsp_lo.PCSP_lo_half, pcsp_hi.PCSP_hi_half,
pcshtp);
if (unlikely(copyed_ps_size))
goto copy_chain_stack;
src = (void *)psp_lo.PSP_lo_base;
DebugUST("procedure stack at kernel from %px, size 0x%x, ind 0x%x, "
"pshtp 0x%llx\n",
@ -329,6 +409,11 @@ kvm_user_hw_stacks_copy(pt_regs_t *regs)
regs->copyed.ps_size = to_copy;
}
copy_chain_stack:
if (unlikely(copyed_pcs_size))
goto complete_copy;
/* copy user part of chain stack from kernel back to user */
src = (void *)pcsp_lo.PCSP_lo_base;
DebugUST("chain stack at kernel from %px, size 0x%x, ind 0x%x, "
@ -379,6 +464,7 @@ kvm_user_hw_stacks_copy(pt_regs_t *regs)
regs->copyed.pcs_size = to_copy;
}
complete_copy:
failed:
if (DEBUG_USER_STACKS_MODE)
debug_ustacks = false;


@ -24,6 +24,11 @@ kvm_modify_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip,
*((u64 *)pa_to_vpa(phys_ip)) = instr_word;
}
extern int kvm_do_parse_chain_stack(int flags, struct task_struct *p,
parse_chain_fn_t func, void *arg, unsigned long delta_user,
unsigned long top, unsigned long bottom,
bool *interrupts_enabled, unsigned long *irq_flags);
#ifdef CONFIG_KVM_GUEST_KERNEL
/* it is pure guest kernel (not paravirtualized based on pv_ops) */
@ -74,6 +79,17 @@ modify_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip,
kvm_modify_instr_on_IP(ip, phys_ip, instr_word);
}
static inline int
do_parse_chain_stack(int flags, struct task_struct *p,
parse_chain_fn_t func, void *arg, unsigned long delta_user,
unsigned long top, unsigned long bottom,
bool *interrupts_enabled, unsigned long *irq_flags)
{
return kvm_do_parse_chain_stack(flags, p, func, arg, delta_user,
top, bottom,
interrupts_enabled, irq_flags);
}
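
A minimal sketch of calling the wrapper above; only the calling convention is taken from this header, while the callback, the window bounds, and the zero flags/delta_user values are placeholders:

/* Sketch only: walk a chain-stack window [bottom, top) of the current
 * task with a caller-supplied callback; flags and delta_user of 0 are
 * assumptions. */
static int example_walk_chain(unsigned long top, unsigned long bottom,
				parse_chain_fn_t my_chain_fn)
{
	bool irqs_on;
	unsigned long irq_flags;

	return do_parse_chain_stack(0, current, my_chain_fn, NULL, 0,
				    top, bottom, &irqs_on, &irq_flags);
}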
#endif /* CONFIG_KVM_GUEST_KERNEL */
#endif /* ! _E2K_KVM_GUEST_DEBUG_H */


@ -22,13 +22,13 @@
#define KVM_SAVE_HOST_GREGS(__ti) \
({ \
if (IS_HV_GM()) { \
DO_SAVE_VCPU_STATE_GREGS_V2(__ti->h_gregs.g); \
DO_SAVE_VCPU_STATE_GREGS_V3(__ti->h_gregs.g); \
} \
})
#define KVM_RESTORE_HOST_GREGS(__ti) \
({ \
if (IS_HV_GM()) { \
DO_RESTORE_VCPU_STATE_GREGS_V2(__ti->h_gregs.g); \
DO_RESTORE_VCPU_STATE_GREGS_V3(__ti->h_gregs.g); \
} \
})
#else /* CONFIG_E2K_ISET_VER >= 5 */


@ -17,13 +17,6 @@ typedef struct guest_machdep {
/* guest interface functions */
} guest_machdep_t;
#ifdef CONFIG_KVM_GUEST_KERNEL
/* It is pure guest kernel (not paravirtualized based on pv_ops) */
typedef struct host_machdep {
/* nothing to support and do */
} host_machdep_t;
#endif /* CONFIG_KVM_GUEST_KERNEL */
#endif /* CONFIG_VIRTUALIZATION */
#endif /* __KERNEL__ */


@ -8,6 +8,19 @@
#include <asm/tlb_regs_types.h>
#include <asm/mmu_fault.h>
static inline bool
kvm_ftype_has_sw_fault(tc_fault_type_t ftype)
{
/* the host KVM can pass a software fault for the guest kernel, so any fault type may carry one */
return ftype_test_is_kvm_fault_injected(ftype) || true;
}
static inline bool
kvm_ftype_test_sw_fault(tc_fault_type_t ftype)
{
return ftype_test_is_kvm_fault_injected(ftype);
}
extern void kvm_recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data,
u32 data_tag, u64 st_rec_opc, u64 data_ext, u32 data_ext_tag,
u64 opc_ext, int chan, int qp_store, int atomic_store);
@ -51,12 +64,24 @@ guest_addr_to_host(void **addr, const pt_regs_t *regs)
}
static inline void *
guest_ptr_to_host(void *ptr, int size, const pt_regs_t *regs)
guest_ptr_to_host(void *ptr, bool is_write, int size, const pt_regs_t *regs)
{
/* there are no guests, so no conversion is needed */
return native_guest_ptr_to_host(ptr, size);
}
static inline bool
ftype_has_sw_fault(tc_fault_type_t ftype)
{
return kvm_ftype_has_sw_fault(ftype);
}
static inline bool
ftype_test_sw_fault(tc_fault_type_t ftype)
{
return kvm_ftype_test_sw_fault(ftype);
}
static inline bool
is_guest_kernel_gregs(struct thread_info *ti,
unsigned greg_num_d, u64 **greg_copy)


@ -90,6 +90,10 @@ static inline kvm_vcpu_state_t *kvm_get_vcpu_state(void)
return (kvm_vcpu_state_t *)(vcpu_base);
}
extern void kvm_clean_pc_stack_zero_frame(void *addr, bool user);
extern e2k_cute_t *kvm_get_cut_entry_pointer(int cui, struct page **page);
extern void kvm_put_cut_entry_pointer(struct page *page);
/*
* Restore proper psize field of WD register
*/
@ -322,6 +326,22 @@ static inline void COPY_STACKS_TO_MEMORY(void)
KVM_COPY_STACKS_TO_MEMORY();
}
static inline void
clean_pc_stack_zero_frame(void *addr, bool user)
{
kvm_clean_pc_stack_zero_frame(addr, user);
}
static inline e2k_cute_t *get_cut_entry_pointer(int cui, struct page **page)
{
return kvm_get_cut_entry_pointer(cui, page);
}
static inline void put_cut_entry_pointer(struct page *page)
{
kvm_put_cut_entry_pointer(page);
}
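
The CUT entry accessors above follow a get/put discipline: the returned pointer is only valid while the backing page reference is held. A hedged sketch of a caller (the NULL failure convention and the field use are assumptions):

/* Sketch only: look up and release a CUT entry for a given CUI. */
static int example_inspect_cut_entry(int cui)
{
	struct page *page = NULL;
	e2k_cute_t *cute;

	cute = get_cut_entry_pointer(cui, &page);
	if (cute == NULL)		/* failure convention assumed */
		return -EINVAL;

	/* ... read *cute while the backing page is pinned ... */

	put_cut_entry_pointer(page);
	return 0;
}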
static inline void
restore_wd_register_psize(e2k_wd_t wd_from)
{
@ -428,7 +448,10 @@ release_kernel_stacks(thread_info_t *dead_ti)
}
#endif /* COMMON_KERNEL_USER_HW_STACKS */
#define GET_PARAVIRT_GUEST_MODE(pv_guest, regs) /* nothing to do */
#define GET_PARAVIRT_GUEST_MODE(pv_guest, regs) \
({ \
(pv_guest) = false; \
})
static inline int
switch_to_new_user(e2k_stacks_t *stacks, hw_stack_t *hw_stacks,


@ -27,9 +27,9 @@ extern void kvm_restore_local_glob_regs(const local_gregs_t *l_gregs,
extern void kvm_get_all_user_glob_regs(global_regs_t *gregs);
static inline void
guest_save_glob_regs_v2(global_regs_t *gregs)
guest_save_glob_regs_v3(global_regs_t *gregs)
{
kvm_guest_save_gregs_v2(gregs);
kvm_guest_save_gregs_v3(gregs);
}
static inline void
@ -39,9 +39,9 @@ guest_save_glob_regs_v5(global_regs_t *gregs)
}
static inline void
guest_save_glob_regs_dirty_bgr_v2(global_regs_t *gregs)
guest_save_glob_regs_dirty_bgr_v3(global_regs_t *gregs)
{
kvm_guest_save_gregs_v2(gregs);
kvm_guest_save_gregs_v3(gregs);
}
static inline void
@ -51,9 +51,9 @@ guest_save_glob_regs_dirty_bgr_v5(global_regs_t *gregs)
}
static inline void
guest_save_local_glob_regs_v2(local_gregs_t *l_gregs, bool is_signal)
guest_save_local_glob_regs_v3(local_gregs_t *l_gregs, bool is_signal)
{
kvm_guest_save_local_gregs_v2(l_gregs, is_signal);
kvm_guest_save_local_gregs_v3(l_gregs, is_signal);
if (KERNEL_GREGS_MAX_MASK & LOCAL_GREGS_USER_MASK)
copy_k_gregs_to_l_gregs(l_gregs,
&current_thread_info()->k_gregs);
@ -69,9 +69,9 @@ guest_save_local_glob_regs_v5(local_gregs_t *l_gregs, bool is_signal)
}
static inline void
guest_restore_glob_regs_v2(const global_regs_t *gregs)
guest_restore_glob_regs_v3(const global_regs_t *gregs)
{
kvm_guest_restore_gregs_v2(gregs);
kvm_guest_restore_gregs_v3(gregs);
}
static inline void
@ -81,9 +81,9 @@ guest_restore_glob_regs_v5(const global_regs_t *gregs)
}
static inline void
guest_restore_local_glob_regs_v2(const local_gregs_t *l_gregs, bool is_signal)
guest_restore_local_glob_regs_v3(const local_gregs_t *l_gregs, bool is_signal)
{
kvm_guest_restore_local_gregs_v2(l_gregs, is_signal);
kvm_guest_restore_local_gregs_v3(l_gregs, is_signal);
if (KERNEL_GREGS_MAX_MASK & LOCAL_GREGS_USER_MASK)
get_k_gregs_from_l_regs(&current_thread_info()->k_gregs,
l_gregs);
@ -334,10 +334,10 @@ do { \
KVM_RESTORE_COMMON_REGS(regs)
static inline void
save_glob_regs_v2(global_regs_t *gregs)
save_glob_regs_v3(global_regs_t *gregs)
{
if (IS_HV_GM()) {
guest_save_glob_regs_v2(gregs);
guest_save_glob_regs_v3(gregs);
} else {
kvm_save_glob_regs(gregs);
}
@ -354,10 +354,10 @@ save_glob_regs_v5(global_regs_t *gregs)
}
static inline void
save_glob_regs_dirty_bgr_v2(global_regs_t *gregs)
save_glob_regs_dirty_bgr_v3(global_regs_t *gregs)
{
if (IS_HV_GM()) {
guest_save_glob_regs_dirty_bgr_v2(gregs);
guest_save_glob_regs_dirty_bgr_v3(gregs);
} else {
kvm_save_glob_regs_dirty_bgr(gregs);
}
@ -374,10 +374,10 @@ save_glob_regs_dirty_bgr_v5(global_regs_t *gregs)
}
static inline void
save_local_glob_regs_v2(local_gregs_t *l_gregs, bool is_signal)
save_local_glob_regs_v3(local_gregs_t *l_gregs, bool is_signal)
{
if (IS_HV_GM()) {
guest_save_local_glob_regs_v2(l_gregs, is_signal);
guest_save_local_glob_regs_v3(l_gregs, is_signal);
} else {
kvm_save_local_glob_regs(l_gregs, is_signal);
}
@ -394,10 +394,10 @@ save_local_glob_regs_v5(local_gregs_t *l_gregs, bool is_signal)
}
static inline void
restore_glob_regs_v2(const global_regs_t *gregs)
restore_glob_regs_v3(const global_regs_t *gregs)
{
if (IS_HV_GM()) {
guest_restore_glob_regs_v2(gregs);
guest_restore_glob_regs_v3(gregs);
} else {
kvm_restore_glob_regs(gregs);
}
@ -414,10 +414,10 @@ restore_glob_regs_v5(const global_regs_t *gregs)
}
static inline void
restore_local_glob_regs_v2(const local_gregs_t *l_gregs, bool is_signal)
restore_local_glob_regs_v3(const local_gregs_t *l_gregs, bool is_signal)
{
if (IS_HV_GM())
guest_restore_local_glob_regs_v2(l_gregs, is_signal);
guest_restore_local_glob_regs_v3(l_gregs, is_signal);
else
kvm_restore_local_glob_regs(l_gregs, is_signal);
}


@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <asm/pv_info.h>
#include <asm/kvm/hypercall.h>
@ -12,6 +13,12 @@
#define REPLACE_USR_PFAULT(to_pfault_IP) \
(current_thread_info()->usr_pfault_jump = to_pfault_IP)
extern unsigned long kvm_fast_kernel_tagged_memory_copy(void *dst, const void *src,
size_t len, unsigned long strd_opcode,
unsigned long ldrd_opcode, int prefetch);
extern unsigned long kvm_fast_kernel_tagged_memory_set(void *addr, u64 val, u64 tag,
size_t len, u64 strd_opcode);
/*
* optimized copy memory along with tags
* using privileged LD/ST recovery operations
@ -36,13 +43,11 @@ kvm_do_fast_tagged_memory_set(void *addr, u64 val, u64 tag,
{
long ret;
if (IS_HOST_KERNEL_ADDRESS((e2k_addr_t)addr)) {
ret = HYPERVISOR_fast_tagged_guest_memory_set(addr, val, tag,
len, strd_opcode);
} else {
do {
ret = HYPERVISOR_fast_tagged_memory_set(addr, val, tag, len,
strd_opcode);
}
} while (ret == -EAGAIN);
return ret;
}
@ -168,8 +173,8 @@ fast_tagged_memory_copy(void *dst, const void *src, size_t len,
unsigned long strd_opcode, unsigned long ldrd_opcode,
int prefetch)
{
return kvm_fast_tagged_memory_copy(dst, src, len, strd_opcode,
ldrd_opcode, prefetch);
return kvm_fast_kernel_tagged_memory_copy(dst, src, len, strd_opcode,
ldrd_opcode, prefetch);
}
static inline unsigned long
fast_tagged_memory_copy_user(void *dst, const void *src, size_t len, size_t *copied,
@ -191,7 +196,7 @@ static inline unsigned long
fast_tagged_memory_set(void *addr, u64 val, u64 tag,
size_t len, u64 strd_opcode)
{
return kvm_fast_tagged_memory_set(addr, val, tag, len, strd_opcode);
return kvm_fast_kernel_tagged_memory_set(addr, val, tag, len, strd_opcode);
}
static inline unsigned long
fast_tagged_memory_set_user(void *addr, u64 val, u64 tag,


@ -101,7 +101,8 @@ static inline void __guest_exit(struct thread_info *ti,
kvm_guest_exit(ti, vcpu, flags);
}
static inline void
trap_guest_enter(struct thread_info *ti, struct pt_regs *regs, unsigned flags)
trap_guest_enter(struct thread_info *ti, struct pt_regs *regs, unsigned flags,
restore_caller_t from)
{
kvm_trap_guest_enter(ti, regs, flags);
}
@ -159,7 +160,7 @@ static inline void pv_vcpu_syscall_intc(thread_info_t *ti, pt_regs_t *regs)
}
static inline void guest_exit_intc(struct pt_regs *regs,
bool intc_emul_flag) { }
bool intc_emul_flag, restore_caller_t from) { }
static inline void guest_syscall_exit_trap(struct pt_regs *regs,
bool ts_host_at_vcpu_mode) { }


@ -5,7 +5,7 @@
#include <linux/hugetlb.h>
#include <asm/trace-defs.h>
#include <asm/trace_pgtable-v2.h>
#include <asm/trace_pgtable-v3.h>
#include <asm/trace_pgtable-v6.h>
#include <asm/pgtable_def.h>
#include <asm/kvm/guest/trace-defs.h>


@ -5,7 +5,7 @@
#include <linux/hugetlb.h>
#include <asm/trace-defs.h>
#include <asm/trace_pgtable-v2.h>
#include <asm/trace_pgtable-v3.h>
#include <asm/trace_pgtable-v6.h>
#include <asm/pgtable_def.h>
#include <asm/kvm/guest/trace-defs.h>


@ -5,7 +5,7 @@
#include <linux/hugetlb.h>
#include <asm/trace-defs.h>
#include <asm/trace_pgtable-v2.h>
#include <asm/trace_pgtable-v3.h>
#include <asm/trace_pgtable-v6.h>
#include <asm/pgtable_def.h>


@ -56,23 +56,23 @@
/* guest VCPU state registers are saved with other kernel global registers */
/* at thread_info->k_gregs, same as by host for paravirtualized guest */
.macro DO_SAVE_HOST_GREGS_V2 gvcpu_lo, gvcpu_hi, hvcpu_lo, hvcpu_hi \
.macro DO_SAVE_HOST_GREGS_V3 gvcpu_lo, gvcpu_hi, hvcpu_lo, hvcpu_hi \
drti, predSAVE, drtmp, rtmp0, rtmp1
/* not used */
.endm /* DO_SAVE_HOST_GREGS_V2 */
.endm /* DO_SAVE_HOST_GREGS_V3 */
.macro DO_SAVE_HOST_GREGS_V5 gvcpu_lo, gvcpu_hi, hvcpu_lo, hvcpu_hi \
drti, predSAVE, drtmp
/* not used */
.endm /* DO_SAVE_HOST_GREGS_V5 */
.macro SAVE_HOST_GREGS_V2 drti, predSAVE, drtmp, rtmp0, rtmp1
DO_SAVE_HOST_GREGS_V2 \
.macro SAVE_HOST_GREGS_V3 drti, predSAVE, drtmp, rtmp0, rtmp1
DO_SAVE_HOST_GREGS_V3 \
GUEST_VCPU_STATE_GREG, GUEST_VCPU_STATE_UNUSED_GREG, \
VCPU_STATE_GREGS_PAIRS_INDEX, VCPU_STATE_GREGS_PAIRS_HI_INDEX, \
\drti, \predSAVE, \
\drtmp, \rtmp0, \rtmp1
.endm /* SAVE_HOST_GREGS_V2 */
.endm /* SAVE_HOST_GREGS_V3 */
.macro SAVE_HOST_GREGS_V5 drti, predSAVE, drtmp
DO_SAVE_HOST_GREGS_V5 \
@ -108,10 +108,10 @@
.endm /* SET_VCPU_STATE_GREGS */
#endif
.macro SAVE_HOST_GREGS_TO_VIRT_V2 drti, predSAVE, drtmp, rtmp0, rtmp1
SAVE_HOST_GREGS_V2 \drti, \predSAVE, \drtmp, \rtmp0, \rtmp1
.macro SAVE_HOST_GREGS_TO_VIRT_V3 drti, predSAVE, drtmp, rtmp0, rtmp1
SAVE_HOST_GREGS_V3 \drti, \predSAVE, \drtmp, \rtmp0, \rtmp1
SET_VCPU_STATE_GREGS \drti, \predSAVE, \drtmp
.endm /* SAVE_HOST_GREGS_TO_VIRT_V2 */
.endm /* SAVE_HOST_GREGS_TO_VIRT_V3 */
.macro SAVE_HOST_GREGS_TO_VIRT_V5 drti, predSAVE, drtmp
SAVE_HOST_GREGS_V5 \drti, \predSAVE, \drtmp
@ -135,7 +135,7 @@
/* not used */
.endm /* NEED_SAVE_CUR_AND_VCPU_STATE_GREGS */
.macro SAVE_HOST_GREGS_TO_VIRT_V2 drti, predSAVE, drtmp, rtmp0, rtmp1
.macro SAVE_HOST_GREGS_TO_VIRT_V3 drti, predSAVE, drtmp, rtmp0, rtmp1
/* not used */
.endm /* SAVE_VCPU_STATE_GREGS */


@ -54,9 +54,7 @@ static inline void kvm_clear_fork_child_pt_regs(struct pt_regs *childregs)
kvm_init_pt_regs_copyed_fields(childregs);
}
#define kvm_restore_some_values_after_fill(__regs, __from, __return_to_user)
#define KVM_FILL_HARDWARE_STACKS() /* host itself will fill */
#define KVM_FILL_HARDWARE_STACKS(sw_fill_sequel) /* host itself will fill */
extern void kvm_correct_trap_psp_pcsp(struct pt_regs *regs,
thread_info_t *thread_info);
@ -173,18 +171,15 @@ do { \
} \
} while (false)
#define FILL_HARDWARE_STACKS__SW() \
#define FILL_HARDWARE_STACKS__SW(sw_fill_sequel) \
do { \
if (IS_HV_GM()) { \
NATIVE_FILL_HARDWARE_STACKS__SW(); \
NATIVE_FILL_HARDWARE_STACKS__SW(sw_fill_sequel); \
} else { \
KVM_FILL_HARDWARE_STACKS(); \
KVM_FILL_HARDWARE_STACKS(sw_fill_sequel); \
} \
} while (false)
#define restore_some_values_after_fill(__regs, __from, __return_to_user) \
kvm_restore_some_values_after_fill(__regs, __from, __return_to_user)
static inline void
exit_handle_syscall(e2k_addr_t sbr, e2k_usd_hi_t usd_hi,
e2k_usd_lo_t usd_lo, e2k_upsr_t upsr)


@ -0,0 +1,134 @@
#ifndef _E2K_KVM_GUEST_UACCESS_H_
#define _E2K_KVM_GUEST_UACCESS_H_
/*
* Guest User space memory access functions
*/
#include <linux/thread_info.h>
#include <asm/alternative.h>
#include <asm/errno.h>
#include <asm/page.h>
#include <asm/e2k_api.h>
#include <asm/head.h>
#ifdef CONFIG_PROTECTED_MODE
#include <asm/e2k_ptypes.h>
#endif
#define __kvm_get_priv_user(x, ptr) \
({ \
const __typeof__(*(ptr)) *___gk_ptr; \
struct page *page; \
unsigned long u_addr = (unsigned long)(ptr); \
unsigned long offset, k_addr; \
int __ret_gu; \
\
page = get_user_addr_to_kernel_page(u_addr); \
if (unlikely(IS_ERR_OR_NULL(page))) { \
__ret_gu = (IS_ERR(page)) ? PTR_ERR(page) : -EINVAL; \
} else { \
offset = u_addr & ~PAGE_MASK; \
k_addr = (unsigned long)page_address(page) + offset; \
___gk_ptr = (const __typeof__(*(ptr)) *)k_addr; \
__ret_gu = __get_user(x, ___gk_ptr); \
put_user_addr_to_kernel_page(page); \
} \
(int) builtin_expect_wrapper(__ret_gu, 0); \
})
#define kvm_get_priv_user(x, ptr) \
({ \
const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
might_fault(); \
access_ok(__gu_ptr, sizeof(*__gu_ptr)) ? \
__kvm_get_priv_user((x), __gu_ptr) : \
((x) = (__typeof__(x)) 0, -EFAULT); \
})
#define __kvm_put_priv_user(x, ptr) \
({ \
__typeof__(*(ptr)) *___pk_ptr = (ptr); \
struct page *page; \
unsigned long u_addr = (unsigned long)(ptr); \
unsigned long offset, k_addr; \
int __ret_pu; \
\
page = get_user_addr_to_kernel_page(u_addr); \
if (unlikely(IS_ERR_OR_NULL(page))) { \
__ret_pu = (IS_ERR(page)) ? PTR_ERR(page) : -EINVAL; \
} else { \
offset = u_addr & ~PAGE_MASK; \
k_addr = (unsigned long)page_address(page) + offset; \
___pk_ptr = (__typeof__(*(ptr)) *)k_addr; \
__ret_pu = __put_user(x, ___pk_ptr); \
put_user_addr_to_kernel_page(page); \
} \
(int) builtin_expect_wrapper(__ret_pu, 0); \
})
#define kvm_put_priv_user(x, ptr) \
({ \
__typeof__(*(ptr)) *__pu_ptr = (ptr); \
might_fault(); \
(access_ok(__pu_ptr, sizeof(*__pu_ptr))) ? \
__kvm_put_priv_user((x), __pu_ptr) : -EFAULT; \
})
extern unsigned long __kvm_copy_to_priv_user(void __user *to, const void *from,
unsigned long n);
extern unsigned long __kvm_copy_to_priv_user_with_tags(void __user *to,
const void *from, unsigned long n);
extern unsigned long __kvm_copy_from_priv_user(void *to,
const void __user *from, unsigned long n);
extern unsigned long __kvm_copy_from_priv_user_with_tags(void *to,
const void __user *from, unsigned long n);
static inline
unsigned long kvm_copy_to_priv_user_with_tags(void __user *to, const void *from,
unsigned long n)
{
if (access_ok(to, n))
n = __kvm_copy_to_priv_user_with_tags(to, from, n);
return n;
}
static inline
unsigned long kvm_copy_from_priv_user_with_tags(void *to, const void __user *from,
unsigned long n)
{
if (access_ok(from, n))
n = __kvm_copy_from_priv_user_with_tags(to, from, n);
return n;
}
#ifdef CONFIG_KVM_GUEST_KERNEL
/* It is native guest kernel (without paravirtualization) */
#define __get_priv_user(x, ptr) __kvm_get_priv_user(x, ptr)
#define __put_priv_user(x, ptr) __kvm_put_priv_user(x, ptr)
#define get_priv_user(x, ptr) kvm_get_priv_user(x, ptr)
#define put_priv_user(x, ptr) kvm_put_priv_user(x, ptr)
#define __copy_to_priv_user __kvm_copy_to_priv_user
#define __copy_from_priv_user __kvm_copy_from_priv_user
#define __copy_to_priv_user_with_tags __kvm_copy_to_priv_user_with_tags
#define __copy_from_priv_user_with_tags __kvm_copy_from_priv_user_with_tags
static inline
unsigned long copy_to_priv_user_with_tags(void __user *to, const void *from,
unsigned long n)
{
return kvm_copy_to_priv_user_with_tags(to, from, n);
}
static inline
unsigned long copy_from_priv_user_with_tags(void *to, const void __user *from,
unsigned long n)
{
return kvm_copy_from_priv_user_with_tags(to, from, n);
}
#endif /* CONFIG_KVM_GUEST_KERNEL */
#endif /* _E2K_KVM_GUEST_UACCESS_H_ */
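
For reference, a hedged sketch of the intended use of the privileged-uaccess helpers defined above; the pointer and the read-modify-write are illustrative only:

/* Sketch only: read and update a u64 in privileged user memory,
 * e.g. a hardware stack frame copied out to the user. */
static int example_patch_priv_word(u64 __user *uptr)
{
	u64 val;
	int ret;

	ret = get_priv_user(val, uptr);	/* does access_ok() itself */
	if (ret)
		return ret;

	val |= 1ULL;

	return put_priv_user(val, uptr);
}

As with the regular uaccess API, the __-prefixed variants skip the access_ok() check and are for callers that have already validated the range.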


@ -6,6 +6,7 @@
#include <asm/page.h>
#include <asm/kvm/mmu_exc.h>
#include <asm/kvm/mmu_pte.h>
/* Format of address record in gva cache */
typedef union {
@ -34,7 +35,7 @@ typedef struct gva_cache_cell {
/* gva -> gpa cache size */
#define KVM_GVA_CACHE_SZ PAGE_SIZE
/* 2 ^ KVM_GVA_CACHE_BUCKET_BITS buckets in cache */
#define KVM_GVA_CACHE_BUCKET_BITS 5
#define KVM_GVA_CACHE_BUCKET_BITS 6
#define KVM_GVA_CACHE_BUCKETS (1 << KVM_GVA_CACHE_BUCKET_BITS)
#define KVM_GVA_CACHE_BUCKET_SZ \
(KVM_GVA_CACHE_SZ / KVM_GVA_CACHE_BUCKETS)
@ -42,6 +43,8 @@ typedef struct gva_cache_cell {
(KVM_GVA_CACHE_BUCKET_SZ / sizeof(gva_cache_cell_t))
#define KVM_GVA_CACHE_LEN \
(KVM_GVA_CACHE_SZ / sizeof(gva_cache_cell_t))
/* maximum gva range len for partial flush */
#define KVM_GVA_CACHE_FLUSH_THRESHOLD 6
typedef enum REPLACE_POLICY {
LRU = 0,
@ -53,7 +56,7 @@ typedef enum REPLACE_POLICY {
* guest page table and kvm memory slots lookup.
*/
typedef struct gva_cache {
spinlock_t bucket_locks[KVM_GVA_CACHE_BUCKETS];
spinlock_t cache_lock;
gva_cache_cell_t *data;
replace_policy_t replace_policy;
} gva_cache_t;
@ -71,15 +74,17 @@ typedef struct gva_cache_query {
} gva_cache_query_t;
typedef gpa_t (*gva_tranlslator_t)(struct kvm_vcpu *, gva_t,
u32, struct kvm_arch_exception*);
typedef gpa_t (*gva_translator_t)(struct kvm_vcpu *, gva_t,
u32, struct kvm_arch_exception*, gw_attr_t *);
gpa_t gva_cache_translate(gva_cache_t *cache, gva_t gva, u32 access,
struct kvm_vcpu *vcpu, kvm_arch_exception_t *exc,
gva_tranlslator_t gva_translate);
gva_translator_t gva_translate);
void gva_cache_fetch_addr(gva_cache_t *cache, gva_t gva, gpa_t gpa,
u32 access);
void gva_cache_flush_addr(gva_cache_t *cache, gva_t gva);
void gva_cache_flush_addr_range(gva_cache_t *cache, gva_t start_gva,
gva_t end_gva);
gva_cache_t *gva_cache_init(void);
void gva_cache_erase(gva_cache_t *cache);
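
A hedged sketch of the lookup path through this API; the translator callback is a placeholder, the access bits are taken from mmu_pte.h elsewhere in this patch, and returning INVALID_GPA on failure is an assumption:

/* Sketch only: resolve a guest VA, falling back to the supplied
 * page-walk callback on a cache miss. */
static gpa_t example_translate(gva_cache_t *cache, struct kvm_vcpu *vcpu,
				gva_t gva, gva_translator_t my_translate_gva)
{
	kvm_arch_exception_t exc;

	return gva_cache_translate(cache, gva, ACC_WRITE_MASK, vcpu, &exc,
				   my_translate_gva);
}

After a guest PTE change the stale translation can be dropped with gva_cache_flush_addr(), or a span with gva_cache_flush_addr_range(), with KVM_GVA_CACHE_FLUSH_THRESHOLD presumably bounding when a partial flush is worthwhile.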
@ -96,42 +101,41 @@ typedef struct gva_caches_stat {
u64 sum_hit_time;
u64 sum_miss_pen;
u64 conflict_misses;
u64 cold_misses;
u64 flushes;
u64 flushes_gva;
u64 flushes_all;
u64 sum_flush_gva_time;
u64 sum_flush_all_time;
u64 fetches;
u64 update_fetches;
u64 conflict_fetches;
u64 cold_fetches;
u64 sum_fetch_time;
} gva_caches_stat_t;
extern gva_caches_stat_t caches_stat;
#define gva_cache_stat_lookup_start(start) \
({ \
#define gva_cache_stat_lookup_start() \
caches_stat.accesses++; \
start = ktime_get_ns(); \
})
u64 stop, start = ktime_get_ns()
#define gva_cache_stat_lookup_hit_end(start, stop) \
#define gva_cache_stat_lookup_hit_end() \
({ \
stop = ktime_get_ns(); \
caches_stat.hits++; \
caches_stat.sum_hit_time += (stop - start); \
})
#define gva_cache_stat_lookup_miss_start(start) \
#define gva_cache_stat_lookup_miss_start() \
({ \
caches_stat.misses++; \
start = ktime_get_ns(); \
})
#define gva_cache_stat_lookup_miss_stop(start, stop) \
({ \
stop = ktime_get_ns(); \
caches_stat.sum_miss_pen += (stop - start); \
})
#define gva_cache_stat_lookup_miss_stop(start, stop) \
#define gva_cache_stat_lookup_miss_stop() \
({ \
stop = ktime_get_ns(); \
caches_stat.sum_miss_pen += (stop - start); \
@ -151,30 +155,84 @@ extern gva_caches_stat_t caches_stat;
*is_conflict = conflict; \
})
#define gva_cache_stat_fetch() \
({ \
#define gva_cache_stat_fetch_start() \
caches_stat.accesses++; \
caches_stat.fetches++; \
})
u64 stop, start = ktime_get_ns()
#define gva_cache_stat_flush() \
#define gva_cache_stat_fetch_end() \
({ \
caches_stat.accesses++; \
caches_stat.flushes++; \
stop = ktime_get_ns(); \
caches_stat.sum_fetch_time += (stop - start); \
})
#else /* CONFIG_KVM_GVA_CACHE_STAT */
#define gva_cache_stat_fetch_replace(conflict) \
({ \
if (conflict) \
caches_stat.conflict_fetches++; \
else \
caches_stat.cold_fetches++; \
})
#define gva_cache_stat_lookup_start(start)
#define gva_cache_stat_lookup_hit_end(start, stop)
#define gva_cache_stat_lookup_miss_start(start)
#define gva_cache_stat_lookup_miss_stop(start, stop)
#define gva_cache_stat_lookup_miss_stop(start, stop)
#define gva_cache_stat_fetch_update() \
({ \
caches_stat.update_fetches++; \
})
#define gva_cache_stat_flush_gva_start() \
caches_stat.accesses++; \
caches_stat.flushes_gva++; \
u64 stop, start = ktime_get_ns()
#define gva_cache_stat_flush_gva_end() \
({ \
stop = ktime_get_ns(); \
caches_stat.sum_flush_gva_time += (stop - start); \
})
#define gva_cache_stat_flush_all_start() \
caches_stat.accesses++; \
caches_stat.flushes_all++; \
u64 stop, start = ktime_get_ns()
#define gva_cache_stat_flush_all_end() \
({ \
stop = ktime_get_ns(); \
caches_stat.sum_flush_all_time += (stop - start); \
})
#else /* !CONFIG_KVM_GVA_CACHE_STAT */
#define gva_cache_stat_lookup_start()
#define gva_cache_stat_lookup_hit_end()
#define gva_cache_stat_lookup_miss_start()
#define gva_cache_stat_lookup_miss_stop()
#define gva_cache_stat_lookup_miss_stop()
#define gva_cache_stat_lookup_miss_conflict(is_conflict)
#define gva_cache_stat_replace_conflict(is_conflict, conflict)
#define gva_cache_stat_fetch()
#define gva_cache_stat_flush()
#define gva_cache_stat_fetch_start()
#define gva_cache_stat_fetch_end()
#define gva_cache_stat_fetch_replace(conflict)
#define gva_cache_stat_fetch_update()
#define gva_cache_stat_flush_gva_start()
#define gva_cache_stat_flush_gva_end()
#define gva_cache_stat_flush_all_start()
#define gva_cache_stat_flush_all_end()
#endif /* !CONFIG_KVM_GVA_CACHE_STAT */
#ifdef CONFIG_KVM_GVA_CACHE_DEBUG
#define DbgGvaCache(fmt, args...) \
({ \
pr_info("%s(): " fmt, __func__, ##args); \
})
#else /* !CONFIG_KVM_GVA_CACHE_DEBUG */
#define DbgGvaCache(fmt, args...)
#endif /* !CONFIG_KVM_GVA_CACHE_DEBUG */
#endif /* CONFIG_KVM_GVA_CACHE_STAT */
#endif /* GVA_CACHE_H */
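
The reworked stat macros now open the timing scope themselves: each _start variant declares start/stop and samples ktime_get_ns(), so callers no longer pass the variables in. A sketch of the expected pairing (the lookup helpers are hypothetical):

/* Sketch only: instrumenting a lookup with the paired stat macros. */
static gpa_t example_timed_lookup(gva_cache_t *cache, gva_t gva)
{
	gpa_t gpa;

	gva_cache_stat_lookup_start();		/* counts access, starts timer */

	gpa = do_fast_lookup(cache, gva);	/* hypothetical fast path */
	if (!IS_INVALID_GPA(gpa)) {
		gva_cache_stat_lookup_hit_end();
		return gpa;
	}

	gva_cache_stat_lookup_miss_start();
	gpa = do_slow_walk(cache, gva);		/* hypothetical slow path */
	gva_cache_stat_lookup_miss_stop();

	return gpa;
}

With CONFIG_KVM_GVA_CACHE_STAT disabled the macros expand to nothing, so the same call sites compile either way.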


@ -250,6 +250,11 @@ static inline unsigned long generic_hypercall6(unsigned long nr,
#define KVM_HCALL_RETURN_FROM_FAST_SYSCALL 33
/* change return ip in user stack */
#define KVM_HCALL_SET_RETURN_USER_IP 34
/* fast guest kernel tagged memory copy */
#define KVM_HCALL_FAST_KERNEL_TAGGED_MEMORY_COPY 40
/* fast guest kernel tagged memory set */
#define KVM_HCALL_FAST_KERNEL_TAGGED_MEMORY_SET 41
typedef struct kvm_hw_stacks_flush {
unsigned long psp_lo;
@ -488,6 +493,22 @@ HYPERVISOR_set_return_user_ip(u64 gti, u64 ip, int flags)
return light_hypercall3(KVM_HCALL_SET_RETURN_USER_IP, gti,
ip, flags);
}
static inline unsigned long
HYPERVISOR_fast_kernel_tagged_memory_copy(void *dst, const void *src, size_t len,
unsigned long strd_opcode, unsigned long ldrd_opcode,
int prefetch)
{
return light_hypercall6(KVM_HCALL_FAST_KERNEL_TAGGED_MEMORY_COPY,
(unsigned long)dst, (unsigned long)src,
len, strd_opcode, ldrd_opcode, prefetch);
}
static inline unsigned long
HYPERVISOR_fast_kernel_tagged_memory_set(void *addr, u64 val, u64 tag, size_t len,
u64 strd_opcode)
{
return light_hypercall5(KVM_HCALL_FAST_KERNEL_TAGGED_MEMORY_SET,
(unsigned long)addr, val, tag, len, strd_opcode);
}
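
A hedged sketch of the caller side of the new light hypercall; the -EAGAIN retry mirrors the loop the memory-set wrapper in this patch already uses, and the opcodes and prefetch value are placeholders:

/* Sketch only: fast tagged copy with host-requested retry. */
static unsigned long example_fast_copy(void *dst, const void *src,
		size_t len, unsigned long strd_opcode,
		unsigned long ldrd_opcode)
{
	unsigned long ret;

	do {
		ret = HYPERVISOR_fast_kernel_tagged_memory_copy(dst, src,
				len, strd_opcode, ldrd_opcode, 0);
	} while (ret == -EAGAIN);

	return ret;
}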
/*
* KVM hypervisor (host) <-> guest generic hypercalls list
@ -626,10 +647,7 @@ HYPERVISOR_set_return_user_ip(u64 gti, u64 ip, int flags)
/* value and tag to global */
/* register */
#define KVM_HCALL_MOVE_TAGGED_GUEST_DATA 114 /* move data value from to */
#define KVM_HCALL_FAST_TAGGED_GUEST_MEMORY_COPY 115
/* fast tagged memory copy */
#define KVM_HCALL_FAST_TAGGED_GUEST_MEMORY_SET 116
/* fast tagged memory set */
#define KVM_HCALL_COPY_IN_USER_WITH_TAGS 115 /* tagged guest memory copy */
#define KVM_HCALL_FAST_TAGGED_MEMORY_COPY 117 /* fast tagged memory copy */
#define KVM_HCALL_FAST_TAGGED_MEMORY_SET 118 /* fast tagged memory set */
#define KVM_HCALL_SHUTDOWN 120 /* shutdown of guest */
@ -844,7 +862,7 @@ typedef struct vcpu_gmmu_info {
bool sep_virt_space; /* guest use separate PTs for */
/* OS and user virtual spaces */
bool pt_v6; /* guest PTs are of v6 format */
unsigned long mmu_cr; /* MMU control register */
e2k_mmu_cr_t mmu_cr; /* MMU control register */
unsigned long pid; /* MMU PID (context) register */
unsigned long trap_cellar; /* MMU trap cellar base */
unsigned long u_pptb; /* physical base of user (for */
@ -1212,20 +1230,11 @@ HYPERVISOR_move_tagged_guest_data(int word_size,
word_size, addr_from, addr_to);
}
static inline unsigned long
HYPERVISOR_fast_tagged_guest_memory_copy(void *dst, const void *src, size_t len,
unsigned long strd_opcode, unsigned long ldrd_opcode,
int prefetch)
HYPERVISOR_copy_in_user_with_tags(void __user *dst, const void __user *src,
unsigned long size)
{
return generic_hypercall6(KVM_HCALL_FAST_TAGGED_GUEST_MEMORY_COPY,
(unsigned long)dst, (unsigned long)src,
len, strd_opcode, ldrd_opcode, prefetch);
}
static inline unsigned long
HYPERVISOR_fast_tagged_guest_memory_set(void *addr, u64 val, u64 tag,
size_t len, u64 strd_opcode)
{
return generic_hypercall5(KVM_HCALL_FAST_TAGGED_GUEST_MEMORY_SET,
(unsigned long)addr, val, tag, len, strd_opcode);
return generic_hypercall3(KVM_HCALL_COPY_IN_USER_WITH_TAGS,
(unsigned long)dst, (unsigned long)src, size);
}
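
A hedged sketch of using the replacement copy-in-user hypercall above; the "bytes not copied" return convention is an assumption carried over from the kernel's usual copy_in_user() semantics:

/* Sketch only: tagged user-to-user copy routed through the host. */
static int example_copy_in_user(void __user *u_dst,
				const void __user *u_src, size_t size)
{
	unsigned long left;

	left = HYPERVISOR_copy_in_user_with_tags(u_dst, u_src, size);

	return left ? -EFAULT : 0;	/* return convention assumed */
}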
static inline unsigned long


@ -17,21 +17,21 @@ typedef struct guest_machdep {
/* none any guest */
} guest_machdep_t;
#else /* CONFIG_VIRTUALIZATION */
extern void kvm_guest_save_local_gregs_v2(struct local_gregs *gregs,
extern void kvm_guest_save_local_gregs_v3(struct local_gregs *gregs,
bool is_signal);
extern void kvm_guest_save_local_gregs_v5(struct local_gregs *gregs,
bool is_signal);
extern void kvm_guest_save_kernel_gregs_v2(kernel_gregs_t *gregs);
extern void kvm_guest_save_kernel_gregs_v3(kernel_gregs_t *gregs);
extern void kvm_guest_save_kernel_gregs_v5(kernel_gregs_t *gregs);
extern void kvm_guest_save_gregs_v2(struct global_regs *gregs);
extern void kvm_guest_save_gregs_v3(struct global_regs *gregs);
extern void kvm_guest_save_gregs_v5(struct global_regs *gregs);
extern void kvm_guest_save_gregs_dirty_bgr_v2(struct global_regs *gregs);
extern void kvm_guest_save_gregs_dirty_bgr_v3(struct global_regs *gregs);
extern void kvm_guest_save_gregs_dirty_bgr_v5(struct global_regs *gregs);
extern void kvm_guest_restore_gregs_v2(const global_regs_t *gregs);
extern void kvm_guest_restore_gregs_v3(const global_regs_t *gregs);
extern void kvm_guest_restore_gregs_v5(const global_regs_t *gregs);
extern void kvm_guest_restore_kernel_gregs_v2(global_regs_t *gregs);
extern void kvm_guest_restore_kernel_gregs_v3(global_regs_t *gregs);
extern void kvm_guest_restore_kernel_gregs_v5(global_regs_t *gregs);
extern void kvm_guest_restore_local_gregs_v2(const struct local_gregs *gregs,
extern void kvm_guest_restore_local_gregs_v3(const struct local_gregs *gregs,
bool is_signal);
extern void kvm_guest_restore_local_gregs_v5(const struct local_gregs *gregs,
bool is_signal);
@ -42,13 +42,38 @@ extern void kvm_guest_restore_local_gregs_v5(const struct local_gregs *gregs,
#include <asm/kvm/guest/machdep.h>
#endif /* CONFIG_PARAVIRT_GUEST || CONFIG_KVM_GUEST_KERNEL */
#ifndef CONFIG_KVM_GUEST_KERNEL
/* it is native host kernel with virtualization support */
/* or paravirtualized host and guest kernel */
typedef struct host_machdep {
u32 (*read_SH_CORE_MODE)(void);
void (*write_SH_CORE_MODE)(u32);
u64 (*read_SH_PSHTP)(void);
void (*write_SH_PSHTP)(u64);
u32 (*read_SH_PCSHTP)(void);
void (*write_SH_PCSHTP)(u32);
u64 (*read_SH_WD)(void);
void (*write_SH_WD)(u64);
u64 (*read_SH_OSR0)(void);
void (*write_SH_OSR0)(u64);
u64 (*read_VIRT_CTRL_MU)(void);
void (*write_VIRT_CTRL_MU)(u64);
u64 (*read_GID)(void);
void (*write_GID)(u64);
u64 (*read_GP_VPTB)(void);
void (*write_GP_VPTB)(u64);
u64 (*read_GP_PPTB)(void);
void (*write_GP_PPTB)(u64);
u64 (*read_SH_OS_PPTB)(void);
void (*write_SH_OS_PPTB)(u64);
u64 (*read_SH_OS_VPTB)(void);
void (*write_SH_OS_VPTB)(u64);
u64 (*read_SH_OS_VAB)(void);
void (*write_SH_OS_VAB)(u64);
u64 (*read_G_W_IMASK_MMU_CR)(void);
void (*write_G_W_IMASK_MMU_CR)(u64);
u64 (*read_SH_PID)(void);
void (*write_SH_PID)(u64);
u64 (*read_SH_MMU_CR)(void);
void (*write_SH_MMU_CR)(u64);
} host_machdep_t;
#endif /* ! CONFIG_KVM_GUEST_KERNEL */
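
The host_machdep table above is what the shadow-register accessors later in this diff dispatch through (machine.host.read_SH_MMU_CR() and friends). A hedged sketch of populating two entries with the NATIVE_*_MMUREG accessors visible in this patch; the setup function is hypothetical:

/* Sketch only: wiring host_machdep callbacks for a hardware-
 * virtualization host. */
static u64 hv_read_sh_mmu_cr(void)
{
	return NATIVE_GET_MMUREG(sh_mmu_cr);
}

static void hv_write_sh_mmu_cr(u64 val)
{
	NATIVE_SET_MMUREG(sh_mmu_cr, val);
}

static void example_setup_host_machdep(void)
{
	machine.host.read_SH_MMU_CR = hv_read_sh_mmu_cr;
	machine.host.write_SH_MMU_CR = hv_write_sh_mmu_cr;
}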
#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL)
/* it is native host kernel with virtualization support */


@ -36,10 +36,13 @@ typedef struct gmm_struct {
size_t total_released; /* total number of allocated and */
/* released SPs through list */
#endif /* CONFIG_GUEST_MM_SPT_LIST */
#ifdef CONFIG_KVM_HV_MMU
hpa_t root_hpa; /* physical base of root shadow PT */
/* for guest mm on host */
/* to access only to user space */
hpa_t gk_root_hpa; /* root shadow PT for guest kernel */
/* to access to user & kernel spaces */
gfn_t root_gpa; /* 'physical' base of guest root PT */
#ifdef CONFIG_KVM_HV_MMU
gpa_t os_pptb; /* guest kernel root PT physical base */
gpa_t u_pptb; /* guest user root PT physical base */
gva_t os_vptb; /* guest kernel root PT virtual base */


@ -9,6 +9,18 @@
#include <asm/mmu_fault.h>
#include <asm/kvm/pv-emul.h>
#ifdef CONFIG_VIRTUALIZATION
static inline bool is_guest_user_gva(gva_t gva)
{
return gva < GUEST_TASK_SIZE;
}
static inline bool is_guest_kernel_gva(gva_t gva)
{
return gva >= GUEST_PAGE_OFFSET && gva < HOST_PAGE_OFFSET;
}
static inline bool is_ss(struct kvm_vcpu *vcpu)
{
return false;
@ -185,14 +197,33 @@ kvm_set_space_type_spt_u_root(struct kvm_vcpu *vcpu, hpa_t root)
kvm_set_space_type_spt_root(vcpu, root, true);
}
static inline hpa_t
kvm_get_space_type_spt_gk_root(struct kvm_vcpu *vcpu)
{
return vcpu->arch.mmu.get_vcpu_sh_gk_pptb(vcpu);
}
static inline void
kvm_set_space_type_spt_gk_root(struct kvm_vcpu *vcpu, hpa_t gk_root)
{
vcpu->arch.mmu.set_vcpu_sh_gk_pptb(vcpu, gk_root);
}
static inline void
kvm_set_vcpu_spt_u_pptb_context(struct kvm_vcpu *vcpu)
{
vcpu->arch.mmu.set_vcpu_u_pptb_context(vcpu);
}
static inline hpa_t
kvm_get_space_addr_spt_root(struct kvm_vcpu *vcpu, gva_t gva)
{
if (!vcpu->arch.mmu.sep_virt_space) {
return vcpu->arch.mmu.get_vcpu_sh_u_pptb(vcpu);
} else if (unlikely(gva >= vcpu->arch.mmu.get_vcpu_os_vab(vcpu))) {
return vcpu->arch.mmu.get_vcpu_sh_os_pptb(vcpu);
if (likely(is_guest_user_gva(gva))) {
if (!vcpu->arch.mmu.sep_virt_space) {
return vcpu->arch.mmu.get_vcpu_sh_u_pptb(vcpu);
} else if (unlikely(gva >= vcpu->arch.mmu.get_vcpu_os_vab(vcpu))) {
return vcpu->arch.mmu.get_vcpu_sh_os_pptb(vcpu);
} else {
return vcpu->arch.mmu.get_vcpu_sh_u_pptb(vcpu);
}
} else {
return vcpu->arch.mmu.get_vcpu_sh_u_pptb(vcpu);
return kvm_mmu_get_init_gmm_root_hpa(vcpu->kvm);
}
}
static inline hpa_t
@ -293,6 +324,9 @@ kvm_get_space_addr_spt_vptb(struct kvm_vcpu *vcpu, gva_t gva)
#define INVALID_GPA ((gpa_t)E2K_INVALID_PAGE)
#define IS_INVALID_GPA(gpa) ((gpa) == INVALID_GPA)
#define INVALID_GVA ((gva_t)E2K_INVALID_PAGE)
#define IS_INVALID_GVA(gva) ((gva) == INVALID_GVA)
static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
@ -300,6 +334,11 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
return (struct kvm_mmu_page *)page_private(page);
}
static inline bool spte_same(pgprot_t pgd_a, pgprot_t pgd_b)
{
return pgprot_val(pgd_a) == pgprot_val(pgd_b);
}
extern void kvm_get_spt_translation(struct kvm_vcpu *vcpu, e2k_addr_t address,
pgdval_t *pgd, pudval_t *pud, pmdval_t *pmd,
pteval_t *pte, int *pt_level);
@ -363,8 +402,6 @@ extern int kvm_pv_mmu_instr_page_fault(struct kvm_vcpu *vcpu,
extern int kvm_pv_mmu_aau_page_fault(struct kvm_vcpu *vcpu,
struct pt_regs *regs, e2k_addr_t address,
tc_cond_t cond, unsigned int aa_no);
extern long kvm_hv_mmu_page_fault(struct kvm_vcpu *vcpu, struct pt_regs *regs,
intc_info_mu_t *intc_info_mu);
extern int kvm_mmu_instr_page_fault(struct kvm_vcpu *vcpu, gva_t address,
bool async_instr, u32 error_code);
#else /* ! CONFIG_KVM_SHADOW_PT_ENABLE */
@ -409,8 +446,8 @@ kvm_mmu_instr_page_fault(struct kvm_vcpu *vcpu, gva_t address,
#endif /* CONFIG_KVM_SHADOW_PT_ENABLE */
extern int kvm_guest_addr_to_host(void **addr);
extern void *kvm_guest_ptr_to_host_ptr(void *guest_ptr, int size,
bool need_inject);
extern void *kvm_guest_ptr_to_host_ptr(void *guest_ptr, bool is_write,
int size, bool need_inject);
#ifdef CONFIG_KVM_HOST_MODE
/* it is native host kernel with virtualization support */
@ -425,15 +462,17 @@ guest_addr_to_host(void **addr, const pt_regs_t *regs)
return kvm_guest_addr_to_host(addr);
}
static inline void *
guest_ptr_to_host(void *ptr, int size, const pt_regs_t *regs)
guest_ptr_to_host(void *ptr, bool is_write, int size, const pt_regs_t *regs)
{
if (likely(!host_test_intc_emul_mode(regs))) {
/* the faulted address is not a paravirtualized guest one */
return native_guest_ptr_to_host(ptr, size);
}
return kvm_guest_ptr_to_host_ptr(ptr, size, false);
return kvm_guest_ptr_to_host_ptr(ptr, is_write, size, false);
}
#endif /* CONFIG_KVM_HOST_MODE */
#endif /* CONFIG_VIRTUALIZATION */
#endif /* __E2K_KVM_HOST_MMU_H */


@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/e2k_api.h>
#include <asm/machdep.h>
#include <asm/mmu_regs_types.h>
#include <asm/mmu_regs_access.h>
@ -21,8 +22,11 @@
#ifndef __ASSEMBLY__
#define READ_VIRT_CTRL_MU_REG_VALUE() NATIVE_GET_MMUREG(virt_ctrl_mu)
#define WRITE_VIRT_CTRL_MU_REG_VALUE(val) NATIVE_SET_MMUREG(virt_ctrl_mu, (val))
#define READ_VIRT_CTRL_MU_REG() \
((virt_ctrl_mu_t) { .word = NATIVE_GET_MMUREG(virt_ctrl_mu) })
#define WRITE_VIRT_CTRL_MU_REG_VALUE(v) NATIVE_SET_MMUREG(virt_ctrl_mu, (v))
#define WRITE_VIRT_CTRL_MU_REG(v) \
NATIVE_SET_MMUREG(virt_ctrl_mu, ((virt_ctrl_mu_t) (v)).word)
#define READ_G_W_IMASK_MMU_CR_REG_VALUE() \
NATIVE_GET_MMUREG(g_w_imask_mmu_cr)
@ -200,119 +204,100 @@ kvm_get_intc_info_mu_is_updated(struct kvm_vcpu *vcpu)
#define READ_SH_MMU_CR_REG_VALUE() NATIVE_GET_MMUREG(sh_mmu_cr)
#define WRITE_SH_MMU_CR_REG_VALUE(val) NATIVE_SET_MMUREG(sh_mmu_cr, (val))
extern unsigned long read_VIRT_CTRL_MU_reg_value(void);
extern void write_VIRT_CTRL_MU_reg_value(unsigned long value);
extern unsigned long read_GID_reg_value(void);
extern void write_GID_reg_value(unsigned long value);
extern unsigned long read_GP_VPTB_reg_value(void);
extern void write_GP_VPTB_reg_value(unsigned long value);
extern unsigned long read_GP_PPTB_reg_value(void);
extern void write_GP_PPTB_reg_value(unsigned long value);
extern unsigned long read_SH_OS_PPTB_reg_value(void);
extern void write_SH_OS_PPTB_reg_value(unsigned long value);
extern unsigned long read_SH_OS_VPTB_reg_value(void);
extern void write_SH_OS_VPTB_reg_value(unsigned long value);
extern unsigned long read_SH_OS_VAB_reg_value(void);
extern void write_SH_OS_VAB_reg_value(unsigned long value);
extern unsigned long read_SH_PID_reg_value(void);
extern void write_SH_PID_reg_value(unsigned long value);
extern unsigned long read_SH_MMU_CR_reg_value(void);
extern void write_SH_MMU_CR_reg_value(unsigned long value);
extern unsigned long read_G_W_IMASK_MMU_CR_reg_value(void);
extern void write_G_W_IMASK_MMU_CR_reg_value(unsigned long value);
#ifdef CONFIG_VIRTUALIZATION
static inline virt_ctrl_mu_t read_VIRT_CTRL_MU_reg(void)
{
virt_ctrl_mu_t virt_ctrl;
virt_ctrl.VIRT_CTRL_MU_reg = read_VIRT_CTRL_MU_reg_value();
virt_ctrl.VIRT_CTRL_MU_reg = machine.host.read_VIRT_CTRL_MU();
return virt_ctrl;
}
static inline void write_VIRT_CTRL_MU_reg(virt_ctrl_mu_t virt_ctrl)
{
write_VIRT_CTRL_MU_reg_value(virt_ctrl.VIRT_CTRL_MU_reg);
machine.host.write_VIRT_CTRL_MU(virt_ctrl.VIRT_CTRL_MU_reg);
}
static inline unsigned int read_GID_reg(void)
{
return read_GID_reg_value();
return machine.host.read_GID();
}
static inline void write_GID_reg(unsigned int mmu_gid)
{
write_GID_reg_value(MMU_GID(mmu_gid));
machine.host.write_GID(MMU_GID(mmu_gid));
}
static inline mmu_reg_t read_SH_MMU_CR_reg(void)
static inline e2k_mmu_cr_t read_SH_MMU_CR_reg(void)
{
return __mmu_reg(read_SH_MMU_CR_reg_value());
return (e2k_mmu_cr_t) { .word = machine.host.read_SH_MMU_CR() };
}
static inline void write_SH_MMU_CR_reg(mmu_reg_t mmu_cr)
static inline void write_SH_MMU_CR_reg(e2k_mmu_cr_t mmu_cr)
{
write_SH_MMU_CR_reg_value(mmu_reg_val(mmu_cr));
machine.host.write_SH_MMU_CR(AW(mmu_cr));
}
static inline mmu_reg_t read_G_W_IMASK_MMU_CR_reg(void)
static inline e2k_mmu_cr_t read_G_W_IMASK_MMU_CR_reg(void)
{
return __mmu_reg(read_G_W_IMASK_MMU_CR_reg_value());
return (e2k_mmu_cr_t) { .word = machine.host.read_G_W_IMASK_MMU_CR() };
}
static inline void write_G_W_IMASK_MMU_CR_reg(mmu_reg_t mmu_cr_mask)
static inline void write_G_W_IMASK_MMU_CR_reg(e2k_mmu_cr_t mmu_cr_mask)
{
write_G_W_IMASK_MMU_CR_reg_value(mmu_reg_val(mmu_cr_mask));
machine.host.write_G_W_IMASK_MMU_CR(AW(mmu_cr_mask));
}
static inline unsigned int read_SH_PID_reg(void)
{
return read_SH_PID_reg_value();
return machine.host.read_SH_PID();
}
static inline void write_SH_PID_reg(unsigned int mmu_pid)
{
write_SH_PID_reg_value(MMU_PID(mmu_pid));
machine.host.write_SH_PID(MMU_PID(mmu_pid));
}
static inline e2k_addr_t read_SH_OS_PPTB_reg(void)
{
return read_SH_OS_PPTB_reg_value();
return machine.host.read_SH_OS_PPTB();
}
static inline void write_SH_OS_PPTB_reg(e2k_addr_t phys_addr)
{
write_SH_OS_PPTB_reg_value(MMU_ADDR_TO_PPTB(phys_addr));
machine.host.write_SH_OS_PPTB(MMU_ADDR_TO_PPTB(phys_addr));
}
static inline e2k_addr_t read_SH_OS_VPTB_reg(void)
{
return read_SH_OS_VPTB_reg_value();
return machine.host.read_SH_OS_VPTB();
}
static inline void write_SH_OS_VPTB_reg(e2k_addr_t virt_addr)
{
write_SH_OS_VPTB_reg_value(MMU_ADDR_TO_VPTB(virt_addr));
machine.host.write_SH_OS_VPTB(MMU_ADDR_TO_VPTB(virt_addr));
}
static inline e2k_addr_t read_GP_PPTB_reg(void)
{
return read_GP_PPTB_reg_value();
return machine.host.read_GP_PPTB();
}
static inline void write_GP_PPTB_reg(e2k_addr_t phys_addr)
{
write_GP_PPTB_reg_value(MMU_ADDR_TO_PPTB(phys_addr));
machine.host.write_GP_PPTB(MMU_ADDR_TO_PPTB(phys_addr));
}
static inline e2k_addr_t read_GP_VPTB_reg(void)
{
return read_GP_VPTB_reg_value();
return machine.host.read_GP_VPTB();
}
static inline void write_GP_VPTB_reg(e2k_addr_t virt_addr)
{
write_GP_VPTB_reg_value(MMU_ADDR_TO_VPTB(virt_addr));
machine.host.write_GP_VPTB(MMU_ADDR_TO_VPTB(virt_addr));
}
static inline e2k_addr_t read_SH_OS_VAB_reg(void)
{
return read_SH_OS_VAB_reg_value();
return machine.host.read_SH_OS_VAB();
}
static inline void write_SH_OS_VAB_reg(e2k_addr_t virt_addr)
{
write_SH_OS_VAB_reg_value(MMU_ADDR_TO_VAB(virt_addr));
machine.host.write_SH_OS_VAB(MMU_ADDR_TO_VAB(virt_addr));
}
#endif /* CONFIG_VIRTUALIZATION */
#endif /* ! __ASSEMBLY__ */
#endif /* _E2K_KVM_MMU_HV_REGS_ACCESS_H_ */


@ -133,66 +133,6 @@ typedef union virt_ctrl_mu {
#define VIRT_CTRL_MU_sh_pt_en sh_pt_en
#define VIRT_CTRL_MU_reg word /* [63: 0] - entire register */
/* Bits mask of VIRT_CTRL_MU fields and flags */
#define VIRT_CTRL_MU_ENV_C_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_evn_c = -1, }.word)
#define VIRT_CTRL_MU_RR_MMU_CR_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_mmu_cr = 1, }.word)
#define VIRT_CTRL_MU_RR_U_PPTB_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_pptb = 1, }.word)
#define VIRT_CTRL_MU_RR_U_VPTB_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_vptb = 1, }.word)
#define VIRT_CTRL_MU_RR_APIC_BASE_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_apic_base = 1, }.word)
#define VIRT_CTRL_MU_RR_MTRR_PAT_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_mtrr_pat = 1, }.word)
#define VIRT_CTRL_MU_RR_PH_PCI_B_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_ph_pci_b = 1, }.word)
#define VIRT_CTRL_MU_RR_DBG_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_dbg = 1, }.word)
#define VIRT_CTRL_MU_RR_DBG1_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_dbg1 = 1, }.word)
#define VIRT_CTRL_MU_RW_MMU_CR_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_mmu_cr = 1, }.word)
#define VIRT_CTRL_MU_RW_U_PPTB_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_pptb = 1, }.word)
#define VIRT_CTRL_MU_RW_U_VPTB_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_vptb = 1, }.word)
#define VIRT_CTRL_MU_RW_APIC_BASE_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_apic_base = 1, }.word)
#define VIRT_CTRL_MU_RW_MTRR_PAT_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_mtrr_pat = 1, }.word)
#define VIRT_CTRL_MU_RW_PH_PCI_B_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_ph_pci_b = 1, }.word)
#define VIRT_CTRL_MU_RW_DBG_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_dbg = 1, }.word)
#define VIRT_CTRL_MU_RW_DBG1_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_dbg1 = 1, }.word)
#define VIRT_CTRL_MU_PMA_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_pma = 1, }.word)
#define VIRT_CTRL_MU_FL_DC_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_dc = 1, }.word)
#define VIRT_CTRL_MU_FL_DCL_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_dcl = 1, }.word)
#define VIRT_CTRL_MU_FL_IC_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_ic = 1, }.word)
#define VIRT_CTRL_MU_FL_ICL_U_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_icl_u = 1, }.word)
#define VIRT_CTRL_MU_FL_ICL_P_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_icl_p = 1, }.word)
#define VIRT_CTRL_MU_FL_TLB_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_tlb = 1, }.word)
#define VIRT_CTRL_MU_FL_TLBPG_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_tlbpg = 1, }.word)
#define VIRT_CTRL_MU_FL_TLB2PG_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_tlb2pg = 1, }.word)
#define VIRT_CTRL_MU_PRB_ENTRY_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_prb_entry = 1, }.word)
#define VIRT_CTRL_MU_GP_PT_EN_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_gp_pt_en = 1, }.word)
#define VIRT_CTRL_MU_SH_PT_EN_MASK \
((virt_ctrl_mu_t) { .VIRT_CTRL_MU_sh_pt_en = 1, }.word)
typedef union {
struct {
u64 event_code : 8;


@ -1,11 +1,15 @@
#ifndef MMU_PTE_H
#define MMU_PTE_H
/* uwx (u - user mode, w - writable, x executable) */
/* puwx (u - user mode, w - writable, x - executable) */
/* (p - user privileged access to hardware stacks) */
#define ACC_EXEC_MASK 0x1
#define ACC_WRITE_MASK 0x2
#define ACC_USER_MASK 0x4
#define ACC_PRIV_MASK 0x8 /* only for user privileged access: hw stacks */
#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
/* guest user access can include a special case: privileged hardware stacks */
#define ACC_USER_ALL (ACC_ALL | ACC_PRIV_MASK)
/* page table directories are always privileged & not executable */
#define ACC_PT_DIR (ACC_WRITE_MASK)
@ -26,6 +30,9 @@
#define PFERR_READ_PROT_BIT 14
#define PFERR_IS_UNMAPPED_BIT 15
#define PFERR_FAPB_BIT 16
#define PFERR_HW_ACCESS_BIT 17
#define PFERR_USER_ADDR_BIT 18
#define PFERR_ILLEGAL_PAGE_BIT 19
#define PFERR_ACCESS_SIZE_BIT 24
@ -46,6 +53,9 @@
#define PFERR_READ_PROT_MASK (1U << PFERR_READ_PROT_BIT)
#define PFERR_IS_UNMAPPED_MASK (1U << PFERR_IS_UNMAPPED_BIT)
#define PFERR_FAPB_MASK (1U << PFERR_FAPB_BIT)
#define PFERR_HW_ACCESS_MASK (1U << PFERR_HW_ACCESS_BIT)
#define PFERR_USER_ADDR_MASK (1U << PFERR_USER_ADDR_BIT)
#define PFERR_ILLEGAL_PAGE_MASK (1U << PFERR_ILLEGAL_PAGE_BIT)
#define PFERR_ACCESS_SIZE_MASK (~0U << PFERR_ACCESS_SIZE_BIT)
@ -55,4 +65,9 @@
(((pfres) & ~PFERR_ACCESS_SIZE_MASK) | \
((size) << PFERR_ACCESS_SIZE_BIT))
typedef struct gw_attr {
int level;
u32 access;
} gw_attr_t;
#endif /* MMU_PTE_H */
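
For reference, a small example of composing the new fault bits with the access-size field defined above; the chosen bits and the 8-byte size are illustrative:

/* Sketch only: fault-result word for a privileged hardware-stack
 * access at a user address, carrying an 8-byte access size. */
static u32 example_make_pfres(void)
{
	u32 pfres = PFERR_HW_ACCESS_MASK | PFERR_USER_ADDR_MASK;

	pfres = (pfres & ~PFERR_ACCESS_SIZE_MASK) |
			(8U << PFERR_ACCESS_SIZE_BIT);

	return pfres;
}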


@ -276,6 +276,19 @@ KVM_FLUSH_DCACHE_LINE(e2k_addr_t virt_addr)
}
}
/*
* Read DCACHE L1 fault_reg register
*/
static inline u64
KVM_READ_L1_FAULT_REG(void)
{
if (IS_HV_GM()) {
return NATIVE_READ_L1_FAULT_REG();
} else {
return kvm_read_dcache_l1_fault_reg();
}
}
/*
* Clear DCACHE L1 set
*/
@ -641,6 +654,14 @@ static inline void FLUSH_DCACHE_LINE_OFFSET(e2k_addr_t virt_addr, size_t offset)
KVM_FLUSH_DCACHE_LINE(virt_addr + offset);
}
/*
* Read DCACHE L1 fault_reg register
*/
static inline u64
READ_L1_FAULT_REG(void)
{
return KVM_READ_L1_FAULT_REG();
}
/*
* Clear DCACHE L1 set


@ -31,7 +31,7 @@
/* numbers of PTE's bits */
#define _PAGE_P_BIT_TDP _PAGE_P_BIT_V6 /* Present */
#define _PAGE_W_BIT_TDP _PAGE_W_BIT_V6 /* Writable */
#define _PAGE_A_HW_BIT_TDP _PAGE_A_HW_BIT_V6 /* page Accessed */
#define _PAGE_A_BIT_TDP _PAGE_A_BIT_V6 /* page Accessed */
#define _PAGE_D_BIT_TDP _PAGE_D_BIT_V6 /* page Dirty */
#define _PAGE_HUGE_BIT_TDP _PAGE_HUGE_BIT_V6 /* huge Page Size */
#define _PAGE_MTCR_SHIFT_TDP 8 /* Memory Type */
@ -47,7 +47,7 @@
#define _PAGE_P_TDP (1ULL << _PAGE_P_BIT_TDP)
#define _PAGE_W_TDP (1ULL << _PAGE_W_BIT_TDP)
#define _PAGE_A_HW_TDP (1ULL << _PAGE_A_HW_BIT_TDP)
#define _PAGE_A_TDP (1ULL << _PAGE_A_BIT_TDP)
#define _PAGE_D_TDP (1ULL << _PAGE_D_BIT_TDP)
#define _PAGE_HUGE_TDP (1ULL << _PAGE_HUGE_BIT_TDP)
#define _PAGE_MTCR_TDP \
@ -82,8 +82,8 @@ covert_uni_pte_flags_to_pte_val_tdp(const uni_pteval_t uni_flags)
pte_flags |= (_PAGE_P_TDP);
if (uni_flags & UNI_PAGE_WRITE)
pte_flags |= (_PAGE_W_TDP);
if (uni_flags & UNI_PAGE_HW_ACCESS)
pte_flags |= (_PAGE_A_HW_TDP);
if (uni_flags & UNI_PAGE_ACCESSED)
pte_flags |= (_PAGE_A_TDP);
if (uni_flags & UNI_PAGE_DIRTY)
pte_flags |= (_PAGE_D_TDP);
if (uni_flags & UNI_PAGE_HUGE)


@ -199,12 +199,27 @@ void go2guest(long fn, bool priv_guest);
#define GET_GUEST_VCPU_STATE_POINTER(__vcpu) \
({ \
e2k_addr_t vs = (e2k_addr_t)((__vcpu)->arch.vcpu_state); \
(gva_t)((__vcpu)->arch.guest_vcpu_state); \
})
#define TO_GUEST_VCPU_STATE_PHYS_POINTER(__vcpu) \
({ \
gpa_t vs = (gpa_t)((__vcpu)->arch.vcpu_state); \
\
vs = kvm_vcpu_hva_to_gpa(__vcpu, vs); \
if (is_paging(__vcpu)) \
vs = (e2k_addr_t)__guest_va(vs); \
vs; \
(gva_t)vs; \
})
#define TO_GUEST_VCPU_STATE_POINTER(__vcpu) \
({ \
gpa_t vs; \
\
vs = TO_GUEST_VCPU_STATE_PHYS_POINTER(__vcpu); \
if (!IS_INVALID_GPA(vs)) { \
if (is_paging(__vcpu)) \
vs = (gpa_t)__guest_va(vs); \
} \
(gva_t)vs; \
})
#define INIT_HOST_VCPU_STATE_GREG_COPY(__ti, __vcpu) \


@ -217,6 +217,8 @@ static inline void atomic_load_osgd_to_gd(void)
#define is_call_from_guest_kernel_IP(cr0_hi, cr1_lo, ignore_IP) \
((!(ignore_IP)) ? is_call_from_guest_kernel(cr0_hi, cr1_lo) : \
from_guest_kernel_mode(cr1_lo))
#define call_from_guest_kernel(regs) \
is_call_from_guest_kernel((regs)->crs.cr0_hi, (regs)->crs.cr1_lo)
#define is_trap_on_user(regs, __HOST__) \
((__HOST__) ? \
@ -458,6 +460,7 @@ check_is_user_address(struct task_struct *task, e2k_addr_t address)
from_kernel_mode(cr1_lo))
#define is_call_from_guest_kernel(cr0_hi, cr1_lo) false
#define is_call_from_guest_kernel_IP(cr0_hi, cr1_lo, ignore_IP) false
#define call_from_guest_kernel(regs) false
#define is_call_from_user(cr0_hi, cr1_lo, __HOST__) \
is_call_from_host_user(cr0_hi, cr1_lo)


@ -10,6 +10,8 @@
#include <asm/ptrace.h>
enum restore_caller;
#ifdef CONFIG_VIRTUALIZATION
static __always_inline void
kvm_set_intc_emul_flag(pt_regs_t *regs)
@ -126,7 +128,8 @@ extern void insert_pv_vcpu_sigreturn(struct kvm_vcpu *vcpu,
extern void kvm_emulate_pv_vcpu_intc(struct thread_info *ti, pt_regs_t *regs,
trap_pt_regs_t *trap);
extern void return_from_pv_vcpu_intc(struct thread_info *ti, pt_regs_t *regs);
extern void return_from_pv_vcpu_intc(struct thread_info *ti, pt_regs_t *regs,
enum restore_caller from);
static inline bool kvm_vcpu_in_hypercall(struct kvm_vcpu *vcpu)
{
@ -256,6 +259,14 @@ pv_vcpu_set_active_gmm(struct kvm_vcpu *vcpu, gmm_struct_t *gmm)
KVM_BUG_ON(true);
}
}
static inline hpa_t
kvm_mmu_get_init_gmm_root_hpa(struct kvm *kvm)
{
gmm_struct_t *init_gmm = pv_mmu_get_init_gmm(kvm);
GTI_BUG_ON(init_gmm == NULL);
return init_gmm->root_hpa;
}
static inline mm_context_t *pv_vcpu_get_gmm_context(struct kvm_vcpu *vcpu)
{


@ -28,35 +28,35 @@
pr_info("%s(): " fmt, __func__, ##args); \
})
#define DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V2(gregs) \
DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V2, GUEST_GREGS_MASK)
#define DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V3(gregs) \
DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V3, GUEST_GREGS_MASK)
#define DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V5(gregs) \
DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V5, GUEST_GREGS_MASK)
#define DO_SAVE_GREGS_EXCEPT_HOST_V2(gregs) \
DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V2, GUEST_GREGS_MASK)
#define DO_SAVE_GREGS_EXCEPT_HOST_V3(gregs) \
DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V3, GUEST_GREGS_MASK)
#define DO_SAVE_GREGS_EXCEPT_HOST_V5(gregs) \
DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V5, GUEST_GREGS_MASK)
#define DO_SAVE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V2(gregs) \
DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V2, \
#define DO_SAVE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V3(gregs) \
DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V3, \
GLOBAL_GREGS_USER_MASK | GUEST_GREGS_MASK)
#define DO_SAVE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V5(gregs) \
DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V5, \
GLOBAL_GREGS_USER_MASK | GUEST_GREGS_MASK)
#define DO_RESTORE_GUEST_GREGS_EXCEPT_KERNEL_V2(gregs) \
DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V2, GUEST_GREGS_MASK)
#define DO_RESTORE_GUEST_GREGS_EXCEPT_KERNEL_V3(gregs) \
DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V3, GUEST_GREGS_MASK)
#define DO_RESTORE_GUEST_GREGS_EXCEPT_KERNEL_V5(gregs) \
DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V5, GUEST_GREGS_MASK)
#define DO_RESTORE_GREGS_EXCEPT_HOST_V2(gregs) \
DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V2, GUEST_GREGS_MASK)
#define DO_RESTORE_GREGS_EXCEPT_HOST_V3(gregs) \
DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V3, GUEST_GREGS_MASK)
#define DO_RESTORE_GREGS_EXCEPT_HOST_V5(gregs) \
DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V5, GUEST_GREGS_MASK)
#define DO_RESTORE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V2(gregs) \
DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V2, \
#define DO_RESTORE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V3(gregs) \
DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V3, \
GLOBAL_GREGS_USER_MASK | GUEST_GREGS_MASK)
#define DO_RESTORE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V5(gregs) \
DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V5, \
@ -69,9 +69,9 @@
(machine.host.restore_guest_gregs(gregs))
#elif CONFIG_E2K_ISET_VER < 5
#define SAVE_GUEST_GREGS_EXCEPT_KERNEL(gregs) \
DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V2((gregs)->g)
DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V3((gregs)->g)
#define RESTORE_GUEST_GREGS_EXCEPT_KERNEL(gregs) \
DO_RESTORE_GUEST_GREGS_EXCEPT_KERNEL_V2((gregs)->g)
DO_RESTORE_GUEST_GREGS_EXCEPT_KERNEL_V3((gregs)->g)
#else /* CONFIG_E2K_ISET_VER >= 5 */
#define SAVE_GUEST_GREGS_EXCEPT_KERNEL(gregs) \
DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V5((gregs)->g)

View File

@ -9,6 +9,7 @@
#include <asm/mmu_regs_access.h>
#include <asm/gregs.h>
#include <asm/regs_state.h>
#include <asm/processor.h>
#include <asm/kvm/cpu_hv_regs_access.h>
#include <asm/kvm/mmu_hv_regs_access.h>
#include <asm/pgd.h>
@ -189,7 +190,36 @@ static inline void kvm_switch_cu_regs(struct kvm_sw_cpu_context *sw_ctxt)
sw_ctxt->cutd = cutd;
}
static inline void kvm_switch_mmu_pt_regs(struct kvm_sw_cpu_context *sw_ctxt)
static inline void kvm_add_guest_kernel_map(struct kvm_vcpu *vcpu, hpa_t root)
{
pgprot_t *src_root, *dst_root;
int start, end, index;
dst_root = (pgprot_t *)root;
src_root = (pgprot_t *)kvm_mmu_get_init_gmm_root(vcpu->kvm);
start = GUEST_KERNEL_PGD_PTRS_START;
end = GUEST_KERNEL_PGD_PTRS_END;
for (index = start; index < end; index++) {
dst_root[index] = src_root[index];
}
}
static inline void kvm_clear_guest_kernel_map(struct kvm_vcpu *vcpu, hpa_t root)
{
pgprot_t *dst_root;
int start, end, index;
dst_root = (pgprot_t *)root;
start = GUEST_KERNEL_PGD_PTRS_START;
end = GUEST_KERNEL_PGD_PTRS_END;
for (index = start; index < end; index++) {
dst_root[index] = __pgprot(0);
}
}
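/*
 * Illustration (not from this header): the pair above mirrors or clears
 * a fixed window of top-level PGD entries, so a user root can gain or
 * lose the guest kernel mapping wholesale. Standalone model; the
 * 512-entry root and the window bounds are assumed for the sketch.
 */
#include <stdio.h>

#define PGD_ENTRIES_MODEL	512
#define GK_PGD_START_MODEL	384	/* assumed guest-kernel window */
#define GK_PGD_END_MODEL	512

typedef unsigned long pgd_model_t;

static void add_gk_map(pgd_model_t *dst, const pgd_model_t *src)
{
	for (int i = GK_PGD_START_MODEL; i < GK_PGD_END_MODEL; i++)
		dst[i] = src[i];	/* mirror kernel window from init root */
}

static void clear_gk_map(pgd_model_t *dst)
{
	for (int i = GK_PGD_START_MODEL; i < GK_PGD_END_MODEL; i++)
		dst[i] = 0;	/* drop kernel window before guest user runs */
}

int main(void)
{
	static pgd_model_t init_root[PGD_ENTRIES_MODEL];
	static pgd_model_t user_root[PGD_ENTRIES_MODEL];

	init_root[GK_PGD_START_MODEL] = 0xdeadUL;
	add_gk_map(user_root, init_root);
	printf("mirrored: 0x%lx\n", user_root[GK_PGD_START_MODEL]);
	clear_gk_map(user_root);
	printf("cleared:  0x%lx\n", user_root[GK_PGD_START_MODEL]);
	return 0;
}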
static inline void kvm_switch_hv_mmu_pt_regs(struct kvm_sw_cpu_context *sw_ctxt)
{
mmu_reg_t u_pptb;
mmu_reg_t u_vptb;
@ -204,6 +234,57 @@ static inline void kvm_switch_mmu_pt_regs(struct kvm_sw_cpu_context *sw_ctxt)
sw_ctxt->sh_u_vptb = u_vptb;
}
static inline void
kvm_switch_pv_mmu_pt_regs_to_guest(struct kvm_sw_cpu_context *sw_ctxt,
struct thread_info *ti)
{
struct kvm_vcpu *vcpu = ti->vcpu;
mmu_reg_t u_pptb, u_vptb, root;
u_pptb = NATIVE_READ_MMU_U_PPTB_REG();
u_vptb = NATIVE_READ_MMU_U_VPTB_REG();
if (likely(test_ti_status_flag(ti, TS_HOST_TO_GUEST_USER))) {
root = kvm_get_space_type_spt_u_root(vcpu);
KVM_BUG_ON(is_paging(vcpu) &&
root != pv_vcpu_get_gmm(ti->vcpu)->root_hpa);
} else {
root = kvm_get_space_type_spt_gk_root(vcpu);
KVM_BUG_ON(is_paging(vcpu) &&
root != pv_vcpu_get_gmm(vcpu)->gk_root_hpa);
}
NATIVE_WRITE_MMU_U_PPTB_REG(root);
NATIVE_WRITE_MMU_U_VPTB_REG(sw_ctxt->sh_u_vptb);
sw_ctxt->sh_u_pptb = u_pptb;
sw_ctxt->sh_u_vptb = u_vptb;
}
static inline void
kvm_switch_pv_mmu_pt_regs_to_host(struct kvm_sw_cpu_context *sw_ctxt,
struct thread_info *ti)
{
struct kvm_vcpu *vcpu = ti->vcpu;
mmu_reg_t u_pptb, u_vptb;
u_vptb = NATIVE_READ_MMU_U_VPTB_REG();
if (likely(test_ti_status_flag(ti, TS_HOST_TO_GUEST_USER))) {
u_pptb = kvm_get_space_type_spt_u_root(vcpu);
KVM_BUG_ON(is_paging(vcpu) &&
u_pptb != pv_vcpu_get_gmm(ti->vcpu)->root_hpa);
} else {
u_pptb = kvm_get_space_type_spt_gk_root(vcpu);
KVM_BUG_ON(is_paging(vcpu) &&
u_pptb != pv_vcpu_get_gmm(vcpu)->gk_root_hpa);
}
NATIVE_WRITE_MMU_U_PPTB_REG(sw_ctxt->sh_u_pptb);
NATIVE_WRITE_MMU_U_VPTB_REG(sw_ctxt->sh_u_vptb);
sw_ctxt->sh_u_pptb = u_pptb;
sw_ctxt->sh_u_vptb = u_vptb;
}
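/*
 * Illustration (not from this header): both switch functions above obey
 * one swap discipline -- read the live U_PPTB/U_VPTB, install the other
 * world's values, and stash the outgoing ones in sw_ctxt so the next
 * switch restores them. A standalone model of that invariant:
 */
#include <assert.h>

struct mmu_regs_model { unsigned long u_pptb, u_vptb; };

static struct mmu_regs_model hw_model;	/* stands in for the MMU registers */

static void switch_pt_model(struct mmu_regs_model *saved)
{
	struct mmu_regs_model old = hw_model;	/* NATIVE_READ_MMU_*_REG() */

	hw_model = *saved;	/* NATIVE_WRITE_MMU_*_REG() */
	*saved = old;		/* context now holds the non-running world */
}

int main(void)
{
	struct mmu_regs_model guest = { 0x1000, 0x2000 };

	hw_model = (struct mmu_regs_model){ 0xa000, 0xb000 };	/* host */
	switch_pt_model(&guest);	/* enter guest */
	assert(hw_model.u_pptb == 0x1000 && guest.u_pptb == 0xa000);
	switch_pt_model(&guest);	/* back to host, fully restored */
	assert(hw_model.u_pptb == 0xa000 && guest.u_pptb == 0x1000);
	return 0;
}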
static inline void kvm_switch_mmu_tc_regs(struct kvm_sw_cpu_context *sw_ctxt)
{
mmu_reg_t tc_hpa;
@ -219,26 +300,59 @@ static inline void kvm_switch_mmu_tc_regs(struct kvm_sw_cpu_context *sw_ctxt)
sw_ctxt->trap_count = trap_count;
}
static inline void kvm_switch_mmu_regs(struct kvm_sw_cpu_context *sw_ctxt,
static inline void kvm_switch_hv_mmu_regs(struct kvm_sw_cpu_context *sw_ctxt,
bool switch_tc)
{
if (likely(!sw_ctxt->no_switch_pt)) {
kvm_switch_mmu_pt_regs(sw_ctxt);
kvm_switch_hv_mmu_pt_regs(sw_ctxt);
}
if (switch_tc) {
kvm_switch_mmu_tc_regs(sw_ctxt);
}
}
static inline void kvm_switch_to_guest_mmu_pid(struct kvm_vcpu *vcpu)
static inline void
kvm_switch_pv_mmu_regs_to_guest(struct kvm_sw_cpu_context *sw_ctxt,
struct thread_info *ti, bool switch_tc)
{
if (likely(!sw_ctxt->no_switch_pt)) {
kvm_switch_pv_mmu_pt_regs_to_guest(sw_ctxt, ti);
}
if (switch_tc) {
kvm_switch_mmu_tc_regs(sw_ctxt);
}
}
static inline void
kvm_switch_pv_mmu_regs_to_host(struct kvm_sw_cpu_context *sw_ctxt,
struct thread_info *ti, bool switch_tc)
{
if (likely(!sw_ctxt->no_switch_pt)) {
kvm_switch_pv_mmu_pt_regs_to_host(sw_ctxt, ti);
}
if (switch_tc) {
kvm_switch_mmu_tc_regs(sw_ctxt);
}
}
static inline void kvm_switch_to_guest_mmu_pid(struct kvm_vcpu *vcpu,
struct thread_info *ti)
{
mm_context_t *gmm_context;
unsigned long mask, flags;
int cpu = raw_smp_processor_id();
if (unlikely(vcpu->arch.sw_ctxt.no_switch_pt)) {
copy_user_pgd_to_kernel_root_pt(
(pgd_t *)__va(kvm_get_space_type_spt_u_root(vcpu)));
pgd_t *pgd;
if (test_ti_status_flag(ti, TS_HOST_TO_GUEST_USER)) {
/* switching to guest user: guest kernel pgds should */
/* be zeroed in the user root */
pgd = (pgd_t *)__va(kvm_get_space_type_spt_u_root(vcpu));
} else {
pgd = (pgd_t *)__va(kvm_get_space_type_spt_gk_root(vcpu));
}
copy_user_pgd_to_kernel_root_pt(pgd);
}
raw_all_irq_save(flags);
gmm_context = pv_vcpu_get_gmm_context(vcpu);
@ -249,7 +363,13 @@ static inline void kvm_switch_to_guest_mmu_pid(struct kvm_vcpu *vcpu)
/* see arch/e2k/include/asm/mmu_context.h */
smp_mb();
#endif /* CONFIG_SMP */
mask = get_mmu_pid(gmm_context, cpu);
if (unlikely(test_ti_status_flag(ti, TS_HOST_SWITCH_MMU_PID))) {
/* get new MMU context to exclude access to guest kernel */
/* virtual space from guest user */
mask = get_new_mmu_pid(gmm_context, cpu);
} else {
mask = get_mmu_pid(gmm_context, cpu);
}
reload_context_mask(mask);
raw_all_irq_restore(flags);
}
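/*
 * Illustration (not from this header): TS_HOST_SWITCH_MMU_PID forces a
 * fresh PID so TLB entries tagged with the old one (possibly covering
 * guest kernel space) can never be hit from guest user mode. A toy
 * allocator showing reuse vs. forced renewal; names are invented.
 */
#include <stdio.h>

static unsigned long next_pid_model = 1;

struct gmm_ctx_model { unsigned long pid; };

static unsigned long get_pid_model(struct gmm_ctx_model *c)
{
	if (!c->pid)
		c->pid = next_pid_model++;	/* first use: allocate */
	return c->pid;				/* otherwise reuse cached PID */
}

static unsigned long get_new_pid_model(struct gmm_ctx_model *c)
{
	c->pid = next_pid_model++;	/* fresh tag: entries under the */
	return c->pid;			/* old PID are now unreachable */
}

int main(void)
{
	struct gmm_ctx_model gmm = { 0 };

	printf("reuse:  %lu %lu\n", get_pid_model(&gmm), get_pid_model(&gmm));
	printf("forced: %lu\n", get_new_pid_model(&gmm));
	return 0;
}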
@ -373,7 +493,12 @@ static inline void host_guest_enter(struct thread_info *ti,
/* restore guest PT context (U_PPTB/U_VPTB) */
if (!(flags & DONT_MMU_CONTEXT_SWITCH)) {
kvm_switch_mmu_regs(sw_ctxt, vcpu->is_hv);
if (likely(!vcpu->is_hv)) {
kvm_switch_pv_mmu_regs_to_guest(sw_ctxt, ti,
vcpu->is_hv);
} else {
kvm_switch_hv_mmu_regs(sw_ctxt, vcpu->is_hv);
}
}
} else if (flags & FULL_CONTEXT_SWITCH) {
@ -386,8 +511,7 @@ static inline void host_guest_enter(struct thread_info *ti,
&sw_ctxt->aau_context);
#endif
if (machine.flushts)
machine.flushts();
E2K_FLUSHTS;
if (likely(!(flags & DONT_SAVE_KGREGS_SWITCH))) {
/* For interceptions restore extended part */
@ -403,7 +527,12 @@ static inline void host_guest_enter(struct thread_info *ti,
kvm_switch_fpu_regs(sw_ctxt);
kvm_switch_cu_regs(sw_ctxt);
if (likely(!(flags & DONT_MMU_CONTEXT_SWITCH))) {
kvm_switch_mmu_regs(sw_ctxt, vcpu->is_hv);
if (likely(!vcpu->is_hv)) {
kvm_switch_pv_mmu_regs_to_guest(sw_ctxt, ti,
vcpu->is_hv);
} else {
kvm_switch_hv_mmu_regs(sw_ctxt, vcpu->is_hv);
}
}
#ifdef CONFIG_USE_AAU
@ -429,7 +558,11 @@ static inline void host_guest_enter(struct thread_info *ti,
*/
/* switch to guest MMU context to continue guest execution */
kvm_switch_mmu_regs(sw_ctxt, false);
if (likely(!vcpu->is_hv)) {
kvm_switch_pv_mmu_regs_to_guest(sw_ctxt, ti, false);
} else {
KVM_BUG_ON(true);
}
}
KVM_BUG_ON(vcpu->is_hv && !NATIVE_READ_MMU_US_CL_D());
@ -518,7 +651,11 @@ static inline void host_guest_exit(struct thread_info *ti,
/* save guest PT context (U_PPTB/U_VPTB) and restore host */
/* user PT context */
if (!(flags & DONT_MMU_CONTEXT_SWITCH)) {
kvm_switch_mmu_regs(sw_ctxt, vcpu->is_hv);
if (likely(!vcpu->is_hv)) {
kvm_switch_pv_mmu_regs_to_host(sw_ctxt, ti, false);
} else {
kvm_switch_hv_mmu_regs(sw_ctxt, true);
}
}
} else if (flags & FULL_CONTEXT_SWITCH) {
@ -583,7 +720,11 @@ static inline void host_guest_exit(struct thread_info *ti,
kvm_switch_fpu_regs(sw_ctxt);
kvm_switch_cu_regs(sw_ctxt);
if (likely(!(flags & DONT_MMU_CONTEXT_SWITCH))) {
kvm_switch_mmu_regs(sw_ctxt, vcpu->is_hv);
if (likely(!vcpu->is_hv)) {
kvm_switch_pv_mmu_regs_to_host(sw_ctxt, ti, false);
} else {
kvm_switch_hv_mmu_regs(sw_ctxt, true);
}
}
} else {
/*
@ -591,7 +732,11 @@ static inline void host_guest_exit(struct thread_info *ti,
*/
/* switch to hypervisor MMU context to emulate hw intercept */
kvm_switch_mmu_regs(sw_ctxt, false);
if (likely(!vcpu->is_hv)) {
kvm_switch_pv_mmu_regs_to_host(sw_ctxt, ti, false);
} else {
KVM_BUG_ON(true);
}
}
/* This makes a call so switch it after AAU */
@ -890,7 +1035,8 @@ host_syscall_from_guest_user(struct thread_info *ti)
}
static inline void
host_trap_guest_exit_intc(struct thread_info *ti, struct pt_regs *regs)
host_trap_guest_exit_intc(struct thread_info *ti, struct pt_regs *regs,
restore_caller_t from)
{
if (likely(!kvm_test_intc_emul_flag(regs))) {
/* it is not a paravirtualized guest VCPU interception */
@ -903,7 +1049,7 @@ host_trap_guest_exit_intc(struct thread_info *ti, struct pt_regs *regs)
* Return from trap on paravirtualized guest VCPU which was
* interpreted as interception
*/
return_from_pv_vcpu_intc(ti, regs);
return_from_pv_vcpu_intc(ti, regs, from);
}
static inline bool
@ -1037,10 +1183,10 @@ host_trap_guest_exit_trap(struct thread_info *ti, struct pt_regs *regs)
static inline void
host_trap_guest_enter(struct thread_info *ti, struct pt_regs *regs,
unsigned flags)
unsigned flags, restore_caller_t from)
{
if (flags & EXIT_FROM_INTC_SWITCH) {
host_trap_guest_exit_intc(ti, regs);
host_trap_guest_exit_intc(ti, regs, from);
}
if (flags & EXIT_FROM_TRAP_SWITCH) {
host_trap_guest_exit_trap(ti, regs);
@ -1150,7 +1296,8 @@ static inline void __guest_exit_light(struct thread_info *ti,
{
}
static inline void
trap_guest_enter(struct thread_info *ti, struct pt_regs *regs, unsigned flags)
trap_guest_enter(struct thread_info *ti, struct pt_regs *regs, unsigned flags,
restore_caller_t from)
{
native_trap_guest_enter(ti, regs, flags);
}
@ -1205,7 +1352,7 @@ static inline void pv_vcpu_syscall_intc(thread_info_t *ti, pt_regs_t *regs)
native_pv_vcpu_syscall_intc(ti, regs);
}
static inline void guest_exit_intc(struct pt_regs *regs,
bool intc_emul_flag) { }
bool intc_emul_flag, restore_caller_t from) { }
static inline void guest_syscall_exit_trap(struct pt_regs *regs,
bool ts_host_at_vcpu_mode) { }
@ -1235,9 +1382,10 @@ static inline void __guest_exit_light(struct thread_info *ti,
host_guest_exit_light(ti, vcpu);
}
static inline void
trap_guest_enter(struct thread_info *ti, struct pt_regs *regs, unsigned flags)
trap_guest_enter(struct thread_info *ti, struct pt_regs *regs, unsigned flags,
restore_caller_t from)
{
host_trap_guest_enter(ti, regs, flags);
host_trap_guest_enter(ti, regs, flags, from);
}
static inline void
trap_guest_exit(struct thread_info *ti, struct pt_regs *regs,
@ -1296,7 +1444,8 @@ static inline void pv_vcpu_syscall_intc(thread_info_t *ti, pt_regs_t *regs)
host_pv_vcpu_syscall_intc(ti, regs);
}
static inline void guest_exit_intc(struct pt_regs *regs, bool intc_emul_flag)
static inline void guest_exit_intc(struct pt_regs *regs, bool intc_emul_flag,
restore_caller_t from)
{
if (unlikely(intc_emul_flag)) {
kvm_clear_intc_emul_flag(regs);
@ -1305,7 +1454,7 @@ static inline void guest_exit_intc(struct pt_regs *regs, bool intc_emul_flag)
* Return from trap on paravirtualized guest VCPU which was
* interpreted as interception
*/
return_from_pv_vcpu_intc(current_thread_info(), regs);
return_from_pv_vcpu_intc(current_thread_info(), regs, from);
}
}

View File

@ -43,6 +43,8 @@ extern void host_flush_shadow_pt_level_tlb(struct kvm *kvm, gmm_struct_t *gmm,
* and it is then that the PID will become active, but now it is still passive.
*/
extern void host_local_flush_tlb_range_and_pgtables(gmm_struct_t *gmm,
unsigned long start, unsigned long end);
extern void host_flush_tlb_mm(gmm_struct_t *gmm);
extern void host_flush_tlb_page(gmm_struct_t *gmm, unsigned long addr);
extern void host_flush_tlb_range(gmm_struct_t *gmm,

View File

@ -7,6 +7,8 @@
#include <asm/pgtable_def.h>
#include <asm/kvm/mmu.h>
#ifdef CONFIG_VIRTUALIZATION
static inline void
trace_kvm_get_va_translation(struct kvm_vcpu *vcpu, e2k_addr_t address,
pgdval_t *pgd, pudval_t *pud, pmdval_t *pmd, pteval_t *pte, int *pt_level)
@ -20,4 +22,6 @@ trace_kvm_get_gva_to_hva(struct kvm_vcpu *vcpu, gva_t gva)
return kvm_get_gva_to_hva(vcpu, gva);
}
#endif /* CONFIG_VIRTUALIZATION */
#endif /* _E2K_KVM_TRACE_DEFS_H_ */

View File

@ -5,7 +5,7 @@
#include <linux/hugetlb.h>
#include <asm/trace-defs.h>
#include <asm/trace_pgtable-v2.h>
#include <asm/trace_pgtable-v3.h>
#include <asm/trace_pgtable-v6.h>
#include <asm/pgtable_def.h>
#include <asm/kvm/trace-defs.h>

View File

@ -750,7 +750,7 @@ TRACE_EVENT(
),
TP_printk("TIR%lld: ip 0x%llx, als 0x%llx\n"
" exceptions: %s\n"
" exceptions: %s"
,
__entry->tir_hi >> 56,
__entry->tir_lo & E2K_VA_MASK,
@ -785,9 +785,10 @@ TRACE_EVENT(
__entry->ctpr3_hi = ctpr3_hi;
),
TP_printk("ctpr1 0x%llx, ctpr1_hi 0x%llx\n"
TP_printk("\n"
"ctpr1 0x%llx, ctpr1_hi 0x%llx\n"
"ctpr2 0x%llx, ctpr2_hi 0x%llx\n"
"ctpr3 0x%llx, ctpr3_hi 0x%llx\n",
"ctpr3 0x%llx, ctpr3_hi 0x%llx",
__entry->ctpr1, __entry->ctpr1_hi,
__entry->ctpr2, __entry->ctpr2_hi,
__entry->ctpr3, __entry->ctpr3_hi)
@ -863,7 +864,8 @@ TRACE_EVENT(
__entry->aaldi[i] = aau_ctxt->aaldi[i];
),
TP_printk("aasr 0x%x, lsr 0x%llx, lsr1 0x%llx, ilcr 0x%llx, ilcr1 0x%llx\n"
TP_printk("\n"
"aasr 0x%x, lsr 0x%llx, lsr1 0x%llx, ilcr 0x%llx, ilcr1 0x%llx\n"
"aaldv 0x%llx, aaldm = 0x%llx\n"
"aads lo/hi 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n"
"0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n"
@ -884,7 +886,7 @@ TRACE_EVENT(
"aaldis 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n"
"0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n"
"0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n"
"0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
"0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx",
__entry->aasr, __entry->lsr, __entry->lsr1,
__entry->ilcr, __entry->ilcr1,
__entry->aaldv, __entry->aaldm,
@ -997,7 +999,7 @@ TRACE_EVENT(
),
TP_printk("CPU#%llu, generic hypercall %llu\n"
"Args: 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx; gsbr: 0x%llx\n"
"Args: 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx; gsbr: 0x%llx"
,
__entry->cpu,
__entry->hcall_num,
@ -1043,7 +1045,7 @@ TRACE_EVENT(
),
TP_printk("CPU#%llu, light hypercall %llu\n"
"Args: 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n"
"Args: 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx"
,
__entry->cpu,
__entry->hcall_num,
@ -1070,7 +1072,7 @@ TRACE_EVENT(
__entry->ret = ret;
),
TP_printk("Generic hypercall exit: %llu\n", __entry->ret)
TP_printk("Generic hypercall exit: %llu", __entry->ret)
);
TRACE_EVENT(
@ -1088,7 +1090,7 @@ TRACE_EVENT(
__entry->ret = ret;
),
TP_printk("Light hypercall exit: %llu\n", __entry->ret)
TP_printk("Light hypercall exit: %llu", __entry->ret)
);
TRACE_EVENT(
@ -1181,7 +1183,7 @@ TRACE_EVENT(
__entry->cpu = cpu;
),
TP_printk("vcpu %d, cpu %d\n", __entry->vcpu, __entry->cpu)
TP_printk("vcpu %d, cpu %d", __entry->vcpu, __entry->cpu)
);
TRACE_EVENT(
@ -1203,7 +1205,7 @@ TRACE_EVENT(
__entry->cpu = cpu;
),
TP_printk("vcpu %d, cpu %d, last_cpu %d\n", __entry->vcpu, __entry->cpu,
TP_printk("vcpu %d, cpu %d, last_cpu %d", __entry->vcpu, __entry->cpu,
__entry->last_cpu)
);
@ -1231,12 +1233,486 @@ TRACE_EVENT(
__entry->handler = handler;
),
TP_printk("HVA 0x%llx - 0x%llx; GPA 0x%llx - 0x%llx; handler 0x%px\n",
TP_printk("HVA 0x%llx - 0x%llx; GPA 0x%llx - 0x%llx; handler 0x%px",
__entry->hva_start, __entry->hva_end,
__entry->gpa_start, __entry->gpa_end,
__entry->handler)
);
TRACE_EVENT(
rmap_add_sp_entry,
TP_PROTO(struct kvm_mmu_page *sp, gfn_t gfn, pgprot_t *sptep,
struct kvm_rmap_head *rmap_head),
TP_ARGS(sp, gfn, sptep, rmap_head),
TP_STRUCT__entry(
__field(struct kvm_mmu_page *, sp )
__field(pgprot_t *, sptep )
__field(pgprotval_t, spte )
__field(gfn_t, gfn )
__field(int, level )
__field(struct kvm_rmap_head *, rmap_head )
),
TP_fast_assign(
__entry->sp = sp;
__entry->sptep = sptep;
__entry->spte = pgprot_val(*sptep);
__entry->gfn = gfn;
__entry->level = sp->role.level;
__entry->rmap_head = rmap_head;
),
TP_printk("rmap head %px : add sp %px gfn 0x%llx level %d "
"spte %px : 0x%lx\n",
__entry->rmap_head, __entry->sp, __entry->gfn, __entry->level,
__entry->sptep, __entry->spte)
);
TRACE_EVENT(
rmap_add_parent_pte,
TP_PROTO(struct kvm_mmu_page *sp, pgprot_t *sptep,
struct kvm_rmap_head *rmap_head),
TP_ARGS(sp, sptep, rmap_head),
TP_STRUCT__entry(
__field(struct kvm_mmu_page *, sp )
__field(pgprot_t *, sptep )
__field(pgprotval_t, spte )
__field(gfn_t, gfn )
__field(int, level )
__field(struct kvm_rmap_head *, rmap_head )
),
TP_fast_assign(
__entry->sp = sp;
__entry->sptep = sptep;
__entry->spte = pgprot_val(*sptep);
__entry->gfn = sp->gfn;
__entry->level = sp->role.level;
__entry->rmap_head = rmap_head;
),
TP_printk("rmap head %px : add sp %px gfn 0x%llx level %d "
"parent spte %px : 0x%lx\n",
__entry->rmap_head, __entry->sp, __entry->gfn, __entry->level,
__entry->sptep, __entry->spte)
);
TRACE_EVENT(
rmap_remove_sp_entry,
TP_PROTO(struct kvm_mmu_page *sp, gfn_t gfn, pgprot_t *sptep,
struct kvm_rmap_head *rmap_head),
TP_ARGS(sp, gfn, sptep, rmap_head),
TP_STRUCT__entry(
__field(struct kvm_mmu_page *, sp )
__field(pgprot_t *, sptep )
__field(pgprotval_t, spte )
__field(gfn_t, gfn )
__field(int, level )
__field(struct kvm_rmap_head *, rmap_head )
),
TP_fast_assign(
__entry->sp = sp;
__entry->sptep = sptep;
__entry->spte = pgprot_val(*sptep);
__entry->gfn = gfn;
__entry->level = sp->role.level;
__entry->rmap_head = rmap_head;
),
TP_printk("rmap head %px : remove sp %px gfn 0x%llx level %d "
"spte %px : 0x%lx\n",
__entry->rmap_head, __entry->sp, __entry->gfn, __entry->level,
__entry->sptep, __entry->spte)
);
TRACE_EVENT(
rmap_remove_parent_pte,
TP_PROTO(struct kvm_mmu_page *sp, pgprot_t *sptep,
struct kvm_rmap_head *rmap_head),
TP_ARGS(sp, sptep, rmap_head),
TP_STRUCT__entry(
__field(struct kvm_mmu_page *, sp )
__field(pgprot_t *, sptep )
__field(pgprotval_t, spte )
__field(gfn_t, gfn )
__field(int, level )
__field(struct kvm_rmap_head *, rmap_head )
),
TP_fast_assign(
__entry->sp = sp;
__entry->sptep = sptep;
__entry->spte = pgprot_val(*sptep);
__entry->gfn = sp->gfn;
__entry->level = sp->role.level;
__entry->rmap_head = rmap_head;
),
TP_printk("rmap head %px : remove sp %px gfn 0x%llx level %d "
"parent spte %px : 0x%lx\n",
__entry->rmap_head, __entry->sp, __entry->gfn, __entry->level,
__entry->sptep, __entry->spte)
);
TRACE_EVENT(
rmap_add_0_1_spte,
TP_PROTO(struct kvm_rmap_head *rmap_head, pgprot_t *sptep),
TP_ARGS(rmap_head, sptep),
TP_STRUCT__entry(
__field(pgprot_t *, sptep )
__field(pgprotval_t, spte )
__field(unsigned long, val )
__field(struct kvm_rmap_head *, rmap_head )
),
TP_fast_assign(
__entry->sptep = sptep;
__entry->spte = pgprot_val(*sptep);
__entry->val = rmap_head->val;
__entry->rmap_head = rmap_head;
),
TP_printk("rmap head %px : add 0->1 spte %px : 0x%lx head val 0x%lx\n",
__entry->rmap_head, __entry->sptep, __entry->spte, __entry->val)
);
TRACE_EVENT(
rmap_add_1_many_spte,
TP_PROTO(struct kvm_rmap_head *rmap_head, struct pte_list_desc *desc,
pgprot_t *sptep),
TP_ARGS(rmap_head, desc, sptep),
TP_STRUCT__entry(
__field(pgprot_t *, sptep )
__field(pgprotval_t, spte )
__field(struct pte_list_desc *, desc )
__field(unsigned long, val )
__field(pgprot_t *, desc0 )
__field(pgprot_t *, desc1 )
__field(pgprotval_t, spte1 )
__field(struct kvm_rmap_head *, rmap_head )
),
TP_fast_assign(
__entry->sptep = sptep;
__entry->spte = pgprot_val(*sptep);
__entry->desc = desc;
__entry->desc0 = desc->sptes[0];
__entry->desc1 = desc->sptes[1];
__entry->spte1 = pgprot_val(*(desc->sptes[1]));
__entry->val = rmap_head->val;
__entry->rmap_head = rmap_head;
),
TP_printk("rmap head %px : add 1->many spte %px : 0x%lx head val 0x%lx "
"desc %px\n"
" desc[0] : val %px\n"
" desc[1] : spte %px : 0x%lx\n",
__entry->rmap_head, __entry->sptep, __entry->spte, __entry->val,
__entry->desc, __entry->desc0, __entry->desc1, __entry->spte1)
);
TRACE_EVENT(
rmap_add_new_desc,
TP_PROTO(struct kvm_rmap_head *rmap_head, struct pte_list_desc *desc),
TP_ARGS(rmap_head, desc),
TP_STRUCT__entry(
__field(struct pte_list_desc *, desc )
__field(unsigned long, val )
__field(struct kvm_rmap_head *, rmap_head )
),
TP_fast_assign(
__entry->desc = desc;
__entry->val = rmap_head->val;
__entry->rmap_head = rmap_head;
),
TP_printk("rmap head %px : add many->many new desc %px head val 0x%lx\n",
__entry->rmap_head, __entry->desc, __entry->val)
);
TRACE_EVENT(
rmap_add_many_many_spte,
TP_PROTO(struct kvm_rmap_head *rmap_head, struct pte_list_desc *desc,
pgprot_t *sptep, int index),
TP_ARGS(rmap_head, desc, sptep, index),
TP_STRUCT__entry(
__field(pgprot_t *, sptep )
__field(pgprotval_t, spte )
__field(struct pte_list_desc *, desc )
__field(unsigned long, val )
__field(int, index )
__field(struct kvm_rmap_head *, rmap_head )
),
TP_fast_assign(
__entry->sptep = sptep;
__entry->spte = pgprot_val(*sptep);
__entry->desc = desc;
__entry->index = index;
__entry->val = rmap_head->val;
__entry->rmap_head = rmap_head;
),
TP_printk("rmap head %px : add many->many val 0x%lx\n"
" adde to desc %px[%04x] : spte %px : 0x%lx\n",
__entry->rmap_head, __entry->val, __entry->desc,
__entry->index, __entry->sptep, __entry->spte)
);
TRACE_EVENT(
rmap_remove_1_0_spte,
TP_PROTO(struct kvm_rmap_head *rmap_head, pgprot_t *sptep),
TP_ARGS(rmap_head, sptep),
TP_STRUCT__entry(
__field(pgprot_t *, sptep )
__field(pgprotval_t, spte )
__field(pgprot_t *, rmap_sptep )
__field(pgprotval_t, rmap_spte )
__field(unsigned long, val )
__field(struct kvm_rmap_head *, rmap_head )
),
TP_fast_assign(
__entry->sptep = sptep;
__entry->spte = pgprot_val(*sptep);
__entry->rmap_sptep = (pgprot_t *)rmap_head->val;
__entry->rmap_spte = pgprot_val(*(pgprot_t *)rmap_head->val);
__entry->val = 0;
__entry->rmap_head = rmap_head;
),
TP_printk("rmap head %px : remove 1->0 head val 0x%lx\n"
" rmap spte : %px : 0x%lx\n"
" to remove : %px : 0x%lx\n",
__entry->rmap_head, __entry->val,
__entry->rmap_sptep, __entry->rmap_spte,
__entry->sptep, __entry->spte)
);
TRACE_EVENT(
rmap_move_desc,
TP_PROTO(struct kvm_rmap_head *rmap_head, struct pte_list_desc *desc,
int index_to, int index_from),
TP_ARGS(rmap_head, desc, index_to, index_from),
TP_STRUCT__entry(
__field(struct pte_list_desc *, desc )
__field(pgprot_t *, sptep_to )
__field(pgprotval_t, spte_to )
__field(pgprot_t *, sptep_from )
__field(pgprotval_t, spte_from )
__field(int, index_to )
__field(int, index_from )
__field(unsigned long, val )
__field(struct kvm_rmap_head *, rmap_head )
),
TP_fast_assign(
__entry->desc = desc;
__entry->index_to = index_to;
__entry->index_from = index_from;
__entry->sptep_to = desc->sptes[index_to];
__entry->spte_to = pgprot_val(*(desc->sptes[index_to]));
__entry->sptep_from = desc->sptes[index_from];
__entry->spte_from = pgprot_val(*(desc->sptes[index_from]));
__entry->val = rmap_head->val;
__entry->rmap_head = rmap_head;
),
TP_printk("rmap head %px : move many->many head val 0x%lx\n"
" deleted desc at %px[%04x] : %px : 0x%lx\n"
" moved to from %px[%04x] : %px ; 0x%lx\n",
__entry->rmap_head, __entry->val,
__entry->desc, __entry->index_to,
__entry->sptep_to, __entry->spte_to,
__entry->desc, __entry->index_from,
__entry->sptep_from, __entry->spte_from)
);
TRACE_EVENT(
rmap_remove_desc,
TP_PROTO(struct kvm_rmap_head *rmap_head, struct pte_list_desc *desc,
struct pte_list_desc *prev),
TP_ARGS(rmap_head, desc, prev),
TP_STRUCT__entry(
__field(struct pte_list_desc *, desc )
__field(struct pte_list_desc *, prev )
__field(struct pte_list_desc *, next )
__field(unsigned long, val )
__field(struct kvm_rmap_head *, rmap_head )
),
TP_fast_assign(
__entry->desc = desc;
__entry->desc = prev;
if (prev != NULL) {
__entry->next = prev->more;
} else {
__entry->next = NULL;
}
__entry->val = rmap_head->val;
__entry->rmap_head = rmap_head;
),
TP_printk("rmap head %px : remove many->many head val 0x%lx\n"
" desc %px\n"
" prev %px\n"
" next %px\n",
__entry->rmap_head, __entry->val,
__entry->desc, __entry->prev, __entry->next)
);
TRACE_EVENT(
rmap_remove_many_many_spte,
TP_PROTO(struct kvm_rmap_head *rmap_head, pgprot_t *sptep,
struct pte_list_desc *desc, int index),
TP_ARGS(rmap_head, sptep, desc, index),
TP_STRUCT__entry(
__field(pgprot_t *, sptep )
__field(pgprotval_t, spte )
__field(pgprot_t *, rmap_sptep )
__field(pgprotval_t, rmap_spte )
__field(struct pte_list_desc *, desc )
__field(int, index )
__field(unsigned long, val )
__field(struct kvm_rmap_head *, rmap_head )
),
TP_fast_assign(
__entry->sptep = sptep;
__entry->spte = pgprot_val(*sptep);
__entry->desc = desc;
__entry->index = index;
__entry->rmap_sptep = desc->sptes[index];
__entry->rmap_spte = pgprot_val(*(desc->sptes[index]));
__entry->val = rmap_head->val;
__entry->rmap_head = rmap_head;
),
TP_printk("rmap head %px : add many->many head val 0x%lx "
"desc %px[%04x]\n"
" rmap spte : %px : 0x%lx\n"
" to remove : %px : 0x%lx\n",
__entry->rmap_head, __entry->val, __entry->desc, __entry->index,
__entry->rmap_sptep, __entry->rmap_spte,
__entry->sptep, __entry->spte)
);
TRACE_EVENT(
rmap_remove_0_bad_spte,
TP_PROTO(struct kvm_rmap_head *rmap_head, pgprot_t *sptep),
TP_ARGS(rmap_head, sptep),
TP_STRUCT__entry(
__field(pgprot_t *, sptep )
__field(pgprotval_t, spte )
__field(unsigned long, val )
__field(struct kvm_rmap_head *, rmap_head )
),
TP_fast_assign(
__entry->sptep = sptep;
__entry->spte = pgprot_val(*sptep);
__entry->val = rmap_head->val;
__entry->rmap_head = rmap_head;
),
TP_printk("rmap head %px : remove 0->bad not found spte %px : 0x%lx "
"head val 0x%lx\n",
__entry->rmap_head, __entry->sptep, __entry->spte, __entry->val)
);
TRACE_EVENT(
rmap_remove_1_bad_spte,
TP_PROTO(struct kvm_rmap_head *rmap_head, pgprot_t *sptep),
TP_ARGS(rmap_head, sptep),
TP_STRUCT__entry(
__field(pgprot_t *, sptep )
__field(pgprotval_t, spte )
__field(unsigned long, val )
__field(struct kvm_rmap_head *, rmap_head )
),
TP_fast_assign(
__entry->sptep = sptep;
__entry->spte = pgprot_val(*sptep);
__entry->val = rmap_head->val;
__entry->rmap_head = rmap_head;
),
TP_printk("rmap head %px : remove 1->bad not found spte %px : 0x%lx "
"head val 0x%lx\n",
__entry->rmap_head, __entry->sptep, __entry->spte, __entry->val)
);
TRACE_EVENT(
rmap_remove_many_bad_spte,
TP_PROTO(struct kvm_rmap_head *rmap_head, pgprot_t *sptep),
TP_ARGS(rmap_head, sptep),
TP_STRUCT__entry(
__field(pgprot_t *, sptep )
__field(pgprotval_t, spte )
__field(unsigned long, val )
__field(struct kvm_rmap_head *, rmap_head )
),
TP_fast_assign(
__entry->sptep = sptep;
__entry->spte = pgprot_val(*sptep);
__entry->val = rmap_head->val;
__entry->rmap_head = rmap_head;
),
TP_printk("rmap head %px : remove many->bad not found spte %px : 0x%lx "
"head val 0x%lx\n",
__entry->rmap_head, __entry->sptep, __entry->spte, __entry->val)
);
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH

View File

@ -6,6 +6,7 @@
#include <linux/tracepoint.h>
#include <asm/kvm_host.h>
#include <asm/mmu_types.h>
#define E2K_TRACE_PRINT_CU_HDR_LO(entry) \
__print_flags(entry, "|", \
@ -67,193 +68,148 @@
{ IME_TLB_PAGE_FLUSH_UPPER, "TLB_PAGE_FLUSH_UPPER" }, \
{ IME_TLB_ENTRY_PROBE, "TLB_ENTRY_PROBE" })
#define E2K_PRINT_INTC_CU_ENTRY(__entry, i) \
(__entry->cu_num > i) ? \
E2K_TRACE_PRINT_CU_INFO_LO(__entry->cu[2 * i]) : "(none)", \
(__entry->cu_num > i) ? __entry->cu[2 * i] : 0ULL, \
(__entry->cu_num > i) ? __entry->cu[2 * i + 1] : 0ULL
#define E2K_TC_TYPE_STORE (1ULL << 17)
#define E2K_TC_TYPE_S_F (1ULL << 19)
#define E2K_TC_TYPE_ROOT (1ULL << 27)
#define E2K_TC_TYPE_SCAL (1ULL << 28)
#define E2K_TC_TYPE_SRU (1ULL << 29)
#define E2K_TC_TYPE_SPEC (1ULL << 30)
#define E2K_TC_TYPE_PM (1ULL << 31)
#define E2K_TC_TYPE_NUM_ALIGN (1ULL << 50)
#define E2K_TC_TYPE_EMPT (1ULL << 51)
#define E2K_TC_TYPE_CLW (1ULL << 52)
#define E2K_PRINT_INTC_MU_ENTRY(__entry, mu_num, i) \
(mu_num > i) ? \
E2K_TRACE_PRINT_MU_INFO_HDR(__entry->mu[7 * i]) : "(none)", \
(mu_num > i) ? __entry->mu[7 * i] : 0ULL, \
(mu_num > i) ? __entry->mu[7 * i + 1] : 0ULL, \
(mu_num > i) ? __entry->mu[7 * i + 2] : 0ULL, \
(mu_num > i) ? __entry->mu[7 * i + 3] : 0ULL, \
(mu_num > i) ? __entry->mu[7 * i + 4] : 0ULL, \
(mu_num > i) ? __entry->mu[7 * i + 5] : 0ULL, \
(mu_num > i) ? __entry->mu[7 * i + 6] : 0ULL
#define E2K_TC_TYPE (E2K_TC_TYPE_STORE | E2K_TC_TYPE_S_F | E2K_TC_TYPE_ROOT | \
E2K_TC_TYPE_SCAL | E2K_TC_TYPE_SRU | E2K_TC_TYPE_SPEC | \
E2K_TC_TYPE_PM | E2K_TC_TYPE_NUM_ALIGN | \
E2K_TC_TYPE_EMPT | E2K_TC_TYPE_CLW)
#define E2K_FAULT_TYPE_GLOBAL_SP (1ULL << 0)
#define E2K_FAULT_TYPE_EXC_MEM_LOCK__ILLEGAL_SMPH (1ULL << 1)
#define E2K_FAULT_TYPE_EXC_MEM_LOCK__MEM_LOCK (1ULL << 2)
#define E2K_FAULT_TYPE_PH_PR_PAGE (1ULL << 3)
#define E2K_FAULT_TYPE_IO_PAGE (1ULL << 4)
#define E2K_FAULT_TYPE_ISYS_PAGE (1ULL << 5)
#define E2K_FAULT_TYPE_PROT_PAGE (1ULL << 6)
#define E2K_FAULT_TYPE_PRIV_PAGE (1ULL << 7)
#define E2K_FAULT_TYPE_ILLEGAL_PAGE (1ULL << 8)
#define E2K_FAULT_TYPE_NWRITE_PAGE (1ULL << 9)
#define E2K_FAULT_TYPE_PAGE_MISS (1ULL << 10)
#define E2K_FAULT_TYPE_PH_BOUND (1ULL << 11)
#define E2K_FAULT_TYPE_INTL_RES_BITS (1ULL << 12)
TRACE_EVENT(
intc,
cu_intc,
TP_PROTO(const struct kvm_intc_cpu_context *intc_ctxt),
TP_PROTO(const intc_info_cu_t *cu, int num),
TP_ARGS(intc_ctxt),
TP_ARGS(cu, num),
TP_STRUCT__entry(
__field( int, cu_num )
__field( int, mu_num )
__field( u64, cu_hdr_lo )
__array( u64, cu, INTC_INFO_CU_ENTRY_MAX )
__array( u64, mu, INTC_INFO_MU_MAX )
__field( int, num )
__field( u64, cu_lo )
__field( u64, cu_hi )
),
TP_fast_assign(
__entry->cu_num = intc_ctxt->cu_num;
__entry->mu_num = intc_ctxt->mu_num;
if (__entry->cu_num >= 0)
__entry->cu_hdr_lo = AW(intc_ctxt->cu.header.lo);
if (__entry->cu_num > 0) {
int i;
for (i = 0; i < __entry->cu_num; i++) {
__entry->cu[2 * i] =
AW(intc_ctxt->cu.entry[i].lo);
__entry->cu[2 * i + 1] =
intc_ctxt->cu.entry[i].hi;
}
}
if (__entry->mu_num > 0) {
int i;
for (i = 0; i < __entry->mu_num; i++) {
__entry->mu[7 * i] =
AW(intc_ctxt->mu[i].hdr);
__entry->mu[7 * i + 1] =
intc_ctxt->mu[i].gpa;
__entry->mu[7 * i + 2] =
intc_ctxt->mu[i].gva;
__entry->mu[7 * i + 3] =
intc_ctxt->mu[i].data;
__entry->mu[7 * i + 4] =
AW(intc_ctxt->mu[i].condition);
__entry->mu[7 * i + 5] =
intc_ctxt->mu[i].data_ext;
__entry->mu[7 * i + 6] =
AW(intc_ctxt->mu[i].mask);
}
}
__entry->cu_lo = !num ? AW(cu->header.lo) : AW(cu->entry[num - 1].lo);
__entry->cu_hi = !num ? AW(cu->header.hi) : cu->entry[num - 1].hi;
__entry->num = num;
),
TP_printk("cu_num %d, mu_num %d\n"
"CU header: %s (0x%llx)\n"
"CU entry0: %s (0x%llx 0x%llx)\n"
"CU entry1: %s (0x%llx 0x%llx)\n"
"MU entry0: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n"
"MU entry1: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n"
"MU entry2: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n"
"MU entry3: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n"
"MU entry4: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n"
"MU entry5: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n"
"MU entry6: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n"
"MU entry7: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n"
"MU entry8: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n"
"MU entry9: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n"
"MU entry10: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n"
,
__entry->cu_num, __entry->mu_num,
(__entry->cu_num >= 0) ?
E2K_TRACE_PRINT_CU_HDR_LO(__entry->cu_hdr_lo) : "(none)",
(__entry->cu_num >= 0) ? __entry->cu_hdr_lo : 0,
E2K_PRINT_INTC_CU_ENTRY(__entry, 0),
E2K_PRINT_INTC_CU_ENTRY(__entry, 1),
E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 0),
E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 1),
E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 2),
E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 3),
E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 4),
E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 5),
E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 6),
E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 7),
E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 8),
E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 9),
E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 10))
TP_printk("CU %s: %s (lo 0x%llx hi 0x%llx)",
(!__entry->num) ? "header" : "entry",
(!__entry->num) ? E2K_TRACE_PRINT_CU_HDR_LO(__entry->cu_lo) :
E2K_TRACE_PRINT_CU_INFO_LO(__entry->cu_lo),
__entry->cu_lo, __entry->cu_hi)
);
TRACE_EVENT(
single_mu_intc,
mu_intc,
TP_PROTO(const intc_info_mu_t *mu),
TP_PROTO(const intc_info_mu_t *mu, int num),
TP_ARGS(mu),
TP_ARGS(mu, num),
TP_STRUCT__entry(
__array( u64, mu, INTC_INFO_MU_ITEM_SIZE )
__field( int, num )
__field( u64, header )
__field( u64, gpa )
__field( u64, gva )
__field( u64, data_val )
__field( u64, data_ext_val )
__field( u8, data_tag )
__field( u8, data_ext_tag )
__field( u64, condition )
__field( u64, mask )
),
TP_fast_assign(
__entry->mu[0] = AW(mu[0].hdr);
__entry->mu[1] = mu[0].gpa;
__entry->mu[2] = mu[0].gva;
__entry->mu[3] = mu[0].data;
__entry->mu[4] = AW(mu[0].condition);
__entry->mu[5] = mu[0].data_ext;
__entry->mu[6] = AW(mu[0].mask);
__entry->num = num;
__entry->header = AW(mu[0].hdr);
__entry->gpa = mu[0].gpa;
__entry->gva = mu[0].gva;
load_value_and_tagd(&mu[0].data, &__entry->data_val,
&__entry->data_tag);
load_value_and_tagd(&mu[0].data_ext, &__entry->data_ext_val,
&__entry->data_ext_tag);
__entry->condition = AW(mu[0].condition);
__entry->mask = AW(mu[0].mask);
),
TP_printk("MU entry0: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
E2K_PRINT_INTC_MU_ENTRY(__entry, 1, 0))
);
TRACE_EVENT(
double_mu_intc,
TP_PROTO(const intc_info_mu_t *mu),
TP_ARGS(mu),
TP_STRUCT__entry(
__array( u64, mu, 2 * INTC_INFO_MU_ITEM_SIZE )
),
TP_fast_assign(
int i;
for (i = 0; i < 2; i++) {
__entry->mu[7 * i] =
AW(mu[i].hdr);
__entry->mu[7 * i + 1] =
mu[i].gpa;
__entry->mu[7 * i + 2] =
mu[i].gva;
__entry->mu[7 * i + 3] =
mu[i].data;
__entry->mu[7 * i + 4] =
AW(mu[i].condition);
__entry->mu[7 * i + 5] =
mu[i].data_ext;
__entry->mu[7 * i + 6] =
AW(mu[i].mask);
}
),
TP_printk("MU entry0: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n"
"MU entry1: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
E2K_PRINT_INTC_MU_ENTRY(__entry, 2, 0),
E2K_PRINT_INTC_MU_ENTRY(__entry, 2, 1))
);
TRACE_EVENT(
single_cu_intc,
TP_PROTO(const intc_info_cu_hdr_t cu_hdr),
TP_ARGS(cu_hdr),
TP_STRUCT__entry(
__field( u64, cu_hdr_lo )
),
TP_fast_assign(
__entry->cu_hdr_lo = AW(cu_hdr.lo);
),
TP_printk("CU header: %s (0x%llx)\n",
E2K_TRACE_PRINT_CU_HDR_LO(__entry->cu_hdr_lo),
__entry->cu_hdr_lo)
TP_printk("\n"
"MU entry %d: header %s gva 0x%llx gpa 0x%llx\n"
"data %hhx 0x%llx data_ext %hhx 0x%llx\n"
"condition 0x%llx mask 0x%llx\n"
"Register: address=0x%02hhx, vl=%d, vr=%d\n"
"Opcode: fmt=%d, n_prot=%d, fmtc=%d\n"
"Info1: chan=%d, mas=0x%02hhx, miss_lvl=%d, rcv=%d, dst_rcv=0x%03x\n"
"Info2: %s\n"
"Ftype: %s",
__entry->num,
E2K_TRACE_PRINT_MU_INFO_HDR(__entry->header), __entry->gva, __entry->gpa,
__entry->data_tag, __entry->data_val, __entry->data_ext_tag, __entry->data_ext_val,
__entry->condition, __entry->mask,
AS((tc_cond_t) __entry->condition).address,
AS((tc_cond_t) __entry->condition).vl,
AS((tc_cond_t) __entry->condition).vr,
AS((tc_cond_t) __entry->condition).fmt,
AS((tc_cond_t) __entry->condition).npsp,
AS((tc_cond_t) __entry->condition).fmtc,
AS((tc_cond_t) __entry->condition).chan,
AS((tc_cond_t) __entry->condition).mas,
AS((tc_cond_t) __entry->condition).miss_lvl,
AS((tc_cond_t) __entry->condition).rcv,
AS((tc_cond_t) __entry->condition).dst_rcv,
__print_flags(__entry->condition & E2K_TC_TYPE, "|",
{ E2K_TC_TYPE_STORE, "store" },
{ E2K_TC_TYPE_S_F, "s_f" },
{ E2K_TC_TYPE_ROOT, "root" },
{ E2K_TC_TYPE_SCAL, "scal" },
{ E2K_TC_TYPE_SRU, "sru" },
{ E2K_TC_TYPE_SPEC, "spec" },
{ E2K_TC_TYPE_PM, "pm" },
{ E2K_TC_TYPE_NUM_ALIGN, "num_align" },
{ E2K_TC_TYPE_EMPT, "empt" },
{ E2K_TC_TYPE_CLW, "clw" }
),
__print_flags(AS((tc_cond_t) __entry->condition).fault_type, "|",
{ E2K_FAULT_TYPE_GLOBAL_SP, "global_sp" },
{ E2K_FAULT_TYPE_EXC_MEM_LOCK__ILLEGAL_SMPH,
"exc_mem_lock.illegal_smph" },
{ E2K_FAULT_TYPE_EXC_MEM_LOCK__MEM_LOCK,
"exc_mem_lock.mem_lock" },
{ E2K_FAULT_TYPE_PH_PR_PAGE, "ph_pr_page" },
{ E2K_FAULT_TYPE_IO_PAGE, "io_page" },
{ E2K_FAULT_TYPE_ISYS_PAGE, "isys_page" },
{ E2K_FAULT_TYPE_PROT_PAGE, "prot_page" },
{ E2K_FAULT_TYPE_PRIV_PAGE, "priv_page" },
{ E2K_FAULT_TYPE_ILLEGAL_PAGE, "illegal_page" },
{ E2K_FAULT_TYPE_NWRITE_PAGE, "nwrite_page" },
{ E2K_FAULT_TYPE_PAGE_MISS, "page_miss" },
{ E2K_FAULT_TYPE_PH_BOUND, "ph_bound" },
{ E2K_FAULT_TYPE_INTL_RES_BITS, "intl_res_bits" }
))
);
TRACE_EVENT(
@ -271,7 +227,7 @@ TRACE_EVENT(
__entry->ret = ret;
),
TP_printk("Intercept exit %s(%d)\n",
TP_printk("Intercept exit %s(%d)",
(__entry->ret) ? "to QEMU " : "",
__entry->ret)
);
@ -338,13 +294,14 @@ TRACE_EVENT(
__entry->bu_pcsp_hi = AW(hw_ctxt->bu_pcsp_hi);
),
TP_printk("sbr 0x%llx, usd_lo 0x%llx, usd_hi 0x%llx\n"
TP_printk("\n"
"sbr 0x%llx, usd_lo 0x%llx, usd_hi 0x%llx\n"
"sh_psp_lo 0x%llx, sh_psp_hi 0x%llx, sh_pshtp 0x%llx\n"
"sh_pcsp_lo 0x%llx, sh_pcsp_hi 0x%llx, sh_pcshtp 0x%x\n"
"cr0_lo 0x%llx, cr0_hi 0x%llx, cr1_lo 0x%llx, cr1_hi 0x%llx\n"
"bu_psp_lo 0x%llx, bu_psp_hi 0x%llx\n"
"bu_pcsp_lo 0x%llx, bu_pcsp_hi 0x%llx\n"
"backup chain stack IPs: %s\n"
"backup chain stack IPs: %s"
,
__entry->sbr, __entry->usd_lo, __entry->usd_hi,
__entry->psp_lo, __entry->psp_hi, __entry->pshtp,
@ -379,7 +336,7 @@ TRACE_EVENT(
__entry->dam_active = dam_active;
),
TP_printk("to vcpu %d via %s, vector 0x%x, dlvm %d\n", __entry->vcpu,
TP_printk("to vcpu %d via %s, vector 0x%x, dlvm %d", __entry->vcpu,
__entry->dam_active ? "icr" : "pmirr",
__entry->vector, __entry->dlvm)
);
@ -401,7 +358,7 @@ TRACE_EVENT(
__entry->val = val;
),
TP_printk("pmirr#%d val 0x%llx\n", __entry->pmirr, __entry->val)
TP_printk("pmirr#%d val 0x%llx", __entry->pmirr, __entry->val)
);
TRACE_EVENT(
@ -421,7 +378,7 @@ TRACE_EVENT(
__entry->val = val;
),
TP_printk("pmirr#%d val 0x%llx\n", __entry->pmirr, __entry->val)
TP_printk("pmirr#%d val 0x%llx", __entry->pmirr, __entry->val)
);
TRACE_EVENT(
@ -439,7 +396,7 @@ TRACE_EVENT(
__entry->val = val;
),
TP_printk("pnmirr val 0x%x\n", __entry->val)
TP_printk("pnmirr val 0x%x", __entry->val)
);
TRACE_EVENT(
@ -457,7 +414,7 @@ TRACE_EVENT(
__entry->val = val;
),
TP_printk("pnmirr val 0x%x\n", __entry->val)
TP_printk("pnmirr val 0x%x", __entry->val)
);
TRACE_EVENT(
@ -475,7 +432,7 @@ TRACE_EVENT(
__entry->cir = cir;
),
TP_printk("cir 0x%x\n", __entry->cir)
TP_printk("cir 0x%x", __entry->cir)
);
TRACE_EVENT(
@ -493,7 +450,7 @@ TRACE_EVENT(
__entry->cir = cir;
),
TP_printk("cir 0x%x\n", __entry->cir)
TP_printk("cir 0x%x", __entry->cir)
);
TRACE_EVENT(
@ -513,7 +470,7 @@ TRACE_EVENT(
__entry->data = data;
),
TP_printk("gpa 0x%lx, data 0x%lx\n", __entry->gpa, __entry->data)
TP_printk("gpa 0x%lx, data 0x%lx", __entry->gpa, __entry->data)
);
TRACE_EVENT(
@ -533,7 +490,7 @@ TRACE_EVENT(
__entry->data = data;
),
TP_printk("gpa 0x%lx, data 0x%lx\n", __entry->gpa, __entry->data)
TP_printk("gpa 0x%lx, data 0x%lx", __entry->gpa, __entry->data)
);
TRACE_EVENT(
@ -565,8 +522,9 @@ TRACE_EVENT(
__entry->us_cl_m3 = us_cl_m3;
),
TP_printk("us_cl_d %d, us_cl_b 0x%lx, us_cl_up 0x%lx\n"
"us_cl_m0 0x%lx us_cl_m1 0x%lx us_cl_m2 0x%lx, us_cl_m3 0x%lx\n",
TP_printk("\n"
"us_cl_d %d, us_cl_b 0x%lx, us_cl_up 0x%lx\n"
"us_cl_m0 0x%lx us_cl_m1 0x%lx us_cl_m2 0x%lx, us_cl_m3 0x%lx",
__entry->us_cl_d, __entry->us_cl_b, __entry->us_cl_up,
__entry->us_cl_m0, __entry->us_cl_m1, __entry->us_cl_m2, __entry->us_cl_m3)
);

View File

@ -27,8 +27,8 @@
E2K_TC_TYPE_EMPT | E2K_TC_TYPE_CLW)
#define E2K_FAULT_TYPE_GLOBAL_SP (1ULL << 0)
#define E2K_FAULT_TYPE_PAGE_BOUND (1ULL << 1)
#define E2K_FAULT_TYPE_EXC_MEM_LOCK (1ULL << 2)
#define E2K_FAULT_TYPE_EXC_MEM_LOCK__ILLEGAL_SMPH (1ULL << 1)
#define E2K_FAULT_TYPE_EXC_MEM_LOCK__MEM_LOCK (1ULL << 2)
#define E2K_FAULT_TYPE_PH_PR_PAGE (1ULL << 3)
#define E2K_FAULT_TYPE_IO_PAGE (1ULL << 4)
#define E2K_FAULT_TYPE_ISYS_PAGE (1ULL << 5)
@ -104,8 +104,10 @@ TRACE_EVENT(
),
__print_flags(AS((tc_cond_t) __entry->condition).fault_type, "|",
{ E2K_FAULT_TYPE_GLOBAL_SP, "global_sp" },
{ E2K_FAULT_TYPE_PAGE_BOUND, "page_bound" },
{ E2K_FAULT_TYPE_EXC_MEM_LOCK, "exc_mem_lock" },
{ E2K_FAULT_TYPE_EXC_MEM_LOCK__ILLEGAL_SMPH,
"exc_mem_lock.illegal_smph" },
{ E2K_FAULT_TYPE_EXC_MEM_LOCK__MEM_LOCK,
"exc_mem_lock.mem_lock" },
{ E2K_FAULT_TYPE_PH_PR_PAGE, "ph_pr_page" },
{ E2K_FAULT_TYPE_IO_PAGE, "io_page" },
{ E2K_FAULT_TYPE_ISYS_PAGE, "isys_page" },

View File

@ -252,7 +252,7 @@
* rtmp0/rtmp1 two temporary registers (for example %dr20, %dr21)
*/
.macro SAVE_GREGS_PAIR_COND_V2 gpair_lo, gpair_hi, kreg_lo, kreg_hi, rbase, \
.macro SAVE_GREGS_PAIR_COND_V3 gpair_lo, gpair_hi, kreg_lo, kreg_hi, rbase, \
predSAVE, rtmp0, rtmp1
{
strd,2 %dg\gpair_lo, [\rbase + (TAGGED_MEM_STORE_REC_OPC + \
@ -270,7 +270,7 @@
sth \rtmp1, [\rbase + (\kreg_hi * GLOB_REG_SIZE + \
GLOB_REG_EXT)] ? \predSAVE;
}
.endm /* SAVE_GREGS_PAIR_COND_V2 */
.endm /* SAVE_GREGS_PAIR_COND_V3 */
/* Bug 116851 - all strqp must be speculative if dealing with tags */
.macro SAVE_GREGS_PAIR_COND_V5 gpair_lo, gpair_hi, kreg_lo, kreg_hi, rbase, \
@ -326,15 +326,15 @@
#ifdef CONFIG_KVM_HOST_MODE
/* it is host kernel with virtualization support */
/* or paravirtualized host and guest kernel */
.macro DO_SAVE_HOST_GREGS_V2 gvcpu_lo, gvcpu_hi, hvcpu_lo, hvcpu_hi \
.macro DO_SAVE_HOST_GREGS_V3 gvcpu_lo, gvcpu_hi, hvcpu_lo, hvcpu_hi \
drti, predSAVE, drtmp, rtmp0, rtmp1
/* drtmp: thread_info->h_gregs.g */
addd \drti, TI_HOST_GREGS_TO_VIRT, \drtmp ? \predSAVE;
SAVE_GREGS_PAIR_COND_V2 \gvcpu_lo, \gvcpu_hi, \hvcpu_lo, \hvcpu_hi, \
SAVE_GREGS_PAIR_COND_V3 \gvcpu_lo, \gvcpu_hi, \hvcpu_lo, \hvcpu_hi, \
\drtmp, /* thread_info->h_gregs.g base address */ \
\predSAVE, \
\rtmp0, \rtmp1
.endm /* DO_SAVE_HOST_GREGS_V2 */
.endm /* DO_SAVE_HOST_GREGS_V3 */
.macro DO_SAVE_HOST_GREGS_V5 gvcpu_lo, gvcpu_hi, hvcpu_lo, hvcpu_hi \
drti, predSAVE, drtmp
@ -345,13 +345,13 @@
\predSAVE
.endm /* DO_SAVE_HOST_GREGS_V5 */
.macro SAVE_HOST_GREGS_V2 drti, predSAVE, drtmp, rtmp0, rtmp1
DO_SAVE_HOST_GREGS_V2 \
.macro SAVE_HOST_GREGS_V3 drti, predSAVE, drtmp, rtmp0, rtmp1
DO_SAVE_HOST_GREGS_V3 \
GUEST_VCPU_STATE_GREG, GUEST_VCPU_STATE_UNUSED_GREG, \
VCPU_STATE_GREGS_PAIRS_INDEX, VCPU_STATE_GREGS_PAIRS_HI_INDEX, \
\drti, \predSAVE, \
\drtmp, \rtmp0, \rtmp1
.endm /* SAVE_HOST_GREGS_V2 */
.endm /* SAVE_HOST_GREGS_V3 */
.macro SAVE_HOST_GREGS_V5 drti, predSAVE, drtmp
DO_SAVE_HOST_GREGS_V5 \
@ -361,9 +361,9 @@
\drtmp,
.endm /* SAVE_HOST_GREGS_V5 */
.macro SAVE_HOST_GREGS_TO_VIRT_V2 drti, predSAVE, drtmp, rtmp0, rtmp1
SAVE_HOST_GREGS_V2 \drti, \predSAVE, \drtmp, \rtmp0, \rtmp1
.endm /* SAVE_HOST_GREGS_TO_VIRT_V2 */
.macro SAVE_HOST_GREGS_TO_VIRT_V3 drti, predSAVE, drtmp, rtmp0, rtmp1
SAVE_HOST_GREGS_V3 \drti, \predSAVE, \drtmp, \rtmp0, \rtmp1
.endm /* SAVE_HOST_GREGS_TO_VIRT_V3 */
.macro SAVE_HOST_GREGS_TO_VIRT_V5 drti, predSAVE, drtmp
SAVE_HOST_GREGS_V5 \drti, \predSAVE, \drtmp
@ -378,13 +378,13 @@
#include <asm/kvm/guest/trap_table.S.h>
#else /* ! CONFIG_KVM_HOST_MODE && ! CONFIG_KVM_GUEST_KERNEL */
/* It is native host kernel without any virtualization */
.macro SAVE_HOST_GREGS_TO_VIRT_V2 drti, predSAVE, drtmp, rtmp0, rtmp1
.macro SAVE_HOST_GREGS_TO_VIRT_V3 drti, predSAVE, drtmp, rtmp0, rtmp1
/* not used */
.endm /* SAVE_VCPU_STATE_GREGS */
.endm /* SAVE_HOST_GREGS_TO_VIRT_V3 */
.macro SAVE_HOST_GREGS_TO_VIRT_V5 drti, predSAVE, drtmp
/* not used */
.endm /* SAVE_GREGS_TO_VIRT */
.endm /* SAVE_HOST_GREGS_TO_VIRT_V5 */
.macro SAVE_HOST_GREGS_TO_VIRT_UNEXT drti, drtmp
/* not used */

View File

@ -296,6 +296,7 @@ kvm_init_guest_traps_handling(struct pt_regs *regs, bool user_mode_trap)
regs->traps_to_guest = 0; /* only for host */
regs->is_guest_user = false; /* only for host */
regs->g_stacks_valid = false; /* only for host */
regs->in_fast_syscall = false; /* only for host */
if (user_mode_trap && test_thread_flag(TIF_LIGHT_HYPERCALL) &&
(NATIVE_NV_READ_CR1_LO_REG().CR1_lo_pm)) {
regs->flags.light_hypercall = 1;
@ -308,6 +309,7 @@ kvm_init_guest_syscalls_handling(struct pt_regs *regs)
regs->traps_to_guest = 0; /* only for host */
regs->is_guest_user = true; /* only for host */
regs->g_stacks_valid = false; /* only for host */
regs->in_fast_syscall = false; /* only for host */
}
static inline void

View File

@ -50,17 +50,14 @@ native_copy_from_user_with_tags(void *to, const void __user *from,
#ifdef CONFIG_KVM_HOST_MODE
/* it is host kernel with virtualization support */
#define host_get_user(kval, uptr, hregs) \
#define host_get_guest_kernel(kval, gk_ptr) \
({ \
__typeof__(*(uptr)) __user *___pu_ptr = (uptr); \
int sz_uptr = sizeof(*(uptr)); \
__typeof__(*(gk_ptr)) __user *___pu_ptr; \
int sz_uptr = sizeof(*(gk_ptr)); \
long res; \
\
___pu_ptr = (!host_test_intc_emul_mode(hregs)) ? \
(uptr) \
: \
kvm_guest_ptr_to_host_ptr((uptr), sz_uptr, \
true); \
___pu_ptr = kvm_guest_ptr_to_host_ptr((gk_ptr), false, \
sz_uptr, true); \
if (PTR_ERR(___pu_ptr) == -EAGAIN) \
res = -EAGAIN; \
else \
@ -69,17 +66,14 @@ native_copy_from_user_with_tags(void *to, const void __user *from,
(res); \
})
#define host_put_user(kval, uptr, hregs) \
#define host_put_guest_kernel(kval, gk_ptr) \
({ \
__typeof__(*(uptr)) __user *___pu_ptr = (uptr); \
int sz_uptr = sizeof(*(uptr)); \
__typeof__(*(gk_ptr)) __user *___pu_ptr; \
int sz_uptr = sizeof(*(gk_ptr)); \
long res; \
\
___pu_ptr = (!host_test_intc_emul_mode(hregs)) ? \
(uptr) \
: \
kvm_guest_ptr_to_host_ptr((uptr), sz_uptr, \
true); \
___pu_ptr = kvm_guest_ptr_to_host_ptr((gk_ptr), true, \
sz_uptr, true); \
if (PTR_ERR(___pu_ptr) == -EAGAIN) \
res = -EAGAIN; \
else \
@ -88,6 +82,88 @@ native_copy_from_user_with_tags(void *to, const void __user *from,
(res); \
})
#define host_get_user(kval, uptr, hregs) \
({ \
__typeof__(*(uptr)) __user *__pu_ptr = (uptr); \
long res; \
\
res = (!host_test_intc_emul_mode(hregs)) ? \
((__pu_ptr) ? \
native_get_user(kval, __pu_ptr) \
: \
-EFAULT) \
: \
host_get_guest_kernel(kval, __pu_ptr); \
(res); \
})
#define host_put_user(kval, uptr, hregs) \
({ \
__typeof__(*(uptr)) __user *__pu_ptr = (uptr); \
int sz_uptr = sizeof(*(uptr)); \
long res; \
\
res = (!host_test_intc_emul_mode(hregs)) ? \
((__pu_ptr) ? \
native_put_user(kval, __pu_ptr) \
: \
-EFAULT) \
: \
host_put_guest_kernel(kval, __pu_ptr); \
(res); \
})
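/*
 * Caller sketch (not from this header): the split return convention --
 * 0 on success, -EFAULT on a bad pointer, -EAGAIN when the guest
 * mapping must be re-established -- implies a retry loop at the call
 * site. kvm_retry_guest_page() is an invented placeholder here, not a
 * real API; the shape of the loop is the point.
 */
static int read_guest_word(u64 __user *uptr, struct pt_regs *hregs, u64 *out)
{
	long ret;

	do {
		ret = host_get_user(*out, uptr, hregs);
		if (ret == -EAGAIN && kvm_retry_guest_page(uptr))
			continue;	/* mapping refreshed, try again */
		break;
	} while (1);

	return (int)ret;
}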
#define __kvm_get_guest_atomic(__slot, gfn, __hk_ptr, offset, \
gk_ptrp, __writable) \
({ \
__typeof__(__hk_ptr) __user *gk_ptr; \
unsigned long addr; \
int r; \
\
addr = gfn_to_hva_memslot_prot(__slot, gfn, __writable); \
if (unlikely(kvm_is_error_hva(addr))) { \
r = -EFAULT; \
} else { \
gk_ptr = (__typeof__(__hk_ptr) __user *)(addr + offset); \
gk_ptrp = gk_ptr; \
r = native_get_user((__hk_ptr), gk_ptr); \
} \
r; \
})
#define kvm_get_guest_atomic(kvm, gpa, _hk_ptr) \
({ \
gfn_t gfn = (gpa) >> PAGE_SHIFT; \
struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); \
int offset = offset_in_page(gpa); \
__typeof__(_hk_ptr) __user *unused; \
\
__kvm_get_guest_atomic(slot, gfn, (_hk_ptr), offset, \
unused, NULL); \
})
#define kvm_vcpu_get_guest_ptr_atomic(vcpu, gpa, _hk_ptr, \
_gk_ptrp, _writable) \
({ \
gfn_t gfn = (gpa) >> PAGE_SHIFT; \
struct kvm_memory_slot *slot; \
int offset = offset_in_page(gpa); \
int r; \
\
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); \
r = __kvm_get_guest_atomic(slot, gfn, (_hk_ptr), offset, \
_gk_ptrp, _writable); \
r; \
})
#define kvm_vcpu_get_guest_atomic(vcpu, gpa, ___hk_ptr) \
({ \
__typeof__(___hk_ptr) __user *unused; \
\
kvm_vcpu_get_guest_ptr_atomic(vcpu, gpa, ___hk_ptr, \
unused, NULL); \
})
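/*
 * Usage sketch (not from this header): fetching a guest value by GPA
 * from a context that must not sleep. The macro yields 0 on success or
 * -EFAULT when the gfn has no usable host mapping.
 */
static int peek_guest_u64(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *val)
{
	int ret = kvm_vcpu_get_guest_atomic(vcpu, gpa, *val);

	if (ret)
		pr_debug("atomic guest read at gpa 0x%llx failed: %d\n",
			(unsigned long long)gpa, ret);
	return ret;
}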
extern unsigned long kvm_copy_in_user_with_tags(void __user *to,
const void __user *from, unsigned long n);
extern unsigned long kvm_copy_to_user_with_tags(void __user *to,

View File

@ -22,6 +22,7 @@
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <asm/cpu_regs_types.h>
#include <asm/mmu_regs_types.h>
#include <asm/kvm/cpu_hv_regs_types.h>
#include <asm/kvm/mmu_hv_regs_types.h>
#include <asm/apicdef.h>
@ -171,6 +172,9 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
struct kvm_vcpu;
struct kvm;
struct kvm_mmu_pages;
struct kvm_shadow_trans;
extern struct mutex kvm_lock;
extern struct list_head vm_list;
@ -333,7 +337,8 @@ typedef union kvm_mmu_root_flags {
struct {
unsigned has_host_pgds:1;
unsigned has_guest_pgds:1;
unsigned unused:30;
unsigned nonpaging:1;
unsigned unused:29;
};
} kvm_mmu_root_flags_t;
@ -341,6 +346,14 @@ typedef struct kvm_rmap_head {
unsigned long val;
} kvm_rmap_head_t;
/* make pte_list_desc fit well in cache line */
#define PTE_LIST_EXT 3
typedef struct pte_list_desc {
pgprot_t *sptes[PTE_LIST_EXT];
struct pte_list_desc *more;
} pte_list_desc_t;
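/*
 * Illustration (not from this header): the 0->1->many transitions in
 * the rmap tracepoints above fit the classic KVM rmap encoding -- an
 * empty head holds 0, a single mapping stores the spte pointer
 * directly, and more mappings store a tagged pointer to a
 * pte_list_desc chain. Tagging with bit 0 is an assumption borrowed
 * from x86 KVM, not read out of this header. Standalone model:
 */
#include <stdio.h>
#include <stdlib.h>

#define DESC_EXT_MODEL 3

struct desc_model {
	unsigned long sptes[DESC_EXT_MODEL];
	struct desc_model *more;
};

struct rmap_head_model { unsigned long val; };

static void rmap_add_model(struct rmap_head_model *h, unsigned long spte)
{
	if (!h->val) {
		h->val = spte;			/* 0 -> 1: store directly */
	} else if (!(h->val & 1)) {
		struct desc_model *d = calloc(1, sizeof(*d));

		d->sptes[0] = h->val;		/* 1 -> many: spill to desc */
		d->sptes[1] = spte;
		h->val = (unsigned long)d | 1;	/* tag: head holds a desc */
	} else {
		struct desc_model *d = (void *)(h->val & ~1UL);
		int i;

		for (i = 0; i < DESC_EXT_MODEL && d->sptes[i]; i++)
			;
		if (i == DESC_EXT_MODEL) {	/* full: chain a new desc */
			struct desc_model *n = calloc(1, sizeof(*n));

			n->more = d;
			n->sptes[0] = spte;
			h->val = (unsigned long)n | 1;
		} else {
			d->sptes[i] = spte;	/* many -> many */
		}
	}
}

int main(void)
{
	struct rmap_head_model h = { 0 };

	rmap_add_model(&h, 0x1000);	/* 0 -> 1 */
	rmap_add_model(&h, 0x2000);	/* 1 -> many */
	rmap_add_model(&h, 0x3000);	/* many -> many */
	printf("head 0x%lx tagged=%lu\n", h.val, h.val & 1);
	return 0;
}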
typedef struct kvm_mmu_page {
struct list_head link;
struct hlist_node hash_link;
@ -404,6 +417,14 @@ typedef enum pf_res {
} pf_res_t;
struct kvm_arch_exception;
struct kvm_rmap_head;
/* The return value indicates if tlb flush on all vcpus is needed. */
typedef bool (*slot_level_handler)(struct kvm *kvm,
struct kvm_rmap_head *rmap_head);
typedef const pt_struct_t * (*get_pt_struct_func_t)(struct kvm *kvm);
typedef const pt_struct_t * (*get_vcpu_pt_struct_func_t)(struct kvm_vcpu *vcpu);
/*
* e2k supports 2 types of virtual space:
@ -425,7 +446,8 @@ struct kvm_arch_exception;
* The kvm_mmu structure abstracts the details of the current mmu mode.
*/
typedef struct kvm_mmu {
hpa_t sh_u_root_hpa; /* shadow PT root for user (and probably OS) */
hpa_t sh_u_root_hpa; /* shadow PT root for user space while guest user runs */
hpa_t sh_gk_root_hpa; /* shadow PT root for user space while guest kernel runs */
hpa_t sh_os_root_hpa; /* shadow PT root for OS (separate spaces) */
hpa_t gp_root_hpa; /* physical base of root PT to translate */
/* guest physical addresses */
@ -485,10 +507,10 @@ typedef struct kvm_mmu {
/* MMU interceptions control registers state */
virt_ctrl_mu_t virt_ctrl_mu;
mmu_reg_t g_w_imask_mmu_cr;
e2k_mmu_cr_t g_w_imask_mmu_cr;
/* MMU shadow control registers initial state */
mmu_reg_t init_sh_mmu_cr;
e2k_mmu_cr_t init_sh_mmu_cr;
mmu_reg_t init_sh_pid;
/* Can have large pages at levels 2..last_nonleaf_level-1. */
@ -498,6 +520,7 @@ typedef struct kvm_mmu {
bool (*is_paging)(struct kvm_vcpu *vcpu);
void (*set_vcpu_u_pptb)(struct kvm_vcpu *vcpu, pgprotval_t base);
void (*set_vcpu_sh_u_pptb)(struct kvm_vcpu *vcpu, hpa_t root);
void (*set_vcpu_sh_gk_pptb)(struct kvm_vcpu *vcpu, hpa_t gk_root);
void (*set_vcpu_os_pptb)(struct kvm_vcpu *vcpu, pgprotval_t base);
void (*set_vcpu_sh_os_pptb)(struct kvm_vcpu *vcpu, hpa_t root);
void (*set_vcpu_u_vptb)(struct kvm_vcpu *vcpu, gva_t base);
@ -508,6 +531,7 @@ typedef struct kvm_mmu {
void (*set_vcpu_gp_pptb)(struct kvm_vcpu *vcpu, hpa_t root);
pgprotval_t (*get_vcpu_u_pptb)(struct kvm_vcpu *vcpu);
hpa_t (*get_vcpu_sh_u_pptb)(struct kvm_vcpu *vcpu);
hpa_t (*get_vcpu_sh_gk_pptb)(struct kvm_vcpu *vcpu);
pgprotval_t (*get_vcpu_os_pptb)(struct kvm_vcpu *vcpu);
hpa_t (*get_vcpu_sh_os_pptb)(struct kvm_vcpu *vcpu);
gva_t (*get_vcpu_u_vptb)(struct kvm_vcpu *vcpu);
@ -517,6 +541,7 @@ typedef struct kvm_mmu {
gva_t (*get_vcpu_os_vab)(struct kvm_vcpu *vcpu);
hpa_t (*get_vcpu_gp_pptb)(struct kvm_vcpu *vcpu);
void (*set_vcpu_pt_context)(struct kvm_vcpu *vcpu, unsigned flags);
void (*set_vcpu_u_pptb_context)(struct kvm_vcpu *vcpu);
void (*init_vcpu_ptb)(struct kvm_vcpu *vcpu);
pgprotval_t (*get_vcpu_context_u_pptb)(struct kvm_vcpu *vcpu);
gva_t (*get_vcpu_context_u_vptb)(struct kvm_vcpu *vcpu);
@ -525,23 +550,138 @@ typedef struct kvm_mmu {
gva_t (*get_vcpu_context_os_vab)(struct kvm_vcpu *vcpu);
hpa_t (*get_vcpu_context_gp_pptb)(struct kvm_vcpu *vcpu);
pgprotval_t (*get_vcpu_pdpte)(struct kvm_vcpu *vcpu, int index);
/* MMU page tables management functions */
pf_res_t (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
bool prefault, gfn_t *gfn, kvm_pfn_t *pfn);
void (*inject_page_fault)(struct kvm_vcpu *vcpu,
struct kvm_arch_exception *fault);
gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
struct kvm_arch_exception *exception);
gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
struct kvm_arch_exception *exception);
void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct kvm_arch_exception *exception,
gw_attr_t *gw_res);
void (*update_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
pgprot_t *spte, const void *pte);
void (*sync_gva)(struct kvm_vcpu *vcpu, gva_t gva);
void (*sync_gva)(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, gva_t gva);
long (*sync_gva_range)(struct kvm_vcpu *vcpu, gmm_struct_t *gmm,
gva_t gva_start, gva_t gva_end);
int (*sync_page)(struct kvm_vcpu *vcpu, kvm_mmu_page_t *sp);
} kvm_mmu_t;
extern void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp);
/*
* MMU page tables interface
*/
typedef struct kvm_mmu_pt_ops {
/* the host page table structure used to support the guest MMU and */
/* the guest's own page tables can differ in the general case */
const pt_struct_t *host_pt_struct; /* abstractions for details */
/* of the host page table structure */
const pt_struct_t *guest_pt_struct; /* abstractions for details */
/* of the guest page table structure */
const pt_struct_t *gp_pt_struct; /* abstractions for details */
/* of the guest physical page table */
/* structure, if is enable */
get_pt_struct_func_t get_host_pt_struct;
get_vcpu_pt_struct_func_t get_vcpu_pt_struct;
get_pt_struct_func_t get_gp_pt_struct;
/* MMU page tables management functions */
pgprotval_t (*get_spte_valid_mask)(struct kvm *kvm);
pgprotval_t (*get_spte_pfn_mask)(struct kvm *kvm);
gfn_t (*kvm_gfn_to_index)(struct kvm *kvm, gfn_t gfn, gfn_t base_gfn,
int level_id);
bool (*kvm_is_thp_gpmd_invalidate)(struct kvm_vcpu *vcpu,
pgprot_t old_gpmd, pgprot_t new_gpmd);
void (*kvm_vmlpt_kernel_spte_set)(struct kvm *kvm, pgprot_t *spte,
pgprot_t *root);
void (*kvm_vmlpt_user_spte_set)(struct kvm *kvm, pgprot_t *spte,
pgprot_t *root);
void (*mmu_gfn_disallow_lpage)(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn);
void (*mmu_gfn_allow_lpage)(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn);
bool (*rmap_write_protect)(struct kvm_vcpu *vcpu, u64 gfn);
int (*mmu_unsync_walk)(struct kvm *kvm, kvm_mmu_page_t *sp,
struct kvm_mmu_pages *pvec, int pt_entries_level);
bool (*mmu_slot_gfn_write_protect)(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn);
void (*account_shadowed)(struct kvm *kvm, struct kvm_mmu_page *sp);
void (*unaccount_shadowed)(struct kvm *kvm, struct kvm_mmu_page *sp);
int (*walk_shadow_pts)(struct kvm_vcpu *vcpu, gva_t addr,
struct kvm_shadow_trans *st, hpa_t spt_root);
pf_res_t (*nonpaging_page_fault)(struct kvm_vcpu *vcpu, gva_t gva,
u32 error_code, bool prefault,
gfn_t *gfnp, kvm_pfn_t *pfnp);
pgprot_t (*nonpaging_gpa_to_pte)(struct kvm_vcpu *vcpu, gva_t addr);
long (*kvm_hv_mmu_page_fault)(struct kvm_vcpu *vcpu, struct pt_regs *regs,
intc_info_mu_t *intc_info_mu);
void (*kvm_mmu_pte_write)(struct kvm_vcpu *vcpu, struct gmm_struct *gmm,
gpa_t gpa, const u8 *new, int bytes,
unsigned long flags);
int (*sync_shadow_pt_range)(struct kvm_vcpu *vcpu, gmm_struct_t *gmm,
hpa_t spt_root, gva_t start, gva_t end,
gpa_t guest_root, gva_t vptb);
int (*shadow_pt_protection_fault)(struct kvm_vcpu *vcpu,
gpa_t addr, kvm_mmu_page_t *sp);
void (*direct_unmap_prefixed_mmio_gfn)(struct kvm *kvm, gfn_t gfn);
void (*kvm_mmu_free_page)(struct kvm *kvm, struct kvm_mmu_page *sp);
void (*copy_guest_shadow_root_range)(struct kvm_vcpu *vcpu,
gmm_struct_t *gmm, pgprot_t *dst_root, pgprot_t *src_root,
int start_index, int end_index);
void (*switch_kernel_pgd_range)(struct kvm_vcpu *vcpu, int cpu);
void (*zap_linked_children)(struct kvm *kvm, pgprot_t *root_spt,
int start_index, int end_index);
void (*mark_parents_unsync)(struct kvm *kvm, kvm_mmu_page_t *sp);
int (*prepare_zap_page)(struct kvm *kvm, struct kvm_mmu_page *sp,
struct list_head *invalid_list);
int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
unsigned long end, unsigned flags);
int (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
void (*arch_mmu_enable_log_dirty_pt_masked)(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask);
bool (*slot_handle_ptes_level_range)(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
slot_level_handler fn, int start_level, int end_level,
gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb);
bool (*slot_handle_rmap_write_protect)(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
slot_level_handler fn, bool lock_flush_tlb);
bool (*slot_handle_collapsible_sptes)(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
slot_level_handler fn, bool lock_flush_tlb);
bool (*slot_handle_clear_dirty)(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
slot_level_handler fn, bool lock_flush_tlb);
bool (*slot_handle_largepage_remove_write_access)(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
slot_level_handler fn, bool lock_flush_tlb);
bool (*slot_handle_set_dirty)(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
slot_level_handler fn, bool lock_flush_tlb);
/* MMU flush interface */
void (*mmu_flush_spte_tlb_range)(struct kvm_vcpu *vcpu,
pgprot_t *sptep, int level);
void (*mmu_flush_large_spte_tlb_range)(struct kvm_vcpu *vcpu,
pgprot_t *sptep);
void (*mmu_flush_shadow_pt_level_tlb)(struct kvm *kvm,
pgprot_t *sptep, pgprot_t old_spte);
/* MMU interface init functions */
void (*mmu_init_vcpu_pt_struct)(struct kvm_vcpu *vcpu);
void (*kvm_init_mmu_pt_structs)(struct kvm *kvm);
void (*kvm_init_nonpaging_pt_structs)(struct kvm *kvm, hpa_t root);
void (*setup_shadow_pt_structs)(struct kvm_vcpu *vcpu);
void (*setup_tdp_pt_structs)(struct kvm_vcpu *vcpu);
void (*kvm_init_mmu_spt_context)(struct kvm_vcpu *vcpu,
struct kvm_mmu *context);
void (*kvm_init_mmu_tdp_context)(struct kvm_vcpu *vcpu,
struct kvm_mmu *context);
void (*kvm_init_mmu_nonpaging_context)(struct kvm_vcpu *vcpu,
struct kvm_mmu *context);
} kvm_mmu_pt_ops_t;
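All of these operations are reached through the ops table rather than called directly, which lets one VM be served by different page-table layouts. A minimal sketch of the dispatch pattern, assuming the kvm->arch.mmu_pt_ops instance that struct kvm_arch declares later in this diff (the wrapper name here is illustrative):

/* Illustrative dispatch wrapper over the per-VM PT ops table;
 * mmu_slot_gfn_write_protect is a member of kvm_mmu_pt_ops_t above. */
static inline bool
mmu_pt_slot_gfn_write_protect(struct kvm *kvm,
			      struct kvm_memory_slot *slot, u64 gfn)
{
	return kvm->arch.mmu_pt_ops.mmu_slot_gfn_write_protect(kvm, slot, gfn);
}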
typedef struct intc_mu_state {
unsigned long notifier_seq; /* 'mmu_notifier_seq' state before */
@ -641,6 +781,7 @@ typedef struct kvm_guest_virq {
typedef struct kvm_sw_cpu_context {
int osem;
bool in_hypercall;
bool in_fast_syscall;
e2k_usd_lo_t usd_lo;
e2k_usd_hi_t usd_hi;
@ -698,7 +839,7 @@ typedef struct kvm_sw_cpu_context {
e2k_cutd_t cutd;
/* guest (hypervisor shadow) user page table bases: */
mmu_reg_t sh_u_pptb; /* physical */
mmu_reg_t sh_u_pptb; /* physical (for user running) */
mmu_reg_t sh_u_vptb; /* and virtual */
mmu_reg_t tc_hpa; /* host physical base of VCPU */
/* trap cellar */
@ -826,7 +967,7 @@ typedef struct kvm_hw_cpu_context {
e2k_pcsp_lo_t bu_pcsp_lo;
e2k_pcsp_hi_t bu_pcsp_hi;
mmu_reg_t sh_mmu_cr;
e2k_mmu_cr_t sh_mmu_cr;
mmu_reg_t sh_pid;
mmu_reg_t sh_os_pptb;
mmu_reg_t gp_pptb;
@ -845,7 +986,7 @@ typedef struct kvm_hw_cpu_context {
virt_ctrl_cu_t virt_ctrl_cu;
virt_ctrl_mu_t virt_ctrl_mu;
mmu_reg_t g_w_imask_mmu_cr;
e2k_mmu_cr_t g_w_imask_mmu_cr;
struct kvm_epic_page *cepic;
@ -939,6 +1080,9 @@ struct kvm_vcpu_arch {
kvm_vcpu_state_t *kmap_vcpu_state; /* alias of VCPU state */
/* mapped into kernel VM */
/* space */
gva_t guest_vcpu_state; /* alias of VCPU state */
/* mapped into guest kernel VM */
/* to access from guest */
e2k_cute_t *guest_cut;
e2k_addr_t guest_phys_base; /* guest image (kernel) physical base */
char *guest_base; /* guest image (kernel) virtual base */
@ -1105,7 +1249,12 @@ typedef struct kvm_arch_memory_slot {
/* after show state of VCPU */
/* completion */
#define KVM_REQ_KICK 18 /* VCPU should be kicked */
#define KVM_REQ_VIRQS_INJECTED 20 /* pending VIRQs injected */
#define KVM_REQ_ADDR_FLUSH 19 /* local flush the TLB address */
#define KVM_REQ_SYNC_INIT_SPT_ROOT 20 /* need to sync guest kernel */
/* copies of root PT */
#define KVM_REQ_SYNC_GMM_SPT_ROOT 21 /* need to sync guest user */
/* copies of root PT */
#define KVM_REQ_VIRQS_INJECTED 22 /* pending VIRQs injected */
#define KVM_REQ_SCAN_IOAPIC 23 /* scan IO-APIC */
#define KVM_REQ_SCAN_IOEPIC 24 /* scan IO-EPIC */
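The KVM_REQ_* values are bit numbers in the per-VCPU request bitmap, raised and consumed with the generic kvm_make_request()/kvm_check_request() helpers. A sketch of how the new KVM_REQ_ADDR_FLUSH bit might be used (the flush body itself is omitted and the surrounding context is hypothetical):

/* Producer side: queue the request against a VCPU ... */
kvm_make_request(KVM_REQ_ADDR_FLUSH, vcpu);

/* ... consumer side: the VCPU run loop handles it before guest entry. */
if (kvm_check_request(KVM_REQ_ADDR_FLUSH, vcpu)) {
	/* perform the local flush of the TLB address here */
}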
@ -1141,9 +1290,6 @@ struct kvm_irq_mask_notifier {
struct hlist_node link;
};
typedef const pt_struct_t * (*get_pt_struct_func_t)(struct kvm *kvm);
typedef const pt_struct_t * (*get_vcpu_pt_struct_func_t)(struct kvm_vcpu *vcpu);
struct irq_remap_table {
bool enabled;
unsigned int host_pin;
@ -1188,19 +1334,6 @@ struct kvm_arch {
gmmid_table_t gmmid_table;
gmm_struct_t *init_gmm; /* host agent of guest kernel mm */
/* host page table structure to support guest MMU; host and guest */
/* PTs can be different in the general case */
const pt_struct_t *host_pt_struct; /* abstractions for details */
/* of the host page table structure */
const pt_struct_t *guest_pt_struct; /* abstractions for details */
/* of the guest page table structure */
const pt_struct_t *gp_pt_struct; /* abstractions for details */
/* of the guest physical page table */
/* structure, if enabled */
get_pt_struct_func_t get_host_pt_struct;
get_vcpu_pt_struct_func_t get_vcpu_pt_struct;
get_pt_struct_func_t get_gp_pt_struct;
#ifdef CONFIG_KVM_HV_MMU
/* MMU nonpaging mode */
hpa_t nonp_root_hpa; /* physical base of nonpaging root PT */
@ -1219,6 +1352,8 @@ struct kvm_arch {
struct kvm_page_track_notifier_head track_notifier_head;
#endif /* CONFIG_KVM_HV_MMU */
kvm_mmu_pt_ops_t mmu_pt_ops; /* MMU PTs interface */
kvm_host_info_t *host_info; /* host machine and kernel INFO */
kvm_host_info_t *kmap_host_info; /* host machine and kernel INFO */
/* mapped to kernel space */
@ -1408,10 +1543,36 @@ typedef enum mmu_retry {
} mmu_retry_t;
#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
int mmu_pt_unmap_hva_range(struct kvm *kvm, unsigned long start,
unsigned long end, unsigned flags);
int mmu_pt_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int mmu_pt_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int mmu_pt_test_age_hva(struct kvm *kvm, unsigned long hva);
static inline int
kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
unsigned flags)
{
return mmu_pt_unmap_hva_range(kvm, start, end, flags);
}
static inline int
kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
return mmu_pt_set_spte_hva(kvm, hva, pte);
}
static inline int
kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
return mmu_pt_age_hva(kvm, start, end);
}
static inline int
kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
return mmu_pt_test_age_hva(kvm, hva);
}
#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */
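The kvm_*_hva() inlines keep the names that the generic MMU-notifier glue expects while the real work moves behind the mmu_pt_*() entry points, which presumably route through the kvm->arch.mmu_pt_ops table declared above (unmap_hva_range, set_spte_hva, age_hva and test_age_hva are all members there). A plausible sketch of one such dispatcher, whose real definition is not part of this header:

/* Sketch only: thin dispatcher over the per-VM PT ops table. */
int mmu_pt_unmap_hva_range(struct kvm *kvm, unsigned long start,
			   unsigned long end, unsigned flags)
{
	return kvm->arch.mmu_pt_ops.unmap_hva_range(kvm, start, end, flags);
}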
extern void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);

View File

@ -6,8 +6,6 @@
#include <asm/e2k.h>
#include <asm/l_timer_regs.h>
#define L_TIMER_IS_ALLOWED() (HAS_MACHINE_E2K_IOHUB || IS_HV_GM())
#include <asm-l/l_timer.h>
#endif /* __KERNEL__ */

View File

@ -27,6 +27,8 @@ union e2k_dimtp;
#include <asm/kvm/machdep.h> /* virtualization support */
typedef void (*restore_gregs_fn_t)(const struct global_regs *);
typedef void (*save_gregs_fn_t)(struct global_regs *);
typedef struct machdep {
int native_id; /* machine Id */
int native_rev; /* cpu revision */
@ -48,12 +50,6 @@ typedef struct machdep {
e2k_addr_t pcicfg_area_phys_base;
e2k_size_t pcicfg_area_size;
e2k_addr_t nsr_area_phys_base;
e2k_size_t nbsr_area_offset;
e2k_size_t nbsr_area_size;
e2k_addr_t copsr_area_phys_base;
e2k_size_t copsr_area_size;
u8 mlt_size;
u8 tlb_lines_bits_num;
u64 tlb_addr_line_num;
u64 tlb_addr_line_num2;
u8 tlb_addr_line_num_shift2;
@ -80,16 +76,17 @@ typedef struct machdep {
void (*save_kernel_gregs)(struct kernel_gregs *);
void (*save_gregs)(struct global_regs *);
void (*save_local_gregs)(struct local_gregs *, bool is_signal);
void (*save_gregs_dirty_bgr)(struct global_regs *);
save_gregs_fn_t save_gregs_dirty_bgr;
restore_gregs_fn_t restore_gregs;
void (*save_gregs_on_mask)(struct global_regs *, bool dirty_bgr,
unsigned long not_save_gregs_mask);
void (*restore_gregs)(const struct global_regs *);
void (*restore_local_gregs)(const struct local_gregs *, bool is_signal);
void (*restore_gregs_on_mask)(struct global_regs *, bool dirty_bgr,
unsigned long not_restore_gregs_mask);
void (*save_dimtp)(union e2k_dimtp *);
void (*restore_dimtp)(const union e2k_dimtp *);
void (*clear_dimtp)(void);
void (*save_kvm_context)(struct kvm_vcpu_arch *);
void (*restore_kvm_context)(const struct kvm_vcpu_arch *);
@ -113,8 +110,6 @@ typedef struct machdep {
void (*get_and_invalidate_MLT_context)(struct e2k_mlt *mlt_state);
#endif
void (*flushts)(void);
void (*setup_arch)(void);
void (*setup_cpu_info)(struct cpuinfo_e2k *c);
int (*show_cpuinfo)(struct seq_file *m, void *v);
@ -180,49 +175,12 @@ extern cpuhas_initcall_t __cpuhas_initcalls[], __cpuhas_initcalls_end[];
* set them anyway to make a kernel running on a simulator
* behave in the same way as on real hardware. */
/* #47176 - Large pages do not work.
* Workaround - do not use them. */
CPUHAS(CPU_HWBUG_LARGE_PAGES,
!IS_ENABLED(CONFIG_CPU_ES2),
false,
cpu == IDR_ES2_DSP_MDL && revision < 1);
/* #56947 - lapic timer can lose interrupts.
* Workaround - do not use oneshot mode. */
CPUHAS(CPU_HWBUG_LAPIC_TIMER,
!IS_ENABLED(CONFIG_CPU_ES2),
false,
cpu == IDR_ES2_DSP_MDL && revision < 1);
/* #69194 - PIO reads can hang processor.
* Workaround - serialize PIO reads on every CPU. */
CPUHAS(CPU_HWBUG_PIO_READS,
!IS_ENABLED(CONFIG_CPU_ES2),
false,
cpu == IDR_ES2_DSP_MDL && (revision <= 1 || revision == 6) ||
cpu == IDR_ES2_RU_MDL && revision <= 1);
/* #71610 - Atomic operations can be non-atomic
* Workaround - flush data cache line.
* This workaround increases the count of DCACHE flushes,
* Turmalin has hardware bug with flushes so don't use
* this workaround on it. */
CPUHAS(CPU_HWBUG_ATOMIC,
!IS_ENABLED(CONFIG_CPU_ES2),
false,
cpu == IDR_ES2_DSP_MDL);
/* #58397, #76626 - CLW does not work.
* Workaround - do not use it. */
CPUHAS(CPU_HWBUG_CLW,
!IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S),
!IS_ENABLED(CONFIG_CPU_E2S),
false,
cpu == IDR_ES2_DSP_MDL && (revision <= 1 || revision == 6) ||
cpu == IDR_ES2_RU_MDL && revision <= 1 ||
cpu == IDR_E2S_MDL && revision == 0);
/* #76626 - "Page accessed" bit in PTE does not work.
* Workaround - always set it. */
CPUHAS(CPU_HWBUG_PAGE_A,
!IS_ENABLED(CONFIG_CPU_ES2),
false,
cpu == IDR_ES2_DSP_MDL && (revision <= 1 || revision == 6) ||
cpu == IDR_ES2_RU_MDL && revision <= 1);
/* #78411 - Sometimes exc_illegal_instr_addr is generated
* instead of exc_instr_page_miss.
* Workaround - always return to user from exc_illegal_instr_addr. */
@ -230,40 +188,23 @@ CPUHAS(CPU_HWBUG_SPURIOUS_EXC_ILL_INSTR_ADDR,
!IS_ENABLED(CONFIG_CPU_E2S),
false,
cpu == IDR_E2S_MDL && revision <= 1);
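Each CPUHAS() entry supplies, after the feature name: a condition saying whether the answer is already fixed at build time, the value to use in that case, and the runtime predicate over the IDR model and revision. Consumers then simply test the resulting bit; a usage sketch, assuming a cpu_has()-style predicate (the accessor itself is not shown in this hunk):

/* Sketch, assuming a cpu_has()-style test over the CPUHAS bits: */
if (cpu_has(CPU_HWBUG_SPURIOUS_EXC_ILL_INSTR_ADDR)) {
	/* workaround path: always return to user from
	 * exc_illegal_instr_addr, as the comment above prescribes */
}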
/* #83160 - unaligned loads do not work
* Workaround - limit the stream of unaligned loads to less
* than 32 bytes per cycle and put "wait ld_c" before it. */
CPUHAS(CPU_HWBUG_UNALIGNED_LOADS,
!IS_ENABLED(CONFIG_CPU_ES2),
false,
cpu == IDR_ES2_DSP_MDL && (revision <= 1 || revision == 6) ||
cpu == IDR_ES2_RU_MDL && revision <= 1)
/* #83884 - es2 deadlocks on DMA to neighbour node.
* #100984 - DMA to neighbour node slows down.
* Workaround - allocate DMA buffers only in the device node. */
/* #100984 - e8c: DMA to neighbour node slows down.
* #136177 - no DMA through the links B and C.
* Workaround - allocate DMA buffers only in the device node. */
CPUHAS(CPU_HWBUG_CANNOT_DO_DMA_IN_NEIGHBOUR_NODE,
!IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E8C),
!IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E16C),
false,
cpu == IDR_ES2_DSP_MDL && (revision <= 1 || revision == 6) ||
cpu == IDR_ES2_RU_MDL && revision <= 1 ||
cpu == IDR_E8C_MDL && revision <= 2);
/* #83884 - es2 deadlock on DMA at
* (APIC_DEFAULT_PHYS_BASE & 0x7fffFFFF) address.
* Workaround - reserve the 4K page at this address. */
CPUHAS(CPU_HWBUG_DMA_AT_APIC_ADDR,
!IS_ENABLED(CONFIG_CPU_ES2),
false,
cpu == IDR_ES2_DSP_MDL);
cpu == IDR_E8C_MDL && revision <= 2 ||
cpu == IDR_E16C_MDL && revision == 0);
/* #88644 - data profiling events are lost if overflow happens
* under closed NM interrupts; also DDMCR writing does not clear
* pending exc_data_debug exceptions.
* Workaround - disable data monitor profiling in kernel. */
CPUHAS(CPU_HWBUG_KERNEL_DATA_MONITOR,
IS_ENABLED(CONFIG_E2K_MACHINE),
IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E2S) ||
IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E1CP),
cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL ||
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
IS_ENABLED(CONFIG_CPU_E2S) || IS_ENABLED(CONFIG_CPU_E8C) ||
IS_ENABLED(CONFIG_CPU_E1CP),
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
cpu == IDR_E1CP_MDL);
/* #89495 - write barrier does not work (even for atomics).
* Workaround - special command sequence after every read-acquire. */
@ -281,10 +222,9 @@ CPUHAS(CPU_HWBUG_BAD_RESET,
* Workaround - use HS.lng from the instruction being replaced. */
CPUHAS(CPU_HWBUG_BREAKPOINT_INSTR,
IS_ENABLED(CONFIG_E2K_MACHINE),
IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E2S) ||
IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E8C2),
cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL ||
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
IS_ENABLED(CONFIG_CPU_E2S) || IS_ENABLED(CONFIG_CPU_E8C) ||
IS_ENABLED(CONFIG_CPU_E8C2),
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
cpu == IDR_E8C2_MDL);
/* #92834, #96516 - hang because of hardware problems.
* Workaround - boot activates watchdog, kernel should disable it */
@ -310,19 +250,16 @@ CPUHAS(CPU_HWBUG_WC_DAM,
* Workaround - treat it as s_f=1, store=1, sru=1 */
CPUHAS(CPU_HWBUG_TRAP_CELLAR_S_F,
IS_ENABLED(CONFIG_E2K_MACHINE) && !IS_ENABLED(CONFIG_CPU_E8C2),
IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E2S) ||
IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E1CP),
cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL ||
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
IS_ENABLED(CONFIG_CPU_E2S) || IS_ENABLED(CONFIG_CPU_E8C) ||
IS_ENABLED(CONFIG_CPU_E1CP),
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL && revision == 0);
/* #97594 - %cr1_lo.ss flag is lost if ext. interrupt arrives faster.
* Workaround - manually set %cr1_lo.ss again in interrupt handler */
CPUHAS(CPU_HWBUG_SS,
!IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) &&
!IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) &&
!IS_ENABLED(CONFIG_CPU_E8C2),
!IS_ENABLED(CONFIG_CPU_E2S) && !IS_ENABLED(CONFIG_CPU_E8C) &&
!IS_ENABLED(CONFIG_CPU_E1CP) && !IS_ENABLED(CONFIG_CPU_E8C2),
false,
cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL ||
cpu == IDR_E2S_MDL && revision <= 2 ||
cpu == IDR_E8C_MDL && revision <= 2 ||
cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL && revision == 0);
@ -330,30 +267,25 @@ CPUHAS(CPU_HWBUG_SS,
* Workaround - insert 'wait ma_c' barrier */
CPUHAS(CPU_HWBUG_AAU_AALDV,
IS_ENABLED(CONFIG_E2K_MACHINE) && !IS_ENABLED(CONFIG_CPU_E8C2),
IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E2S) ||
IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E1CP),
cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL ||
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
IS_ENABLED(CONFIG_CPU_E2S) || IS_ENABLED(CONFIG_CPU_E8C) ||
IS_ENABLED(CONFIG_CPU_E1CP),
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL && revision == 0);
/* #103223 - LAPIC does not send EoI to IO_APIC for level interrupts.
* Workaround - wait under closed interrupts until APIC_ISR clears */
CPUHAS(CPU_HWBUG_LEVEL_EOI,
IS_ENABLED(CONFIG_E2K_MACHINE),
IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E2S) ||
IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E1CP) ||
IS_ENABLED(CONFIG_CPU_E8C2),
cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL ||
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
IS_ENABLED(CONFIG_CPU_E2S) || IS_ENABLED(CONFIG_CPU_E8C) ||
IS_ENABLED(CONFIG_CPU_E1CP) || IS_ENABLED(CONFIG_CPU_E8C2),
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL);
/* #104865 - hardware might generate a false single step interrupt
* Workaround - clean frame 0 of PCS during the allocation */
CPUHAS(CPU_HWBUG_FALSE_SS,
IS_ENABLED(CONFIG_E2K_MACHINE) && !IS_ENABLED(CONFIG_CPU_E2S) &&
!IS_ENABLED(CONFIG_CPU_E8C),
IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E1CP) ||
IS_ENABLED(CONFIG_CPU_E8C2),
cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL ||
cpu == IDR_E2S_MDL && revision <= 2 ||
IS_ENABLED(CONFIG_CPU_E1CP) || IS_ENABLED(CONFIG_CPU_E8C2),
cpu == IDR_E2S_MDL && revision <= 2 ||
cpu == IDR_E8C_MDL && revision <= 2 ||
cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL);
/* #117649 - false exc_data_debug are generated based on _previous_
@ -363,11 +295,9 @@ CPUHAS(CPU_HWBUG_FALSE_SS,
CPUHAS(CPU_HWBUG_SPURIOUS_EXC_DATA_DEBUG,
IS_ENABLED(CONFIG_E2K_MACHINE) && !IS_ENABLED(CONFIG_CPU_E16C) &&
!IS_ENABLED(CONFIG_CPU_E2C3),
IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E2S) ||
IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E1CP) ||
IS_ENABLED(CONFIG_CPU_E8C2),
cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL ||
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
IS_ENABLED(CONFIG_CPU_E2S) || IS_ENABLED(CONFIG_CPU_E8C) ||
IS_ENABLED(CONFIG_CPU_E1CP) || IS_ENABLED(CONFIG_CPU_E8C2),
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL ||
cpu == IDR_E16C_MDL && revision == 0 ||
cpu == IDR_E2C3_MDL && revision == 0);
@ -396,11 +326,9 @@ CPUHAS(CPU_HWBUG_E16C_SLEEP,
CPUHAS(CPU_HWBUG_L1I_STOPS_WORKING,
IS_ENABLED(CONFIG_E2K_MACHINE) && !IS_ENABLED(CONFIG_CPU_E16C) &&
!IS_ENABLED(CONFIG_CPU_E2C3),
IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E2S) ||
IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E1CP) ||
IS_ENABLED(CONFIG_CPU_E8C2),
cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL ||
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
IS_ENABLED(CONFIG_CPU_E2S) || IS_ENABLED(CONFIG_CPU_E8C) ||
IS_ENABLED(CONFIG_CPU_E1CP) || IS_ENABLED(CONFIG_CPU_E8C2),
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL ||
cpu == IDR_E16C_MDL && revision == 0 ||
cpu == IDR_E2C3_MDL && revision == 0);
@ -452,12 +380,11 @@ CPUHAS(CPU_HWBUG_VIRT_PUSD_PSL,
* Valid sequences are: sbr, nop, usd.lo, usd.hi OR sbr, usd.lo, usd.hi, usd.lo */
CPUHAS(CPU_HWBUG_USD_ALIGNMENT,
IS_ENABLED(CONFIG_E2K_MACHINE),
IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E2S) ||
IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E1CP) ||
IS_ENABLED(CONFIG_CPU_E8C2) || IS_ENABLED(CONFIG_CPU_E16C) ||
IS_ENABLED(CONFIG_CPU_E2C3) || IS_ENABLED(CONFIG_CPU_E12C),
cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL ||
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
IS_ENABLED(CONFIG_CPU_E2S) || IS_ENABLED(CONFIG_CPU_E8C) ||
IS_ENABLED(CONFIG_CPU_E1CP) || IS_ENABLED(CONFIG_CPU_E8C2) ||
IS_ENABLED(CONFIG_CPU_E16C) || IS_ENABLED(CONFIG_CPU_E2C3) ||
IS_ENABLED(CONFIG_CPU_E12C),
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL ||
cpu == IDR_E16C_MDL || cpu == IDR_E2C3_MDL ||
cpu == IDR_E12C_MDL);
@ -469,7 +396,6 @@ CPUHAS(CPU_HWBUG_VIRT_PSIZE_INTERCEPTION,
false,
cpu == IDR_E16C_MDL && revision == 0 ||
cpu == IDR_E2C3_MDL && revision == 0);
/* #130066, #134351 - L1/L2 do not respect "lal"/"las"/"sas"/"st_rel" barriers.
* Workaround - do not use "las"/"sas"/"st_rel", and add 5 nops after "lal".
* #133605 - "lal"/"las"/"sas"/"sal" barriers do not work in certain conditions.
@ -489,7 +415,6 @@ CPUHAS(CPU_HWBUG_SOFT_WAIT_E8C2,
IS_ENABLED(CONFIG_E2K_MACHINE),
IS_ENABLED(CONFIG_CPU_E8C2),
cpu == IDR_E8C2_MDL);
/* #132693 - C3 idle state does not work.
* Workaround - do not use it. */
CPUHAS(CPU_HWBUG_C3,
@ -501,29 +426,14 @@ CPUHAS(CPU_HWBUG_C3,
* Not bugs but features go here
*/
/* On some processor revisions writecombine memory
* in prefetchable PCI area is not allowed. */
CPUHAS(CPU_FEAT_WC_PCI_PREFETCH,
!IS_ENABLED(CONFIG_CPU_ES2),
true,
!((cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL) &&
revision == 0));
/* #82499 - Instruction Cache must be handled carefully
* when flush_dc_line also flushes IC by physical address. */
CPUHAS(CPU_FEAT_FLUSH_DC_IC,
CONFIG_CPU_ISET != 0,
CONFIG_CPU_ISET >= 3,
iset_ver >= E2K_ISET_V3);
/* Rely on IDR instead of iset version to choose between APIC and EPIC.
* For guest we use its own fake IDR so that we choose between APIC and
* EPIC based on what hardware the guest *thinks* it's being executed on. */
CPUHAS(CPU_FEAT_EPIC,
IS_ENABLED(CONFIG_E2K_MACHINE) &&
!IS_ENABLED(CONFIG_KVM_GUEST_KERNEL),
!IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) &&
!IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) &&
!IS_ENABLED(CONFIG_CPU_E8C2),
guest_cpu != IDR_ES2_DSP_MDL && guest_cpu != IDR_ES2_RU_MDL &&
!IS_ENABLED(CONFIG_CPU_E2S) && !IS_ENABLED(CONFIG_CPU_E8C) &&
!IS_ENABLED(CONFIG_CPU_E1CP) && !IS_ENABLED(CONFIG_CPU_E8C2),
guest_cpu != IDR_E2S_MDL && guest_cpu != IDR_E8C_MDL &&
guest_cpu != IDR_E1CP_MDL && guest_cpu != IDR_E8C2_MDL);
/* Shows which user registers must be saved upon trap entry/exit */
@ -543,12 +453,10 @@ CPUHAS(CPU_FEAT_QPREG,
/* Hardware prefetcher that resides in L2 and works on phys. addresses */
CPUHAS(CPU_FEAT_HW_PREFETCHER,
IS_ENABLED(CONFIG_E2K_MACHINE),
!IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) &&
!IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) &&
!IS_ENABLED(CONFIG_CPU_E8C2) && !IS_ENABLED(CONFIG_CPU_E16C) &&
!IS_ENABLED(CONFIG_CPU_E2C3),
cpu != IDR_ES2_DSP_MDL && cpu != IDR_ES2_RU_MDL &&
cpu != IDR_E2S_MDL && cpu != IDR_E8C_MDL &&
!IS_ENABLED(CONFIG_CPU_E2S) && !IS_ENABLED(CONFIG_CPU_E8C) &&
!IS_ENABLED(CONFIG_CPU_E1CP) && !IS_ENABLED(CONFIG_CPU_E8C2) &&
!IS_ENABLED(CONFIG_CPU_E16C) && !IS_ENABLED(CONFIG_CPU_E2C3),
cpu != IDR_E2S_MDL && cpu != IDR_E8C_MDL &&
cpu != IDR_E1CP_MDL && cpu != IDR_E8C2_MDL &&
cpu != IDR_E16C_MDL && cpu != IDR_E2C3_MDL);
/* When flushing high order page table entries we must also flush
@ -591,10 +499,6 @@ CPUHAS(CPU_FEAT_FILLC,
CONFIG_CPU_ISET >= 6,
iset_ver >= E2K_ISET_V6);
/* Optimized version of machine.iset check */
CPUHAS(CPU_FEAT_ISET_V3,
CONFIG_CPU_ISET != 0,
CONFIG_CPU_ISET >= 3,
iset_ver >= E2K_ISET_V3);
CPUHAS(CPU_FEAT_ISET_V5,
CONFIG_CPU_ISET != 0,
CONFIG_CPU_ISET >= 5,
@ -674,60 +578,56 @@ extern __nodedata pt_struct_t pgtable_struct;
# define IS_HV_GM() (machine.gmi)
#endif
extern void save_kernel_gregs_v2(struct kernel_gregs *);
extern void save_kernel_gregs_v3(struct kernel_gregs *);
extern void save_kernel_gregs_v5(struct kernel_gregs *);
extern void save_gregs_v2(struct global_regs *);
extern void save_gregs_v3(struct global_regs *);
extern void save_gregs_v5(struct global_regs *);
extern void save_local_gregs_v2(struct local_gregs *, bool is_signal);
extern void save_local_gregs_v3(struct local_gregs *, bool is_signal);
extern void save_local_gregs_v5(struct local_gregs *, bool is_signal);
extern void save_gregs_dirty_bgr_v2(struct global_regs *);
extern void save_gregs_dirty_bgr_v3(struct global_regs *);
extern void save_gregs_dirty_bgr_v5(struct global_regs *);
extern void save_gregs_on_mask_v2(struct global_regs *, bool dirty_bgr,
extern void save_gregs_on_mask_v3(struct global_regs *, bool dirty_bgr,
unsigned long mask_not_save);
extern void save_gregs_on_mask_v5(struct global_regs *, bool dirty_bgr,
unsigned long mask_not_save);
extern void restore_gregs_v2(const struct global_regs *);
extern void restore_gregs_v3(const struct global_regs *);
extern void restore_gregs_v5(const struct global_regs *);
extern void restore_local_gregs_v2(const struct local_gregs *, bool is_signal);
extern void restore_local_gregs_v3(const struct local_gregs *, bool is_signal);
extern void restore_local_gregs_v5(const struct local_gregs *, bool is_signal);
extern void restore_gregs_on_mask_v2(struct global_regs *, bool dirty_bgr,
extern void restore_gregs_on_mask_v3(struct global_regs *, bool dirty_bgr,
unsigned long mask_not_restore);
extern void restore_gregs_on_mask_v5(struct global_regs *, bool dirty_bgr,
unsigned long mask_not_restore);
extern void save_dimtp_v6(union e2k_dimtp *);
extern void restore_dimtp_v6(const union e2k_dimtp *);
extern void clear_dimtp_v6(void);
extern void save_kvm_context_v6(struct kvm_vcpu_arch *);
extern void restore_kvm_context_v6(const struct kvm_vcpu_arch *);
extern void qpswitchd_sm(int);
extern void calculate_aau_aaldis_aaldas_v2(const struct pt_regs *regs,
extern void calculate_aau_aaldis_aaldas_v3(const struct pt_regs *regs,
e2k_aalda_t *aaldas, struct e2k_aau_context *context);
extern void calculate_aau_aaldis_aaldas_v5(const struct pt_regs *regs,
e2k_aalda_t *aaldas, struct e2k_aau_context *context);
extern void calculate_aau_aaldis_aaldas_v6(const struct pt_regs *regs,
e2k_aalda_t *aaldas, struct e2k_aau_context *context);
extern void do_aau_fault_v2(int aa_field, struct pt_regs *regs);
extern void do_aau_fault_v3(int aa_field, struct pt_regs *regs);
extern void do_aau_fault_v5(int aa_field, struct pt_regs *regs);
extern void do_aau_fault_v6(int aa_field, struct pt_regs *regs);
extern void save_aaldi_v2(u64 *aaldis);
extern void save_aaldi_v3(u64 *aaldis);
extern void save_aaldi_v5(u64 *aaldis);
extern void get_aau_context_v2(struct e2k_aau_context *, e2k_aasr_t);
extern void get_aau_context_v3(struct e2k_aau_context *, e2k_aasr_t);
extern void get_aau_context_v5(struct e2k_aau_context *, e2k_aasr_t);
extern void flushts_v3(void);
extern unsigned long boot_native_read_IDR_reg_value(void);
unsigned long rrd_v2(int);
unsigned long rrd_v3(int);
unsigned long rrd_v6(int);
void rwd_v2(int reg, unsigned long value);
void rwd_v3(int reg, unsigned long value);
void rwd_v6(int reg, unsigned long value);
unsigned long boot_rrd_v2(int);
unsigned long boot_rrd_v3(int);
unsigned long boot_rrd_v6(int);
void boot_rwd_v2(int reg, unsigned long value);
void boot_rwd_v3(int reg, unsigned long value);
void boot_rwd_v6(int reg, unsigned long value);
@ -740,14 +640,12 @@ enum {
E2K_REG_OSCUIR,
};
u64 native_get_cu_hw1_v2(void);
u64 native_get_cu_hw1_v3(void);
u64 native_get_cu_hw1_v5(void);
void native_set_cu_hw1_v2(u64);
void native_set_cu_hw1_v3(u64);
void native_set_cu_hw1_v5(u64);
void invalidate_MLT_v2(void);
void invalidate_MLT_v3(void);
void get_and_invalidate_MLT_context_v2(struct e2k_mlt *mlt_state);
void get_and_invalidate_MLT_context_v3(struct e2k_mlt *mlt_state);
void get_and_invalidate_MLT_context_v6(struct e2k_mlt *mlt_state);
@ -756,7 +654,7 @@ void native_clock_off_v3(void);
void native_clock_on_v3(int cpu);
#endif
void C1_enter_v2(void);
void C1_enter_v3(void);
void C1_enter_v6(void);
void C3_enter_v3(void);
void C3_enter_v6(void);
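These _v3/_v5/_v6 externs are the per-iset implementations behind the machdep function pointers shown earlier in this diff; with the v2 (ES2) variants removed, v3 becomes the baseline. Boot code would bind them by instruction-set version, roughly as in this sketch (the setup function is illustrative, and E2K_ISET_V5 is assumed by analogy with the E2K_ISET_V3/V6 constants used above):

/* Illustrative per-iset binding of two machdep callbacks. */
static void setup_gregs_machdep(struct machdep *machine, int iset_ver)
{
	if (iset_ver >= E2K_ISET_V5) {
		machine->save_gregs_dirty_bgr = save_gregs_dirty_bgr_v5;
		machine->restore_gregs = restore_gregs_v5;
	} else {
		machine->save_gregs_dirty_bgr = save_gregs_dirty_bgr_v3;
		machine->restore_gregs = restore_gregs_v3;
	}
}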

View File

@ -3,11 +3,9 @@
#include <asm/cpu_regs_types.h>
#include <asm/types.h>
#include <asm/es2.h>
#define NATIVE_MLT_SIZE (machine.mlt_size)
#define NATIVE_MAX_MLT_SIZE ES2_MLT_SIZE
#define NATIVE_MLT_SIZE 16
#define REG_MLT_N_SHIFT 7
#define REG_MLT_DW_SHIFT 5
@ -18,7 +16,7 @@
typedef unsigned long e2k_mlt_line_t;
typedef struct e2k_mlt_dw0_v2_fields
typedef struct e2k_mlt_dw0_v3_fields
{
e2k_mlt_line_t resc : 4; /* [3:0] */
e2k_mlt_line_t mask : 8; /* [11:4] */
@ -29,7 +27,7 @@ typedef struct e2k_mlt_dw0_v2_fields
e2k_mlt_line_t hit : 1; /* [52] */
e2k_mlt_line_t val : 1; /* [53] */
e2k_mlt_line_t unresolved : 10; /* [63:54] */
} e2k_mlt_dw0_v2_fields_t;
} e2k_mlt_dw0_v3_fields_t;
typedef struct e2k_mlt_dw0_v6_fields
{
@ -45,7 +43,7 @@ typedef struct e2k_mlt_dw0_v6_fields
/* One reg (string) in MLT table */
typedef struct e2k_mlt_entry {
union {
e2k_mlt_dw0_v2_fields_t v2_fields;
e2k_mlt_dw0_v3_fields_t v3_fields;
e2k_mlt_dw0_v6_fields_t v6_fields;
e2k_mlt_line_t word;
} dw0;
@ -60,8 +58,8 @@ typedef struct e2k_mlt_entry {
} e2k_mlt_entry_t;
typedef struct e2k_mlt {
int num; /* number of entries in the MLT */
e2k_mlt_entry_t mlt[NATIVE_MAX_MLT_SIZE]; /* valid MLT entries */
int num; /* number of entries in the MLT */
e2k_mlt_entry_t mlt[NATIVE_MLT_SIZE]; /* valid MLT entries */
} e2k_mlt_t;
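Each captured MLT line overlays the raw 64-bit word with a version-specific bitfield view, so decoding just selects the union member that matches the CPU. A small sketch of walking a v3-format dump (it assumes the structure was already filled by one of the get_and_invalidate_MLT_context_* routines):

/* Sketch: count the captured lines of a v3-format MLT dump that hit. */
static int mlt_count_hits_v3(const e2k_mlt_t *mlt)
{
	int i, hits = 0;

	for (i = 0; i < mlt->num; i++)
		if (mlt->mlt[i].dw0.v3_fields.hit)
			hits++;
	return hits;
}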
#define NATIVE_READ_MLT_REG(addr) \

View File

@ -127,6 +127,7 @@ extern enum exec_mmu_ret execute_mmu_operations(trap_cellar_t *tcellar,
struct pt_regs *regs),
enum exec_mmu_ret (*calculate_rf_frame)(struct pt_regs *regs,
tc_cond_t cond, u64 **radr,
bool *load_to_rf));
bool *load_to_rf),
bool priv_user);
#endif /* _E2K_MMAN_H_ */

View File

@ -1,161 +0,0 @@
/*
* E2K ISET V2-V5 MMU structure and common definitions.
*
* Copyright 2018 (c) MCST, Salavat S. Guiliazov (atic@mcst.ru)
*/
#ifndef _ASM_E2K_MMU_REGS_TYPES_V2_H
#define _ASM_E2K_MMU_REGS_TYPES_V2_H
/* Avoid header dependency loop of probe_entry_t and DTLB_ENTRY_PH_BOUND_V2 */
#ifndef _E2K_TLB_REGS_TYPES_H_
# error Do not include <asm/mmu-regs-types-v2.h> directly, use <asm/tlb_regs_types.h> instead
#endif
/*
* This file contains the functions and defines necessary to modify and
* use the E2K ISET V2-V5 page tables.
* NOTE: E2K has four levels of page tables.
*/
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/bug.h>
#include <asm/mmu_types.h>
/*
* DTLB entry probe format
*/
#define DTLB_ENTRY_ERROR_MASK_V2 0xbe00000000000000ULL
#define DTLB_ENTRY_PH_BOUND_V2 0x8000000000000000ULL
#define DTLB_ENTRY_ILLEGAL_PAGE_V2 0x4000000000000000ULL
#define DTLB_ENTRY_PAGE_MISS_V2 0x2000000000000000ULL
#define DTLB_ENTRY_PROBE_DISABLED_V2 0x0400000000000000ULL
#define DTLB_ENTRY_RES_BITS_V2 0x0200000000000000ULL
#define DTLB_ENTRY_MISS_LEVEL_MASK_V2 0x1800000000000000ULL
#define DTLB_ENTRY_WR_V2 0x0000000000000002ULL
#define DTLB_ENTRY_NON_EX_U_S_V2 0x0000000000000004ULL
#define DTLB_ENTRY_PWT_V2 0x0000000000000008ULL
#define DTLB_ENTRY_PCD1_V2 0x0000000000000010ULL
#define DTLB_ENTRY_D_V2 0x0000000000000040ULL
#define DTLB_ENTRY_G_V2 0x0000000000000100ULL
#define DTLB_ENTRY_PCD2_V2 0x0000000000000200ULL
#define DTLB_ENTRY_NWA_V2 0x0000000000000400ULL
#define DTLB_ENTRY_PHA_V2 0x000000fffffff000ULL /* phys address */
#define DTLB_ENTRY_VVA_V2 0x0000010000000000ULL /* VVA bit */
#define DTLB_ENTRY_PV_V2 0x0000020000000000ULL
#define DTLB_ENTRY_INT_PR_NON_EX_V2 0x0000040000000000ULL
#define DTLB_ENTRY_INTL_RD_V2 0x0000200000000000ULL
#define DTLB_ENTRY_INTL_WR_V2 0x0000400000000000ULL
#define DTLB_ENTRY_WP_V2 0x0000800000000000ULL
#define DTLB_ENTRY_UC_V2 0x0001000000000000ULL
/* MPT flags for 2/4Mb & 1Gb pages [46:45] */
#define DTLB_ENTRY_MPT_FLAGS_V2 0x0000600000000000ULL
#define DTLB_EP_RES_V2 0x0001ffffffffffffULL
#define DTLB_EP_FAULT_RES_V2 (~DTLB_EP_RES_V2)
/*
* DTLB address probe result format
*/
#define PH_ADDR_AP_RES_V2 0x000000ffffffffffULL /* Physical address */
/* normal result of */
/* AP [39: 0] */
#define DISABLE_AP_RES_V2 DISABLE_EP_RES_V2 /* AP disable result */
/* [62] */
#define ILLEGAL_PAGE_AP_RES_V2 ILLEGAL_PAGE_EP_RES_V2 /* illegal page */
/* [58] */
/* convert physical address to page frame number for DTLB */
#define PA_TO_DTLB_ENTRY_PHA_V2(phys_addr) \
(((e2k_addr_t)phys_addr) & DTLB_ENTRY_PHA_V2)
/* convert the page frame number from DTLB entry to physical address */
#define DTLB_ENTRY_PHA_TO_PA_V2(dtlb_entry) \
((e2k_addr_t)(dtlb_entry) & DTLB_ENTRY_PHA_V2)
static inline probe_entry_t
covert_uni_dtlb_flags_to_dtlb_val_v2(const uni_dtlb_t uni_flags)
{
probe_entry_t dtlb_flags = 0;
if (uni_flags & UNI_PAGE_WRITE)
dtlb_flags |= (DTLB_ENTRY_WR_V2);
if (uni_flags & UNI_PAGE_PRIV)
dtlb_flags |= (DTLB_ENTRY_PV_V2);
if (uni_flags & UNI_PAGE_VALID)
dtlb_flags |= (DTLB_ENTRY_VVA_V2);
if (uni_flags & UNI_PAGE_PROTECT)
dtlb_flags |= (DTLB_ENTRY_INT_PR_NON_EX_V2);
if (uni_flags & UNI_PAGE_GLOBAL)
dtlb_flags |= (DTLB_ENTRY_G_V2);
if (uni_flags & UNI_PAGE_DIRTY)
dtlb_flags |= (DTLB_ENTRY_D_V2);
if (uni_flags & UNI_PAGE_NWA)
dtlb_flags |= (DTLB_ENTRY_NWA_V2);
if (uni_flags & UNI_PAGE_MEM_TYPE)
dtlb_flags |= (DTLB_ENTRY_PCD1_V2 | DTLB_ENTRY_PCD2_V2 |
DTLB_ENTRY_PWT_V2);
if (uni_flags & UNI_PAGE_NON_EX)
dtlb_flags |= (DTLB_ENTRY_NON_EX_U_S_V2);
if (uni_flags & UNI_PAGE_PFN)
dtlb_flags |= (DTLB_ENTRY_PHA_V2);
if (uni_flags & UNI_PAGE_MEM_TYPE_MA)
dtlb_flags |= (DTLB_ENTRY_PCD1_V2 | DTLB_ENTRY_PCD2_V2 |
DTLB_ENTRY_PWT_V2);
if (uni_flags & UNI_PAGE_WRITE_INT)
dtlb_flags |= (DTLB_ENTRY_WP_V2);
if (uni_flags & UNI_PAGE_INTL_RD)
dtlb_flags |= (DTLB_ENTRY_INTL_RD_V2);
if (uni_flags & UNI_PAGE_INTL_WR)
dtlb_flags |= (DTLB_ENTRY_INTL_WR_V2);
if (uni_flags & UNI_DTLB_EP_RES)
dtlb_flags |= (DTLB_EP_RES_V2);
if (uni_flags & UNI_DTLB_PH_ADDR_AP_RES)
dtlb_flags |= (PH_ADDR_AP_RES_V2);
if (uni_flags & UNI_DTLB_ERROR_MASK)
dtlb_flags |= (DTLB_ENTRY_ERROR_MASK_V2);
if (uni_flags & UNI_DTLB_MISS_LEVEL)
dtlb_flags |= (DTLB_ENTRY_MISS_LEVEL_MASK_V2);
if (uni_flags & UNI_DTLB_SUCCESSFUL)
dtlb_flags |= (DTLB_ENTRY_PROBE_DISABLED_V2);
if (uni_flags & UNI_DTLB_RES_BITS)
dtlb_flags |= (DTLB_ENTRY_RES_BITS_V2);
BUILD_BUG_ON(dtlb_flags == 0);
return dtlb_flags;
}
static inline probe_entry_t
fill_dtlb_val_v2_flags(const uni_dtlb_t uni_flags)
{
return covert_uni_dtlb_flags_to_dtlb_val_v2(uni_flags);
}
static inline probe_entry_t
get_dtlb_val_v2_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags)
{
return dtlb_val & covert_uni_dtlb_flags_to_dtlb_val_v2(uni_flags);
}
static inline bool
test_dtlb_val_v2_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags)
{
return get_dtlb_val_v2_flags(dtlb_val, uni_flags) != 0;
}
static inline probe_entry_t
set_dtlb_val_v2_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags)
{
return dtlb_val | covert_uni_dtlb_flags_to_dtlb_val_v2(uni_flags);
}
static inline probe_entry_t
clear_dtlb_val_v2_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags)
{
return dtlb_val & ~covert_uni_dtlb_flags_to_dtlb_val_v2(uni_flags);
}
#endif /* ! __ASSEMBLY__ */
#endif /* ! _ASM_E2K_MMU_REGS_TYPES_V2_H */
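Although this V2-V5 header is removed by the commit, the pattern it implemented carries over to the surviving variants: a unified flag vocabulary (UNI_PAGE_*/UNI_DTLB_*) is translated once into the iset-specific DTLB bit layout, and set/test/clear operations are built on that translation. Usage of the deleted helpers looked roughly like this sketch:

/* Sketch: composing and testing a v2 DTLB probe value through the
 * (now removed) unified-flag helpers defined above. */
probe_entry_t val = 0;

val = set_dtlb_val_v2_flags(val, UNI_PAGE_WRITE | UNI_PAGE_VALID);
if (test_dtlb_val_v2_flags(val, UNI_PAGE_VALID))
	val = clear_dtlb_val_v2_flags(val, UNI_PAGE_WRITE);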

Some files were not shown because too many files have changed in this diff