linux-headers-5.4.0-2.9
parent d99c8c0484
commit f99e7af53c
Makefile | 58
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 58
-EXTRAVERSION = -2.3
+SUBLEVEL = 91
+EXTRAVERSION = -2.9
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -394,8 +394,13 @@ HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
 HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
 HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
 
+ifneq ($(LLVM),)
+HOSTCC = clang
+HOSTCXX = clang++
+else
 HOSTCC = gcc
 HOSTCXX = g++
+endif
 KBUILD_HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 \
 -fomit-frame-pointer -std=gnu89 $(HOST_LFS_CFLAGS) \
 $(HOSTCFLAGS)
@@ -404,39 +409,56 @@ KBUILD_HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS)
 KBUILD_HOSTLDLIBS := $(HOST_LFS_LIBS) $(HOSTLDLIBS)
 
 # Make variables (CC, etc...)
-CC = $(CROSS_COMPILE)gcc
 CPP = $(CC) -E
+ifneq ($(LLVM),)
+CC = clang
+LD = ld.lld
+AR = llvm-ar
+NM = llvm-nm
+OBJCOPY = llvm-objcopy
+OBJDUMP = llvm-objdump
+READELF = llvm-readelf
+OBJSIZE = llvm-size
+STRIP = llvm-strip
+else
+CC = $(CROSS_COMPILE)gcc
 ifeq ($(call cc-lcc-yn),y)
-AS := $(shell $(CC) -print-prog-name=as)
 LD := $(shell $(CC) -print-prog-name=ld)
 AR := $(shell $(CC) -print-prog-name=ar)
 NM := $(shell $(CC) -print-prog-name=nm)
-STRIP := $(shell $(CC) -print-prog-name=strip)
 OBJCOPY := $(shell $(CC) -print-prog-name=objcopy)
 OBJDUMP := $(shell $(CC) -print-prog-name=objdump)
+READELF := $(shell $(CC) -print-prog-name=readelf)
 OBJSIZE := $(shell $(CC) -print-prog-name=size)
+STRIP := $(shell $(CC) -print-prog-name=strip)
 else
-AS = $(CROSS_COMPILE)as
 LD = $(CROSS_COMPILE)ld
 AR = $(CROSS_COMPILE)ar
 NM = $(CROSS_COMPILE)nm
-STRIP = $(CROSS_COMPILE)strip
 OBJCOPY = $(CROSS_COMPILE)objcopy
 OBJDUMP = $(CROSS_COMPILE)objdump
+READELF = $(CROSS_COMPILE)readelf
 OBJSIZE = $(CROSS_COMPILE)size
+STRIP = $(CROSS_COMPILE)strip
+endif
 endif
 PAHOLE = pahole
 LEX = flex
 YACC = bison
 AWK = awk
 INSTALLKERNEL := installkernel
-DEPMOD = /sbin/depmod
+DEPMOD = depmod
 PERL = perl
 PYTHON = python
-PYTHON2 = python2
 PYTHON3 = python3
 CHECK = sparse
 BASH = bash
+KGZIP = gzip
+KBZIP2 = bzip2
+KLZOP = lzop
+LZMA = lzma
+LZ4 = lz4c
+XZ = xz
 
 CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
 -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF)
@@ -475,7 +497,7 @@ ifeq ($(call cc-lcc-yn),y)
 KBUILD_CFLAGS := -Wall -Wundef -Werror=strict-prototypes -Wno-trigraphs \
 -fno-strict-aliasing -fno-common -fno-PIE \
 -Werror=implicit-function-declaration -Werror=implicit-int \
--Wno-format-security \
+-Werror=return-type -Wno-format-security \
 -std=gnu89
 else
 KBUILD_CFLAGS := -Wall -Wundef -Werror=strict-prototypes -Wno-trigraphs \
@@ -498,9 +520,10 @@ KBUILD_LDFLAGS :=
 GCC_PLUGINS_CFLAGS :=
 CLANG_FLAGS :=
 
-export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
-export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE PAHOLE LEX YACC AWK INSTALLKERNEL
-export PERL PYTHON PYTHON2 PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
+export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
+export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL
+export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
+export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ
 export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
 
 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
@@ -561,7 +584,7 @@ endif
 ifneq ($(GCC_TOOLCHAIN),)
 CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN)
 endif
-ifeq ($(shell $(AS) --version 2>&1 | head -n 1 | grep clang),)
+ifneq ($(LLVM_IAS),1)
 CLANG_FLAGS += -no-integrated-as
 endif
 CLANG_FLAGS += -Werror=unknown-warning-option
@@ -811,8 +834,11 @@ DEBUG_CFLAGS += -gsplit-dwarf
 else
 DEBUG_CFLAGS += -g
 endif
+ifneq ($(LLVM_IAS),1)
 KBUILD_AFLAGS += -Wa,-gdwarf-2
 endif
+endif
 
 ifdef CONFIG_DEBUG_INFO_DWARF4
 DEBUG_CFLAGS += -gdwarf-4
 endif
@@ -1018,10 +1044,10 @@ export mod_strip_cmd
 mod_compress_cmd = true
 ifdef CONFIG_MODULE_COMPRESS
 ifdef CONFIG_MODULE_COMPRESS_GZIP
-mod_compress_cmd = gzip -n -f
+mod_compress_cmd = $(KGZIP) -n -f
 endif # CONFIG_MODULE_COMPRESS_GZIP
 ifdef CONFIG_MODULE_COMPRESS_XZ
-mod_compress_cmd = xz -f
+mod_compress_cmd = $(XZ) -f
 endif # CONFIG_MODULE_COMPRESS_XZ
 endif # CONFIG_MODULE_COMPRESS
 export mod_compress_cmd

@@ -17,7 +17,7 @@ LD = $(shell $(CC) -print-prog-name=ld)
 OBJCOPY = $(shell $(CC) -print-prog-name=objcopy)
 
 KBUILD_CFLAGS += -fkernel -gline -masm-inline $(call cc-option,-fforbid-fp) \
-$(call cc-option,-fmax-errors=5)
+$(call cc-option,-fmax-errors=5) $(call cc-option,-fno-loop-apb)
 
 ifeq ($(PROFILE_GENERATE), 1)
 KBUILD_CFLAGS += -fprofile-generate-kernel
@@ -146,7 +146,6 @@ drivers-$(CONFIG_PM) += arch/e2k/power/
 
 #KVM hypervisor and guest support
 core-$(CONFIG_KVM) += arch/e2k/kvm/
-core-$(CONFIG_KVM_GUEST) += arch/e2k/kvm/guest/
 
 # Elbrus common modules
 core-y += arch/l/

@@ -30,9 +30,9 @@
 #define I2C_SPI_DATA_AREA_SIZE 0x40
 
 #define I2C_SPI_DEFAULT_IRQ 23
-#define I2C_MAX_ADAPTERS_PER_CONTROLLER 5
 
-#define I2C_MAX_BUSSES I2C_MAX_ADAPTERS_PER_CONTROLLER
+#define I2C_MAX_BUSSES 5
+#define I2C_DST_BUSSES 4
 
 #ifdef CONFIG_E2K
 extern int iohub_i2c_line_id;

@@ -61,21 +61,32 @@ typedef struct iohub_sysdata {
 int link; /* local number of IO link on the node */
 #endif /* CONFIG_IOHUB_DOMAINS */
 u32 pci_msi_addr_lo; /* MSI transaction address */
-u32 pci_msi_addr_hi; /* MSI transaction upper address */
-u8 revision; /* IOHUB revision */
-u8 generation; /* IOHUB generation */
+u32 pci_msi_addr_hi;/* MSI transaction upper address */
+/*IOHUB can be connected to EIOHUB and vice versa */
+bool has_iohub;
+u8 iohub_revision; /* IOHUB revision */
+u8 iohub_generation; /* IOHUB generation */
+bool has_eioh;
+u8 eioh_generation; /* EIOHUB generation */
+u8 eioh_revision; /* EIOHUB revision */
 
-struct resource mem_space;
+struct resource mem_space; /* pci registers memory */
 void *l_iommu;
 } iohub_sysdata_t;
 
+bool l_eioh_device(struct pci_dev *pdev);
+
 #define iohub_revision(pdev) ({ \
 struct iohub_sysdata *sd = pdev->bus->sysdata; \
-(sd->revision >> 1); \
+u8 rev = l_eioh_device(pdev) ? sd->eioh_revision : \
+sd->iohub_revision; \
+(rev >> 1); \
 })
 
 #define iohub_generation(pdev) ({ \
 struct iohub_sysdata *sd = pdev->bus->sysdata; \
-sd->generation; \
+(l_eioh_device(pdev) ? sd->eioh_generation : \
+sd->iohub_generation); \
 })
 
 #ifdef CONFIG_IOHUB_DOMAINS

@@ -233,6 +233,13 @@ native_set_aau_context(e2k_aau_t *context)
 ({ \
 regs = native_read_aafstr_reg_value(); \
 })
 
+static __always_inline void
+set_aau_context(e2k_aau_t *context)
+{
+native_set_aau_context(context);
+}
+
 #endif /* CONFIG_KVM_GUEST_KERNEL */
 
 /*

@@ -651,6 +651,8 @@ static inline void write_aaldv_reg(e2k_aaldv_t *aaldv)
 native_write_aaldv_reg(aaldv);
 }
 
+#define clear_apb() native_clear_apb()
+
 #ifdef CONFIG_USE_AAU
 # define SAVE_AAU_REGS_FOR_PTRACE(__regs, ti) \
 NATIVE_SAVE_AAU_REGS_FOR_PTRACE(__regs, ti)

@@ -50,11 +50,12 @@ typedef struct icache_range_array {
 struct mm_struct *mm;
 } icache_range_array_t;
 
-extern void __flush_icache_all(void);
+extern void native_flush_icache_all(void);
 extern void native_flush_icache_range(e2k_addr_t start, e2k_addr_t end);
-extern void __flush_icache_range_array(
+extern void native_flush_icache_range_array(
 icache_range_array_t *icache_range_arr);
-extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page);
+extern void native_flush_icache_page(struct vm_area_struct *vma,
+struct page *page);
 
 #ifndef CONFIG_SMP
 #define flush_icache_all() __flush_icache_all()
@@ -62,18 +63,18 @@ extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #define flush_icache_range_array __flush_icache_range_array
 #define flush_icache_page(vma, page) __flush_icache_page(vma, page)
 
+#define smp_flush_icache_all()
 #define native_smp_flush_icache_range(start, end)
 #define native_smp_flush_icache_range_array(icache_range_arr)
 #define native_smp_flush_icache_page(vma, page)
-#define native_smp_flush_icache_all()
 #define native_smp_flush_icache_kernel_line(addr)
 #else /* CONFIG_SMP */
+extern void smp_flush_icache_all(void);
 extern void native_smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end);
 extern void native_smp_flush_icache_range_array(
 icache_range_array_t *icache_range_arr);
 extern void native_smp_flush_icache_page(struct vm_area_struct *vma,
 struct page *page);
-extern void native_smp_flush_icache_all(void);
 extern void native_smp_flush_icache_kernel_line(e2k_addr_t addr);
 
 #define flush_icache_all() smp_flush_icache_all()
@@ -176,20 +177,32 @@ smp_flush_icache_page(struct vm_area_struct *vma, struct page *page)
 native_smp_flush_icache_page(vma, page);
 }
 static inline void
-smp_flush_icache_all(void)
-{
-native_smp_flush_icache_all();
-}
-static inline void
 smp_flush_icache_kernel_line(e2k_addr_t addr)
 {
 native_smp_flush_icache_kernel_line(addr);
 }
 
+static inline void
+__flush_icache_all(void)
+{
+native_flush_icache_all();
+}
 static inline void
 __flush_icache_range(e2k_addr_t start, e2k_addr_t end)
 {
 native_flush_icache_range(start, end);
 }
+static inline void
+__flush_icache_range_array(icache_range_array_t *icache_range_arr)
+{
+native_flush_icache_range_array(icache_range_arr);
+}
+static inline void
+__flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+native_flush_icache_page(vma, page);
+}
+
 static inline void
 flush_DCACHE_range(void *addr, size_t len)
 {

@@ -208,7 +208,8 @@ struct compat_shmid64_ds {
 
 static inline int is_compat_task(void)
 {
-return current->thread.flags & E2K_FLAG_32BIT;
+return (TASK_IS_PROTECTED(current)) ? 0 :
+(current->thread.flags & E2K_FLAG_32BIT);
 }
 
 #endif /* _ASM_E2K_COMPAT_H */

@@ -13,44 +13,56 @@
 
 #ifdef CONFIG_PROTECTED_MODE
 
-#define convert_array(prot_array, new_array, max_prot_array_size, fields, \
+/* New mask format: 4 bits per structure field */
+#define get_pm_struct_simple(struct128, struct64, \
+max_prot_array_size, fields, \
 items, mask_type, mask_align) \
-convert_array_3(prot_array, new_array, max_prot_array_size, fields, \
+get_pm_struct(struct128, struct64, \
+max_prot_array_size, fields, \
 items, mask_type, mask_align, 0, 0)
 
-extern int convert_array_3(long __user *prot_array, long *new_array,
-const int max_prot_array_size, const int fields,
+extern int get_pm_struct(long __user *struct128,
+long *struct64,
+const int max_prot_array_size, const int fieldnum,
 const int items, const long mask_type,
 const long mask_align, const long mask_rw,
 const int rval_mode);
 /*
- * Converts the given array of structures, which can contain
+ * Converts protected structure (array of structures), which can contain
 * protected user pointers to memory, function descriptors, and int values.
- * prot_array - pointer to the original (user-space) array
- * new_array - pointer to area where to put converted array
- * max_prot_array_size - the maximum size, which prot_array can occupy
- * fileds - number of enries in each element
- * items - number of identical elements in the array to convert
- * mask_type - mask for encoding of field type in each element:
- * 2 bits per each entry:
- * --- 00 (0x0) - int
- * --- 01 (0x1) - long
- * --- 10 (0x2) - pointer to function
- * --- 11 (0x3) - pointer to memory (descriptor)
- * mask_align - mask for encoding of alignment of the NEXT! field
- * 2 bits per each entry:
+ * struct128 - pointer to the protected (user-space) structure (128 bit).
+ * struct64 - pointer to allocated area where to put converted structure.
+ * max_prot_array_size - estimated maximum size, which struct128 occupies
+ * filednum - number of fields in the given structure.
+ * items - number of elements (structures) in array (items == array size)
+ * if 'struct128' is array of structures to be converted.
+ * mask_type - mask for encoding structure field types:
+ * (4 bits per each entry):
+ * --- 0000 (0x0) - int
+ * --- 0001 (0x1) - long
+ * --- 0010 (0x2) - Fptr (pointer to function)
+ * --- 0011 (0x3) - descriptor (pointer to memory)
+ * --- 0100 (0x4) - descriptor or int
+ * --- 0101 (0x5) - descriptor or long
+ * --- 0110 (0x6) - descriptor or Fptr
+ * --- 0111 (0x7) - everything is possible (i/P/F)
+ * --- 1*** (0x8) - may be uninitialized (empty tag allowed)
+ * mask_align - mask for encoding alignment of the NEXT (!!!) structure field;
+ * for example, bits #0-3 code alignment of the 2nd structure field
+ * (4 bits per each entry):
 * --- 00 (0x0) - next field aligned as int (to 4 bytes)
 * --- 01 (0x1) - next field aligned as long (to 8 bytes)
 * --- 10 (0x2) - not used yet
 * --- 11 (0x3) - next field aligned as pointer (to 16 bytes)
- * mask_rw - mask for encoding access type of the structure elements
- * 2 bits per each entry:
+ * mask_rw - mask for encoding access type of structure fields
+ * (4 bits per each entry):
 * --- 01 (0x1) - the field's content gets read by syscall (READ-able)
 * --- 02 (0x2) - the field's content gets updated by syscall (WRITE-able)
 * --- 11 (0x3) - the field is both READ-able and WRITE-able
 * --- 00 (0x0) - default type; the same as (READ-able)
 * rval_mode - error (return value) reporting mode mask:
- * 0 - report only critical problems in prot_array structure;
+ * 0 - report only critical problems in struct128 structure;
 * 1 - return with -EFAULT if wrong tag in 'int' field;
 * 2 - --'-- --'-- 'long' field;
 * 4 - --'-- --'-- 'func' field;
@@ -63,6 +75,7 @@ extern int convert_array_3(long __user *prot_array, long *new_array,
 * error number - otherwise.
 */
 
+
 #define CONV_ARR_WRONG_INT_FLD 1
 #define CONV_ARR_WRONG_LONG_FLD 2
 #define CONV_ARR_WRONG_FUNC_FLD 4
@@ -74,13 +87,11 @@ extern int convert_array_3(long __user *prot_array, long *new_array,
 #define CONV_ARR_IGNORE_DSCR_FLD_ERR 128
 
-
-extern int check_args_array(const long __user *args_array,
+extern int check_args_array4(const long __user *args_array,
 const long tags,
 const int arg_num,
 const long mask_type,
-const int rval_mode,
-const char *ErrMsgHeader);
+const int rval_mode, const char *ErrMsgHeader);
 
 /*
 * This function checks protected syscall arguments on correspondence with
 * the given mask:
@@ -88,11 +99,14 @@ extern int check_args_array(const long __user *args_array,
 * tags - argument tags (4 bits per arg; lower to higher bits ordered)
 * arg_num - number of arguments
 * mask_type - mask for encoding of field type in each element
- * 2 bits per each entry:
- * --- 00 (0x0) - int
- * --- 01 (0x1) - long
- * --- 10 (0x2) - pointer to function
- * --- 11 (0x3) - pointer to memory.
+ * 4 bits per each entry:
+ * --- 0000 (0x0) - int
+ * --- 0001 (0x1) - long
+ * --- 0010 (0x2) - pointer to function
+ * --- 0011 (0x3) - pointer to memory
+ * --- 0100 (0x4) - descriptor or int
+ * --- 0101 (0x5) - descriptor or long
+ * --- 1*** (0x8) - may be uninitialized (empty tag allowed)
 * rval_mode - error (return value) reporting mode mask:
 * 0 - report only critical problems;
 * 1 - return with -EFAULT if wrong tag in 'int' field;
@@ -107,6 +121,31 @@ extern int check_args_array(const long __user *args_array,
 * error number - otherwise.
 */
 
+
+/* This function realizes compact mask format: 2 bits per structure field */
+extern int convert_array_3(long __user *prot_array, long *new_array,
+const int max_prot_array_size, const int fields,
+const int items, unsigned long mask_type,
+unsigned long mask_align, unsigned long mask_rw,
+const int rval_mode);
+
+/* This function realizes compact mask format: 2 bits per structure field */
+extern int check_args_array(const long __user *args_array,
+const long tags,
+const int arg_num,
+unsigned long mask_type,
+const int rval_mode,
+const char *ErrMsgHeader);
+
+
+/* This is deprecated. Not reconnemded to use.
+ * Old mask format: 2 bits per structure field
+ */
+#define convert_array(prot_array, new_array, max_prot_array_size, fields, \
+items, mask_type, mask_align) \
+convert_array_3(prot_array, new_array, max_prot_array_size, fields, \
+items, mask_type, mask_align, 0, 0)
+
 #else
 # define convert_array(...) 0
 #endif /* CONFIG_PROTECTED_MODE */

@@ -274,10 +274,14 @@
 NATIVE_NV_WRITE_USBR_REG_VALUE(USBR_value)
 #define WRITE_SBR_REG_VALUE(SBR_value) \
 NATIVE_NV_WRITE_SBR_REG_VALUE(SBR_value)
+#define NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo) \
+NATIVE_NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo)
 #define BOOT_WRITE_USBR_REG_VALUE(USBR_value) \
 NATIVE_NV_WRITE_USBR_REG_VALUE(USBR_value)
 #define BOOT_WRITE_SBR_REG_VALUE(SBR_value) \
 NATIVE_NV_WRITE_SBR_REG_VALUE(SBR_value)
+#define BOOT_NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo) \
+NATIVE_NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo)
 
 /*
 * Read/write double-word Window Descriptor Register (WD)

@@ -9,11 +9,11 @@
 #ifndef __ASSEMBLY__
 
 /* E2K physical address definitions */
-#define MAX_PA_SIZE 40 /* E2K physical address size */
-/* (bits number) */
-#define MAX_PA_MSB (MAX_PA_SIZE - 1) /* The number of the most */
-/* significant bit of E2K */
-/* physical address */
+/* E2K physical address size (bits number) */
+#define MAX_PA_SIZE CONFIG_E2K_PA_BITS
+/* The number of the most significant bit of E2K physical address */
+#define MAX_PA_MSB (MAX_PA_SIZE - 1)
 #define MAX_PA_MASK ((1UL << MAX_PA_SIZE) - 1)
 #define MAX_PM_SIZE (1UL << MAX_PA_SIZE)
 
@@ -2356,23 +2356,23 @@ typedef struct e2k_svd_gregs_struct {
 } e2k_svd_gregs_t;
 
 /* CU_HW0 register */
-#define _CU_HW0_TRWM_ITAG_MASK 0x00000007 /* IB tag */
-#define _CU_HW0_TRWM_IDATA_MASK 0x00000038 /* IB data */
-#define _CU_HW0_TRWM_CF_MASK 0x000001c0 /* Chain File */
-/* Disable IB snooping */
-#define _CU_HW0_IB_SNOOP_DISABLE_MASK 0x00000200
-#define _CU_HW0_BIST_CF_MASK 0x00000400 /* Chain File */
-#define _CU_HW0_BIST_TU_MASK 0x00000800 /* Trap Unit */
-#define _CU_HW0_BIST_ITAG_MASK 0x00001000 /* IB tag */
-#define _CU_HW0_BIST_ITLB_TAG_MASK 0x00002000 /* ITLB tag */
-#define _CU_HW0_BIST_ITLB_DATA_MASK 0x00004000 /* ITLB data */
-#define _CU_HW0_BIST_IDATA_NM_MASK 0x00078000 /* IB data */
-#define _CU_HW0_BIST_IDATA_CNT_MASK 0x1ff80000 /* IB tag */
-#define _CU_HW0_PIPE_FROST_DISABLE_MASK 0x20000000 /* Instruction pipe */
-#define _CU_HW0_RF_CLEAN_DISABLE_MASK 0x40000000 /* Register File */
-/* Disable hardware virtualization support */
-#define _CU_HW0_VIRT_DISABLE_MASK 0x80000000
+#define _CU_HW0_TRWM_ITAG_MASK 0x000000007 /* IB tag */
+#define _CU_HW0_TRWM_IDATA_MASK 0x000000038 /* IB data */
+#define _CU_HW0_TRWM_CF_MASK 0x0000001c0 /* Chain File */
+#define _CU_HW0_IB_SNOOP_DISABLE_MASK 0x000000200 /* Disable IB snooping */
+#define _CU_HW0_BIST_CF_MASK 0x000000400 /* Chain File */
+#define _CU_HW0_BIST_TU_MASK 0x000000800 /* Trap Unit */
+#define _CU_HW0_BIST_ITAG_MASK 0x000001000 /* IB tag */
+#define _CU_HW0_BIST_ITLB_TAG_MASK 0x000002000 /* ITLB tag */
+#define _CU_HW0_BIST_ITLB_DATA_MASK 0x000004000 /* ITLB data */
+#define _CU_HW0_BIST_IDATA_NM_MASK 0x000078000 /* IB data */
+#define _CU_HW0_BIST_IDATA_CNT_MASK 0x01ff80000 /* IB tag */
+#define _CU_HW0_PIPE_FROST_DISABLE_MASK 0x020000000 /* Instruction pipe */
+#define _CU_HW0_RF_CLEAN_DISABLE_MASK 0x040000000 /* Register File */
+#define _CU_HW0_VIRT_DISABLE_MASK 0x080000000 /* Disable hardware */
+/* virtualization support */
+#define _CU_HW0_UPT_SEC_AD_SHIFT_DSBL_MASK 0x100000000 /* Disable address shift in */
+/* MMU_CR.upt mode */
 
 struct hw_stacks {
 e2k_psp_lo_t psp_lo;

@@ -8,7 +8,12 @@
 
 struct dev_archdata {
 unsigned int link;
-struct e2k_iommu_dev_data iommu;
+#ifdef CONFIG_IOMMU_API
+void *iommu; /* private IOMMU data */
+struct e2k_iommu_domain *domain; /* Domain the device is bound to */
+struct kvm *kvm; /* Virtual machine, to which device is
+* passed */
+#endif
 };
 
 struct pdev_archdata {

@@ -47,8 +47,8 @@ extern void setup_APIC_vector_handler(int vector,
 #define E12C_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM
 #define E12C_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT
 
-#define E12C_SIC_MC_COUNT E8C_SIC_MC_COUNT
-#define E12C_SIC_MC1_ECC E2S_SIC_MC1_ECC
+#define E12C_SIC_MC_SIZE E16C_SIC_MC_SIZE
+#define E12C_SIC_MC_COUNT 2
 
 #define E12C_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE
 

@@ -47,8 +47,8 @@ extern void setup_APIC_vector_handler(int vector,
 #define E16C_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM
 #define E16C_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT
 
-#define E16C_SIC_MC_COUNT E8C_SIC_MC_COUNT
-#define E16C_SIC_MC1_ECC E2S_SIC_MC1_ECC
+#define E16C_SIC_MC_SIZE 0x60
+#define E16C_SIC_MC_COUNT 8
 
 #define E16C_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE
 

@@ -47,8 +47,8 @@ extern void setup_APIC_vector_handler(int vector,
 #define E2C3_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM
 #define E2C3_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT
 
-#define E2C3_SIC_MC_COUNT E8C_SIC_MC_COUNT
-#define E2C3_SIC_MC1_ECC E2S_SIC_MC1_ECC
+#define E2C3_SIC_MC_SIZE E16C_SIC_MC_SIZE
+#define E2C3_SIC_MC_COUNT E12C_SIC_MC_COUNT
 
 #define E2C3_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE
 

@@ -1,14 +1,6 @@
 #ifndef __ASM_E2K_IOMMU_H
 #define __ASM_E2K_IOMMU_H
 
-/*
- * This struct contains device specific data for the IOMMU
- */
-struct e2k_iommu_dev_data {
-struct e2k_iommu_domain *domain; /* Domain the device is bound to */
-struct kvm *kvm; /* Virtual machine, to which device is
-* passed */
-};
 
 extern int iommu_panic_off;
 extern void e2k_iommu_error_interrupt(void);

@@ -1476,43 +1476,6 @@ do { \
 #define NATIVE_WRITE_MAS_D(addr, val, mas) \
 NATIVE_DO_WRITE_MAS(addr, val, mas, __e2k_u64_t, d, 2)
 
-/*
- * Relaxed IO read/write
- *
- * bug #81369: put every UC access into a separate
- * wide instruction to avoid reorderings possible if
- * one access hits in DTLB and another one misses.
- */
-#define IO_READ(_addr, type, size_letter) \
-({ \
-type __ior_val; \
-asm ("{ld" #size_letter " %[addr], %[val]}" \
-: [val] "=r" (__ior_val) \
-: [addr] "m" (*((volatile type *) (_addr))) \
-: "memory"); \
-__ior_val; \
-})
-
-#define IO_WRITE(_addr, _val, type, size_letter) \
-do { \
-asm ("{st" #size_letter " %[addr], %[val]}" \
-: [addr] "=m" (*((volatile type *) (_addr))) \
-: [val] "r" ((type) (_val)) \
-: "memory"); \
-} while (0)
-
-#define IO_READ_B(addr) IO_READ((addr), u8, b)
-#define IO_READ_H(addr) IO_READ((addr), u16, h)
-#define IO_READ_W(addr) IO_READ((addr), u32, w)
-#define IO_READ_D(addr) IO_READ((addr), u64, d)
-
-#define IO_WRITE_B(addr, val) IO_WRITE((addr), (val), u8, b)
-#define IO_WRITE_H(addr, val) IO_WRITE((addr), (val), u16, h)
-#define IO_WRITE_W(addr, val) IO_WRITE((addr), (val), u32, w)
-#define IO_WRITE_D(addr, val) IO_WRITE((addr), (val), u64, d)
-
 /*
 * Read from and write to system configuration registers SIC
 * Now SIC is the same as NBSRs registers
@@ -1719,7 +1682,7 @@ do { \
 u64 val, val_8; \
 u32 __chan = (u32) (_chan); \
 u32 __quadro = (u32) (_quadro); \
-u32 __chan_q = (__quadro) ? __chan : -1; \
+u32 __chan_q = (__quadro) ? __chan : 4; /* Not existent channel - skip */ \
 u64 __opc = (_opc); \
 asm volatile ( \
 "{disp %%ctpr1, qpswitchd_sm\n" \
@@ -2103,7 +2066,7 @@ do { \
 ({ \
 u64 tmp, tmp_ext; \
 u32 __chan = (u32) (_chan); \
-u32 __chan_q = (_quadro) ? __chan : -1; \
+u32 __chan_q = (_quadro) ? __chan : 4; /* Not existent channel - skip */ \
 asm ( "{nop 1\n" \
 " puttagd,2 %[val], %[tag], %[tmp]\n" \
 " puttagd,5,sm %[val_ext], %[tag_ext], %[tmp_ext]\n" \
@@ -2120,7 +2083,7 @@ do { \
 [val] "r" ((u64) (_val)), [val_ext] "r" ((u64) (_val_ext)), \
 [tag] "r" ((u32) (_tag)), [tag_ext] "r" ((u32) (_tag_ext)), \
 [opc] "ir" (_opc), [opc_ext] "ir" (_opc_ext), \
-[chan] "r" ((u32) (__chan)), [chan_q] "r" ((u32) (__chan_q)) \
+[chan] "ir" ((u32) (__chan)), [chan_q] "ir" ((u32) (__chan_q)) \
 : "memory", "pred20", "pred21", "pred22", "pred23"); \
 })
 
@@ -2250,17 +2213,23 @@ do { \
 * quadro: set if this is a non-atomic quadro operation to move 16 bytes
 * vr: set to 0 if we want to preserve the lower 4-byte word
 * (same as vr in cellar)
+ * not_single_byte: set to "false" if we want to write only 1 byte at target
+ * address (i.e. do not clear the whole register we are
+ * writing into). This makes sense when we manually split
+ * the faulting load into a series of 1-byte loads - only
+ * the first one should clear the register then.
 */
 #define NATIVE_MOVE_TAGGED_DWORD_WITH_OPC_CH_VR(_from, _to, _to_hi, _vr, _opc, \
-_chan, _quadro) \
+_chan, _quadro, _not_single_byte) \
 do { \
 u64 prev, val, val_8; \
 u32 __chan = (u32) (_chan); \
 u32 __quadro = (u32) (_quadro); \
-u32 __chan_q = (__quadro) ? __chan : -1; \
+u32 __chan_q = (__quadro) ? __chan : 4 /* Not existent channel - skip */; \
 u64 __opc = (_opc); \
 asm ( "{cmpesb %[quadro], 0, %%pred18\n" \
-" cmpesb %[vr], 0, %%pred19}\n" \
+" cmpesb %[vr], 0, %%pred19\n" \
+" cmpesb %[not_single_byte], 0, %%pred28}\n" \
 "{cmpesb,0 %[chan], 0, %%pred20\n" \
 " cmpesb,1 %[chan], 1, %%pred21\n" \
 " cmpesb,3 %[chan], 2, %%pred22\n" \
@@ -2280,7 +2249,8 @@ do { \
 " ldrd,3 [ %[from] + %[opc_8] ], %[val_8] ? %%pred26\n" \
 " ldrd,5 [ %[from] + %[opc_8] ], %[val_8] ? %%pred27}\n" \
 "{movts,1 %[prev], %[val] ? %%pred19}\n" \
-"{strd,2 [ %[to] + %[opc_st] ], %[val]\n" \
+"{strd,2 [ %[to] + %[opc_st_byte] ], %[val] ? %%pred28}\n" \
+"{strd,2 [ %[to] + %[opc_st] ], %[val] ? ~%%pred28\n" \
 " strd,5 [ %[to_hi] + %[opc_st] ], %[val_8] ? ~ %%pred18}\n" \
 : [prev] "=&r" (prev), [val] "=&r" (val), \
 [val_8] "=&r" (val_8) \
@@ -2288,10 +2258,13 @@ do { \
 [vr] "ir" ((u32) (_vr)), [quadro] "r" (__quadro), \
 [chan] "ir" (__chan), [chan_q] "ir" (__chan_q), \
 [opc] "r" (__opc), [opc_8] "r" (__opc | 8ull), \
+[not_single_byte] "ir" (_not_single_byte), \
 [opc_ld] "i" (TAGGED_MEM_LOAD_REC_OPC), \
+[opc_st_byte] "i" (MEM_STORE_REC_OPC_B), \
 [opc_st] "i" (TAGGED_MEM_STORE_REC_OPC) \
-: "memory", "pred18", "pred19", "pred20", "pred21", "pred22", \
-"pred23", "pred24", "pred25", "pred26", "pred27"); \
+: "memory", "pred18", "pred19", "pred20", "pred21", \
+"pred22", "pred23", "pred24", "pred25", "pred26", \
+"pred27", "pred28"); \
 } while (false)
 
 /*
@@ -4938,6 +4911,20 @@ do { \
 DO_ATOMIC_WRITE_PSR_REG_VALUE(greg_no, psr_off, psr_value, \
 under_upsr_off, under_upsr_bool) \
 
+#define DO_ATOMIC_WRITE_UPSR_REG_VALUE(greg_no, upsr_off, upsr_value) \
+({ \
+asm volatile ( \
+"{\n\t" \
+" rws %1, %%upsr\n\t" \
+" stw %%dg" #greg_no ", [%0], %1\n\t" \
+"}" \
+: \
+: "ri" ((__e2k_u64_t)(upsr_off)), \
+"r" ((__e2k_u32_t)(upsr_value))); \
+})
+#define KVM_DO_ATOMIC_WRITE_UPSR_REG_VALUE(greg_no, upsr_off, upsr_value) \
+DO_ATOMIC_WRITE_UPSR_REG_VALUE(greg_no, upsr_off, upsr_value)
+
 #define NATIVE_GET_TCD() \
 ({ \
 register __e2k_u64_t res; \
@@ -4965,14 +4952,26 @@ do { \
 
 /* Add ctpr3 to clobbers to explain to lcc that this
 * GNU asm does a return. */
-#define E2K_DONE \
+#define E2K_DONE() \
 do { \
 /* #80747: must repeat interrupted barriers */ \
 asm volatile ("{nop 3; wait st_c=1} {done}" ::: "ctpr3"); \
 } while (0)
 
-#define E2K_SYSCALL_RETURN E2K_RETURN
-#define E2K_RETURN(rval) \
+#define NATIVE_RETURN() \
+do { \
+asm volatile( "{\n" \
+"return %%ctpr3\n" \
+"}\n" \
+"{\n" \
+"ct %%ctpr3\n" \
+"}\n" \
+: \
+: \
+: "ctpr3"); \
+} while (0)
+
+#define NATIVE_RETURN_VALUE(rval) \
 do { \
 asm volatile( "{\n" \
 "return %%ctpr3\n" \
@@ -4981,10 +4980,13 @@ do { \
 "{\n" \
 "ct %%ctpr3\n" \
 "}\n" \
-:: [r0] "ir" (rval) \
+: \
+: [r0] "ir" (rval) \
 : "ctpr3"); \
 } while (0)
 
+#define E2K_SYSCALL_RETURN NATIVE_RETURN_VALUE
+
 #define E2K_EMPTY_CMD(input...) \
 do { \
 asm volatile ("{nop}" :: input); \
@@ -5517,6 +5519,24 @@ _Pragma("no_asm_inline") \
 "ri" ((__e2k_u64_t) (arg3)) \
 ); \
 } while (false)
+#define E2K_GOTO_ARG4(label, arg1, arg2, arg3, arg4) \
+do { \
+_Pragma("no_asm_inline") \
+asm volatile ("\n" \
+"{\n" \
+"addd \t 0, %0, %%dr0\n" \
+"addd \t 0, %1, %%dr1\n" \
+"addd \t 0, %2, %%dr2\n" \
+"addd \t 0, %3, %%dr3\n" \
+"ibranch \t" #label "\n" \
+"}\n" \
+: \
+: "ri" ((__e2k_u64_t) (arg1)), \
+"ri" ((__e2k_u64_t) (arg2)), \
+"ri" ((__e2k_u64_t) (arg3)), \
+"ri" ((__e2k_u64_t) (arg4)) \
+); \
+} while (false)
 #define E2K_GOTO_AND_RETURN_ARG6(label, \
 arg1, arg2, arg3, arg4, arg5, arg6) \
 do { \
@@ -6134,6 +6154,199 @@ do { \
 __dres; \
 })
 
+#define SIMPLE_RECOVERY_STORE(_addr, _data, _opc) \
+do { \
+u32 _fmt = ((ldst_rec_op_t *) &_opc)->fmt; \
+u32 _ind = ((ldst_rec_op_t *) &_opc)->index; \
+asm ( \
+"{nop 1\n" \
+" cmpesb,0 %[fmt], 1, %%pred20\n" \
+" cmpesb,1 %[fmt], 2, %%pred21\n" \
+" cmpesb,3 %[fmt], 3, %%pred22\n" \
+" cmpesb,4 %[fmt], 4, %%pred23}\n" \
+"{stb,2 %[addr], %[ind], %[data] ? %%pred20\n" \
+" sth,5 %[addr], %[ind], %[data] ? %%pred21}\n" \
+"{stw,2 %[addr], %[ind], %[data] ? %%pred22\n" \
+" std,5 %[addr], %[ind], %[data] ? %%pred23}\n" \
+: \
+: [addr] "r" (_addr), [data] "r" (_data), \
+[fmt] "r" (_fmt), [ind] "r" (_ind) \
+: "memory", "pred20", "pred21", "pred22", "pred23" \
+); \
+} while (0)
+
+#define SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, _greg_no, _sm, _mas) \
+do { \
+u32 _fmt = ((ldst_rec_op_t *) &_opc)->fmt; \
+u32 _ind = ((ldst_rec_op_t *) &_opc)->index; \
+asm ( \
+"{nop 1\n" \
+" cmpesb,0 %[fmt], 1, %%pred20\n" \
+" cmpesb,1 %[fmt], 2, %%pred21\n" \
+" cmpesb,3 %[fmt], 3, %%pred22\n" \
+" cmpesb,4 %[fmt], 4, %%pred23}\n" \
+"{nop 4\n" \
+" ldb" _sm ",0 %[addr], %[ind], %%dg" #_greg_no ", " \
+"mas=%[mas] ? %%pred20\n" \
+" ldh" _sm ",2 %[addr], %[ind], %%dg" #_greg_no ", " \
+"mas=%[mas] ? %%pred21\n" \
+" ldw" _sm ",3 %[addr], %[ind], %%dg" #_greg_no ", " \
+"mas=%[mas] ? %%pred22\n" \
+" ldd" _sm ",5 %[addr], %[ind], %%dg" #_greg_no ", " \
+"mas=%[mas] ? %%pred23}\n" \
+: \
+: [addr] "r" (_addr), [fmt] "r" (_fmt), \
+[ind] "r" (_ind), [mas] "r" (_mas) \
+: "memory", "pred20", "pred21", "pred22", "pred23", \
+"g" #_greg_no \
+); \
+} while (0)
+
+#define SIMPLE_RECOVERY_LOAD_TO_GREG(_addr, _opc, _greg_num, _sm, _mas) \
+do { \
+switch (_greg_num) { \
+case 0: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 0, _sm, _mas); \
+break; \
+case 1: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 1, _sm, _mas); \
+break; \
+case 2: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 2, _sm, _mas); \
+break; \
+case 3: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 3, _sm, _mas); \
+break; \
+case 4: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 4, _sm, _mas); \
+break; \
+case 5: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 5, _sm, _mas); \
+break; \
+case 6: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 6, _sm, _mas); \
+break; \
+case 7: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 7, _sm, _mas); \
+break; \
+case 8: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 8, _sm, _mas); \
+break; \
+case 9: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 9, _sm, _mas); \
+break; \
+case 10: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 10, _sm, _mas); \
+break; \
+case 11: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 11, _sm, _mas); \
+break; \
+case 12: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 12, _sm, _mas); \
+break; \
+case 13: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 13, _sm, _mas); \
+break; \
+case 14: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 14, _sm, _mas); \
+break; \
+case 15: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 15, _sm, _mas); \
+break; \
+/* Do not load g16-g19 as they are used by kernel */ \
+case 16: \
+case 17: \
+case 18: \
+case 19: \
+break; \
+case 20: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 20, _sm, _mas); \
+break; \
+case 21: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 21, _sm, _mas); \
+break; \
+case 22: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 22, _sm, _mas); \
+break; \
+case 23: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 23, _sm, _mas); \
+break; \
+case 24: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 24, _sm, _mas); \
+break; \
+case 25: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 25, _sm, _mas); \
+break; \
+case 26: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 26, _sm, _mas); \
+break; \
+case 27: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 27, _sm, _mas); \
+break; \
+case 28: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 28, _sm, _mas); \
+break; \
+case 29: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 29, _sm, _mas); \
+break; \
+case 30: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 30, _sm, _mas); \
+break; \
+case 31: \
+SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 31, _sm, _mas); \
+break; \
+default: \
+panic("Invalid global register # %d\n", _greg_num); \
+} \
+} while (0)
+
+#define SIMPLE_RECOVERY_MOVE(_from, _to, _opc, _first_time, _sm, _mas) \
+do { \
+u64 _data; \
+u32 _fmt = ((ldst_rec_op_t *) &_opc)->fmt; \
+u32 _ind = ((ldst_rec_op_t *) &_opc)->index; \
+asm ( \
+"{nop 1\n" \
+" cmpesb,0 %[fmt], 1, %%pred20\n" \
+" cmpesb,1 %[fmt], 2, %%pred21\n" \
+" cmpesb,3 %[fmt], 3, %%pred22\n" \
+" cmpesb,4 %[fmt], 4, %%pred23}\n" \
+"{nop 4\n" \
+" ldb" _sm ",0 %[from], %[ind], %[data], " \
+"mas=%[mas] ? %%pred20\n" \
+" ldh" _sm ",2 %[from], %[ind], %[data], " \
+"mas=%[mas] ? %%pred21\n" \
+" ldw" _sm ",3 %[from], %[ind], %[data], " \
+"mas=%[mas] ? %%pred22\n" \
+" ldd" _sm ",5 %[from], %[ind], %[data], " \
+"mas=%[mas] ? %%pred23}\n" \
+"{cmpesb,0 %[first_time], 0, %%pred19}\n" \
+"{pass %%pred19, @p0\n" \
+" pass %%pred20, @p1\n" \
+" pass %%pred21, @p2\n" \
+" pass %%pred22, @p3\n" \
+" landp @p0, @p1, @p4\n" \
+" pass @p4, %%pred20\n" \
+" landp @p0, @p2, @p5\n" \
+" pass @p5, %%pred21\n" \
+" landp @p0, @p3, @p6\n" \
+" pass @p6, %%pred22}\n" \
+"{pass %%pred19, @p0\n" \
+" pass %%pred23, @p1\n" \
+" landp @p0, ~@p1, @p4\n" \
+" pass @p4, %%pred23}\n" \
+"{stb,sm,2 %[to], 0, %[data] ? %%pred20\n" \
+" sth,sm,5 %[to], 0, %[data] ? %%pred21}\n" \
+"{stw,sm,2 %[to], 0, %[data] ? %%pred22\n" \
+" std,sm,5 %[to], 0, %[data] ? ~%%pred23}\n" \
+: [data] "=&r" (_data) \
+: [from] "r" (_from), [to] "r" (_to), \
+[fmt] "r" (_fmt), [ind] "r" (_ind), \
+[first_time] "r" (_first_time), [mas] "r" (_mas) \
+: "memory", "pred19", "pred20", "pred21", "pred22", "pred23" \
+); \
+} while (0)
+
 /* Since v6 this got replaced with "wait int=1,mem_mod=1" */
 #define C1_WAIT_TRAP_V3() \
 do { \
@@ -6204,7 +6417,7 @@ do { \
 [addr_flush_icache] "r" ((u64) (_FLUSH_ICACHE_ALL_OP)), \
 [val_icache] "r" (0ULL), \
 [mas_icache] "i" (MAS_ICACHE_FLUSH), \
-[addr_flush_tlb] "r" ((u64) (_FLUSH_ICACHE_ALL_OP)), \
+[addr_flush_tlb] "r" ((u64) (_FLUSH_TLB_ALL_OP)), \
 [val_tlb] "r" (0ULL), \
 [mas_tlb] "i" (MAS_TLB_FLUSH), \
 [mas_ioaddr] "i" (MAS_IOADDR), \
@@ -6266,7 +6479,7 @@ do { \
 [addr_flush_icache] "r" ((u64) (_FLUSH_ICACHE_ALL_OP)), \
 [val_icache] "r" (0ULL), \
 [mas_icache] "i" (MAS_ICACHE_FLUSH), \
-[addr_flush_tlb] "r" ((u64) (_FLUSH_ICACHE_ALL_OP)), \
+[addr_flush_tlb] "r" ((u64) (_FLUSH_TLB_ALL_OP)), \
 [val_tlb] "r" (0ULL), \
 [mas_tlb] "i" (MAS_TLB_FLUSH), \
-[mas_ioaddr] "i" (MAS_IOADDR), \
+[mas_ioaddr] "i" (MAS_IOADDR) \

@@ -25,6 +25,14 @@
 
 #define CHK_DEBUGGER(trapnr, signr, error_code, address, regs, after)
 
+#define IS_KERNEL_THREAD(task, mm) \
+({ \
+e2k_addr_t ps_base; \
+\
+ps_base = (e2k_addr_t)task_thread_info(task)->u_hw_stack.ps.base; \
+((mm) == NULL || ps_base >= TASK_SIZE); \
+})
+
 extern void print_stack_frames(struct task_struct *task,
 struct pt_regs *pt_regs, int show_reg_window) __cold;
 extern void print_mmap(struct task_struct *task) __cold;
@@ -190,6 +198,18 @@ typedef struct stack_regs {
 
 extern void print_chain_stack(struct stack_regs *regs,
 int show_reg_window);
+extern void copy_stack_regs(struct task_struct *task,
+const struct pt_regs *limit_regs, struct stack_regs *regs);
+extern int parse_chain_stack(int flags, struct task_struct *p,
+parse_chain_fn_t func, void *arg);
+
+extern struct stack_regs stack_regs_cache[NR_CPUS];
+extern int debug_userstack;
+extern int print_window_regs;
+
+#ifdef CONFIG_DATA_STACK_WINDOW
+extern int debug_datastack;
+#endif
+
 #ifndef CONFIG_VIRTUALIZATION
 /* it is native kernel without any virtualization */

@@ -203,15 +203,23 @@ DO_FAST_CLOCK_GETTIME(const clockid_t which_clock, struct timespec *tp)
 
 /* trap table entry is called as function (it is closer to hardware start) */
 typedef long (*ttable_entry_args3)(int sys_num, u64 arg1, u64 arg2);
+typedef long (*ttable_entry_args4)(int sys_num, u64 arg1, u64 arg2, u64 arg3);
 #define ttable_entry3_args3(sys_num, arg1, arg2) \
 ((ttable_entry_args3)(get_ttable_entry3))(sys_num, arg1, arg2)
+#define ttable_entry3_args4(sys_num, arg1, arg2) \
+((ttable_entry_args4)(get_ttable_entry3))(sys_num, arg1, arg2, arg3)
 
 /* trap table entry started by direct branch (it is closer to fast system */
 /* call wirthout switch and use user local data stack */
 #define goto_ttable_entry_args3(entry_label, sys_num, arg1, arg2) \
 E2K_GOTO_ARG3(entry_label, sys_num, arg1, arg2)
+#define goto_ttable_entry_args4(entry_label, sys_num, arg1, arg2, arg3) \
+E2K_GOTO_ARG4(entry_label, sys_num, arg1, arg2, arg3)
 #define goto_ttable_entry3_args3(sys_num, arg1, arg2) \
 goto_ttable_entry_args3(ttable_entry3, sys_num, arg1, arg2)
+#define goto_ttable_entry3_args4(sys_num, arg1, arg2, arg3) \
+goto_ttable_entry_args4(ttable_entry3, sys_num, arg1, arg2, arg3)
 
 
 #define ttable_entry_clock_gettime(which, time) \
 /* ibranch */ goto_ttable_entry3_args3(__NR_clock_gettime, which, time)
@@ -219,6 +227,12 @@ typedef long (*ttable_entry_args3)(int sys_num, u64 arg1, u64 arg2);
 #define ttable_entry_gettimeofday(tv, tz) \
 /* ibranch */ goto_ttable_entry3_args3(__NR_gettimeofday, tv, tz)
 /* ttable_entry3_args3(__NR_gettimeofday, tv, tz) */
+#define ttable_entry_sigprocmask(how, nset, oset) \
+/* ibranch */ goto_ttable_entry3_args4(__NR_sigprocmask, how, nset, oset)
+/* ttable_entry3_args4(__NR_sigprocmask, how, nset, oset) */
+#define ttable_entry_getcpu(cpup, nodep, unused) \
+/* ibranch */ goto_ttable_entry3_args4(__NR_getcpu, cpup, nodep, unused)
+/* ttable_entry3_args4(__NR_getcpu, cpup, nodep, unused) */
 
 static inline int
 FAST_SYS_CLOCK_GETTIME(const clockid_t which_clock, struct timespec __user *tp)
@@ -228,14 +242,19 @@ FAST_SYS_CLOCK_GETTIME(const clockid_t which_clock, struct timespec __user *tp)
 
 prefetchw(&fsys_data);
 
+#ifdef CONFIG_KVM_HOST_MODE
+if (unlikely(test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE)))
+ttable_entry_clock_gettime((u64) which_clock, (u64) tp);
+#endif
+
 tp = (typeof(tp)) ((u64) tp & E2K_VA_MASK);
 if (unlikely((u64) tp + sizeof(struct timespec) > ti->addr_limit.seg))
 return -EFAULT;
 
 r = do_fast_clock_gettime(which_clock, tp);
 if (unlikely(r))
-/* ibranch */ ttable_entry_clock_gettime((u64) which_clock, (u64) tp);
+ttable_entry_clock_gettime((u64) which_clock, (u64) tp);
-/* call r = ttable_entry_clock_gettime((u64) which_clock, (u64) tp); */
 return r;
 }
 
@@ -259,6 +278,9 @@ FAST_SYS_SIGGETMASK(u64 __user *oset, size_t sigsetsize)
 {
 struct thread_info *const ti = READ_CURRENT_REG();
 struct task_struct *task = thread_info_task(ti);
+#ifdef CONFIG_KVM_HOST_MODE
+bool guest = test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE);
+#endif
 u64 set;
 
 set = task->blocked.sig[0];
@@ -266,6 +288,11 @@ FAST_SYS_SIGGETMASK(u64 __user *oset, size_t sigsetsize)
 if (unlikely(sigsetsize != 8))
 return -EINVAL;
 
+#ifdef CONFIG_KVM_HOST_MODE
+if (unlikely(guest))
+ttable_entry_sigprocmask((u64) 0, (u64) NULL, (u64) oset);
+#endif
+
 oset = (typeof(oset)) ((u64) oset & E2K_VA_MASK);
 if (unlikely((u64) oset + sizeof(sigset_t) > ti->addr_limit.seg))
 return -EFAULT;

@@ -38,7 +38,7 @@ extern int __init native_arch_pci_init(void);
 
 static inline u8 native_readb_relaxed(const volatile void __iomem *addr)
 {
-u8 res = IO_READ_B(addr);
+u8 res = *(const volatile u8 __force *) addr;
 if (cpu_has(CPU_HWBUG_PIO_READS))
 __E2K_WAIT(_ld_c);
 return res;
@@ -46,7 +46,7 @@ static inline u8 native_readb_relaxed(const volatile void __iomem *addr)
 
 static inline u16 native_readw_relaxed(const volatile void __iomem *addr)
 {
-u16 res = IO_READ_H(addr);
+u16 res = *(const volatile u16 __force *) addr;
 if (cpu_has(CPU_HWBUG_PIO_READS))
 __E2K_WAIT(_ld_c);
 return res;
@@ -54,7 +54,7 @@ static inline u16 native_readw_relaxed(const volatile void __iomem *addr)
 
 static inline u32 native_readl_relaxed(const volatile void __iomem *addr)
 {
-u32 res = IO_READ_W(addr);
+u32 res = *(const volatile u32 __force *) addr;
 if (cpu_has(CPU_HWBUG_PIO_READS))
 __E2K_WAIT(_ld_c);
 return res;
@@ -62,7 +62,7 @@ static inline u32 native_readl_relaxed(const volatile void __iomem *addr)
 
 static inline u64 native_readq_relaxed(const volatile void __iomem *addr)
 {
-u64 res = IO_READ_D(addr);
+u64 res = *(const volatile u64 __force *) addr;
 if (cpu_has(CPU_HWBUG_PIO_READS))
 __E2K_WAIT(_ld_c);
 return res;
@@ -70,23 +70,22 @@ static inline u64 native_readq_relaxed(const volatile void __iomem *addr)
 
 static inline void native_writeb_relaxed(u8 value, volatile void __iomem *addr)
 {
-IO_WRITE_B(addr, value);
+*(volatile u8 __force *) addr = value;
 }
 
 static inline void native_writew_relaxed(u16 value, volatile void __iomem *addr)
 {
-IO_WRITE_H(addr, value);
 *(volatile u16 __force *) addr = value;
 }
 
 static inline void native_writel_relaxed(u32 value, volatile void __iomem *addr)
 {
-IO_WRITE_W(addr, value);
+*(volatile u32 __force *) addr = value;
 }
 
 static inline void native_writeq_relaxed(u64 value, volatile void __iomem *addr)
 {
-IO_WRITE_D(addr, value);
+*(volatile u64 __force *) addr = value;
 }
 
@@ -269,8 +269,6 @@ do { \
 #define native_psr_irqs_disabled() \
 psr_irqs_disabled_flags(NATIVE_NV_READ_PSR_REG_VALUE())
 
-#define native_trap_irqs_disabled(regs) (regs->irqs_disabled)
-
 #define psr_and_upsr_nm_irqs_disabled() \
 ({ \
 int ret; \

@@ -612,6 +612,8 @@ kvm_write_aads_4_reg(int AADs_no, e2k_aadj_t *mem_p)
 KVM_SET_AAU_4_AADs(AADs_no, mem_p);
 }
 
+#define kvm_clear_apb() /* AAU context should restore host */
+
 #ifdef CONFIG_KVM_GUEST_KERNEL
 /* It is pure kvm kernel without paravirtualization */
 
@@ -658,6 +660,8 @@ static inline void write_aaldv_reg(e2k_aaldv_t *aaldv)
 kvm_write_aaldm_reg_value(aaldv->lo, aaldv->hi);
 }
 
+#define clear_apb() kvm_clear_apb()
+
 #endif /* CONFIG_KVM_GUEST_KERNEL */
 
 #endif /* _KVM_AAU_REGS_ACCESS_H_ */

@@ -53,6 +53,9 @@
 (offsetof(kvm_cpu_regs_t, CPU_TIRs)) + \
 (sizeof(e2k_tir_t) * TIR_no) + \
 (offsetof(e2k_tir_t, TIR_hi)))
+#define GUEST_CPU_SBBP(SBBP_no) (GUEST_CPU_SREGS_BASE + \
+(offsetof(kvm_cpu_regs_t, CPU_SBBP)) + \
+(sizeof(u64) * SBBP_no))
 #define GUEST_GET_CPU_SREG(reg_name) \
 E2K_LOAD_GUEST_VCPU_STATE_W(GUEST_CPU_SREG(reg_name))
 #define GUEST_GET_CPU_DSREG(reg_name) \
@@ -65,6 +68,8 @@
 E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_CPU_TIR_lo(TIR_no))
 #define GUEST_GET_CPU_TIR_hi(TIR_no) \
 E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_CPU_TIR_hi(TIR_no))
+#define GUEST_GET_CPU_SBBP(SBBP_no) \
+E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_CPU_SBBP(SBBP_no))
 #define GUEST_IRQS_UNDER_UPSR() \
 offsetof(kvm_vcpu_state_t, irqs_under_upsr)
 
@@ -750,6 +755,11 @@
 #define KVM_WRITE_TIR_HI_REG_VALUE(TIR_hi_value) \
 KVM_WRITE_TIRs_num(-1)
 
+/*
+ * Read double-word Stcak of Base Blocks Pointers (SBBP)
+ */
+#define KVM_READ_SBBP_REG_VALUE(no) GUEST_GET_CPU_SBBP(no)
+
 /*
 * Read/write virtual deferred traps register - DTR
 */
@@ -1034,7 +1044,6 @@
 */
 #define KVM_READ_RPR_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(rpr.lo)
 #define KVM_READ_RPR_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(rpr.hi)
-#define KVM_READ_SBBP_REG_VALUE() NATIVE_GET_DSREG_OPEN(sbbp)
 
 #define KVM_WRITE_RPR_LO_REG_VALUE(RPR_lo_value) \
 NATIVE_SET_DSREG_OPEN(rpr.lo, RPR_lo_value)
@@ -1092,7 +1101,7 @@
 #define KVM_READ_PSR_REG_VALUE() \
 ({ \
 extern void dump_stack(void); \
-unsigned long PSR_value = GUEST_GET_CPU_SREG(PSR); \
+unsigned long PSR_value = GUEST_GET_CPU_SREG(E2K_PSR); \
 unsigned long vcpu_base; \
 \
 KVM_GET_VCPU_STATE_BASE(vcpu_base); \
@@ -1110,14 +1119,24 @@ extern void dump_stack(void); \
 if (BOOT_IS_HV_GM()) \
 PSR_value = NATIVE_NV_READ_PSR_REG_VALUE(); \
 else \
-PSR_value = GUEST_GET_CPU_SREG(PSR); \
+PSR_value = GUEST_GET_CPU_SREG(E2K_PSR); \
 PSR_value; \
 })
 
 #define KVM_ATOMIC_WRITE_PSR_REG_VALUE(PSR_value, under_upsr) \
+({ \
 KVM_DO_ATOMIC_WRITE_PSR_REG_VALUE(GUEST_VCPU_STATE_GREG, \
-GUEST_CPU_SREG(PSR), PSR_value, \
+GUEST_CPU_SREG(E2K_PSR), PSR_value, \
-GUEST_IRQS_UNDER_UPSR(), under_upsr)
+GUEST_IRQS_UNDER_UPSR(), under_upsr); \
+trace_vcpu_psr_update(PSR_value, under_upsr); \
+})
+
+#define BOOT_KVM_ATOMIC_WRITE_PSR_REG_VALUE(PSR_value, under_upsr) \
+({ \
+KVM_DO_ATOMIC_WRITE_PSR_REG_VALUE(GUEST_VCPU_STATE_GREG, \
+GUEST_CPU_SREG(E2K_PSR), PSR_value, \
+GUEST_IRQS_UNDER_UPSR(), under_upsr); \
+})
+
 #define KVM_WRITE_SW_PSR_REG_VALUE(PSR_value) \
 ({ \
@@ -1133,6 +1152,21 @@ extern void dump_stack(void); \
 under_upsr = false; \
 KVM_ATOMIC_WRITE_PSR_REG_VALUE(PSR_value, under_upsr); \
 })
 
+#define BOOT_KVM_WRITE_SW_PSR_REG_VALUE(PSR_value) \
+({ \
+kvm_vcpu_state_t *vcpu_state; \
+bool under_upsr; \
+\
+KVM_GET_VCPU_STATE_BASE(vcpu_state); \
+under_upsr = vcpu_state->irqs_under_upsr; \
+if (((PSR_value) & (PSR_IE | PSR_NMIE | PSR_UIE | PSR_UNMIE)) == \
+(PSR_IE | PSR_NMIE | PSR_UIE | PSR_UNMIE)) \
+under_upsr = true; \
+if (((PSR_value) & (PSR_IE | PSR_NMIE | PSR_UIE | PSR_UNMIE)) == 0) \
+under_upsr = false; \
+BOOT_KVM_ATOMIC_WRITE_PSR_REG_VALUE(PSR_value, under_upsr); \
+})
 #define KVM_WRITE_PSR_REG_VALUE(PSR_value) \
 ({ \
 KVM_WRITE_SW_PSR_REG_VALUE(PSR_value); \
@@ -1141,7 +1175,7 @@ extern void dump_stack(void); \
 })
 #define BOOT_KVM_WRITE_PSR_REG_VALUE(PSR_value) \
 ({ \
-KVM_WRITE_SW_PSR_REG_VALUE(PSR_value); \
+BOOT_KVM_WRITE_SW_PSR_REG_VALUE(PSR_value); \
 if (BOOT_IS_HV_GM()) \
 NATIVE_WRITE_PSR_REG_VALUE(PSR_value); \
 })
@@ -1172,6 +1206,19 @@ extern void dump_stack(void); \
 UPSR_value = GUEST_GET_CPU_SREG(UPSR); \
 UPSR_value; \
 })
 
+#define KVM_ATOMIC_WRITE_UPSR_REG_VALUE(UPSR_value) \
+({ \
+KVM_DO_ATOMIC_WRITE_UPSR_REG_VALUE(GUEST_VCPU_STATE_GREG, \
+GUEST_CPU_SREG(UPSR), UPSR_value); \
+})
+
+#define BOOT_KVM_ATOMIC_WRITE_UPSR_REG_VALUE(UPSR_value) \
+({ \
+KVM_DO_ATOMIC_WRITE_UPSR_REG_VALUE(GUEST_VCPU_STATE_GREG, \
+GUEST_CPU_SREG(UPSR), UPSR_value); \
+})
+
 #if defined(CONFIG_DIRECT_VIRQ_INJECTION)
 #define KVM_WRITE_UPSR_REG_VALUE(UPSR_value) \
 ({ \
@@ -1180,11 +1227,14 @@ extern void dump_stack(void); \
 \
 KVM_GET_VCPU_STATE_BASE(vcpu_state); \
 under_upsr = vcpu_state->irqs_under_upsr; \
-GUEST_SET_CPU_SREG(UPSR, UPSR_value); \
+KVM_ATOMIC_WRITE_UPSR_REG_VALUE(UPSR_value); \
-NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value); \
 if (under_upsr && vcpu_state->lapic.virqs_num.counter) { \
-if ((UPSR_value) & UPSR_IE) \
+if ((UPSR_value) & UPSR_IE) { \
-HYPERVISOR_inject_interrupt(); \
+trace_vcpu_upsr_update(UPSR_value, true); \
+kvm_hypervisor_inject_interrupt(); \
+} \
+} else { \
+trace_vcpu_upsr_update(UPSR_value, false); \
 } \
 })
 #define BOOT_KVM_WRITE_UPSR_REG_VALUE(UPSR_value) \
@@ -1194,25 +1244,13 @@ extern void dump_stack(void); \
 \
 KVM_GET_VCPU_STATE_BASE(vcpu_state); \
 under_upsr = vcpu_state->irqs_under_upsr; \
-GUEST_SET_CPU_SREG(UPSR, UPSR_value); \
+BOOT_KVM_ATOMIC_WRITE_UPSR_REG_VALUE(UPSR_value); \
-NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value); \
 if (under_upsr && vcpu_state->lapic.virqs_num.counter) { \
 if ((UPSR_value) & UPSR_IE) \
 HYPERVISOR_inject_interrupt(); \
 } \
 })
-#elif defined(CONFIG_VIRQ_VCPU_INJECTION)
+#else /* ! CONFIG_DIRECT_VIRQ_INJECTION */
-#define KVM_WRITE_UPSR_REG_VALUE(UPSR_value) \
-({ \
-GUEST_SET_CPU_SREG(UPSR, UPSR_value); \
-NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value); \
-})
-#define BOOT_KVM_WRITE_UPSR_REG_VALUE(UPSR_value) \
-({ \
-GUEST_SET_CPU_SREG(UPSR, UPSR_value); \
-NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value); \
-})
-#else /* ! CONFIG_DIRECT_VIRQ_INJECTION && ! CONFIG_VIRQ_VCPU_INJECTION */
 #define KVM_WRITE_UPSR_REG_VALUE(UPSR_value)
 #define BOOT_KVM_WRITE_UPSR_REG_VALUE(UPSR_value)
 #endif /* CONFIG_DIRECT_VIRQ_INJECTION */
@@ -1488,10 +1526,14 @@ extern void dump_stack(void); \
 KVM_WRITE_SBR_REG_VALUE(SBR_value)
 #define WRITE_USBR_REG_VALUE(USBR_value) \
 KVM_WRITE_USBR_REG_VALUE(USBR_value)
+#define NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo) \
+KVM_NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo)
 #define BOOT_WRITE_USBR_REG_VALUE(USBR_value) \
 KVM_WRITE_USBR_REG_VALUE(USBR_value)
 #define BOOT_WRITE_SBR_REG_VALUE(SBR_value) \
 KVM_WRITE_SBR_REG_VALUE(SBR_value)
+#define BOOT_NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo) \
+KVM_NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo)
 
 /*
 * Read/write double-word Window Descriptor Register (WD)

@@ -9,6 +9,7 @@
 /* do not include this header directly, only through asm/e2k_debug.h */
 
 #include <linux/types.h>
+#include <asm/kvm/vcpu-regs-debug-inline.h>
 
 /*
 * Some definitions to print/dump/show stacks

@@ -232,7 +232,9 @@ switch_guest_mm(gthread_info_t *next_gti, struct gmm_struct *next_gmm)
 DebugKVMSW("started to switch guest mm from GPID #%d to GPID #%d\n",
 cur_gti->gpid->nid.nr, next_gti->gpid->nid.nr);
 active_gmm = pv_vcpu_get_active_gmm(vcpu);
-if (next_gmm == NULL || next_gti->gmm == NULL) {
+if (next_gmm == NULL ||
+next_gti->gmm == NULL ||
+next_gti->gmm_in_release) {
 #ifdef DO_NOT_USE_ACTIVE_GMM
 /* switch to guest kernel thread, but optimization */
 /* has been turned OFF, so switch to init gmm & PTs */
@@ -258,12 +260,13 @@ switch_guest_mm(gthread_info_t *next_gti, struct gmm_struct *next_gmm)
 active_gmm, active_gmm->nid.nr);
 goto out;
 }
-if (likely(!next_gmm->in_release && !next_gti->gmm_in_release &&
-!pv_vcpu_is_init_gmm(vcpu, next_gmm))) {
+if (likely(!pv_vcpu_is_init_gmm(vcpu, next_gmm))) {
 next_pgd = kvm_mmu_load_gmm_root(current_thread_info(),
 next_gti);
+pv_vcpu_set_gmm(vcpu, next_gmm);
 } else {
 next_pgd = kvm_mmu_load_init_root(vcpu);
+pv_vcpu_clear_gmm(vcpu);
 }
 switch_guest_pgd(next_pgd);
 pv_vcpu_set_active_gmm(vcpu, next_gmm);

@@ -30,6 +30,15 @@
 greg_vs; \
 })
 
+#define HOST_GET_SAVED_VCPU_STATE_GREG_AS_LIGHT(__ti) \
+({ \
+unsigned long greg_vs; \
+\
+HOST_ONLY_COPY_FROM_VCPU_STATE_GREG(&(__ti)->k_gregs_light, \
+greg_vs); \
+greg_vs; \
+})
+
 #define HOST_ONLY_SAVE_VCPU_STATE_GREG(vs__) \
 ({ \
 (vs__) = NATIVE_GET_UNTEGGED_DGREG(GUEST_VCPU_STATE_GREG); \
@@ -69,7 +78,7 @@
 })
 
 #define HOST_SAVE_KERNEL_GREGS_AS_LIGHT(__ti) \
-HOST_SAVE_HOST_GREGS_TO(&(__ti)->k_gregs_light, true)
+HOST_SAVE_HOST_GREGS_TO(&(__ti)->k_gregs_light, false)
 
 #define HOST_SAVE_KERNEL_GREGS(__ti) \
 HOST_SAVE_HOST_GREGS_TO(&(__ti)->k_gregs, true)
@@ -77,6 +86,9 @@
 #define HOST_SAVE_HOST_GREGS(__ti) \
 HOST_SAVE_HOST_GREGS_TO(&(__ti)->k_gregs, false)
 
+#define HOST_SAVE_GUEST_KERNEL_GREGS(__gti) \
+HOST_SAVE_HOST_GREGS_TO(&(__gti)->gk_gregs, false)
+
 #define HOST_RESTORE_HOST_GREGS_FROM(__k_gregs, only_kernel) \
 ({ \
 kernel_gregs_t *k_gregs = (__k_gregs); \
@@ -97,13 +109,16 @@
 })
 
 #define HOST_RESTORE_KERNEL_GREGS_AS_LIGHT(_ti) \
-HOST_RESTORE_HOST_GREGS_FROM(&(_ti)->k_gregs_light, true)
+HOST_RESTORE_HOST_GREGS_FROM(&(_ti)->k_gregs_light, false)
 
 #define HOST_RESTORE_KERNEL_GREGS(_ti) \
 HOST_RESTORE_HOST_GREGS_FROM(&(_ti)->k_gregs, true)
 
 #define HOST_RESTORE_HOST_GREGS(_ti) \
 HOST_RESTORE_HOST_GREGS_FROM(&(_ti)->k_gregs, false)
 
+#define HOST_RESTORE_GUEST_KERNEL_GREGS(_gti) \
+HOST_RESTORE_HOST_GREGS_FROM(&(_gti)->gk_gregs, false)
 #else /* !CONFIG_KVM_HOST_MODE */
 #define HOST_SAVE_HOST_GREGS(__ti)
 #define HOST_RESTORE_HOST_GREGS(_ti)

@@ -19,8 +19,9 @@
 #include <asm/epicdef.h>
 #include <asm/mmu_regs_types.h>
 #include <asm/mmu_types.h>
+#include <asm/kvm/vcpu-regs-debug.h>
 #include <asm/kvm/irq.h>
+#include <uapi/asm/sigcontext.h>
 
 typedef struct kvm_cpu_regs {
 #if defined(CONFIG_KVM_GUEST_KERNEL) && \
@@ -62,6 +63,7 @@ typedef struct kvm_cpu_regs {
 e2k_ctpr_t CPU_CTPR3;
 e2k_tir_t CPU_TIRs[MAX_TIRs_NUM]; /* Trap Info Registers */
 int CPU_TIRs_num; /* number of occupied TIRs */
+u64 CPU_SBBP[SBBP_ENTRIES_NUM];
 e2k_wd_t CPU_WD; /* Window Descriptor Register */
 e2k_bgr_t CPU_BGR; /* Base Global Register */
 e2k_lsr_t CPU_LSR; /* Loop Status Register */
@@ -72,7 +74,7 @@ typedef struct kvm_cpu_regs {
 e2k_cuir_t CPU_OSCUIR; /* CUI register of OS */
 u64 CPU_OSR0; /* OS register #0 */
 u32 CPU_OSEM; /* OS Entries Mask */
-e2k_psr_t CPU_PSR; /* Processor State Register */
+e2k_psr_t CPU_E2K_PSR; /* Processor State Register */
 e2k_upsr_t CPU_UPSR; /* User Processor State Register */
 e2k_pfpfr_t CPU_PFPFR; /* floating point control registers */
 e2k_fpcr_t CPU_FPCR;
@@ -297,6 +299,9 @@ typedef struct kvm_vcpu_state {
 bool irqs_under_upsr;
 bool do_dump_state; /* dump all stacks */
 bool do_dump_stack; /* dump only active stack */
+#ifdef VCPU_REGS_DEBUG
+vcpu_regs_trace_t trace; /* VCPU state trace */
+#endif /* VCPU_REGS_DEBUG */
 } kvm_vcpu_state_t;
 
 #define DEBUG_MODE_ON (vcpu->arch.kmap_vcpu_state->debug_mode_on)

@@ -32,6 +32,7 @@
 #define KVM_RESTORE_AAU_MASK_REGS(aau_context) \
 ({ \
 if (IS_HV_GM()) { \
+E2K_CMD_SEPARATOR; \
 NATIVE_RESTORE_AAU_MASK_REGS(aau_context); \
 } else { \
 PREFIX_RESTORE_AAU_MASK_REGS(KVM, kvm, aau_context); \
@@ -253,6 +254,12 @@ kvm_get_aau_context_v5(e2k_aau_t *context)
 KVM_GET_AAU_CONTEXT_V5(context);
 }
 
+static __always_inline void
+kvm_set_aau_context(e2k_aau_t *context)
+{
+/* AAU contesxt should restore host */
+}
+
 #ifdef CONFIG_KVM_GUEST_KERNEL
 /* It is pure kvm kernel without paravirtualization */
 
@@ -307,6 +314,12 @@ get_aau_context(e2k_aau_t *context)
 kvm_get_aau_context(context);
 }
 
+static __always_inline void
+set_aau_context(e2k_aau_t *context)
+{
+kvm_set_aau_context(context);
+}
+
 #endif /* CONFIG_KVM_GUEST_KERNEL */
 
 #endif /* _ASM_E2K_KVM_GUEST_AAU_CONTEXT_H_ */

@@ -3,35 +3,39 @@
 
 #include <linux/types.h>
 
+#ifndef __ASSEMBLY__
+
 struct icache_range_array;
 struct vm_area_struct;
 struct page;
 
 #ifdef CONFIG_SMP
 /*
- * Guest kernel supports pseudo page tables,
- * real page tables are managed now by host kernel
- * So guest flushes can be empty
+ * Guest kernel functions can be run on any guest user processes and can have
+ * arbitrary MMU contexts to track which on host is not possible, therefore
+ * it is necessary to flush all instruction caches
 */
+extern void smp_flush_icache_all(void);
+
 static inline void
 kvm_smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end)
 {
+smp_flush_icache_all();
 }
 static inline void
 kvm_smp_flush_icache_range_array(struct icache_range_array *icache_range_arr)
 {
+smp_flush_icache_all();
 }
 static inline void
 kvm_smp_flush_icache_page(struct vm_area_struct *vma, struct page *page)
 {
-}
-static inline void
-kvm_smp_flush_icache_all(void)
-{
+smp_flush_icache_all();
 }
 static inline void
 kvm_smp_flush_icache_kernel_line(e2k_addr_t addr)
 {
+smp_flush_icache_all();
 }
 #endif /* CONFIG_SMP */
 
@@ -42,7 +46,12 @@ extern void kvm_clear_dcache_l1_range(void *virt_addr, size_t len);
 extern void kvm_write_dcache_l2_reg(unsigned long reg_val,
 int reg_num, int bank_num);
 extern unsigned long kvm_read_dcache_l2_reg(int reg_num, int bank_num);
-extern int kvm_flush_icache_range(e2k_addr_t start, e2k_addr_t end);
+extern void kvm_flush_icache_all(void);
+extern void kvm_flush_icache_range(e2k_addr_t start, e2k_addr_t end);
+extern void kvm_flush_icache_range_array(
+struct icache_range_array *icache_range_arr);
+extern void kvm_flush_icache_page(struct vm_area_struct *vma,
+struct page *page);
 
 #ifdef CONFIG_KVM_GUEST_KERNEL
 /* it is pure guest kernel (not paravirtualized based on pv_ops) */
@@ -63,11 +72,6 @@ smp_flush_icache_page(struct vm_area_struct *vma, struct page *page)
 kvm_smp_flush_icache_page(vma, page);
 }
 static inline void
-smp_flush_icache_all(void)
-{
-kvm_smp_flush_icache_all();
-}
-static inline void
 smp_flush_icache_kernel_line(e2k_addr_t addr)
 {
 kvm_smp_flush_icache_kernel_line(addr);
@@ -85,16 +89,27 @@ clear_DCACHE_L1_range(void *virt_addr, size_t len)
 kvm_clear_dcache_l1_range(virt_addr, len);
 }
 static inline void
+__flush_icache_all(void)
+{
+kvm_flush_icache_all();
+}
+static inline void
 __flush_icache_range(e2k_addr_t start, e2k_addr_t end)
 {
-int ret;
-
-ret = kvm_flush_icache_range(start, end);
-if (ret) {
-panic("%s(): could not flush ICACHE, error %d\n",
-__func__, ret);
-}
+kvm_flush_icache_range(start, end);
+}
+static inline void
+__flush_icache_range_array(struct icache_range_array *icache_range_arr)
+{
+kvm_flush_icache_range_array(icache_range_arr);
+}
+static inline void
+__flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+kvm_flush_icache_page(vma, page);
 }
 #endif /* CONFIG_KVM_GUEST_KERNEL */
 
+#endif /* !__ASSEMBLY__ */
+
 #endif /* __ASM_KVM_GUEST_CACHEFLUSH_H */

@@ -27,6 +27,8 @@ kvm_modify_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip,
 #ifdef CONFIG_KVM_GUEST_KERNEL
 /* it is pure guest kernel (not paravirtualized based on pv_ops) */
 
+#include <asm/kvm/vcpu-regs-debug-inline.h>
+
 #define GET_PHYS_ADDR(task, addr) GUEST_GET_PHYS_ADDR(task, addr)
 
 #define debug_guest_regs(task) false /* none any guests */

@@ -10,26 +10,10 @@
 
 #ifdef CONFIG_VIRTUALIZATION
 
-#if defined(CONFIG_PARAVIRT_GUEST)
+#if defined(CONFIG_KVM_GUEST_KERNEL) || defined(CONFIG_PARAVIRT_GUEST)
 extern unsigned int guest_machine_id;
 #define boot_guest_machine_id boot_get_vo_value(guest_machine_id)
-#endif /* CONFIG_E2K_MACHINE */
+#endif /* CONFIG_KVM_GUEST_KERNEL || CONFIG_PARAVIRT_GUEST */
 
-extern void kvm_set_mach_type_id(void);
-
-#ifdef CONFIG_KVM_GUEST_KERNEL
-/* it is native guest kernel */
-#ifdef CONFIG_E2K_MACHINE
-#if defined(CONFIG_E2K_VIRT)
-#define guest_machine_id MACHINE_ID_E2K_VIRT
-#define boot_guest_machine_id guest_machine_id
-#else
-#error "E2K VIRTUAL MACHINE type does not defined"
-#endif
-#else /* ! CONFIG_E2K_MACHINE */
-extern unsigned int guest_machine_id;
-#define boot_guest_machine_id boot_get_vo_value(guest_machine_id)
-#endif /* CONFIG_E2K_MACHINE */
 
 #define machine_id guest_machine_id
 #define boot_machine_id boot_guest_machine_id
@@ -39,13 +23,12 @@ extern unsigned int guest_machine_id;
 #define set_machine_id(mach_id) (machine_id = (mach_id))
 #define boot_set_machine_id(mach_id) (boot_machine_id = (mach_id))
 
+extern void kvm_set_mach_type_id(void);
 static inline void set_mach_type_id(void)
 {
 kvm_set_mach_type_id();
 }
 
-#endif /* CONFIG_KVM_GUEST_KERNEL */
-
 #endif /* CONFIG_VIRTUALIZATION */
 
 #endif /* _ASM_KVM_GUEST_E2K_H_ */

@@ -15,6 +15,17 @@ extern int kvm_host_printk(const char *fmt, ...);
 #ifdef CONFIG_KVM_GUEST_KERNEL
 /* it is native guest */
 #define host_printk(fmt, args...) kvm_host_printk(fmt, ##args)
 
+#define host_pr_alert(fmt, args...) host_printk(fmt, ##args)
+#define host_pr_cont(fmt, args...) host_printk(fmt, ##args)
+#define host_pr_info(fmt, args...) host_printk(fmt, ##args)
+
+extern void host_dump_stack(void);
+extern u64 host_print_all_TIRs(const e2k_tir_t *TIRs, u64 nr_TIRs);
+extern void host_print_tc_record(const trap_cellar_t *tcellar, int num);
+extern void host_print_all_TC(const trap_cellar_t *TC, int TC_count);
+extern void host_print_pt_regs(const struct pt_regs *regs);
+
 #endif /* ! CONFIG_KVM_GUEST_KERNEL */
 
 #endif /* ! _E2K_KVM_GUEST_HOST_PRINTK_H */

@@ -8,21 +8,24 @@
 #include <asm/tlb_regs_types.h>
 #include <asm/mmu_fault.h>
 
-extern long kvm_recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data,
+extern void kvm_recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data,
 u32 data_tag, u64 st_rec_opc, u64 data_ext, u32 data_ext_tag,
 u64 opc_ext, int chan, int qp_store, int atomic_store);
-extern long kvm_recovery_faulted_load(e2k_addr_t address, u64 *ld_val,
+extern void kvm_recovery_faulted_load(e2k_addr_t address, u64 *ld_val,
-u8 *data_tag, u64 ld_rec_opc, int chan);
+u8 *data_tag, u64 ld_rec_opc, int chan,
-extern long kvm_recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to,
+tc_cond_t cond);
+extern void kvm_recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to,
 e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, int chan,
-int qp_load, int atomic_load);
+int qp_load, int atomic_load, u32 first_time,
-extern long kvm_recovery_faulted_load_to_greg(e2k_addr_t address,
+tc_cond_t cond);
+extern void kvm_recovery_faulted_load_to_greg(e2k_addr_t address,
 u32 greg_num_d, int vr, u64 ld_rec_opc, int chan,
 int qp_load, int atomic_load,
-void *saved_greg_lo, void *saved_greg_hi);
+void *saved_greg_lo, void *saved_greg_hi,
-extern long kvm_move_tagged_word(e2k_addr_t addr_from, e2k_addr_t addr_to);
+tc_cond_t cond);
-extern long kvm_move_tagged_dword(e2k_addr_t addr_from, e2k_addr_t addr_to);
+extern void kvm_move_tagged_word(e2k_addr_t addr_from, e2k_addr_t addr_to);
-extern long kvm_move_tagged_qword(e2k_addr_t addr_from, e2k_addr_t addr_to);
+extern void kvm_move_tagged_dword(e2k_addr_t addr_from, e2k_addr_t addr_to);
+extern void kvm_move_tagged_qword(e2k_addr_t addr_from, e2k_addr_t addr_to);
 
 static inline void
 kvm_handle_mpdma_fault(e2k_addr_t hva)
@@ -47,13 +50,13 @@ kvm_is_guest_kernel_gregs(struct thread_info *ti,
 #ifdef CONFIG_KVM_GUEST_KERNEL
 
 static inline int
-guest_addr_to_host(void **addr, pt_regs_t *regs)
+guest_addr_to_host(void **addr, const pt_regs_t *regs)
 {
 return native_guest_addr_to_host(addr);
 }
 
 static inline void *
-guest_ptr_to_host(void *ptr, int size, pt_regs_t *regs)
+guest_ptr_to_host(void *ptr, int size, const pt_regs_t *regs)
 {
 /* there are not any guests, so nothing convertion */
 return native_guest_ptr_to_host(ptr, size);
@@ -65,82 +68,85 @@ is_guest_kernel_gregs(struct thread_info *ti,
 {
 return kvm_is_guest_kernel_gregs(ti, greg_num_d, greg_copy);
 }
-static inline long
+static inline void
 recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data,
 u32 data_tag, u64 st_rec_opc, u64 data_ext, u32 data_ext_tag,
 u64 opc_ext, int chan, int qp_store, int atomic_store)
 {
 if (likely(IS_HV_GM()))
-return native_recovery_faulted_tagged_store(address, wr_data,
+native_recovery_faulted_tagged_store(address, wr_data,
 data_tag, st_rec_opc, data_ext, data_ext_tag,
 opc_ext, chan, qp_store, atomic_store);
 else
-return kvm_recovery_faulted_tagged_store(address, wr_data,
+kvm_recovery_faulted_tagged_store(address, wr_data,
 data_tag, st_rec_opc, data_ext, data_ext_tag,
 opc_ext, chan, qp_store, atomic_store);
 }
-static inline long
+static inline void
 recovery_faulted_load(e2k_addr_t address, u64 *ld_val, u8 *data_tag,
-u64 ld_rec_opc, int chan)
+u64 ld_rec_opc, int chan,
+tc_cond_t cond)
 {
 if (likely(IS_HV_GM()))
-return native_recovery_faulted_load(address, ld_val,
+native_recovery_faulted_load(address, ld_val,
 data_tag, ld_rec_opc, chan);
 else
-return kvm_recovery_faulted_load(address, ld_val,
+kvm_recovery_faulted_load(address, ld_val,
-data_tag, ld_rec_opc, chan);
+data_tag, ld_rec_opc, chan, cond);
 }
-static inline long
+static inline void
 recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to,
 e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, int chan,
-int qp_load, int atomic_load)
+int qp_load, int atomic_load, u32 first_time,
+tc_cond_t cond)
 {
 if (likely(IS_HV_GM()))
-return native_recovery_faulted_move(addr_from, addr_to,
+native_recovery_faulted_move(addr_from, addr_to,
 addr_to_hi, vr, ld_rec_opc, chan,
-qp_load, atomic_load);
+qp_load, atomic_load, first_time);
 else
-return kvm_recovery_faulted_move(addr_from, addr_to,
+kvm_recovery_faulted_move(addr_from, addr_to,
 addr_to_hi, vr, ld_rec_opc, chan,
-qp_load, atomic_load);
+qp_load, atomic_load, first_time, cond);
 }
-static inline long
+static inline void
 recovery_faulted_load_to_greg(e2k_addr_t address, u32 greg_num_d, int vr,
 u64 ld_rec_opc, int chan, int qp_load, int atomic_load,
-void *saved_greg_lo, void *saved_greg_hi)
+void *saved_greg_lo, void *saved_greg_hi,
+tc_cond_t cond)
 {
 if (likely(IS_HV_GM()))
-return native_recovery_faulted_load_to_greg(address, greg_num_d,
+native_recovery_faulted_load_to_greg(address, greg_num_d,
 vr, ld_rec_opc, chan, qp_load, atomic_load,
 saved_greg_lo, saved_greg_hi);
 else
-return kvm_recovery_faulted_load_to_greg(address, greg_num_d,
+kvm_recovery_faulted_load_to_greg(address, greg_num_d,
 vr, ld_rec_opc, chan, qp_load, atomic_load,
-saved_greg_lo, saved_greg_hi);
+saved_greg_lo, saved_greg_hi, cond);
 }
-static inline long
+static inline void
 move_tagged_word(e2k_addr_t addr_from, e2k_addr_t addr_to)
 {
 if (likely(IS_HV_GM()))
-return native_move_tagged_word(addr_from, addr_to);
+native_move_tagged_word(addr_from, addr_to);
 else
-return kvm_move_tagged_word(addr_from, addr_to);
+kvm_move_tagged_word(addr_from, addr_to);
 }
-static inline long
+static inline void
 move_tagged_dword(e2k_addr_t addr_from, e2k_addr_t addr_to)
 {
 if (likely(IS_HV_GM()))
-return native_move_tagged_dword(addr_from, addr_to);
+native_move_tagged_dword(addr_from, addr_to);
 else
-return kvm_move_tagged_dword(addr_from, addr_to);
+kvm_move_tagged_dword(addr_from, addr_to);
 }
-static inline long
+static inline void
 move_tagged_qword(e2k_addr_t addr_from, e2k_addr_t addr_to)
 {
 if (likely(IS_HV_GM()))
-return native_move_tagged_qword(addr_from, addr_to);
+native_move_tagged_qword(addr_from, addr_to);
 else
-return kvm_move_tagged_qword(addr_from, addr_to);
+kvm_move_tagged_qword(addr_from, addr_to);
 }
 
 static inline void

@@ -19,6 +19,7 @@ static inline void
 deactivate_mm(struct task_struct *dead_task, struct mm_struct *mm)
 {
 native_deactivate_mm(dead_task, mm);
+if (!dead_task->clear_child_tid || (atomic_read(&mm->mm_users) <= 1))
 HYPERVISOR_switch_to_guest_init_mm();
 }
 #endif /* CONFIG_KVM_GUEST_KERNEL */

@ -100,6 +100,35 @@ static inline void KVM_COPY_STACKS_TO_MEMORY(void)
|
||||||
HYPERVISOR_copy_stacks_to_memory();
|
HYPERVISOR_copy_stacks_to_memory();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* own VCPU state: directly accessible through global registers */
|
||||||
|
static inline kvm_vcpu_state_t *kvm_get_vcpu_state(void)
|
||||||
|
{
|
||||||
|
unsigned long vcpu_base;
|
||||||
|
|
||||||
|
KVM_GET_VCPU_STATE_BASE(vcpu_base);
|
||||||
|
return (kvm_vcpu_state_t *)(vcpu_base);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Restore proper psize field of WD register
|
||||||
|
*/
|
||||||
|
static inline void
|
||||||
|
kvm_restore_wd_register_psize(e2k_wd_t wd_from)
|
||||||
|
{
|
||||||
|
HYPERVISOR_update_wd_psize(wd_from.WD_psize);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Preserve current p[c]shtp as they indicate how much to FILL when returning
|
||||||
|
*/
|
||||||
|
static inline void
|
||||||
|
kvm_preserve_user_hw_stacks_to_copy(e2k_stacks_t *u_stacks,
|
||||||
|
e2k_stacks_t *cur_stacks)
|
||||||
|
{
|
||||||
|
/* guest user hardware stacks sizes to copy should be updated */
|
||||||
|
/* after copying and therefore are not preserve */
|
||||||
|
}
|
||||||
|
|
||||||
static inline void
|
static inline void
|
||||||
kvm_kernel_hw_stack_frames_copy(u64 *dst, const u64 *src, unsigned long size)
|
kvm_kernel_hw_stack_frames_copy(u64 *dst, const u64 *src, unsigned long size)
|
||||||
{
|
{
|
||||||
|
@ -271,7 +300,7 @@ failed:
|
||||||
}
|
}
|
||||||
|
|
||||||
static __always_inline int
|
static __always_inline int
|
||||||
kvm_user_hw_stacks_copy(pt_regs_t *regs, int add_frames_num)
|
kvm_user_hw_stacks_copy(pt_regs_t *regs)
|
||||||
{
|
{
|
||||||
e2k_psp_lo_t psp_lo;
|
e2k_psp_lo_t psp_lo;
|
||||||
e2k_psp_hi_t psp_hi;
|
e2k_psp_hi_t psp_hi;
|
||||||
|
@ -282,10 +311,30 @@ kvm_user_hw_stacks_copy(pt_regs_t *regs, int add_frames_num)
|
||||||
e2k_stacks_t *stacks;
|
e2k_stacks_t *stacks;
|
||||||
void __user *dst;
|
void __user *dst;
|
||||||
void *src;
|
void *src;
|
||||||
long to_copy, from, there_are, add_frames_size;
|
long copyed_ps_size, copyed_pcs_size, to_copy, from, there_are;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
BUG_ON(irqs_disabled());
|
if (unlikely(irqs_disabled())) {
|
||||||
|
pr_err("%s() called with IRQs disabled PSP: 0x%lx UPSR: 0x%lx "
|
||||||
|
"under UPSR %d\n",
|
||||||
|
__func__, KVM_READ_PSR_REG_VALUE(),
|
||||||
|
KVM_READ_UPSR_REG_VALUE(),
|
||||||
|
kvm_get_vcpu_state()->irqs_under_upsr);
|
||||||
|
local_irq_enable();
|
||||||
|
WARN_ON(true);
|
||||||
|
}
|
||||||
|
|
||||||
|
stacks = ®s->stacks;
|
||||||
|
copyed_ps_size = regs->copyed.ps_size;
|
||||||
|
copyed_pcs_size = regs->copyed.pcs_size;
|
||||||
|
if (unlikely(copyed_ps_size || copyed_pcs_size)) {
|
||||||
|
/* stacks have been already copyed */
|
||||||
|
BUG_ON(copyed_ps_size != GET_PSHTP_MEM_INDEX(stacks->pshtp) &&
|
||||||
|
GET_PSHTP_MEM_INDEX(stacks->pshtp) != 0);
|
||||||
|
BUG_ON(copyed_pcs_size != PCSHTP_SIGN_EXTEND(stacks->pcshtp) &&
|
||||||
|
PCSHTP_SIGN_EXTEND(stacks->pcshtp) != SZ_OF_CR);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
ret = HYPERVISOR_copy_stacks_to_memory();
|
ret = HYPERVISOR_copy_stacks_to_memory();
|
||||||
if (ret != 0) {
|
if (ret != 0) {
|
||||||
|
@ -295,7 +344,6 @@ kvm_user_hw_stacks_copy(pt_regs_t *regs, int add_frames_num)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* copy user part of procedure stack from kernel back to user */
|
/* copy user part of procedure stack from kernel back to user */
|
||||||
stacks = ®s->stacks;
|
|
||||||
ATOMIC_READ_HW_STACKS_REGS(psp_lo.PSP_lo_half, psp_hi.PSP_hi_half,
|
ATOMIC_READ_HW_STACKS_REGS(psp_lo.PSP_lo_half, psp_hi.PSP_hi_half,
|
||||||
pshtp.PSHTP_reg,
|
pshtp.PSHTP_reg,
|
||||||
pcsp_lo.PCSP_lo_half, pcsp_hi.PCSP_hi_half,
|
 			pcsp_lo.PCSP_lo_half, pcsp_hi.PCSP_hi_half,
@@ -339,17 +387,16 @@ kvm_user_hw_stacks_copy(pt_regs_t *regs, int add_frames_num)
 				__func__, src, dst, to_copy, ret);
 			goto failed;
 		}
+		regs->copyed.ps_size = to_copy;
 	}
 
 	/* copy user part of chain stack from kernel back to user */
-	add_frames_size = add_frames_num * SZ_OF_CR;
 	src = (void *)pcsp_lo.PCSP_lo_base;
-	DebugUST("chain stack at kernel from %px, size 0x%x + 0x%lx, ind 0x%x, "
+	DebugUST("chain stack at kernel from %px, size 0x%x, ind 0x%x, "
 		"pcshtp 0x%x\n",
-		src, pcsp_hi.PCSP_hi_size, add_frames_size, pcsp_hi.PCSP_hi_ind,
-		pcshtp);
-	BUG_ON(pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp) +
-		add_frames_size > pcsp_hi.PCSP_hi_size);
+		src, pcsp_hi.PCSP_hi_size, pcsp_hi.PCSP_hi_ind, pcshtp);
+	BUG_ON(pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp) >
+		pcsp_hi.PCSP_hi_size);
 	if (stacks->pcsp_hi.PCSP_hi_ind >= stacks->pcsp_hi.PCSP_hi_size) {
 		/* chain stack overflow, need expand */
 		ret = handle_chain_stack_bounds(stacks, regs->trap);
@@ -365,7 +412,6 @@ kvm_user_hw_stacks_copy(pt_regs_t *regs, int add_frames_num)
 	from = stacks->pcsp_hi.PCSP_hi_ind - to_copy;
 	BUG_ON(from < 0);
 	dst = (void *)stacks->pcsp_lo.PCSP_lo_base + from;
-	to_copy += add_frames_size;
 	BUG_ON(to_copy > pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp));
 	DebugUST("chain stack at user from %px, ind 0x%x, "
 		"pcshtp size to copy 0x%lx\n",
@@ -385,10 +431,95 @@ kvm_user_hw_stacks_copy(pt_regs_t *regs, int add_frames_num)
 				__func__, src, dst, to_copy, ret);
 			goto failed;
 		}
+		regs->copyed.pcs_size = to_copy;
 	}
-	if (add_frames_size > 0) {
+
+failed:
+	if (DEBUG_USER_STACKS_MODE)
+		debug_ustacks = false;
+	return ret;
+}
+
+/*
+ * Copy additional frames injected to the guest kernel stack, but these frames
+ * are for guest user stack and should be copyed from kernel back to the top
+ * of user.
+ */
+static __always_inline int
+kvm_copy_injected_pcs_frames_to_user(pt_regs_t *regs, int frames_num)
+{
+	e2k_size_t pcs_ind, pcs_size;
+	e2k_addr_t pcs_base;
+	int pcsh_top;
+	e2k_stacks_t *stacks;
+	void __user *dst;
+	void *src;
+	long copyed_frames_size, to_copy, from, there_are, frames_size;
+	int ret;
+
+	BUG_ON(irqs_disabled());
+
+	frames_size = frames_num * SZ_OF_CR;
+	copyed_frames_size = regs->copyed.pcs_injected_frames_size;
+	if (unlikely(copyed_frames_size >= frames_size)) {
+		/* all frames have been already copyed */
+		return 0;
+	} else {
+		/* copyed only part of frames - not implemented case */
+		BUG_ON(copyed_frames_size != 0);
+	}
+
+	stacks = &regs->stacks;
+	ATOMIC_GET_HW_PCS_SIZES_BASE_TOP(pcs_ind, pcs_size, pcs_base, pcsh_top);
+
+	/* guest user stacks part spilled to kernel should be already copyed */
+	BUG_ON(PCSHTP_SIGN_EXTEND(regs->copyed.pcs_size != stacks->pcshtp));
+
+	src = (void *)(pcs_base + regs->copyed.pcs_size);
+	DebugUST("chain stack at kernel from %px, size 0x%lx + 0x%lx, "
+		"ind 0x%lx, pcsh top 0x%x\n",
+		src, pcs_size, frames_size, pcs_ind, pcsh_top);
+	BUG_ON(regs->copyed.pcs_size + frames_size > pcs_ind + pcsh_top);
+	if (stacks->pcsp_hi.PCSP_hi_ind + frames_size >
+			stacks->pcsp_hi.PCSP_hi_size) {
+		/* user chain stack can overflow, need expand */
+		ret = handle_chain_stack_bounds(stacks, regs->trap);
+		if (unlikely(ret)) {
+			pr_err("%s(): could not handle process %s (%d) "
+				"chain stack overflow, error %d\n",
+				__func__, current->comm, current->pid, ret);
+			goto failed;
+		}
+	}
+	to_copy = frames_size;
+	BUG_ON(to_copy < 0);
+	from = stacks->pcsp_hi.PCSP_hi_ind;
+	BUG_ON(from < regs->copyed.pcs_size);
+	dst = (void *)stacks->pcsp_lo.PCSP_lo_base + from;
+	DebugUST("chain stack at user from %px, ind 0x%x, "
+		"frames size to copy 0x%lx\n",
+		dst, stacks->pcsp_hi.PCSP_hi_ind, to_copy);
+	there_are = stacks->pcsp_hi.PCSP_hi_size - from;
+	if (there_are < to_copy) {
+		pr_err("%s(): user chain stack overflow, there are 0x%lx "
+			"to copy need 0x%lx, not yet implemented\n",
+			__func__, there_are, to_copy);
+		BUG_ON(true);
+	}
+	if (likely(to_copy > 0)) {
+		ret = kvm_copy_user_stack_from_kernel(dst, src, to_copy, true);
+		if (ret != 0) {
+			pr_err("%s(): chain stack copying from kernel %px "
+				"to user %px, size 0x%lx failed, error %d\n",
+				__func__, src, dst, to_copy, ret);
+			goto failed;
+		}
+		regs->copyed.pcs_injected_frames_size = to_copy;
 		/* increment chain stack pointer */
-		stacks->pcsp_hi.PCSP_hi_ind += add_frames_size;
+		stacks->pcsp_hi.PCSP_hi_ind += to_copy;
+	} else {
+		BUG_ON(true);
+		ret = 0;
 	}
 
 failed:
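A minimal usage sketch, assuming a caller that first copies the spilled guest part and then the host-injected frames; the wrapper name and the frame count below are illustrative, not from this patch. The new regs->copyed.* counters let both helpers be re-entered without copying the same bytes twice.

	/* Illustrative sketch only: 'frames' stands for however many frames the
	 * host injected; both helpers called here are the ones defined above. */
	static inline int sketch_copy_guest_stacks_back(pt_regs_t *regs, int frames)
	{
		int ret;

		ret = kvm_user_hw_stacks_copy(regs);	/* spilled guest part */
		if (ret != 0)
			return ret;
		if (frames > 0)
			ret = kvm_copy_injected_pcs_frames_to_user(regs, frames);
		return ret;
	}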
@@ -451,7 +582,7 @@ static __always_inline int kvm_user_hw_stacks_prepare(
 	 * 2) User data copying will be done some later at
 	 *    kvm_prepare_user_hv_stacks()
 	 */
-	ret = kvm_user_hw_stacks_copy(regs, 0);
+	ret = kvm_user_hw_stacks_copy(regs);
 	if (ret != 0) {
 		pr_err("%s(): copying of hardware stacks failed< error %d\n",
 			__func__, ret);
@@ -463,7 +594,44 @@ static __always_inline int kvm_user_hw_stacks_prepare(
 static inline int
 kvm_ret_from_fork_prepare_hv_stacks(struct pt_regs *regs)
 {
-	return kvm_user_hw_stacks_copy(regs, 0);
+	return kvm_user_hw_stacks_copy(regs);
+}
+
+static __always_inline void
+kvm_jump_to_ttable_entry(struct pt_regs *regs, enum restore_caller from)
+{
+	if (from & FROM_SYSCALL_N_PROT) {
+		switch (regs->kernel_entry) {
+		case 1:
+		case 3:
+		case 4:
+			KVM_WRITE_UPSR_REG(E2K_KERNEL_UPSR_ENABLED);
+			regs->stack_regs_saved = true;
+			__E2K_JUMP_WITH_ARGUMENTS_8(handle_sys_call,
+				regs->sys_func,
+				regs->args[1], regs->args[2],
+				regs->args[3], regs->args[4],
+				regs->args[5], regs->args[6],
+				regs);
+		default:
+			BUG();
+		}
+	} else if (from & FROM_SYSCALL_PROT_8) {
+		/* the syscall restart is not yet implemented */
+		BUG();
+	} else if (from & FROM_SYSCALL_PROT_10) {
+		/* the syscall restart is not yet implemented */
+		BUG();
+	} else {
+		BUG();
+	}
+}
+
+static inline void kvm_clear_virt_thread_struct(thread_info_t *ti)
+{
+	/* guest PID/MMID's can be received only after registration on host */
+	ti->gpid_nr = -1;
+	ti->gmmid_nr = -1;
 }
 
 static inline void kvm_release_task_struct(struct task_struct *task)
@@ -473,6 +641,11 @@ static inline void kvm_release_task_struct(struct task_struct *task)
 
 	ti = task_thread_info(task);
 	BUG_ON(ti == NULL);
+	if (ti->gpid_nr == -1) {
+		/* the process was not registered on host, nothing to do */
+		BUG_ON(ti->gmmid_nr != -1);
+		return;
+	}
 
 	ret = HYPERVISOR_release_task_struct(ti->gpid_nr);
 	if (ret != 0) {
@@ -527,15 +700,6 @@ static inline kvm_vcpu_state_t *kvm_get_the_vcpu_state(long vcpu_id)
 	return vcpus_state[vcpu_id];
 }
 
-/* own VCPU state: directly accessible through global registers */
-static inline kvm_vcpu_state_t *kvm_get_vcpu_state(void)
-{
-	unsigned long vcpu_base;
-
-	KVM_GET_VCPU_STATE_BASE(vcpu_base);
-	return (kvm_vcpu_state_t *)(vcpu_base);
-}
-
 #define	KVM_ONLY_SET_GUEST_GREGS(ti) \
 		KVM_SET_VCPU_STATE_BASE(kvm_get_the_vcpu_state( \
						smp_processor_id()))
@@ -646,6 +810,19 @@ static inline void COPY_STACKS_TO_MEMORY(void)
 	KVM_COPY_STACKS_TO_MEMORY();
 }
 
+static inline void
+restore_wd_register_psize(e2k_wd_t wd_from)
+{
+	kvm_restore_wd_register_psize(wd_from);
+}
+
+static inline void
+preserve_user_hw_stacks_to_copy(e2k_stacks_t *u_stacks,
+				e2k_stacks_t *cur_stacks)
+{
+	kvm_preserve_user_hw_stacks_to_copy(u_stacks, cur_stacks);
+}
+
 static __always_inline void
 kernel_hw_stack_frames_copy(u64 *dst, const u64 *src, unsigned long size)
 {
@@ -664,6 +841,13 @@ collapse_kernel_pcs(u64 *dst, const u64 *src, u64 spilled_size)
 	kvm_collapse_kernel_pcs(dst, src, spilled_size);
 }
 
+static __always_inline int
+user_hw_stacks_copy(struct e2k_stacks *stacks,
+		pt_regs_t *regs, u64 cur_window_q, bool copy_full)
+{
+	return kvm_user_hw_stacks_copy(regs);
+}
+
 static __always_inline void host_user_hw_stacks_prepare(
 		struct e2k_stacks *stacks, pt_regs_t *regs,
 		u64 cur_window_q, enum restore_caller from, int syscall)
@@ -676,12 +860,24 @@ static __always_inline void host_user_hw_stacks_prepare(
 			from, syscall);
 }
 
+static __always_inline void
+host_exit_to_usermode_loop(struct pt_regs *regs, bool syscall, bool has_signal)
+{
+	/* native & guest kernels cannot be as host */
+}
+
 static inline int
 ret_from_fork_prepare_hv_stacks(struct pt_regs *regs)
 {
 	return kvm_ret_from_fork_prepare_hv_stacks(regs);
 }
 
+static __always_inline void
+jump_to_ttable_entry(struct pt_regs *regs, enum restore_caller from)
+{
+	kvm_jump_to_ttable_entry(regs, from);
+}
+
 static inline void
 virt_cpu_thread_init(struct task_struct *boot_task)
 {
@@ -850,6 +1046,7 @@ complete_go2user(thread_info_t *ti, long fn)
 static inline void
 clear_virt_thread_struct(thread_info_t *ti)
 {
+	kvm_clear_virt_thread_struct(ti);
 }
 
 static inline void virt_setup_arch(void)
@@ -13,7 +13,6 @@
 #include <asm/mmu_regs.h>
 #include <asm/system.h>
 #include <asm/ptrace.h>
-#include <asm/process.h>
 #include <asm/tags.h>
 #include <asm/gregs.h>
 #include <asm/kvm/gregs.h>
@@ -21,9 +20,10 @@
 
 extern void kvm_save_glob_regs(global_regs_t *gregs);
 extern void kvm_save_glob_regs_dirty_bgr(global_regs_t *gregs);
-extern void kvm_save_local_glob_regs(local_gregs_t *l_gregs);
+extern void kvm_save_local_glob_regs(local_gregs_t *l_gregs, bool is_signal);
 extern void kvm_restore_glob_regs(const global_regs_t *gregs);
-extern void kvm_restore_local_glob_regs(const local_gregs_t *l_gregs);
+extern void kvm_restore_local_glob_regs(const local_gregs_t *l_gregs,
+					bool is_signal);
 extern void kvm_get_all_user_glob_regs(global_regs_t *gregs);
 
 static inline void
@@ -51,9 +51,9 @@ guest_save_glob_regs_dirty_bgr_v5(global_regs_t *gregs)
 }
 
 static inline void
-guest_save_local_glob_regs_v2(local_gregs_t *l_gregs)
+guest_save_local_glob_regs_v2(local_gregs_t *l_gregs, bool is_signal)
 {
-	kvm_guest_save_local_gregs_v2(l_gregs);
+	kvm_guest_save_local_gregs_v2(l_gregs, is_signal);
 	if (KERNEL_GREGS_MAX_MASK & LOCAL_GREGS_USER_MASK)
 		copy_k_gregs_to_l_gregs(l_gregs,
 					&current_thread_info()->k_gregs);
@@ -63,9 +63,9 @@ guest_save_local_glob_regs_v2(local_gregs_t *l_gregs)
 }
 
 static inline void
-guest_save_local_glob_regs_v5(local_gregs_t *l_gregs)
+guest_save_local_glob_regs_v5(local_gregs_t *l_gregs, bool is_signal)
 {
-	kvm_guest_save_local_gregs_v5(l_gregs);
+	kvm_guest_save_local_gregs_v5(l_gregs, is_signal);
 	if (KERNEL_GREGS_MAX_MASK & LOCAL_GREGS_USER_MASK)
 		copy_k_gregs_to_l_gregs(l_gregs,
 					&current_thread_info()->k_gregs);
@@ -87,9 +87,9 @@ guest_restore_glob_regs_v5(const global_regs_t *gregs)
 }
 
 static inline void
-guest_restore_local_glob_regs_v2(const local_gregs_t *l_gregs)
+guest_restore_local_glob_regs_v2(const local_gregs_t *l_gregs, bool is_signal)
 {
-	kvm_guest_restore_local_gregs_v2(l_gregs);
+	kvm_guest_restore_local_gregs_v2(l_gregs, is_signal);
 	if (KERNEL_GREGS_MAX_MASK & LOCAL_GREGS_USER_MASK)
 		get_k_gregs_from_l_regs(&current_thread_info()->k_gregs,
 					l_gregs);
@@ -99,9 +99,9 @@ guest_restore_local_glob_regs_v2(const local_gregs_t *l_gregs)
 }
 
 static inline void
-guest_restore_local_glob_regs_v5(const local_gregs_t *l_gregs)
+guest_restore_local_glob_regs_v5(const local_gregs_t *l_gregs, bool is_signal)
 {
-	kvm_guest_restore_local_gregs_v5(l_gregs);
+	kvm_guest_restore_local_gregs_v5(l_gregs, is_signal);
 	if (KERNEL_GREGS_MAX_MASK & LOCAL_GREGS_USER_MASK)
 		get_k_gregs_from_l_regs(&current_thread_info()->k_gregs,
 					l_gregs);
|
||||||
do { \
|
do { \
|
||||||
if (IS_HV_GM()) { \
|
if (IS_HV_GM()) { \
|
||||||
NATIVE_SAVE_STACK_REGS(regs, ti, from_ti, trap); \
|
NATIVE_SAVE_STACK_REGS(regs, ti, from_ti, trap); \
|
||||||
} else { \
|
} else if (!(regs)->stack_regs_saved) { \
|
||||||
PREFIX_SAVE_STACK_REGS(KVM, regs, ti, from_ti, trap); \
|
PREFIX_SAVE_STACK_REGS(KVM, regs, ti, from_ti, trap); \
|
||||||
|
} else { \
|
||||||
|
/* registers were already saved */ \
|
||||||
|
; \
|
||||||
} \
|
} \
|
||||||
} while (false)
|
} while (false)
|
||||||
|
|
||||||
|
@@ -250,6 +253,8 @@ do { \
 		KVM_RESTORE_USER_STACK_REGS(regs, true)
 #define	KVM_RESTORE_USER_CUT_REGS(ti, regs)	/* CUTD is set by host */
 
+#define	KVM_RESTORE_COMMON_REGS(regs)	/* should be restored by host */
+
 #define	KVM_SAVE_TRAP_CELLAR(regs, trap) \
 ({ \
 	kernel_trap_cellar_t *kernel_tcellar = \
@@ -340,6 +345,8 @@ do { \
 		RESTORE_USER_STACK_REGS(regs, true)
 #define	RESTORE_USER_CUT_REGS(ti, regs, in_sys_call) \
 		KVM_RESTORE_USER_CUT_REGS(ti, regs)
+#define	RESTORE_COMMON_REGS(regs) \
+		KVM_RESTORE_COMMON_REGS(regs)
 
 static inline void
 save_glob_regs_v2(global_regs_t *gregs)
@@ -382,22 +389,22 @@ save_glob_regs_dirty_bgr_v5(global_regs_t *gregs)
 }
 
 static inline void
-save_local_glob_regs_v2(local_gregs_t *l_gregs)
+save_local_glob_regs_v2(local_gregs_t *l_gregs, bool is_signal)
 {
 	if (IS_HV_GM()) {
-		guest_save_local_glob_regs_v2(l_gregs);
+		guest_save_local_glob_regs_v2(l_gregs, is_signal);
 	} else {
-		kvm_save_local_glob_regs(l_gregs);
+		kvm_save_local_glob_regs(l_gregs, is_signal);
 	}
 }
 
 static inline void
-save_local_glob_regs_v5(local_gregs_t *l_gregs)
+save_local_glob_regs_v5(local_gregs_t *l_gregs, bool is_signal)
 {
 	if (IS_HV_GM()) {
-		guest_save_local_glob_regs_v5(l_gregs);
+		guest_save_local_glob_regs_v5(l_gregs, is_signal);
 	} else {
-		kvm_save_local_glob_regs(l_gregs);
+		kvm_save_local_glob_regs(l_gregs, is_signal);
 	}
 }
 
@@ -422,32 +429,32 @@ restore_glob_regs_v5(const global_regs_t *gregs)
 }
 
 static inline void
-restore_local_glob_regs_v2(const local_gregs_t *l_gregs)
+restore_local_glob_regs_v2(const local_gregs_t *l_gregs, bool is_signal)
 {
 	if (IS_HV_GM())
-		guest_restore_local_glob_regs_v2(l_gregs);
+		guest_restore_local_glob_regs_v2(l_gregs, is_signal);
 	else
-		kvm_restore_local_glob_regs(l_gregs);
+		kvm_restore_local_glob_regs(l_gregs, is_signal);
 }
 
 static inline void
-restore_local_glob_regs_v5(const local_gregs_t *l_gregs)
+restore_local_glob_regs_v5(const local_gregs_t *l_gregs, bool is_signal)
 {
 	if (IS_HV_GM())
-		guest_restore_local_glob_regs_v5(l_gregs);
+		guest_restore_local_glob_regs_v5(l_gregs, is_signal);
 	else
-		kvm_restore_local_glob_regs(l_gregs);
+		kvm_restore_local_glob_regs(l_gregs, is_signal);
 }
 
 static inline void
-save_local_glob_regs(local_gregs_t *l_gregs)
+save_local_glob_regs(local_gregs_t *l_gregs, bool is_signal)
 {
-	machine.save_local_gregs(l_gregs);
+	machine.save_local_gregs(l_gregs, is_signal);
 }
 static inline void
-restore_local_glob_regs(const local_gregs_t *l_gregs)
+restore_local_glob_regs(const local_gregs_t *l_gregs, bool is_signal)
 {
-	machine.restore_local_gregs(l_gregs);
+	machine.restore_local_gregs(l_gregs, is_signal);
 }
 
 static inline void
@@ -8,7 +8,7 @@
 
 extern void __init boot_e2k_virt_setup_arch(void);
 extern void __init e2k_virt_setup_machine(void);
-extern void kvm_bsp_switch_to_init_stack(void);
+extern void __init kvm_bsp_switch_to_init_stack(void);
 extern void kvm_setup_bsp_idle_task(int cpu);
 extern void setup_guest_interface(void);
 
@@ -2,6 +2,7 @@
 #define _E2K_KVM_GUEST_STRING_H_
 
 #include <linux/types.h>
+#include <linux/errno.h>
 
 #include <asm/pv_info.h>
 #include <asm/kvm/hypercall.h>
@@ -19,14 +20,11 @@ kvm_do_fast_tagged_memory_copy(void *dst, const void *src, size_t len,
 {
 	long ret;
 
-	if (IS_HOST_KERNEL_ADDRESS((e2k_addr_t)src) ||
-			IS_HOST_KERNEL_ADDRESS((e2k_addr_t)dst)) {
-		ret = HYPERVISOR_fast_tagged_guest_memory_copy(dst, src, len,
-				strd_opcode, ldrd_opcode, prefetch);
-	} else {
+	do {
 		ret = HYPERVISOR_fast_tagged_memory_copy(dst, src, len,
 				strd_opcode, ldrd_opcode, prefetch);
-	}
+	} while (ret == -EAGAIN);
 
 	return ret;
 }
 static inline unsigned long
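The guest-side tagged-memory copy now always issues the single HYPERVISOR_fast_tagged_memory_copy() hypercall and retries while the host returns -EAGAIN, instead of choosing a hypercall by address range. A hedged sketch of a caller that relies on this; the zero opcodes and the wrapper name are placeholders, not values from this patch:

	/* Illustrative only: real callers pass their own store/load recovery
	 * opcodes; the retry on -EAGAIN already happens inside the helper. */
	static inline void sketch_copy_tagged_area(void *dst, const void *src, size_t len)
	{
		kvm_do_fast_tagged_memory_copy(dst, src, len,
				0 /* strd_opcode */, 0 /* ldrd_opcode */, 0 /* prefetch */);
	}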
@@ -68,25 +68,18 @@ static inline struct e2k_stacks *
 kvm_syscall_guest_get_restore_stacks(struct thread_info *ti,
					struct pt_regs *regs)
 {
-	return native_syscall_guest_get_restore_stacks(ti, regs);
+	return native_syscall_guest_get_restore_stacks(regs);
 }
 
 /*
  * The function should return bool is the system call from guest
  */
-static inline bool
-kvm_guest_syscall_enter(struct thread_info *ti, struct pt_regs *regs)
+static inline bool kvm_guest_syscall_enter(struct pt_regs *regs)
 {
 	/* guest cannot have own nested guests */
 
 	return false;	/* it is not nested guest system call */
 }
-static inline void
-kvm_guest_syscall_exit_to(struct thread_info *ti, struct pt_regs *regs,
-				unsigned flags)
-{
-	/* nothing guests can be */
-}
 
 #ifdef CONFIG_KVM_GUEST_KERNEL
 /* it is pure guest kernel (not paravrtualized) */
@@ -138,25 +131,28 @@ trap_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs)
 }
 
 static inline struct e2k_stacks *
-syscall_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs)
+syscall_guest_get_restore_stacks(bool ts_host_at_vcpu_mode, struct pt_regs *regs)
 {
-	return kvm_syscall_guest_get_restore_stacks(ti, regs);
+	return kvm_syscall_guest_get_restore_stacks(
+			current_thread_info(), regs);
 }
 
+#define	ts_host_at_vcpu_mode()	false
+
 /*
  * The function should return bool is the system call from guest
  */
-static inline bool
-guest_syscall_enter(struct thread_info *ti, struct pt_regs *regs)
+static inline bool guest_syscall_enter(struct pt_regs *regs,
+			bool ts_host_at_vcpu_mode)
 {
-	return kvm_guest_syscall_enter(ti, regs);
-}
-static inline void
-guest_syscall_exit_to(struct thread_info *ti, struct pt_regs *regs,
-			unsigned flags)
-{
-	kvm_guest_syscall_exit_to(ti, regs, flags);
+	return kvm_guest_syscall_enter(regs);
 }
 
+static inline void guest_exit_intc(struct pt_regs *regs,
+				bool intc_emul_flag) { }
+static inline void guest_syscall_exit_trap(struct pt_regs *regs,
+				bool ts_host_at_vcpu_mode) { }
+
 #endif /* CONFIG_KVM_GUEST_KERNEL */
 
 #endif /* ! _E2K_KVM_GUEST_SWITCH_H */
@@ -0,0 +1,19 @@
+/* Functions to sync shadow page tables with guest page tables
+ * without flushing tlb. Used only by guest kernels
+ *
+ * Copyright 2021 Andrey Alekhin (alekhin_amcst.ru)
+ */
+
+#ifndef _E2K_GST_SYNC_PG_TABLES_H
+#define _E2K_GST_SYNC_PG_TABLES_H
+
+#include <asm/types.h>
+#include <asm/kvm/hypercall.h>
+
+static inline void kvm_sync_addr_range(e2k_addr_t start, e2k_addr_t end)
+{
+	if (!IS_HV_GM())
+		HYPERVISOR_sync_addr_range(start, end);
+}
+
+#endif
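The new header above is the guest-facing entry point: a paravirtualized guest asks the host to walk the given guest-virtual range and bring its shadow page tables up to date without flushing the TLB, while a hardware-virtualized guest (IS_HV_GM()) skips the hypercall. A small hedged usage sketch; the surrounding function is an assumption, not part of the patch:

	/* Illustrative only: after the guest has written PTEs for [start, end)
	 * it lets the host resync the shadow page tables for that range. */
	static inline void sketch_sync_new_mapping(e2k_addr_t start, e2k_addr_t end)
	{
		/* ... guest page-table entries for the range are set up here ... */
		kvm_sync_addr_range(start, end);
	}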
@@ -18,20 +18,40 @@ extern long kvm_guest_ttable_entry5(int sys_num,
 extern long kvm_guest_ttable_entry6(int sys_num,
		u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6);
 
+static __always_inline void kvm_init_pt_regs_copyed_fields(struct pt_regs *regs)
+{
+#ifdef CONFIG_KVM_GUEST_KERNEL
+	if (likely(!regs->stack_regs_saved)) {
+		regs->copyed.ps_size = 0;
+		regs->copyed.pcs_size = 0;
+		regs->copyed.pcs_injected_frames_size = 0;
+	} else {
+		/* the regs is reused and all stacks should be already copyed */
+		;
+	}
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+}
+
+static __always_inline void kvm_init_pt_regs(struct pt_regs *regs)
+{
+	kvm_init_pt_regs_copyed_fields(regs);
+}
+
 static __always_inline void
 kvm_init_traps_handling(struct pt_regs *regs, bool user_mode_trap)
 {
-	regs->deferred_traps = 0;
+	kvm_init_pt_regs(regs);
 }
 static __always_inline void
 kvm_init_syscalls_handling(struct pt_regs *regs)
 {
	kvm_init_traps_handling(regs, true);	/* now as traps init */
 }
-static inline bool
-kvm_have_deferred_traps(struct pt_regs *regs)
+static inline void kvm_clear_fork_child_pt_regs(struct pt_regs *childregs)
 {
-	return regs->deferred_traps != 0;
+	native_clear_fork_child_pt_regs(childregs);
+	kvm_init_pt_regs_copyed_fields(childregs);
 }
 
 #define	KVM_FILL_HARDWARE_STACKS()	/* host itself will fill */
@@ -99,15 +119,12 @@ kvm_stack_bounds_trap_enable(void)
 	kvm_set_sge();
 }
 
-extern void kvm_handle_deferred_traps(struct pt_regs *regs);
-extern void kvm_handle_deferred_traps_in_syscall(struct pt_regs *regs);
-
-static inline void
+static inline int
 kvm_do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address,
		const tc_cond_t condition, const tc_mask_t mask,
		const unsigned int aa_no)
 {
-	native_do_aau_page_fault(regs, address, condition, mask, aa_no);
+	return native_do_aau_page_fault(regs, address, condition, mask, aa_no);
 }
 
 #ifdef CONFIG_KVM_GUEST_KERNEL
@@ -178,16 +195,12 @@ is_guest_TIRs_frozen(struct pt_regs *regs)
 {
 	return false;	/* none any guest */
 }
-static inline bool
-have_deferred_traps(struct pt_regs *regs)
+static inline void clear_fork_child_pt_regs(struct pt_regs *childregs)
 {
-	return kvm_have_deferred_traps(regs);
-}
-static inline void
-handle_deferred_traps_in_syscall(struct pt_regs *regs)
-{
-	kvm_handle_deferred_traps_in_syscall(regs);
+	kvm_clear_fork_child_pt_regs(childregs);
 }
 
 static inline bool
 is_proc_stack_bounds(struct thread_info *ti, struct pt_regs *regs)
 {
@@ -229,12 +242,12 @@ handle_guest_last_wish(struct pt_regs *regs)
 	return false;	/* none any guest and any wishes from */
 }
 
-static inline void
+static inline int
 do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address,
		const tc_cond_t condition, const tc_mask_t mask,
		const unsigned int aa_no)
 {
-	kvm_do_aau_page_fault(regs, address, condition, mask, aa_no);
+	return kvm_do_aau_page_fault(regs, address, condition, mask, aa_no);
 }
 
 /*
@@ -15,6 +15,19 @@
 /* it is native host without any virtualization or */
 /* native kernel with virtualization support */
 #define	host_printk(fmt, args...)	printk(fmt, ##args)
 
+#define	host_pr_alert(fmt, args...)	pr_alert(fmt, ##args)
+#define	host_pr_cont(fmt, args...)	pr_cont(fmt, ##args)
+#define	host_pr_info(fmt, args...)	pr_info(fmt, ##args)
+
+#define	host_dump_stack()		dump_stack()
+#define	host_print_pt_regs(regs)	print_pt_regs(regs)
+#define	host_print_all_TIRs(TIRs, nr_TIRs) \
+		print_all_TIRs(TIRs, nr_TIRs)
+#define	host_print_tc_record(tcellar, num) \
+		print_tc_record(tcellar, num)
+#define	host_print_all_TC(TC, TC_count) \
+		print_all_TC(TC, TC_count)
 #elif	defined(CONFIG_PARAVIRT_GUEST)
 /* it is paravirtualized host and guest */
 #include <asm/paravirt/host_printk.h>
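On a native or host kernel the new host_pr_* wrappers expand one-to-one onto the ordinary printk helpers, so diagnostic code shared between host and guest builds can be written once. A small hedged usage example; the function itself is illustrative, not from the patch:

	/* Illustrative only. */
	static void sketch_report_guest_state(struct pt_regs *regs)
	{
		host_pr_alert("unexpected guest state, dumping pt_regs\n");
		host_print_pt_regs(regs);
		host_dump_stack();
	}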
@@ -213,6 +213,8 @@ static inline unsigned long generic_hypercall6(unsigned long nr,
						/* PCSP_hi register */
 #define	KVM_HCALL_SETUP_IDLE_TASK	12	/* setup current task of */
						/* guest as task */
+#define	KVM_HCALL_UPDATE_WD_PSIZE	13	/* write updated psize field */
+						/* to the WD register */
 #define	KVM_HCALL_MOVE_TAGGED_DATA	15	/* move quad value from to */
 #define	KVM_HCALL_UNFREEZE_TRAPS	16	/* unfreeze TIRs & trap */
						/* cellar */
@@ -236,7 +238,7 @@ static inline unsigned long generic_hypercall6(unsigned long nr,
						/* virtual addresses */
 #define	KVM_HCALL_MMU_PROBE		29	/* probe MMU entry or */
						/* address */
-#define	KVM_HCALL_FLUSH_ICACHE_RANGE	30	/* flush ICACHE range */
+#define	KVM_HCALL_FLUSH_ICACHE_ALL	30	/* flush all ICACHE */
 /* notify host kernel aboout switch to updated procedure stack on guest */
 #define	KVM_HCALL_SWITCH_TO_EXPANDED_PROC_STACK	31
 /* notify host kernel aboout switch to updated procedure chain stack on guest */
@@ -312,6 +314,12 @@ HYPERVISOR_update_psp_hi(unsigned long psp_hi_value)
 	return light_hypercall1(KVM_HCALL_UPDATE_PSP_HI, psp_hi_value);
 }
 
+static inline unsigned long
+HYPERVISOR_update_wd_psize(unsigned long psize_value)
+{
+	return light_hypercall1(KVM_HCALL_UPDATE_WD_PSIZE, psize_value);
+}
+
 static inline unsigned long
 HYPERVISOR_update_pcsp_hi(unsigned long pcsp_hi_value)
 {
@@ -366,6 +374,7 @@ HYPERVISOR_inject_interrupt(void)
 {
 	return light_hypercall0(KVM_HCALL_INJECT_INTERRUPT);
 }
+extern unsigned long kvm_hypervisor_inject_interrupt(void);
 static inline unsigned long
 HYPERVISOR_virqs_handled(void)
 {
@@ -411,10 +420,9 @@ HYPERVISOR_clear_dcache_l1_range(void *addr, size_t len)
					(unsigned long)addr, len);
 }
 static inline unsigned long
-HYPERVISOR_flush_icache_range(e2k_addr_t start, e2k_addr_t end, u64 dummy)
+HYPERVISOR_flush_icache_all(void)
 {
-	return light_hypercall3(KVM_HCALL_FLUSH_ICACHE_RANGE,
-				start, end, dummy);
+	return light_hypercall0(KVM_HCALL_FLUSH_ICACHE_ALL);
 }
 
 typedef enum kvm_mmu_probe {
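Light hypercall 30 now invalidates the whole instruction cache rather than a range, so the guest wrapper takes no arguments. A hedged sketch of how a former range-flush caller might be adapted; the function name is an assumption, not from this patch:

	/* Illustrative only: the range is ignored and everything is flushed. */
	static inline void sketch_flush_icache_range(e2k_addr_t start, e2k_addr_t end)
	{
		(void)start;
		(void)end;
		HYPERVISOR_flush_icache_all();
	}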
@@ -599,8 +607,10 @@ HYPERVISOR_switch_to_expanded_guest_chain_stack(long delta_size,
 #define	KVM_HCALL_PV_ENABLE_ASYNC_PF	133	/* enable async pf */
						/* on current vcpu */
 #endif /* CONFIG_KVM_ASYNC_PF */
-#define	KVM_HCALL_FLUSH_TLB_RANGE	134	/* flush given address */
-						/* range in tlb */
+#define	KVM_HCALL_FLUSH_TLB_RANGE	134	/* sync given address range */
+						/* in page tables and flush tlb */
+#define	KVM_HCALL_SYNC_ADDR_RANGE	135	/* sync ptes in page */
+						/* tables without flushing tlb */
 #define	KVM_HCALL_RECOVERY_FAULTED_TAGGED_STORE	141
						/* recovery faulted store */
						/* tagged value operations */
@@ -679,9 +689,11 @@ typedef struct kvm_task_info {
	unsigned long	gd_size;	/* and size */
	unsigned long	cut_base;	/* CUTD: base */
	unsigned long	cut_size;	/* and size */
-	unsigned int	cui;		/* compilation unit index of code */
+	int		cui;		/* compilation unit index of code */
+	bool		kernel;		/* task in kernel mode */
	unsigned long	entry_point;	/* entry point (address) of task */
-	unsigned long	tls;		/* TLS of new user thread */
+	unsigned long	gregs;		/* pointer to the global registers */
+					/* state of the new process */
 } kvm_task_info_t;
 
 /* hardware stack extention, update and change */
@@ -834,10 +846,11 @@ HYPERVISOR_complete_long_jump(kvm_long_jump_info_t *regs_state)
 }
 
 static inline unsigned long
-HYPERVISOR_launch_sig_handler(kvm_stacks_info_t *regs_state, long sys_rval)
+HYPERVISOR_launch_sig_handler(kvm_stacks_info_t *regs_state,
+				unsigned long sigreturn_entry, long sys_rval)
 {
-	return generic_hypercall2(KVM_HCALL_LAUNCH_SIG_HANDLER,
-			(unsigned long)regs_state, sys_rval);
+	return generic_hypercall3(KVM_HCALL_LAUNCH_SIG_HANDLER,
+			(unsigned long)regs_state, sigreturn_entry, sys_rval);
 }
 
 static inline unsigned long
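HYPERVISOR_launch_sig_handler() now also carries the address at which the guest wants execution to resume when the signal handler returns, matching the new sigreturn_entry field added to pv_vcpu_ctxt_t further below. A hedged sketch of a caller; the trampoline symbol is an assumption, not defined by this patch:

	/* Illustrative only: 'sketch_sigreturn_trampoline' stands for whatever
	 * symbol actually provides the guest's sigreturn entry point. */
	extern char sketch_sigreturn_trampoline[];

	static inline unsigned long sketch_launch_sig_handler(kvm_stacks_info_t *info,
								long sys_rval)
	{
		return HYPERVISOR_launch_sig_handler(info,
				(unsigned long)sketch_sigreturn_trampoline, sys_rval);
	}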
@@ -1005,16 +1018,16 @@ HYPERVISOR_set_guest_glob_regs_dirty_bgr(unsigned long *gregs[2],
			(unsigned long)false, (unsigned long)NULL);
 }
 static inline unsigned long
-HYPERVISOR_get_guest_local_glob_regs(unsigned long *l_gregs[2])
+HYPERVISOR_get_guest_local_glob_regs(unsigned long *l_gregs[2], bool is_signal)
 {
-	return generic_hypercall1(KVM_HCALL_GET_GUEST_LOCAL_GLOB_REGS,
-			(unsigned long)l_gregs);
+	return generic_hypercall2(KVM_HCALL_GET_GUEST_LOCAL_GLOB_REGS,
+			(unsigned long)l_gregs, is_signal);
 }
 static inline unsigned long
-HYPERVISOR_set_guest_local_glob_regs(unsigned long *l_gregs[2])
+HYPERVISOR_set_guest_local_glob_regs(unsigned long *l_gregs[2], bool is_signal)
 {
-	return generic_hypercall1(KVM_HCALL_SET_GUEST_LOCAL_GLOB_REGS,
-			(unsigned long)l_gregs);
+	return generic_hypercall2(KVM_HCALL_SET_GUEST_LOCAL_GLOB_REGS,
+			(unsigned long)l_gregs, is_signal);
 }
 
 static inline unsigned long
@@ -1051,7 +1064,7 @@ HYPERVISOR_recovery_faulted_guest_load(e2k_addr_t address,
 static inline unsigned long
 HYPERVISOR_recovery_faulted_guest_move(e2k_addr_t addr_from, e2k_addr_t addr_to,
		e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, int chan,
-		int qp_load, int atomic_load)
+		int qp_load, int atomic_load, u32 first_time)
 {
	union recovery_faulted_arg arg = {
		.vr = vr,
@@ -1059,9 +1072,9 @@ HYPERVISOR_recovery_faulted_guest_move(e2k_addr_t addr_from, e2k_addr_t addr_to,
		.qp = !!qp_load,
		.atomic = !!atomic_load
	};
-	return generic_hypercall5(KVM_HCALL_RECOVERY_FAULTED_GUEST_MOVE,
+	return generic_hypercall6(KVM_HCALL_RECOVERY_FAULTED_GUEST_MOVE,
			addr_from, addr_to, addr_to_hi,
-			ld_rec_opc, arg.entire);
+			ld_rec_opc, arg.entire, first_time);
 }
 static inline unsigned long
 HYPERVISOR_recovery_faulted_load_to_guest_greg(e2k_addr_t address,
@@ -1108,7 +1121,7 @@ HYPERVISOR_recovery_faulted_load(e2k_addr_t address, u64 *ld_val,
 static inline unsigned long
 HYPERVISOR_recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to,
		e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, int chan,
-		int qp_load, int atomic_load)
+		int qp_load, int atomic_load, u32 first_time)
 {
	union recovery_faulted_arg arg = {
		.vr = vr,
@@ -1116,9 +1129,9 @@ HYPERVISOR_recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to,
		.qp = !!qp_load,
		.atomic = !!atomic_load
	};
-	return generic_hypercall5(KVM_HCALL_RECOVERY_FAULTED_MOVE,
+	return generic_hypercall6(KVM_HCALL_RECOVERY_FAULTED_MOVE,
			addr_from, addr_to, addr_to_hi,
-			ld_rec_opc, arg.entire);
+			ld_rec_opc, arg.entire, first_time);
 }
 static inline unsigned long
 HYPERVISOR_recovery_faulted_load_to_greg(e2k_addr_t address, u32 greg_num_d,
@@ -1449,6 +1462,12 @@ HYPERVISOR_flush_tlb_range(e2k_addr_t start_gva, e2k_addr_t end_gva)
	return generic_hypercall2(KVM_HCALL_FLUSH_TLB_RANGE,
				start_gva, end_gva);
 }
+static inline void
+HYPERVISOR_sync_addr_range(e2k_addr_t start_gva, e2k_addr_t end_gva)
+{
+	generic_hypercall2(KVM_HCALL_SYNC_ADDR_RANGE,
+				start_gva, end_gva);
+}
 
 /*
  * arguments:
@@ -22,8 +22,10 @@ extern void kvm_save_host_gregs_v2(struct host_gregs *gregs);
 extern void kvm_save_host_gregs_v5(struct host_gregs *gregs);
 extern void kvm_restore_host_gregs_v5(const struct host_gregs *gregs);
 
-extern void kvm_guest_save_local_gregs_v2(struct local_gregs *gregs);
-extern void kvm_guest_save_local_gregs_v5(struct local_gregs *gregs);
+extern void kvm_guest_save_local_gregs_v2(struct local_gregs *gregs,
+						bool is_signal);
+extern void kvm_guest_save_local_gregs_v5(struct local_gregs *gregs,
+						bool is_signal);
 extern void kvm_guest_save_kernel_gregs_v2(kernel_gregs_t *gregs);
 extern void kvm_guest_save_kernel_gregs_v5(kernel_gregs_t *gregs);
 extern void kvm_guest_save_gregs_v2(struct global_regs *gregs);
@@ -34,8 +36,10 @@ extern void kvm_guest_restore_gregs_v2(const global_regs_t *gregs);
 extern void kvm_guest_restore_gregs_v5(const global_regs_t *gregs);
 extern void kvm_guest_restore_kernel_gregs_v2(global_regs_t *gregs);
 extern void kvm_guest_restore_kernel_gregs_v5(global_regs_t *gregs);
-extern void kvm_guest_restore_local_gregs_v2(const struct local_gregs *gregs);
-extern void kvm_guest_restore_local_gregs_v5(const struct local_gregs *gregs);
+extern void kvm_guest_restore_local_gregs_v2(const struct local_gregs *gregs,
+						bool is_signal);
+extern void kvm_guest_restore_local_gregs_v5(const struct local_gregs *gregs,
+						bool is_signal);
 
 #if defined(CONFIG_PARAVIRT_GUEST) || defined(CONFIG_KVM_GUEST_KERNEL)
 /* it is paravirtualized host and guest kernel */
@@ -2,6 +2,7 @@
 #define __E2K_KVM_HOST_MM_H
 
 #include <linux/types.h>
+#include <linux/list.h>
 #include <linux/mm.h>
 #include <linux/kvm.h>
 
@@ -27,6 +28,13 @@ typedef struct gmm_struct {
						/* of guest mm structure */
	atomic_t mm_count;		/* How many references to guest mm */
					/* shared mm */
+#ifdef CONFIG_GUEST_MM_SPT_LIST
+	struct list_head spt_list;	/* shadow page tables pages list */
+	spinlock_t spt_list_lock;	/* spin lock to access to list */
+	size_t spt_list_size;		/* current numbers of SPs in list */
+	size_t total_released;		/* total number of allocated and */
+					/* released SPs through list */
+#endif /* CONFIG_GUEST_MM_SPT_LIST */
 #ifdef CONFIG_KVM_HV_MMU
	hpa_t root_hpa;			/* physical base of root shadow PT */
					/* for guest mm on host */
@@ -44,8 +52,6 @@ typedef struct gmm_struct {
					/* the guest mm */
	cpumask_t cpu_vm_mask;		/* mask of CPUs where the mm is */
					/* in use or was some early */
-	bool in_release;		/* guest mm is in release and cannot */
-					/* be used as active */
 } gmm_struct_t;
 
 /* same as accessor for struct mm_struct's cpu_vm_mask but for guest mm */
@@ -390,14 +390,15 @@ kvm_mmu_instr_page_fault(struct kvm_vcpu *vcpu, gva_t address,
 #endif	/* CONFIG_KVM_SHADOW_PT_ENABLE */
 
 extern int kvm_guest_addr_to_host(void **addr);
-extern void *kvm_guest_ptr_to_host_ptr(void *guest_ptr, int size);
+extern void *kvm_guest_ptr_to_host_ptr(void *guest_ptr, int size,
+					bool need_inject);
 
 #ifdef	CONFIG_KVM_HOST_MODE
 /* it is native host kernel with virtualization support */
 static inline int
-guest_addr_to_host(void **addr, pt_regs_t *regs)
+guest_addr_to_host(void **addr, const pt_regs_t *regs)
 {
-	if (likely(!host_test_intc_emul_mode((const struct pt_regs *)regs))) {
+	if (likely(!host_test_intc_emul_mode(regs))) {
		/* faulted addres is not paravirtualized guest one */
		return native_guest_addr_to_host(addr);
	}
@@ -405,14 +406,14 @@ guest_addr_to_host(void **addr, pt_regs_t *regs)
	return kvm_guest_addr_to_host(addr);
 }
 static inline void *
-guest_ptr_to_host(void *ptr, int size, pt_regs_t *regs)
+guest_ptr_to_host(void *ptr, int size, const pt_regs_t *regs)
 {
-	if (likely(!host_test_intc_emul_mode((const struct pt_regs *)regs))) {
+	if (likely(!host_test_intc_emul_mode(regs))) {
		/* faulted addres is not paravirtualized guest one */
		return native_guest_ptr_to_host(ptr, size);
	}
 
-	return kvm_guest_ptr_to_host_ptr(ptr, size);
+	return kvm_guest_ptr_to_host_ptr(ptr, size, false);
 }
 #endif	/* CONFIG_KVM_HOST_MODE */
 
@@ -411,6 +411,16 @@ KVM_READ_CLW_REG(clw_addr_t clw_addr)
	return -1;
 }
 
+/*
+ * Write CLW register
+ */
+
+static inline void
+KVM_WRITE_CLW_REG(clw_addr_t clw_addr, clw_reg_t val)
+{
+	panic("KVM_WRITE_CLW_REG() is not yet implemented\n");
+}
+
 /*
  * KVM MMU DEBUG registers access
  */
@@ -731,6 +741,16 @@ READ_CLW_REG(clw_addr_t clw_addr)
	return KVM_READ_CLW_REG(clw_addr);
 }
 
+/*
+ * Write CLW register
+ */
+
+static inline void
+WRITE_CLW_REG(clw_addr_t clw_addr, clw_reg_t val)
+{
+	KVM_WRITE_CLW_REG(clw_addr, val);
+}
+
 /*
  * KVM MMU DEBUG registers access
  */
@@ -131,27 +131,27 @@ do { \
 })
 
 static inline void
-kvm_clear_virt_thread_struct(thread_info_t *thread_info)
+kvm_clear_virt_thread_struct(thread_info_t *ti)
 {
-	thread_info->gpid_nr = -1;	/* cannot inherit, only set by */
-					/* guest/host kernel */
-#ifdef	CONFIG_KVM_HOST_MODE
-	/* clear KVM support fields and flags */
-	if (test_ti_thread_flag(thread_info, TIF_VIRTUALIZED_HOST) ||
-			test_ti_thread_flag(thread_info, TIF_VIRTUALIZED_GUEST))
-		/* It is clone() on host to create guest */
-		/* VCPU or VIRQ VCPU threads */
-		kvm_clear_host_thread_info(thread_info);
-	if (thread_info->gthread_info) {
-		/* It is guest thread: clear from old process */
-		thread_info->gthread_info = NULL;
-		/* kvm_pv_clear_guest_thread_info(thread_info->gthread_info); */
+	if (likely(ti->vcpu == NULL)) {
+		/* it is not creation of host process */
+		/* to support virtualization */
+		return;
	}
-	/* VCPU host/guest thread flags and VCPU structure cannot inherit */
-	/* only to pass */
-	clear_ti_thread_flag(thread_info, TIF_VIRTUALIZED_HOST);
-	thread_info->vcpu = NULL;
-#endif	/* CONFIG_KVM_HOST_MODE */
+	/*
+	 * Host VCPU thread can be only created by user process (for example
+	 * by qemu) and only user process can clone the thread to handle
+	 * some VCPU running exit reasons.
+	 * But the new thread cannot be one more host VCPU thread,
+	 * so clean up all about VCPU
+	 */
+
+	/* VCPU thread should be only at host mode (handle exit reason), */
+	/* not at running VCPU mode */
+	KVM_BUG_ON(test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE));
+
+	ti->gthread_info = NULL;
 }
 
 #if	!defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL)
@@ -618,6 +618,14 @@ pv_vcpu_user_hw_stacks_prepare(struct kvm_vcpu *vcpu, pt_regs_t *regs,
		do_exit(SIGKILL);
	}
 
+/* Same as for native kernel without virtualization support */
+static __always_inline int
+user_hw_stacks_copy(struct e2k_stacks *stacks,
+		pt_regs_t *regs, u64 cur_window_q, bool copy_full)
+{
+	return native_user_hw_stacks_copy(stacks, regs, cur_window_q, copy_full);
+}
+
 static __always_inline void
 host_user_hw_stacks_prepare(struct e2k_stacks *stacks, pt_regs_t *regs,
		u64 cur_window_q, enum restore_caller from, int syscall)
@@ -635,7 +643,54 @@ host_user_hw_stacks_prepare(struct e2k_stacks *stacks, pt_regs_t *regs,
	pv_vcpu_user_hw_stacks_prepare(vcpu, regs, cur_window_q, from, syscall);
 }
 
-#define	SAVE_HOST_KERNEL_GREGS_COPY_TO(__k_gregs, __g_gregs) \
+static __always_inline void
+host_exit_to_usermode_loop(struct pt_regs *regs, bool syscall, bool has_signal)
+{
+	KVM_BUG_ON(!host_test_intc_emul_mode(regs));
+
+	WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_ENABLED));
+
+	/* Check for rescheduling first */
+	if (need_resched()) {
+		schedule();
+	}
+
+	if (has_signal) {
+		/*
+		 * This is guest VCPU interception emulation, but
+		 * there is (are) pending signal for host VCPU mode,
+		 * so it need switch to host VCPU mode to handle
+		 * signal and probably to kill VM
+		 */
+		WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DISABLED));
+		pv_vcpu_switch_to_host_from_intc(current_thread_info());
+	} else if (likely(guest_trap_pending(current_thread_info()))) {
+		/*
+		 * This is guest VCPU interception emulation and
+		 * there is (are) the guest trap(s) to handle
+		 */
+		insert_pv_vcpu_traps(current_thread_info(), regs);
+	} else {
+		/*
+		 * This is just a return from VCPU interception
+		 * emulation mode to the continue execution
+		 * of the guest paravirtualized VCPU.
+		 * In such case:
+		 *	- the currents point to the host qemu-VCPU
+		 *	  process structures;
+		 *	- the regs points to the host guest-VCPU
+		 *	  process structure.
+		 * So nothing works based on these non-interconnected
+		 * structures cannot be running
+		 */
+	}
+
+	WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DISABLED));
+}
+
+#ifdef	CONFIG_SMP
+#define	SAVE_GUEST_KERNEL_GREGS_COPY_TO(__k_gregs, __g_gregs, \
+					only_kernel) \
 ({ \
	kernel_gregs_t *kg = (__k_gregs); \
	kernel_gregs_t *gg = (__g_gregs); \
@@ -643,19 +698,45 @@ host_user_hw_stacks_prepare(struct e2k_stacks *stacks, pt_regs_t *regs,
	unsigned long cpu_id__; \
	unsigned long cpu_off__; \
 \
+	if (likely(!(only_kernel))) { \
+		unsigned long vs__; \
+ \
+		HOST_ONLY_SAVE_VCPU_STATE_GREG(vs__); \
+		HOST_ONLY_COPY_TO_VCPU_STATE_GREG(gg, vs__); \
+	} \
	ONLY_COPY_FROM_KERNEL_GREGS(kg, task__, cpu_id__, cpu_off__); \
	ONLY_COPY_TO_KERNEL_GREGS(gg, task__, cpu_id__, cpu_off__); \
 })
+#else	/* ! CONFIG_SMP */
+#define	SAVE_GUEST_KERNEL_GREGS_COPY_TO(__k_gregs, __g_gregs, \
+					only_kernel) \
+({ \
+	kernel_gregs_t *kg = (__k_gregs); \
+	kernel_gregs_t *gg = (__g_gregs); \
+	unsigned long task__; \
+ \
+	if (likely(!(only_kernel))) { \
+		unsigned long vs__; \
+ \
+		HOST_ONLY_SAVE_VCPU_STATE_GREG(vs__); \
+		HOST_ONLY_COPY_TO_VCPU_STATE_GREG(gg, vs__); \
+	} \
+	ONLY_COPY_FROM_KERNEL_CURRENT_GREGS(kg, task__); \
+	ONLY_COPY_TO_KERNEL_CURRENT_GREGS(gg, task__); \
+})
+#endif	/* CONFIG_SMP */
 
-#define	SAVE_HOST_KERNEL_GREGS_COPY(__ti, __gti) \
+#define	SAVE_GUEST_KERNEL_GREGS_COPY(__ti, __gti) \
 ({ \
	kernel_gregs_t *k_gregs = &(__ti)->k_gregs_light; \
-	kernel_gregs_t *g_gregs = &(__gti)->g_gregs; \
+	kernel_gregs_t *g_gregs = &(__gti)->gk_gregs; \
 \
-	SAVE_HOST_KERNEL_GREGS_COPY_TO(k_gregs, g_gregs); \
+	SAVE_GUEST_KERNEL_GREGS_COPY_TO(k_gregs, g_gregs, false); \
 })
 
-#define	RESTORE_HOST_KERNEL_GREGS_COPY_FROM(__k_gregs, __g_gregs) \
+#ifdef	CONFIG_SMP
+#define	RESTORE_GUEST_KERNEL_GREGS_COPY_FROM(__k_gregs, __g_gregs, \
+					only_kernel) \
 ({ \
	kernel_gregs_t *kg = (__k_gregs); \
	kernel_gregs_t *gg = (__g_gregs); \
@ -663,16 +744,40 @@ host_user_hw_stacks_prepare(struct e2k_stacks *stacks, pt_regs_t *regs,
|
||||||
unsigned long cpu_id__; \
|
unsigned long cpu_id__; \
|
||||||
unsigned long cpu_off__; \
|
unsigned long cpu_off__; \
|
||||||
\
|
\
|
||||||
|
if (likely(!(only_kernel))) { \
|
||||||
|
unsigned long vs__; \
|
||||||
|
\
|
||||||
|
HOST_ONLY_COPY_FROM_VCPU_STATE_GREG(k_gregs, vs__); \
|
||||||
|
HOST_ONLY_RESTORE_VCPU_STATE_GREG(vs__); \
|
||||||
|
} \
|
||||||
ONLY_COPY_FROM_KERNEL_GREGS(gg, task__, cpu_id__, cpu_off__); \
|
ONLY_COPY_FROM_KERNEL_GREGS(gg, task__, cpu_id__, cpu_off__); \
|
||||||
ONLY_COPY_TO_KERNEL_GREGS(kg, task__, cpu_id__, cpu_off__); \
|
ONLY_COPY_TO_KERNEL_GREGS(kg, task__, cpu_id__, cpu_off__); \
|
||||||
})
|
})
|
||||||
|
#else /* ! CONFIG_SMP */
|
||||||
|
#define RESTORE_GUEST_KERNEL_GREGS_COPY_FROM(__k_gregs, __g_gregs, \
|
||||||
|
only_kernel) \
|
||||||
|
({ \
|
||||||
|
kernel_gregs_t *kg = (__k_gregs); \
|
||||||
|
kernel_gregs_t *gg = (__g_gregs); \
|
||||||
|
unsigned long task__; \
|
||||||
|
\
|
||||||
|
if (likely(!(only_kernel))) { \
|
||||||
|
unsigned long vs__; \
|
||||||
|
\
|
||||||
|
HOST_ONLY_COPY_FROM_VCPU_STATE_GREG(k_gregs, vs__); \
|
||||||
|
HOST_ONLY_RESTORE_VCPU_STATE_GREG(vs__); \
|
||||||
|
} \
|
||||||
|
ONLY_COPY_FROM_KERNEL_CURRENT_GREGS(gg, task__); \
|
||||||
|
ONLY_COPY_TO_KERNEL_CURRENT_GREGS(kg, task__); \
|
||||||
|
})
|
||||||
|
#endif /* CONFIG_SMP */
|
||||||
|
|
||||||
#define RESTORE_HOST_KERNEL_GREGS_COPY(__ti, __gti, __vcpu) \
|
#define RESTORE_GUEST_KERNEL_GREGS_COPY(__ti, __gti, __vcpu) \
|
||||||
({ \
|
({ \
|
||||||
kernel_gregs_t *k_gregs = &(__ti)->k_gregs; \
|
kernel_gregs_t *k_gregs = &(__ti)->k_gregs; \
|
||||||
kernel_gregs_t *g_gregs = &(__gti)->g_gregs; \
|
kernel_gregs_t *g_gregs = &(__gti)->gu_gregs; \
|
||||||
\
|
\
|
||||||
RESTORE_HOST_KERNEL_GREGS_COPY_FROM(k_gregs, g_gregs); \
|
RESTORE_GUEST_KERNEL_GREGS_COPY_FROM(k_gregs, g_gregs, true); \
|
||||||
INIT_HOST_VCPU_STATE_GREG_COPY(__ti, __vcpu); \
|
INIT_HOST_VCPU_STATE_GREG_COPY(__ti, __vcpu); \
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -557,8 +557,7 @@ check_is_user_address(struct task_struct *task, e2k_addr_t address)
 pt_regs_t *__regs = (pt_regs); \
 bool is_ligh_hypercall; \
 \
-is_ligh_hypercall = \
-(__regs->flags & LIGHT_HYPERCALL_FLAG_PT_REGS) != 0; \
+is_ligh_hypercall = __regs->flags.light_hypercall; \
 is_ligh_hypercall; \
 })
 #define TI_LIGHT_HYPERCALL_MODE(thread_info) \
@@ -596,6 +595,7 @@ typedef struct pv_vcpu_ctxt {
 e2k_psr_t guest_psr; /* guest PSR state before trap */
 bool irq_under_upsr; /* is IRQ control under UOSR? */
 bool in_sig_handler; /* signal handler in progress */
+unsigned long sigreturn_entry; /* guest signal return start IP */
 } pv_vcpu_ctxt_t;
 
 #else /* !CONFIG_KVM_HOST_MODE */
@@ -11,29 +11,22 @@
 #include <asm/ptrace.h>
 
 #ifdef CONFIG_VIRTUALIZATION
 
-static __always_inline bool
-kvm_host_at_pv_vcpu_mode(thread_info_t *ti)
-{
-return ti->vcpu && test_ti_thread_flag(ti, TIF_HOST_AT_VCPU_MODE);
-}
-
 static __always_inline void
 kvm_set_intc_emul_flag(pt_regs_t *regs)
 {
-regs->flags |= TRAP_AS_INTC_EMUL_PT_REGS;
+regs->flags.trap_as_intc_emul = 1;
 }
 
 static __always_inline bool
 kvm_test_intc_emul_flag(pt_regs_t *regs)
 {
-return !!(regs->flags & TRAP_AS_INTC_EMUL_PT_REGS);
+return regs->flags.trap_as_intc_emul;
 }
 
 static __always_inline void
 kvm_clear_intc_emul_flag(pt_regs_t *regs)
 {
-regs->flags &= ~TRAP_AS_INTC_EMUL_PT_REGS;
+regs->flags.trap_as_intc_emul = 0;
 }
 
 static __always_inline bool
@@ -59,6 +52,15 @@ host_test_intc_emul_mode(const struct pt_regs *regs)
 
 return true;
 }
 
+extern void pv_vcpu_switch_to_host_from_intc(thread_info_t *ti);
+extern void pv_vcpu_return_to_intc_mode(thread_info_t *ti, struct kvm_vcpu *vcpu);
+
+static inline void return_to_pv_vcpu_intc(struct kvm_vcpu *vcpu)
+{
+pv_vcpu_return_to_intc_mode(current_thread_info(), vcpu);
+}
+
 #else /* !CONFIG_KVM_HOST_MODE */
 /* it is not host kernel */
 static inline bool
@@ -66,6 +68,12 @@ host_test_intc_emul_mode(const pt_regs_t *regs)
 {
 return false;
 }
 
+static inline __interrupt void
+pv_vcpu_switch_to_host_from_intc(thread_info_t *ti)
+{
+/* nothing to do */
+}
 #endif /* CONFIG_KVM_HOST_MODE */
 
 static inline int kvm_get_vcpu_intc_TIRs_num(struct kvm_vcpu *vcpu)
@@ -113,6 +121,8 @@ kvm_clear_vcpu_guest_stacks_pending(struct kvm_vcpu *vcpu, pt_regs_t *regs)
 }
 
 extern noinline void insert_pv_vcpu_traps(thread_info_t *ti, pt_regs_t *regs);
+extern void insert_pv_vcpu_sigreturn(struct kvm_vcpu *vcpu,
+pv_vcpu_ctxt_t *vcpu_ctxt, pt_regs_t *regs);
 
 extern void kvm_emulate_pv_vcpu_intc(struct thread_info *ti, pt_regs_t *regs,
 trap_pt_regs_t *trap);
@@ -235,12 +245,6 @@ static inline mm_context_t *pv_vcpu_get_gmm_context(struct kvm_vcpu *vcpu)
 }
 
 #else /* !CONFIG_VIRTUALIZATION */
-static __always_inline bool
-kvm_host_at_pv_vcpu_mode(thread_info_t *ti)
-{
-return false;
-}
-
 static __always_inline void
 kvm_set_intc_emul_flag(pt_regs_t *regs)
 {
@@ -408,7 +408,7 @@ do { \
 e2k_cutd_t cutd; \
 struct kvm_vcpu *vcpu; \
 \
-if (likely(!test_ti_thread_flag((ti), TIF_HOST_AT_VCPU_MODE))) { \
+if (likely(!test_ti_status_flag((ti), TS_HOST_AT_VCPU_MODE))) { \
 /* host at native or hypervisor mode */ \
 /* so CUT context is alredy set */ \
 break; \
@@ -438,11 +438,6 @@ do { \
 #error "Undefined virtualization mode"
 #endif /* !CONFIG_VIRTUALIZATION */
 
-#define RESTORE_USER_TRAP_CUT_REGS(ti, regs) \
-RESTORE_USER_CUT_REGS(ti, regs, false)
-#define RESTORE_USER_SYSCALL_CUT_REGS(ti, regs) \
-RESTORE_USER_CUT_REGS(ti, regs, true)
-
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST_KERNEL)
 #define SAVE_GUEST_VCPU_STATE_GREGS(regs) \
 ({ \
@@ -8,9 +8,12 @@
 #include <asm/alternative.h>
 #include <asm/mmu_regs_access.h>
 #include <asm/gregs.h>
+#include <asm/regs_state.h>
 #include <asm/kvm/cpu_hv_regs_access.h>
 #include <asm/kvm/mmu_hv_regs_access.h>
 
+#define DEBUG_UPSR_FP_DISABLE
+
 /*
  * See below the 'flags' argument of xxx_guest_enter()/xxx_guest_exit()
  */
@@ -23,10 +26,12 @@
 #define DONT_CU_REGS_SWITCH 0x0010U /* do not save/restore CUT and CU */
 /* registers */
 #define DONT_MMU_CONTEXT_SWITCH 0x0020U /* do not switch MMU context */
-#define DONT_SAVE_GREGS_SWITCH 0x0040U /* do not save global regs */
+#define DONT_SAVE_KGREGS_SWITCH 0x0040U /* do not save and set kernel global */
+/* regs */
 #define DONT_AAU_CONTEXT_SWITCH 0x0080U /* do not switch AAU context */
-#define EXIT_FROM_INTC_SWITCH 0x0100U /* complete intercept emulation mode */
-#define EXIT_FROM_TRAP_SWITCH 0x0200U /* complete trap mode */
+#define DONT_TRAP_MASK_SWITCH 0x0100U /* do not switch OSEM context */
+#define EXIT_FROM_INTC_SWITCH 0x1000U /* complete intercept emulation mode */
+#define EXIT_FROM_TRAP_SWITCH 0x2000U /* complete trap mode */
 
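For illustration only (not part of this patch): the renumbered bits above are intended to be OR-ed together by the guest enter/exit paths further down in this header. A minimal, hedged sketch of such a combination, assuming only the flag macros defined above; the helper name is invented:

static inline unsigned int example_intc_exit_flags(void)
{
	/* complete intercept emulation, but skip the OSEM and kernel-greg
	 * switching that this particular exit path would not need */
	return EXIT_FROM_INTC_SWITCH | DONT_TRAP_MASK_SWITCH |
			DONT_SAVE_KGREGS_SWITCH;
}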
 static inline void
 native_trap_guest_enter(struct thread_info *ti, struct pt_regs *regs,
@@ -77,8 +82,7 @@ native_trap_guest_get_restore_stacks(struct thread_info *ti,
 }
 
 static inline struct e2k_stacks *
-native_syscall_guest_get_restore_stacks(struct thread_info *ti,
-struct pt_regs *regs)
+native_syscall_guest_get_restore_stacks(struct pt_regs *regs)
 {
 return &regs->stacks;
 }
@@ -87,55 +91,15 @@ native_syscall_guest_get_restore_stacks(struct thread_info *ti,
  * The function should return bool is the system call from guest
  */
 static inline bool
-native_guest_syscall_enter(struct thread_info *ti, struct pt_regs *regs)
+native_guest_syscall_enter(struct pt_regs *regs)
 {
 /* nothing guests can be */
 
 return false; /* it is not guest system call */
 }
-static inline void
-native_guest_syscall_exit_to(struct thread_info *ti, struct pt_regs *regs,
-unsigned flags)
-{
-/* nothing guests can be */
-}
-
 #ifdef CONFIG_VIRTUALIZATION
 
-/*
- * Normally data stack is switched on interceptions as follows:
- * 1) Upon interception guest's USD_hi.size is saved into backup
- * stacks (cr1_lo.ussz field).
- * 2) Then hardware switches PCSP stack (see Phase 5) and does an
- * equivalent of DONE which modifies guest's USD with 'cr1_lo.ussz'
- * from the function that called GLAUNCH.
- * 3) Hypervisor in software saves this modified USD and restores it
- * before GLAUNCH.
- * 4) Hardware in GLAUNCH switches PCSP stack (see Phase 4)
- * 5) Hardware in GLAUNCH does an equivalent of DONE (see Phase 6)
- * which restores proper guest USD.
- *
- * But if hypervisor sets VIRT_CTRL_CU.glnch.g_th then that DONE is
- * skipped and guest's data stack is incorrect. So we manually do
- * here what DONE does. For simplicity do it always although it
- * actually is needed only in 'g_th' case.
- */
-static inline void kvm_correct_guest_data_stack_regs(
-struct kvm_sw_cpu_context *sw_ctxt, e2k_cr1_hi_t cr1_hi)
-{
-e2k_usd_lo_t usd_lo;
-e2k_usd_hi_t usd_hi;
-e2k_size_t real_size;
-
-real_size = cr1_hi.CR1_hi_ussz << 4;
-usd_hi = sw_ctxt->usd_hi;
-usd_lo = sw_ctxt->usd_lo;
-usd_lo.USD_lo_base += (real_size - usd_hi.USD_hi_size);
-usd_hi.USD_hi_size = real_size;
-sw_ctxt->usd_lo = usd_lo;
-sw_ctxt->usd_hi = usd_hi;
-}
-
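For reference (not part of this patch): the block removed above performed the "manual DONE" fix-up of the guest data stack described in its comment. A minimal sketch of the same arithmetic with plain scalar arguments, assuming only that ussz is stored in 16-byte units as the removed code implies; the function and parameter names below are invented:

static inline void usd_apply_done_semantics(unsigned long *usd_base,
			unsigned long *usd_size, unsigned long cr1_hi_ussz)
{
	/* ussz is kept in units of 16 bytes, so the real stack size is ussz << 4 */
	unsigned long real_size = cr1_hi_ussz << 4;

	/* move the base by the size delta, then install the real size */
	*usd_base += real_size - *usd_size;
	*usd_size = real_size;
}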
 /*
  * For interceptions just switch actual registers with saved values
  * in 'sw_ctxt'.
@@ -187,9 +151,6 @@ static inline void kvm_switch_stack_regs(struct kvm_sw_cpu_context *sw_ctxt,
 }
 }
 
-#define Compiler_bug_128308_workaround
-
-#ifndef Compiler_bug_128308_workaround
 static inline void kvm_switch_fpu_regs(struct kvm_sw_cpu_context *sw_ctxt)
 {
 e2k_fpcr_t fpcr;
@@ -212,9 +173,6 @@ static inline void kvm_switch_fpu_regs(struct kvm_sw_cpu_context *sw_ctxt)
 sw_ctxt->pfpfr = pfpfr;
 sw_ctxt->upsr = upsr;
 }
-#else /* Compiler_bug_128308_workaround */
-extern noinline void kvm_switch_fpu_regs(struct kvm_sw_cpu_context *sw_ctxt);
-#endif /* !Compiler_bug_128308_workaround */
 
 static inline void kvm_switch_cu_regs(struct kvm_sw_cpu_context *sw_ctxt)
 {
@@ -377,6 +335,39 @@ static inline void kvm_switch_debug_regs(struct kvm_sw_cpu_context *sw_ctxt,
 
 }
 
+#ifdef CONFIG_CLW_ENABLE
+static inline void kvm_switch_clw_regs(struct kvm_sw_cpu_context *sw_ctxt, bool guest_enter)
+{
+if (guest_enter) {
+native_write_US_CL_B(sw_ctxt->us_cl_b);
+native_write_US_CL_UP(sw_ctxt->us_cl_up);
+native_write_US_CL_M0(sw_ctxt->us_cl_m0);
+native_write_US_CL_M1(sw_ctxt->us_cl_m1);
+native_write_US_CL_M2(sw_ctxt->us_cl_m2);
+native_write_US_CL_M3(sw_ctxt->us_cl_m3);
+
+NATIVE_WRITE_MMU_US_CL_D(sw_ctxt->us_cl_d);
+} else {
+sw_ctxt->us_cl_d = NATIVE_READ_MMU_US_CL_D();
+
+DISABLE_US_CLW();
+
+sw_ctxt->us_cl_b = native_read_US_CL_B();
+sw_ctxt->us_cl_up = native_read_US_CL_UP();
+sw_ctxt->us_cl_m0 = native_read_US_CL_M0();
+sw_ctxt->us_cl_m1 = native_read_US_CL_M1();
+sw_ctxt->us_cl_m2 = native_read_US_CL_M2();
+sw_ctxt->us_cl_m3 = native_read_US_CL_M3();
+}
+}
+#else
+static inline void kvm_switch_clw_regs(struct kvm_sw_cpu_context *sw_ctxt, bool guest_enter)
+{
+/* Nothing to do */
+}
+#endif
+
 static inline void
 switch_ctxt_trap_enable_mask(struct kvm_sw_cpu_context *sw_ctxt)
 {
@@ -392,11 +383,23 @@ static inline void host_guest_enter(struct thread_info *ti,
 {
 struct kvm_sw_cpu_context *sw_ctxt = &vcpu->sw_ctxt;
 
+if (likely(!(flags & DONT_TRAP_MASK_SWITCH))) {
 switch_ctxt_trap_enable_mask(sw_ctxt);
-/* In full virtualization mode guest sets his own OSEM in thread_init() */
-if (!vcpu->is_hv)
-KVM_BUG_ON((NATIVE_READ_OSEM_REG_VALUE() & HYPERCALLS_TRAPS_MASK) !=
+/* In full virtualization mode guest sets his own OSEM */
+/* in thread_init() */
+if (!vcpu->is_hv) {
+KVM_BUG_ON((NATIVE_READ_OSEM_REG_VALUE() &
+HYPERCALLS_TRAPS_MASK) !=
 HYPERCALLS_TRAPS_MASK);
+}
+} else {
+/* In full virtualization mode guest sets his own OSEM */
+/* in thread_init() */
+if (!vcpu->is_hv) {
+KVM_BUG_ON((NATIVE_READ_OSEM_REG_VALUE() &
+HYPERCALLS_TRAPS_MASK) != 0);
+}
+}
 
 if (flags & FROM_HYPERCALL_SWITCH) {
 /*
@@ -427,8 +430,13 @@ static inline void host_guest_enter(struct thread_info *ti,
 machine.calculate_aau_aaldis_aaldas(NULL, ti, &sw_ctxt->aau_context);
 #endif
 
+if (machine.flushts)
+machine.flushts();
+
+if (likely(!(flags & DONT_SAVE_KGREGS_SWITCH))) {
 /* For interceptions restore extended part */
 NATIVE_RESTORE_KERNEL_GREGS(&ti->k_gregs);
+}
 
 NATIVE_RESTORE_INTEL_REGS(sw_ctxt);
 
@@ -438,7 +446,9 @@ static inline void host_guest_enter(struct thread_info *ti,
  * the list in sw_ctxt definition */
 kvm_switch_fpu_regs(sw_ctxt);
 kvm_switch_cu_regs(sw_ctxt);
+if (likely(!(flags & DONT_MMU_CONTEXT_SWITCH))) {
 kvm_switch_mmu_regs(sw_ctxt, vcpu->is_hv);
+}
 
 #ifdef CONFIG_USE_AAU
 if (!(flags & DONT_AAU_CONTEXT_SWITCH)) {
@@ -477,6 +487,9 @@ static inline void host_guest_enter(struct thread_info *ti,
 /* restore saved source pointers of host stack */
 kvm_switch_stack_regs(sw_ctxt, false, true);
 }
 
+if (vcpu->is_hv)
+kvm_switch_clw_regs(sw_ctxt, true);
 }
 }
 
@@ -509,11 +522,16 @@ static inline void host_guest_exit(struct thread_info *ti,
 {
 struct kvm_sw_cpu_context *sw_ctxt = &vcpu->sw_ctxt;
 
+if (likely(!(flags & DONT_TRAP_MASK_SWITCH))) {
 switch_ctxt_trap_enable_mask(sw_ctxt);
+}
 KVM_BUG_ON(NATIVE_READ_OSEM_REG_VALUE() & HYPERCALLS_TRAPS_MASK);
 
 /* Switch data stack before all function calls */
 if (flags & USD_CONTEXT_SWITCH) {
+if (vcpu->is_hv)
+kvm_switch_clw_regs(sw_ctxt, false);
+
 if (!(flags & FROM_HYPERCALL_SWITCH) || !vcpu->is_hv) {
 kvm_switch_stack_regs(sw_ctxt, false, false);
 } else {
@@ -593,21 +611,23 @@ static inline void host_guest_exit(struct thread_info *ti,
 if (cpu_has(CPU_HWBUG_L1I_STOPS_WORKING))
 E2K_DISP_CTPRS();
 
+if (likely(!(flags & DONT_SAVE_KGREGS_SWITCH))) {
 /* For interceptions save extended part. */
 machine.save_kernel_gregs(&ti->k_gregs);
 ONLY_SET_KERNEL_GREGS(ti);
+}
+
 NATIVE_SAVE_INTEL_REGS(sw_ctxt);
 #ifdef CONFIG_MLT_STORAGE
 machine.invalidate_MLT();
 #endif
-if (machine.flushts)
-machine.flushts();
-
 /* Isolate from QEMU */
 kvm_switch_fpu_regs(sw_ctxt);
 kvm_switch_cu_regs(sw_ctxt);
+if (likely(!(flags & DONT_MMU_CONTEXT_SWITCH))) {
 kvm_switch_mmu_regs(sw_ctxt, vcpu->is_hv);
+}
 } else {
 /*
  * Starting emulation of interseption of paravirtualized vcpu
@@ -708,6 +728,7 @@ pv_vcpu_switch_guest_host_context(struct kvm_vcpu *vcpu,
 static inline void pv_vcpu_switch_host_context(struct kvm_vcpu *vcpu)
 {
 kvm_host_context_t *host_ctxt = &vcpu->arch.host_ctxt;
+struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt;
 unsigned long *stack;
 pt_regs_t *regs;
 e2k_usd_hi_t k_usd_hi;
@@ -717,13 +738,16 @@ static inline void pv_vcpu_switch_host_context(struct kvm_vcpu *vcpu)
 e2k_psp_hi_t k_psp_hi;
 e2k_pcsp_lo_t k_pcsp_lo;
 e2k_pcsp_hi_t k_pcsp_hi;
+e2k_upsr_t upsr;
 unsigned long base;
 unsigned long size;
 unsigned long used;
+unsigned osem;
 
 /* keep current state of context */
 stack = current->stack;
 regs = current_thread_info()->pt_regs;
+upsr = current_thread_info()->upsr;
 k_usd_lo = current_thread_info()->k_usd_lo;
 k_usd_hi = current_thread_info()->k_usd_hi;
 k_sbr.SBR_reg = (unsigned long)stack + KERNEL_C_STACK_SIZE +
@@ -736,6 +760,7 @@ static inline void pv_vcpu_switch_host_context(struct kvm_vcpu *vcpu)
 /* restore VCPU thread context */
 current->stack = host_ctxt->stack;
 current_thread_info()->pt_regs = host_ctxt->pt_regs;
+current_thread_info()->upsr = host_ctxt->upsr;
 current_thread_info()->k_usd_hi = host_ctxt->k_usd_hi;
 current_thread_info()->k_usd_lo = host_ctxt->k_usd_lo;
 current_thread_info()->k_psp_lo = host_ctxt->k_psp_lo;
@@ -746,6 +771,7 @@ static inline void pv_vcpu_switch_host_context(struct kvm_vcpu *vcpu)
 /* save VCPU thread context */
 host_ctxt->stack = stack;
 host_ctxt->pt_regs = regs;
+host_ctxt->upsr = upsr;
 host_ctxt->k_usd_lo = k_usd_lo;
 host_ctxt->k_usd_hi = k_usd_hi;
 host_ctxt->k_sbr = k_sbr;
@@ -754,6 +780,11 @@ static inline void pv_vcpu_switch_host_context(struct kvm_vcpu *vcpu)
 host_ctxt->k_pcsp_lo = k_pcsp_lo;
 host_ctxt->k_pcsp_hi = k_pcsp_hi;
 
+/* remember host/guest OSEM registers state & restore guest/host state */
+osem = host_ctxt->osem;
+host_ctxt->osem = sw_ctxt->osem;
+sw_ctxt->osem = osem;
+
 /* keep current signal stack state */
 base = current_thread_info()->signal_stack.base;
 size = current_thread_info()->signal_stack.size;
@@ -778,6 +809,20 @@ static inline void pv_vcpu_exit_to_host(struct kvm_vcpu *vcpu)
 /* save VCPU guest thread context */
 /* restore VCPU host thread context */
 pv_vcpu_switch_host_context(vcpu);
+#ifdef DEBUG_UPSR_FP_DISABLE
+if (unlikely(!current_thread_info()->upsr.UPSR_fe)) {
+pr_err("%s(): switch to host QEMU process with disabled "
+"FloatPoint mask, UPSR 0x%x\n",
+__func__, current_thread_info()->upsr.UPSR_reg);
+/* correct UPSR to enable float pointing */
+current_thread_info()->upsr.UPSR_fe = 1;
+}
+if (unlikely(!vcpu->arch.host_ctxt.upsr.UPSR_fe)) {
+pr_err("%s(): switch from host VCPU process where disabled "
+"FloatPoint mask, UPSR 0x%x\n",
+__func__, vcpu->arch.host_ctxt.upsr.UPSR_reg);
+}
+#endif /* DEBUG_UPSR_FP_DISABLE */
 }
 
 static inline void pv_vcpu_enter_to_guest(struct kvm_vcpu *vcpu)
@@ -785,6 +830,14 @@ static inline void pv_vcpu_enter_to_guest(struct kvm_vcpu *vcpu)
 /* save VCPU host thread context */
 /* restore VCPU guest thread context */
 pv_vcpu_switch_host_context(vcpu);
+#ifdef DEBUG_UPSR_FP_DISABLE
+if (unlikely(!current_thread_info()->upsr.UPSR_fe)) {
+pr_err("%s(): switch to host VCPU process with disabled "
+"FloatPoint mask, UPSR 0x%x\n",
+__func__, current_thread_info()->upsr.UPSR_reg);
+/* do not correct UPSR, maybe it should be */
+}
+#endif /* DEBUG_UPSR_FP_DISABLE */
 }
 
 static inline void
@@ -879,11 +932,12 @@ host_syscall_from_guest_user(struct thread_info *ti)
 static inline void
 host_trap_guest_exit_intc(struct thread_info *ti, struct pt_regs *regs)
 {
-if (!kvm_test_and_clear_intc_emul_flag(regs)) {
+if (likely(!kvm_test_intc_emul_flag(regs))) {
 /* it is not paravirtualized guest VCPU intercepts*/
 /* emulation mode, so nothing to do more */
 return;
 }
+kvm_clear_intc_emul_flag(regs);
 
 /*
  * Return from trap on paravirtualized guest VCPU which was
@@ -972,31 +1026,20 @@ host_syscall_guest_get_pv_vcpu_restore_stacks(struct thread_info *ti,
 return &regs->g_stacks;
 } else {
 /* it need switch to guest user context */
-return native_syscall_guest_get_restore_stacks(ti, regs);
+return native_syscall_guest_get_restore_stacks(regs);
 }
 }
 
 static inline struct e2k_stacks *
 host_trap_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs)
 {
-if (test_ti_thread_flag(ti, TIF_HOST_AT_VCPU_MODE)) {
+if (test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE)) {
 /* host return to paravirtualized guest (VCPU) mode */
 return host_trap_guest_get_pv_vcpu_restore_stacks(ti, regs);
 }
 return native_trap_guest_get_restore_stacks(ti, regs);
 }
 
-static inline struct e2k_stacks *
-host_syscall_guest_get_restore_stacks(struct thread_info *ti,
-struct pt_regs *regs)
-{
-if (test_ti_thread_flag(ti, TIF_HOST_AT_VCPU_MODE)) {
-/* host return to paravirtualized guest (VCPU) mode */
-return host_syscall_guest_get_pv_vcpu_restore_stacks(ti, regs);
-}
-return native_syscall_guest_get_restore_stacks(ti, regs);
-}
-
 static inline void
 host_trap_pv_vcpu_exit_trap(struct thread_info *ti, struct pt_regs *regs)
 {
@@ -1021,7 +1064,7 @@ host_trap_pv_vcpu_exit_trap(struct thread_info *ti, struct pt_regs *regs)
 static inline void
 host_trap_guest_exit_trap(struct thread_info *ti, struct pt_regs *regs)
 {
-if (test_ti_thread_flag(ti, TIF_HOST_AT_VCPU_MODE)) {
+if (test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE)) {
 /* host return to paravirtualized guest (VCPU) mode */
 host_trap_pv_vcpu_exit_trap(ti, regs);
 }
@@ -1065,29 +1108,7 @@ host_syscall_pv_vcpu_exit_trap(struct thread_info *ti, struct pt_regs *regs)
 atomic_inc(&vcpu->arch.host_ctxt.signal.in_syscall);
 }
 
-static inline void
-host_syscall_guest_exit_trap(struct thread_info *ti, struct pt_regs *regs)
-{
-if (!test_ti_thread_flag(ti, TIF_HOST_AT_VCPU_MODE))
-return;
-
-/* host return to paravirtualized guest (VCPU) mode */
-host_syscall_pv_vcpu_exit_trap(ti, regs);
-
-host_switch_trap_enable_mask(ti, regs, true);
-}
-
-static inline void
-host_guest_syscall_exit_to(struct thread_info *ti, struct pt_regs *regs,
-unsigned flags)
-{
-if (flags & EXIT_FROM_INTC_SWITCH) {
-host_trap_guest_exit_intc(ti, regs);
-}
-if (flags & EXIT_FROM_TRAP_SWITCH) {
-host_syscall_guest_exit_trap(ti, regs);
-}
-}
-
+extern void host_syscall_guest_exit_trap(struct thread_info *, struct pt_regs *);
 
 extern void kvm_init_pv_vcpu_intc_handling(struct kvm_vcpu *vcpu, pt_regs_t *regs);
 extern int last_light_hcall;
@@ -1096,13 +1117,14 @@ static inline void
 host_trap_guest_exit(struct thread_info *ti, struct pt_regs *regs,
 trap_pt_regs_t *trap, unsigned flags)
 {
-if (!test_and_clear_ti_thread_flag(ti, TIF_HOST_AT_VCPU_MODE))
+if (likely(!test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE)))
 return;
 
+clear_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE);
+
 /*
  * Trap on paravirtualized guest VCPU is interpreted as intercept
 */
 
 kvm_emulate_pv_vcpu_intc(ti, regs, trap);
 
 /* only after switch to host MMU context at previous function */
@@ -1112,13 +1134,14 @@ host_trap_guest_exit(struct thread_info *ti, struct pt_regs *regs,
 /*
  * The function should return bool 'is the system call from guest?'
  */
-static inline bool
-host_guest_syscall_enter(struct thread_info *ti, struct pt_regs *regs)
+static inline bool host_guest_syscall_enter(struct pt_regs *regs,
+bool ts_host_at_vcpu_mode)
 {
-if (!test_and_clear_ti_thread_flag(ti, TIF_HOST_AT_VCPU_MODE))
+if (likely(!ts_host_at_vcpu_mode))
 return false; /* it is not guest system call */
 
-return pv_vcpu_syscall_intc(ti, regs);
+clear_ts_flag(TS_HOST_AT_VCPU_MODE);
+return pv_vcpu_syscall_intc(current_thread_info(), regs);
 }
 #endif /* CONFIG_VIRTUALIZATION */
 
@@ -1188,25 +1211,26 @@ trap_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs)
 }
 
 static inline struct e2k_stacks *
-syscall_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs)
+syscall_guest_get_restore_stacks(bool ts_host_at_vcpu_mode, struct pt_regs *regs)
 {
-return native_syscall_guest_get_restore_stacks(ti, regs);
+return native_syscall_guest_get_restore_stacks(regs);
 }
 
+#define ts_host_at_vcpu_mode() false
+
 /*
  * The function should return bool is the system call from guest
 */
-static inline bool
-guest_syscall_enter(struct thread_info *ti, struct pt_regs *regs)
+static inline bool guest_syscall_enter(struct pt_regs *regs,
+bool ts_host_at_vcpu_mode)
 {
-return native_guest_syscall_enter(ti, regs);
+return native_guest_syscall_enter(regs);
 }
-static inline void
-guest_syscall_exit_to(struct thread_info *ti, struct pt_regs *regs,
-unsigned flags)
-{
-native_guest_syscall_exit_to(ti, regs, flags);
-}
+static inline void guest_exit_intc(struct pt_regs *regs,
+bool intc_emul_flag) { }
+static inline void guest_syscall_exit_trap(struct pt_regs *regs,
+bool ts_host_at_vcpu_mode) { }
 
 #else /* CONFIG_VIRTUALIZATION */
 /* it is only host kernel with virtualization support */
 static inline void __guest_enter(struct thread_info *ti,
@@ -1268,25 +1292,47 @@ trap_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs)
 }
 
 static inline struct e2k_stacks *
-syscall_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs)
+syscall_guest_get_restore_stacks(bool ts_host_at_vcpu_mode, struct pt_regs *regs)
 {
-return host_syscall_guest_get_restore_stacks(ti, regs);
+if (unlikely(ts_host_at_vcpu_mode)) {
+/* host return to paravirtualized guest (VCPU) mode */
+return host_syscall_guest_get_pv_vcpu_restore_stacks(
+current_thread_info(), regs);
+}
+return native_syscall_guest_get_restore_stacks(regs);
 }
 
+#define ts_host_at_vcpu_mode() unlikely(!!test_ts_flag(TS_HOST_AT_VCPU_MODE))
+
 /*
  * The function should return bool is the system call from guest
 */
-static inline bool
-guest_syscall_enter(struct thread_info *ti, struct pt_regs *regs)
+static inline bool guest_syscall_enter(struct pt_regs *regs,
+bool ts_host_at_vcpu_mode)
 {
-return host_guest_syscall_enter(ti, regs);
+return host_guest_syscall_enter(regs, ts_host_at_vcpu_mode);
 }
-static inline void
-guest_syscall_exit_to(struct thread_info *ti, struct pt_regs *regs,
-unsigned flags)
+static inline void guest_exit_intc(struct pt_regs *regs, bool intc_emul_flag)
 {
-host_guest_syscall_exit_to(ti, regs, flags);
+if (unlikely(intc_emul_flag)) {
+kvm_clear_intc_emul_flag(regs);
+
+/*
+ * Return from trap on paravirtualized guest VCPU which was
+ * interpreted as interception
+ */
+return_from_pv_vcpu_intc(current_thread_info(), regs);
+}
 }
 
+static inline void guest_syscall_exit_trap(struct pt_regs *regs,
+bool ts_host_at_vcpu_mode)
+{
+if (unlikely(ts_host_at_vcpu_mode))
+host_syscall_guest_exit_trap(current_thread_info(), regs);
+}
+
 #endif /* ! CONFIG_VIRTUALIZATION */
 #endif /* CONFIG_PARAVIRT_GUEST */
 
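Aside, for illustration only (not part of this patch): the refactoring above makes the syscall helpers take the TS_HOST_AT_VCPU_MODE state as an explicit bool instead of re-testing (and clearing) a flag inside every callee. A hedged sketch of the intended caller pattern, assuming only ts_host_at_vcpu_mode(), guest_syscall_enter() and guest_syscall_exit_trap() from this header; the wrapper function name is invented:

static void syscall_path_sketch(struct pt_regs *regs)
{
	/* sample the status flag once on entry ... */
	bool from_vcpu_mode = ts_host_at_vcpu_mode();

	/* ... decide whether this is a guest system call (intercept) ... */
	if (guest_syscall_enter(regs, from_vcpu_mode)) {
		/* guest system call: handled as an interception */
	}

	/* ... and reuse the same sampled value on the exit path */
	guest_syscall_exit_trap(regs, from_vcpu_mode);
}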
@@ -147,8 +147,13 @@ typedef struct gthread_info {
 vcpu_l_gregs_t l_gregs; /* guest user "local" global */
 /* registers to save updated on page */
 /* fault values */
-kernel_gregs_t g_gregs; /* guest kernel global resiters state */
+kernel_gregs_t gk_gregs; /* guest kernel global resiters state */
+/* some registers can be updated only */
 /* after migration to other VCPU */
+kernel_gregs_t gu_gregs; /* guest user global resiters state */
+/* only for global registers which */
+/* used by the guest kernel for its */
+/* own purposes */
+
 /* the following flags to mark event: */
 /* hardware stacks bounds trap occured, but 'sge' on guest */
@@ -236,6 +241,8 @@ typedef struct gthread_info {
 /* thread */
 #define GTIF_THREAD_MIGRATED 2 /* the thread was migrated from one */
 /* VCPU to other */
+#define GTIF_USER_THREAD 4 /* the process is user thread on */
+/* common virtual memory (gmm) */
 #define GTIF_HW_PS_LOCKED 16 /* hardware procedure stack */
 /* was locked by host */
 #define GTIF_HW_PCS_LOCKED 17 /* hardware chain stack */
@@ -258,6 +265,7 @@ typedef struct gthread_info {
 #define _GTIF_VCPU_START_THREAD (1UL << GTIF_VCPU_START_THREAD)
 #define _GTIF_KERNEL_THREAD (1UL << GTIF_KERNEL_THREAD)
 #define _GTIF_THREAD_MIGRATED (1UL << GTIF_THREAD_MIGRATED)
+#define _GTIF_USER_THREAD (1UL << GTIF_USER_THREAD)
 #define _GTIF_HW_PS_LOCKED (1UL << GTIF_HW_PS_LOCKED)
 #define _GTIF_HW_PCS_LOCKED (1UL << GTIF_HW_PCS_LOCKED)
 #define _GTIF_HW_PS_PRESENTED (1UL << GTIF_HW_PS_PRESENTED)
@@ -444,6 +444,41 @@ TRACE_EVENT(
 TP_printk("gpa 0x%lx, data 0x%lx\n", __entry->gpa, __entry->data)
 );
 
+TRACE_EVENT(
+intc_clw,
+
+TP_PROTO(bool us_cl_d, unsigned long us_cl_b, unsigned long us_cl_up,
+unsigned long us_cl_m0, unsigned long us_cl_m1,
+unsigned long us_cl_m2, unsigned long us_cl_m3),
+
+TP_ARGS(us_cl_d, us_cl_b, us_cl_up, us_cl_m0, us_cl_m1, us_cl_m2, us_cl_m3),
+
+TP_STRUCT__entry(
+__field( bool, us_cl_d )
+__field( unsigned long, us_cl_b )
+__field( unsigned long, us_cl_up )
+__field( unsigned long, us_cl_m0 )
+__field( unsigned long, us_cl_m1 )
+__field( unsigned long, us_cl_m2 )
+__field( unsigned long, us_cl_m3 )
+),
+
+TP_fast_assign(
+__entry->us_cl_d = us_cl_d;
+__entry->us_cl_b = us_cl_b;
+__entry->us_cl_up = us_cl_up;
+__entry->us_cl_m0 = us_cl_m0;
+__entry->us_cl_m1 = us_cl_m1;
+__entry->us_cl_m2 = us_cl_m2;
+__entry->us_cl_m3 = us_cl_m3;
+),
+
+TP_printk("us_cl_d %d, us_cl_b 0x%lx, us_cl_up 0x%lx\n"
+"us_cl_m0 0x%lx us_cl_m1 0x%lx us_cl_m2 0x%lx, us_cl_m3 0x%lx\n",
+__entry->us_cl_d, __entry->us_cl_b, __entry->us_cl_up,
+__entry->us_cl_m0, __entry->us_cl_m1, __entry->us_cl_m2, __entry->us_cl_m3)
+);
+
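For reference (not part of this patch): TRACE_EVENT(intc_clw, ...) generates a trace_intc_clw() helper in the usual Linux tracepoint way. A minimal usage sketch, assuming the us_cl_* members of struct kvm_sw_cpu_context that the CLW switching code in this commit uses; the wrapper function itself is invented:

static void trace_clw_state_sketch(struct kvm_sw_cpu_context *sw_ctxt)
{
	/* emit the saved CLW window state to the kvm_hv trace buffer */
	trace_intc_clw(sw_ctxt->us_cl_d, sw_ctxt->us_cl_b, sw_ctxt->us_cl_up,
		       sw_ctxt->us_cl_m0, sw_ctxt->us_cl_m1,
		       sw_ctxt->us_cl_m2, sw_ctxt->us_cl_m3);
}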
 #endif /* _TRACE_KVM_HV_H */
 
 /* This part must be outside protection */
@@ -143,39 +143,29 @@
 }
 .endm /* GOTO_GUEST_KERNEL_TTABLE */
 
+# ifdef CONFIG_PARAVIRT_GUEST
 /*
  * goto guest kernel system call table entry, if system call is from guest user
  * rti: register of current_thread_info()
  * rtmp0 rtmp1 rtmp2: temporary registers
  * ptmp0 ptmp1: temporary predicates
 */
-.macro GOTO_PV_VCPU_KERNEL_TTABLE entry_num rti rtmp0 rtmp1 rtmp2 \
-ptmp0 ptmp1
+.macro GOTO_PV_VCPU_KERNEL_TTABLE rti rtmp0 rtmp1 rtmp2 ptmp1
 /* thread_info_t *ti = %dr7 */
 /* e2k_cr1_lo_t cr1_lo = NATIVE_READ_CR1_LO_REG(); */
 /* */
-/* if ((ti->flags & TIF_HOST_AT_VCPU_MODE)) { */
 /* if (ti->flags & _TIF_PARAVIRT_GUEST) { */
 /* DO_SWITCH_TO_KERNEL_IMAGE_PGD() */
 /* } */
-/* goto goto_guest_kernel_ttable_C( */
-/* sys_num << 32 | entry, */
-/* arg1, arg2, arg3, arg4, */
-/* arg5, arg6); */
-/* } */
 
 {
 ldd [\rti + TI_FLAGS], \rtmp0;
 sxt 2, %r0, %dr0;
 }
 {
-cmpandedb \rtmp0, _TIF_HOST_AT_VCPU_MODE, \ptmp0;
 cmpandedb \rtmp0, _TIF_PARAVIRT_GUEST, \ptmp1;
 }
 {
-pass \ptmp0, @p0;
-landp ~@p0, ~@p0, @p4;
-pass @p4, \ptmp0;
 pass \ptmp1, @p2;
 landp ~@p2, ~@p2, @p5;
 pass @p5, \ptmp1;
@@ -185,6 +175,10 @@
 /* rtmp0, rtmp1, rtmp2: temporary registers */
 DO_SWITCH_TO_KERNEL_IMAGE_PGD \rti, \ptmp1, \rtmp0, \rtmp1, \rtmp2
 .endm /* GOTO_GUEST_KERNEL_TTABLE */
+# else
+.macro GOTO_PV_VCPU_KERNEL_TTABLE rti rtmp0 rtmp1 rtmp2 ptmp1
+.endm
+# endif /* CONFIG_PARAVIRT_GUEST */
 
 /*
  * goto guest kernel fast system call table entry, if system call is
@@ -329,64 +323,6 @@
 #ifdef CONFIG_KVM_HOST_MODE
 /* it is host kernel with virtualization support */
 /* or paravirtualized host and guest kernel */
-.macro NEED_SAVE_CUR_AND_VCPU_STATE_GREGS drti, predV5, \
-drtmp0, drtmp1, predtmp, \
-predCUR, predVCPU, predEXTk
-/*
- * drti - pointer to thread_info
- * predV5 - ISET is V5
- * predCUR - is now set to true (trap from user) and can be updated
- * to does not save kernel global regs and set current
- * Trap at host mode and host kernel currents and other global registers
- * (GCURTI & GCURTASK & CPU_ID & CPU_OFF)
- * should not be saved to not invalidate guest kernel or user state of
- * global registers, which were or will be saved at thread info
- * %predVCPU - save VCPU state pointer regs
- * predEXTk - need save kernel (predCUR) & need save extention (!predV5)
- *
- * predCUR = test_thread_flag(TIF_HOST_AT_VCPU_MODE) &&
- * !test_thread_flag(TIF_LIGHT_HYPERCALL) ||
- * !test_thread_flag(TIF_HOST_AT_VCPU_MODE) &&
- * (cr0_hi.CR0_hi_IP >= NATIVE_TASK_SIZE)
- * predVCPU = predCUR;
- * predEXTk = predCUR & !predV5
- */
-{
-rrd %cr0.hi, \drtmp0; /* %drtmp0: cr0_hi.IP */
-ldd [\drti + TI_FLAGS], \drtmp1; /* %drtmp1: ti->flags */
-}
-{
-cmpbdb \drtmp0, NATIVE_TASK_SIZE, \predtmp;
-cmpandedb \drtmp1, _TIF_LIGHT_HYPERCALL, \predCUR;
-cmpandedb \drtmp1, _TIF_HOST_AT_VCPU_MODE, \predVCPU;
-}
-{
-nop 1;
-pass \predtmp, @p2;
-pass \predCUR, @p0;
-pass \predVCPU, @p1;
-landp @p0, ~@p1, @p4;
-pass @p4, \predCUR;
-}
-{
-nop 1;
-pass \predVCPU, @p0;
-pass \predCUR, @p2;
-pass \predtmp, @p1;
-landp @p0, ~@p1, @p4;
-landp ~@p2, ~@p4, @p5;
-landp ~@p2, ~@p4, @p6;
-pass @p5, \predCUR;
-pass @p6, \predVCPU;
-}
-{
-pass \predV5, @p0;
-pass \predCUR, @p1;
-landp ~@p0, @p1, @p4;
-pass @p4, \predEXTk;
-}
-.endm /* NEED_SAVE_CUR_AND_VCPU_STATE_GREGS */
-
 .macro DO_SAVE_HOST_GREGS_V2 gvcpu_lo, gvcpu_hi, hvcpu_lo, hvcpu_hi \
 drti, predSAVE, drtmp, rtmp0, rtmp1
 /* drtmp: thread_info->h_gregs.g */
@@ -439,27 +375,6 @@
 #include <asm/kvm/guest/trap_table.S.h>
 #else /* ! CONFIG_KVM_HOST_MODE && ! CONFIG_KVM_GUEST_KERNEL */
 /* It is native host kernel without any virtualization */
-.macro NEED_SAVE_CUR_AND_VCPU_STATE_GREGS drti, predV5, \
-drtmp0, drtmp1, predtmp, \
-predCUR, predVCPU, predEXTk
-/*
- * drti - pointer to thread_info (unused)
- * predV5 - ISET is V5
- * predCUR - save kernel global regs and set current (already
- * calculated, don't update)
- * %predVCPU - set to false (none any VCPUs)
- * predEXTk - need save kernel (predCUR) & need save extention (!predV5)
- */
-{
-pass \predV5, @p0;
-pass \predCUR, @p1;
-landp ~@p0, @p1, @p4;
-landp ~@p1, @p1, @p5;
-pass @p4, \predEXTk;
-pass @p5, \predVCPU;
-}
-.endm /* NEED_SAVE_CUR_AND_VCPU_STATE_GREGS */
-
 .macro SAVE_HOST_GREGS_TO_VIRT_V2 drti, predSAVE, drtmp, rtmp0, rtmp1
 /* not used */
 .endm /* SAVE_VCPU_STATE_GREGS */
@ -156,11 +156,6 @@ is_guest_TIRs_frozen(struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
return false; /* none any guest */
|
return false; /* none any guest */
|
||||||
}
|
}
|
||||||
static inline bool
|
|
||||||
have_deferred_traps(struct pt_regs *regs)
|
|
||||||
{
|
|
||||||
return native_have_deferred_traps(regs);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline bool
|
static inline bool
|
||||||
handle_guest_last_wish(struct pt_regs *regs)
|
handle_guest_last_wish(struct pt_regs *regs)
|
||||||
|
@ -261,7 +256,6 @@ typedef enum trap_hndl {
|
||||||
} trap_hndl_t;
|
} trap_hndl_t;
|
||||||
|
|
||||||
extern trap_hndl_t kvm_do_handle_guest_traps(struct pt_regs *regs);
|
extern trap_hndl_t kvm_do_handle_guest_traps(struct pt_regs *regs);
|
||||||
extern trap_hndl_t kvm_handle_guest_deferred_traps(struct pt_regs *regs);
|
|
||||||
|
|
||||||
extern bool kvm_is_guest_TIRs_frozen(struct pt_regs *regs);
|
extern bool kvm_is_guest_TIRs_frozen(struct pt_regs *regs);
|
||||||
extern bool kvm_is_guest_proc_stack_bounds(struct pt_regs *regs);
|
extern bool kvm_is_guest_proc_stack_bounds(struct pt_regs *regs);
|
||||||
|
@ -285,8 +279,7 @@ extern unsigned long kvm_pass_page_fault_to_guest(struct pt_regs *regs,
|
||||||
trap_cellar_t *tcellar);
|
trap_cellar_t *tcellar);
|
||||||
extern void kvm_complete_page_fault_to_guest(unsigned long what_complete);
|
extern void kvm_complete_page_fault_to_guest(unsigned long what_complete);
|
||||||
|
|
||||||
extern noinline notrace int do_hret_last_wish_intc(struct kvm_vcpu *vcpu,
|
extern int do_hret_last_wish_intc(struct kvm_vcpu *vcpu, struct pt_regs *regs);
|
||||||
struct pt_regs *regs);
|
|
||||||
|
|
||||||
extern void trap_handler_trampoline(void);
|
extern void trap_handler_trampoline(void);
|
||||||
extern void syscall_handler_trampoline(void);
|
extern void syscall_handler_trampoline(void);
|
||||||
|
@ -302,12 +295,10 @@ kvm_init_guest_traps_handling(struct pt_regs *regs, bool user_mode_trap)
|
||||||
{
|
{
|
||||||
regs->traps_to_guest = 0; /* only for host */
|
regs->traps_to_guest = 0; /* only for host */
|
||||||
regs->is_guest_user = false; /* only for host */
|
regs->is_guest_user = false; /* only for host */
|
||||||
regs->deferred_traps = 0; /* for host and guest */
|
|
||||||
regs->g_stacks_valid = false; /* only for host */
|
regs->g_stacks_valid = false; /* only for host */
|
||||||
if (user_mode_trap &&
|
if (user_mode_trap && test_thread_flag(TIF_LIGHT_HYPERCALL) &&
|
||||||
test_thread_flag(TIF_LIGHT_HYPERCALL) &&
|
|
||||||
(NATIVE_NV_READ_CR1_LO_REG().CR1_lo_pm)) {
|
(NATIVE_NV_READ_CR1_LO_REG().CR1_lo_pm)) {
|
||||||
regs->flags |= LIGHT_HYPERCALL_FLAG_PT_REGS;
|
regs->flags.light_hypercall = 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -316,14 +307,8 @@ kvm_init_guest_syscalls_handling(struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
regs->traps_to_guest = 0; /* only for host */
|
regs->traps_to_guest = 0; /* only for host */
|
||||||
regs->is_guest_user = true; /* only for host */
|
regs->is_guest_user = true; /* only for host */
|
||||||
regs->deferred_traps = 0; /* only for guest */
|
|
||||||
regs->g_stacks_valid = false; /* only for host */
|
regs->g_stacks_valid = false; /* only for host */
|
||||||
}
|
}
|
||||||
static inline bool
|
|
||||||
kvm_have_guest_deferred_traps(struct pt_regs *regs)
|
|
||||||
{
|
|
||||||
return regs->deferred_traps != 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void
|
static inline void
|
||||||
kvm_exit_handle_syscall(e2k_addr_t sbr, e2k_usd_hi_t usd_hi,
|
kvm_exit_handle_syscall(e2k_addr_t sbr, e2k_usd_hi_t usd_hi,
|
||||||
|
@ -348,6 +333,12 @@ kvm_handle_guest_last_wish(struct pt_regs *regs)
|
||||||
/* it is not guest VCPU thread, or completed */
|
/* it is not guest VCPU thread, or completed */
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
if (vcpu->arch.trap_wish) {
|
||||||
|
/* some trap was injected, goto trap handling */
|
||||||
|
regs->traps_to_guest |= vcpu->arch.trap_mask_wish;
|
||||||
|
vcpu->arch.trap_mask_wish = 0;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
if (vcpu->arch.virq_wish) {
|
if (vcpu->arch.virq_wish) {
|
||||||
/* trap is only to interrupt guest kernel on guest mode */
|
/* trap is only to interrupt guest kernel on guest mode */
|
||||||
/* to provide injection of pending VIRQs on guest */
|
/* to provide injection of pending VIRQs on guest */
|
||||||
|
@ -407,8 +398,11 @@ kvm_should_pass_the_trap_to_guest(struct pt_regs *regs, int trap_no)
|
||||||
}
|
}
|
||||||
} else if (vcpu->arch.is_pv) {
|
} else if (vcpu->arch.is_pv) {
|
||||||
if (vcpu->arch.virq_wish) {
|
if (vcpu->arch.virq_wish) {
|
||||||
/* it is paravirtualized guest, pass trap */
|
/* it is paravirtualized guest, pass */
|
||||||
/* to guest, if it is enabled */
|
/* interrupt to guest, if it is enabled */
|
||||||
|
;
|
||||||
|
} else if (vcpu->arch.trap_wish) {
|
||||||
|
/* it is wish to inject some trap to guest */
|
||||||
;
|
;
|
||||||
} else {
|
} else {
|
||||||
/* there is not any wish for guest */
|
/* there is not any wish for guest */
|
||||||
|
@ -479,15 +473,8 @@ static inline bool kvm_handle_guest_traps(struct pt_regs *regs)
|
||||||
"created\n");
|
"created\n");
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
regs->flags |= GUEST_FLAG_PT_REGS;
|
|
||||||
ret = kvm_do_handle_guest_traps(regs);
|
ret = kvm_do_handle_guest_traps(regs);
|
||||||
regs->traps_to_guest = 0;
|
regs->traps_to_guest = 0;
|
||||||
if (regs->deferred_traps) {
|
|
||||||
/* New traps (VIRQs interrupt) occured to pass to guest */
|
|
||||||
ret = kvm_handle_guest_deferred_traps(regs);
|
|
||||||
regs->deferred_traps = 0;
|
|
||||||
}
|
|
||||||
regs->flags &= ~GUEST_FLAG_PT_REGS;
|
|
||||||
|
|
||||||
if (ret == GUEST_TRAP_HANDLED) {
|
if (ret == GUEST_TRAP_HANDLED) {
|
||||||
DebugKVMGT("the guest trap handled\n");
|
DebugKVMGT("the guest trap handled\n");
|
||||||
|
@ -552,11 +539,6 @@ is_guest_TIRs_frozen(struct pt_regs *regs)
|
||||||
|
|
||||||
return kvm_is_guest_TIRs_frozen(regs);
|
return kvm_is_guest_TIRs_frozen(regs);
|
||||||
}
|
}
|
||||||
static inline bool
|
|
||||||
have_deferred_traps(struct pt_regs *regs)
|
|
||||||
{
|
|
||||||
return kvm_have_guest_deferred_traps(regs);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline bool
|
static inline bool
|
||||||
handle_guest_last_wish(struct pt_regs *regs)
|
handle_guest_last_wish(struct pt_regs *regs)
|
||||||
|
@@ -580,17 +562,17 @@ kvm_host_instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype,
 kvm_pv_mmu_instr_page_fault(vcpu, regs, ftype, async_instr);
 }

-static inline void
+static inline int
 kvm_host_do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address,
 const tc_cond_t condition, const tc_mask_t mask,
 const unsigned int aa_no)
 {
 if (likely(!kvm_test_intc_emul_flag(regs))) {
-native_do_aau_page_fault(regs, address, condition, mask, aa_no);
-return;
+return native_do_aau_page_fault(regs, address, condition, mask,
+aa_no);
 }

-kvm_pv_mmu_aau_page_fault(current_thread_info()->vcpu, regs,
+return kvm_pv_mmu_aau_page_fault(current_thread_info()->vcpu, regs,
 address, condition, aa_no);
 }

@@ -830,12 +812,13 @@ instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype,
 kvm_host_instr_page_fault(regs, ftype, async_instr);
 }

-static inline void
+static inline int
 do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address,
 const tc_cond_t condition, const tc_mask_t mask,
 const unsigned int aa_no)
 {
-kvm_host_do_aau_page_fault(regs, address, condition, mask, aa_no);
+return kvm_host_do_aau_page_fault(regs, address, condition, mask,
+aa_no);
 }
 #endif /* CONFIG_VIRTUALIZATION */

@@ -0,0 +1,51 @@
+/*
+*
+* Copyright (C) 2020 MCST
+*
+* Definitions of KVM traps handling routines.
+*/
+
+#ifndef _E2K_KVM_TTABLE_HELP_H
+#define _E2K_KVM_TTABLE_HELP_H
+
+#ifdef CONFIG_KVM_HOST_MODE
+/* it is native kernel with virtualization support (hypervisor) */
+
+#ifdef CONFIG_CPU_HW_CLEAR_RF
+
+# ifdef GENERATING_HEADER
+# define RETURN_PV_VCPU_TRAP_SIZE 0x1
+# define HANDLE_PV_VCPU_SYS_CALL_SIZE 0x1
+# define HANDLE_PV_VCPU_SYS_FORK_SIZE 0x1
+# endif
+
+# define CLEAR_RETURN_PV_VCPU_TRAP_WINDOW() E2K_DONE()
+# define CLEAR_HANDLE_PV_VCPU_SYS_CALL_WINDOW(r0) E2K_SYSCALL_RETURN(r0)
+# define CLEAR_HANDLE_PV_VCPU_SYS_FORK_WINDOW(r0) E2K_SYSCALL_RETURN(r0)
+
+#else /* ! CONFIG_CPU_HW_CLEAR_RF */
+
+# ifdef GENERATING_HEADER
+# define CLEAR_RETURN_PV_VCPU_TRAP_WINDOW() E2K_EMPTY_CMD(: "ctpr3")
+# define CLEAR_HANDLE_PV_VCPU_SYS_CALL_WINDOW(r0) \
+E2K_EMPTY_CMD([_r0] "ir" (r0) : "ctpr3")
+# define CLEAR_HANDLE_PV_VCPU_SYS_FORK_WINDOW(r0) \
+E2K_EMPTY_CMD([_r0] "ir" (r0) : "ctpr3")
+# define RETURN_PV_VCPU_TRAP_SIZE 0x1
+# define HANDLE_PV_VCPU_SYS_CALL_SIZE 0x1
+# define HANDLE_PV_VCPU_SYS_FORK_SIZE 0x1
+# endif
+
+#endif /* CONFIG_CPU_HW_CLEAR_RF */
+
+#else /* !CONFIG_KVM_HOST_MODE */
+/* It is native guest kernel whithout virtualization support */
+/* Virtualiztion in guest mode cannot be supported */
+
+# define CLEAR_RETURN_PV_VCPU_TRAP_WINDOW()
+# define CLEAR_HANDLE_PV_VCPU_SYS_CALL_WINDOW(rval)
+# define CLEAR_HANDLE_PV_VCPU_SYS_FORK_WINDOW(rval)
+
+#endif /* CONFIG_KVM_HOST_MODE */
+
+#endif /* _E2K_KVM_TTABLE_HELP_H */
@@ -54,24 +54,38 @@ native_copy_from_user_with_tags(void *to, const void __user *from,
 ({ \
 __typeof__(*(uptr)) __user *___pu_ptr = (uptr); \
 int sz_uptr = sizeof(*(uptr)); \
+long res; \
 \
 ___pu_ptr = (!host_test_intc_emul_mode(hregs)) ? \
 (uptr) \
 : \
-kvm_guest_ptr_to_host_ptr((uptr), sz_uptr); \
-(___pu_ptr) ? native_get_user(kval, ___pu_ptr) : -EFAULT; \
+kvm_guest_ptr_to_host_ptr((uptr), sz_uptr, \
+true); \
+if (PTR_ERR(___pu_ptr) == -EAGAIN) \
+res = -EAGAIN; \
+else \
+res = (___pu_ptr) ? native_get_user(kval, ___pu_ptr) : \
+-EFAULT; \
+(res); \
 })

 #define host_put_user(kval, uptr, hregs) \
 ({ \
 __typeof__(*(uptr)) __user *___pu_ptr = (uptr); \
 int sz_uptr = sizeof(*(uptr)); \
+long res; \
 \
 ___pu_ptr = (!host_test_intc_emul_mode(hregs)) ? \
 (uptr) \
 : \
-kvm_guest_ptr_to_host_ptr((uptr), sz_uptr); \
-(___pu_ptr) ? native_put_user(kval, ___pu_ptr) : -EFAULT; \
+kvm_guest_ptr_to_host_ptr((uptr), sz_uptr, \
+true); \
+if (PTR_ERR(___pu_ptr) == -EAGAIN) \
+res = -EAGAIN; \
+else \
+res = (___pu_ptr) ? native_put_user(kval, ___pu_ptr) : \
+-EFAULT; \
+(res); \
 })

 extern unsigned long kvm_copy_in_user_with_tags(void __user *to,
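Note on the hunk above: with the extra argument passed to kvm_guest_ptr_to_host_ptr(), host_get_user()/host_put_user() can now surface -EAGAIN (the translation should be retried) in addition to the old -EFAULT path. A hedged caller-side sketch of how that third outcome might be consumed; the wrapper name is hypothetical and not part of the patch:

```c
/* Hypothetical helper, not from the patch: retries host_get_user() while
 * the guest-to-host translation asks to be repeated, and returns the final
 * status (0 on success, -EFAULT if the pointer cannot be resolved). */
#define host_get_user_retry(kval, uptr, hregs)			\
({								\
	long __hgur_ret;					\
	do {							\
		__hgur_ret = host_get_user(kval, uptr, hregs);	\
	} while (__hgur_ret == -EAGAIN);			\
	__hgur_ret;						\
})
```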
@@ -0,0 +1,86 @@
+/*
+* KVM guest kernel processes support
+* Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru)
+*/
+
+#ifndef _E2K_KVM_VCPU_REGS_DEBUG_INLINE_H
+#define _E2K_KVM_VCPU_REGS_DEBUG_INLINE_H
+
+#include <linux/types.h>
+#include <asm/e2k_api.h>
+#include <asm/kvm/guest.h>
+
+#ifdef VCPU_REGS_DEBUG
+
+static inline void init_vcpu_regs_trace(void)
+{
+vcpu_regs_trace_t *trace;
+
+trace = get_vcpu_regs_trace_struct();
+atomic_set(&trace->count, 0);
+vcpu_regs_trace_on = true;
+}
+
+static inline void dump_vcpu_regs_entry(vcpu_regs_t *regs, int entry_no)
+{
+u32 updated = regs->updated;
+
+pr_alert("[%02d] : PSR %02x\tUPSR %03x\tunder UPSR %d\n",
+entry_no, regs->psr, regs->upsr, regs->under_upsr);
+pr_alert(" updated : %s %s %s %s\n",
+(updated & PSR_UPDATE_MASK) ? "PSR" : "",
+(updated & UPSR_UPDATE_MASK) ? "UPSR" : "",
+(updated & UNDER_UPSR_UPDATE_MASK) ? "under UPSR" : "",
+(regs->injected) ? "injected IRQs" : "");
+pr_alert(" IP %pF called from IP %pF\n",
+(void *)regs->IP, (void *)regs->IP_from);
+pr_alert(" clock : start 0x%llx end 0x%llx delta 0x%llx\n",
+regs->clock_start, regs->clock_end,
+regs->clock_end - regs->clock_start);
+}
+
+static inline void dump_vcpu_regs_trace(void)
+{
+vcpu_regs_trace_t *trace;
+vcpu_regs_t *regs;
+int count, ent, num, entries;
+
+/* stop tracing */
+vcpu_regs_trace_on = false;
+
+trace = get_vcpu_regs_trace_struct();
+count = atomic_read(&trace->count);
+pr_alert("CPU #%d : Trace of VCPU #%d some registers update history\n",
+raw_smp_processor_id(), KVM_READ_VCPU_ID());
+if (count == 0) {
+pr_alert(" trace is empty\n");
+return;
+}
+
+entries = (count > MAX_VCPU_REGS_TRACE_NUM) ?
+MAX_VCPU_REGS_TRACE_NUM : count;
+for (ent = VCPU_REGS_TRACE_INDEX(count), num = 0;
+num < entries;
+ent = VCPU_REGS_TRACE_INDEX(ent - 1), num++) {
+regs = &trace->regs[ent];
+dump_vcpu_regs_entry(regs, ent);
+}
+}
+#else /* !VCPU_REGS_DEBUG */
+
+#define vcpu_regs_trace_on false
+
+static inline void init_vcpu_regs_trace(void)
+{
+}
+
+#define trace_vcpu_upsr_update(upsr_val, injected_irqs)
+#define trace_vcpu_psr_update(psr_val, under_upsr)
+
+static inline void dump_vcpu_regs_trace(void)
+{
+}
+
+#endif /* VCPU_REGS_DEBUG */
+
+#endif /* ! _E2K_KVM_VCPU_REGS_DEBUG_INLINE_H */
@@ -0,0 +1,130 @@
+/*
+* KVM guest kernel processes support
+* Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru)
+*/
+
+#ifndef _E2K_KVM_VCPU_REGS_DEBUG_H
+#define _E2K_KVM_VCPU_REGS_DEBUG_H
+
+/* do not include this header directly, only through asm/kvm/guest.h */
+
+#include <linux/types.h>
+
+#undef VCPU_REGS_DEBUG
+
+typedef struct vcpu_regs {
+u64 IP;
+u64 IP_from;
+u64 clock_start;
+u64 clock_end;
+u32 psr;
+u32 upsr;
+bool under_upsr;
+u8 updated;
+bool injected;
+} vcpu_regs_t;
+
+#define MAX_VCPU_REGS_TRACE_NUM 32
+#define VCPU_REGS_TRACE_MASK (MAX_VCPU_REGS_TRACE_NUM - 1)
+#define VCPU_REGS_TRACE_INDEX(count) ((count) & VCPU_REGS_TRACE_MASK)
+
+typedef struct vcpu_regs_trace {
+atomic_t count;
+vcpu_regs_t regs[MAX_VCPU_REGS_TRACE_NUM];
+} vcpu_regs_trace_t;
+
+#define PSR_UPDATE_MASK 0x01U
+#define UPSR_UPDATE_MASK 0x02U
+#define UNDER_UPSR_UPDATE_MASK 0x04U
+
+#define GET_CLOCK_REG() NATIVE_READ_CLKR_REG_VALUE()
+
+#define GUEST_GET_IRQS_UNDER_UPSR() \
+({ \
+kvm_vcpu_state_t *vcpu_state; \
+bool under_upsr; \
+\
+KVM_GET_VCPU_STATE_BASE(vcpu_state); \
+under_upsr = vcpu_state->irqs_under_upsr; \
+under_upsr; \
+})
+
+#ifdef VCPU_REGS_DEBUG
+
+#undef PSR
+extern bool vcpu_regs_trace_on;
+extern int vcpu_regs_trace_on_num;
+
+#define get_vcpu_regs_trace_struct() \
+({ \
+struct kvm_vcpu_state *vcpu_state; \
+\
+KVM_GET_VCPU_STATE_BASE(vcpu_state); \
+&vcpu_state->trace; \
+})
+
+#define get_next_vcpu_regs_trace() \
+({ \
+vcpu_regs_trace_t *trace; \
+vcpu_regs_t *regs; \
+int count; \
+\
+if (likely(!vcpu_regs_trace_on)) { \
+regs = NULL; \
+} else { \
+trace = get_vcpu_regs_trace_struct(); \
+count = atomic_inc_return(&trace->count); \
+regs = &trace->regs[VCPU_REGS_TRACE_INDEX(count)]; \
+regs->clock_start = GET_CLOCK_REG(); \
+regs->IP = NATIVE_READ_IP_REG_VALUE(); \
+regs->IP_from = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \
+regs->updated = 0; \
+regs->psr = GUEST_GET_CPU_SREG(E2K_PSR); \
+regs->upsr = GUEST_GET_CPU_SREG(UPSR); \
+regs->under_upsr = GUEST_GET_IRQS_UNDER_UPSR(); \
+regs->injected = false; \
+} \
+regs; \
+})
+
+#define trace_vcpu_upsr_update(upsr_val, injected_irqs) \
+do { \
+vcpu_regs_t *regs; \
+if (likely(!vcpu_regs_trace_on)) \
+break; \
+regs = get_next_vcpu_regs_trace(); \
+if (unlikely(regs == NULL)) \
+break; \
+regs->upsr = (upsr_val); \
+regs->updated |= UPSR_UPDATE_MASK; \
+if (injected_irqs) { \
+regs->injected = (injected_irqs); \
+} \
+E2K_CMD_SEPARATOR; \
+regs->clock_end = GET_CLOCK_REG(); \
+} while (false)
+
+#define trace_vcpu_psr_update(psr_val, under_upsr_mode) \
+do { \
+vcpu_regs_t *regs; \
+if (likely(!vcpu_regs_trace_on)) \
+break; \
+regs = get_next_vcpu_regs_trace(); \
+if (unlikely(regs == NULL)) \
+break; \
+regs->psr = (psr_val); \
+regs->updated |= PSR_UPDATE_MASK | UNDER_UPSR_UPDATE_MASK; \
+regs->under_upsr = (under_upsr_mode); \
+E2K_CMD_SEPARATOR; \
+regs->clock_end = GET_CLOCK_REG(); \
+} while (false)
+
+#else /* !VCPU_REGS_DEBUG */
+
+#define vcpu_regs_trace_on false
+#define trace_vcpu_upsr_update(upsr_val, injected_irqs)
+#define trace_vcpu_psr_update(psr_val, under_upsr)
+
+#endif /* VCPU_REGS_DEBUG */
+
+#endif /* ! _E2K_KVM_VCPU_REGS_DEBUG_H */
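Note on the new header above: the trace it defines is a power-of-two ring buffer. get_next_vcpu_regs_trace() bumps an atomic counter and masks it with VCPU_REGS_TRACE_MASK, so after 32 records the newest entries overwrite the oldest, and dump_vcpu_regs_trace() walks the slots newest-first. A standalone sketch of that indexing scheme (plain user-space C reusing the macro names for clarity, not kernel code):

```c
#include <stdio.h>

#define MAX_VCPU_REGS_TRACE_NUM	32	/* must stay a power of two */
#define VCPU_REGS_TRACE_MASK	(MAX_VCPU_REGS_TRACE_NUM - 1)
#define VCPU_REGS_TRACE_INDEX(count)	((count) & VCPU_REGS_TRACE_MASK)

int main(void)
{
	int count = 37;	/* pretend 37 records were written */
	int entries = count > MAX_VCPU_REGS_TRACE_NUM ?
			MAX_VCPU_REGS_TRACE_NUM : count;

	/* newest-first walk, the same loop shape dump_vcpu_regs_trace() uses */
	for (int ent = VCPU_REGS_TRACE_INDEX(count), num = 0;
			num < entries;
			ent = VCPU_REGS_TRACE_INDEX(ent - 1), num++)
		printf("slot %2d holds record %d\n", ent, count - num);

	return 0;
}
```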
@@ -110,6 +110,7 @@ kvm_is_hw_pv_vm_available(void)
 #define E2K_INVALID_PAGE (~(hpa_t)0)

 #define UNMAPPED_GVA (~(gpa_t)0)
+#define arch_is_error_gpa(gpa) ((gpa_t)(gpa) == UNMAPPED_GVA)

 /*
 * See include/linux/kvm_host.h
@@ -387,6 +388,10 @@ typedef struct kvm_mmu_page {

 /* Number of writes since the last time traversal visited this page. */
 atomic_t write_flooding_count;
+#ifdef CONFIG_GUEST_MM_SPT_LIST
+struct list_head gmm_entry; /* entry at the gmm list of SPs */
+gmm_struct_t *gmm; /* the gmm in whose list the entry */
+#endif /* CONFIG_GUEST_MM_SPT_LIST */
 } kvm_mmu_page_t;

 /* page fault handling results */
@@ -533,9 +538,9 @@ typedef struct kvm_mmu {
 struct kvm_arch_exception *exception);
 void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 pgprot_t *spte, const void *pte);
-void (*flush_gva)(struct kvm_vcpu *vcpu, gva_t gva);
-void (*flush_gva_range)(struct kvm_vcpu *vcpu, gva_t gva_start,
-gva_t gva_end);
+void (*sync_gva)(struct kvm_vcpu *vcpu, gva_t gva);
+void (*sync_gva_range)(struct kvm_vcpu *vcpu, gva_t gva_start,
+gva_t gva_end, bool flush_tlb);
 int (*sync_page)(struct kvm_vcpu *vcpu, kvm_mmu_page_t *sp);
 } kvm_mmu_t;

@@ -649,6 +654,18 @@ typedef struct kvm_sw_cpu_context {
 e2k_sbr_t sbr;
 } saved;

+/*
+* Host VCPU local data stack pointer registers state (to save/restore).
+* It is relevant only for paravirtualization, since in this case
+* there is one VCPU process, but there are two mode of its execution:
+* as host part of VCPU (qemu)
+* as guest part of VCPU
+* and, accordingly, two stacks: host & guest.
+*/
+e2k_usd_lo_t host_usd_lo;
+e2k_usd_hi_t host_usd_hi;
+e2k_sbr_t host_sbr;
+
 e2k_mem_crs_t crs; /* only for PV guest */

 /*
@@ -726,6 +743,14 @@ typedef struct kvm_sw_cpu_context {
 u64 rpr_lo;
 u64 rpr_hi;
 u64 tcd;
+
+mmu_reg_t us_cl_d;
+clw_reg_t us_cl_b;
+clw_reg_t us_cl_up;
+clw_reg_t us_cl_m0;
+clw_reg_t us_cl_m1;
+clw_reg_t us_cl_m2;
+clw_reg_t us_cl_m3;
 } kvm_sw_cpu_context_t;

 /*
@@ -873,6 +898,9 @@ typedef struct kvm_host_context {

 /* pointer to the top of 'pt_regs' structures list */
 pt_regs_t *pt_regs;
+/* some additional items of processes context */
+e2k_upsr_t upsr; /* user UPSR register state */
+unsigned osem; /* OSEM register state */
 /* the host kernel's signal/trap stack of contexts */
 kvm_signal_context_t signal;
 } kvm_host_context_t;
@@ -896,6 +924,9 @@ struct kvm_vcpu_arch {
 /* support */
 bool is_hv; /* VCPU is under hardware virtualized */
 /* support */
+/* host switch to vcpu-host mode from host interception emulation mode */
+/* (trap or system call on PV mode) */
+bool from_pv_intc;

 kvm_vcpu_state_t *vcpu_state;
 kvm_vcpu_state_t *kmap_vcpu_state; /* alias of VCPU state */
@@ -990,6 +1021,9 @@ struct kvm_vcpu_arch {
 bool on_virqs_handling; /* VCPU is handling pending VIRQs */
 bool vm_exit_wish; /* VCPU is need to VM exit and */
 /* exit reason handling */
+bool trap_wish; /* VCPU is need to inject traps */
+bool hcall_irqs_disabled; /* VCPU entered HCALL with disabled interrupts */
+unsigned long trap_mask_wish; /* mask of traps to wish */
 struct completion exited; /* guest VCPU thread completed */
 struct completion released; /* all VCPU threads completed and */
 /* VCPU can be freed */
@@ -1371,16 +1405,12 @@ static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
 #define KVM_ARCH_WANT_MMU_NOTIFIER

 #ifdef KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm,
-unsigned long start, unsigned long end);
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, unsigned flags);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 #endif /* KVM_ARCH_WANT_MMU_NOTIFIER */

-extern int kvm_mmu_load(struct kvm_vcpu *vcpu, unsigned flags);
-extern void kvm_mmu_unload(struct kvm_vcpu *vcpu, unsigned flags);
-
 extern void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);

 extern int kvm_wake_up_vcpu_host(struct kvm_vcpu *vcpu, int wait);
@@ -52,7 +52,7 @@ typedef struct { unsigned iopte; } iopte_t;

 #define addr_to_flush(__a) ((__a) >> IO_PAGE_SHIFT)

-static inline void l_iommu_write(unsigned node, u32 val, unsigned long addr)
+static inline void __l_iommu_write(unsigned node, u32 val, unsigned long addr)
 {
 sic_write_node_iolink_nbsr_reg(node, 0, addr, val);
 }
@@ -62,10 +62,13 @@ static inline u32 l_iommu_read(unsigned node, unsigned long addr)
 return sic_read_node_iolink_nbsr_reg(node, 0, addr);
 }

-static inline void l_iommu_set_ba(unsigned node, unsigned long *ba)
+#define __l_iommu_set_ba __l_iommu_set_ba
+static inline void __l_iommu_set_ba(unsigned node, unsigned long *ba)
 {
-l_iommu_write(node, pa_to_iopte(ba[IOMMU_LOW_TABLE]), SIC_iommu_ba_lo);
-l_iommu_write(node, pa_to_iopte(ba[IOMMU_HIGH_TABLE]), SIC_iommu_ba_hi);
+__l_iommu_write(node, pa_to_iopte(ba[IOMMU_LOW_TABLE]),
+SIC_iommu_ba_lo);
+__l_iommu_write(node, pa_to_iopte(ba[IOMMU_HIGH_TABLE]),
+SIC_iommu_ba_hi);
 }

 #define l_prefetch_iopte_supported l_prefetch_iopte_supported
@@ -82,9 +85,9 @@ static inline void l_prefetch_iopte(iopte_t *iopte, int prefetch)
 iopte_val(iopte[0]) |= IOPTE_STP_PREF_IOPTE;
 }

-static inline void *l_iommu_map_table(void *va, unsigned long size)
+static inline void *l_iommu_map_table(unsigned long pa, unsigned long size)
 {
-phys_addr_t start = __pa(va);
+phys_addr_t start = pa;
 pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);
 struct page **pages;
 phys_addr_t page_start;
@@ -93,7 +96,7 @@ static inline void *l_iommu_map_table(void *va, unsigned long size)
 void *vaddr;

 if (!cpu_has(CPU_HWBUG_IOMMU))
-return va;
+return __va(pa);

 page_start = start - offset_in_page(start);
 page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
@@ -41,11 +41,6 @@ static inline bool l_mcmonitor_eec_enabled(void)
 #define L_MC_ECC_WORDS_NR 4
 #define L_MCMONITOR_TEST_SIZE (256 * L_MC_ECC_WORDS_NR)

-static inline void local_set_mc_ecc(void *node_nbsr, int num, unsigned int reg_value)
-{
-nbsr_write(reg_value, node_nbsr + SIC_mc0_ecc + num * 0x40);
-}
-
 static inline void l_mcmonitor_fill_data(u64 *a, bool make_error)
 {
 int i, mc = SIC_MC_COUNT;
@@ -53,15 +48,13 @@ static inline void l_mcmonitor_fill_data(u64 *a, bool make_error)
 e2k_mc_ecc_struct_t mc_ecc[SIC_MAX_MC_COUNT];
 a = (void *)__pa(a);

-void *node_nbsr = sic_get_node_nbsr_base(0);
-
 for (i = 0; i < mc; i++)
 mc_ecc[i].E2K_MC_ECC_reg = sic_get_mc_ecc(0, i);

 for (i = 0; i < mc; i++) {
 l_mc_ecc_struct_t e = mc_ecc[i];
 e.E2K_MC_ECC_dmode = 1;
-local_set_mc_ecc(node_nbsr, i, e.E2K_MC_ECC_reg);
+sic_set_mc_ecc(0, i, e.E2K_MC_ECC_reg);
 }
 mb();

@@ -78,7 +71,7 @@ static inline void l_mcmonitor_fill_data(u64 *a, bool make_error)
 }

 for (i = 0; i < mc; i++)
-local_set_mc_ecc(node_nbsr, i, mc_ecc[i].E2K_MC_ECC_reg);
+sic_set_mc_ecc(0, i, mc_ecc[i].E2K_MC_ECC_reg);
 mb();
 }

@@ -122,12 +122,12 @@ typedef struct machdep {

 void (*save_kernel_gregs)(struct kernel_gregs *);
 void (*save_gregs)(struct global_regs *);
-void (*save_local_gregs)(struct local_gregs *);
+void (*save_local_gregs)(struct local_gregs *, bool is_signal);
 void (*save_gregs_dirty_bgr)(struct global_regs *);
 void (*save_gregs_on_mask)(struct global_regs *, bool dirty_bgr,
 unsigned long not_save_gregs_mask);
 void (*restore_gregs)(const struct global_regs *);
-void (*restore_local_gregs)(const struct local_gregs *);
+void (*restore_local_gregs)(const struct local_gregs *, bool is_signal);
 void (*restore_gregs_on_mask)(struct global_regs *, bool dirty_bgr,
 unsigned long not_restore_gregs_mask);

@@ -598,8 +598,8 @@ extern void save_kernel_gregs_v2(struct kernel_gregs *);
 extern void save_kernel_gregs_v5(struct kernel_gregs *);
 extern void save_gregs_v2(struct global_regs *);
 extern void save_gregs_v5(struct global_regs *);
-extern void save_local_gregs_v2(struct local_gregs *);
-extern void save_local_gregs_v5(struct local_gregs *);
+extern void save_local_gregs_v2(struct local_gregs *, bool is_signal);
+extern void save_local_gregs_v5(struct local_gregs *, bool is_signal);
 extern void save_gregs_dirty_bgr_v2(struct global_regs *);
 extern void save_gregs_dirty_bgr_v5(struct global_regs *);
 extern void save_gregs_on_mask_v2(struct global_regs *, bool dirty_bgr,
@@ -608,8 +608,8 @@ extern void save_gregs_on_mask_v5(struct global_regs *, bool dirty_bgr,
 unsigned long mask_not_save);
 extern void restore_gregs_v2(const struct global_regs *);
 extern void restore_gregs_v5(const struct global_regs *);
-extern void restore_local_gregs_v2(const struct local_gregs *);
-extern void restore_local_gregs_v5(const struct local_gregs *);
+extern void restore_local_gregs_v2(const struct local_gregs *, bool is_signal);
+extern void restore_local_gregs_v5(const struct local_gregs *, bool is_signal);
 extern void restore_gregs_on_mask_v2(struct global_regs *, bool dirty_bgr,
 unsigned long mask_not_restore);
 extern void restore_gregs_on_mask_v5(struct global_regs *, bool dirty_bgr,
@@ -101,6 +101,56 @@ static inline bool is_mas_secondary_lock_trap_on_load_store(unsigned int mas)
 return (mas & 3) == 2;
 }

+/* Note that 'chan', 'spec' and 'store' must also be checked */
+static inline bool is_mas_special_mmu_aau(unsigned int mas)
+{
+return (mas & 7) == 7;
+}
+
+/* mas is conflict check between ld and st */
+static inline bool is_mas_check(unsigned int mas)
+{
+unsigned int m1 = (mas >> 3) & 0x2;
+unsigned int big_endian = (mas >> 3) & 0x1;
+unsigned int mod = mas & 0x7;
+
+return m1 == 0x0 && mod == 0x2 && !big_endian;
+}
+
+/* mas is conflict check with unlock between ld and st */
+static inline bool is_mas_check_unlock(unsigned int mas)
+{
+unsigned int m1 = (mas >> 3) & 0x2;
+unsigned int big_endian = (mas >> 3) & 0x1;
+unsigned int mod = mas & 0x7;
+
+return m1 == 0x0 && mod == 0x3 && !big_endian;
+}
+
+/* mas is semi-speculative conflict lock check between ld and st */
+static inline bool is_mas_lock_check(unsigned int mas)
+{
+unsigned int m1 = (mas >> 3) & 0x2;
+unsigned int big_endian = (mas >> 3) & 0x1;
+unsigned int mod = mas & 0x7;
+unsigned int m2 = mas & 0x3;
+
+return (m1 == 0x0 && mod == 0x4 || m1 == 0x1 && m2 == 0x1) &&
+!big_endian;
+}
+
+/* mas is speculative conflict lock check between ld and st */
+static inline bool is_mas_spec_lock_check(unsigned int mas)
+{
+unsigned int m1 = (mas >> 3) & 0x2;
+unsigned int big_endian = (mas >> 3) & 0x1;
+unsigned int mod = mas & 0x7;
+unsigned int m2 = mas & 0x3;
+
+return (m1 == 0x0 && mod == 0x7 || m1 == 0x1 && m2 == 0x3) &&
+!big_endian;
+}
+
 #endif

 #endif /* _E2K_MAS_H_ */
@@ -57,23 +57,9 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,

 vm_flags = cui << VM_CUI_SHIFT;

-/*
-* Check if we are allocating hardware stacks.
-*/
-if (current_thread_info()->status & TS_MMAP_DONTEXPAND) {
-/*
-* VM_DONTEXPAND makes sure that even if VM_MLOCK
-* is set, this area won't be populated on mmap().
-*/
-vm_flags |= VM_DONTEXPAND;
-}
-
 if (current_thread_info()->status & TS_MMAP_PRIVILEGED)
 vm_flags |= VM_PRIVILEGED;

-if (current_thread_info()->status & TS_MMAP_DONTCOPY)
-vm_flags |= VM_DONTCOPY;
-
 if (current_thread_info()->status & TS_MMAP_PS)
 vm_flags |= VM_HW_STACK_PS;

@@ -83,9 +69,6 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
 if (current_thread_info()->status & TS_MMAP_SIGNAL_STACK)
 vm_flags |= VM_SIGNAL_STACK;

-if (current_thread_info()->status & TS_MMAP_NOHUGEPAGE)
-vm_flags |= VM_NOHUGEPAGE;
-
 return vm_flags;
 }
 #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
@@ -136,13 +119,13 @@ enum exec_mmu_ret {
 /* Trap cellar record should be executed again */
 EXEC_MMU_REPEAT
 };
-extern int execute_mmu_operations(trap_cellar_t *tcellar,
+extern enum exec_mmu_ret execute_mmu_operations(trap_cellar_t *tcellar,
 trap_cellar_t *next_tcellar, struct pt_regs *regs,
 int rg, int zeroing, e2k_addr_t *addr,
 bool (*is_spill_fill_recovery)(tc_cond_t cond,
 e2k_addr_t address, bool s_f,
 struct pt_regs *regs),
-int (*calculate_rf_frame)(struct pt_regs *regs,
+enum exec_mmu_ret (*calculate_rf_frame)(struct pt_regs *regs,
 tc_cond_t cond, u64 **radr,
 bool *load_to_rf));

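Note on the hunk above: execute_mmu_operations() and the calculate_rf_frame() callback now return the enum exec_mmu_ret they already describe (EXEC_MMU_REPEAT meaning the trap cellar record should be executed again) instead of a bare int. A hedged sketch of a consumer of the typed result; the wrapper name and callback parameter are illustrative, not from this patch:

```c
/* Illustrative consumer of the typed result: 'do_one_record' stands in for
 * a call into execute_mmu_operations() with whatever callbacks the real
 * trap handler supplies. */
static void example_replay_tc_records(trap_cellar_t *tc, trap_cellar_t *next_tc,
		struct pt_regs *regs, int rg,
		enum exec_mmu_ret (*do_one_record)(trap_cellar_t *,
				trap_cellar_t *, struct pt_regs *, int))
{
	enum exec_mmu_ret ret;

	do {
		ret = do_one_record(tc, next_tc, regs, rg);
	} while (ret == EXEC_MMU_REPEAT);	/* "executed again", per the enum comment */
}
```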
@@ -182,65 +182,10 @@ enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
 {
 }

-/*
-* Initialize a new mmu context. This is invoked when a new
-* address space instance (unique or shared) is instantiated.
-* This just needs to set mm->context[] to an invalid context.
-*/
-static inline int
-__init_new_context(struct task_struct *p, struct mm_struct *mm,
-mm_context_t *context)
-{
-bool is_fork = p && (p != current);
-int ret;
-
-memset(&context->cpumsk, 0, nr_cpu_ids * sizeof(context->cpumsk[0]));
-
-if (is_fork) {
-/*
-* Copy data on user fork
-*/
-mm_context_t *curr_context = &current->mm->context;
-
-/*
-* Copy cut mask from the context of parent process
-* to the context of new process
-*/
-mutex_lock(&curr_context->cut_mask_lock);
-bitmap_copy((unsigned long *) &context->cut_mask,
-(unsigned long *) &curr_context->cut_mask,
-USER_CUT_AREA_SIZE/sizeof(e2k_cute_t));
-mutex_unlock(&curr_context->cut_mask_lock);
-} else {
-/*
-* Initialize by zero cut_mask of new process
-*/
-mutex_init(&context->cut_mask_lock);
-bitmap_zero((unsigned long *) &context->cut_mask,
-USER_CUT_AREA_SIZE/sizeof(e2k_cute_t));
-}
-
-atomic_set(&context->tstart, 1);
-
-init_rwsem(&context->sival_ptr_list_sem);
-INIT_LIST_HEAD(&context->sival_ptr_list_head);
-
-INIT_LIST_HEAD(&context->delay_free_stacks);
-init_rwsem(&context->core_lock);
-
-INIT_LIST_HEAD(&context->cached_stacks);
-spin_lock_init(&context->cached_stacks_lock);
-context->cached_stacks_size = 0;
-
-if (mm == NULL)
-return 0;
-
-ret = hw_contexts_init(p, context, is_fork);
-return ret;
-}
-
-static inline int
-init_new_context(struct task_struct *p, struct mm_struct *mm)
+extern int __init_new_context(struct task_struct *p, struct mm_struct *mm,
+mm_context_t *context);
+static inline int init_new_context(struct task_struct *p, struct mm_struct *mm)
 {
 return __init_new_context(p, mm, &mm->context);
 }
@@ -32,7 +32,7 @@ native_guest_ptr_to_host(void *ptr, int size)
 return ptr;
 }

-static inline long
+static inline void
 native_recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data,
 u32 data_tag, u64 st_rec_opc, u64 data_ext, u32 data_ext_tag,
 u64 opc_ext, int chan, int qp_store, int atomic_store)
@@ -45,10 +45,8 @@ native_recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data,
 st_rec_opc, data_ext, data_ext_tag, opc_ext,
 chan, qp_store);
 }
-
-return 0;
 }
-static inline long
+static inline void
 native_recovery_faulted_load(e2k_addr_t address, u64 *ld_val, u8 *data_tag,
 u64 ld_rec_opc, int chan)
 {
@@ -58,25 +56,23 @@ native_recovery_faulted_load(e2k_addr_t address, u64 *ld_val, u8 *data_tag,
 NATIVE_RECOVERY_TAGGED_LOAD_TO(address, ld_rec_opc, val, tag, chan);
 *ld_val = val;
 *data_tag = tag;
-return 0;
 }
-static inline long
+static inline void
 native_recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to,
 e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, int chan,
-int qp_load, int atomic_load)
+int qp_load, int atomic_load, u32 first_time)
 {
 if (atomic_load) {
 NATIVE_MOVE_TAGGED_DWORD_WITH_OPC_VR_ATOMIC(addr_from, addr_to,
 addr_to_hi, vr, ld_rec_opc);
 } else {
 NATIVE_MOVE_TAGGED_DWORD_WITH_OPC_CH_VR(addr_from, addr_to,
-addr_to_hi, vr, ld_rec_opc, chan, qp_load);
+addr_to_hi, vr, ld_rec_opc, chan, qp_load,
+first_time);
 }
-
-return 0;
 }
-static inline long
+static inline void
 native_recovery_faulted_load_to_cpu_greg(e2k_addr_t address, u32 greg_num_d,
 int vr, u64 ld_rec_opc, int chan_opc,
 int qp_load, int atomic_load)
@@ -88,24 +84,23 @@ native_recovery_faulted_load_to_cpu_greg(e2k_addr_t address, u32 greg_num_d,
 NATIVE_RECOVERY_LOAD_TO_A_GREG_CH_VR(address,
 ld_rec_opc, greg_num_d, chan_opc, vr, qp_load);
 }
-
-return 0;
 }

-static inline long
+static inline void
 native_recovery_faulted_load_to_greg(e2k_addr_t address, u32 greg_num_d,
 int vr, u64 ld_rec_opc, int chan_opc,
 int qp_load, int atomic_load, u64 *saved_greg_lo,
 u64 *saved_greg_hi)
 {
 if (!saved_greg_lo) {
-return native_recovery_faulted_load_to_cpu_greg(address,
+native_recovery_faulted_load_to_cpu_greg(address,
 greg_num_d, vr, ld_rec_opc, chan_opc, qp_load,
 atomic_load);
 } else {
-return native_recovery_faulted_move(address,
+native_recovery_faulted_move(address,
 (u64) saved_greg_lo, (u64) saved_greg_hi,
-vr, ld_rec_opc, chan_opc, qp_load, atomic_load);
+vr, ld_rec_opc, chan_opc, qp_load,
+atomic_load, 1);
 }
 }

@@ -118,27 +113,21 @@ native_is_guest_kernel_gregs(struct thread_info *ti,
 return false;
 }

-static inline long
+static inline void
 native_move_tagged_word(e2k_addr_t addr_from, e2k_addr_t addr_to)
 {
 NATIVE_MOVE_TAGGED_WORD(addr_from, addr_to);
-
-return 0;
 }
-static inline long
+static inline void
 native_move_tagged_dword(e2k_addr_t addr_from, e2k_addr_t addr_to)
 {
 NATIVE_MOVE_TAGGED_DWORD(addr_from, addr_to);
-
-return 0;
 }
-static inline long
+static inline void
 native_move_tagged_qword(e2k_addr_t addr_from, e2k_addr_t addr_to)
 {
 NATIVE_MOVE_TAGGED_QWORD(addr_from, addr_from + sizeof(long),
 addr_to, addr_to + sizeof(long));
-
-return 0;
 }

 extern void native_handle_mpdma_fault(e2k_addr_t hva);
@@ -153,39 +142,40 @@ extern e2k_addr_t print_address_ptes(pgd_t *pgdp, e2k_addr_t address,
 #if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL)
 /* it is native kernel without any virtualization */
 /* or it is native host kernel with virtualization support */
-static inline long
+static inline void
 recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data, u32 data_tag,
 u64 st_rec_opc, u64 data_ext, u32 data_ext_tag, u64 opc_ext,
 int chan, int qp_store, int atomic_store)
 {
-return native_recovery_faulted_tagged_store(address, wr_data, data_tag,
+native_recovery_faulted_tagged_store(address, wr_data, data_tag,
 st_rec_opc, data_ext, data_ext_tag, opc_ext,
 chan, qp_store, atomic_store);
 }
-static inline long
+static inline void
 recovery_faulted_load(e2k_addr_t address, u64 *ld_val, u8 *data_tag,
-u64 ld_rec_opc, int chan)
+u64 ld_rec_opc, int chan, tc_cond_t cond)
 {
-return native_recovery_faulted_load(address, ld_val, data_tag,
+native_recovery_faulted_load(address, ld_val, data_tag,
 ld_rec_opc, chan);
 }
-static inline long
+static inline void
 recovery_faulted_load_to_greg(e2k_addr_t address, u32 greg_num_d,
 int vr, u64 ld_rec_opc, int chan,
 int qp_load, int atomic_load, u64 *saved_greg_lo,
-u64 *saved_greg_hi)
+u64 *saved_greg_hi, tc_cond_t cond)
 {
-return native_recovery_faulted_load_to_greg(address, greg_num_d,
+native_recovery_faulted_load_to_greg(address, greg_num_d,
 vr, ld_rec_opc, chan, qp_load, atomic_load,
 saved_greg_lo, saved_greg_hi);
 }
-static inline long
+static inline void
 recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to,
 e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, int chan,
-int qp_load, int atomic_load)
+int qp_load, int atomic_load, u32 first_time,
+tc_cond_t cond)
 {
-return native_recovery_faulted_move(addr_from, addr_to, addr_to_hi, vr,
-ld_rec_opc, chan, qp_load, atomic_load);
+native_recovery_faulted_move(addr_from, addr_to, addr_to_hi, vr,
+ld_rec_opc, chan, qp_load, atomic_load, first_time);
 }

 static inline bool
@@ -217,12 +207,12 @@ handle_mpdma_fault(e2k_addr_t hva)

 # ifndef CONFIG_VIRTUALIZATION
 /* it is native kernel without any virtualization */
-static inline int guest_addr_to_host(void **addr, pt_regs_t *regs)
+static inline int guest_addr_to_host(void **addr, const pt_regs_t *regs)
 {
 return native_guest_addr_to_host(addr);
 }

-static inline void *guest_ptr_to_host(void *ptr, int size, pt_regs_t *regs)
+static inline void *guest_ptr_to_host(void *ptr, int size, const pt_regs_t *regs)
 {
 return native_guest_ptr_to_host(ptr, size);
 }
@@ -241,31 +231,31 @@ static inline void *guest_ptr_to_host(void *ptr, int size, pt_regs_t *regs)
 #error "Unknown virtualization type"
 #endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */

-static inline long
+static inline void
 store_tagged_dword(void *address, u64 data, u32 tag)
 {
-return recovery_faulted_tagged_store((e2k_addr_t) address, data, tag,
+recovery_faulted_tagged_store((e2k_addr_t) address, data, tag,
 TAGGED_MEM_STORE_REC_OPC, 0, 0, 0, 1, 0, 0);
 }

-static inline long
+static inline void
 load_value_and_tagd(const void *address, u64 *ld_val, u8 *ld_tag)
 {
-return recovery_faulted_load((e2k_addr_t) address, ld_val, ld_tag,
-TAGGED_MEM_LOAD_REC_OPC, 0);
+recovery_faulted_load((e2k_addr_t) address, ld_val, ld_tag,
+TAGGED_MEM_LOAD_REC_OPC, 0,
+(tc_cond_t) {.word = 0});
 }

-static inline long
+static inline void
 load_qvalue_and_tagq(e2k_addr_t address, u64 *val_lo, u64 *val_hi,
 u8 *tag_lo, u8 *tag_hi)
 {
-long ret;
-
-ret = recovery_faulted_load(address, val_lo, tag_lo,
-TAGGED_MEM_LOAD_REC_OPC, 0);
-ret |= recovery_faulted_load(address + sizeof(long), val_hi, tag_hi,
-TAGGED_MEM_LOAD_REC_OPC, 0);
-return ret;
+recovery_faulted_load(address, val_lo, tag_lo,
+TAGGED_MEM_LOAD_REC_OPC, 0,
+(tc_cond_t) {.word = 0});
+recovery_faulted_load(address + sizeof(long), val_hi, tag_hi,
+TAGGED_MEM_LOAD_REC_OPC, 0,
+(tc_cond_t) {.word = 0});
 }

 #endif /* _E2K_MMU_FAULT_H_ */
@@ -760,17 +760,28 @@ read_CLW_reg(clw_addr_t clw_addr)
 return READ_CLW_REG(clw_addr);
 }

+static inline clw_reg_t
+native_read_CLW_reg(clw_addr_t clw_addr)
+{
+DebugCLW("Read CLW reg 0x%lx\n", clw_addr);
+return NATIVE_READ_CLW_REG(clw_addr);
+}
+
 /*
 * Read CLW bottom register
 */
 #define read_US_CL_B() read_CLW_reg(ADDR_US_CL_B)
 #define READ_US_CL_B() READ_CLW_REG(ADDR_US_CL_B)
+#define native_read_US_CL_B() native_read_CLW_reg(ADDR_US_CL_B)
+#define NATIVE_READ_US_CL_B() NATIVE_READ_CLW_REG(ADDR_US_CL_B)

 /*
 * Read CLW up register
 */
 #define read_US_CL_UP() read_CLW_reg(ADDR_US_CL_UP)
 #define READ_US_CL_UP() READ_CLW_REG(ADDR_US_CL_UP)
+#define native_read_US_CL_UP() native_read_CLW_reg(ADDR_US_CL_UP)
+#define NATIVE_READ_US_CL_UP() NATIVE_READ_CLW_REG(ADDR_US_CL_UP)

 /*
 * Read CLW bit-mask registers
@@ -783,6 +794,69 @@ read_CLW_reg(clw_addr_t clw_addr)
 #define READ_US_CL_M2() READ_CLW_REG(ADDR_US_CL_M2)
 #define read_US_CL_M3() read_CLW_reg(ADDR_US_CL_M3)
 #define READ_US_CL_M3() READ_CLW_REG(ADDR_US_CL_M3)
+#define native_read_US_CL_M0() native_read_CLW_reg(ADDR_US_CL_M0)
+#define NATIVE_READ_US_CL_M0() NATIVE_READ_CLW_REG(ADDR_US_CL_M0)
+#define native_read_US_CL_M1() native_read_CLW_reg(ADDR_US_CL_M1)
+#define NATIVE_READ_US_CL_M1() NATIVE_READ_CLW_REG(ADDR_US_CL_M1)
+#define native_read_US_CL_M2() native_read_CLW_reg(ADDR_US_CL_M2)
+#define NATIVE_READ_US_CL_M2() NATIVE_READ_CLW_REG(ADDR_US_CL_M2)
+#define native_read_US_CL_M3() native_read_CLW_reg(ADDR_US_CL_M3)
+#define NATIVE_READ_US_CL_M3() NATIVE_READ_CLW_REG(ADDR_US_CL_M3)
+
+/*
+* Write CLW register
+*/
+
+static inline void
+write_CLW_reg(clw_addr_t clw_addr, clw_reg_t val)
+{
+DebugCLW("Write CLW reg 0x%lx value 0x%lx\n", clw_addr, val);
+WRITE_CLW_REG(clw_addr, val);
+}
+
+static inline void
+native_write_CLW_reg(clw_addr_t clw_addr, clw_reg_t val)
+{
+DebugCLW("Write CLW reg 0x%lx value 0x%lx\n", clw_addr, val);
+NATIVE_WRITE_CLW_REG(clw_addr, val);
+}
+
+/*
+* Write CLW bottom register
+*/
+#define write_US_CL_B(val) write_CLW_reg(ADDR_US_CL_B, val)
+#define WRITE_US_CL_B(val) WRITE_CLW_REG(ADDR_US_CL_B, val)
+#define native_write_US_CL_B(val) native_write_CLW_reg(ADDR_US_CL_B, val)
+#define NATIVE_WRITE_US_CL_B(val) NATIVE_WRITE_CLW_REG(ADDR_US_CL_B, val)
+
+/*
+* Write CLW up register
+*/
+#define write_US_CL_UP(val) write_CLW_reg(ADDR_US_CL_UP, val)
+#define WRITE_US_CL_UP(val) WRITE_CLW_REG(ADDR_US_CL_UP, val)
+#define native_write_US_CL_UP(val) native_write_CLW_reg(ADDR_US_CL_UP, val)
+#define NATIVE_WRITE_US_CL_UP(val) NATIVE_WRITE_CLW_REG(ADDR_US_CL_UP, val)
+
+/*
+* Write CLW bit-mask registers
+*/
+#define write_US_CL_M0(val) write_CLW_reg(ADDR_US_CL_M0, val)
+#define WRITE_US_CL_M0(val) WRITE_CLW_REG(ADDR_US_CL_M0, val)
+#define write_US_CL_M1(val) write_CLW_reg(ADDR_US_CL_M1, val)
+#define WRITE_US_CL_M1(val) WRITE_CLW_REG(ADDR_US_CL_M1, val)
+#define write_US_CL_M2(val) write_CLW_reg(ADDR_US_CL_M2, val)
+#define WRITE_US_CL_M2(val) WRITE_CLW_REG(ADDR_US_CL_M2, val)
+#define write_US_CL_M3(val) write_CLW_reg(ADDR_US_CL_M3, val)
+#define WRITE_US_CL_M3(val) WRITE_CLW_REG(ADDR_US_CL_M3, val)
+#define native_write_US_CL_M0(val) native_write_CLW_reg(ADDR_US_CL_M0, val)
+#define NATIVE_WRITE_US_CL_M0(val) NATIVE_WRITE_CLW_REG(ADDR_US_CL_M0, val)
+#define native_write_US_CL_M1(val) native_write_CLW_reg(ADDR_US_CL_M1, val)
+#define NATIVE_WRITE_US_CL_M1(val) NATIVE_WRITE_CLW_REG(ADDR_US_CL_M1, val)
+#define native_write_US_CL_M2(val) native_write_CLW_reg(ADDR_US_CL_M2, val)
+#define NATIVE_WRITE_US_CL_M2(val) NATIVE_WRITE_CLW_REG(ADDR_US_CL_M2, val)
+#define native_write_US_CL_M3(val) native_write_CLW_reg(ADDR_US_CL_M3, val)
+#define NATIVE_WRITE_US_CL_M3(val) NATIVE_WRITE_CLW_REG(ADDR_US_CL_M3, val)
+

 #endif /* ! __ASSEMBLY__ */

@@ -227,6 +227,12 @@ extern void boot_native_write_MMU_OS_VAB_reg_value(unsigned long value);
 #define READ_CLW_REG(clw_addr) \
 NATIVE_READ_CLW_REG(clw_addr)

+/*
+* Write CLW register
+*/
+#define WRITE_CLW_REG(clw_addr, val) \
+NATIVE_WRITE_CLW_REG(clw_addr, val)
+
 /*
 * MMU DEBUG registers access
 */
@@ -320,14 +320,6 @@ typedef unsigned long long mmu_reg_t;

 #define mmu_trap_count_get(mmu_reg) MMU_TRAP_COUNT_GET(mmu_reg)

-/*
-* MMU Memory Protection Table Base MMU_MPT_B
-* The base address of Memory Protection Table,
-* aligned to table size
-*/
-
-#define _MMU_MPT_B 0x000000fffffff000UL
-
 /*
 * MMU PCI Low Bound MMU_PCI_L_B
 * Fix the boundary between PCIand main memory addresses
@@ -688,12 +680,12 @@ typedef e2k_addr_t flush_addr_t;
 typedef e2k_addr_t clw_addr_t;
 #endif /* __ASSEMBLY__ */

-#define US_CL_B_NO 0x024 /* User stack bottom to clean */
-#define US_CL_UP_NO 0x124 /* User stack up to clean */
-#define US_CL_M0_NO 0x004 /* User stack bit-mask [0:63] */
-#define US_CL_M1_NO 0x084 /* User stack bit-mask [64:127] */
-#define US_CL_M2_NO 0x104 /* User stack bit-mask [128:195] */
-#define US_CL_M3_NO 0x184 /* User stack bit-mask [196:255] */
+#define ADDR_US_CL_B 0x024 /* User stack bottom to clean */
+#define ADDR_US_CL_UP 0x124 /* User stack up to clean */
+#define ADDR_US_CL_M0 0x004 /* User stack bit-mask [0:63] */
+#define ADDR_US_CL_M1 0x084 /* User stack bit-mask [64:127] */
+#define ADDR_US_CL_M2 0x104 /* User stack bit-mask [128:195] */
+#define ADDR_US_CL_M3 0x184 /* User stack bit-mask [196:255] */

 /* CLW internel register contents */

@@ -411,6 +411,11 @@ struct mmu_tc_opcode {


 #ifndef __ASSEMBLY__
+static inline bool tc_fmt_has_valid_mask(int fmt)
+{
+return fmt == LDST_QP_FMT || fmt == TC_FMT_QWORD_QP || fmt == TC_FMT_DWORD_QP;
+}
+

 typedef union {
 unsigned word;
@ -500,6 +505,56 @@ typedef union {
|
||||||
|
|
||||||
#define TC_COND_FMT_FULL(cond) (AS(cond).fmt | (AS(cond).fmtc << 3))
|
#define TC_COND_FMT_FULL(cond) (AS(cond).fmt | (AS(cond).fmtc << 3))
|
||||||
|
|
||||||
|
static inline bool tc_cond_is_special_mmu_aau(tc_cond_t cond)
|
||||||
|
{
|
||||||
|
unsigned int mas = AS(cond).mas;
|
||||||
|
int chan = AS(cond).chan;
|
||||||
|
int store = AS(cond).store;
|
||||||
|
int spec_mode = AS(cond).spec;
|
||||||
|
|
||||||
|
if (unlikely(is_mas_special_mmu_aau(mas) && (store ||
|
||||||
|
!store && !spec_mode && (chan == 1 || chan == 3))))
|
||||||
|
return true;
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline bool tc_cond_is_check_ld(tc_cond_t cond)
|
||||||
|
{
|
||||||
|
unsigned int mas = AS(cond).mas;
|
||||||
|
int store = AS(cond).store;
|
||||||
|
int spec_mode = AS(cond).spec;
|
||||||
|
|
||||||
|
return is_mas_check(mas) && !spec_mode && !store;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline bool tc_cond_is_check_unlock_ld(tc_cond_t cond)
|
||||||
|
{
|
||||||
|
unsigned int mas = AS(cond).mas;
|
||||||
|
int store = AS(cond).store;
|
||||||
|
int spec_mode = AS(cond).spec;
|
||||||
|
|
||||||
|
return is_mas_check_unlock(mas) && !spec_mode && !store;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline bool tc_cond_is_lock_check_ld(tc_cond_t cond)
|
||||||
|
{
|
||||||
|
unsigned int mas = AS(cond).mas;
|
||||||
|
int store = AS(cond).store;
|
||||||
|
int spec_mode = AS(cond).spec;
|
||||||
|
|
||||||
|
return is_mas_lock_check(mas) && spec_mode && !store;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline bool tc_cond_is_spec_lock_check_ld(tc_cond_t cond)
|
||||||
|
{
|
||||||
|
unsigned int mas = AS(cond).mas;
|
||||||
|
int store = AS(cond).store;
|
||||||
|
int spec_mode = AS(cond).spec;
|
||||||
|
|
||||||
|
return is_mas_spec_lock_check(mas) && spec_mode && !store;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Caveat: for qword accesses this will return 16 bytes for
|
* Caveat: for qword accesses this will return 16 bytes for
|
||||||
* the first entry in trap cellar and 8 bytes for the second one.
|
* the first entry in trap cellar and 8 bytes for the second one.
|
||||||
|
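The tc_cond_is_*() predicates added above classify a trap-cellar entry by its MAS, channel, store and speculative-mode fields. A minimal usage sketch, assuming only tc_cond_t and the AS() accessor from this header (the enum and the wrapper name are illustrative, not part of the commit):

/* Illustrative sketch: sort one trap cellar entry into a coarse class
 * using the predicates defined above. */
enum tc_ld_class { TC_LD_OTHER, TC_LD_CHECK, TC_LD_LOCK_CHECK, TC_SPECIAL_MMU_AAU };

static inline enum tc_ld_class tc_entry_class(tc_cond_t cond)
{
	if (tc_cond_is_check_ld(cond) || tc_cond_is_check_unlock_ld(cond))
		return TC_LD_CHECK;		/* non-speculative check / check-unlock loads */
	if (tc_cond_is_lock_check_ld(cond) || tc_cond_is_spec_lock_check_ld(cond))
		return TC_LD_LOCK_CHECK;	/* speculative lock-check loads */
	if (tc_cond_is_special_mmu_aau(cond))
		return TC_SPECIAL_MMU_AAU;	/* special MMU/AAU access */
	return TC_LD_OTHER;
}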
@@ -746,35 +801,6 @@ typedef union {
 #define LD_ST_REC_OPC_mask(ld_st_rec)	(ld_st_rec.mask)
 #define LD_ST_REC_OPC_reg(ld_st_rec)	(ld_st_rec.word)
 
-typedef enum ld_st_rec_mode {
-	primary_rec_mode	= 0,	/* primary, privileged, */
-	primary_prot_rec_mode	= 1,	/* primary, privileged, protected */
-	secondary_rec_mode	= 2,	/* secondary, privileged */
-	guest_physical_rec_mode	= 3,	/* guest, physical, privileged */
-	primary_user_rec_mode	= 4,	/* primary */
-	guest_primary_rec_mode	= 5,	/* guest, primary, privileged, prot */
-	secondary_user_rec_mode	= 6,	/* secondary, not privileged */
-} ld_st_rec_mode_t;
-
-static inline ld_st_rec_mode_t
-get_ld_st_rec_opc_mode(ldst_rec_op_t rec_opcode)
-{
-	unsigned mode = 0;
-
-	mode |= LD_ST_REC_OPC_prot(rec_opcode) ? 0x01 : 0x00;
-	mode |= LD_ST_REC_OPC_root(rec_opcode) ? 0x02 : 0x00;
-	mode |= LD_ST_REC_OPC_mode_h(rec_opcode) ? 0x04 : 0x00;
-	return (ld_st_rec_mode_t)mode;
-}
-static inline ldst_rec_op_t
-set_ld_st_rec_opc_mode(ldst_rec_op_t rec_opcode, ld_st_rec_mode_t mode)
-{
-	LD_ST_REC_OPC_prot(rec_opcode) = (mode & 0x01) ? 1 : 0;
-	LD_ST_REC_OPC_root(rec_opcode) = (mode & 0x02) ? 1 : 0;
-	LD_ST_REC_OPC_mode_h(rec_opcode) = (mode & 0x04) ? 1 : 0;
-	return rec_opcode;
-}
-
 #endif	/* ! __ASSEMBLY__ */
 
 #define LDST_REC_OPC_BYPASS_L1		(MAS_BYPASS_L1_CACHE << \
@@ -790,6 +816,7 @@ set_ld_st_rec_opc_mode(ldst_rec_op_t rec_opcode, ld_st_rec_mode_t mode)
 				MAS_FILL_OPERATION << LDST_REC_OPC_MAS_SHIFT)
 #define TAGGED_MEM_STORE_REC_OPC (LDST_QWORD_FMT << LDST_REC_OPC_FMT_SHIFT)
 #define TAGGED_MEM_STORE_REC_OPC_W (LDST_WORD_FMT << LDST_REC_OPC_FMT_SHIFT)
+#define MEM_STORE_REC_OPC_B (LDST_BYTE_FMT << LDST_REC_OPC_FMT_SHIFT)
 
 
 #endif /* _E2K_MMU_TYPES_H_ */
@@ -47,6 +47,13 @@
 #define NATIVE_READ_MMU_TRAP_POINT()	\
 		NATIVE_READ_MMU_REG( \
 			_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_TRAP_POINT_NO))
+#define NATIVE_WRITE_MMU_US_CL_D(us_cl_d)	\
+		NATIVE_WRITE_MMU_REG( \
+			_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_US_CL_D_NO), \
+			mmu_reg_val(us_cl_d))
+#define NATIVE_READ_MMU_US_CL_D()	\
+		NATIVE_READ_MMU_REG( \
+			_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_US_CL_D_NO))
 #define NATIVE_WRITE_MMU_OS_PPTB_REG_VALUE(mmu_phys_ptb)	\
 		NATIVE_WRITE_MMU_REG( \
 			_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_PPTB_NO), \
@@ -226,6 +233,12 @@ native_flush_ICACHE_all(void)
 #define	NATIVE_READ_CLW_REG(clw_addr)	\
 		NATIVE_READ_MAS_D_5((clw_addr), MAS_CLW_REG)
 
+/*
+ * Write CLW register
+ */
+#define	NATIVE_WRITE_CLW_REG(clw_addr, val)	\
+		NATIVE_WRITE_MAS_D((clw_addr), (val), MAS_CLW_REG)
+
 /*
  * native MMU DEBUG registers access
  */
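Together with the ADDR_US_CL_* offsets renamed in the first hunk, the new NATIVE_WRITE_CLW_REG() macro gives write access to the CLW block. A hedged sketch of how the user-stack clean-window state could be reset, assuming the ADDR_US_CL_* values are the clw_addr arguments these macros expect (the helper name and the choice to zero everything are illustrative):

/* Illustrative sketch: clear the user-stack CLW bounds and bit-masks. */
static inline void clear_us_clw_state(void)
{
	NATIVE_WRITE_CLW_REG(ADDR_US_CL_B, 0);	/* bottom to clean */
	NATIVE_WRITE_CLW_REG(ADDR_US_CL_UP, 0);	/* up to clean */
	NATIVE_WRITE_CLW_REG(ADDR_US_CL_M0, 0);	/* bit-mask [0:63] */
	NATIVE_WRITE_CLW_REG(ADDR_US_CL_M1, 0);	/* bit-mask [64:127] */
	NATIVE_WRITE_CLW_REG(ADDR_US_CL_M2, 0);	/* bit-mask [128:195] */
	NATIVE_WRITE_CLW_REG(ADDR_US_CL_M3, 0);	/* bit-mask [196:255] */
}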
@@ -249,7 +249,6 @@ extern struct page *e2k_virt_to_page(const void *kaddr);
 #define VM_HW_STACK_PCS		0x00400000000UL	/* chain stack area */
 #define VM_WRITECOMBINED	0x00800000000UL
 #define VM_PRIVILEGED		0x04000000000UL	/* pages are privileged */
-#define VM_GUARD		0x08000000000UL	/* guard page(s) mapping */
 #define VM_MPDMA		0x10000000000UL	/* pages are under MPDMA */
 						/* hardware protection */
 #define VM_SIGNAL_STACK		0x20000000000UL	/* Signal stack area */
@@ -215,6 +215,15 @@ PV_READ_CLW_REG(clw_addr_t clw_addr)
 	return pv_mmu_ops.read_clw_reg(clw_addr);
 }
 
+/*
+ * Write CLW register
+ */
+static inline void
+PV_WRITE_CLW_REG(clw_addr_t clw_addr, clw_reg_t val)
+{
+	pv_mmu_ops.write_clw_reg(clw_addr, val);
+}
+
 /*
  * MMU DEBUG registers access
  */
@@ -531,6 +540,16 @@ READ_CLW_REG(clw_addr_t clw_addr)
 	return PV_READ_CLW_REG(clw_addr);
 }
 
+/*
+ * Write CLW register
+ */
+
+static inline void
+WRITE_CLW_REG(clw_addr_t clw_addr, clw_reg_t val)
+{
+	PV_WRITE_CLW_REG(clw_addr, val);
+}
+
 /*
  * KVM MMU DEBUG registers access
 */
@@ -367,8 +367,6 @@ typedef struct pv_cpu_ops {
 			bool proc_bounds, bool chain_bounds);
 	irqreturn_t (*handle_interrupt)(struct pt_regs *regs);
 	void (*init_guest_system_handlers_table)(void);
-	void (*handle_deferred_traps_in_syscall)(struct pt_regs *regs,
-			bool use_pt_regs, bool new_hs);
 	void (*fix_process_pt_regs)(struct thread_info *ti,
 			struct e2k_stacks *stacks, struct pt_regs *regs,
 			struct pt_regs *old_regs);
@@ -390,8 +388,10 @@ typedef struct pv_cpu_ops {
 	unsigned long (*fast_tagged_memory_set)(void *addr, u64 val, u64 tag,
 			size_t len, u64 strd_opcode);
 	unsigned long (*extract_tags_32)(u16 *dst, const void *src);
-	void (*save_local_glob_regs)(struct local_gregs *l_gregs);
-	void (*restore_local_glob_regs)(struct local_gregs *l_gregs);
+	void (*save_local_glob_regs)(struct local_gregs *l_gregs,
+					bool is_signal);
+	void (*restore_local_glob_regs)(struct local_gregs *l_gregs,
+					bool is_signal);
 	void (*restore_kernel_gregs_in_syscall)(struct thread_info *ti);
 	void (*get_all_user_glob_regs)(struct global_regs *gregs);
 	void (*arch_setup_machine)(void);
@@ -574,6 +574,7 @@ typedef struct pv_mmu_ops {
 	probe_entry_t (*entry_probe_mmu_op)(e2k_addr_t virt_addr);
 	probe_entry_t (*address_probe_mmu_op)(e2k_addr_t virt_addr);
 	clw_reg_t (*read_clw_reg)(clw_addr_t clw_addr);
+	void (*write_clw_reg)(clw_addr_t clw_addr, clw_reg_t val);
 	void (*save_DAM)(unsigned long long *dam);
 	void (*write_mmu_debug_reg)(int reg_no, mmu_reg_t mmu_reg);
 	mmu_reg_t (*read_mmu_debug_reg)(int reg_no);
@@ -15,14 +15,14 @@
 })
 
 static inline void
-pv_save_local_glob_regs(local_gregs_t *l_gregs)
+pv_save_local_glob_regs(local_gregs_t *l_gregs, is_signal)
 {
-	pv_cpu_ops.save_local_glob_regs(l_gregs);
+	pv_cpu_ops.save_local_glob_regs(l_gregs, is_signal);
 }
 static inline void
-pv_restore_local_glob_regs(local_gregs_t *l_gregs)
+pv_restore_local_glob_regs(local_gregs_t *l_gregs, bool is_signal)
 {
-	pv_cpu_ops.restore_local_glob_regs(l_gregs);
+	pv_cpu_ops.restore_local_glob_regs(l_gregs, is_signal);
 }
 static inline void
 pv_get_all_user_glob_regs(struct global_regs *gregs)
@@ -46,14 +46,14 @@ pv_restore_kernel_gregs_in_syscall(struct thread_info *ti)
 #define	INIT_G_REGS()	PV_INIT_G_REGS()
 
 static inline void
-save_local_glob_regs(local_gregs_t *l_gregs)
+save_local_glob_regs(local_gregs_t *l_gregs, bool is_signal)
 {
-	pv_save_local_glob_regs(l_gregs);
+	pv_save_local_glob_regs(l_gregs, is_signal);
 }
 static inline void
-restore_local_glob_regs(local_gregs_t *l_gregs)
+restore_local_glob_regs(local_gregs_t *l_gregs, bool is_signal)
 {
-	pv_restore_local_glob_regs(l_gregs);
+	pv_restore_local_glob_regs(l_gregs, is_signal);
 }
 static inline void
 get_all_user_glob_regs(struct global_regs *gregs)
@@ -9,15 +9,6 @@
 #define	pv_ttable_entry3	(pv_cpu_ops.trap_table_entry3)
 #define	pv_ttable_entry4	(pv_cpu_ops.trap_table_entry4)
 
-static inline void
-pv_handle_deferred_traps_in_syscall(struct pt_regs *regs,
-				bool use_pt_regs, bool new_hs)
-{
-	if (pv_cpu_ops.handle_deferred_traps_in_syscall)
-		pv_cpu_ops.handle_deferred_traps_in_syscall(regs,
-						use_pt_regs, new_hs);
-}
-
 static inline void
 pv_exit_handle_syscall(e2k_addr_t sbr, e2k_usd_hi_t usd_hi,
 			e2k_usd_lo_t usd_lo, e2k_upsr_t upsr)
@@ -80,13 +71,6 @@ exit_handle_syscall(e2k_addr_t sbr, e2k_usd_hi_t usd_hi,
 	pv_exit_handle_syscall(sbr, usd_hi, usd_lo, upsr);
 }
 
-static inline void
-handle_deferred_traps_in_syscall(struct pt_regs *regs,
-				bool use_pt_regs, bool new_hs)
-{
-	pv_handle_deferred_traps_in_syscall(regs, use_pt_regs, new_hs);
-}
-
 static inline bool
 is_proc_stack_bounds(struct thread_info *ti, struct pt_regs *regs)
 {
@@ -2,6 +2,7 @@
 #define _ASM_E2K_PERF_EVENT_H
 
 #include <linux/percpu.h>
+#include <asm/process.h>
 #include <asm/regs_state.h>
 
 static inline void set_perf_event_pending(void) {}
@@ -42,8 +42,6 @@ extern void __init *node_early_get_zeroed_page(int nid);
 
 extern int mem_init_done;
 
-#define check_pgt_cache()	do { } while (0)
-
 static inline void pgd_ctor(pgd_t *pgd)
 {
 	int root_pt_index;
@@ -377,14 +377,18 @@ static inline void untrack_pfn_moved(struct vm_area_struct *vma)
 #define	NATIVE_VMALLOC_START	(NATIVE_KERNEL_IMAGE_AREA_BASE + \
 						0x020000000000UL)
 					/* 0x0000 e400 0000 0000 */
-#define NATIVE_VMALLOC_END	(NATIVE_VMALLOC_START + 0x010000000000UL)
-					/* 0x0000 e500 0000 0000 */
-#define NATIVE_VMEMMAP_START	(NATIVE_VMALLOC_END + 0x010000000000UL)
-					/* 0x0000 e600 0000 0000 */
+/* We need big enough vmalloc area since usage of pcpu_embed_first_chunk()
+ * on e2k leads to having pcpu area span large ranges, and vmalloc area
+ * should be able to span those same ranges (see pcpu_embed_first_chunk()). */
+#define NATIVE_VMALLOC_END	(NATIVE_VMALLOC_START + 0x100000000000UL)
+					/* 0x0000 f400 0000 0000 */
+#define NATIVE_VMEMMAP_START	NATIVE_VMALLOC_END
+					/* 0x0000 f400 0000 0000 */
 #define NATIVE_VMEMMAP_END	(NATIVE_VMEMMAP_START + \
-					(1ULL << (E2K_MAX_PHYS_BITS - \
-						  PAGE_SHIFT)) * \
+					(1ULL << (E2K_MAX_PHYS_BITS - PAGE_SHIFT)) * \
 						sizeof(struct page))
+					/* 0x0000 f800 0000 0000 - for 64 bytes struct page */
+					/* 0x0000 fc00 0000 0000 - for 128 bytes struct page */
 
 #ifdef	CONFIG_SMP
 static inline void
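The new layout comments can be cross-checked with a little address arithmetic. Taking PAGE_SHIFT = 12 and E2K_MAX_PHYS_BITS = 48 as assumptions (neither value appears in this hunk):

NATIVE_VMALLOC_START = 0x0000e400 0000 0000
NATIVE_VMALLOC_END   = 0x0000e400 0000 0000 + 0x1000 0000 0000 = 0x0000f400 0000 0000  (a 16 TiB vmalloc window)
NATIVE_VMEMMAP_START = NATIVE_VMALLOC_END = 0x0000f400 0000 0000
NATIVE_VMEMMAP_END   = 0x0000f400 0000 0000 + 2^(48-12) pages * 64 B = 0x0000f400 0000 0000 + 0x0400 0000 0000 = 0x0000f800 0000 0000

With a 128-byte struct page the same formula gives 0x0000fc00 0000 0000, which is exactly what the two trailing comments state.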
@@ -821,7 +825,7 @@ extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
 			unsigned long address, pmd_t *pmdp);
 
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
-		unsigned long address, pmd_t *pmdp)
+		unsigned long addr, pmd_t *pmdp)
 {
 # ifdef CONFIG_SMP
 	u64 newval;
@@ -829,7 +833,8 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 	newval = (test_ts_flag(TS_KEEP_PAGES_VALID)) ?
 			_PAGE_INIT_VALID : 0UL;
 
-	return __pmd(__api_xchg_return(newval, &pmdp->pmd, d, RELAXED_MB));
+	return __pmd(pt_get_and_xchg_atomic(mm, addr, newval,
+						(pgprot_t *)pmdp));
 # else
 	pmd_t pmd = *pmdp;
 	pmd_clear(pmdp);
@@ -841,8 +846,8 @@ static inline pmd_t pmdp_huge_get_and_clear_as_valid(struct mm_struct *mm,
 		unsigned long addr, pmd_t *pmdp)
 {
 # ifdef CONFIG_SMP
-	return __pmd(__api_xchg_return(_PAGE_INIT_VALID, &pmdp->pmd, d,
-					RELAXED_MB));
+	return __pmd(pt_get_and_xchg_atomic(mm, addr, _PAGE_INIT_VALID,
+						(pgprot_t *)pmdp));
 # else
 	pmd_t pmd = *pmdp;
 	set_pmd_at(mm, addr, pmdp, __pmd(_PAGE_INIT_VALID));
@@ -12,8 +12,6 @@
 #include <linux/ftrace.h>
 #include <linux/rmap.h>
 #include <linux/vmalloc.h>
-#include <linux/swap.h>
-#include <linux/swapops.h>
 #include <linux/sched/signal.h>
 
 #include <asm/e2k_syswork.h>
@@ -794,6 +792,13 @@ clear_virt_thread_struct(thread_info_t *thread_info)
 {
 	/* virtual machines is not supported */
 }
 
+static __always_inline void
+host_exit_to_usermode_loop(struct pt_regs *regs, bool syscall, bool has_signal)
+{
+	/* native & guest kernels cannot be as host */
+}
+
 static __always_inline __interrupt void
 complete_switch_to_user_func(void)
 {
@@ -821,6 +826,32 @@ static inline void free_virt_task_struct(struct task_struct *task)
  */
 #endif	/* ! CONFIG_VIRTUALIZATION */
 
+/*
+ * Restore proper psize field of WD register
+ */
+static inline void
+native_restore_wd_register_psize(e2k_wd_t wd_from)
+{
+	e2k_wd_t wd;
+
+	raw_all_irq_disable();
+	wd = NATIVE_READ_WD_REG();
+	wd.psize = wd_from.WD_psize;
+	NATIVE_WRITE_WD_REG(wd);
+	raw_all_irq_enable();
+}
+
+/*
+ * Preserve current p[c]shtp as they indicate how much to FILL when returning
+ */
+static inline void
+native_preserve_user_hw_stacks_to_copy(e2k_stacks_t *u_stacks,
+					e2k_stacks_t *cur_stacks)
+{
+	u_stacks->pshtp = cur_stacks->pshtp;
+	u_stacks->pcshtp = cur_stacks->pcshtp;
+}
+
 static __always_inline void
 native_kernel_hw_stack_frames_copy(u64 *dst, const u64 *src, unsigned long size)
 {
@@ -1049,6 +1080,19 @@ extern e2k_addr_t get_nested_kernel_IP(pt_regs_t *regs, int n);
 
 #define ONLY_SET_GUEST_GREGS(ti)	NATIVE_ONLY_SET_GUEST_GREGS(ti)
 
+static inline void
+restore_wd_register_psize(e2k_wd_t wd_from)
+{
+	native_restore_wd_register_psize(wd_from);
+}
+
+static inline void
+preserve_user_hw_stacks_to_copy(e2k_stacks_t *u_stacks,
+				e2k_stacks_t *cur_stacks)
+{
+	native_preserve_user_hw_stacks_to_copy(u_stacks, cur_stacks);
+}
+
 static __always_inline void
 kernel_hw_stack_frames_copy(u64 *dst, const u64 *src, unsigned long size)
 {
@@ -1277,9 +1321,9 @@ user_hw_stack_frames_copy(void __user *dst, void *src, unsigned long copy_size,
 }
 
 static __always_inline int
-user_crs_frames_copy(e2k_mem_crs_t __user *u_frame, pt_regs_t *regs)
+user_crs_frames_copy(e2k_mem_crs_t __user *u_frame, pt_regs_t *regs,
+			e2k_mem_crs_t *crs)
 {
-	e2k_mem_crs_t *crs = &regs->crs;
 	unsigned long ts_flag;
 	int ret;
 
@@ -1397,7 +1441,7 @@ static inline void apply_graph_tracer_delta(unsigned long delta)
  * data from user space is spilled to kernel space.
  */
 static __always_inline int
-user_hw_stacks_copy(struct e2k_stacks *stacks,
+native_user_hw_stacks_copy(struct e2k_stacks *stacks,
 		pt_regs_t *regs, u64 cur_window_q, bool copy_full)
 {
 	trap_pt_regs_t *trap = regs->trap;
@@ -1533,65 +1577,6 @@ static inline void collapse_kernel_hw_stacks(struct e2k_stacks *stacks)
 	raw_all_irq_restore(flags);
 }
 
-/**
- * user_hw_stacks_copy_full - copy part of user stacks that was SPILLed
- *	into kernel back to user stacks.
- * @stacks - saved user stack registers
- * @regs - pt_regs pointer
- * @crs - last frame to copy
- *
- * If @crs is not NULL then the frame pointed to by it will also be copied
- * to userspace. Note that 'stacks->pcsp_hi.ind' is _not_ updated after
- * copying since it would leave stack in inconsistent state (with two
- * copies of the same @crs frame), this is left to the caller. *
- *
- * Inlining this reduces the amount of memory to copy in
- * collapse_kernel_hw_stacks().
- */
-static inline int user_hw_stacks_copy_full(struct e2k_stacks *stacks,
-					pt_regs_t *regs, e2k_mem_crs_t *crs)
-{
-	int ret;
-
-	/*
-	 * Copy part of user stacks that were SPILLed into kernel stacks
-	 */
-	ret = user_hw_stacks_copy(stacks, regs, 0, true);
-	if (unlikely(ret))
-		return ret;
-
-	/*
-	 * Nothing to FILL so remove the resulting hole from kernel stacks.
-	 *
-	 * IMPORTANT: there is always at least one user frame at the top of
-	 * kernel stack - the one that issued a system call (in case of an
-	 * exception we uphold this rule manually, see user_hw_stacks_prepare())
-	 * We keep this ABI and _always_ leave space for one user frame,
-	 * this way we can later FILL using return trick (otherwise there
-	 * would be no space in chain stack for the trick).
-	 */
-	collapse_kernel_hw_stacks(stacks);
-
-	/*
-	 * Copy saved %cr registers
-	 *
-	 * Caller must take care of filling of resulting hole
-	 * (last user frame from pcshtp == SZ_OF_CR).
-	 */
-	if (crs) {
-		e2k_mem_crs_t __user *u_frame;
-		int ret;
-
-		u_frame = (void __user *) (AS(stacks->pcsp_lo).base +
-					   AS(stacks->pcsp_hi).ind);
-		ret = user_crs_frames_copy(u_frame, regs);
-		if (unlikely(ret))
-			return ret;
-	}
-
-	return 0;
-}
-
 /**
  * user_hw_stacks_prepare - prepare user hardware stacks that have been
  *			SPILLed to kernel back to user space
@@ -1691,13 +1676,20 @@ static __always_inline void native_user_hw_stacks_prepare(
 	/*
	 * 2) Copy user data that cannot be FILLed
	 */
-	ret = user_hw_stacks_copy(stacks, regs, cur_window_q, false);
+	ret = native_user_hw_stacks_copy(stacks, regs, cur_window_q, false);
 	if (unlikely(ret))
 		do_exit(SIGKILL);
 }
 
 #ifndef	CONFIG_VIRTUALIZATION
 /* native kernel without virtualization support */
+static __always_inline int
+user_hw_stacks_copy(struct e2k_stacks *stacks,
+		pt_regs_t *regs, u64 cur_window_q, bool copy_full)
+{
+	return native_user_hw_stacks_copy(stacks, regs, cur_window_q, copy_full);
+}
+
 static __always_inline void
 host_user_hw_stacks_prepare(struct e2k_stacks *stacks, pt_regs_t *regs,
 		u64 cur_window_q, enum restore_caller from, int syscall)
@@ -1718,6 +1710,64 @@ host_user_hw_stacks_prepare(struct e2k_stacks *stacks, pt_regs_t *regs,
 #error	"unknown virtualization mode"
 #endif	/* !CONFIG_VIRTUALIZATION */
 
+/**
+ * user_hw_stacks_copy_full - copy part of user stacks that was SPILLed
+ *	into kernel back to user stacks.
+ * @stacks - saved user stack registers
+ * @regs - pt_regs pointer
+ * @crs - last frame to copy
+ *
+ * If @crs is not NULL then the frame pointed to by it will also be copied
+ * to userspace. Note that 'stacks->pcsp_hi.ind' is _not_ updated after
+ * copying since it would leave stack in inconsistent state (with two
+ * copies of the same @crs frame), this is left to the caller. *
+ *
+ * Inlining this reduces the amount of memory to copy in
+ * collapse_kernel_hw_stacks().
+ */
+static inline int user_hw_stacks_copy_full(struct e2k_stacks *stacks,
+					pt_regs_t *regs, e2k_mem_crs_t *crs)
+{
+	int ret;
+
+	/*
+	 * Copy part of user stacks that were SPILLed into kernel stacks
+	 */
+	ret = user_hw_stacks_copy(stacks, regs, 0, true);
+	if (unlikely(ret))
+		return ret;
+
+	/*
+	 * Nothing to FILL so remove the resulting hole from kernel stacks.
+	 *
+	 * IMPORTANT: there is always at least one user frame at the top of
+	 * kernel stack - the one that issued a system call (in case of an
+	 * exception we uphold this rule manually, see user_hw_stacks_prepare())
+	 * We keep this ABI and _always_ leave space for one user frame,
+	 * this way we can later FILL using return trick (otherwise there
+	 * would be no space in chain stack for the trick).
+	 */
+	collapse_kernel_hw_stacks(stacks);
+
+	/*
+	 * Copy saved %cr registers
+	 *
+	 * Caller must take care of filling of resulting hole
+	 * (last user frame from pcshtp == SZ_OF_CR).
+	 */
+	if (crs) {
+		e2k_mem_crs_t __user *u_frame;
+		int ret;
+
+		u_frame = (void __user *) (AS(stacks->pcsp_lo).base +
+					   AS(stacks->pcsp_hi).ind);
+		ret = user_crs_frames_copy(u_frame, regs, &regs->crs);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	return 0;
+}
+
 extern e2k_addr_t get_nested_kernel_IP(pt_regs_t *regs, int n);
 extern unsigned long remap_e2k_stack(unsigned long addr,
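user_hw_stacks_copy_full() keeps the same contract after the move: copy everything that was SPILLed, collapse the resulting hole, and optionally append one %cr frame. A hedged sketch of a typical call site, mirroring the macro changed further below that calls it with NULL for @crs (the wrapper name here is illustrative):

/* Illustrative sketch: flush SPILLed user hardware stacks back to user
 * space before inspecting them; no extra %cr frame is appended. */
static inline int flush_user_hw_stacks(struct pt_regs *regs)
{
	return user_hw_stacks_copy_full(&regs->stacks, regs, NULL);
}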
@@ -144,6 +144,7 @@ typedef struct thread_struct {
 						/* protected mode */
 #define	BIN_COMP_CODE_TASK_FLAG_BIT	4	/* task is binary application */
 						/* compiler code */
+#define	CLONE_SETTLS_TASK_FLAG_BIT	5	/* set new TLS for thread */
 #define	DO_PRESENT_HW_STACKS_TASK_FLAG_BIT 8	/* hardware stacks should be */
 						/* made present (populated) */
 #define	DO_LOCK_HW_STACKS_TASK_FLAG_BIT	9	/* hardware stacks should be */
@@ -163,6 +164,7 @@ typedef struct thread_struct {
 #define	BIN_32_CODE_TASK_FLAG	(1UL << BIN_32_CODE_TASK_FLAG_BIT)
 #define	BIN_COMP_CODE_TASK_FLAG	(1UL << BIN_COMP_CODE_TASK_FLAG_BIT)
 #define	PROTECTED_CODE_TASK_FLAG (1UL << PROTECTED_CODE_TASK_FLAG_BIT)
+#define	CLONE_SETTLS_TASK_FLAG	(1UL << CLONE_SETTLS_TASK_FLAG_BIT)
 #define	DO_PRESENT_HW_STACKS_TASK_FLAG	\
 		(1UL << DO_PRESENT_HW_STACKS_TASK_FLAG_BIT)
 #define	DO_LOCK_HW_STACKS_TASK_FLAG	\
@@ -7,9 +7,13 @@
 
 /****************** PROTECTED SYSTEM CALL DEBUG DEFINES *******************/
 
+#ifndef _E2K_PROTECTED_SYSCALLS_H_
+#define _E2K_PROTECTED_SYSCALLS_H_
+
 #ifdef CONFIG_PROTECTED_MODE
 
 #include <asm/mmu.h>
+#include <asm/e2k_ptypes.h>
 
 #undef	DYNAMIC_DEBUG_SYSCALLP_ENABLED
 #define	DYNAMIC_DEBUG_SYSCALLP_ENABLED	1 /* Dynamic prot. syscalls control */
@@ -123,6 +127,112 @@ do { \
 
 /**************************** END of DEBUG DEFINES ***********************/
 
+
+static inline
+long make_ap_lo(e2k_addr_t base, long size, long offset, int access)
+{
+	return MAKE_AP_LO(base, size, offset, access);
+}
+
+static inline
+long make_ap_hi(e2k_addr_t base, long size, long offset, int access)
+{
+	return MAKE_AP_HI(base, size, offset, access);
+}
+
+static inline
+int e2k_ptr_itag(long low)
+{
+	e2k_ptr_t ptr;
+
+	AW(ptr).lo = low;
+
+	return AS(ptr).itag;
+}
+
+static inline
+int e2k_ptr_rw(long low)
+{
+	e2k_ptr_t ptr;
+
+	AW(ptr).lo = low;
+
+	return AS(ptr).rw;
+}
+
+static inline
+unsigned long e2k_ptr_ptr(long low, long hiw, unsigned int min_size)
+{
+	e2k_ptr_t ptr;
+	unsigned int ptr_size;
+
+	AW(ptr).lo = low;
+	AW(ptr).hi = hiw;
+	ptr_size = AS(ptr).size - AS(ptr).curptr;
+
+	if (ptr_size < min_size) {
+		DbgSCP_ALERT(" Pointer is too small: %d < %d\n",
+			     ptr_size, min_size);
+		return 0;
+	} else {
+		return E2K_PTR_PTR(ptr, GET_SBR_HI());
+	}
+}
+
+static inline
+unsigned long e2k_ptr_curptr(long low, long hiw)
+{
+	e2k_ptr_t ptr;
+
+	AW(ptr).lo = low;
+	AW(ptr).hi = hiw;
+
+	return AS(ptr).curptr;
+}
+
+static inline
+unsigned long e2k_ptr_size(long low, long hiw, unsigned int min_size)
+{
+	e2k_ptr_hi_t hi;
+	unsigned int ptr_size;
+
+	AW(hi) = hiw;
+	ptr_size = AS(hi).size - AS(hi).curptr;
+
+	if (ptr_size < min_size) {
+		DbgSCP_ALERT(" Pointer is too small: %d < %d\n",
+			     ptr_size, min_size);
+		return 0;
+	} else {
+		return ptr_size;
+	}
+}
+
+static inline int e2k_ptr_str_check(char __user *str, u64 max_size)
+{
+	long slen;
+
+	slen = strnlen_user(str, max_size);
+
+	if (unlikely(!slen || slen > max_size))
+		return 1;
+
+	return 0;
+}
+
+static inline char __user *e2k_ptr_str(long low, long hiw, u64 sbr_hi)
+{
+	char __user *str;
+	e2k_ptr_hi_t hi = { .word = hiw };
+
+	str = (char __user *) __E2K_PTR_PTR(low, hiw, sbr_hi);
+
+	if (!e2k_ptr_str_check(str, AS(hi).size - AS(hi).curptr))
+		return str;
+
+	return NULL;
+}
+
 #else /* #ifndef CONFIG_PROTECTED_MODE */
 
 #define DbgSCP(...)
@@ -131,3 +241,7 @@ do { \
 #define DbgSC_ALERT(...)
 
 #endif /* CONFIG_PROTECTED_MODE */
+
+
+#endif /* _E2K_PROTECTED_SYSCALLS_H_ */
+
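The e2k_ptr_*() helpers above unpack a protected-mode descriptor passed as two 64-bit words and refuse pointers whose remaining size is below a caller-supplied minimum. A hedged usage sketch for a protected syscall wrapper (the wrapper itself is illustrative; the helpers and their 0-on-failure convention come from the code above):

/* Illustrative sketch: turn a (lo, hi) descriptor argument into a usable
 * user pointer of at least 'len' bytes, or NULL if the descriptor is too small. */
static inline void __user *prot_get_user_buf(long lo, long hi, unsigned int len)
{
	unsigned long ptr;

	if (!e2k_ptr_size(lo, hi, len))		/* remaining size < len */
		return NULL;
	ptr = e2k_ptr_ptr(lo, hi, len);		/* 0 on failure */
	return (void __user *) ptr;
}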
@@ -94,7 +94,6 @@ typedef struct trap_pt_regs {
 	u64	TIR_lo;
 	int	TIR_no;		/* current handled TIRs # */
 	s8	nr_TIRs;
-	bool	irqs_disabled;	/* IRQs are disabled while trap */
 	s8	tc_count;
 	s8	curr_cnt;
 	char	ignore_user_tc;
@@ -114,11 +113,29 @@ typedef struct trap_pt_regs {
 #endif
 } trap_pt_regs_t;
 
-/*
- * WARNING: 'usd_lo' field in the 'pt_regs' structure should have offset
- * JB_USD_LO = 22 (in format of long long) as defined by e2k GLIBC header
- * /usr/include/bits/setjmp.h
- */
+union pt_regs_flags {
+	struct {
+		/* execute_mmu_operations() is working */
+		u32 exec_mmu_op : 1;
+		/* nested exception appeared while
+		 * execute_mmu_operations() was working */
+		u32 exec_mmu_op_nested : 1;
+		/* A signal's handler will be called upon return to userspace */
+		u32 sig_call_handler : 1;
+		/* System call should be restarted after signal's handler */
+		u32 sig_restart_syscall : 1;
+		/* Used to distinguish between entry8 and entry10 for protected syscalls */
+		u32 protected_entry10 : 1;
+		/* From hardware guest interception */
+		u32 kvm_hw_intercept : 1;
+		/* trap or system call is on or from guest */
+		u32 trap_as_intc_emul : 1;
+		/* Trap occurred in light hypercall */
+		u32 light_hypercall : 1;
+	};
+	u32 word;
+};
+
 typedef	struct pt_regs {
 	struct pt_regs	*next;		/* the previous regs structure */
 	struct trap_pt_regs *trap;
@@ -131,9 +148,7 @@ typedef struct pt_regs {
 	e2k_wd_t	wd;		/* current window descriptor */
 	int		sys_num;	/* to restart sys_call */
 	int		kernel_entry;
-	u32		flags;		/* Trap occured on the instruction */
-					/* with "Store recovery point" flag */
-	bool		irqs_disabled;	/* IRQs are disabled while trap */
+	union pt_regs_flags flags;
 	e2k_ctpr_t	ctpr1;		/* CTPRj for control transfer */
 	e2k_ctpr_t	ctpr2;
 	e2k_ctpr_t	ctpr3;
@@ -168,12 +183,15 @@ typedef struct pt_regs {
 	u64		rpr_lo;
 	u64		rpr_hi;
 #ifdef	CONFIG_VIRTUALIZATION
+	u64		sys_func;	/* need only for guest */
 	e2k_stacks_t	g_stacks;	/* current state of guest kernel */
 					/* stacks registers */
 	bool		g_stacks_valid;	/* the state of guest kernel stacks */
 					/* registers is valid */
 	bool		g_stacks_active; /* the guest kernel stacks */
 					/* registers is in active work */
+	bool		stack_regs_saved; /* stack state regs was already */
+					/* saved */
 	bool		need_inject;	/* flag for unconditional injection */
 					/* trap to guest to avoid acces to */
 					/* guest user space in trap context */
@@ -183,11 +201,19 @@ typedef struct pt_regs {
 	unsigned long	traps_to_guest;	/* mask of traps passed to guest */
 					/* and are not yet handled by guest */
 					/* need only for host */
-	unsigned long	deferred_traps;	/* mask of deffered traps, which */
-					/* cannot be handled immediately */
-					/* (for guest) or occured while */
-					/* guest is handling previous traps */
-					/* (for host, for example interrupts) */
+#ifdef	CONFIG_KVM_GUEST_KERNEL
+	/* only for guest kernel */
+	/* already copyed back part of guest user hardware stacks */
+	/* spilled to guest kernel stacks */
+	struct {
+		e2k_size_t ps_size;	/* procedure stack copyed size */
+		e2k_size_t pcs_size;	/* chain stack copyesd size */
+		/* The frames injected to support 'signal stack' */
+		/* and trampolines to return from user to kernel */
+		e2k_size_t pcs_injected_frames_size;
+	} copyed;
+#endif	/* CONFIG_KVM_GUEST_KERNEL */
+
 #endif	/* CONFIG_VIRTUALIZATION */
 
 #if	defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST_KERNEL)
@@ -199,23 +225,6 @@ typedef struct pt_regs {
 #endif	/* CONFIG_KERNEL_TIMES_ACCOUNT */
 } pt_regs_t;
 
-#define	E_MMU_OP_FLAG_PT_REGS		0x2U	/* execute_mmu_operations is */
-						/* working */
-#define	E_MMU_NESTED_OP_FLAG_PT_REGS	0x4U	/* nested exception appeared */
-						/* while */
-						/* execute_mmu_operations is */
-						/* working */
-#define	SIG_CALL_HANDLER_FLAG_PT_REGS	0x8U
-#define	SIG_RESTART_SYSCALL_FLAG_PT_REGS 0x10U
-#define	PROT_10_FLAG_PT_REGS		0x20U	/* marked 10 pm sys_call */
-#define	TRAP_AS_INTC_EMUL_PT_REGS	0x0100	/* trap or system call */
-						/* is on or from guest */
-#define	GUEST_FLAG_PT_REGS		0x10000U /* Trap occurred on the */
-						/* guest and should be */
-						/* handled by guest */
-#define	LIGHT_HYPERCALL_FLAG_PT_REGS	0x20000U /* Trap occurred in */
-						/* hypercall */
-
 static inline struct trap_pt_regs *
 pt_regs_to_trap_regs(struct pt_regs *regs)
 {
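Because pt_regs->flags is now the pt_regs_flags union rather than a plain u32 with the mask #defines deleted above, flag tests become bit-field accesses. For instance, a check that previously masked SIG_CALL_HANDLER_FLAG_PT_REGS would now read:

/* Illustrative sketch: the same test against the old mask and the new union. */
static inline bool will_call_sig_handler(const struct pt_regs *regs)
{
	return regs->flags.sig_call_handler;	/* was: regs->flags & SIG_CALL_HANDLER_FLAG_PT_REGS */
}

Setting a flag becomes an assignment such as regs->flags.sig_restart_syscall = 1, and the whole set can still be cleared at once through the aliased member: regs->flags.word = 0.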
@@ -580,6 +589,7 @@ struct signal_stack_context {
 do { \
 	struct pt_regs *__pt_regs = current_thread_info()->pt_regs; \
 	if (__pt_regs) { \
+		if (!test_ts_flag(TS_USER_EXECVE)) \
 		user_hw_stacks_copy_full(&__pt_regs->stacks, \
 						__pt_regs, NULL); \
 		SAVE_AAU_REGS_FOR_PTRACE(__pt_regs, current_thread_info()); \
@@ -709,7 +719,7 @@ static inline struct pt_regs *find_user_regs(const struct pt_regs *regs)
 	do {
 		CHECK_PT_REGS_LOOP(regs);
 
-		if (user_mode(regs))
+		if (user_mode(regs) && !regs->flags.kvm_hw_intercept)
 			break;
 
 		regs = regs->next;
@@ -731,7 +741,7 @@ static inline struct pt_regs *find_entry_regs(const struct pt_regs *regs)
 	do {
 		CHECK_PT_REGS_LOOP(regs);
 
-		if (user_mode(regs))
+		if (user_mode(regs) && !regs->flags.kvm_hw_intercept)
 			goto found;
 
 		prev_regs = regs;
@@ -745,12 +755,26 @@ found:
 	return (struct pt_regs *) regs;
 }
 
-static inline struct pt_regs *find_trap_regs(const struct pt_regs *regs)
+static inline struct pt_regs *find_host_regs(const struct pt_regs *regs)
 {
 	while (regs) {
 		CHECK_PT_REGS_LOOP(regs);
 
-		if (from_trap(regs))
+		if (likely(!regs->flags.kvm_hw_intercept))
+			break;
+
+		regs = regs->next;
+	};
+
+	return (struct pt_regs *) regs;
+}
+
+static inline struct pt_regs *find_trap_host_regs(const struct pt_regs *regs)
+{
+	while (regs) {
+		CHECK_PT_REGS_LOOP(regs);
+
+		if (from_trap(regs) && !regs->flags.kvm_hw_intercept)
 			break;
 
 		regs = regs->next;
@@ -182,19 +182,6 @@ do { \
 	(thread_info)->u_stack.size = stk_sz; \
 })
 
-/*
- * Remeber state of IRQs at trap point
- * Now it usefull to analyze can be traps passed to guest handler
- * immediately or should be deferred
- */
-#define	SAVE_IRQS_STATE(regs, upsr) \
-({ \
-	unsigned long psr_val = (regs)->crs.cr1_lo.CR1_lo_psr; \
-	unsigned long upsr_val = (upsr).UPSR_reg; \
-	(regs)->irqs_disabled = \
-		psr_and_upsr_irqs_disabled_flags(psr_val, upsr_val); \
-})
-
 /*
  * Interrupts should be disabled by caller to read all hardware
  * stacks registers in coordinated state
@@ -579,24 +566,24 @@ do { \
 #endif	/* E2K_MAXGR_d */
 
 static inline void
-native_save_local_glob_regs(local_gregs_t *l_gregs)
+native_save_local_glob_regs(local_gregs_t *l_gregs, bool is_signal)
 {
-	void (*save_local_gregs)(struct local_gregs *);
+	void (*save_local_gregs)(struct local_gregs *, bool is_signal);
 
 	save_local_gregs = machine.save_local_gregs;
 
 	copy_k_gregs_to_l_gregs(l_gregs, &current_thread_info()->k_gregs);
-	save_local_gregs(l_gregs);
+	save_local_gregs(l_gregs, is_signal);
 }
 static inline void
-native_restore_local_glob_regs(local_gregs_t *l_gregs)
+native_restore_local_glob_regs(local_gregs_t *l_gregs, bool is_signal)
 {
-	void (*restore_local_gregs)(const struct local_gregs *);
+	void (*restore_local_gregs)(const struct local_gregs *, bool is_signal);
 
 	restore_local_gregs = machine.restore_local_gregs;
 
 	get_k_gregs_from_l_regs(&current_thread_info()->k_gregs, l_gregs);
-	restore_local_gregs(l_gregs);
+	restore_local_gregs(l_gregs, is_signal);
 }
 
 static inline void
@@ -767,7 +754,7 @@ do { \
 	(trap)->tc_count = cnt * 3; \
 	if (unlikely(GET_CLW_REQUEST_COUNT(regs) && \
 			cpu_has(CPU_HWBUG_CLW_STALE_L1_ENTRY))) \
-		(regs)->clw_cpu = raw_smp_processor_id(); \
+		SET_CLW_CPU(regs, raw_smp_processor_id()); \
 	if (cs_req_num > 0) { \
 		/* recover chain stack pointers to repeat FILL */ \
 		e2k_pcshtp_t pcshtp = NATIVE_READ_PCSHTP_REG_SVALUE(); \
@@ -798,6 +785,7 @@ do { \
 # define GET_CLW_REQUEST_COUNT(regs)	((regs)->clw_count)
 # define SET_CLW_FIRST_REQUEST(regs, cnt) ((regs)->clw_first = (cnt))
 # define GET_CLW_FIRST_REQUEST(regs)	((regs)->clw_first)
+# define SET_CLW_CPU(regs, cpu)		((regs)->clw_cpu = (cpu))
 #define	ENABLE_US_CLW() \
 do { \
 	if (!cpu_has(CPU_HWBUG_CLW)) \
@@ -810,6 +798,7 @@ do { \
 # define GET_CLW_REQUEST_COUNT(regs)	(0)
 # define SET_CLW_FIRST_REQUEST(regs, cnt)
 # define GET_CLW_FIRST_REQUEST(regs)	(0)
+# define SET_CLW_CPU(regs, cpu)
 # define ENABLE_US_CLW()
 # define DISABLE_US_CLW()
 #endif	/* CONFIG_CLW_ENABLE */
@@ -862,10 +851,6 @@ do { \
 		PREFIX_RESTORE_USER_CRs(NATIVE, regs)
 #define	NATIVE_RESTORE_USER_STACK_REGS(regs, insyscall) \
 		PREFIX_RESTORE_USER_STACK_REGS(NATIVE, regs, insyscall)
-#define	NATIVE_RESTORE_USER_TRAP_STACK_REGS(regs) \
-		NATIVE_RESTORE_USER_STACK_REGS(regs, false)
-#define	NATIVE_RESTORE_USER_SYSCALL_STACK_REGS(regs) \
-		NATIVE_RESTORE_USER_STACK_REGS(regs, true)
 
 #if	defined(CONFIG_PARAVIRT_GUEST)
 /* it is paravirtualized host and guest */
@@ -887,19 +872,21 @@ do { \
 		RESTORE_USER_STACK_REGS(regs, false)
 #define	RESTORE_USER_SYSCALL_STACK_REGS(regs) \
 		RESTORE_USER_STACK_REGS(regs, true)
+#define	RESTORE_COMMON_REGS(regs) \
+		NATIVE_RESTORE_COMMON_REGS(regs)
+
 
 #define	INIT_G_REGS()		NATIVE_INIT_G_REGS()
 #define	BOOT_INIT_G_REGS()	NATIVE_BOOT_INIT_G_REGS()
 
 static inline void
-save_local_glob_regs(local_gregs_t *l_gregs)
+save_local_glob_regs(local_gregs_t *l_gregs, bool is_signal)
 {
-	native_save_local_glob_regs(l_gregs);
+	native_save_local_glob_regs(l_gregs, is_signal);
 }
 static inline void
-restore_local_glob_regs(local_gregs_t *l_gregs)
+restore_local_glob_regs(local_gregs_t *l_gregs, bool is_signal)
 {
-	native_restore_local_glob_regs(l_gregs);
+	native_restore_local_glob_regs(l_gregs, is_signal);
 }
 
 static inline void
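save_local_glob_regs()/restore_local_glob_regs() now carry the is_signal argument all the way down to machine.save_local_gregs(). How callers set it is not shown in these hunks; a plausible reading, consistent with the setup_signal_stack(regs, is_signal) change below, is that signal-frame paths pass true and ordinary save/restore paths pass false, e.g.:

/* Illustrative sketch only: snapshot local global registers while building
 * a signal frame (whether this is the real call site is an assumption). */
static inline void save_gregs_for_signal_frame(local_gregs_t *l_gregs)
{
	save_local_glob_regs(l_gregs, true);	/* is_signal */
}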
@@ -1080,7 +1067,11 @@ NATIVE_DO_SAVE_TASK_USER_REGS_TO_SWITCH(struct sw_regs *sw_regs,
 static inline void
 NATIVE_SAVE_TASK_REGS_TO_SWITCH(struct task_struct *task)
 {
+#ifdef CONFIG_VIRTUALIZATION
+	const int task_is_binco = TASK_IS_BINCO(task) || task_thread_info(task)->vcpu;
+#else
 	const int task_is_binco = TASK_IS_BINCO(task);
+#endif
 	struct mm_struct *mm = task->mm;
 	struct sw_regs *sw_regs = &task->thread.sw_regs;
 
@@ -1166,7 +1157,11 @@ NATIVE_RESTORE_TASK_REGS_TO_SWITCH(struct task_struct *task,
 	u64 pcsp_lo = AS_WORD(sw_regs->pcsp_lo);
 	u64 pcsp_hi = AS_WORD(sw_regs->pcsp_hi);
 	e2k_mem_crs_t crs = sw_regs->crs;
+#ifdef CONFIG_VIRTUALIZATION
+	const int task_is_binco = TASK_IS_BINCO(task) || ti->vcpu;
+#else
 	const int task_is_binco = TASK_IS_BINCO(task);
+#endif
 	struct mm_struct *mm = task->mm;
 
 	NATIVE_FLUSHCPU;
@@ -79,6 +79,8 @@ s64 sys_el_binary(s64 work, s64 arg2, s64 arg3, s64 arg4);
 #define	SET_SECONDARY_64BIT_MODE	11
 #define	GET_PROTOCOL_VERSION		12
 #define	SET_IC_NEED_FLUSH_ON_SWITCH	13
+#define	GET_UPT_SEC_AD_SHIFT_DSBL	14
+#define	SET_UPT_SEC_AD_SHIFT_DSBL	15
 
 /* Selector numbers for GET_SECONDARY_SPACE_OFFSET */
 enum sel_num {
@@ -189,22 +189,27 @@
 
 /* MC */
 
-#define SIC_MAX_MC_COUNT	4
+#define SIC_MAX_MC_COUNT	E16C_SIC_MC_COUNT
 #define SIC_MC_COUNT		(machine.sic_mc_count)
 
+#define SIC_MC_BASE		0x400
+#define SIC_MC_SIZE		(machine.sic_mc_size)
+
+#define SIC_mc_ecc		0x440
 #define SIC_mc0_ecc		0x400
 #define SIC_mc1_ecc		(machine.sic_mc1_ecc)
 #define SIC_mc2_ecc		0x480
 #define SIC_mc3_ecc		0x4c0
 
-#define SIC_MC_BASE		SIC_mc0_ecc
-#define SIC_MC_SIZE		(IS_MACHINE_E2S ? 0xa4 : \
-				(IS_MACHINE_E8C ? 0xe4 : 0xf4))
+#define SIC_mc_ch		0x400
+#define SIC_mc_status		0x44c
 
-/* PHY */
-#define SIC_PHY_BASE		(IS_MACHINE_E8C2 ? 0x4000 : 0x1000)
-#define SIC_PHY_SIZE		(IS_MACHINE_E2S ? 0x0c00 : \
-				(IS_MACHINE_E8C ? 0x1000 : 0x4000))
+/* HMU */
+#define SIC_hmu_mic		0xd00
+#define SIC_hmu0_int		0xd40
+#define SIC_hmu1_int		0xd70
+#define SIC_hmu2_int		0xda0
+#define SIC_hmu3_int		0xdd0
 
 /* IPCC */
 #define SIC_IPCC_LINKS_COUNT	3
@@ -260,6 +265,28 @@ typedef union {
 	u32 word;
 } freq_core_mon_t;
 
+/* PMC_FREQ_CORE_0_CTRL fields: */
+typedef union {
+	struct {
+		u32 enable		: 1;
+		u32 mode		: 3;
+		u32 progr_divF		: 6;
+		u32 progr_divF_max	: 6;
+		u32 decr_dsbl		: 1;
+		u32 pin_en		: 1;
+		u32 clk_en		: 1;
+		u32 log_en		: 1;
+		u32 sleep_c2		: 1;
+		u32 w_trap		: 1;
+		u32 ev_term		: 1;
+		u32 mon_Fmax		: 1;
+		u32 divF_curr		: 6;
+		u32 bfs_bypass		: 1;
+		u32 rmwen		: 1;
+	};
+	u32 word;
+} freq_core_ctrl_t;
+
 /* PMC_SYS_MON_1 fields: */
 typedef union {
 	struct {
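freq_core_ctrl_t mirrors the PMC_FREQ_CORE_0_CTRL bit layout, so a raw register value can be decoded by assigning it to the union's word member. A minimal sketch; how the raw value is read from the SIC/PMC is left to whatever accessor the caller already uses:

/* Illustrative sketch: decode a raw PMC_FREQ_CORE_0_CTRL value. */
static inline unsigned int freq_core_cur_divider(u32 raw)
{
	freq_core_ctrl_t ctrl;

	ctrl.word = raw;
	if (!ctrl.enable)
		return 0;
	return ctrl.divF_curr;	/* currently applied frequency divider */
}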
@@ -427,6 +454,9 @@ typedef union {
 #define BC_MM_REG_SIZE (BC_MM_REG_END - BC_MM_REG_BASE)
 #define BC_MM_REG_NUM (BC_MM_REG_SIZE / 4)

+#define EFUSE_RAM_ADDR 0x0cc0
+#define EFUSE_RAM_DATA 0x0cc4
+
 #ifndef __ASSEMBLY__
 /*
  * Read/Write RT_LCFGj Regs
@@ -128,7 +128,7 @@ struct signal_stack;
 extern void free_signal_stack(struct signal_stack *signal_stack);
 extern struct signal_stack_context __user *pop_signal_stack(void);
 extern struct signal_stack_context __user *get_signal_stack(void);
-extern int setup_signal_stack(struct pt_regs *regs);
+extern int setup_signal_stack(struct pt_regs *regs, bool is_signal);

 #define GET_SIG_RESTORE_STACK(ti, sbr, usd_lo, usd_hi) \
 do { \
@@ -59,16 +59,10 @@ extern unsigned long smp_invalidate_needed;
 extern int pic_mode;
 extern cpumask_t callin_go;

-extern void smp_alloc_memory(void);
 extern void e2k_start_secondary(int cpuid);
 extern void start_secondary_resume(int cpuid, int cpu);
 extern void wait_for_startup(int cpuid, int hotplug);
-extern void smp_flush_tlb(void);
-extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
 extern void smp_send_reschedule(int cpu);
-extern void smp_invalidate_rcv(void); /* Process an NMI */
-extern void (*mtrr_hook) (void);
-extern void zap_low_mappings (void);
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 extern void smp_send_refresh(void);
@@ -4,7 +4,7 @@
 #ifdef CONFIG_SPARSEMEM

 # define SECTION_SIZE_BITS 28
-# define MAX_PHYSMEM_BITS 40
+# define MAX_PHYSMEM_BITS CONFIG_E2K_PA_BITS

 #endif /* CONFIG_SPARSEMEM */
 #endif /* _ASM_E2K_SPARSEMEM_H */
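Note on the MAX_PHYSMEM_BITS change above: tying it to CONFIG_E2K_PA_BITS scales the sparsemem section count with the configured physical-address width instead of a fixed 40 bits. Sparsemem sizes its section array roughly as 1 << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS); a stand-alone sketch of that arithmetic, with assumed CONFIG_E2K_PA_BITS values used only for illustration:

#include <stdio.h>

#define SECTION_SIZE_BITS 28	/* 256 MiB sections, as in the hunk above */

int main(void)
{
	/* Hypothetical CONFIG_E2K_PA_BITS settings. */
	int pa_bits[] = { 40, 44, 48 };

	for (int i = 0; i < 3; i++)
		printf("%d PA bits -> %lu sparsemem sections\n", pa_bits[i],
		       1UL << (pa_bits[i] - SECTION_SIZE_BITS));
	return 0;
}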
@@ -432,7 +432,13 @@ static inline void native_tagged_memcpy_8(void *__restrict dst,
  *
  * All parameters must be 8-bytes aligned.
  */
-#if defined(CONFIG_PARAVIRT_GUEST)
+#ifdef CONFIG_BOOT_E2K
+#define tagged_memcpy_8(dst, src, n) \
+({ \
+	native_tagged_memcpy_8(dst, src, n, \
+			__alignof(*(dst)), __alignof(*(src))); \
+})
+#elif defined(CONFIG_PARAVIRT_GUEST)
 #include <asm/paravirt/string.h>
 #elif defined(CONFIG_KVM_GUEST_KERNEL)
 #include <asm/kvm/guest/string.h>
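Note on the new CONFIG_BOOT_E2K branch above: it routes tagged_memcpy_8() straight to the native helper through a statement expression, passing the alignment of both operands. A usage fragment (the buffers are illustrative; per the comment above, source, destination and size must all be 8-byte aligned):

	u64 dst[16], src[16];	/* u64 arrays are naturally 8-byte aligned */

	/* copies 128 bytes together with their e2k tag bits */
	tagged_memcpy_8(dst, src, sizeof(src));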
@@ -0,0 +1,18 @@
+/* Functions to sync shadow page tables with guest page tables
+ * without flushing tlb. Used only by guest kernels
+ *
+ * Copyright 2021 Andrey Alekhin (alekhin_a@mcst.ru)
+ */
+#ifndef _E2K_SYNC_PG_TABLES_H
+#define _E2K_SYNC_PG_TABLES_H
+
+#if defined(CONFIG_KVM_GUEST_KERNEL)
+
+#include <asm/kvm/guest/sync_pg_tables.h>
+
+#define sync_addr_range kvm_sync_addr_range
+#else
+#define sync_addr_range
+#endif /* !CONFIG_KVM_GUEST_KERNEL */
+
+#endif
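Note on the new header above: only KVM guest kernels map sync_addr_range to kvm_sync_addr_range; on every other configuration the macro expands to nothing and the call site degenerates to a harmless expression. A rough usage fragment, assuming the macro takes the start and end of the virtual range whose page tables were just edited (the actual signature lives in asm/kvm/guest/sync_pg_tables.h, not in this hunk):

	/* after editing guest page tables for [addr, addr + PAGE_SIZE) */
	set_pte_at(mm, addr, ptep, pte);
	sync_addr_range(addr, addr + PAGE_SIZE);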
@@ -39,10 +39,6 @@ extern long sys_stat64(const char __user *filename,
 extern long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
 extern long sys_lstat64(const char __user *filename,
 		struct stat64 __user *statbuf);
-#ifdef CONFIG_MAC_
-extern int sys_macctl(register int request, register void *data,
-		register int size);
-#endif

 extern asmlinkage long sys_set_backtrace(unsigned long *__user buf,
 		size_t count, size_t skip, unsigned long flags);
@@ -83,8 +79,13 @@ extern long protected_sys_rt_sigaction_ex(int sig,
 		const size_t sigsetsize);
 extern long protected_sys_mq_notify(const long a1,
 		const unsigned long __user a2);
-extern long protected_sys_timer_create(const long a1,
-		const unsigned long __user a2, const unsigned long __user a3);
+extern long protected_sys_timer_create(const long a1, /* clockid */
+		const unsigned long __user a2, /* sevp */
+		const unsigned long __user a3, /* timerid */
+		const unsigned long unused4,
+		const unsigned long unused5,
+		const unsigned long unused6,
+		const struct pt_regs *regs);
 extern long protected_sys_rt_sigtimedwait(const unsigned long __user a1,
 		const unsigned long __user a2, const unsigned long __user a3,
 		const unsigned long a4);
@@ -229,6 +230,13 @@ extern long protected_sys_sendmsg(const unsigned long sockfd,
 		const unsigned long unused5,
 		const unsigned long unused6,
 		const struct pt_regs *regs);
+extern long protected_sys_sendmmsg(const unsigned long sockfd,
+		const unsigned long __user msgvec,
+		const unsigned long vlen,
+		const unsigned long flags,
+		const unsigned long unused5,
+		const unsigned long unused6,
+		const struct pt_regs *regs);
 extern long protected_sys_recvmsg(const unsigned long socket,
 		const unsigned long __user message,
 		const unsigned long flags,
@@ -236,6 +244,13 @@ extern long protected_sys_recvmsg(const unsigned long socket,
 		const unsigned long unused5,
 		const unsigned long unused6,
 		const struct pt_regs *regs);
+extern long protected_sys_recvmmsg(const unsigned long socket,
+		const unsigned long __user message,
+		const unsigned long vlen,
+		const unsigned long flags,
+		const unsigned long __user timeout,
+		const unsigned long unused6,
+		const struct pt_regs *regs);
 extern long protected_sys_olduselib(const unsigned long __user a1, /* library */
 		const unsigned long __user a2); /* umdd */
 /* NB> 'olduselib' is obsolete syscall; unsupported in CPU ISET V6 */
@@ -314,8 +329,8 @@ extern long protected_sys_prctl(const int option, /* a1 */
 		const unsigned long unused6,
 		const struct pt_regs *regs);
 extern long protected_sys_ioctl(const int fd, /* a1 */
-		const unsigned long request,/* a2 */
-		void *argp, /* a3 */
+		const unsigned long request, /* a2 */
+		const unsigned long __user argp, /* a3 */
 		const unsigned long unused4,
 		const unsigned long unused5,
 		const unsigned long unused6,
@@ -348,6 +363,48 @@ extern long protected_sys_pselect6(const long nfds, /* a1 */
 		const unsigned long timeout, /* a5 */
 		const unsigned long sigmask, /* a6 */
 		const struct pt_regs *regs);
+extern long protected_sys_rt_sigqueueinfo(const long tgid, /* a1 */
+		const long sig, /* a2 */
+		const unsigned long __user uinfo, /* a3 */
+		const unsigned long unused4,
+		const unsigned long unused5,
+		const unsigned long unused6,
+		const struct pt_regs *regs);
+extern long protected_sys_rt_tgsigqueueinfo(const long tgid, /* a1 */
+		const long tid, /* a2 */
+		const long sig, /* a3 */
+		const unsigned long __user uinfo, /* a4 */
+		const unsigned long unused5,
+		const unsigned long unused6,
+		const struct pt_regs *regs);
+extern long protected_sys_pidfd_send_signal(const long pidfd, /* a1 */
+		const long sig, /* a2 */
+		const unsigned long __user info, /* a3 */
+		unsigned long flags, /* a4 */
+		const unsigned long unused5,
+		const unsigned long unused6,
+		const struct pt_regs *regs);
+extern long protected_sys_waitid(const long which, /* a1 */
+		const long pid, /* a2 */
+		const unsigned long __user *infop, /* a3 */
+		const long options, /* a4 */
+		const unsigned long __user *ru, /* a5 */
+		const unsigned long unused6,
+		const struct pt_regs *regs);
+extern long protected_sys_io_uring_register(const unsigned long fd, /* a1 */
+		const unsigned long opcode, /* a2 */
+		const unsigned long __user arg, /* a3 */
+		const unsigned long nr_args, /* a4 */
+		const unsigned long unused5,
+		const unsigned long unused6,
+		const struct pt_regs *regs);
+extern long protected_sys_kexec_load(const unsigned long entry, /* a1 */
+		const unsigned long nr_segments, /* a2 */
+		const unsigned long __user segments, /* a3 */
+		const unsigned long flags, /* a4 */
+		const unsigned long unused5,
+		const unsigned long unused6,
+		const struct pt_regs *regs);

 extern int arch_init_pm_sc_debug_mode(const int debug_mask);

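Note on the block of protected_sys_* declarations above: apart from per-argument types, every handler receives the six raw syscall arguments plus the pt_regs of the trapping context, and the unused4..unused6 placeholders keep that shape uniform. A sketch of the common shape as a function-pointer type, ignoring the per-argument const/signedness differences (the typedef name is illustrative, not part of the patch):

typedef long (*protected_syscall_t)(const unsigned long a1,
		const unsigned long a2, const unsigned long a3,
		const unsigned long a4, const unsigned long a5,
		const unsigned long a6, const struct pt_regs *regs);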
@@ -635,7 +635,7 @@ extern void * __e2k_read_kernel_return_address(int n);
 		: \
 		__e2k_read_kernel_return_address(n); })

-#if CONFIG_CPU_ISET < 5
+#ifndef CONFIG_CPU_HW_CLEAR_RF
 typedef void (*clear_rf_t)(void);
 extern const clear_rf_t clear_rf_fn[];

@@ -643,7 +643,7 @@ static __always_inline void clear_rf_kernel_except_current(u64 num_q)
 {
 	clear_rf_fn[num_q]();
 }
-#endif /* CONFIG_CPU_ISET < 5 */
+#endif

 #define SWITCH_TO_KERNEL_UPSR(upsr_reg, irq_en, nmirq_dis) \
 		NATIVE_SWITCH_TO_KERNEL_UPSR(upsr_reg, irq_en, nmirq_dis)
@@ -9,8 +9,9 @@
 #ifdef __KERNEL__

 #ifndef __ASSEMBLY__
-#include <linux/types.h>
 #include <linux/list.h>
+#include <linux/restart_block.h>
+#include <linux/types.h>

 #include <asm/e2k_api.h>
 #include <asm/cpu_regs_types.h>
@@ -232,13 +233,8 @@ typedef struct thread_info {
 #define TIF_PSEUDOTHREAD 24	/* the thread is pseudo only to run */
				/* on VIRQ VCPU as starter of VIRQ */
				/* handler */
-#define TIF_HOST_AT_VCPU_MODE 25	/* the host thread is switching to */
-				/* VCPU running mode and wait for */
-				/* interception (trap on PV mode) */
 #define TIF_VIRQS_ACTIVE 26	/* the thread is ready to inject */
				/* VIRQS interrupt */
-#define TIF_VIRQ_HANDLER 27	/* the thread is VIRQ handler and */
-				/* should run with max priorety */
 #define TIF_LIGHT_HYPERCALL 28	/* hypervisor is executing light */
				/* hypercall */
 #define TIF_GENERIC_HYPERCALL 29	/* hypervisor is executing generic */
@@ -266,9 +262,7 @@ typedef struct thread_info {
 #define _TIF_VIRTUALIZED_GUEST (1 << TIF_VIRTUALIZED_GUEST)
 #define _TIF_PARAVIRT_GUEST (1 << TIF_PARAVIRT_GUEST)
 #define _TIF_PSEUDOTHREAD (1 << TIF_PSEUDOTHREAD)
-#define _TIF_HOST_AT_VCPU_MODE (1 << TIF_HOST_AT_VCPU_MODE)
 #define _TIF_VIRQS_ACTIVE (1 << TIF_VIRQS_ACTIVE)
-#define _TIF_VIRQ_HANDLER (1 << TIF_VIRQ_HANDLER)
 #define _TIF_LIGHT_HYPERCALL (1 << TIF_LIGHT_HYPERCALL)
 #define _TIF_GENERIC_HYPERCALL (1 << TIF_GENERIC_HYPERCALL)
 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
@@ -298,18 +292,18 @@ typedef struct thread_info {
  * have to worry about atomic accesses.
  */
 #define TS_DELAYED_SIG_HANDLING 0x00000001
-#define TS_KEEP_PAGES_VALID 0x00000004
-#define TS_MMAP_PRIVILEGED 0x00000010
-#define TS_MMAP_DONTEXPAND 0x00000020
-#define TS_MMAP_DONTCOPY 0x00000040
-#define TS_KERNEL_SYSCALL 0x00000100
-#define TS_USER_EXECVE 0x00001000
-#define TS_MMAP_PS 0x00010000
-#define TS_MMAP_PCS 0x00020000
-#define TS_MMAP_NOHUGEPAGE 0x00040000
-#define TS_MMAP_SIGNAL_STACK 0x00080000
-#define TS_SINGLESTEP_KERNEL 0x00100000
-#define TS_SINGLESTEP_USER 0x00200000
+#define TS_KEEP_PAGES_VALID 0x00000002
+#define TS_MMAP_PRIVILEGED 0x00000004
+#define TS_MMAP_PS 0x00000008
+#define TS_MMAP_PCS 0x00000010
+#define TS_MMAP_SIGNAL_STACK 0x00000020
+#define TS_KERNEL_SYSCALL 0x00000040
+#define TS_USER_EXECVE 0x00000080
+#define TS_SINGLESTEP_KERNEL 0x00000100
+#define TS_SINGLESTEP_USER 0x00000200
+/* the host thread is switching to VCPU running mode
+ * and wait for interception (trap on PV mode) */
+#define TS_HOST_AT_VCPU_MODE 0x00001000

 #define THREAD_SIZE KERNEL_STACKS_SIZE

@@ -343,12 +337,22 @@ static inline unsigned long test_ti_status_flag(struct thread_info *ti,
 	return ti->status & flag;
 }

+static inline unsigned long test_and_clear_ti_status_flag(
+		struct thread_info *ti, int flag)
+{
+	typeof(ti->status) status = ti->status;
+	ti->status = status & ~flag;
+	return status & flag;
+}
+
 #define set_ts_flag(flag) \
	set_ti_status_flag(current_thread_info(), flag)
 #define clear_ts_flag(flag) \
	clear_ti_status_flag(current_thread_info(), flag)
 #define test_ts_flag(flag) \
	test_ti_status_flag(current_thread_info(), flag)
+#define test_and_clear_ts_flag(flag) \
+	test_and_clear_ti_status_flag(current_thread_info(), flag)

 #define native_current_thread_info() current_thread_info()
 #define boot_current_thread_info() BOOT_READ_CURRENT_REG()
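Note on the new helper above: test_and_clear_ts_flag() reads and clears a status bit in one step, which suits one-shot flags; as the comment at the top of this block says, ->status is only accessed by the owning thread, so the plain read-modify-write is enough. A usage fragment (the flag is just an example, taken from the renumbered set above):

	if (test_and_clear_ts_flag(TS_HOST_AT_VCPU_MODE)) {
		/* the flag was set: handle the return from VCPU mode once */
	}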
@@ -29,10 +29,6 @@
  * This is needed to flush SLT before trying to load anything.
  */
 #define SWITCH_HW_STACKS_FROM_USER(...) \
-	{ \
-		/* Disable load/store generations */ \
-		crp; \
-	} \
	{ \
		/* Wait for FPU exceptions before switching stacks */ \
		wait all_e = 1; \
@@ -43,6 +39,18 @@
		rrd %psp.hi, GCURTASK; \
		stgdq,sm %qg18, 0, TSK_TI_G_MY_CPU_OFFSET; \
		cmpesb,1 0, 0, %pred0; \
+		/* Do restore %rpr (it's clobbered by "crp" below) */ \
+		cmpesb,3 0, 0, %pred1; \
+	} \
+	{ \
+		/* 'crp' instruction also clears %rpr besides the generations \
+		 * table, so make sure we preserve %rpr value. */ \
+		rrd %rpr.lo, GCPUOFFSET; \
+	} \
+	{ \
+		rrd %rpr.hi, GCPUID; \
+		/* Disable load/store generations */ \
+		crp; \
	} \
	SWITCH_HW_STACKS(TSK_TI_, ##__VA_ARGS__)

@@ -50,6 +58,9 @@
  * This assumes that GVCPUSTATE points to current_thread_info()
  * and %psp.hi has been read into GCURTASK
  *
+ * %pred0 - set to "true" if PSP/PCSP should be switched.
+ * %pred1 - set to "true" if RPR should be restored.
+ *
  * Does the following:
  *
  * 1) Saves global registers either to 'thread_info.tmp_k_gregs' or to
@@ -65,14 +76,18 @@
  */
 #define SWITCH_HW_STACKS(PREFIX, ...) \
	{ \
-		ldd,0 GVCPUSTATE, TI_K_PSP_LO, GCPUOFFSET; \
-		ldd,2 GVCPUSTATE, TI_K_PCSP_LO, GCPUID; \
+		rwd GCPUOFFSET, %rpr.lo ? %pred1; \
+		ldd,2 GVCPUSTATE, TI_K_PSP_LO, GCPUOFFSET; \
		__VA_ARGS__ \
	} \
+	{ \
+		rwd GCPUID, %rpr.hi ? %pred1; \
+		ldd,2 GVCPUSTATE, TI_K_PCSP_LO, GCPUID; \
+	} \
	{ \
		rrd %psp.lo, GCURTASK ? %pred0; \
		stgdd,2 GCURTASK, 0, TSK_TI_TMP_U_PSP_HI ? %pred0; \
-		SMP_ONLY(ldw,5 GVCPUSTATE, TSK_TI_CPU_DELTA, GCPUID ? ~ %pred0;) \
+		SMP_ONLY(ldgdw,5 0, TSK_TI_CPU_DELTA, GCPUID ? ~ %pred0;) \
	} \
	{ \
		rrd %pcsp.hi, GCURTASK ? %pred0; \
@@ -82,7 +97,6 @@
		ibranch trap_handler_switched_stacks ? ~ %pred0; \
	} \
	{ \
-		nop 1; /* ldd -> use */ \
		rrd %pcsp.lo, GCURTASK; \
		stgdd,2 GCURTASK, 0, TSK_TI_TMP_U_PCSP_HI; \
	} \
@@ -35,12 +35,29 @@ static inline bool
 is_gdb_breakpoint_trap(struct pt_regs *regs)
 {
	u64 *instr = (u64 *)GET_IP_CR0_HI(regs->crs.cr0_hi);
-	return (*instr & GDB_BREAKPOINT_STUB_MASK) == GDB_BREAKPOINT_STUB;
+	u64 sylab;
+
+	host_get_user(sylab, instr, regs);
+	return (sylab & GDB_BREAKPOINT_STUB_MASK) == GDB_BREAKPOINT_STUB;
 }

 extern void kernel_stack_overflow(unsigned int overflows);

+static inline void native_clear_fork_child_pt_regs(struct pt_regs *childregs)
+{
+	childregs->sys_rval = 0;
+	/*
+	 * Remove all pointers to parent's data stack
+	 * (these are not needed anyway for system calls)
+	 */
+	childregs->trap = NULL;
+	childregs->aau_context = NULL;
+#ifdef CONFIG_KERNEL_TIMES_ACCOUNT
+	childregs->scall_times = NULL;
+#endif
+	childregs->next = NULL;
+}
+
 static inline unsigned int
 native_is_kernel_data_stack_bounds(bool trap_on_kernel, e2k_usd_lo_t usd_lo)
 {
@@ -63,23 +80,14 @@ native_correct_trap_return_ip(struct pt_regs *regs, unsigned long return_ip)
	}
	regs->crs.cr0_hi.CR0_hi_IP = return_ip;
 }
-static inline void
-native_handle_deferred_traps_in_syscall(struct pt_regs *regs)
-{
-	/* none deferred traps in system call */
-}
-static inline bool
-native_have_deferred_traps(struct pt_regs *regs)
-{
-	return false; /* none deferred traps in system call */
-}

-static inline void
+static inline int
 native_do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address,
		const tc_cond_t condition, const tc_mask_t mask,
		const unsigned int aa_no)
 {
	(void)do_page_fault(regs, address, condition, mask, 0);
+	return 0;
 }

 extern long native_ttable_entry1(int sys_num, ...);
@@ -160,17 +168,17 @@ extern const system_call_func sys_call_table_deprecated[NR_syscalls];

 #define FILL_HARDWARE_STACKS() NATIVE_FILL_HARDWARE_STACKS()

+static inline void clear_fork_child_pt_regs(struct pt_regs *childregs)
+{
+	native_clear_fork_child_pt_regs(childregs);
+}
+
 static inline void
 correct_trap_return_ip(struct pt_regs *regs, unsigned long return_ip)
 {
	native_correct_trap_return_ip(regs, return_ip);
 }
 static inline void
-handle_deferred_traps_in_syscall(struct pt_regs *regs)
-{
-	native_handle_deferred_traps_in_syscall(regs);
-}
-static inline void
 stack_bounds_trap_enable(void)
 {
	native_stack_bounds_trap_enable();
@@ -203,12 +211,12 @@ kvm_mmio_page_fault(struct pt_regs *regs, trap_cellar_t *tcellar)
 #define instr_page_fault(__regs, __ftype, __async) \
	native_do_instr_page_fault(__regs, __ftype, __async)

-static inline void
+static inline int
 do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address,
		const tc_cond_t condition, const tc_mask_t mask,
		const unsigned int aa_no)
 {
-	native_do_aau_page_fault(regs, address, condition, mask, aa_no);
+	return native_do_aau_page_fault(regs, address, condition, mask, aa_no);
 }

 static inline unsigned int
@@ -250,7 +258,7 @@ static inline void init_pt_regs_for_syscall(struct pt_regs *regs)
	regs->aau_context = NULL;
 #endif

-	regs->flags = 0;
+	AW(regs->flags) = 0;
	init_guest_syscalls_handling(regs);
 }
 #endif
@@ -34,6 +34,9 @@ do { \
	force_sig_fault(signo, code, addr, trapno); \
 } while (0)

+extern int pf_on_page_boundary(unsigned long address, tc_cond_t cond);
+extern bool is_spurious_qp_store(bool store, unsigned long address,
+		int fmt, tc_mask_t mask, unsigned long *pf_address);
 extern void parse_TIR_registers(struct pt_regs *regs, u64 exceptions);
 extern void do_aau_fault(int aa_field, struct pt_regs *regs);
 extern int handle_proc_stack_bounds(struct e2k_stacks *stacks,
@@ -119,9 +119,15 @@ struct exception_table_entry
	might_fault(); \
	__TRY_USR_PFAULT

+#pragma unknown_control_flow(set_usr_pfault_jump)
+static __always_inline void set_usr_pfault_jump(void)
+{
+	SAVE_CURRENT_ADDR(&current_thread_info()->usr_pfault_jump);
+}
+
 #define __TRY_USR_PFAULT \
-	unsigned long _usr_pfault_jmp = current_thread_info()->usr_pfault_jump;\
-	SAVE_CURRENT_ADDR(&current_thread_info()->usr_pfault_jump); \
+	unsigned long _usr_pfault_jmp = current_thread_info()->usr_pfault_jump; \
+	set_usr_pfault_jump(); \
	if (likely(current_thread_info()->usr_pfault_jump)) {

 #define CATCH_USR_PFAULT \
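Note on the refactoring above: moving SAVE_CURRENT_ADDR() into set_usr_pfault_jump() and marking it with #pragma unknown_control_flow presumably tells the e2k compiler that control may re-enter at that point after a user-access fault, so values are not cached across it. A usage fragment of the surrounding macros, assuming the conventional END_USR_PFAULT terminator used with these TRY/CATCH macros elsewhere (an assumption, not shown in this hunk; user_ptr is illustrative):

	int val;

	TRY_USR_PFAULT {
		val = *user_ptr;	/* may fault on a bad user pointer */
	} CATCH_USR_PFAULT {
		return -EFAULT;
	} END_USR_PFAULT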
Some files were not shown because too many files have changed in this diff.