linux-headers-5.4.0-3.18

master
Alibek Omarov 1 year ago
parent 2013552b5f
commit ab03d80f80

@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 154
EXTRAVERSION = -3.13
SUBLEVEL = 170
EXTRAVERSION = -3.18
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*

@ -739,6 +739,8 @@ default_check_phys_apicid_present(int phys_apicid)
{
return __default_check_phys_apicid_present(phys_apicid);
}
extern bool default_check_phys_apicid_online(void);
#else
extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int phys_apicid);

@ -134,7 +134,9 @@ extern unsigned int load_threshold;
int spmc_get_temp_cur0(void) { return SPMC_TEMP_BAD_VALUE; }
#endif /* CONFIG_L_PMC || CONFIG_S2_PMC */
#if defined(CONFIG_PMC_R2KP)
uint32_t r2kp_get_freq_mult(int cpu);
#endif
#endif /* __L_ASM_PMC_H__ */

@ -630,6 +630,7 @@ static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
extern physid_mask_t phys_cpu_present_map;
extern physid_mask_t phys_cpu_offline_map;
#endif /* __ASSEMBLY__ */

@ -23,15 +23,6 @@
#define STD_COM4_FLAGS UPF_BOOT_AUTOCONF
#endif
#ifdef CONFIG_E2K
#define SERIAL_PORT_DFNS \
/* UART CLK PORT IRQ FLAGS */ \
{ 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
{ 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
{ 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
{ 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
#endif
#define AM85C30_RES_Tx_P 0x28
#define AM85C30_EXT_INT_ENAB 0x01
#define AM85C30_TxINT_ENAB 0x02

@ -244,12 +244,12 @@ kernel_hw_stack_frames_copy(u64 *dst, const u64 *src, unsigned long size)
native_kernel_hw_stack_frames_copy(dst, src, size);
}
static __always_inline void
collapse_kernel_pcs(u64 *dst, const u64 *src, u64 spilled_size)
collapse_kernel_pcs(pt_regs_t *regs, u64 *dst, const u64 *src, u64 spilled_size)
{
native_collapse_kernel_pcs(dst, src, spilled_size);
}
static __always_inline void
collapse_kernel_ps(u64 *dst, const u64 *src, u64 spilled_size)
collapse_kernel_ps(pt_regs_t *regs, u64 *dst, const u64 *src, u64 spilled_size)
{
native_collapse_kernel_ps(dst, src, spilled_size);
}
@ -601,7 +601,8 @@ native_user_hw_stacks_copy(struct e2k_stacks *stacks,
return 0;
}
static inline void collapse_kernel_hw_stacks(struct e2k_stacks *stacks)
static inline void collapse_kernel_hw_stacks(pt_regs_t *regs,
struct e2k_stacks *stacks)
{
e2k_pcsp_lo_t k_pcsp_lo = current_thread_info()->k_pcsp_lo;
e2k_psp_lo_t k_psp_lo = current_thread_info()->k_psp_lo;
@ -635,7 +636,7 @@ static inline void collapse_kernel_hw_stacks(struct e2k_stacks *stacks)
if (spilled_pc_size) {
dst = (u64 *) AS(k_pcsp_lo).base;
src = (u64 *) (AS(k_pcsp_lo).base + spilled_pc_size);
collapse_kernel_pcs(dst, src, spilled_pc_size);
collapse_kernel_pcs(regs, dst, src, spilled_pc_size);
stacks->pcshtp = SZ_OF_CR;
@ -645,7 +646,7 @@ static inline void collapse_kernel_hw_stacks(struct e2k_stacks *stacks)
if (spilled_p_size) {
dst = (u64 *) AS(k_psp_lo).base;
src = (u64 *) (AS(k_psp_lo).base + spilled_p_size);
collapse_kernel_ps(dst, src, spilled_p_size);
collapse_kernel_ps(regs, dst, src, spilled_p_size);
AS(pshtp).ind = 0;
stacks->pshtp = pshtp;
@ -823,7 +824,7 @@ static inline int do_user_hw_stacks_copy_full(struct e2k_stacks *stacks,
* this way we can later FILL using return trick (otherwise there
* would be no space in chain stack for the trick).
*/
collapse_kernel_hw_stacks(stacks);
collapse_kernel_hw_stacks(regs, stacks);
/*
* Copy saved %cr registers

@ -34,6 +34,8 @@ enum {
CPU_NO_HWBUG_SOFT_WAIT,
CPU_HWBUG_SOFT_WAIT_E8C2,
CPU_HWBUG_C3,
CPU_HWBUG_HRET_INTC_CU,
CPU_HWBUG_INTC_CR_WRITE,
/* Features, not bugs */
CPU_FEAT_EPIC,

@ -1,6 +1,11 @@
#ifndef __ASM_E2K_IOMMU_H
#define __ASM_E2K_IOMMU_H
#ifdef CONFIG_EPIC
extern void e2k_iommu_error_interrupt(void);
#else
static inline void e2k_iommu_error_interrupt(void) {}
#endif
extern int iommu_panic_off;
extern void e2k_iommu_error_interrupt(void);

@ -924,7 +924,6 @@ _Pragma("no_asm_inline") \
NATIVE_SET_DSREG_CLOSED_NOEXC(reg, (val), 7)
#endif
/*
* bug #97048
* Closed GNU asm is used for rarely read registers.
@ -1007,6 +1006,24 @@ _Pragma("no_asm_inline") \
: "ri" ((__e2k_u64_t) (val))); \
})
/* Add ctpr3 clobber to avoid writing CRs between return and ct */
#define NATIVE_SET_CR_CLOSED_NOEXC(reg_mnemonic, val) \
({ \
asm volatile ( \
ALTERNATIVE_1_ALTINSTR \
/* CPU_HWBUG_INTC_CR_WRITE version */ \
"{wait ma_c=1\n" \
"rwd %0, %%" #reg_mnemonic "}" \
ALTERNATIVE_2_OLDINSTR \
/* Default version */ \
"{rwd %0, %%" #reg_mnemonic "}" \
ALTERNATIVE_3_FEATURE(%[facility]) \
: \
: "ri" ((__e2k_u64_t) (val)), \
[facility] "i" (CPU_HWBUG_INTC_CR_WRITE) \
: "ctpr3"); \
})
#define NATIVE_SET_DSREGS_CLOSED_NOEXC(reg_mnemonic_lo, reg_mnemonic_hi, \
_val_lo, _val_hi, nop) \
({ \
@ -6689,16 +6706,38 @@ do { \
/* Clobbers "ctpr" are here to tell lcc that there is a return inside */
#define E2K_HRET_CLOBBERS "ctpr1", "ctpr2", "ctpr3"
#define E2K_HRET_READ_INTC_PTR_CU \
".word 0x04100011\n" /* rrd,0 %intc_ptr_cu, %dr0 */ \
".word 0x3f65c080\n" \
".word 0x01c00000\n" \
".word 0x00000000\n"
#define E2K_HRET_CLEAR_INTC_INFO_CU \
".word 0x04100291\n" /* nop 5 */ \
".word 0x3dc0c064\n" /* rwd,0 0x0, %intc_info_cu */ \
".word 0x01c00000\n" \
".word 0x00000000\n"
#define E2K_HRET(_ret) \
do { \
asm volatile ( \
ALTERNATIVE_1_ALTINSTR \
/* CPU_HWBUG_HRET_INTC_CU version */ \
E2K_HRET_READ_INTC_PTR_CU \
E2K_HRET_CLEAR_INTC_INFO_CU \
E2K_HRET_CLEAR_INTC_INFO_CU \
E2K_HRET_READ_INTC_PTR_CU \
ALTERNATIVE_2_OLDINSTR \
/* Default version */ \
ALTERNATIVE_3_FEATURE(%[facility]) \
"addd 0x0, %[ret], %%r0\n" \
"{.word 0x00005012\n" /* HRET */ \
" .word 0xc0000020\n" \
" .word 0x30000003\n" \
" .word 0x00000000}\n" \
: \
: [ret] "ir" (_ret) \
: [facility] "i" (CPU_HWBUG_HRET_INTC_CU), \
[ret] "ir" (_ret) \
: E2K_HRET_CLOBBERS); \
unreachable(); \
} while (0)

@ -32,7 +32,7 @@
})
extern void print_stack_frames(struct task_struct *task,
struct pt_regs *pt_regs, int show_reg_window) __cold;
const struct pt_regs *pt_regs, int show_reg_window) __cold;
extern void print_mmap(struct task_struct *task) __cold;
extern void print_va_tlb(e2k_addr_t addr, int large_page) __cold;
extern void print_all_TC(const trap_cellar_t *TC, int TC_count) __cold;
@ -862,6 +862,18 @@ do { \
current->comm, current->pid, ##__VA_ARGS__); \
} while (0)
extern void __debug_signal_print(const char *message,
struct pt_regs *regs, bool print_stack) __cold;
static inline void debug_signal_print(const char *message,
struct pt_regs *regs, bool print_stack)
{
if (likely(!debug_signal))
return;
__debug_signal_print(message, regs, print_stack);
}
extern int debug_trap;
#endif /* !(__ASSEMBLY__) */

@ -11,11 +11,6 @@ enum die_val {
DIE_BREAKPOINT
};
extern void printk_address(unsigned long address, int reliable) __cold;
extern void show_trace(struct task_struct *t, struct pt_regs *regs,
unsigned long *sp, unsigned long bp) __cold;
extern void __show_regs(struct pt_regs *regs, int all) __cold;
extern void show_regs(struct pt_regs *regs) __cold;
extern void die(const char *str, struct pt_regs *regs, long err) __cold;
#endif /* _ASM_E2K_KDEBUG_H */

@ -201,15 +201,24 @@ static inline void write_SH_CORE_MODE_reg(e2k_core_mode_t core_mode)
#endif /* CONFIG_VIRTUALIZATION */
#define READ_G_PREEMPT_TMR_REG() \
((e2k_g_preempt_tmr_t) NATIVE_GET_SREG_CLOSED(g_preempt_tmr))
((g_preempt_tmr_t) NATIVE_GET_DSREG_CLOSED(g_preempt_tmr))
#define WRITE_G_PREEMPT_TMR_REG(x) \
NATIVE_SET_SREG_CLOSED_NOEXC(g_preempt_tmr, AW(x), 5)
NATIVE_SET_DSREG_CLOSED_NOEXC(g_preempt_tmr, AW(x), 5)
#define READ_INTC_PTR_CU() NATIVE_GET_DSREG_CLOSED(intc_ptr_cu)
#define READ_INTC_INFO_CU() NATIVE_GET_DSREG_CLOSED(intc_info_cu)
#define WRITE_INTC_INFO_CU(x) \
NATIVE_SET_DSREG_CLOSED_NOEXC(intc_info_cu, x, 5)
/* Clear INTC_INFO_CU header and INTC_PTR_CU */
static inline void clear_intc_info_cu(void)
{
READ_INTC_PTR_CU();
WRITE_INTC_INFO_CU(0ULL);
WRITE_INTC_INFO_CU(0ULL);
READ_INTC_PTR_CU();
}
static inline void save_intc_info_cu(intc_info_cu_t *info, int *num)
{
u64 info_ptr, i = 0;
@ -227,14 +236,8 @@ static inline void save_intc_info_cu(intc_info_cu_t *info, int *num)
return;
}
/*
* CU header should be cleared --- fg@mcst.ru
*/
AW(info->header.lo) = READ_INTC_INFO_CU();
AW(info->header.hi) = READ_INTC_INFO_CU();
READ_INTC_PTR_CU();
WRITE_INTC_INFO_CU(0ULL);
WRITE_INTC_INFO_CU(0ULL);
info_ptr -= 2;
/*
@ -254,17 +257,16 @@ static inline void restore_intc_info_cu(const intc_info_cu_t *info, int num)
{
int i;
/*
* 1) Clear the hardware pointer
*/
/* Clear the pointer, in case we just migrated to new cpu */
READ_INTC_PTR_CU();
if (num == -1)
/* Header will be cleared by hardware during GLAUNCH */
if (num == -1 || num == 0)
return;
/*
* 2) Write the registers
*
* CU header should be cleared --- fg@mcst.ru
* Restore intercepted events. Header flags aren't used for reexecution,
* so restore 0 in header.
*/
WRITE_INTC_INFO_CU(0ULL);
WRITE_INTC_INFO_CU(0ULL);

@ -0,0 +1,33 @@
/*
* arch/e2k/include/asm/kvm/ctx_signal_stacks.h
*
* This file contains interfaces for managing separate signal stacks
* for guest contexts
*
* Copyright 2022 Andrey Alekhin (Andrey.I.Alekhin@mcst.ru)
*/
#ifndef CTX_SIGNAL_STACKS
#define CTX_SIGNAL_STACKS
#include <linux/rhashtable-types.h>
#include <asm/thread_info.h>
enum {
CTX_STACK_READY = 0U, /* Stack is free to take */
CTX_STACK_BUSY = 1U, /* Stack is currently busy by thread */
CTX_STACK_COPYING = 2U /* Stack is being copied in fork() */
};
struct rhashtable *alloc_gst_ctx_sig_stacks_ht(void);
void free_gst_ctx_sig_stacks_ht(struct rhashtable *ht);
struct rhashtable *copy_gst_ctx_sig_stacks_ht(void);
int add_gst_ctx_signal_stack(struct rhashtable *ht,
struct signal_stack *signal_stack,
u64 key, int state);
void remove_gst_ctx_signal_stack(u64 key);
int switch_gst_ctx_signal_stack(u64 to_key);
int update_curr_gst_signal_stack(void);
#endif /* CTX_SIGNAL_STACKS */
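A minimal usage sketch of the API declared above (hypothetical: the helper name, the error handling, and the use of the gmm->ctx_stacks field added later in this patch as the backing table are assumptions, not taken from the patch):
/* Hypothetical host-side helper: register a guest context's signal stack
 * and make it current. Assumes gmm->ctx_stacks is the backing rhashtable. */
static int example_track_ctx_signal_stack(gmm_struct_t *gmm,
        struct signal_stack *sig_st, u64 ctx_key)
{
        int ret;

        if (!gmm->ctx_stacks) {
                struct rhashtable *ht = alloc_gst_ctx_sig_stacks_ht();

                if (IS_ERR_OR_NULL(ht))
                        return ht ? PTR_ERR(ht) : -ENOMEM;
                gmm->ctx_stacks = ht;
        }

        /* Register the stack as free to take ... */
        ret = add_gst_ctx_signal_stack(gmm->ctx_stacks, sig_st, ctx_key,
                        CTX_STACK_READY);
        if (ret)
                return ret;

        /* ... and switch to it for this guest context key */
        return switch_gst_ctx_signal_stack(ctx_key);
}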

@ -34,7 +34,7 @@ kvm_kernel_hw_stack_frames_copy(u64 *dst, const u64 *src, unsigned long size)
}
static __always_inline void
kvm_collapse_kernel_ps(u64 *dst, const u64 *src, u64 spilled_size)
kvm_collapse_kernel_ps(pt_regs_t *regs, u64 *dst, const u64 *src, u64 spilled_size)
{
e2k_psp_hi_t k_psp_hi;
u64 ps_ind, ps_size;
@ -55,6 +55,8 @@ kvm_collapse_kernel_ps(u64 *dst, const u64 *src, u64 spilled_size)
k_psp_hi = NATIVE_NV_READ_PSP_HI_REG();
k_psp_hi.PSP_hi_ind = size;
HYPERVISOR_update_psp_hi(k_psp_hi.PSP_hi_half);
BUG_ON(regs->copyed.ps_size < spilled_size);
regs->copyed.ps_size -= spilled_size;
DebugUST("move spilled procedure part from host top %px to "
"bottom %px, size 0x%llx\n",
@ -65,7 +67,7 @@ kvm_collapse_kernel_ps(u64 *dst, const u64 *src, u64 spilled_size)
}
static __always_inline void
kvm_collapse_kernel_pcs(u64 *dst, const u64 *src, u64 spilled_size)
kvm_collapse_kernel_pcs(pt_regs_t *regs, u64 *dst, const u64 *src, u64 spilled_size)
{
e2k_pcsp_hi_t k_pcsp_hi;
u64 pcs_ind, pcs_size;
@ -86,6 +88,8 @@ kvm_collapse_kernel_pcs(u64 *dst, const u64 *src, u64 spilled_size)
k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG();
k_pcsp_hi.PCSP_hi_ind = size;
HYPERVISOR_update_pcsp_hi(k_pcsp_hi.PCSP_hi_half);
BUG_ON(regs->copyed.pcs_size < spilled_size);
regs->copyed.pcs_size -= spilled_size;
DebugUST("move spilled chain part from host top %px to "
"bottom %px, size 0x%llx\n",
@ -504,7 +508,8 @@ kvm_copy_injected_pcs_frames_to_user(pt_regs_t *regs, int frames_num)
ATOMIC_GET_HW_PCS_SIZES_BASE_TOP(pcs_ind, pcs_size, pcs_base, pcsh_top);
/* guest user stacks part spilled to kernel should already be copied */
BUG_ON(PCSHTP_SIGN_EXTEND(regs->copyed.pcs_size != stacks->pcshtp));
BUG_ON(PCSHTP_SIGN_EXTEND(regs->copyed.pcs_size != stacks->pcshtp &&
stacks->pcshtp != SZ_OF_CR));
src = (void *)(pcs_base + regs->copyed.pcs_size);
DebugUST("chain stack at kernel from %px, size 0x%lx + 0x%lx, "
@ -647,15 +652,15 @@ kernel_hw_stack_frames_copy(u64 *dst, const u64 *src, unsigned long size)
}
static __always_inline void
collapse_kernel_ps(u64 *dst, const u64 *src, u64 spilled_size)
collapse_kernel_ps(pt_regs_t *regs, u64 *dst, const u64 *src, u64 spilled_size)
{
kvm_collapse_kernel_ps(dst, src, spilled_size);
kvm_collapse_kernel_ps(regs, dst, src, spilled_size);
}
static __always_inline void
collapse_kernel_pcs(u64 *dst, const u64 *src, u64 spilled_size)
collapse_kernel_pcs(pt_regs_t *regs, u64 *dst, const u64 *src, u64 spilled_size)
{
kvm_collapse_kernel_pcs(dst, src, spilled_size);
kvm_collapse_kernel_pcs(regs, dst, src, spilled_size);
}
static __always_inline int

@ -0,0 +1,78 @@
#ifndef KVM_GUEST_PROC_CTXT_STACKS
#define KVM_GUEST_PROC_CTXT_STACKS
#include <linux/mm_types.h>
#include <asm/machdep.h>
#include <asm/trap_table.h>
#include <asm/kvm/proc_context_types.h>
#include <asm/copy-hw-stacks.h>
static inline int
kvm_mkctxt_prepare_hw_user_stacks(void (*user_func)(void), void *args,
u64 args_size, size_t d_stack_sz,
bool protected, void *ps_frames,
e2k_mem_crs_t *cs_frames)
{
unsigned long ps_frames_k, cs_frames_k;
struct page *pg_ps_frames, *pg_cs_frames;
int ret = 0;
/* Get kernel address for procedure stack */
pg_ps_frames = get_user_addr_to_kernel_page((unsigned long)ps_frames);
if (IS_ERR_OR_NULL(pg_ps_frames))
ret = (IS_ERR(pg_ps_frames)) ? PTR_ERR(pg_ps_frames) : -EINVAL;
else
ps_frames_k = ((unsigned long)page_address(pg_ps_frames)) +
(((unsigned long)ps_frames) & ~PAGE_MASK);
/* Get kernel address for chain stack */
pg_cs_frames = get_user_addr_to_kernel_page((unsigned long)cs_frames);
if (IS_ERR_OR_NULL(pg_cs_frames))
ret |= (IS_ERR(pg_cs_frames)) ? PTR_ERR(pg_cs_frames) : -EINVAL;
else
cs_frames_k = ((unsigned long)page_address(pg_cs_frames)) +
(((unsigned long)cs_frames) & ~PAGE_MASK);
if (ret)
return ret;
kvm_proc_ctxt_hw_stacks_t hw_stacks = {
.user_func = user_func,
.args = args,
.args_size = args_size,
.d_stack_sz = d_stack_sz,
.protected = protected,
.gst_mkctxt_trampoline = (u64)&kvm_guest_mkctxt_trampoline,
.ps_frames = (void *)ps_frames_k,
.cs_frames = (e2k_mem_crs_t *)cs_frames_k
};
ret = HYPERVISOR_prepare_mkctxt_hw_user_stacks(&hw_stacks);
put_user_addr_to_kernel_page(pg_ps_frames);
put_user_addr_to_kernel_page(pg_cs_frames);
return ret;
}
static inline int
mkctxt_prepare_hw_user_stacks(void (*user_func)(void), void *args,
u64 args_size, size_t d_stack_sz,
bool protected, void *ps_frames,
e2k_mem_crs_t *cs_frames)
{
if (IS_HV_GM()) {
return native_mkctxt_prepare_hw_user_stacks(user_func, args,
args_size, d_stack_sz,
protected, ps_frames,
cs_frames);
} else {
return kvm_mkctxt_prepare_hw_user_stacks(user_func, args,
args_size, d_stack_sz,
protected, ps_frames,
cs_frames);
}
}
#endif /* KVM_GUEST_PROC_CTXT_STACKS */

@ -333,6 +333,12 @@ do { \
#define RESTORE_COMMON_REGS(regs) \
KVM_RESTORE_COMMON_REGS(regs)
#define CLEAR_DAM \
({ \
if (IS_HV_GM()) \
NATIVE_CLEAR_DAM; \
})
static inline void
save_glob_regs_v3(global_regs_t *gregs)
{

@ -8,7 +8,12 @@
extern int kvm_signal_setup(struct pt_regs *regs);
extern int kvm_longjmp_copy_user_to_kernel_hw_stacks(struct pt_regs *regs,
struct pt_regs *new_regs);
extern int kvm_complete_long_jump(struct pt_regs *regs);
extern int kvm_complete_long_jump(struct pt_regs *regs, bool switch_stack,
u64 to_key);
extern void kvm_update_kernel_crs(e2k_mem_crs_t *crs, e2k_mem_crs_t *prev_crs,
e2k_mem_crs_t *p_prev_crs);
extern int kvm_add_ctx_signal_stack(u64 key, bool is_main);
extern void kvm_remove_ctx_signal_stack(u64 key);
#ifdef CONFIG_KVM_GUEST_KERNEL
/* it is native paravirtualized guest kernel */
@ -25,13 +30,38 @@ static inline int longjmp_copy_user_to_kernel_hw_stacks(struct pt_regs *regs,
return kvm_longjmp_copy_user_to_kernel_hw_stacks(regs, new_regs);
}
static inline int complete_long_jump(struct pt_regs *regs)
static inline int complete_long_jump(struct pt_regs *regs, bool switch_stack,
u64 to_key)
{
if (likely(IS_HV_GM())) {
return native_complete_long_jump(regs);
} else {
return kvm_complete_long_jump(regs);
}
if (likely(IS_HV_GM()))
return native_complete_long_jump();
else
return kvm_complete_long_jump(regs, switch_stack, to_key);
}
static inline void update_kernel_crs(e2k_mem_crs_t *k_crs, e2k_mem_crs_t *crs,
e2k_mem_crs_t *prev_crs, e2k_mem_crs_t *p_prev_crs)
{
if (likely(IS_HV_GM()))
native_update_kernel_crs(k_crs, crs, prev_crs, p_prev_crs);
else
kvm_update_kernel_crs(crs, prev_crs, p_prev_crs);
}
static inline int add_ctx_signal_stack(u64 key, bool is_main)
{
if (likely(IS_HV_GM()))
return native_add_ctx_signal_stack(key, is_main);
else
return kvm_add_ctx_signal_stack(key, is_main);
}
static inline void remove_ctx_signal_stack(u64 key)
{
if (likely(IS_HV_GM()))
native_remove_ctx_signal_stack(key);
else
kvm_remove_ctx_signal_stack(key);
}
#endif /* CONFIG_KVM_GUEST_KERNEL */

@ -18,6 +18,8 @@ extern long kvm_guest_ttable_entry5(int sys_num,
extern long kvm_guest_ttable_entry6(int sys_num,
u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6);
extern void kvm_guest_mkctxt_trampoline(void);
static __always_inline void kvm_init_pt_regs_copyed_fields(struct pt_regs *regs)
{
#ifdef CONFIG_KVM_GUEST_KERNEL
@ -205,6 +207,12 @@ is_guest_TIRs_frozen(struct pt_regs *regs)
return false; /* none any guest */
}
static inline bool is_injected_guest_coredump(struct pt_regs *regs)
{
/* nested guests are not supported */
return false;
}
static inline void clear_fork_child_pt_regs(struct pt_regs *childregs)
{
kvm_clear_fork_child_pt_regs(childregs);

@ -40,6 +40,7 @@
#include <asm/cpu_regs_types.h>
#include <asm/trap_def.h>
#include <asm/kvm/guest/cpu.h>
#include <asm/kvm/proc_context_types.h>
#ifdef CONFIG_KVM_GUEST_HW_HCALL
extern unsigned long light_hw_hypercall(unsigned long nr,
@ -254,6 +255,8 @@ static inline unsigned long generic_hypercall6(unsigned long nr,
#define KVM_HCALL_FAST_KERNEL_TAGGED_MEMORY_COPY 40
/* fast guest kernel tagged memory set */
#define KVM_HCALL_FAST_KERNEL_TAGGED_MEMORY_SET 41
/* update last 2 frames on guest kernel stack */
#define KVM_HCALL_UPDATE_GUEST_KERNEL_CRS 42
typedef struct kvm_hw_stacks_flush {
@ -404,7 +407,6 @@ HYPERVISOR_inject_interrupt(void)
{
return light_hypercall0(KVM_HCALL_INJECT_INTERRUPT);
}
extern unsigned long kvm_hypervisor_inject_interrupt(void);
static inline unsigned long
HYPERVISOR_virqs_handled(void)
{
@ -509,6 +511,14 @@ HYPERVISOR_fast_kernel_tagged_memory_set(void *addr, u64 val, u64 tag, size_t le
return light_hypercall5(KVM_HCALL_FAST_KERNEL_TAGGED_MEMORY_SET,
(unsigned long)addr, val, tag, len, strd_opcode);
}
static inline unsigned long
HYPERVISOR_update_guest_kernel_crs(e2k_mem_crs_t *crs, e2k_mem_crs_t *prev_crs,
e2k_mem_crs_t *p_prev_crs)
{
return light_hypercall3(KVM_HCALL_UPDATE_GUEST_KERNEL_CRS,
(unsigned long)crs, (unsigned long)prev_crs,
(unsigned long)p_prev_crs);
}
/*
* KVM hypervisor (host) <-> guest generic hypercalls list
@ -689,6 +699,15 @@ HYPERVISOR_fast_kernel_tagged_memory_set(void *addr, u64 val, u64 tag, size_t le
/* recovery faulted load */
/* value and tag to global */
/* register */
#define KVM_HCALL_PREPARE_MKCTXT_HW_USER_STACKS 145
#define KVM_HCALL_ADD_CTX_SIGNAL_STACK 146
/* create separate */
/* signal stack for context */
/* on host side */
#define KVM_HCALL_REMOVE_CTX_SIGNAL_STACK 147
/* remove signal stack for */
/* context on host side */
/*
@ -904,10 +923,13 @@ HYPERVISOR_set_clockevent(unsigned long delta)
}
static inline unsigned long
HYPERVISOR_complete_long_jump(kvm_long_jump_info_t *regs_state)
HYPERVISOR_complete_long_jump(kvm_long_jump_info_t *regs_state,
bool switch_stack, u64 to_key)
{
return generic_hypercall1(KVM_HCALL_COMPLETE_LONG_JUMP,
(unsigned long)regs_state);
return generic_hypercall3(KVM_HCALL_COMPLETE_LONG_JUMP,
(unsigned long)regs_state,
(unsigned long)switch_stack,
(unsigned long)to_key);
}
static inline unsigned long
@ -1566,6 +1588,13 @@ static inline int HYPERVISOR_pv_enable_async_pf(u64 apf_reason_gpa,
apf_ready_vector, irq_controller);
}
#endif /* CONFIG_KVM_ASYNC_PF */
static inline int
HYPERVISOR_prepare_mkctxt_hw_user_stacks(kvm_proc_ctxt_hw_stacks_t *hw_stacks)
{
return generic_hypercall1(KVM_HCALL_PREPARE_MKCTXT_HW_USER_STACKS,
(unsigned long)hw_stacks);
}
/*
* The structure to flush guest virtual space at the host shadow PTs
@ -1622,4 +1651,17 @@ HYPERVISOR_wait_for_virq(int virq, bool in_progress)
return generic_hypercall2(KVM_HCALL_WAIT_FOR_VIRQ, virq, in_progress);
}
static inline unsigned long
HYPERVISOR_add_ctx_signal_stack(u64 key, bool is_main)
{
return generic_hypercall2(KVM_HCALL_ADD_CTX_SIGNAL_STACK,
key, is_main);
}
static inline void
HYPERVISOR_remove_ctx_signal_stack(u64 key)
{
generic_hypercall1(KVM_HCALL_REMOVE_CTX_SIGNAL_STACK, key);
}
#endif /* _ASM_E2K_HYPERCALL_H */
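For orientation, the guest-side wrappers declared in the signal header (kvm_add_ctx_signal_stack, kvm_remove_ctx_signal_stack) plausibly reduce to the two hypercalls above; the sketch below is an assumption for illustration, the real definitions live in guest source files outside this diff.
/* Plausible guest-side definitions (assumption; not part of this diff) */
int kvm_add_ctx_signal_stack(u64 key, bool is_main)
{
        return (int) HYPERVISOR_add_ctx_signal_stack(key, is_main);
}

void kvm_remove_ctx_signal_stack(u64 key)
{
        HYPERVISOR_remove_ctx_signal_stack(key);
}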

@ -102,5 +102,15 @@ static inline bool kvm_test_hprv_feats_bit(int feature_bit)
#define IS_PV_APIC_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_PV_APIC_MASK)
#define IS_PV_EPIC_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_PV_EPIC_MASK)
static inline unsigned long kvm_hypervisor_inject_interrupt(void)
{
/*
* Not yet fully implemented
* The real implementation requires checking for interrupts and only
* after that call the host to inject interrupt
return HYPERVISOR_inject_interrupt();
*/
return 0;
}
#endif /* _ASM_E2K_KVM_HYPERVISOR_H */

@ -57,6 +57,8 @@ typedef struct gmm_struct {
cpumask_t cpu_vm_mask; /* mask of CPUs where the mm is */
/* in use or was some early */
gva_cache_t *gva_cache; /* gva -> gpa,hva cache */
struct rhashtable *ctx_stacks; /* hash table with signal stacks */
/* for contexts created by guest */
} gmm_struct_t;
/* same as accessor for struct mm_struct's cpu_vm_mask but for guest mm */
@ -113,7 +115,7 @@ kvm_find_gmmid(gmmid_table_t *gmmid_table, int gmmid_nr)
{
kvm_nid_t *nid;
nid = kvm_try_find_nid(gmmid_table, gmmid_nr, gmmid_hashfn(gmmid_nr));
nid = kvm_find_nid(gmmid_table, gmmid_nr, gmmid_hashfn(gmmid_nr));
if (nid == NULL)
return NULL;
return gmmid_entry(nid);

@ -33,6 +33,8 @@
#define PFERR_HW_ACCESS_BIT 17
#define PFERR_USER_ADDR_BIT 18
#define PFERR_ILLEGAL_PAGE_BIT 19
#define PFERR_DONT_INJECT_BIT 20
#define PFERR_SPEC_BIT 21
#define PFERR_ACCESS_SIZE_BIT 24
@ -56,6 +58,8 @@
#define PFERR_HW_ACCESS_MASK (1U << PFERR_HW_ACCESS_BIT)
#define PFERR_USER_ADDR_MASK (1U << PFERR_USER_ADDR_BIT)
#define PFERR_ILLEGAL_PAGE_MASK (1U << PFERR_ILLEGAL_PAGE_BIT)
#define PFERR_DONT_INJECT_MASK (1U << PFERR_DONT_INJECT_BIT)
#define PFERR_SPEC_MASK (1U << PFERR_SPEC_BIT)
#define PFERR_ACCESS_SIZE_MASK (~0U << PFERR_ACCESS_SIZE_BIT)

@ -75,21 +75,6 @@ kvm_find_nid(struct kvm_nid_table *nid_table, int nid_nr, int hash_index)
return nid;
}
static inline kvm_nid_t *
kvm_try_find_nid(struct kvm_nid_table *nid_table, int nid_nr, int hash_index)
{
kvm_nid_t *nid;
unsigned long flags;
bool locked;
locked = raw_spin_trylock_irqsave(&nid_table->nidmap_lock, flags);
nid = kvm_do_find_nid(nid_table, nid_nr, hash_index);
if (likely(locked)) {
raw_spin_unlock_irqrestore(&nid_table->nidmap_lock, flags);
}
return nid;
}
#define for_each_guest_nid_node(node, entry, next, nid_table, \
nid_hlist_member) \
for ((entry) = 0; (entry) < (nid_table)->nid_hash_size; (entry)++) \

@ -0,0 +1,9 @@
#ifndef KVM_PROC_CTXT_STACKS
#define KVM_PROC_CTXT_STACKS
#include <asm/kvm/proc_context_types.h>
unsigned long kvm_prepare_gst_mkctxt_hw_stacks(struct kvm_vcpu *vcpu,
kvm_proc_ctxt_hw_stacks_t *hw_stacks);
#endif /* KVM_PROC_CTXT_STACKS */

@ -0,0 +1,19 @@
#ifndef KVM_PROC_CTXT_TYPES
#define KVM_PROC_CTXT_TYPES
#include <linux/types.h>
#include <asm/cpu_regs_types.h>
typedef struct kvm_proc_ctxt_hw_stacks {
void (*user_func)(void);
void *args;
u64 args_size;
size_t d_stack_sz;
bool protected;
u64 gst_mkctxt_trampoline;
void *ps_frames;
e2k_mem_crs_t *cs_frames;
} kvm_proc_ctxt_hw_stacks_t;
#endif /* KVM_PROC_CTXT_TYPES */

@ -30,7 +30,7 @@
typedef enum inject_caller {
FROM_HOST_INJECT = 1 << 0,
FROM_PV_VCPU_TRAP_INJECT = 1 << 1,
FROM_PV_VCPU_SYSCALL_INJECT = 1 << 2,
FROM_PV_VCPU_SYSCALL_INJECT = 1 << 2
} inject_caller_t;
#ifdef CONFIG_VIRTUALIZATION
@ -588,6 +588,9 @@ check_is_user_address(struct task_struct *task, e2k_addr_t address)
typedef struct pv_vcpu_ctxt {
inject_caller_t inject_from; /* reason of injection */
int trap_no; /* number of recursive trap */
int skip_frames; /* number of signal stack frames to remove */
int skip_traps; /* number of trap frames to remove */
int skip_syscalls; /* number of syscall frames to remove */
u64 sys_rval; /* return value of guest system call */
e2k_psr_t guest_psr; /* guest PSR state before trap */
bool irq_under_upsr; /* is IRQ control under UPSR? */

@ -136,6 +136,35 @@ static inline bool kvm_vcpu_in_hypercall(struct kvm_vcpu *vcpu)
return vcpu->arch.sw_ctxt.in_hypercall;
}
static inline void kvm_vcpu_set_dont_inject(struct kvm_vcpu *vcpu)
{
vcpu->arch.sw_ctxt.dont_inject = true;
}
static inline void kvm_vcpu_reset_dont_inject(struct kvm_vcpu *vcpu)
{
vcpu->arch.sw_ctxt.dont_inject = false;
}
static inline bool kvm_vcpu_test_dont_inject(struct kvm_vcpu *vcpu)
{
return vcpu->arch.sw_ctxt.dont_inject;
}
static inline bool kvm_vcpu_test_and_clear_dont_inject(struct kvm_vcpu *vcpu)
{
if (likely(!kvm_vcpu_test_dont_inject(vcpu)))
return false;
kvm_vcpu_reset_dont_inject(vcpu);
return true;
}
static inline bool host_test_dont_inject(pt_regs_t *regs)
{
return host_test_intc_emul_mode(regs) && regs->dont_inject;
}
static inline void pv_vcpu_clear_gti(struct kvm_vcpu *vcpu)
{
if (likely(!vcpu->arch.is_hv && vcpu->arch.is_pv)) {
@ -314,6 +343,11 @@ static inline bool kvm_vcpu_in_hypercall(struct kvm_vcpu *vcpu)
return false;
}
static inline bool host_test_dont_inject(pt_regs_t *regs)
{
return false;
}
#endif /* CONFIG_VIRTUALIZATION */
#endif /* ! __ASSEMBLY__ */

@ -1258,6 +1258,9 @@ static inline bool host_guest_syscall_enter(struct pt_regs *regs,
kvm_switch_to_host_mmu_pid(vcpu, current->mm);
kvm_set_intc_emul_flag(regs);
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb(); /* See the comment in kvm_vcpu_exiting_guest_mode() */
return true;
}

@ -179,6 +179,7 @@ typedef struct gthread_info {
/* on host */
/* NULL for guest kernel threads */
hpa_t nonp_root_hpa; /* physical base of nonpaging root PT */
u64 curr_ctx_key; /* Key of curr context signal stack */
bool gmm_in_release; /* guest mm is releasing (exit_mm()) */
/* following fields should be updated for each multi-stack process */

@ -963,16 +963,44 @@ TRACE_EVENT(
__entry->aaldi[30], __entry->aaldi[31])
);
TRACE_EVENT(kvm_pid,
TP_PROTO(kvm_e2k_from_t from, unsigned long vmid, unsigned long vcpu_id, unsigned long pid),
TP_ARGS(from, vmid, vcpu_id, pid),
TP_STRUCT__entry(
__field( kvm_e2k_from_t, from )
__field( u64, vmid )
__field( u64, vcpu_id )
__field( u64, pid )
),
TP_fast_assign(
__entry->from = from;
__entry->vmid = vmid;
__entry->vcpu_id = vcpu_id;
__entry->pid = pid;
),
TP_printk("%s: vmid %llu vcpu %llu mmu pid 0x%llx",
__print_symbolic(__entry->from,
{ FROM_GENERIC_HYPERCALL, "generic hcall" },
{ FROM_LIGHT_HYPERCALL, "light hcall" },
{ FROM_PV_INTERCEPT, "pv intc" },
{ FROM_HV_INTERCEPT, "hv intc" },
{ FROM_VCPU_LOAD, "vcpu load" },
{ FROM_VCPU_PUT, "vcpu put" }),
__entry->vmid, __entry->vcpu_id, __entry->pid)
);
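TRACE_EVENT(kvm_pid) generates a trace_kvm_pid() call point; a minimal illustrative call is sketched below (how vmid, vcpu_id and the MMU pid are obtained is left to the caller and not specified by this diff).
static inline void example_trace_vcpu_load_pid(unsigned long vmid,
        unsigned long vcpu_id, unsigned long mmu_pid)
{
        /* Emits "vcpu load: vmid N vcpu M mmu pid 0x..." when tracing is on */
        trace_kvm_pid(FROM_VCPU_LOAD, vmid, vcpu_id, mmu_pid);
}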
TRACE_EVENT(
generic_hcall,
TP_PROTO(unsigned long hcall_num, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5,
unsigned long arg6, unsigned long gsbr,
unsigned long cpu),
unsigned long arg6, unsigned long gsbr),
TP_ARGS(hcall_num, arg1, arg2, arg3, arg4, arg5, arg6, gsbr, cpu),
TP_ARGS(hcall_num, arg1, arg2, arg3, arg4, arg5, arg6, gsbr),
TP_STRUCT__entry(
__field( u64, hcall_num )
@ -983,7 +1011,6 @@ TRACE_EVENT(
__field( u64, arg5 )
__field( u64, arg6 )
__field( u64, gsbr )
__field( u64, cpu )
),
TP_fast_assign(
@ -995,13 +1022,11 @@ TRACE_EVENT(
__entry->arg5 = arg5;
__entry->arg6 = arg6;
__entry->gsbr = gsbr;
__entry->cpu = cpu;
),
TP_printk("CPU#%llu, generic hypercall %llu\n"
TP_printk("nr %llu\n"
"Args: 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx; gsbr: 0x%llx"
,
__entry->cpu,
__entry->hcall_num,
__entry->arg1,
__entry->arg2,
@ -1018,9 +1043,9 @@ TRACE_EVENT(
TP_PROTO(unsigned long hcall_num, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5,
unsigned long arg6, unsigned long cpu),
unsigned long arg6),
TP_ARGS(hcall_num, arg1, arg2, arg3, arg4, arg5, arg6, cpu),
TP_ARGS(hcall_num, arg1, arg2, arg3, arg4, arg5, arg6),
TP_STRUCT__entry(
__field( u64, hcall_num )
@ -1030,7 +1055,6 @@ TRACE_EVENT(
__field( u64, arg4 )
__field( u64, arg5 )
__field( u64, arg6 )
__field( u64, cpu )
),
TP_fast_assign(
@ -1041,13 +1065,11 @@ TRACE_EVENT(
__entry->arg4 = arg4;
__entry->arg5 = arg5;
__entry->arg6 = arg6;
__entry->cpu = cpu;
),
TP_printk("CPU#%llu, light hypercall %llu\n"
TP_printk("nr %llu\n"
"Args: 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx"
,
__entry->cpu,
__entry->hcall_num,
__entry->arg1,
__entry->arg2,

@ -212,26 +212,6 @@ TRACE_EVENT(
))
);
TRACE_EVENT(
intc_exit,
TP_PROTO(int ret),
TP_ARGS(ret),
TP_STRUCT__entry(
__field( int, ret )
),
TP_fast_assign(
__entry->ret = ret;
),
TP_printk("Intercept exit %s(%d)",
(__entry->ret) ? "to QEMU " : "",
__entry->ret)
);
TRACE_EVENT(
intc_stacks,

@ -157,6 +157,11 @@ is_guest_TIRs_frozen(struct pt_regs *regs)
return false; /* none any guest */
}
static inline bool is_injected_guest_coredump(struct pt_regs *regs)
{
return false; /* none any guest */
}
static inline bool
handle_guest_last_wish(struct pt_regs *regs)
{
@ -273,6 +278,7 @@ extern unsigned long kvm_pass_virqs_to_guest(struct pt_regs *regs,
unsigned long TIR_hi, unsigned long TIR_lo);
extern unsigned long kvm_pass_coredump_trap_to_guest(struct kvm_vcpu *vcpu,
struct pt_regs *regs);
extern void kvm_pass_coredump_to_all_vm(struct pt_regs *regs);
extern unsigned long kvm_pass_clw_fault_to_guest(struct pt_regs *regs,
trap_cellar_t *tcellar);
extern unsigned long kvm_pass_page_fault_to_guest(struct pt_regs *regs,
@ -283,12 +289,18 @@ extern int do_hret_last_wish_intc(struct kvm_vcpu *vcpu, struct pt_regs *regs);
extern void trap_handler_trampoline(void);
extern void syscall_handler_trampoline(void);
extern void host_mkctxt_trampoline(void);
extern void return_pv_vcpu_from_mkctxt(void);
extern void trap_handler_trampoline_continue(void);
extern void syscall_handler_trampoline_continue(u64 sys_rval);
extern void host_mkctxt_trampoline_continue(void);
extern void return_pv_vcpu_from_mkctxt_continue(void);
extern void syscall_fork_trampoline(void);
extern void syscall_fork_trampoline_continue(u64 sys_rval);
extern notrace long return_pv_vcpu_trap(void);
extern notrace long return_pv_vcpu_syscall(void);
extern notrace void pv_vcpu_mkctxt_trampoline_inject(void);
extern notrace void pv_vcpu_mkctxt_complete(void);
static __always_inline void
kvm_init_guest_traps_handling(struct pt_regs *regs, bool user_mode_trap)
@ -542,6 +554,11 @@ is_guest_TIRs_frozen(struct pt_regs *regs)
return kvm_is_guest_TIRs_frozen(regs);
}
static inline bool is_injected_guest_coredump(struct pt_regs *regs)
{
return regs->traps_to_guest == core_dump_mask;
}
static inline bool
handle_guest_last_wish(struct pt_regs *regs)
{
@ -700,12 +717,13 @@ pass_coredump_trap_to_guest(struct pt_regs *regs)
{
struct kvm_vcpu *vcpu;
if (!kvm_test_intc_emul_flag(regs))
if (!kvm_test_intc_emul_flag(regs)) {
kvm_pass_coredump_to_all_vm(regs);
return 0;
}
vcpu = current_thread_info()->vcpu;
return kvm_pass_coredump_trap_to_guest(vcpu, regs);
}

@ -113,7 +113,7 @@ native_copy_from_user_with_tags(void *to, const void __user *from,
(res); \
})
#define __kvm_get_guest_atomic(__slot, gfn, __hk_ptr, offset, \
#define __kvm_get_guest(__slot, gfn, __hk_ptr, offset, \
gk_ptrp, __writable) \
({ \
__typeof__(__hk_ptr) __user *gk_ptr; \
@ -126,12 +126,12 @@ native_copy_from_user_with_tags(void *to, const void __user *from,
} else { \
gk_ptr = (__typeof__((__hk_ptr)) *)(addr + offset); \
gk_ptrp = gk_ptr; \
r = native_get_user((__hk_ptr), gk_ptr); \
r = __get_user((__hk_ptr), gk_ptr); \
} \
r; \
})
#define kvm_get_guest_atomic(kvm, gpa, _hk_ptr) \
#define kvm_get_guest(kvm, gpa, _hk_ptr) \
({ \
gfn_t gfn = (gpa) >> PAGE_SHIFT; \
struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); \
@ -139,12 +139,10 @@ native_copy_from_user_with_tags(void *to, const void __user *from,
__typeof__(_hk_ptr) __user *unused; \
int r; \
\
__kvm_get_guest_atomic(slot, gfn, (_hk_ptr), offset, \
unused, NULL); \
__kvm_get_guest(slot, gfn, (_hk_ptr), offset, unused, NULL); \
})
#define kvm_vcpu_get_guest_ptr_atomic(vcpu, gpa, _hk_ptr, \
_gk_ptrp, _writable) \
#define kvm_vcpu_get_guest_ptr(vcpu, gpa, _hk_ptr, _gk_ptrp, _writable) \
({ \
gfn_t gfn = (gpa) >> PAGE_SHIFT; \
struct kvm_memory_slot *slot; \
@ -152,16 +150,37 @@ native_copy_from_user_with_tags(void *to, const void __user *from,
int r; \
\
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); \
r = __kvm_get_guest_atomic(slot, gfn, (_hk_ptr), offset, \
r = __kvm_get_guest(slot, gfn, (_hk_ptr), offset, \
_gk_ptrp, _writable); \
r; \
})
#define kvm_vcpu_get_guest_atomic(vcpu, gpa, ___hk_ptr) \
#define kvm_vcpu_get_guest(vcpu, gpa, ___hk_ptr) \
({ \
__typeof__(___hk_ptr) __user *unused; \
\
kvm_vcpu_get_guest_ptr_atomic(vcpu, gpa, ___hk_ptr, \
unused, NULL); \
kvm_vcpu_get_guest_ptr(vcpu, gpa, ___hk_ptr, unused, NULL); \
})
#define kvm_get_guest_atomic(kvm, gpa, __hk_ptr) \
({ \
__typeof__(__hk_ptr) __user *gk_ptr; \
gfn_t gfn = (gpa) >> PAGE_SHIFT; \
struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); \
int offset = offset_in_page(gpa); \
bool writable; \
unsigned long addr; \
int r; \
\
addr = gfn_to_hva_memslot_prot(slot, gfn, &writable); \
if (unlikely(kvm_is_error_hva(addr))) { \
r = -EFAULT; \
} else { \
gk_ptr = (__typeof__((__hk_ptr)) *)(addr + offset); \
pagefault_disable(); \
r = native_get_user((__hk_ptr), gk_ptr); \
pagefault_enable(); \
} \
r; \
})
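A small usage sketch of the new atomic variant (the wrapper function below is illustrative only): because the macro brackets the guest access with pagefault_disable()/pagefault_enable(), it can be used where sleeping is not allowed.
/* Illustrative: read a guest u64 at a guest physical address from a
 * context that must not sleep; returns 0 on success or -EFAULT. */
static int example_read_guest_u64_atomic(struct kvm *kvm, gpa_t gpa, u64 *val)
{
        u64 tmp;
        int r;

        r = kvm_get_guest_atomic(kvm, gpa, tmp);
        if (r)
                return r;

        *val = tmp;
        return 0;
}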
extern unsigned long kvm_copy_in_user_with_tags(void __user *to,

@ -414,6 +414,9 @@ typedef enum pf_res {
PFRES_RETRY, /* page fault is not handled and can */
/* be retried on guest or should be handled */
/* from beginning by hypervisor */
PFRES_RETRY_MEM, /* not enough memory to handle */
PFRES_DONT_INJECT, /* page fault should be injected to the guest */
/* but injection is prohibited */
} pf_res_t;
struct kvm_arch_exception;
@ -561,7 +564,7 @@ typedef struct kvm_mmu {
gw_attr_t *gw_res);
void (*update_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
pgprot_t *spte, const void *pte);
void (*sync_gva)(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, gva_t gva);
int (*sync_gva)(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, gva_t gva);
long (*sync_gva_range)(struct kvm_vcpu *vcpu, gmm_struct_t *gmm,
gva_t gva_start, gva_t gva_end);
int (*sync_page)(struct kvm_vcpu *vcpu, kvm_mmu_page_t *sp);
@ -782,6 +785,7 @@ typedef struct kvm_sw_cpu_context {
int osem;
bool in_hypercall;
bool in_fast_syscall;
bool dont_inject;
e2k_usd_lo_t usd_lo;
e2k_usd_hi_t usd_hi;
@ -1224,6 +1228,8 @@ struct kvm_vcpu_arch {
int node_id;
int hard_cpu_id;
u64 gst_mkctxt_trampoline;
};
typedef struct kvm_lpage_info {
@ -1257,6 +1263,7 @@ typedef struct kvm_arch_memory_slot {
#define KVM_REQ_VIRQS_INJECTED 22 /* pending VIRQs injected */
#define KVM_REQ_SCAN_IOAPIC 23 /* scan IO-APIC */
#define KVM_REQ_SCAN_IOEPIC 24 /* scan IO-EPIC */
#define KVM_REQ_TO_COREDUMP 25 /* pending coredump request */
#define kvm_set_pending_virqs(vcpu) \
set_bit(KVM_REQ_PENDING_VIRQS, (void *)&vcpu->requests)
@ -1283,6 +1290,14 @@ do { \
if (test_and_clear_bit(KVM_REG_SHOW_STATE, (void *)&vcpu->requests)) \
wake_up_bit((void *)&vcpu->requests, KVM_REG_SHOW_STATE); \
} while (false)
#define kvm_set_request_to_coredump(vcpu) \
kvm_make_request(KVM_REQ_TO_COREDUMP, vcpu)
#define kvm_clear_request_to_coredump(vcpu) \
kvm_clear_request(KVM_REQ_TO_COREDUMP, vcpu)
#define kvm_test_request_to_coredump(vcpu) \
kvm_test_request(KVM_REQ_TO_COREDUMP, vcpu)
#define kvm_test_and_clear_request_to_coredump(vcpu) \
kvm_check_request(KVM_REQ_TO_COREDUMP, vcpu)
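One way the new request could be raised for every VCPU, for example from kvm_pass_coredump_to_all_vm() declared earlier in this patch; this sketch is an assumption, the actual implementation is not shown in the diff.
/* Hypothetical: flag a pending coredump request on every VCPU of a VM */
static void example_request_coredump_all_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        int i;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_set_request_to_coredump(vcpu);
}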
struct kvm_irq_mask_notifier {
void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
@ -1320,7 +1335,6 @@ struct kvm_arch {
bool tdp_enable; /* two dimensional paging is supported */
/* by hardware MMU and hypervisor */
bool shadow_pt_set_up; /* shadow PT was set up, skip setup on other VCPUs */
struct mutex spt_sync_lock;
atomic_t vcpus_to_reset; /* atomic counter of VCPUs ready to reset */
kvm_mem_alias_t aliases[KVM_ALIAS_SLOTS];
kvm_kernel_shadow_t shadows[KVM_SHADOW_SLOTS];