linux-headers-5.4.91-2.13

Alibek Omarov 2021-08-26 01:45:14 +03:00
parent f830966167
commit b732598e29
36 changed files with 336 additions and 812 deletions

View File

@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 91
EXTRAVERSION = -2.11
EXTRAVERSION = -2.13
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*

View File

@ -1,122 +0,0 @@
/* Generic MTRR (Memory Type Range Register) ioctls.
Copyright (C) 1997-1999 Richard Gooch
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with this library; if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Richard Gooch may be reached by email at rgooch@atnf.csiro.au
The postal address is:
Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
*/
#ifndef _LINUX_MTRR_H
#define _LINUX_MTRR_H
#include <linux/ioctl.h>
#define MTRR_IOCTL_BASE 'M'
struct mtrr_sentry
{
unsigned long base; /* Base address */
unsigned long size; /* Size of region */
unsigned int type; /* Type of region */
};
struct mtrr_gentry
{
unsigned int regnum; /* Register number */
unsigned long base; /* Base address */
unsigned long size; /* Size of region */
unsigned int type; /* Type of region */
};
/* These are the various ioctls */
#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
/* These are the region types */
#define MTRR_TYPE_UNCACHABLE 0
#define MTRR_TYPE_WRCOMB 1
/*#define MTRR_TYPE_ 2*/
/*#define MTRR_TYPE_ 3*/
#define MTRR_TYPE_WRTHROUGH 4
#define MTRR_TYPE_WRPROT 5
#define MTRR_TYPE_WRBACK 6
#define MTRR_NUM_TYPES 7
#ifdef MTRR_NEED_STRINGS
static char *mtrr_strings[MTRR_NUM_TYPES] =
{
"uncachable", /* 0 */
"write-combining", /* 1 */
"?", /* 2 */
"?", /* 3 */
"write-through", /* 4 */
"write-protect", /* 5 */
"write-back", /* 6 */
};
#endif
#ifdef __KERNEL__
/* The following functions are for use by other drivers */
# ifdef CONFIG_MTRR
extern int mtrr_add (unsigned long base, unsigned long size,
unsigned int type, char increment);
extern int mtrr_add_page (unsigned long base, unsigned long size,
unsigned int type, char increment);
extern int mtrr_del (int reg, unsigned long base, unsigned long size);
extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
# else
static __inline__ int mtrr_add (unsigned long base, unsigned long size,
unsigned int type, char increment)
{
return -ENODEV;
}
static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
unsigned int type, char increment)
{
return -ENODEV;
}
static __inline__ int mtrr_del (int reg, unsigned long base,
unsigned long size)
{
return -ENODEV;
}
static __inline__ int mtrr_del_page (int reg, unsigned long base,
unsigned long size)
{
return -ENODEV;
}
# endif
/* The following functions are for initialisation: don't use them! */
extern int mtrr_init (void);
# if defined(CONFIG_SMP) && defined(CONFIG_MTRR)
extern void mtrr_init_boot_cpu (void);
extern void mtrr_init_secondary_cpu (void);
# endif
#endif
#endif /* _LINUX_MTRR_H */
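For context on the interface being dropped here: this header exposed the classic Linux MTRR ioctls driven through /proc/mtrr. A minimal userspace sketch of the standard usage (the include path, base and size values are illustrative assumptions, not taken from this tree):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/mtrr.h>   /* assumed include path; x86 exposes it here */

int main(void)
{
        /* Hypothetical region: mark 4 MiB at 0xf8000000 write-combining. */
        struct mtrr_sentry sentry = {
                .base = 0xf8000000UL,   /* base address */
                .size = 0x400000UL,     /* size of region */
                .type = MTRR_TYPE_WRCOMB,
        };
        int fd = open("/proc/mtrr", O_WRONLY);

        if (fd < 0 || ioctl(fd, MTRRIOC_ADD_ENTRY, &sentry) < 0) {
                perror("MTRRIOC_ADD_ENTRY");
                return 1;
        }
        close(fd);
        return 0;
}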

View File

@ -22,7 +22,9 @@
__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
EXPORT_PER_CPU_SYMBOL(_name)
EXPORT_PER_CPU_SYMBOL(_name); \
EXPORT_PER_CPU_SYMBOL(_name##_early_ptr); \
EXPORT_PER_CPU_SYMBOL(_name##_early_map);
#define DECLARE_EARLY_PER_CPU(_type, _name) \
DECLARE_PER_CPU(_type, _name); \

View File

@ -39,25 +39,29 @@
#define barrier() \
do { \
int unused; \
__asm__ NOT_VOLATILE("" : "=r" (unused) : : "memory", PREEMPTION_CLOBBERS);\
/* TODO bug 126238 - insert additional NOP until fixed */ \
__asm__ NOT_VOLATILE("{nop}" : "=r" (unused) : : "memory", PREEMPTION_CLOBBERS);\
} while (0)
/* See comment before PREEMPTION_CLOBBERS */
#define barrier_preemption() \
do { \
int unused; \
__asm__ NOT_VOLATILE("" : "=r" (unused) : : PREEMPTION_CLOBBERS);\
/* TODO bug 126238 - insert additional NOP until fixed */ \
__asm__ NOT_VOLATILE("{nop}" : "=r" (unused) : : PREEMPTION_CLOBBERS);\
} while (0)
#define barrier_data(ptr) \
do { \
__asm__ NOT_VOLATILE("" : : "r"(ptr) : "memory", PREEMPTION_CLOBBERS); \
/* TODO bug 126238 - insert additional NOP until fixed */ \
__asm__ NOT_VOLATILE("{nop}" : : "r"(ptr) : "memory", PREEMPTION_CLOBBERS); \
} while (0)
#define RELOC_HIDE(ptr, off) \
({ \
unsigned long __ptr; \
__asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
/* TODO bug 126238 - insert additional NOP until fixed */ \
__asm__ ("{nop}" : "=r"(__ptr) : "0"(ptr)); \
(typeof(ptr)) (__ptr + (off)); \
})
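For readers unfamiliar with the idiom: these macros are compiler barriers, not hardware fences. A minimal generic sketch of the same idea (plain GCC-style inline asm, without the e2k-specific {nop} payload and PREEMPTION_CLOBBERS that this hunk adds to work around bug 126238):

/* An empty asm with a "memory" clobber forbids the compiler from caching
 * memory values across this point; by itself it emits no instruction. */
#define generic_compiler_barrier()      __asm__ __volatile__("" : : : "memory")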

View File

@ -249,7 +249,7 @@ typedef union { /* Common array pointer */
static inline e2k_ptr_t MAKE_AP(u64 base, u64 len)
{
e2k_ptr_t ptr = {{0}};
AW(ptr).lo = 0L | ((base & (E2K_VA_SIZE -1)) |
AW(ptr).lo = 0L | ((base & E2K_VA_MASK) |
((u64)E2K_AP_ITAG << 61) |
((u64)RW_ENABLE << 59));
AW(ptr).hi = 0L | ((len & 0xFFFFFFFF) << 32);
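The change above only swaps an open-coded mask for a named constant. A compile-time check of the assumed equivalence (assuming E2K_VA_MASK is defined elsewhere in these headers as E2K_VA_SIZE - 1) could look like:

#include <linux/build_bug.h>

static inline void check_make_ap_mask(void)
{
        /* Assumption: the named mask equals the expression it replaces,
         * so MAKE_AP() behaviour is unchanged by this hunk. */
        BUILD_BUG_ON(E2K_VA_MASK != E2K_VA_SIZE - 1);
}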

View File

@ -0,0 +1,19 @@
#ifndef __ASM_KVM_GUEST_MM_HOOKS_H
#define __ASM_KVM_GUEST_MM_HOOKS_H
#ifdef __KERNEL__
extern void kvm_get_mm_notifier_locked(struct mm_struct *mm);
#ifdef CONFIG_KVM_GUEST_KERNEL
/* it is pure guest kernel (not paravirtualized based on pv_ops) */
static inline void
get_mm_notifier_locked(struct mm_struct *mm)
{
/* create mm notifier to trace some events over mm */
kvm_get_mm_notifier_locked(mm);
}
#endif /* CONFIG_KVM_GUEST_KERNEL */
#endif /* __KERNEL__ */
#endif /* __ASM_KVM_GUEST_MM_HOOKS_H */
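How the new hook is reached, as a call-chain sketch assembled from the other hunks in this commit (arch_bprm_mm_init() gains the call in asm/mm_hooks.h further down):

/*
 * execve()
 *   -> __bprm_mm_init()                   fs/exec.c
 *     -> arch_bprm_mm_init()              asm/mm_hooks.h
 *       -> get_mm_notifier_locked(mm)     variant selected by config
 *         -> kvm_get_mm_notifier_locked() pure guest kernel (this file)
 */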

View File

@ -7,6 +7,7 @@
extern void kvm_activate_mm(struct mm_struct *active_mm,
struct mm_struct *mm);
extern void kvm_get_mm_notifier_locked(struct mm_struct *mm);
#ifdef CONFIG_KVM_GUEST_KERNEL
/* it is pure guest kernel (not paravirtualized based on pv_ops) */

View File

@ -83,24 +83,6 @@ kvm_pt_clear_young_atomic(struct mm_struct *mm,
_PAGE_INIT_ACCESSED));
}
}
static inline pgprotval_t
kvm_pt_modify_prot_atomic(struct mm_struct *mm,
unsigned long addr, pgprot_t *pgprot)
{
if (IS_HV_MMU_TDP()) {
return native_pt_modify_prot_atomic(&pgprot->pgprot);
} else {
return pgprot_val(kvm_pt_atomic_update(mm, addr, pgprot,
ATOMIC_MODIFY_START,
_PAGE_INIT_VALID));
}
}
static inline pte_t kvm_ptep_get_and_clear_to_move(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return __pte(kvm_pt_get_and_clear_atomic(mm, addr, (pgprot_t *)ptep));
}
#elif defined(CONFIG_KVM_GUEST_KERNEL)
#error "CONFIG_KVM_SHADOW_PT should be set for guest paravirtualized kernel"
#endif /* CONFIG_KVM_SHADOW_PT */
@ -142,19 +124,6 @@ pt_clear_young_atomic(struct mm_struct *mm,
return kvm_pt_clear_young_atomic(mm, addr, pgprot);
}
static inline pgprotval_t
pt_modify_prot_atomic(struct mm_struct *mm,
unsigned long addr, pgprot_t *pgprot)
{
return kvm_pt_modify_prot_atomic(mm, addr, pgprot);
}
static inline pte_t ptep_get_and_clear_to_move(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return kvm_ptep_get_and_clear_to_move(mm, addr, ptep);
}
static inline pte_t get_pte_for_address(struct vm_area_struct *vma,
e2k_addr_t address)
{

View File

@ -110,8 +110,6 @@ kvm_preserve_user_hw_stacks_to_copy(e2k_stacks_t *u_stacks,
/* after copying and therefore are not preserved */
}
extern void kvm_get_mm_notifier(thread_info_t *ti, struct mm_struct *mm);
static __always_inline void
kvm_jump_to_ttable_entry(struct pt_regs *regs, enum restore_caller from)
{

View File

@ -0,0 +1,31 @@
/*
* KVM guest mm hooks support
* Copyright 2021 Andrey I. Alekhin (alekhin_a@mcst.ru)
*/
#ifndef _E2K_KVM_MM_HOOKS_H
#define _E2K_KVM_MM_HOOKS_H
#include <linux/mm.h>
/*
* Virtualization support
*/
#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL)
/* it is native kernel without any virtualization */
/* it is native host kernel with virtualization support */
static inline void
get_mm_notifier_locked(struct mm_struct *mm)
{
/* Do not need mmu notifier in native mode */
}
#elif defined(CONFIG_PARAVIRT_GUEST)
/* it is paravirtualized host and guest kernel */
#include <asm/paravirt/mm_hooks.h>
#elif defined(CONFIG_KVM_GUEST_KERNEL)
/* it is pure guest kernel (not paravirtualized based on pv_ops) */
#include <asm/kvm/guest/mm_hooks.h>
#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
#endif /* !(_E2K_KVM_MM_HOOKS_H) */

View File

@ -11,6 +11,7 @@
#include <asm/regs_state.h>
#include <asm/kvm/cpu_hv_regs_access.h>
#include <asm/kvm/mmu_hv_regs_access.h>
#include <asm/pgd.h>
#define DEBUG_UPSR_FP_DISABLE
@ -745,6 +746,20 @@ pv_vcpu_switch_guest_host_context(struct kvm_vcpu *vcpu,
pv_vcpu_restore_host_context(vcpu, next_gti);
}
static inline void
pv_vcpu_switch_kernel_pgd_range(struct kvm_vcpu *vcpu, int cpu)
{
hpa_t vcpu_root;
if (is_sep_virt_spaces(vcpu)) {
vcpu_root = kvm_get_space_type_spt_os_root(vcpu);
} else {
vcpu_root = kvm_get_space_type_spt_u_root(vcpu);
}
copy_kernel_pgd_range(__va(vcpu_root), the_cpu_pg_dir(cpu));
}
static inline void pv_vcpu_switch_host_context(struct kvm_vcpu *vcpu)
{
kvm_host_context_t *host_ctxt = &vcpu->arch.host_ctxt;

View File

@ -423,13 +423,12 @@ CPUHAS(CPU_HWBUG_FALSE_SS,
CPUHAS(CPU_HWBUG_SPURIOUS_EXC_DATA_DEBUG,
!IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) &&
!IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) &&
!IS_ENABLED(CONFIG_CPU_E8C2) && !IS_ENABLED(CONFIG_CPU_E12C) &&
!IS_ENABLED(CONFIG_CPU_E16C) && !IS_ENABLED(CONFIG_CPU_E2C3),
!IS_ENABLED(CONFIG_CPU_E8C2) && !IS_ENABLED(CONFIG_CPU_E16C) &&
!IS_ENABLED(CONFIG_CPU_E2C3),
false,
cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL ||
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL ||
cpu == IDR_E12C_MDL && revision == 0 ||
cpu == IDR_E16C_MDL && revision == 0 ||
cpu == IDR_E2C3_MDL && revision == 0);
/* #119084 - several TBL flushes in a row might fail to flush L1D.
@ -441,11 +440,10 @@ CPUHAS(CPU_HWBUG_TLB_FLUSH_L1D,
/* #121311 - asynchronous entries in INTC_INFO_MU always have "pm" bit set.
* Workaround - use "pm" bit saved in guest's chain stack. */
CPUHAS(CPU_HWBUG_GUEST_ASYNC_PM,
!IS_ENABLED(CONFIG_CPU_E12C) && !IS_ENABLED(CONFIG_CPU_E16C) &&
!IS_ENABLED(CONFIG_CPU_E2C3),
!IS_ENABLED(CONFIG_CPU_E16C) && !IS_ENABLED(CONFIG_CPU_E2C3),
false,
cpu == IDR_E12C_MDL || cpu == IDR_E16C_MDL ||
cpu == IDR_E2C3_MDL);
cpu == IDR_E16C_MDL && revision == 0 ||
cpu == IDR_E2C3_MDL && revision == 0);
/* #122946 - a new interrupt may conflict while the sync signal is turning off.
* Workaround - wait for C0 after E2K_WAIT_V6 */
CPUHAS(CPU_HWBUG_E16C_SLEEP,
@ -458,58 +456,60 @@ CPUHAS(CPU_HWBUG_E16C_SLEEP,
CPUHAS(CPU_HWBUG_L1I_STOPS_WORKING,
!IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) &&
!IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) &&
!IS_ENABLED(CONFIG_CPU_E8C2) && !IS_ENABLED(CONFIG_CPU_E12C) &&
!IS_ENABLED(CONFIG_CPU_E16C) && !IS_ENABLED(CONFIG_CPU_E2C3),
!IS_ENABLED(CONFIG_CPU_E8C2) && !IS_ENABLED(CONFIG_CPU_E16C) &&
!IS_ENABLED(CONFIG_CPU_E2C3),
false,
cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL ||
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL ||
cpu == IDR_E12C_MDL || cpu == IDR_E16C_MDL ||
cpu == IDR_E2C3_MDL);
cpu == IDR_E16C_MDL && revision == 0 ||
cpu == IDR_E2C3_MDL && revision == 0);
/* #124947 - CLW clearing by OS must be done on the same CPU that started the
* hardware clearing operation to avoid creating a stale L1 entry.
* Workaround - forbid migration until CLW clearing is finished in software. */
CPUHAS(CPU_HWBUG_CLW_STALE_L1_ENTRY,
IS_ENABLED(CONFIG_E2K_MACHINE) && !IS_ENABLED(CONFIG_CPU_E12C) &&
!IS_ENABLED(CONFIG_CPU_E16C),
IS_ENABLED(CONFIG_CPU_E2S) || IS_ENABLED(CONFIG_CPU_E8C) ||
IS_ENABLED(CONFIG_CPU_E8C2),
!IS_ENABLED(CONFIG_CPU_E2S) && !IS_ENABLED(CONFIG_CPU_E8C) &&
!IS_ENABLED(CONFIG_CPU_E8C2) && !IS_ENABLED(CONFIG_CPU_E16C),
false,
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || cpu == IDR_E8C2_MDL ||
cpu == IDR_E12C_MDL && revision == 0 ||
cpu == IDR_E16C_MDL && revision == 0);
cpu == IDR_E16C_MDL && revision == 0);
/* #126587 - "wait ma_c=1" does not wait for all L2$ writebacks to complete
* when disabling CPU core with "wait trap=1" algorithm.
* Workaround - manually insert 66 NOPs before "wait trap=1" */
CPUHAS(CPU_HWBUG_C3_WAIT_MA_C,
IS_ENABLED(CONFIG_E2K_MACHINE),
IS_ENABLED(CONFIG_CPU_E2S) || IS_ENABLED(CONFIG_CPU_E8C) ||
IS_ENABLED(CONFIG_CPU_E1CP),
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || cpu == IDR_E1CP_MDL);
!IS_ENABLED(CONFIG_CPU_E2S) && !IS_ENABLED(CONFIG_CPU_E8C) &&
!IS_ENABLED(CONFIG_CPU_E1CP),
false,
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || cpu == IDR_E1CP_MDL);
/* #128127 - Intercepting SCLKM3 write does not prevent guest from writing it.
* Workaround - Update SH_SCLKM3 in intercept handler */
CPUHAS(CPU_HWBUG_VIRT_SCLKM3_INTC,
!IS_ENABLED(CONFIG_CPU_E16C) && !IS_ENABLED(CONFIG_CPU_E2C3) &&
!IS_ENABLED(CONFIG_CPU_E12C),
!IS_ENABLED(CONFIG_CPU_E16C) && !IS_ENABLED(CONFIG_CPU_E2C3),
false,
cpu == IDR_E16C_MDL && revision == 0 ||
cpu == IDR_E12C_MDL && revision == 0 ||
cpu == IDR_E2C3_MDL && revision == 0);
/* #130039 - intercepting some specific sequences of call/return/setwd
* (that change WD.psize in a specific way) does not work.
* Workaround - avoid those sequences. */
CPUHAS(CPU_HWBUG_VIRT_PSIZE_INTERCEPTION,
IS_ENABLED(CONFIG_E2K_MACHINE),
IS_ENABLED(CONFIG_CPU_E16C) || IS_ENABLED(CONFIG_CPU_E2C3),
(cpu == IDR_E16C_MDL || cpu == IDR_E2C3_MDL) && revision == 0);
!IS_ENABLED(CONFIG_CPU_E16C) && !IS_ENABLED(CONFIG_CPU_E2C3),
false,
cpu == IDR_E16C_MDL && revision == 0 ||
cpu == IDR_E2C3_MDL && revision == 0);
/* #129848 - alignment of usd_hi write depends on current usd_lo.p
* Workaround - write usd_lo before usd_hi, keeping a distance of 2 cycles from the sbr write.
* Valid sequences are: sbr, nop, usd.lo, usd.hi OR sbr, usd.lo, usd.hi, usd.lo */
CPUHAS(CPU_HWBUG_USD_ALIGNMENT,
IS_ENABLED(CONFIG_E2K_MACHINE) && !IS_ENABLED(CONFIG_CPU_E16C) &&
!IS_ENABLED(CONFIG_CPU_E2C3),
!IS_ENABLED(CONFIG_CPU_E12C),
cpu == IDR_E16C_MDL && revision <= 1 ||
cpu == IDR_E2C3_MDL && revision <= 1);
!IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) &&
!IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) &&
!IS_ENABLED(CONFIG_CPU_E8C2) && !IS_ENABLED(CONFIG_CPU_E16C) &&
!IS_ENABLED(CONFIG_CPU_E2C3) && !IS_ENABLED(CONFIG_CPU_E12C),
false,
cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL ||
cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL ||
cpu == IDR_E16C_MDL || cpu == IDR_E2C3_MDL ||
cpu == IDR_E12C_MDL);
/* Rely on IDR instead of iset version to choose between APIC and EPIC.
* For guests we use their own fake IDR so that we choose between APIC and
* EPIC based on what hardware the guest *thinks* it is being executed on. */
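A sketch of how these feature bits are consumed, modeled on the cpu_has() calls visible elsewhere in this commit (e.g. cpu_has(CPU_FEAT_FLUSH_DC_IC) in pgtable.h); the surrounding function is hypothetical:

static inline void psize_workaround_example(void)
{
        /* Each CPUHAS() entry becomes a feature/bug bit that call sites
         * test at run time (or that constant-folds away when the kernel
         * is built for one exact machine). */
        if (cpu_has(CPU_HWBUG_VIRT_PSIZE_INTERCEPTION)) {
                /* avoid the call/return/setwd sequences from #130039 */
        }
}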

View File

@ -6,6 +6,8 @@
#ifndef _ASM_E2K_MM_HOOKS_H
#define _ASM_E2K_MM_HOOKS_H
#include <asm/kvm/mm_hooks.h>
static inline void arch_unmap(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
@ -14,6 +16,7 @@ static inline void arch_unmap(struct mm_struct *mm,
static inline void arch_bprm_mm_init(struct mm_struct *mm,
struct vm_area_struct *vma)
{
get_mm_notifier_locked(mm);
}
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,

View File

@ -1,67 +0,0 @@
/* Generic MTRR (Memory Type Range Register) ioctls.
Copyright (C) 1997-1999 Richard Gooch
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with this library; if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Richard Gooch may be reached by email at rgooch@atnf.csiro.au
The postal address is:
Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
*/
#ifndef _LINUX_MTRR_H
#define _LINUX_MTRR_H
#include <uapi/asm/mtrr.h>
/* The following functions are for use by other drivers */
# ifdef CONFIG_MTRR
extern int mtrr_add (unsigned long base, unsigned long size,
unsigned int type, char increment);
extern int mtrr_add_page (unsigned long base, unsigned long size,
unsigned int type, char increment);
extern int mtrr_del (int reg, unsigned long base, unsigned long size);
extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
# else
static __inline__ int mtrr_add (unsigned long base, unsigned long size,
unsigned int type, char increment)
{
return -ENODEV;
}
static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
unsigned int type, char increment)
{
return -ENODEV;
}
static __inline__ int mtrr_del (int reg, unsigned long base,
unsigned long size)
{
return -ENODEV;
}
static __inline__ int mtrr_del_page (int reg, unsigned long base,
unsigned long size)
{
return -ENODEV;
}
# endif
/* The following functions are for initialisation: don't use them! */
extern int mtrr_init (void);
# if defined(CONFIG_SMP) && defined(CONFIG_MTRR)
extern void mtrr_init_boot_cpu (void);
extern void mtrr_init_secondary_cpu (void);
# endif
#endif /* _LINUX_MTRR_H */

View File

@ -0,0 +1,14 @@
#ifndef __ASM_PARAVIRT_GUEST_MM_HOOKS_H
#define __ASM_PARAVIRT_GUEST_MM_HOOKS_H
#ifdef __KERNEL__
#ifdef CONFIG_PARAVIRT_GUEST
static inline void
get_mm_notifier_locked(struct mm_struct *mm)
{
}
#endif /* CONFIG_PARAVIRT_GUEST */
#endif /* __KERNEL__ */
#endif /* __ASM_PARAVIRT_GUEST_MM_HOOKS_H */

View File

@ -68,17 +68,6 @@ pv_pt_clear_young_atomic(struct mm_struct *mm,
}
}
static inline pgprotval_t
pv_pt_modify_prot_atomic(struct mm_struct *mm,
unsigned long addr, pgprot_t *pgprot)
{
if (paravirt_enabled()) {
return kvm_pt_modify_prot_atomic(mm, addr, pgprot);
} else {
return native_pt_modify_prot_atomic(&pgprot->pgprot);
}
}
#if defined(CONFIG_PARAVIRT_GUEST)
/* It is paravirtualized host and guest kernel */
@ -115,13 +104,6 @@ pt_clear_young_atomic(struct mm_struct *mm,
{
return pv_pt_clear_young_atomic(mm, addr, pgprot);
}
static inline pgprotval_t
pt_modify_prot_atomic(struct mm_struct *mm,
unsigned long addr, pgprot_t *pgprot)
{
return pv_pt_modify_prot_atomic(mm, addr, pgprot);
}
#endif /* CONFIG_PARAVIRT_GUEST */
#endif /* ! _E2K_PARAVIRT_PGATOMIC_H */

View File

@ -20,12 +20,6 @@ pv_set_pte_at(struct mm_struct *mm, unsigned long addr,
pv_write_pte_at(mm, addr, ptep, pteval, false, false);
}
static inline void
pv_set_pte_to_move_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
pv_write_pte_at(mm, addr, ptep, pteval, false, true);
}
static inline void
pv_validate_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
@ -107,16 +101,6 @@ static inline pte_t pv_ptep_get_and_clear(struct mm_struct *mm,
{
return pv_mmu_ops.ptep_get_and_clear(mm, addr, ptep, false);
}
static inline pte_t pv_ptep_get_and_clear_to_move(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return pv_mmu_ops.ptep_get_and_clear(mm, addr, ptep, true);
}
static inline pte_t pv_ptep_get_and_clear_as_valid(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return pv_mmu_ops.ptep_get_and_clear_as_valid(mm, addr, ptep);
}
static inline void pv_ptep_wrprotect_atomic(struct mm_struct *mm,
e2k_addr_t addr, pte_t *ptep)
{
@ -142,12 +126,6 @@ set_pte_at(struct mm_struct *mm, unsigned long addr,
pv_set_pte_at(mm, addr, ptep, pteval);
}
static inline void
set_pte_to_move_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
pv_set_pte_to_move_at(mm, addr, ptep, pteval);
}
static inline void
validate_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
@ -215,16 +193,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
{
return pv_ptep_get_and_clear(mm, addr, ptep);
}
static inline pte_t ptep_get_and_clear_to_move(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return pv_ptep_get_and_clear_to_move(mm, addr, ptep);
}
static inline pte_t ptep_get_and_clear_as_valid(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return pv_ptep_get_and_clear_as_valid(mm, addr, ptep);
}
static inline void ptep_wrprotect_atomic(struct mm_struct *mm,
e2k_addr_t addr, pte_t *ptep)
{

View File

@ -592,8 +592,6 @@ typedef struct pv_mmu_ops {
pgd_t *pgdp, pgd_t pgdval, bool only_validate);
pte_t (*ptep_get_and_clear)(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, bool to_move);
pte_t (*ptep_get_and_clear_as_valid)(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
void (*ptep_wrprotect_atomic)(struct mm_struct *mm,
e2k_addr_t addr, pte_t *ptep);
pte_t (*get_pte_for_address)(struct vm_area_struct *vma,

View File

@ -179,6 +179,8 @@ pud_page_validate(pgd_t *pgdp, pud_t *pudp)
if (pgd_val(*pgdp) != _PAGE_INIT_VALID)
return;
trace_pt_update("Validating pud page at 0x%lx (pgd at 0x%lx = 0x%lx)\n",
pudp, pgdp, pgd_val(*pgdp));
for (i = 0; i < PTRS_PER_PUD; i++, pudp++) {
WARN_ON(pud_val(*pudp));
*pudp = __pud(_PAGE_INIT_VALID);
@ -385,6 +387,8 @@ pmd_page_validate(pud_t *pudp, pmd_t *pmdp)
if (pud_val(*pudp) != _PAGE_INIT_VALID)
return;
trace_pt_update("Validating pmd page at 0x%lx (pud at 0x%lx = 0x%lx)\n",
pmdp, pudp, pud_val(*pudp));
for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) {
WARN_ON(pmd_val(*pmdp));
*pmdp = __pmd(_PAGE_INIT_VALID);
@ -453,6 +457,8 @@ pte_page_validate(pmd_t *pmdp, pte_t *ptep)
if (pmd_val(*pmdp) != _PAGE_INIT_VALID)
return;
trace_pt_update("Validating pte page at 0x%lx (pmd at 0x%lx = 0x%lx)\n",
ptep, pmdp, pmd_val(*pmdp));
for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
*ptep = pte_mkvalid(*ptep);
}

View File

@ -30,7 +30,6 @@ typedef enum pt_atomic_op {
ATOMIC_GET_AND_XCHG,
ATOMIC_GET_AND_CLEAR,
ATOMIC_SET_WRPROTECT,
ATOMIC_MODIFY_START,
ATOMIC_TEST_AND_CLEAR_YOUNG,
ATOMIC_TEST_AND_CLEAR_RELAXED,
} pt_atomic_op_t;
@ -38,41 +37,50 @@ typedef enum pt_atomic_op {
static inline pgprotval_t
native_pt_set_wrprotect_atomic(pgprotval_t *pgprot)
{
return __api_atomic_op(_PAGE_INIT_WRITEABLE, pgprot, d,
"andnd", RELAXED_MB);
pgprotval_t newval = __api_atomic_op(_PAGE_INIT_WRITEABLE, pgprot,
d, "andnd", RELAXED_MB);
trace_pt_update("pt_set_wrprotect: entry at 0x%lx: -> 0x%lx\n",
pgprot, newval);
return newval;
}
static inline pgprotval_t
native_pt_get_and_clear_atomic(pgprotval_t *pgprot)
{
return __api_atomic_fetch_op(_PAGE_INIT_VALID, pgprot,
d, "andd", RELAXED_MB);
pgprotval_t oldval = __api_atomic_fetch_op(_PAGE_INIT_VALID, pgprot,
d, "andd", RELAXED_MB);
trace_pt_update("pt_get_and_clear: entry at 0x%lx: 0x%lx -> 0x%lx\n",
pgprot, oldval, oldval & _PAGE_INIT_VALID);
return oldval;
}
static inline pgprotval_t
native_pt_get_and_xchg_atomic(pgprotval_t newval, pgprotval_t *pgprot)
{
return __api_xchg_return(newval, pgprot, d, RELAXED_MB);
pgprotval_t oldval = __api_xchg_return(newval, pgprot, d, RELAXED_MB);
trace_pt_update("pt_get_and_xchg: entry at 0x%lx: 0x%lx -> 0x%lx\n",
pgprot, oldval, newval);
return oldval;
}
static inline pgprotval_t
native_pt_clear_relaxed_atomic(pgprotval_t mask, pgprotval_t *pgprot)
{
return __api_atomic_fetch_op(mask, pgprot, d, "andnd", RELAXED_MB);
pgprotval_t oldval = __api_atomic_fetch_op(mask, pgprot, d,
"andnd", RELAXED_MB);
trace_pt_update("pt_clear: entry at 0x%lx: 0x%lx -> 0x%lx\n",
pgprot, oldval, oldval & ~mask);
return oldval;
}
static inline pgprotval_t
native_pt_clear_young_atomic(pgprotval_t *pgprot)
{
return __api_atomic_fetch_op(_PAGE_INIT_ACCESSED, pgprot,
d, "andnd", RELAXED_MB);
}
static inline pgprotval_t
native_pt_modify_prot_atomic(pgprotval_t *pgprot)
{
return __api_atomic_fetch_op(_PAGE_INIT_VALID, pgprot,
d, "andd", RELAXED_MB);
pgprotval_t oldval = __api_atomic_fetch_op(_PAGE_INIT_ACCESSED, pgprot,
d, "andnd", RELAXED_MB);
trace_pt_update("pt_clear_young: entry at 0x%lx: 0x%lx -> 0x%lx\n",
pgprot, oldval, oldval & ~_PAGE_INIT_ACCESSED);
return oldval;
}
#if defined(CONFIG_KVM_GUEST_KERNEL)
@ -118,13 +126,6 @@ pt_clear_young_atomic(struct mm_struct *mm,
{
return native_pt_clear_young_atomic(&pgprot->pgprot);
}
static inline pgprotval_t
pt_modify_prot_atomic(struct mm_struct *mm,
unsigned long addr, pgprot_t *pgprot)
{
return native_pt_modify_prot_atomic(&pgprot->pgprot);
}
#endif /* CONFIG_KVM_GUEST_KERNEL */
#endif /* ! _E2K_PGATOMIC_H */
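The pattern above is uniform: every helper is an atomic fetch-op returning the previous entry value, which the new trace_pt_update() calls use to report both the old and the new state. A portable C11 sketch of the same shape (names hypothetical, standing in for the e2k __api_atomic helpers):

#include <stdatomic.h>
#include <stdint.h>

typedef uint64_t pgprotval_sketch_t;

/* Analogue of native_pt_clear_young_atomic(): atomically clear the
 * ACCESSED bit and return the prior value, so a tracepoint can log the
 * "old -> old & ~ACCESSED" transition. */
static inline pgprotval_sketch_t
pt_clear_young_sketch(_Atomic pgprotval_sketch_t *entry,
                      pgprotval_sketch_t accessed)
{
        return atomic_fetch_and_explicit(entry, ~accessed,
                                         memory_order_relaxed);
}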

View File

@ -39,15 +39,23 @@
#define set_pte(ptep, pteval) \
native_set_pte(ptep, pteval, false)
#define set_pte_at(mm, addr, ptep, pteval) \
native_set_pte(ptep, pteval, false)
do { \
trace_pt_update("set_pte_at: mm 0x%lx, addr 0x%lx, ptep 0x%lx, value 0x%lx\n", \
(mm), (addr), (ptep), pte_val(pteval)); \
native_set_pte(ptep, pteval, false); \
} while (0)
#define set_pte_not_present_at(mm, addr, ptep, pteval) \
native_set_pte(ptep, pteval, true)
#define set_pte_to_move_at(mm, addr, ptep, pteval) \
native_set_pte_to_move(ptep, pteval)
do { \
trace_pt_update("set_pte_not_present_at: mm 0x%lx, addr 0x%lx, ptep 0x%lx, value 0x%lx\n", \
(mm), (addr), (ptep), pte_val(pteval)); \
native_set_pte(ptep, pteval, true); \
} while (0)
#define validate_pte_at(mm, addr, ptep, pteval) \
native_set_pte_noflush(ptep, pteval)
#define ptep_get_and_clear_to_move(mm, addr, ptep) \
ptep_get_and_clear(mm, addr, ptep)
do { \
trace_pt_update("validate_pte_at: mm 0x%lx, addr 0x%lx, ptep 0x%lx, value 0x%lx\n", \
(mm), (addr), (ptep), pte_val(pteval)); \
native_set_pte_noflush(ptep, pteval); \
} while (0)
#define boot_set_pte_at(addr, ptep, pteval) \
native_set_pte(ptep, pteval, false)
#define boot_set_pte_kernel(addr, ptep, pteval) \
@ -59,15 +67,25 @@
({ \
(void)(mm); \
(void)(addr); \
trace_pt_update("set_pmd_at: mm 0x%lx, addr 0x%lx, pmdp 0x%lx, value 0x%lx\n", \
(mm), (addr), (pmdp), pmd_val(pmdval)); \
native_set_pmd(pmdp, pmdval); \
})
#define validate_pmd_at(mm, addr, pmdp, pmdval) \
native_set_pmd_noflush(pmdp, pmdval)
do { \
trace_pt_update("validate_pmd_at: mm 0x%lx, addr 0x%lx, pmdp 0x%lx, value 0x%lx\n", \
(mm), (addr), (pmdp), pmd_val(pmdval)); \
native_set_pmd_noflush(pmdp, pmdval); \
} while (0)
#define set_pud(pudp, pudval) \
native_set_pud(pudp, pudval)
#define set_pud_at(mm, addr, pudp, pudval) \
native_set_pud(pudp, pudval)
do { \
trace_pt_update("set_pud_at: mm 0x%lx, addr 0x%lx, pudp 0x%lx, value 0x%lx\n", \
(mm), (addr), (pudp), pud_val(pudval)); \
native_set_pud(pudp, pudval); \
} while (0)
#define validate_pud_at(mm, addr, pudp) \
set_pud_at(mm, addr, pudp, __pud(_PAGE_INIT_VALID))
#define invalidate_pud_at(mm, addr, pudp) \
@ -76,7 +94,11 @@
#define set_pgd(pgdp, pgdval) \
native_set_pgd(pgdp, pgdval)
#define set_pgd_at(mm, addr, pgdp, pgdval) \
native_set_pgd(pgdp, pgdval)
do { \
trace_pt_update("set_pgd_at: mm 0x%lx, addr 0x%lx, pgdp 0x%lx, value 0x%lx\n", \
(mm), (addr), (pgdp), pgd_val(pgdval)); \
native_set_pgd(pgdp, pgdval); \
} while (0)
#define validate_pgd_at(mm, addr, pgdp) \
set_pgd_at(mm, addr, pgdp, __pgd(_PAGE_INIT_VALID))
#define invalidate_pgd_at(mm, addr, pgdp) \
@ -107,8 +129,7 @@
#define pmd_clear(pmdp) \
do { \
u64 __pmdval; \
__pmdval = (test_ts_flag(TS_KEEP_PAGES_VALID)) ? \
_PAGE_INIT_VALID : 0UL; \
__pmdval = _PAGE_INIT_VALID; \
native_set_pmd(pmdp, __pmd(__pmdval)); \
} while (0)
@ -143,8 +164,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
PAGE_USER_PMD))
static inline void pud_clear(pud_t *pud)
{
pud_val(*pud) = (test_ts_flag(TS_KEEP_PAGES_VALID)) ?
_PAGE_INIT_VALID : 0UL;
pud_val(*pud) = _PAGE_INIT_VALID;
}
#define boot_pud_set_k(pudp, pmdp) \
@ -174,8 +194,7 @@ static void inline pgd_set_k(pgd_t *pgdp, pud_t *pudp)
PAGE_USER_PUD))
static inline void pgd_clear_one(pgd_t *pgd)
{
pgd_val(*pgd) = (test_ts_flag(TS_KEEP_PAGES_VALID)) ?
_PAGE_INIT_VALID : 0UL;
pgd_val(*pgd) = _PAGE_INIT_VALID;
}
@ -266,11 +285,6 @@ static __always_inline void native_set_pte(pte_t *ptep, pte_t pteval,
}
}
static inline void native_set_pte_to_move(pte_t *ptep, pte_t pteval)
{
native_set_pte(ptep, pteval, false);
}
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
int have_flush_dc_ic = cpu_has(CPU_FEAT_FLUSH_DC_IC);
@ -303,7 +317,6 @@ static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgdval)
}
#else
# define native_set_pte(ptep, pteval, known_not_present) (*(ptep) = (pteval))
# define native_set_pte_to_move(ptep, pteval) native_set_pte(ptep, pteval, false)
# define native_set_pmd(pmdp, pmdval) (*(pmdp) = (pmdval))
# define native_set_pud(pudp, pudval) (*(pudp) = (pudval))
# define native_set_pgd(pgdp, pgdval) (*(pgdp) = (pgdval))
@ -390,21 +403,6 @@ static inline void untrack_pfn_moved(struct vm_area_struct *vma)
/* 0x0000 f800 0000 0000 - for 64 bytes struct page */
/* 0x0000 fc00 0000 0000 - for 128 bytes struct page */
#ifdef CONFIG_SMP
static inline void
ptep_wrprotect_atomic(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pt_set_wrprotect_atomic(mm, addr, (pgprot_t *)ptep);
}
#else /* ! CONFIG_SMP */
static inline void
ptep_wrprotect_atomic(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
}
#endif /* CONFIG_SMP */
/*
* The module space starts from end of resident kernel image and
* both areas should be within 2 ** 30 bits of the virtual addresses.
@ -418,8 +416,7 @@ ptep_wrprotect_atomic(struct mm_struct *mm,
#define pte_clear_not_present_full(mm, addr, ptep, fullmm) \
do { \
u64 __pteval; \
__pteval = (test_ts_flag(TS_KEEP_PAGES_VALID)) ? \
_PAGE_INIT_VALID : 0UL; \
__pteval = _PAGE_INIT_VALID; \
set_pte_not_present_at(mm, addr, ptep, __pte(__pteval)); \
} while (0)
@ -427,111 +424,51 @@ do { \
#define pte_clear(mm, addr, ptep) \
do { \
u64 __pteval; \
__pteval = (test_ts_flag(TS_KEEP_PAGES_VALID)) ? \
_PAGE_INIT_VALID : 0UL; \
__pteval = _PAGE_INIT_VALID; \
set_pte_at(mm, addr, ptep, __pte(__pteval)); \
} while (0)
#if !defined(CONFIG_BOOT_E2K) && !defined(E2K_P2V)
static inline pte_t
do_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
int have_flush_dc_ic = cpu_has(CPU_FEAT_FLUSH_DC_IC);
int mm_users;
int mm_users = atomic_read(&mm->mm_users);
pte_t oldpte;
prefetch_offset(ptep, PREFETCH_STRIDE);
# ifdef CONFIG_SMP
u64 newval;
newval = (test_ts_flag(TS_KEEP_PAGES_VALID)) ?
_PAGE_INIT_VALID : 0UL;
oldpte = __pte(pt_get_and_xchg_atomic(mm, addr, newval,
(pgprot_t *)ptep));
# else
oldpte = *ptep;
pte_clear(mm, addr, ptep);
# endif
if (likely(mm != NULL))
mm_users = (atomic_read(&mm->mm_users) != 0);
else
/* kernel or guest process: users exist always */
mm_users = true;
if (mm == &init_mm) {
/* In the kernel there is no swap or THP and a valid page
* is always mapped, so do not keep the valid bit.
* This is important because in the kernel we cannot
* tolerate spurious page faults from h.-s. loads. */
oldpte = __pte(pt_get_and_xchg_atomic(mm, addr, 0ull, (pgprot_t *) ptep));
} else {
oldpte = __pte(pt_get_and_clear_atomic(mm, addr, (pgprot_t *) ptep));
}
/* mm_users check is for the fork() case: we do not
* want to spend time flushing when we are exiting. */
if (have_flush_dc_ic && mm_users && pte_present_and_exec(oldpte))
flush_pte_from_ic(oldpte);
return oldpte;
}
static inline pte_t
do_ptep_get_and_clear_as_valid(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
int have_flush_dc_ic = cpu_has(CPU_FEAT_FLUSH_DC_IC);
pte_t oldpte;
prefetch_offset(ptep, PREFETCH_STRIDE);
# ifdef CONFIG_SMP
oldpte = __pte(pt_get_and_clear_atomic(mm, addr, (pgprot_t *)ptep));
# else
oldpte = *ptep;
pte_val(*ptep) &= _PAGE_INIT_VALID;
# endif
if (have_flush_dc_ic && pte_present_and_exec(oldpte))
if (have_flush_dc_ic && mm_users != 0 && pte_present_and_exec(oldpte))
flush_pte_from_ic(oldpte);
return oldpte;
}
#else
static inline pte_t
do_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
prefetch_offset(ptep, PREFETCH_STRIDE);
# ifdef CONFIG_SMP
return __pte(pt_get_and_xchg_atomic(mm, addr, 0UL, (pgprot_t *)ptep));
# else
pte_t pte = *ptep;
pte_clear(mm, addr, ptep);
return pte;
# endif
}
static inline pte_t
do_ptep_get_and_clear_as_valid(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t oldpte;
prefetch_offset(ptep, PREFETCH_STRIDE);
# ifdef CONFIG_SMP
oldpte = __pte(pt_get_and_clear_atomic(mm, addr, (pgprot_t *)ptep));
# else
oldpte = *ptep;
pte_val(*ptep) &= _PAGE_INIT_VALID;
# endif
return oldpte;
if (mm == &init_mm) {
/* In the kernel there is no swap or THP and a valid page
* is always mapped, so do not keep the valid bit. */
return __pte(pt_get_and_xchg_atomic(mm, addr, 0ull, (pgprot_t *) ptep));
} else {
return __pte(pt_get_and_clear_atomic(mm, addr, (pgprot_t *) ptep));
}
}
#endif
static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
return do_ptep_get_and_clear(mm, addr, ptep);
}
static inline pte_t
ptep_get_and_clear_as_valid(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
return do_ptep_get_and_clear_as_valid(mm, addr, ptep);
}
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
# define vmemmap ((struct page *)VMEMMAP_START)
#endif
@ -753,18 +690,6 @@ native_do_get_pte_for_address(struct vm_area_struct *vma, e2k_addr_t address)
}
}
#ifdef CONFIG_SMP
static inline int
test_and_clear_relaxed(pgprotval_t mask, pgprot_t *addr)
{
pgprotval_t retval;
retval = pt_clear_relaxed_atomic(mask, addr);
return (retval & mask) != 0;
}
#endif /* CONFIG_SMP */
static inline int
ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep)
@ -772,88 +697,62 @@ ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
pte_t pte;
prefetch_offset(ptep, PREFETCH_STRIDE);
#ifdef CONFIG_SMP
pte_val(pte) = pt_clear_young_atomic(vma->vm_mm, addr,
(pgprot_t *)ptep);
return pte_young(pte);
#else
pte = *ptep;
if (!pte_young(pte))
return 0;
set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
return 1;
#endif
}
static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
prefetch_offset(ptep, PREFETCH_STRIDE);
#ifdef CONFIG_SMP
ptep_wrprotect_atomic(mm, addr, ptep);
#else
pte_t pte = *ptep;
pte = pte_wrprotect(pte);
set_pte_at(mm, addr, ptep, pte);
#endif
pt_set_wrprotect_atomic(mm, addr, (pgprot_t *) ptep);
}
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty);
#ifdef CONFIG_MAKE_ALL_PAGES_VALID
# define ptep_clear_flush_as_valid(__vma, __address, __ptep) \
({ \
pte_t __pte; \
__pte = ptep_get_and_clear_as_valid((__vma)->vm_mm, __address, __ptep);\
flush_tlb_page(__vma, __address); \
__pte; \
})
#endif /* CONFIG_MAKE_ALL_PAGES_VALID */
#define pgd_addr_bound(addr) (((addr) + PGDIR_SIZE) & PGDIR_MASK)
#define pud_addr_bound(addr) (((addr) + PUD_SIZE) & PUD_MASK)
#define pmd_addr_bound(addr) (((addr) + PMD_SIZE) & PMD_MASK)
#if defined CONFIG_TRANSPARENT_HUGEPAGE && defined CONFIG_MAKE_ALL_PAGES_VALID
# define pmdp_collapse_flush pmdp_collapse_flush
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
#if defined CONFIG_TRANSPARENT_HUGEPAGE
# if !defined(CONFIG_BOOT_E2K) && !defined(E2K_P2V)
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
# ifdef CONFIG_SMP
u64 newval;
int have_flush_dc_ic = cpu_has(CPU_FEAT_FLUSH_DC_IC);
int mm_users = atomic_read(&mm->mm_users);
pmd_t oldpmd;
newval = (test_ts_flag(TS_KEEP_PAGES_VALID)) ?
_PAGE_INIT_VALID : 0UL;
if (mm == &init_mm) {
/* See comment in ptep_get_and_clear() */
oldpmd = __pmd(pt_get_and_xchg_atomic(mm, addr, 0ull, (pgprot_t *) pmdp));
} else {
oldpmd = __pmd(pt_get_and_clear_atomic(mm, addr, (pgprot_t *) pmdp));
}
return __pmd(pt_get_and_xchg_atomic(mm, addr, newval,
(pgprot_t *)pmdp));
# else
pmd_t pmd = *pmdp;
pmd_clear(pmdp);
return pmd;
# endif
/* mm_users check is for the fork() case: we do not
* want to spend time flushing when we are exiting. */
if (have_flush_dc_ic && mm_users != 0 &&
pmd_present_and_exec_and_huge(oldpmd))
flush_pmd_from_ic(oldpmd);
return oldpmd;
}
static inline pmd_t pmdp_huge_get_and_clear_as_valid(struct mm_struct *mm,
# else
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
# ifdef CONFIG_SMP
return __pmd(pt_get_and_xchg_atomic(mm, addr, _PAGE_INIT_VALID,
(pgprot_t *)pmdp));
# else
pmd_t pmd = *pmdp;
set_pmd_at(mm, addr, pmdp, __pmd(_PAGE_INIT_VALID));
return pmd;
# endif
if (mm == &init_mm) {
/* See comment in ptep_get_and_clear() */
return __pmd(pt_get_and_xchg_atomic(mm, addr, 0ull, (pgprot_t *) pmdp));
} else {
return __pmd(pt_get_and_clear_atomic(mm, addr, (pgprot_t *) pmdp));
}
}
# endif
#endif
/* interface functions to handle some things on the PT level */
@ -872,90 +771,27 @@ extern void memmap_init(unsigned long size, int nid, unsigned long zone,
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
#define __HAVE_ARCH_PMD_WRITE
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
#define __HAVE_ARCH_MEMMAP_INIT
#define __HAVE_PFNMAP_TRACKING
#include <asm-generic/pgtable.h>
typedef enum pte_cmp {
PTE_SAME_CMP,
PTE_CHANGE_PFN_CMP,
PTE_CHANGE_PROTECTS_CMP,
PTE_CHANGE_FLAGS_CMP,
PTE_CHANGE_FLAGS_AND_PROTECTS_CMP,
} pte_cmp_t;
static inline pte_cmp_t pte_compare(pte_t src_pte, pte_t dst_pte)
{
pteval_t src_pte_flags;
pteval_t dst_pte_flags;
pteval_t src_pte_protects;
pteval_t dst_pte_protects;
if (pte_same(src_pte, dst_pte))
return PTE_SAME_CMP;
if (pte_pfn(src_pte) != pte_pfn(dst_pte))
return PTE_CHANGE_PFN_CMP;
src_pte_flags = pte_only_flags(src_pte);
dst_pte_flags = pte_only_flags(dst_pte);
src_pte_protects = pte_only_protects(src_pte);
dst_pte_protects = pte_only_protects(dst_pte);
if (src_pte_flags == dst_pte_flags) {
if (src_pte_protects == dst_pte_protects)
return PTE_SAME_CMP;
else
return PTE_CHANGE_PROTECTS_CMP;
} else if (src_pte_protects == dst_pte_protects) {
return PTE_CHANGE_FLAGS_CMP;
} else {
return PTE_CHANGE_FLAGS_AND_PROTECTS_CMP;
}
}
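A hypothetical caller for the new comparator; whether a flags-only change may really skip the TLB flush is an assumption made for illustration, not something this diff establishes:

static inline void update_pte_sketch(struct vm_area_struct *vma,
                unsigned long addr, pte_t old_pte, pte_t new_pte)
{
        switch (pte_compare(old_pte, new_pte)) {
        case PTE_SAME_CMP:
        case PTE_CHANGE_FLAGS_CMP:
                /* assumed: software-only difference, no flush needed */
                break;
        default:
                /* PFN or protections changed: drop the stale translation */
                flush_tlb_page(vma, addr);
                break;
        }
}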
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep)
{
return __pte(pt_modify_prot_atomic(vma->vm_mm, addr, (pgprot_t *)ptep));
}
static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep, pte_t old_pte, pte_t pte)
{
__ptep_modify_prot_commit(vma, addr, ptep, pte);
}
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
pmd_t pmd;
#ifdef CONFIG_SMP
pmd_val(pmd) = pt_clear_young_atomic(vma->vm_mm, addr,
(pgprot_t *)pmdp);
return pmd_young(pmd);
#else
pmd = *pmdp;
if (!pmd_young(pmd))
return 0;
set_pmd_at(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
return 1;
#endif
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

View File

@ -23,6 +23,17 @@
#include <asm/pgtable-v6.h>
#include <asm/p2v/boot_v2p.h>
#define TRACE_PT_UPDATES 0
#if TRACE_PT_UPDATES
# define trace_pt_update(...) \
do { \
if (system_state == SYSTEM_RUNNING) \
trace_printk(__VA_ARGS__); \
} while (0)
#else
# define trace_pt_update(...)
#endif
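Usage note: trace_pt_update() compiles to nothing unless TRACE_PT_UPDATES is flipped to 1, in which case every call site becomes a trace_printk() once the system reaches SYSTEM_RUNNING. A representative call site from later in this commit:

trace_pt_update("set_pte_at: mm 0x%lx, addr 0x%lx, ptep 0x%lx, value 0x%lx\n",
                mm, addr, ptep, pte_val(pteval));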
#ifndef __ASSEMBLY__
/* max. number of physical address bits (architected) */
@ -1076,6 +1087,7 @@ static inline int has_transparent_hugepage(void)
UNI_PAGE_PRESENT | UNI_PAGE_PROTNONE)))
#define pmd_mknot_present_valid(pmd) (__pmd(_PAGE_CLEAR(pmd_val(pmd), \
UNI_PAGE_PRESENT | UNI_PAGE_PROTNONE | UNI_PAGE_VALID)))
#define pmd_mknotvalid(pmd) (__pmd(_PAGE_CLEAR_VALID(pmd_val(pmd))))
#define pmd_mkold(pmd) (__pmd(_PAGE_CLEAR_ACCESSED(pmd_val(pmd))))
#define pmd_mkyoung(pmd) (__pmd(_PAGE_SET_ACCESSED(pmd_val(pmd))))
#define pmd_mkclean(pmd) (__pmd(_PAGE_CLEAR_DIRTY(pmd_val(pmd))))
@ -1144,6 +1156,7 @@ static inline int pud_bad(pud_t pud)
#define pud_mknotpresent(pud) (__pud(_PAGE_CLEAR_PRESENT(pud_val(pud))))
#define pud_mknot_present_valid(pud) (__pud(_PAGE_CLEAR(pud_val(pud), \
UNI_PAGE_PRESENT | UNI_PAGE_VALID)))
#define pud_mknotvalid(pud) (__pud(_PAGE_CLEAR_VALID(pud_val(pud))))
static inline pud_t pud_mk_wb(pud_t pud)
{
if (is_mt_external(_PAGE_GET_MEM_TYPE(pud_val(pud))))
@ -1173,6 +1186,7 @@ static inline pud_t pud_mk_uc(pud_t pud)
#endif
#define pgd_none_full(pgd) (!pgd_val(pgd))
#define pgd_valid(pgd) _PAGE_TEST_VALID(pgd_val(pgd))
#define pgd_mknotvalid(pgd) (__pgd(_PAGE_CLEAR_VALID(pgd_val(pgd))))
static inline int pgd_bad(pgd_t pgd)
{

View File

@ -191,7 +191,7 @@ unsigned long e2k_ptr_curptr(long low, long hiw)
}
static inline
unsigned long e2k_ptr_size(long low, long hiw, unsigned int min_size)
unsigned int e2k_ptr_size(long low, long hiw, unsigned int min_size)
{
e2k_ptr_hi_t hi;
unsigned int ptr_size;

View File

@ -1080,7 +1080,7 @@ static inline void
NATIVE_SAVE_TASK_REGS_TO_SWITCH(struct task_struct *task)
{
#ifdef CONFIG_VIRTUALIZATION
const int task_is_binco = TASK_IS_BINCO(task) || task_thread_info(task)->vcpu;
const int task_is_binco = TASK_IS_BINCO(task) || task_thread_info(task)->virt_machine;
#else
const int task_is_binco = TASK_IS_BINCO(task);
#endif
@ -1170,7 +1170,7 @@ NATIVE_RESTORE_TASK_REGS_TO_SWITCH(struct task_struct *task,
u64 pcsp_hi = AS_WORD(sw_regs->pcsp_hi);
e2k_mem_crs_t crs = sw_regs->crs;
#ifdef CONFIG_VIRTUALIZATION
const int task_is_binco = TASK_IS_BINCO(task) || ti->vcpu;
const int task_is_binco = TASK_IS_BINCO(task) || ti->virt_machine;
#else
const int task_is_binco = TASK_IS_BINCO(task);
#endif
@ -1271,12 +1271,9 @@ NATIVE_SWITCH_TO_KERNEL_STACK(e2k_addr_t ps_base, e2k_size_t ps_size,
} while(GET_NR_TIRS(TIR_hi)); \
TIRs_num = nr_TIRs; \
\
/* un-freeze the TIR's LIFO */ \
UNFREEZE_TIRs(TIR_lo); \
\
all_interrupts & (exc_all_mask | aau_exc_mask); \
})
#define UNFREEZE_TIRs(TIR_lo) NATIVE_WRITE_TIR_LO_REG_VALUE(TIR_lo)
#define UNFREEZE_TIRs() NATIVE_WRITE_TIR_LO_REG_VALUE(0)
#define SAVE_SBBP(sbbp) \
do { \
int i; \

View File

@ -228,7 +228,7 @@ static inline void *_memcpy(void *__restrict dst,
*(u8 *) (dst + (n & ~0x1UL)) =
*(u8 *) (src + (n & ~0x1UL));
} else {
E2K_PREFETCH_L2(src);
E2K_PREFETCH_L2_SPEC(src);
__memcpy(dst, src, n);
}

View File

@ -118,7 +118,7 @@ extern long protected_sys_futex(const unsigned long __user uaddr,
const unsigned long __user uaddr2,
const unsigned long val3,
const struct pt_regs *regs);
extern long protected_sys_getgroups(const unsigned long a1, /* size */
extern long protected_sys_getgroups(const long a1, /* size */
const unsigned long __user a2, /* list[] */
const unsigned long unused3,
const unsigned long unused4,
@ -256,13 +256,13 @@ extern long protected_sys_olduselib(const unsigned long __user a1, /* library */
/* NB> 'olduselib' is obsolete syscall; unsupported in CPU ISET V6 */
extern long protected_sys_uselib(const unsigned long __user a1, /* library */
const unsigned long __user a2); /* umdd */
extern long protected_sys_sigaltstack(const unsigned long __user a1, /* ss */
const unsigned long __user a2, /* oss */
const unsigned long unused3,
const unsigned long unused4,
const unsigned long unused5,
const unsigned long unused6,
const struct pt_regs *regs);
extern long protected_sys_sigaltstack(const stack_prot_t __user *ss_128,
stack_prot_t __user *old_ss_128,
const unsigned long unused3,
const unsigned long unused4,
const unsigned long unused5,
const unsigned long unused6,
const struct pt_regs *regs);
extern long protected_sys_unuselib(const unsigned long __user a1, /* addr */
const unsigned long a2,
const unsigned long a3,

View File

@ -294,7 +294,6 @@ typedef struct thread_info {
* have to worry about atomic accesses.
*/
#define TS_DELAYED_SIG_HANDLING 0x00000001
#define TS_KEEP_PAGES_VALID 0x00000002
#define TS_MMAP_PRIVILEGED 0x00000004
#define TS_MMAP_PS 0x00000008
#define TS_MMAP_PCS 0x00000010
@ -307,7 +306,8 @@ typedef struct thread_info {
* and wait for interception (trap on PV mode) */
#define TS_HOST_AT_VCPU_MODE 0x00001000
#define THREAD_SIZE KERNEL_STACKS_SIZE
#define THREAD_SIZE KERNEL_STACKS_SIZE
#define THREAD_SIZE_ORDER order_base_2(KERNEL_STACKS_SIZE / PAGE_SIZE)
#ifndef __ASSEMBLY__

View File

@ -295,6 +295,64 @@ TRACE_EVENT(
)
);
/* How many last IPs are saved in hardware TIR_lo trace for debugging */
#define TIR_HW_TRACE_LENGTH 512
/* How many IPs to save to the ring buffer in one event. Limited because:
* 1) Ring buffer internals assume that events are small.
* 2) When dumping events with ftrace_dump_on_oops we are limited
* by printk(), which outputs at most ~1000 characters (LOG_LINE_MAX). */
#define TIR_TRACE_LENGTH 16
#define TIR_TRACE_PARTS 32
/* Output the last IPs executed before a trap, _excluding_
* regions that executed with frozen TIRs (i.e. the trap
* entry path up to the UNFREEZE_TIRs() call). */
TRACE_EVENT(
tir_ip_trace,
TP_PROTO(int part),
TP_ARGS(part),
TP_STRUCT__entry(
__field(int, part)
__array(void *, ip, TIR_TRACE_LENGTH)
),
TP_fast_assign(
int i;
BUILD_BUG_ON(TIR_TRACE_PARTS * TIR_TRACE_LENGTH != TIR_HW_TRACE_LENGTH);
BUG_ON(part < 1 || part > TIR_TRACE_PARTS);
__entry->part = part;
for (i = 0; i < TIR_TRACE_LENGTH; i++) {
e2k_tir_lo_t tir_lo;
/* Read additional debug TIRs */
NATIVE_READ_TIR_HI_REG();
tir_lo = NATIVE_READ_TIR_LO_REG();
__entry->ip[i] = (void *) tir_lo.TIR_lo_ip;
}
/* For TP_printk below */
BUILD_BUG_ON(TIR_TRACE_LENGTH != 16);
),
TP_printk("last %d IPs (part %d/%d):\n"
" %pS %pS %pS %pS\n"
" %pS %pS %pS %pS\n"
" %pS %pS %pS %pS\n"
" %pS %pS %pS %pS\n",
TIR_TRACE_LENGTH * TIR_TRACE_PARTS, __entry->part, TIR_TRACE_PARTS,
__entry->ip[0], __entry->ip[1], __entry->ip[2], __entry->ip[3],
__entry->ip[4], __entry->ip[5], __entry->ip[6], __entry->ip[7],
__entry->ip[8], __entry->ip[9], __entry->ip[10], __entry->ip[11],
__entry->ip[12], __entry->ip[13], __entry->ip[14], __entry->ip[15]
)
);
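Given the BUILD_BUG_ON above (32 parts of 16 IPs covering the 512-entry hardware trace), the natural emitter is a loop over all parts; a hypothetical sketch of the call site (the real one lives in the trap-entry code, outside this diff):

static void dump_tir_trace_sketch(void)
{
        int part;

        /* Drain the 512-IP hardware trace as 32 small ring-buffer events;
         * trace_tir_ip_trace() is the function TRACE_EVENT() generates. */
        for (part = 1; part <= TIR_TRACE_PARTS; part++)
                trace_tir_ip_trace(part);
}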
#endif /* _TRACE_E2K_H */

View File

@ -1,78 +0,0 @@
/* Generic MTRR (Memory Type Range Register) ioctls.
Copyright (C) 1997-1999 Richard Gooch
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with this library; if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Richard Gooch may be reached by email at rgooch@atnf.csiro.au
The postal address is:
Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
*/
#ifndef _UAPI_LINUX_MTRR_H
#define _UAPI_LINUX_MTRR_H
#include <linux/ioctl.h>
#define MTRR_IOCTL_BASE 'M'
struct mtrr_sentry {
unsigned long base; /* Base address */
unsigned long size; /* Size of region */
unsigned int type; /* Type of region */
};
struct mtrr_gentry {
unsigned int regnum; /* Register number */
unsigned long base; /* Base address */
unsigned long size; /* Size of region */
unsigned int type; /* Type of region */
};
/* These are the various ioctls */
#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
/* These are the region types */
#define MTRR_TYPE_UNCACHABLE 0
#define MTRR_TYPE_WRCOMB 1
/*#define MTRR_TYPE_ 2*/
/*#define MTRR_TYPE_ 3*/
#define MTRR_TYPE_WRTHROUGH 4
#define MTRR_TYPE_WRPROT 5
#define MTRR_TYPE_WRBACK 6
#define MTRR_NUM_TYPES 7
#ifdef MTRR_NEED_STRINGS
static char *mtrr_strings[MTRR_NUM_TYPES] = {
"uncachable", /* 0 */
"write-combining", /* 1 */
"?", /* 2 */
"?", /* 3 */
"write-through", /* 4 */
"write-protect", /* 5 */
"write-back", /* 6 */
};
#endif
#endif /* _UAPI_LINUX_MTRR_H */

View File

@ -1,7 +0,0 @@
#ifndef _ELMAC_H
#define _ELMAC_H
#define __EXPORTED_HEADERS__
#include <uapi/linux/elmac.h>
#endif /* _ELMAC_H */

View File

@ -96,8 +96,9 @@ struct iommu_domain {
void *iova_cookie;
#ifdef CONFIG_MCST /* support CPU_HWBUG_CANNOT_DO_DMA_IN_NEIGHBOUR_NODE*/
unsigned long map_base;
struct idr idr_lo;
unsigned long *orig_phys_lo;
struct idr idr_hi;
rwlock_t lock_hi;
#endif
};

View File

@ -1,29 +0,0 @@
/*
* Supported by Alexey V. Sitnikov, alexmipt@mcst.ru, MCST
*
*/
#include <linux/mcst/ddi.h>
struct pci_dev_info {
char *prom_name;
unsigned short vendor;
unsigned short device;
char *options;
int args[2];
};
#define PCI_VENDOR_ID_MCST 0x5453
#define PCI_DEVICE_ID_MCST_PDC 0x4350
#define PCI_DEVICE_ID_MCST_MPV 0x4360
#define MCST_DEVICE_DRIVERS \
{ "MCST,mvp", 0, 0, "mvp-parity,mvp-polar,", {0, 0} }, \
{ "MCST,mbkp1", 0, 0 }, \
{ "MCST,mbkp2", 0, 0 }, \
{ "MCST,pidc", PCI_VENDOR_ID_MCST, PCI_DEVICE_ID_MCST_PDC }, \
{ "MCST,mpv", PCI_VENDOR_ID_MCST, PCI_DEVICE_ID_MCST_MPV }, \
{ "MCST,FOO", 0, 0 }, \
{ "MCST,FOO", 0, 0 },

View File

@ -431,7 +431,7 @@ enum {
#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_XTENSA (EM_XTENSA)
/* CONFIG_E2K */
#define AUDIT_ARCH_E2K (EM_MCST_ELBRUS)
#define AUDIT_ARCH_E2K (EM_MCST_ELBRUS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_PERM_EXEC 1
#define AUDIT_PERM_WRITE 2
#define AUDIT_PERM_READ 4

View File

@ -1,90 +0,0 @@
/*
* Elbrus MAC Kernel (elmac) security module
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*/
#ifndef _UAPI_LINUX_ELMAC_H
#define _UAPI_LINUX_ELMAC_H
#include <linux/capability.h>
#ifndef PACKED
#define EL_PACKED __attribute__((aligned(1), packed))
#else
#define EL_PACKED PACKED
#endif
/*
* elmac label
*/
typedef struct _el_mac_label {
__u8 level;
__u64 category;
} EL_PACKED elmac_label_t;
/*
* elmac context
*/
typedef struct _el_mac_context {
__u32 attr;
elmac_label_t mac;
} EL_PACKED elmac_context_t;
#define ELMAC_LABEL_LEV(label) ((label).level)
#define ELMAC_LABEL_LCAT(label) ((label).category)
#define ELMAC_CONTEXT_LEV(context) ((context).mac.level)
#define ELMAC_CONTEXT_CAT(context) ((context).mac.category)
#define ELMAC_CONTEXT_ATTR(context) ((context).attr)
#define ELMAC_CONTEXT_ATTR_CHECK(context, a) \
((context).attr & ELMAC_ATTR_##a)
/*
* Objects special attr
*/
#define ELMAC_ATTR_IGNORER_CAT 0x00000001
#define ELMAC_ATTR_IGNOREW_CAT 0x00000002
#define ELMAC_ATTR_IGNOREX_CAT 0x00000004
#define ELMAC_ATTR_IGNORER_LVL 0x00000008
#define ELMAC_ATTR_IGNOREW_LVL 0x00000010
#define ELMAC_ATTR_IGNOREX_LVL 0x00000020
#define ELMAC_ATTR_IGNORER ((ELMAC_ATTR_IGNORER_CAT) | \
(ELMAC_ATTR_IGNORER_LVL))
#define ELMAC_ATTR_IGNOREW ((ELMAC_ATTR_IGNOREW_CAT) | \
(ELMAC_ATTR_IGNOREW_LVL))
#define ELMAC_ATTR_IGNOREX ((ELMAC_ATTR_IGNOREX_CAT) | \
(ELMAC_ATTR_IGNOREX_LVL))
#define ELMAC_ATTR_IGNORE ((ELMAC_ATTR_IGNORER) | (ELMAC_ATTR_IGNOREW) | \
(ELMAC_ATTR_IGNOREX))
/*
* Subjects special attr
*/
#define ELMAC_ATTR_PRIVSOCK 0x00000040
#define ELMAC_ATTR_CAP_READ_SEARCH 0x00000080
#define ELMAC_MAX_LEVEL 255
#define ELMAC_MAX_CATEGORY ((__u64)(-1))
#define ELMAC_LABEL_MAXLEN sizeof(elmac_label_t)
#define ELMAC_LABEL_STRLEN (ELMAC_LABEL_MAXLEN + 1)
#define ELMAC_CONTEXT_MAXLEN sizeof(elmac_context_t)
#define ELMAC_IPSO_OPTION "-IPSO"
#ifdef __KERNEL__
#ifdef CONFIG_MCST_SECURITY_ELMAC
extern bool elmac_is_enabled(void);
#else
static inline bool elmac_is_enabled(void)
{
return false;
}
#endif /* CONFIG_MCST_SECURITY_ELMAC */
#endif /* __KERNEL__ */
#endif /* _UAPI_LINUX_ELMAC_H */

View File