#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <linux/stringify.h>
#include <asm/page.h>

/* These are the most performance critical ops, so we want to be able to patch
 * callers */
#define PARAVIRT_IRQ_DISABLE 0
#define PARAVIRT_IRQ_ENABLE 1
#define PARAVIRT_RESTORE_FLAGS 2
#define PARAVIRT_SAVE_FLAGS 3
#define PARAVIRT_SAVE_FLAGS_IRQ_DISABLE 4
#define PARAVIRT_INTERRUPT_RETURN 5
#define PARAVIRT_STI_SYSEXIT 6

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0x0
#define CLBR_EAX 0x1
#define CLBR_ECX 0x2
#define CLBR_EDX 0x4
#define CLBR_ANY 0x7

#ifndef __ASSEMBLY__
#include <linux/types.h>

struct thread_struct;
struct Xgt_desc_struct;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct paravirt_ops
{
        unsigned int kernel_rpl;
        int shared_kernel_pmd;
        int paravirt_enabled;
        const char *name;

        /*
         * Patch may replace one of the defined code sequences with arbitrary
         * code, subject to the same register constraints. This generally
         * means the code is not free to clobber any registers other than EAX.
         * The patch function should return the number of bytes of code
         * generated, as we nop pad the rest in generic code.
         */
        unsigned (*patch)(u8 type, u16 clobber, void *firstinsn, unsigned len);

        void (*arch_setup)(void);
        char *(*memory_setup)(void);
        void (*init_IRQ)(void);

        void (*pagetable_setup_start)(pgd_t *pgd_base);
        void (*pagetable_setup_done)(pgd_t *pgd_base);

        void (*banner)(void);

        unsigned long (*get_wallclock)(void);
        int (*set_wallclock)(unsigned long);
        void (*time_init)(void);

        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx);

        unsigned long (*get_debugreg)(int regno);
        void (*set_debugreg)(int regno, unsigned long value);

        void (*clts)(void);

        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);

        unsigned long (*read_cr2)(void);
        void (*write_cr2)(unsigned long);

        unsigned long (*read_cr3)(void);
        void (*write_cr3)(unsigned long);

        unsigned long (*read_cr4_safe)(void);
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);

        unsigned long (*save_fl)(void);
        void (*restore_fl)(unsigned long);
        void (*irq_disable)(void);
        void (*irq_enable)(void);
        void (*safe_halt)(void);
        void (*halt)(void);
        void (*wbinvd)(void);

        /* err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
        u64 (*read_msr)(unsigned int msr, int *err);
        int (*write_msr)(unsigned int msr, u64 val);

        u64 (*read_tsc)(void);
        u64 (*read_pmc)(void);
        u64 (*get_scheduled_cycles)(void);
        unsigned long (*get_cpu_khz)(void);

        void (*load_tr_desc)(void);
        void (*load_gdt)(const struct Xgt_desc_struct *);
        void (*load_idt)(const struct Xgt_desc_struct *);
        void (*store_gdt)(struct Xgt_desc_struct *);
        void (*store_idt)(struct Xgt_desc_struct *);
        void (*set_ldt)(const void *desc, unsigned entries);
        unsigned long (*store_tr)(void);
        void (*load_tls)(struct thread_struct *t, unsigned int cpu);
        void (*write_ldt_entry)(struct desc_struct *,
                                int entrynum, u32 low, u32 high);
        void (*write_gdt_entry)(struct desc_struct *,
                                int entrynum, u32 low, u32 high);
        void (*write_idt_entry)(struct desc_struct *,
                                int entrynum, u32 low, u32 high);
        void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t);

        void (*set_iopl_mask)(unsigned mask);

        void (*io_delay)(void);

        void (*activate_mm)(struct mm_struct *prev,
                            struct mm_struct *next);
        void (*dup_mmap)(struct mm_struct *oldmm,
                         struct mm_struct *mm);
        void (*exit_mmap)(struct mm_struct *mm);

#ifdef CONFIG_X86_LOCAL_APIC
        void (*apic_write)(unsigned long reg, unsigned long v);
        void (*apic_write_atomic)(unsigned long reg, unsigned long v);
        unsigned long (*apic_read)(unsigned long reg);
        void (*setup_boot_clock)(void);
        void (*setup_secondary_clock)(void);
#endif

        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(u32 addr);

        void (*map_pt_hook)(int type, pte_t *va, u32 pfn);

        void (*alloc_pt)(u32 pfn);
        void (*alloc_pd)(u32 pfn);
        void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
        void (*release_pt)(u32 pfn);
        void (*release_pd)(u32 pfn);

        void (*set_pte)(pte_t *ptep, pte_t pteval);
        void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval);
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
        void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
        void (*pte_update_defer)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);

        pte_t (*ptep_get_and_clear)(pte_t *ptep);

#ifdef CONFIG_X86_PAE
        void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
        void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
        void (*set_pud)(pud_t *pudp, pud_t pudval);
        void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
        void (*pmd_clear)(pmd_t *pmdp);

        unsigned long long (*pte_val)(pte_t);
        unsigned long long (*pmd_val)(pmd_t);
        unsigned long long (*pgd_val)(pgd_t);

        pte_t (*make_pte)(unsigned long long pte);
        pmd_t (*make_pmd)(unsigned long long pmd);
        pgd_t (*make_pgd)(unsigned long long pgd);
#else
        unsigned long (*pte_val)(pte_t);
        unsigned long (*pgd_val)(pgd_t);

        pte_t (*make_pte)(unsigned long pte);
        pgd_t (*make_pgd)(unsigned long pgd);
#endif

        void (*set_lazy_mode)(int mode);

        /* These two are jmp to, not actually called. */
        void (*irq_enable_sysexit)(void);
        void (*iret)(void);

        void (*startup_ipi_hook)(int phys_apicid, unsigned long start_eip, unsigned long start_esp);
};
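
/*
 * Illustrative sketch, not part of this interface: a hypervisor backend
 * fills in the paravirt_ops instance declared just below and the rest of
 * the kernel calls through it.  All names prefixed "example_" are invented
 * for the illustration; hooks a backend does not care about can be left
 * at the native implementations or pointed at paravirt_nop.
 */
#if 0	/* example only, not built */
static unsigned long example_save_fl(void)
{
        unsigned long flags;

        /* a native-style implementation would simply read EFLAGS */
        asm volatile("pushfl; popl %0" : "=r" (flags));
        return flags;
}

static void example_restore_fl(unsigned long flags)
{
        asm volatile("pushl %0; popfl" : : "r" (flags) : "memory", "cc");
}

static void example_setup(void)
{
        paravirt_ops.name = "example";
        paravirt_ops.paravirt_enabled = 1;
        paravirt_ops.kernel_rpl = 0;
        paravirt_ops.save_fl = example_save_fl;
        paravirt_ops.restore_fl = example_restore_fl;
}
#endif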

/* Mark a paravirt probe function. */
#define paravirt_probe(fn)						\
 static asmlinkage void (*__paravirtprobe_##fn)(void) __attribute_used__ \
		__attribute__((__section__(".paravirtprobe"))) = fn

extern struct paravirt_ops paravirt_ops;

#define paravirt_enabled() (paravirt_ops.paravirt_enabled)

static inline void load_esp0(struct tss_struct *tss,
                             struct thread_struct *thread)
{
        paravirt_ops.load_esp0(tss, thread);
}

#define ARCH_SETUP paravirt_ops.arch_setup();

static inline unsigned long get_wallclock(void)
{
        return paravirt_ops.get_wallclock();
}

static inline int set_wallclock(unsigned long nowtime)
{
        return paravirt_ops.set_wallclock(nowtime);
}

static inline void (*choose_time_init(void))(void)
{
        return paravirt_ops.time_init;
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        paravirt_ops.cpuid(eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, reg) var = paravirt_ops.get_debugreg(reg)
#define set_debugreg(val, reg) paravirt_ops.set_debugreg(reg, val)

#define clts() paravirt_ops.clts()

#define read_cr0() paravirt_ops.read_cr0()
#define write_cr0(x) paravirt_ops.write_cr0(x)

#define read_cr2() paravirt_ops.read_cr2()
#define write_cr2(x) paravirt_ops.write_cr2(x)

#define read_cr3() paravirt_ops.read_cr3()
#define write_cr3(x) paravirt_ops.write_cr3(x)

#define read_cr4() paravirt_ops.read_cr4()
#define read_cr4_safe(x) paravirt_ops.read_cr4_safe()
#define write_cr4(x) paravirt_ops.write_cr4(x)

#define raw_ptep_get_and_clear(xp)	(paravirt_ops.ptep_get_and_clear(xp))

static inline void raw_safe_halt(void)
{
        paravirt_ops.safe_halt();
}

static inline void halt(void)
{
        paravirt_ops.safe_halt();
}

#define wbinvd() paravirt_ops.wbinvd()

#define get_kernel_rpl() (paravirt_ops.kernel_rpl)

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr,val1,val2) do {				\
	int _err;						\
	u64 _l = paravirt_ops.read_msr(msr, &_err);		\
	val1 = (u32)_l;						\
	val2 = _l >> 32;					\
} while(0)

#define wrmsr(msr,val1,val2) do {				\
	u64 _l = ((u64)(val2) << 32) | (val1);			\
	paravirt_ops.write_msr((msr), _l);			\
} while(0)

#define rdmsrl(msr,val) do {					\
	int _err;						\
	val = paravirt_ops.read_msr((msr), &_err);		\
} while(0)

#define wrmsrl(msr,val) (paravirt_ops.write_msr((msr), (val)))
#define wrmsr_safe(msr,a,b) ({					\
	u64 _l = ((u64)(b) << 32) | (a);			\
	paravirt_ops.write_msr((msr), _l);			\
})

/* rdmsr with exception handling */
#define rdmsr_safe(msr,a,b) ({					\
	int _err;						\
	u64 _l = paravirt_ops.read_msr(msr, &_err);		\
	(*a) = (u32)_l;						\
	(*b) = _l >> 32;					\
	_err; })
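
/*
 * Illustrative usage, sketch only: the wrappers above keep the calling
 * convention of the native MSR macros while routing through paravirt_ops.
 * MSR 0x10 is the TSC (the same constant write_tsc() uses below); the
 * function and variable names are invented for the example.
 */
#if 0	/* example only, not built */
static void example_msr_usage(void)
{
        u32 lo, hi;
        int err;

        rdmsr(0x10, lo, hi);			/* via paravirt_ops.read_msr */
        err = rdmsr_safe(0x10, &lo, &hi);	/* same read, but the error is reported */
        wrmsr(0x10, lo, hi);			/* via paravirt_ops.write_msr */
}
#endif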

#define rdtsc(low,high) do {					\
	u64 _l = paravirt_ops.read_tsc();			\
	low = (u32)_l;						\
	high = _l >> 32;					\
} while(0)

#define rdtscl(low) do {					\
	u64 _l = paravirt_ops.read_tsc();			\
	low = (int)_l;						\
} while(0)

#define rdtscll(val) (val = paravirt_ops.read_tsc())

#define get_scheduled_cycles(val) (val = paravirt_ops.get_scheduled_cycles())
#define calculate_cpu_khz() (paravirt_ops.get_cpu_khz())

#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)

#define rdpmc(counter,low,high) do {				\
	u64 _l = paravirt_ops.read_pmc();			\
	low = (u32)_l;						\
	high = _l >> 32;					\
} while(0)

#define load_TR_desc() (paravirt_ops.load_tr_desc())
#define load_gdt(dtr) (paravirt_ops.load_gdt(dtr))
#define load_idt(dtr) (paravirt_ops.load_idt(dtr))
#define set_ldt(addr, entries) (paravirt_ops.set_ldt((addr), (entries)))
#define store_gdt(dtr) (paravirt_ops.store_gdt(dtr))
#define store_idt(dtr) (paravirt_ops.store_idt(dtr))
#define store_tr(tr) ((tr) = paravirt_ops.store_tr())
#define load_TLS(t,cpu) (paravirt_ops.load_tls((t),(cpu)))
#define write_ldt_entry(dt, entry, low, high)				\
	(paravirt_ops.write_ldt_entry((dt), (entry), (low), (high)))
#define write_gdt_entry(dt, entry, low, high)				\
	(paravirt_ops.write_gdt_entry((dt), (entry), (low), (high)))
#define write_idt_entry(dt, entry, low, high)				\
	(paravirt_ops.write_idt_entry((dt), (entry), (low), (high)))
#define set_iopl_mask(mask) (paravirt_ops.set_iopl_mask(mask))

#define __pte(x)	paravirt_ops.make_pte(x)
#define __pgd(x)	paravirt_ops.make_pgd(x)

#define pte_val(x)	paravirt_ops.pte_val(x)
#define pgd_val(x)	paravirt_ops.pgd_val(x)

#ifdef CONFIG_X86_PAE
#define __pmd(x)	paravirt_ops.make_pmd(x)
#define pmd_val(x)	paravirt_ops.pmd_val(x)
#endif

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        paravirt_ops.io_delay();
#ifdef REALLY_SLOW_IO
        paravirt_ops.io_delay();
        paravirt_ops.io_delay();
        paravirt_ops.io_delay();
#endif
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Basic functions accessing APICs.
 */
static inline void apic_write(unsigned long reg, unsigned long v)
{
        paravirt_ops.apic_write(reg, v);
}

static inline void apic_write_atomic(unsigned long reg, unsigned long v)
{
        paravirt_ops.apic_write_atomic(reg, v);
}

static inline unsigned long apic_read(unsigned long reg)
{
        return paravirt_ops.apic_read(reg);
}

static inline void setup_boot_clock(void)
{
        paravirt_ops.setup_boot_clock();
}

static inline void setup_secondary_clock(void)
{
        paravirt_ops.setup_secondary_clock();
}
#endif

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
        if (paravirt_ops.pagetable_setup_start)
                (*paravirt_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
        if (paravirt_ops.pagetable_setup_done)
                (*paravirt_ops.pagetable_setup_done)(base);
}
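
/*
 * Illustrative sketch, not part of this header: early boot code is expected
 * to bracket construction of the initial kernel pagetable with these two
 * calls, so a backend (e.g. Xen) can substitute or fix up the pagetable it
 * was booted with.  example_paging_init() and pagetable_init() here stand
 * in for the real arch setup code.
 */
#if 0	/* example only, not built */
static void example_paging_init(void)
{
        paravirt_pagetable_setup_start(swapper_pg_dir);
        pagetable_init();		/* build the kernel mappings */
        paravirt_pagetable_setup_done(swapper_pg_dir);
}
#endif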

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        return paravirt_ops.startup_ipi_hook(phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        paravirt_ops.activate_mm(prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        paravirt_ops.dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        paravirt_ops.exit_mmap(mm);
}

#define __flush_tlb() paravirt_ops.flush_tlb_user()
#define __flush_tlb_global() paravirt_ops.flush_tlb_kernel()
#define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr)

#define paravirt_map_pt_hook(type, va, pfn) paravirt_ops.map_pt_hook(type, va, pfn)

#define paravirt_alloc_pt(pfn) paravirt_ops.alloc_pt(pfn)
#define paravirt_release_pt(pfn) paravirt_ops.release_pt(pfn)

#define paravirt_alloc_pd(pfn) paravirt_ops.alloc_pd(pfn)
#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) \
	paravirt_ops.alloc_pd_clone(pfn, clonepfn, start, count)
#define paravirt_release_pd(pfn) paravirt_ops.release_pd(pfn)

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        paravirt_ops.set_pte(ptep, pteval);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        paravirt_ops.set_pte_at(mm, addr, ptep, pteval);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
        paravirt_ops.set_pmd(pmdp, pmdval);
}

static inline void pte_update(struct mm_struct *mm, u32 addr, pte_t *ptep)
{
        paravirt_ops.pte_update(mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, u32 addr, pte_t *ptep)
{
        paravirt_ops.pte_update_defer(mm, addr, ptep);
}

#ifdef CONFIG_X86_PAE
static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
{
        paravirt_ops.set_pte_atomic(ptep, pteval);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        paravirt_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void set_pud(pud_t *pudp, pud_t pudval)
{
        paravirt_ops.set_pud(pudp, pudval);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        paravirt_ops.pte_clear(mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        paravirt_ops.pmd_clear(pmdp);
}
#endif

/* Lazy mode for batching updates / context switch */
#define PARAVIRT_LAZY_NONE 0
#define PARAVIRT_LAZY_MMU 1
#define PARAVIRT_LAZY_CPU 2
#define PARAVIRT_LAZY_FLUSH 3

#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
#define arch_enter_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_CPU)
#define arch_leave_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
#define arch_flush_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_FLUSH)

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_MMU)
#define arch_leave_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
#define arch_flush_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_FLUSH)
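
/*
 * Illustrative sketch, not part of this header: generic code brackets a
 * batch of page-table updates in lazy MMU mode so a backend can queue the
 * individual set_pte_at() calls and flush them in one go on leave/flush.
 * The function and variable names are invented for the example.
 */
#if 0	/* example only, not built */
static void example_set_ptes(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep, pte_t pteval, int count)
{
        int i;

        arch_enter_lazy_mmu_mode();
        for (i = 0; i < count; i++, addr += PAGE_SIZE, ptep++)
                set_pte_at(mm, addr, ptep, pteval);
        arch_leave_lazy_mmu_mode();	/* backend may flush queued updates here */
}
#endif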

void _paravirt_nop(void);
#define paravirt_nop	((void *)_paravirt_nop)

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch {
        u8 *instr;		/* original instructions */
        u8 instrtype;		/* type of this instruction */
        u8 len;			/* length of original instruction */
        u16 clobbers;		/* what registers you may clobber */
};
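
/*
 * Illustrative sketch, not part of this header: boot-time patching walks the
 * records that the paravirt_alt()/PARA_PATCH() macros below emit into
 * .parainstructions and lets the backend's ->patch hook rewrite each call
 * site, nop-padding whatever it does not use.  The section symbols and the
 * nop helper are assumptions made for the example.
 */
#if 0	/* example only, not built */
extern struct paravirt_patch __parainstructions[], __parainstructions_end[];
extern void example_add_nops(void *insns, unsigned len);	/* hypothetical helper */

static void example_apply_paravirt(void)
{
        struct paravirt_patch *p;

        for (p = __parainstructions; p < __parainstructions_end; p++) {
                unsigned used;

                used = paravirt_ops.patch(p->instrtype, p->clobbers,
                                          p->instr, p->len);
                /* fill the remainder of the original site with nops */
                example_add_nops(p->instr + used, p->len - used);
        }
}
#endif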

#define paravirt_alt(insn_string, typenum, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	"  .long 771b\n"				\
	"  .byte " __stringify(typenum) "\n"		\
	"  .byte 772b-771b\n"				\
	"  .short " __stringify(clobber) "\n"		\
	".popsection"

static inline unsigned long __raw_local_save_flags(void)
{
        unsigned long f;

        __asm__ __volatile__(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                          "call *%1;"
                                          "popl %%edx; popl %%ecx",
                                          PARAVIRT_SAVE_FLAGS, CLBR_NONE)
                             : "=a"(f) : "m"(paravirt_ops.save_fl)
                             : "memory", "cc");
        return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
        __asm__ __volatile__(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                          "call *%1;"
                                          "popl %%edx; popl %%ecx",
                                          PARAVIRT_RESTORE_FLAGS, CLBR_EAX)
                             : "=a"(f) : "m" (paravirt_ops.restore_fl), "0"(f)
                             : "memory", "cc");
}

static inline void raw_local_irq_disable(void)
{
        __asm__ __volatile__(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                          "call *%0;"
                                          "popl %%edx; popl %%ecx",
                                          PARAVIRT_IRQ_DISABLE, CLBR_EAX)
                             : : "m" (paravirt_ops.irq_disable)
                             : "memory", "eax", "cc");
}

static inline void raw_local_irq_enable(void)
{
        __asm__ __volatile__(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                          "call *%0;"
                                          "popl %%edx; popl %%ecx",
                                          PARAVIRT_IRQ_ENABLE, CLBR_EAX)
                             : : "m" (paravirt_ops.irq_enable)
                             : "memory", "eax", "cc");
}

static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        __asm__ __volatile__(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                          "call *%1; pushl %%eax;"
                                          "call *%2; popl %%eax;"
                                          "popl %%edx; popl %%ecx",
                                          PARAVIRT_SAVE_FLAGS_IRQ_DISABLE,
                                          CLBR_NONE)
                             : "=a"(f)
                             : "m" (paravirt_ops.save_fl),
                               "m" (paravirt_ops.irq_disable)
                             : "memory", "cc");
        return f;
}

#define CLI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;"		\
		     "call *paravirt_ops+%c[irq_disable];"		\
		     "popl %%edx; popl %%ecx",				\
		     PARAVIRT_IRQ_DISABLE, CLBR_EAX)

#define STI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;"		\
		     "call *paravirt_ops+%c[irq_enable];"		\
		     "popl %%edx; popl %%ecx",				\
		     PARAVIRT_IRQ_ENABLE, CLBR_EAX)

#define CLI_STI_CLOBBERS , "%eax"
#define CLI_STI_INPUT_ARGS \
	,								\
	[irq_disable] "i" (offsetof(struct paravirt_ops, irq_disable)),	\
	[irq_enable] "i" (offsetof(struct paravirt_ops, irq_enable))

#else /* __ASSEMBLY__ */

#define PARA_PATCH(ptype, clobbers, ops)	\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .long 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	 .short clobbers;			\
	.popsection

#define INTERRUPT_RETURN				\
	PARA_PATCH(PARAVIRT_INTERRUPT_RETURN, CLBR_ANY,	\
	jmp *%cs:paravirt_ops+PARAVIRT_iret)

#define DISABLE_INTERRUPTS(clobbers)			\
	PARA_PATCH(PARAVIRT_IRQ_DISABLE, clobbers,	\
	pushl %ecx; pushl %edx;				\
	call *paravirt_ops+PARAVIRT_irq_disable;	\
	popl %edx; popl %ecx)

#define ENABLE_INTERRUPTS(clobbers)			\
	PARA_PATCH(PARAVIRT_IRQ_ENABLE, clobbers,	\
	pushl %ecx; pushl %edx;				\
	call *%cs:paravirt_ops+PARAVIRT_irq_enable;	\
	popl %edx; popl %ecx)

#define ENABLE_INTERRUPTS_SYSEXIT			\
	PARA_PATCH(PARAVIRT_STI_SYSEXIT, CLBR_ANY,	\
	jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit)

#define GET_CR0_INTO_EAX			\
	call *paravirt_ops+PARAVIRT_read_cr0

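/*
 * Illustrative use from assembly, sketch only: low-level entry code invokes
 * the macros above instead of the raw cli/sti/iret instructions, e.g.
 *
 *	DISABLE_INTERRUPTS(CLBR_ANY)
 *	...critical section...
 *	ENABLE_INTERRUPTS(CLBR_ANY)
 *	INTERRUPT_RETURN
 *
 * so each site is recorded in .parainstructions and can be patched at boot.
 */
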
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* __ASM_PARAVIRT_H */