x86/pvops: replace arch_enter_lazy_cpu_mode with arch_start_context_switch

Impact: simplification, prepare for later changes

Make lazy CPU mode more specific to context switching, so that
it makes sense to do more context-switch-specific things in
the callbacks.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
commit 7fd7d83d49
parent b8bcfe997e
Author: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Date:   2009-02-17 23:24:03 -08:00

 8 files changed, 20 insertions(+), 37 deletions(-)
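
As an outline of the change, here is a compilable stand-alone sketch
(not kernel code: the bodies, the printf tracing, and the main()
driver are illustrative only) of how the renamed hooks pair up across
the hunks below — the generic scheduler opens the batched region, and
the architecture's __switch_to() closes it:

#include <stdio.h>

/* was arch_enter_lazy_cpu_mode(): open the batched region */
static void arch_start_context_switch(void)
{
	puts("start of context switch: begin batching state updates");
}

/* was arch_leave_lazy_cpu_mode(): close it, flushing pending updates */
static void arch_end_context_switch(void)
{
	puts("end of context switch: flush batched updates");
}

/* arch-specific switch, as in __switch_to() in the hunks below */
static void do_switch(void)
{
	/* ...reload segments, FPU state, etc... */
	arch_end_context_switch();
}

/* generic scheduler path, as in context_switch() in the last hunk */
static void context_switch(void)
{
	arch_start_context_switch();
	/* ...switch mm: the page table reload can now be batched... */
	do_switch();
}

int main(void)
{
	context_switch();
	return 0;
}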

@@ -1420,19 +1420,17 @@ void paravirt_enter_lazy_mmu(void);
 void paravirt_leave_lazy_mmu(void);
 void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
 
-#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
-static inline void arch_enter_lazy_cpu_mode(void)
+#define __HAVE_ARCH_START_CONTEXT_SWITCH
+static inline void arch_start_context_switch(void)
 {
 	PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
 }
 
-static inline void arch_leave_lazy_cpu_mode(void)
+static inline void arch_end_context_switch(void)
 {
 	PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
 }
 
-void arch_flush_lazy_cpu_mode(void);
-
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 static inline void arch_enter_lazy_mmu_mode(void)
 {

@@ -301,19 +301,6 @@ void arch_flush_lazy_mmu_mode(void)
 	preempt_enable();
 }
 
-void arch_flush_lazy_cpu_mode(void)
-{
-	preempt_disable();
-
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
-		WARN_ON(preempt_count() == 1);
-		arch_leave_lazy_cpu_mode();
-		arch_enter_lazy_cpu_mode();
-	}
-
-	preempt_enable();
-}
-
 struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,

@@ -407,7 +407,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * done before math_state_restore, so the TS bit is up
 	 * to date.
 	 */
-	arch_leave_lazy_cpu_mode();
+	arch_end_context_switch();
 
 	/* If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the

@@ -428,7 +428,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * done before math_state_restore, so the TS bit is up
 	 * to date.
 	 */
-	arch_leave_lazy_cpu_mode();
+	arch_end_context_switch();
 
 	/*
 	 * Switch FS and GS.

@@ -1119,10 +1119,8 @@ static void drop_other_mm_ref(void *info)
 
 	/* If this cpu still has a stale cr3 reference, then make sure
 	   it has been flushed. */
-	if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) {
+	if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
 		load_cr3(swapper_pg_dir);
-		arch_flush_lazy_cpu_mode();
-	}
 }
 
 static void xen_drop_mm_ref(struct mm_struct *mm)
@@ -1135,7 +1133,6 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
 		load_cr3(swapper_pg_dir);
 	else
 		leave_mm(smp_processor_id());
-	arch_flush_lazy_cpu_mode();
 	}
 
 	/* Get the "official" set of cpus referring to our pagetable. */

@@ -73,8 +73,8 @@ static inline int pte_file(pte_t pte) { return 0; }
 #define pgtable_cache_init()		do {} while (0)
 #define arch_enter_lazy_mmu_mode()	do {} while (0)
 #define arch_leave_lazy_mmu_mode()	do {} while (0)
-#define arch_enter_lazy_cpu_mode()	do {} while (0)
-#define arch_leave_lazy_cpu_mode()	do {} while (0)
+
+#define arch_start_context_switch()	do {} while (0)
 
 #else /* !CONFIG_MMU */
 /*****************************************************************************/

@@ -280,17 +280,18 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 #endif
 
 /*
- * A facility to provide batching of the reload of page tables with the
- * actual context switch code for paravirtualized guests. By convention,
- * only one of the lazy modes (CPU, MMU) should be active at any given
- * time, entry should never be nested, and entry and exits should always
- * be paired. This is for sanity of maintaining and reasoning about the
- * kernel code.
+ * A facility to provide batching of the reload of page tables and
+ * other process state with the actual context switch code for
+ * paravirtualized guests. By convention, only one of the batched
+ * update (lazy) modes (CPU, MMU) should be active at any given time,
+ * entry should never be nested, and entry and exits should always be
+ * paired. This is for sanity of maintaining and reasoning about the
+ * kernel code. In this case, the exit (end of the context switch) is
+ * in architecture-specific code, and so doesn't need a generic
+ * definition.
  */
-#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
-#define arch_enter_lazy_cpu_mode()	do {} while (0)
-#define arch_leave_lazy_cpu_mode()	do {} while (0)
-#define arch_flush_lazy_cpu_mode()	do {} while (0)
+#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
+#define arch_start_context_switch()	do {} while (0)
 #endif
 
 #ifndef __HAVE_PFNMAP_TRACKING
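
The rewritten comment above describes the convention but not the
mechanics. The following self-contained toy (all names and the queue
are illustrative assumptions, not kernel or Xen code) shows the kind
of batching the lazy region enables: updates issued between start and
end are queued and flushed in one combined trap, instead of trapping
once per update.

#include <stdio.h>

#define BATCH_MAX 16

static int batch[BATCH_MAX];
static int batch_len;
static int batching;

/* stand-in for an expensive trap to the hypervisor */
static void hypercall(const char *what, int n)
{
	printf("hypercall: %s (%d ops)\n", what, n);
}

static void flush_batch(void)
{
	if (batch_len)
		hypercall("flush batched updates", batch_len);
	batch_len = 0;
}

static void arch_start_context_switch(void)
{
	batching = 1;			/* enter the batched (lazy) region */
}

static void arch_end_context_switch(void)
{
	flush_batch();			/* one combined trap for the region */
	batching = 0;
}

/* a state update that may be deferred while batching */
static void update_state(int op)
{
	if (!batching) {
		hypercall("immediate update", 1);
		return;
	}
	batch[batch_len++] = op;
	if (batch_len == BATCH_MAX)
		flush_batch();		/* never overflow the queue */
}

int main(void)
{
	arch_start_context_switch();
	update_state(1);		/* e.g. page table reload */
	update_state(2);		/* e.g. segment/FPU state */
	arch_end_context_switch();	/* the single flush happens here */
	return 0;
}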

@@ -2746,7 +2746,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	 * combine the page table reload and the switch backend into
 	 * one hypercall.
 	 */
-	arch_enter_lazy_cpu_mode();
+	arch_start_context_switch();
 
 	if (unlikely(!mm)) {
 		next->active_mm = oldmm;