mm: convert swap to percpu locked

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Ingo Molnar, 2009-07-03 08:29:51 -05:00; committed by Alibek Omarov
commit 1ea4f83f1b (parent d62167b358)
1 changed file with 20 additions and 14 deletions

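Context for the diff below: the per-CPU pagevecs in mm/swap.c were protected only implicitly, by disabling interrupts (local_irq_save()) or preemption (get_cpu_var()). On PREEMPT_RT those sections must stay preemptible, so the patch introduces two named local locks, rotate_lock and swapvec_lock, and takes them explicitly. A minimal sketch of the idea, assuming the locallock API of the -rt patch set of this era (illustrative only, not the actual <linux/locallock.h>):

/*
 * Illustrative sketch only -- assumed shape of the locallock API,
 * not the real <linux/locallock.h> from the -rt tree.
 */
#ifndef CONFIG_PREEMPT_RT_FULL
/* Non-RT: a local lock costs nothing; it maps to what the code did before. */
# define DEFINE_LOCAL_IRQ_LOCK(lvar)            int lvar /* placeholder, unused */
# define local_lock_irqsave(lvar, flags)        local_irq_save(flags)
# define local_unlock_irqrestore(lvar, flags)   local_irq_restore(flags)
#else
/* RT: a per-CPU (sleeping) spinlock keeps the section preemptible
 * while still serializing all users of the same per-CPU data. */
# define DEFINE_LOCAL_IRQ_LOCK(lvar) \
        DEFINE_PER_CPU(spinlock_t, lvar) = __SPIN_LOCK_UNLOCKED(lvar)
# define local_lock_irqsave(lvar, flags) \
        do { (void)(flags); spin_lock(this_cpu_ptr(&lvar)); } while (0)
# define local_unlock_irqrestore(lvar, flags) \
        do { (void)(flags); spin_unlock(this_cpu_ptr(&lvar)); } while (0)
#endif

On a non-RT build this compiles down to exactly the old behaviour, so the conversion is intended to be a no-op there.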

@@ -31,6 +31,7 @@
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
 #include <linux/uio.h>
+#include <linux/locallock.h>
 
 #include "internal.h"
@@ -44,6 +45,9 @@ static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 
+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
+static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
+
 /*
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs. But it gets used by networking.
@@ -440,11 +444,11 @@ void rotate_reclaimable_page(struct page *page)
                unsigned long flags;
 
                page_cache_get(page);
-               local_irq_save(flags);
+               local_lock_irqsave(rotate_lock, flags);
                pvec = &__get_cpu_var(lru_rotate_pvecs);
                if (!pagevec_add(pvec, page))
                        pagevec_move_tail(pvec);
-               local_irq_restore(flags);
+               local_unlock_irqrestore(rotate_lock, flags);
        }
 }
@@ -495,12 +499,13 @@ static bool need_activate_page_drain(int cpu)
 
 void activate_page(struct page *page)
 {
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-               struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+               struct pagevec *pvec = &get_locked_var(swapvec_lock,
+                                                      activate_page_pvecs);
 
                page_cache_get(page);
                if (!pagevec_add(pvec, page))
                        pagevec_lru_move_fn(pvec, __activate_page, NULL);
-               put_cpu_var(activate_page_pvecs);
+               put_locked_var(swapvec_lock, activate_page_pvecs);
        }
 }
@@ -526,7 +531,7 @@ void activate_page(struct page *page)
 
 static void __lru_cache_activate_page(struct page *page)
 {
-       struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+       struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
        int i;
 
        /*
@@ -548,7 +553,7 @@ static void __lru_cache_activate_page(struct page *page)
                }
        }
 
-       put_cpu_var(lru_add_pvec);
+       put_locked_var(swapvec_lock, lru_add_pvec);
 }
 
 /*
@@ -593,13 +598,13 @@ EXPORT_SYMBOL(init_page_accessed);
 
 static void __lru_cache_add(struct page *page)
 {
-       struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+       struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
 
        page_cache_get(page);
        if (!pagevec_space(pvec))
                __pagevec_lru_add(pvec);
        pagevec_add(pvec, page);
-       put_cpu_var(lru_add_pvec);
+       put_locked_var(swapvec_lock, lru_add_pvec);
 }
 
 /**
@@ -745,9 +750,9 @@ void lru_add_drain_cpu(int cpu)
                unsigned long flags;
 
                /* No harm done if a racing interrupt already did this */
-               local_irq_save(flags);
+               local_lock_irqsave(rotate_lock, flags);
                pagevec_move_tail(pvec);
-               local_irq_restore(flags);
+               local_unlock_irqrestore(rotate_lock, flags);
        }
 
        pvec = &per_cpu(lru_deactivate_pvecs, cpu);
@@ -775,18 +780,19 @@ void deactivate_page(struct page *page)
                return;
 
        if (likely(get_page_unless_zero(page))) {
-               struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+               struct pagevec *pvec = &get_locked_var(swapvec_lock,
+                                                      lru_deactivate_pvecs);
 
                if (!pagevec_add(pvec, page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
-               put_cpu_var(lru_deactivate_pvecs);
+               put_locked_var(swapvec_lock, lru_deactivate_pvecs);
        }
 }
 
 void lru_add_drain(void)
 {
-       lru_add_drain_cpu(get_cpu());
-       put_cpu();
+       lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
+       local_unlock_cpu(swapvec_lock);
 }
 
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
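The remaining helpers used in the diff pair the local lock with the per-CPU variable access. Again a hedged sketch under the same assumption (not the real -rt header); in particular, local_lock_cpu() is assumed to lock and return the current CPU id so that lru_add_drain_cpu() still drains the CPU whose pagevecs the lock now protects:

/* Illustrative sketch (assumed API shape, not the real -rt header). */

/* Lock lvar, then evaluate to this CPU's copy of var;
 * replaces get_cpu_var(), which only disabled preemption. */
#define get_locked_var(lvar, var)               \
        (*({                                    \
                local_lock(lvar);               \
                this_cpu_ptr(&(var));           \
        }))

/* Drop the lock taken by get_locked_var(); replaces put_cpu_var(). */
#define put_locked_var(lvar, var)       local_unlock(lvar)

/* Lock and return the current CPU id; replaces get_cpu()/put_cpu(). */
#define local_lock_cpu(lvar)    ({ local_lock(lvar); smp_processor_id(); })
#define local_unlock_cpu(lvar)  local_unlock(lvar)

Two locks rather than one mirror the two pre-existing protection schemes: rotate_lock covers lru_rotate_pvecs, which is also touched from interrupt context (the former local_irq_save() sections), while swapvec_lock covers the pagevecs that were only ever preemption-protected.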