mm: rename pgmoved variable in shrink_active_list()

Currently the pgmoved variable has two meanings, which makes the code harder
to review.  This patch separates them into nr_taken and nr_rotated.
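
For reference, a condensed sketch of how the two counters are used after this
patch (heavily elided; see the hunks below for the actual change).  nr_taken
holds the number of pages isolated from the active list, while nr_rotated
counts the referenced mapped pages that get another trip around the list:

	unsigned long nr_taken;		/* pages isolated from the active list */
	unsigned long nr_rotated = 0;	/* referenced mapped pages kept active */

	nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
					ISOLATE_ACTIVE, zone,
					sc->mem_cgroup, 1, file);
	reclaim_stat->recent_scanned[!!file] += nr_taken;

	while (!list_empty(&l_hold)) {
		page = lru_to_page(&l_hold);
		list_del(&page->lru);
		if (page_mapping_inuse(page) &&
		    page_referenced(page, 0, sc->mem_cgroup, &vm_flags))
			nr_rotated++;
		/* page is then queued on l_active or l_inactive */
	}

	reclaim_stat->recent_rotated[!!file] += nr_rotated;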

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
KOSAKI Motohiro 2009-09-21 17:01:35 -07:00 committed by Linus Torvalds
parent b259fbde0a
commit 44c241f166
1 changed file with 8 additions and 8 deletions

mm/vmscan.c

@@ -1244,7 +1244,7 @@ static void move_active_pages_to_lru(struct zone *zone,
 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 			struct scan_control *sc, int priority, int file)
 {
-	unsigned long pgmoved;
+	unsigned long nr_taken;
 	unsigned long pgscanned;
 	unsigned long vm_flags;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
@@ -1252,10 +1252,11 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	LIST_HEAD(l_inactive);
 	struct page *page;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+	unsigned long nr_rotated = 0;
 
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
-	pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
+	nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
 					ISOLATE_ACTIVE, zone,
 					sc->mem_cgroup, 1, file);
 	/*
@@ -1265,16 +1266,15 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	if (scanning_global_lru(sc)) {
 		zone->pages_scanned += pgscanned;
 	}
-	reclaim_stat->recent_scanned[!!file] += pgmoved;
+	reclaim_stat->recent_scanned[!!file] += nr_taken;
 
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
 	if (file)
-		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
+		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
 	else
-		__mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
+		__mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
 	spin_unlock_irq(&zone->lru_lock);
 
-	pgmoved = 0;  /* count referenced (mapping) mapped pages */
 	while (!list_empty(&l_hold)) {
 		cond_resched();
 		page = lru_to_page(&l_hold);
@@ -1288,7 +1288,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		/* page_referenced clears PageReferenced */
 		if (page_mapping_inuse(page) &&
 		    page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
-			pgmoved++;
+			nr_rotated++;
 			/*
 			 * Identify referenced, file-backed active pages and
 			 * give them one more trip around the active list. So
@@ -1317,7 +1317,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 * helps balance scan pressure between file and anonymous pages in
 	 * get_scan_ratio.
 	 */
-	reclaim_stat->recent_rotated[!!file] += pgmoved;
+	reclaim_stat->recent_rotated[!!file] += nr_rotated;
 
 	move_active_pages_to_lru(zone, &l_active,
 						LRU_ACTIVE + file * LRU_FILE);