mm, compaction: finish pageblock scanning on contention
Async migration aborts on spinlock contention, but contention can be high when
there are multiple compaction attempts and kswapd is active.  The consequence
is that the migration scanners move forward uselessly while still contending
on locks for longer, while leaving suitable migration sources behind.

This patch will acquire the lock but track when contention occurs.  When it
does, the current pageblock will finish, as compaction may succeed for that
block, and then abort.  This will have a variable impact on latency: in some
cases useless scanning is avoided (reduces latency), but a lock will be
contended (increases latency), or a single contended pageblock is scanned
that would otherwise have been skipped (increases latency).

                                     5.0.0-rc1              5.0.0-rc1
                                norescan-v3r16    finishcontend-v3r16
Amean     fault-both-1          0.00 (   0.00%)        0.00 *   0.00%*
Amean     fault-both-3       3002.07 (   0.00%)     3153.17 (  -5.03%)
Amean     fault-both-5       4684.47 (   0.00%)     4280.52 (   8.62%)
Amean     fault-both-7       6815.54 (   0.00%)     5811.50 *  14.73%*
Amean     fault-both-12     10864.02 (   0.00%)     9276.85 (  14.61%)
Amean     fault-both-18     12247.52 (   0.00%)    11032.67 (   9.92%)
Amean     fault-both-24     15683.99 (   0.00%)    14285.70 (   8.92%)
Amean     fault-both-30     18620.02 (   0.00%)    16293.76 *  12.49%*
Amean     fault-both-32     19250.28 (   0.00%)    16721.02 *  13.14%*

                                     5.0.0-rc1              5.0.0-rc1
                                norescan-v3r16    finishcontend-v3r16
Percentage huge-1         0.00 (   0.00%)        0.00 (   0.00%)
Percentage huge-3        95.00 (   0.00%)       96.82 (   1.92%)
Percentage huge-5        94.22 (   0.00%)       95.40 (   1.26%)
Percentage huge-7        92.35 (   0.00%)       95.92 (   3.86%)
Percentage huge-12       91.90 (   0.00%)       96.73 (   5.25%)
Percentage huge-18       89.58 (   0.00%)       96.77 (   8.03%)
Percentage huge-24       90.03 (   0.00%)       96.05 (   6.69%)
Percentage huge-30       89.14 (   0.00%)       96.81 (   8.60%)
Percentage huge-32       90.58 (   0.00%)       97.41 (   7.54%)

There is a variable impact that is mostly good on latency, while allocation
success rates are slightly higher.
System CPU usage is reduced by about 10%, but the scan rate impact is mixed:

Compaction migrate scanned    27997659.00        20148867
Compaction free scanned      120782791.00       118324914

Migration scan rates are reduced 28%, which is expected, as a pageblock is
used by the async scanner instead of skipped.  The impact on the free
scanner is known to be variable.  Overall, the primary justification for
this patch is that completing scanning of a pageblock is very important for
later patches.

[yuehaibing@huawei.com: fix unused variable warning]
Link: http://lkml.kernel.org/r/20190118175136.31341-14-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: YueHaibing <yuehaibing@huawei.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
804d3121ba
commit
cb2dcaf023
|
@ -382,24 +382,25 @@ static bool test_and_set_skip(struct compact_control *cc, struct page *page,
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Compaction requires the taking of some coarse locks that are potentially
|
* Compaction requires the taking of some coarse locks that are potentially
|
||||||
* very heavily contended. For async compaction, back out if the lock cannot
|
* very heavily contended. For async compaction, trylock and record if the
|
||||||
* be taken immediately. For sync compaction, spin on the lock if needed.
|
* lock is contended. The lock will still be acquired but compaction will
|
||||||
|
* abort when the current block is finished regardless of success rate.
|
||||||
|
* Sync compaction acquires the lock.
|
||||||
*
|
*
|
||||||
* Returns true if the lock is held
|
* Always returns true which makes it easier to track lock state in callers.
|
||||||
* Returns false if the lock is not held and compaction should abort
|
|
||||||
*/
|
*/
|
||||||
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
|
static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
|
||||||
struct compact_control *cc)
|
struct compact_control *cc)
|
||||||
{
|
{
|
||||||
if (cc->mode == MIGRATE_ASYNC) {
|
/* Track if the lock is contended in async mode */
|
||||||
if (!spin_trylock_irqsave(lock, *flags)) {
|
if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
|
||||||
|
if (spin_trylock_irqsave(lock, *flags))
|
||||||
|
return true;
|
||||||
|
|
||||||
cc->contended = true;
|
cc->contended = true;
|
||||||
return false;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
spin_lock_irqsave(lock, *flags);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
spin_lock_irqsave(lock, *flags);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -432,10 +433,8 @@ static bool compact_unlock_should_abort(spinlock_t *lock,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (need_resched()) {
|
if (need_resched()) {
|
||||||
if (cc->mode == MIGRATE_ASYNC) {
|
if (cc->mode == MIGRATE_ASYNC)
|
||||||
cc->contended = true;
|
cc->contended = true;
|
||||||
return true;
|
|
||||||
}
|
|
||||||
cond_resched();
|
cond_resched();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -455,10 +454,8 @@ static inline bool compact_should_abort(struct compact_control *cc)
|
||||||
{
|
{
|
||||||
/* async compaction aborts if contended */
|
/* async compaction aborts if contended */
|
||||||
if (need_resched()) {
|
if (need_resched()) {
|
||||||
if (cc->mode == MIGRATE_ASYNC) {
|
if (cc->mode == MIGRATE_ASYNC)
|
||||||
cc->contended = true;
|
cc->contended = true;
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
cond_resched();
|
cond_resched();
|
||||||
}
|
}
|
||||||
|
@ -535,18 +532,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
|
||||||
* recheck as well.
|
* recheck as well.
|
||||||
*/
|
*/
|
||||||
if (!locked) {
|
if (!locked) {
|
||||||
/*
|
locked = compact_lock_irqsave(&cc->zone->lock,
|
||||||
* The zone lock must be held to isolate freepages.
|
|
||||||
* Unfortunately this is a very coarse lock and can be
|
|
||||||
* heavily contended if there are parallel allocations
|
|
||||||
* or parallel compactions. For async compaction do not
|
|
||||||
* spin on the lock and we acquire the lock as late as
|
|
||||||
* possible.
|
|
||||||
*/
|
|
||||||
locked = compact_trylock_irqsave(&cc->zone->lock,
|
|
||||||
&flags, cc);
|
&flags, cc);
|
||||||
if (!locked)
|
|
||||||
break;
|
|
||||||
|
|
||||||
/* Recheck this is a buddy page under lock */
|
/* Recheck this is a buddy page under lock */
|
||||||
if (!PageBuddy(page))
|
if (!PageBuddy(page))
|
||||||
|
@ -900,15 +887,9 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
|
||||||
|
|
||||||
/* If we already hold the lock, we can skip some rechecking */
|
/* If we already hold the lock, we can skip some rechecking */
|
||||||
if (!locked) {
|
if (!locked) {
|
||||||
locked = compact_trylock_irqsave(zone_lru_lock(zone),
|
locked = compact_lock_irqsave(zone_lru_lock(zone),
|
||||||
&flags, cc);
|
&flags, cc);
|
||||||
|
|
||||||
/* Allow future scanning if the lock is contended */
|
|
||||||
if (!locked) {
|
|
||||||
clear_pageblock_skip(page);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Try get exclusive access under lock */
|
/* Try get exclusive access under lock */
|
||||||
if (!skip_updated) {
|
if (!skip_updated) {
|
||||||
skip_updated = true;
|
skip_updated = true;
|
||||||
|
@ -951,9 +932,12 @@ isolate_success:
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Avoid isolating too much unless this block is being
|
* Avoid isolating too much unless this block is being
|
||||||
* rescanned (e.g. dirty/writeback pages, parallel allocation).
|
* rescanned (e.g. dirty/writeback pages, parallel allocation)
|
||||||
|
* or a lock is contended. For contention, isolate quickly to
|
||||||
|
* potentially remove one source of contention.
|
||||||
*/
|
*/
|
||||||
if (cc->nr_migratepages == COMPACT_CLUSTER_MAX && !cc->rescan) {
|
if (cc->nr_migratepages == COMPACT_CLUSTER_MAX &&
|
||||||
|
!cc->rescan && !cc->contended) {
|
||||||
++low_pfn;
|
++low_pfn;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -1416,12 +1400,8 @@ static void isolate_freepages(struct compact_control *cc)
|
||||||
isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
|
isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
|
||||||
freelist, false);
|
freelist, false);
|
||||||
|
|
||||||
/*
|
/* Are enough freepages isolated? */
|
||||||
* If we isolated enough freepages, or aborted due to lock
|
if (cc->nr_freepages >= cc->nr_migratepages) {
|
||||||
* contention, terminate.
|
|
||||||
*/
|
|
||||||
if ((cc->nr_freepages >= cc->nr_migratepages)
|
|
||||||
|| cc->contended) {
|
|
||||||
if (isolate_start_pfn >= block_end_pfn) {
|
if (isolate_start_pfn >= block_end_pfn) {
|
||||||
/*
|
/*
|
||||||
* Restart at previous pageblock if more
|
* Restart at previous pageblock if more
|
||||||
|
@ -1463,12 +1443,7 @@ static struct page *compaction_alloc(struct page *migratepage,
|
||||||
struct compact_control *cc = (struct compact_control *)data;
|
struct compact_control *cc = (struct compact_control *)data;
|
||||||
struct page *freepage;
|
struct page *freepage;
|
||||||
|
|
||||||
/*
|
|
||||||
* Isolate free pages if necessary, and if we are not aborting due to
|
|
||||||
* contention.
|
|
||||||
*/
|
|
||||||
if (list_empty(&cc->freepages)) {
|
if (list_empty(&cc->freepages)) {
|
||||||
if (!cc->contended)
|
|
||||||
isolate_freepages(cc);
|
isolate_freepages(cc);
|
||||||
|
|
||||||
if (list_empty(&cc->freepages))
|
if (list_empty(&cc->freepages))
|
||||||
|
@ -1733,7 +1708,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
|
||||||
low_pfn = isolate_migratepages_block(cc, low_pfn,
|
low_pfn = isolate_migratepages_block(cc, low_pfn,
|
||||||
block_end_pfn, isolate_mode);
|
block_end_pfn, isolate_mode);
|
||||||
|
|
||||||
if (!low_pfn || cc->contended)
|
if (!low_pfn)
|
||||||
return ISOLATE_ABORT;
|
return ISOLATE_ABORT;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1763,9 +1738,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
|
||||||
{
|
{
|
||||||
unsigned int order;
|
unsigned int order;
|
||||||
const int migratetype = cc->migratetype;
|
const int migratetype = cc->migratetype;
|
||||||
|
int ret;
|
||||||
if (cc->contended || fatal_signal_pending(current))
|
|
||||||
return COMPACT_CONTENDED;
|
|
||||||
|
|
||||||
/* Compaction run completes if the migrate and free scanner meet */
|
/* Compaction run completes if the migrate and free scanner meet */
|
||||||
if (compact_scanners_met(cc)) {
|
if (compact_scanners_met(cc)) {
|
||||||
|
@ -1800,6 +1773,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
|
||||||
return COMPACT_CONTINUE;
|
return COMPACT_CONTINUE;
|
||||||
|
|
||||||
/* Direct compactor: Is a suitable page free? */
|
/* Direct compactor: Is a suitable page free? */
|
||||||
|
ret = COMPACT_NO_SUITABLE_PAGE;
|
||||||
for (order = cc->order; order < MAX_ORDER; order++) {
|
for (order = cc->order; order < MAX_ORDER; order++) {
|
||||||
struct free_area *area = &cc->zone->free_area[order];
|
struct free_area *area = &cc->zone->free_area[order];
|
||||||
bool can_steal;
|
bool can_steal;
|
||||||
|
@ -1839,11 +1813,15 @@ static enum compact_result __compact_finished(struct compact_control *cc)
|
||||||
return COMPACT_SUCCESS;
|
return COMPACT_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
return COMPACT_CONTINUE;
|
ret = COMPACT_CONTINUE;
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return COMPACT_NO_SUITABLE_PAGE;
|
if (cc->contended || fatal_signal_pending(current))
|
||||||
|
ret = COMPACT_CONTENDED;
|
||||||
|
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static enum compact_result compact_finished(struct compact_control *cc)
|
static enum compact_result compact_finished(struct compact_control *cc)
|
||||||
|
|
Loading…
Reference in New Issue