Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu: restructure pcpu_extend_area_map() to fix bugs and improve readability
This commit is contained in:
Linus Torvalds 2009-11-14 12:59:06 -08:00
commit e0a2af1e60
1 changed file with 81 additions and 40 deletions

View File

@ -355,62 +355,86 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
} }
/** /**
* pcpu_extend_area_map - extend area map for allocation * pcpu_need_to_extend - determine whether chunk area map needs to be extended
* @chunk: target chunk * @chunk: chunk of interest
* *
 * Extend area map of @chunk so that it can accommodate an allocation. * Determine whether area map of @chunk needs to be extended
 * A single allocation can split an area into three areas, so this * accommodate a new allocation.
* function makes sure that @chunk->map has at least two extra slots.
* *
* CONTEXT: * CONTEXT:
* pcpu_alloc_mutex, pcpu_lock. pcpu_lock is released and reacquired * pcpu_lock.
* if area map is extended.
* *
* RETURNS: * RETURNS:
* 0 if noop, 1 if successfully extended, -errno on failure. * New target map allocation length if extension is necessary, 0
* otherwise.
*/ */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, unsigned long *flags) static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{ {
int new_alloc; int new_alloc;
int *new;
size_t size;
/* has enough? */
if (chunk->map_alloc >= chunk->map_used + 2) if (chunk->map_alloc >= chunk->map_used + 2)
return 0; return 0;
spin_unlock_irqrestore(&pcpu_lock, *flags);
new_alloc = PCPU_DFL_MAP_ALLOC; new_alloc = PCPU_DFL_MAP_ALLOC;
while (new_alloc < chunk->map_used + 2) while (new_alloc < chunk->map_used + 2)
new_alloc *= 2; new_alloc *= 2;
new = pcpu_mem_alloc(new_alloc * sizeof(new[0])); return new_alloc;
if (!new) { }
spin_lock_irqsave(&pcpu_lock, *flags);
/**
* pcpu_extend_area_map - extend area map of a chunk
* @chunk: chunk of interest
* @new_alloc: new target allocation length of the area map
*
* Extend area map of @chunk to have @new_alloc entries.
*
* CONTEXT:
* Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
int *old = NULL, *new = NULL;
size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
unsigned long flags;
new = pcpu_mem_alloc(new_size);
if (!new)
return -ENOMEM; return -ENOMEM;
}
/* /* acquire pcpu_lock and switch to new area map */
* Acquire pcpu_lock and switch to new area map. Only free spin_lock_irqsave(&pcpu_lock, flags);
 * could have happened in between, so map_used couldn't have
* grown.
*/
spin_lock_irqsave(&pcpu_lock, *flags);
BUG_ON(new_alloc < chunk->map_used + 2);
size = chunk->map_alloc * sizeof(chunk->map[0]); if (new_alloc <= chunk->map_alloc)
memcpy(new, chunk->map, size); goto out_unlock;
old_size = chunk->map_alloc * sizeof(chunk->map[0]);
memcpy(new, chunk->map, old_size);
/* /*
* map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
* one of the first chunks and still using static map. * one of the first chunks and still using static map.
*/ */
if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC) if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
pcpu_mem_free(chunk->map, size); old = chunk->map;
chunk->map_alloc = new_alloc; chunk->map_alloc = new_alloc;
chunk->map = new; chunk->map = new;
new = NULL;
out_unlock:
spin_unlock_irqrestore(&pcpu_lock, flags);
/*
* pcpu_mem_free() might end up calling vfree() which uses
* IRQ-unsafe lock and thus can't be called under pcpu_lock.
*/
pcpu_mem_free(old, old_size);
pcpu_mem_free(new, new_size);
return 0; return 0;
} }
@ -1049,7 +1073,7 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
static int warn_limit = 10; static int warn_limit = 10;
struct pcpu_chunk *chunk; struct pcpu_chunk *chunk;
const char *err; const char *err;
int slot, off; int slot, off, new_alloc;
unsigned long flags; unsigned long flags;
if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) { if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
@ -1064,14 +1088,25 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
/* serve reserved allocations from the reserved chunk if available */ /* serve reserved allocations from the reserved chunk if available */
if (reserved && pcpu_reserved_chunk) { if (reserved && pcpu_reserved_chunk) {
chunk = pcpu_reserved_chunk; chunk = pcpu_reserved_chunk;
if (size > chunk->contig_hint ||
pcpu_extend_area_map(chunk, &flags) < 0) { if (size > chunk->contig_hint) {
err = "failed to extend area map of reserved chunk"; err = "alloc from reserved chunk failed";
goto fail_unlock; goto fail_unlock;
} }
while ((new_alloc = pcpu_need_to_extend(chunk))) {
spin_unlock_irqrestore(&pcpu_lock, flags);
if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
err = "failed to extend area map of reserved chunk";
goto fail_unlock_mutex;
}
spin_lock_irqsave(&pcpu_lock, flags);
}
off = pcpu_alloc_area(chunk, size, align); off = pcpu_alloc_area(chunk, size, align);
if (off >= 0) if (off >= 0)
goto area_found; goto area_found;
err = "alloc from reserved chunk failed"; err = "alloc from reserved chunk failed";
goto fail_unlock; goto fail_unlock;
} }
@ -1083,14 +1118,20 @@ restart:
if (size > chunk->contig_hint) if (size > chunk->contig_hint)
continue; continue;
switch (pcpu_extend_area_map(chunk, &flags)) { new_alloc = pcpu_need_to_extend(chunk);
case 0: if (new_alloc) {
break; spin_unlock_irqrestore(&pcpu_lock, flags);
case 1: if (pcpu_extend_area_map(chunk,
goto restart; /* pcpu_lock dropped, restart */ new_alloc) < 0) {
default: err = "failed to extend area map";
err = "failed to extend area map"; goto fail_unlock_mutex;
goto fail_unlock; }
spin_lock_irqsave(&pcpu_lock, flags);
/*
* pcpu_lock has been dropped, need to
* restart cpu_slot list walking.
*/
goto restart;
} }
off = pcpu_alloc_area(chunk, size, align); off = pcpu_alloc_area(chunk, size, align);