percpu: cosmetic renames in pcpu_setup_first_chunk()

Impact: cosmetic, preparation for future changes

Make the following renames in pcpu_setup_first_chunk() in preparation
for future changes.

* s/free_size/dyn_size/
* s/static_vm/first_vm/
* s/static_chunk/schunk/

Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
Tejun Heo 2009-03-06 14:33:59 +09:00
parent 6a242909b0
commit 2441d15c97
2 changed files with 30 additions and 30 deletions

View File

@ -118,7 +118,7 @@ typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
size_t static_size, size_t unit_size, size_t static_size, size_t unit_size,
size_t free_size, void *base_addr, size_t dyn_size, void *base_addr,
pcpu_populate_pte_fn_t populate_pte_fn); pcpu_populate_pte_fn_t populate_pte_fn);
/* /*

View File

@ -831,7 +831,7 @@ EXPORT_SYMBOL_GPL(free_percpu);
* @get_page_fn: callback to fetch page pointer * @get_page_fn: callback to fetch page pointer
* @static_size: the size of static percpu area in bytes * @static_size: the size of static percpu area in bytes
* @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, 0 for auto * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, 0 for auto
* @free_size: free size in bytes, 0 for auto * @dyn_size: free size for dynamic allocation in bytes, 0 for auto
* @base_addr: mapped address, NULL for auto * @base_addr: mapped address, NULL for auto
* @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
* *
@ -849,12 +849,12 @@ EXPORT_SYMBOL_GPL(free_percpu);
* return the same number of pages for all cpus. * return the same number of pages for all cpus.
* *
* @unit_size, if non-zero, determines unit size and must be aligned * @unit_size, if non-zero, determines unit size and must be aligned
* to PAGE_SIZE and equal to or larger than @static_size + @free_size. * to PAGE_SIZE and equal to or larger than @static_size + @dyn_size.
* *
* @free_size determines the number of free bytes after the static * @dyn_size determines the number of free bytes after the static
* area in the first chunk. If zero, whatever left is available. * area in the first chunk. If zero, whatever left is available.
* Specifying non-zero value make percpu leave the area after * Specifying non-zero value make percpu leave the area after
* @static_size + @free_size alone. * @static_size + @dyn_size alone.
* *
* Non-null @base_addr means that the caller already allocated virtual * Non-null @base_addr means that the caller already allocated virtual
* region for the first chunk and mapped it. percpu must not mess * region for the first chunk and mapped it. percpu must not mess
@ -870,19 +870,19 @@ EXPORT_SYMBOL_GPL(free_percpu);
*/ */
size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
size_t static_size, size_t unit_size, size_t static_size, size_t unit_size,
size_t free_size, void *base_addr, size_t dyn_size, void *base_addr,
pcpu_populate_pte_fn_t populate_pte_fn) pcpu_populate_pte_fn_t populate_pte_fn)
{ {
static struct vm_struct static_vm; static struct vm_struct first_vm;
struct pcpu_chunk *static_chunk; struct pcpu_chunk *schunk;
unsigned int cpu; unsigned int cpu;
int nr_pages; int nr_pages;
int err, i; int err, i;
/* santiy checks */ /* santiy checks */
BUG_ON(!static_size); BUG_ON(!static_size);
BUG_ON(!unit_size && free_size); BUG_ON(!unit_size && dyn_size);
BUG_ON(unit_size && unit_size < static_size + free_size); BUG_ON(unit_size && unit_size < static_size + dyn_size);
BUG_ON(unit_size & ~PAGE_MASK); BUG_ON(unit_size & ~PAGE_MASK);
BUG_ON(base_addr && !unit_size); BUG_ON(base_addr && !unit_size);
BUG_ON(base_addr && populate_pte_fn); BUG_ON(base_addr && populate_pte_fn);
@ -908,24 +908,24 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
for (i = 0; i < pcpu_nr_slots; i++) for (i = 0; i < pcpu_nr_slots; i++)
INIT_LIST_HEAD(&pcpu_slot[i]); INIT_LIST_HEAD(&pcpu_slot[i]);
/* init static_chunk */ /* init static chunk */
static_chunk = alloc_bootmem(pcpu_chunk_struct_size); schunk = alloc_bootmem(pcpu_chunk_struct_size);
INIT_LIST_HEAD(&static_chunk->list); INIT_LIST_HEAD(&schunk->list);
static_chunk->vm = &static_vm; schunk->vm = &first_vm;
if (free_size) if (dyn_size)
static_chunk->free_size = free_size; schunk->free_size = dyn_size;
else else
static_chunk->free_size = pcpu_unit_size - pcpu_static_size; schunk->free_size = pcpu_unit_size - pcpu_static_size;
static_chunk->contig_hint = static_chunk->free_size; schunk->contig_hint = schunk->free_size;
/* allocate vm address */ /* allocate vm address */
static_vm.flags = VM_ALLOC; first_vm.flags = VM_ALLOC;
static_vm.size = pcpu_chunk_size; first_vm.size = pcpu_chunk_size;
if (!base_addr) if (!base_addr)
vm_area_register_early(&static_vm, PAGE_SIZE); vm_area_register_early(&first_vm, PAGE_SIZE);
else { else {
/* /*
* Pages already mapped. No need to remap into * Pages already mapped. No need to remap into
@ -933,8 +933,8 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
* be mapped or unmapped by percpu and is marked * be mapped or unmapped by percpu and is marked
* immutable. * immutable.
*/ */
static_vm.addr = base_addr; first_vm.addr = base_addr;
static_chunk->immutable = true; schunk->immutable = true;
} }
/* assign pages */ /* assign pages */
@ -945,7 +945,7 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
if (!page) if (!page)
break; break;
*pcpu_chunk_pagep(static_chunk, cpu, i) = page; *pcpu_chunk_pagep(schunk, cpu, i) = page;
} }
BUG_ON(i < PFN_UP(pcpu_static_size)); BUG_ON(i < PFN_UP(pcpu_static_size));
@ -960,20 +960,20 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
if (populate_pte_fn) { if (populate_pte_fn) {
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
for (i = 0; i < nr_pages; i++) for (i = 0; i < nr_pages; i++)
populate_pte_fn(pcpu_chunk_addr(static_chunk, populate_pte_fn(pcpu_chunk_addr(schunk,
cpu, i)); cpu, i));
err = pcpu_map(static_chunk, 0, nr_pages); err = pcpu_map(schunk, 0, nr_pages);
if (err) if (err)
panic("failed to setup static percpu area, err=%d\n", panic("failed to setup static percpu area, err=%d\n",
err); err);
} }
/* link static_chunk in */ /* link the first chunk in */
pcpu_chunk_relocate(static_chunk, -1); pcpu_chunk_relocate(schunk, -1);
pcpu_chunk_addr_insert(static_chunk); pcpu_chunk_addr_insert(schunk);
/* we're done */ /* we're done */
pcpu_base_addr = (void *)pcpu_chunk_addr(static_chunk, 0, 0); pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
return pcpu_unit_size; return pcpu_unit_size;
} }