x86: use ELF section to list CPU vendor specific code

Replace the hardcoded list of initialization functions for each CPU
vendor with a list in an ELF section, which is read at initialization in
arch/x86/kernel/cpu/cpu.c to fill the cpu_devs[] array. The ELF
section, named .x86cpuvendor.init, is reclaimed after boot, and
contains entries of type "struct cpu_vendor_dev" which associate a
vendor number with a pointer to a "struct cpu_dev" structure.
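
In sketch form, the new registration scheme looks like this (a condensed
excerpt of the declarations, the macro and the boot-time loop introduced
by the hunks below; __cpuinit/__cpuinitdata annotations are omitted):

  /* One entry per supported vendor, emitted into .x86cpuvendor.init */
  struct cpu_vendor_dev {
          int vendor;
          struct cpu_dev *cpu_dev;
  };

  #define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \
          static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \
          __attribute__((__section__(".x86cpuvendor.init"))) = \
          { cpu_vendor_id, cpu_dev }

  /* Section boundaries provided by the linker scripts */
  extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[];

  /* At boot, walk the section to fill the cpu_devs[] array */
  void __init early_cpu_init(void)
  {
          struct cpu_vendor_dev *cvdev;

          for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end; cvdev++)
                  cpu_devs[cvdev->vendor] = cvdev->cpu_dev;

          early_cpu_detect();
  }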

This first modification allows us to remove all the VENDOR_init_cpu()
functions.

This patch also removes the hardcoded calls to early_init_amd() and
early_init_intel(). Instead, we add a "c_early_init" member to the
cpu_dev structure, which the generic CPU initialization code calls
when it is non-NULL. Unfortunately, in early_cpu_detect(), this_cpu is
not yet set, so we have to use the cpu_devs[] array directly.
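
Concretely, the early-init dispatch in early_cpu_detect() reduces to the
following excerpt (a sketch matching the early_cpu_detect() hunk below):

  /* this_cpu is not set up at this point, so index cpu_devs[] directly
   * and skip vendors that did not provide a c_early_init hook. */
  if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
      cpu_devs[c->x86_vendor]->c_early_init)
          cpu_devs[c->x86_vendor]->c_early_init(c);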

This patch is part of the Linux Tiny project, and is needed for a
further patch that will make it possible to disable compilation of
unused CPU support code.

Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Author:    Thomas Petazzoni, 2008-02-15 12:00:23 +01:00
Committer: Ingo Molnar
Commit:    03ae5768b6 (parent bc7c314d70)
Diffstat:  10 changed files, 46 additions, 69 deletions


@@ -63,7 +63,7 @@ static __cpuinit int amd_apic_timer_broken(void)
 int force_mwait __cpuinitdata;
-void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 {
         if (cpuid_eax(0x80000000) >= 0x80000007) {
                 c->x86_power = cpuid_edx(0x80000007);
@@ -336,6 +336,7 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
                         }
                 },
         },
+        .c_early_init = early_init_amd,
         .c_init = init_amd,
         .c_size_cache = amd_size_cache,
 };
@@ -345,3 +346,5 @@ int __init amd_init_cpu(void)
         cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
         return 0;
 }
+cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);


@@ -464,8 +464,4 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
         .c_size_cache = centaur_size_cache,
 };
-int __init centaur_init_cpu(void)
-{
-        cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
-        return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);


@@ -328,14 +328,9 @@ static void __init early_cpu_detect(void)
         get_cpu_vendor(c, 1);
-        switch (c->x86_vendor) {
-        case X86_VENDOR_AMD:
-                early_init_amd(c);
-                break;
-        case X86_VENDOR_INTEL:
-                early_init_intel(c);
-                break;
-        }
+        if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
+            cpu_devs[c->x86_vendor]->c_early_init)
+                cpu_devs[c->x86_vendor]->c_early_init(c);
         early_get_cap(c);
 }
@@ -616,23 +611,15 @@ __setup("clearcpuid=", setup_disablecpuid);
 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
-/* This is hacky. :)
- * We're emulating future behavior.
- * In the future, the cpu-specific init functions will be called implicitly
- * via the magic of initcalls.
- * They will insert themselves into the cpu_devs structure.
- * Then, when cpu_init() is called, we can just iterate over that array.
- */
 void __init early_cpu_init(void)
 {
-        intel_cpu_init();
-        cyrix_init_cpu();
-        nsc_init_cpu();
-        amd_init_cpu();
-        centaur_init_cpu();
-        transmeta_init_cpu();
-        nexgen_init_cpu();
-        umc_init_cpu();
+        struct cpu_vendor_dev *cvdev;
+        for (cvdev = __x86cpuvendor_start ;
+             cvdev < __x86cpuvendor_end ;
+             cvdev++)
+                cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
         early_cpu_detect();
 }


@@ -14,6 +14,7 @@ struct cpu_dev {
         struct cpu_model_info c_models[4];
+        void (*c_early_init)(struct cpuinfo_x86 *c);
         void (*c_init)(struct cpuinfo_x86 * c);
         void (*c_identify)(struct cpuinfo_x86 * c);
         unsigned int (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size);
@@ -21,18 +22,17 @@
 extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
+struct cpu_vendor_dev {
+        int vendor;
+        struct cpu_dev *cpu_dev;
+};
+#define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \
+        static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \
+        __attribute__((__section__(".x86cpuvendor.init"))) = \
+        { cpu_vendor_id, cpu_dev }
+extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[];
 extern int get_model_name(struct cpuinfo_x86 *c);
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
-extern void early_init_intel(struct cpuinfo_x86 *c);
-extern void early_init_amd(struct cpuinfo_x86 *c);
-/* Specific CPU type init functions */
-int intel_cpu_init(void);
-int amd_init_cpu(void);
-int cyrix_init_cpu(void);
-int nsc_init_cpu(void);
-int centaur_init_cpu(void);
-int transmeta_init_cpu(void);
-int nexgen_init_cpu(void);
-int umc_init_cpu(void);


@@ -439,11 +439,7 @@ static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
         .c_identify = cyrix_identify,
 };
-int __init cyrix_init_cpu(void)
-{
-        cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev;
-        return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev);
 static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
         .c_vendor = "NSC",
@@ -451,9 +447,4 @@ static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
         .c_init = init_nsc,
 };
-int __init nsc_init_cpu(void)
-{
-        cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev;
-        return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_NSC, &nsc_cpu_dev);


@@ -30,7 +30,7 @@
 struct movsl_mask movsl_mask __read_mostly;
 #endif
-void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
+static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
         /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
         if (c->x86 == 15 && c->x86_cache_alignment == 64)
@@ -290,15 +290,12 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
                         }
                 },
         },
+        .c_early_init = early_init_intel,
         .c_init = init_intel,
         .c_size_cache = intel_size_cache,
 };
-__init int intel_cpu_init(void)
-{
-        cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
-        return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
 #ifndef CONFIG_X86_CMPXCHG
 unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)


@@ -102,8 +102,4 @@ static struct cpu_dev transmeta_cpu_dev __cpuinitdata = {
         .c_identify = transmeta_identify,
 };
-int __init transmeta_init_cpu(void)
-{
-        cpu_devs[X86_VENDOR_TRANSMETA] = &transmeta_cpu_dev;
-        return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_TRANSMETA, &transmeta_cpu_dev);


@@ -19,8 +19,5 @@ static struct cpu_dev umc_cpu_dev __cpuinitdata = {
         },
 };
-int __init umc_init_cpu(void)
-{
-        cpu_devs[X86_VENDOR_UMC] = &umc_cpu_dev;
-        return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_UMC, &umc_cpu_dev);


@@ -149,6 +149,11 @@ SECTIONS
         *(.con_initcall.init)
         __con_initcall_end = .;
   }
+  .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
+        __x86cpuvendor_start = .;
+        *(.x86cpuvendor.init)
+        __x86cpuvendor_end = .;
+  }
   SECURITY_INIT
   . = ALIGN(4);
   .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {


@@ -177,6 +177,11 @@ SECTIONS
         *(.con_initcall.init)
   }
   __con_initcall_end = .;
+  __x86cpuvendor_start = .;
+  .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
+        *(.x86cpuvendor.init)
+  }
+  __x86cpuvendor_end = .;
   SECURITY_INIT
   . = ALIGN(8);