Merge branches 'acpi-scan', 'acpi-tables', 'acpi-misc' and 'acpi-pm'
* acpi-scan:
  ACPI / scan: Add labels for PNP button devices
  ACPI / scan: Simplify acpi_bus_extract_wakeup_device_power_package()

* acpi-tables:
  ACPI / tables: Clean up whitespace

* acpi-misc:
  ACPI / DPTF: Use dev_get_drvdata()
  ACPI: event: replace strcpy() by strscpy()
  ACPI: Fix comment typos

* acpi-pm:
  ACPI: PM: Print debug messages when enabling GPEs for wakeup
commit 91751459ec
@@ -390,7 +390,7 @@ again:
         return size > 0 ? size : ret;
 }
 
-static int acpi_aml_thread(void *unsed)
+static int acpi_aml_thread(void *unused)
 {
         acpi_osd_exec_callback function = NULL;
         void *context;

@@ -22,7 +22,7 @@
  * LPAT conversion table
  *
  * @lpat_table: the temperature_raw mapping table structure
- * @raw: the raw value, used as a key to get the temerature from the
+ * @raw: the raw value, used as a key to get the temperature from the
  * above mapping table
  *
  * A positive converted temperature value will be returned on success,

@@ -81,9 +81,9 @@ struct cppc_pcc_data {
         int refcount;
 };
 
-/* Array to represent the PCC channel per subspace id */
+/* Array to represent the PCC channel per subspace ID */
 static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
-/* The cpu_pcc_subspace_idx containsper CPU subspace id */
+/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
 static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
 
 /*

@@ -436,7 +436,7 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
                 return -ENOMEM;
 
         /*
-         * Now that we have _PSD data from all CPUs, lets setup P-state
+         * Now that we have _PSD data from all CPUs, let's setup P-state
          * domain info.
          */
         for_each_possible_cpu(i) {

@@ -588,7 +588,7 @@ static int register_pcc_channel(int pcc_ss_idx)
                         return -ENOMEM;
                 }
 
-                /* Set flag so that we dont come here for each CPU. */
+                /* Set flag so that we don't come here for each CPU. */
                 pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
         }
 

@@ -613,7 +613,7 @@ bool __weak cpc_ffh_supported(void)
  *
  * Check and allocate the cppc_pcc_data memory.
  * In some processor configurations it is possible that same subspace
- * is shared between multiple CPU's. This is seen especially in CPU's
+ * is shared between multiple CPUs. This is seen especially in CPUs
  * with hardware multi-threading support.
  *
  * Return: 0 for success, errno for failure

@@ -711,7 +711,7 @@ static bool is_cppc_supported(int revision, int num_ent)
 
 /**
  * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
- * @pr: Ptr to acpi_processor containing this CPUs logical Id.
+ * @pr: Ptr to acpi_processor containing this CPU's logical ID.
  *
  * Return: 0 for success or negative value for err.
  */

@@ -728,7 +728,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
         acpi_status status;
         int ret = -EFAULT;
 
-        /* Parse the ACPI _CPC table for this cpu. */
+        /* Parse the ACPI _CPC table for this CPU. */
         status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
                         ACPI_TYPE_PACKAGE);
         if (ACPI_FAILURE(status)) {

@@ -840,7 +840,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
         if (ret)
                 goto out_free;
 
-        /* Register PCC channel once for all PCC subspace id. */
+        /* Register PCC channel once for all PCC subspace ID. */
         if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
                 ret = register_pcc_channel(pcc_subspace_id);
                 if (ret)

@@ -860,7 +860,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
                 goto out_free;
         }
 
-        /* Plug PSD data into this CPUs CPC descriptor. */
+        /* Plug PSD data into this CPU's CPC descriptor. */
         per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
 
         ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,

@@ -891,7 +891,7 @@ EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
 
 /**
  * acpi_cppc_processor_exit - Cleanup CPC structs.
- * @pr: Ptr to acpi_processor containing this CPUs logical Id.
+ * @pr: Ptr to acpi_processor containing this CPU's logical ID.
  *
  * Return: Void
  */

@@ -931,7 +931,7 @@ EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
 
 /**
  * cpc_read_ffh() - Read FFH register
- * @cpunum: cpu number to read
+ * @cpunum: CPU number to read
  * @reg: cppc register information
  * @val: place holder for return value
  *

@@ -946,7 +946,7 @@ int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
 
 /**
  * cpc_write_ffh() - Write FFH register
- * @cpunum: cpu number to write
+ * @cpunum: CPU number to write
  * @reg: cppc register information
  * @val: value to write
 *

@@ -1093,7 +1093,7 @@ int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
 EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
 
 /**
- * cppc_get_perf_caps - Get a CPUs performance capabilities.
+ * cppc_get_perf_caps - Get a CPU's performance capabilities.
  * @cpunum: CPU from which to get capabilities info.
  * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
  *

@@ -1183,7 +1183,7 @@ out_err:
 EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
 
 /**
- * cppc_get_perf_ctrs - Read a CPUs performance feedback counters.
+ * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
  * @cpunum: CPU from which to read counters.
  * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
  *

@@ -1210,7 +1210,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
         ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
 
         /*
-         * If refernce perf register is not supported then we should
+         * If reference perf register is not supported then we should
          * use the nominal perf value
          */
         if (!CPC_SUPPORTED(ref_perf_reg))

@@ -1263,7 +1263,7 @@ out_err:
 EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
 
 /**
- * cppc_set_perf - Set a CPUs performance controls.
+ * cppc_set_perf - Set a CPU's performance controls.
  * @cpu: CPU for which to set performance controls.
  * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
  *

@@ -1344,7 +1344,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
          * executing the Phase-II.
          * 2. Some other CPU has beaten this CPU to successfully execute the
          * write_trylock and has already acquired the write_lock. We know for a
-         * fact it(other CPU acquiring the write_lock) couldn't have happened
+         * fact it (other CPU acquiring the write_lock) couldn't have happened
          * before this CPU's Phase-I as we held the read_lock.
          * 3. Some other CPU executing pcc CMD_READ has stolen the
          * down_write, in which case, send_pcc_cmd will check for pending

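The Phase-I/Phase-II wording in the comment above describes a read-lock-then-write-trylock pattern. Below is a generic sketch of that pattern using an rw_semaphore, where one CPU batches the doorbell write for everyone; it is an illustration only, not the actual cppc_acpi.c locking code, and every name in it is made up.

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_pcc_lock);

static void example_two_phase_update(void)
{
        /* Phase-I: fill the shared command buffers under the read lock. */
        down_read(&example_pcc_lock);
        /* ... write this CPU's desired-performance value ... */
        up_read(&example_pcc_lock);

        /*
         * Phase-II: only one CPU needs to ring the doorbell for the whole
         * batch.  If the trylock fails, another CPU already owns Phase-II
         * (case 2 in the comment above) and will cover this update too.
         */
        if (down_write_trylock(&example_pcc_lock)) {
                /* ... send the PCC command covering all pending writes ... */
                up_write(&example_pcc_lock);
        }
}
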
@@ -728,6 +728,9 @@ static int __acpi_device_wakeup_enable(struct acpi_device *adev,
                 goto out;
         }
 
+        acpi_handle_debug(adev->handle, "GPE%2X enabled for wakeup\n",
+                          (unsigned int)wakeup->gpe_number);
+
 inc:
         wakeup->enable_count++;
 

@@ -31,8 +31,7 @@ static ssize_t name##_show(struct device *dev,\
                            struct device_attribute *attr,\
                            char *buf)\
 {\
-        struct platform_device *pdev = to_platform_device(dev);\
-        struct acpi_device *acpi_dev = platform_get_drvdata(pdev);\
+        struct acpi_device *acpi_dev = dev_get_drvdata(dev);\
         unsigned long long val;\
         acpi_status status;\
 \

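Background on the DPTF change above: for a platform device, platform_get_drvdata() is only a thin wrapper that calls dev_get_drvdata() on the embedded struct device, so both paths return the same driver-data pointer. A minimal sketch with a hypothetical show() callback, not the DPTF macro itself:

#include <linux/device.h>
#include <linux/platform_device.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        /* Old style: recover the platform_device first, then its drvdata. */
        struct platform_device *pdev = to_platform_device(dev);
        void *via_pdev = platform_get_drvdata(pdev);

        /* New style: read the same pointer straight from struct device. */
        void *via_dev = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", via_pdev == via_dev);  /* always 1 */
}
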
@@ -131,8 +131,8 @@ int acpi_bus_generate_netlink_event(const char *device_class,
         event = nla_data(attr);
         memset(event, 0, sizeof(struct acpi_genl_event));
 
-        strcpy(event->device_class, device_class);
-        strcpy(event->bus_id, bus_id);
+        strscpy(event->device_class, device_class, sizeof(event->device_class));
+        strscpy(event->bus_id, bus_id, sizeof(event->bus_id));
         event->type = type;
         event->data = data;
 

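For background on the strcpy() to strscpy() conversion above: strscpy() bounds the copy by the destination size, always NUL-terminates the result (for a non-zero size), and returns the number of characters copied or -E2BIG on truncation. A minimal sketch with a made-up struct, not the real acpi_genl_event layout:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_event {
        char device_class[20];          /* hypothetical fixed-size field */
};

static void example_fill(struct example_event *ev, const char *src)
{
        ssize_t n;

        /* Copies at most sizeof(ev->device_class) - 1 bytes, then NUL-terminates. */
        n = strscpy(ev->device_class, src, sizeof(ev->device_class));
        if (n == -E2BIG)
                pr_warn("device_class string was truncated\n");
}
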
@@ -535,12 +535,12 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
         /*
          * Try to execute _DSW first.
          *
-         * Three agruments are needed for the _DSW object:
+         * Three arguments are needed for the _DSW object:
          * Argument 0: enable/disable the wake capabilities
          * Argument 1: target system state
          * Argument 2: target device state
          * When _DSW object is called to disable the wake capabilities, maybe
-         * the first argument is filled. The values of the other two agruments
+         * the first argument is filled. The values of the other two arguments
          * are meaningless.
          */
         in_arg[0].type = ACPI_TYPE_INTEGER;

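For readers unfamiliar with the calling convention, the sketch below shows the usual ACPICA pattern for passing those three integer arguments to a control method such as _DSW: fill a union acpi_object array, wrap it in an acpi_object_list, and hand it to acpi_evaluate_object(). The function and variable names are illustrative, not the exact code from this file.

#include <linux/acpi.h>

static acpi_status example_run_dsw(acpi_handle handle, u64 enable,
                                   u64 target_system_state,
                                   u64 target_device_state)
{
        union acpi_object in_arg[3];
        struct acpi_object_list arg_list = { .count = 3, .pointer = in_arg };

        in_arg[0].type = ACPI_TYPE_INTEGER;
        in_arg[0].integer.value = enable;                /* Argument 0 */
        in_arg[1].type = ACPI_TYPE_INTEGER;
        in_arg[1].integer.value = target_system_state;   /* Argument 1 */
        in_arg[2].type = ACPI_TYPE_INTEGER;
        in_arg[2].integer.value = target_device_state;   /* Argument 2 */

        return acpi_evaluate_object(handle, "_DSW", &arg_list, NULL);
}
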
@@ -164,7 +164,7 @@ static struct acpi_pptt_cache *acpi_find_cache_level(struct acpi_table_header *t
 }
 
 /**
- * acpi_count_levels() - Given a PPTT table, and a cpu node, count the caches
+ * acpi_count_levels() - Given a PPTT table, and a CPU node, count the caches
  * @table_hdr: Pointer to the head of the PPTT table
  * @cpu_node: processor node we wish to count caches for
  *

@@ -235,7 +235,7 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
 /**
  * acpi_find_processor_node() - Given a PPTT table find the requested processor
  * @table_hdr: Pointer to the head of the PPTT table
- * @acpi_cpu_id: cpu we are searching for
+ * @acpi_cpu_id: CPU we are searching for
  *
  * Find the subtable entry describing the provided processor.
  * This is done by iterating the PPTT table looking for processor nodes

@@ -456,21 +456,21 @@ static struct acpi_pptt_processor *acpi_find_processor_package_id(struct acpi_ta
 
 static void acpi_pptt_warn_missing(void)
 {
-        pr_warn_once("No PPTT table found, cpu and cache topology may be inaccurate\n");
+        pr_warn_once("No PPTT table found, CPU and cache topology may be inaccurate\n");
 }
 
 /**
  * topology_get_acpi_cpu_tag() - Find a unique topology value for a feature
  * @table: Pointer to the head of the PPTT table
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
  * @level: A level that terminates the search
  * @flag: A flag which terminates the search
  *
- * Get a unique value given a cpu, and a topology level, that can be
+ * Get a unique value given a CPU, and a topology level, that can be
  * matched to determine which cpus share common topological features
  * at that level.
  *
- * Return: Unique value, or -ENOENT if unable to locate cpu
+ * Return: Unique value, or -ENOENT if unable to locate CPU
  */
 static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
                                      unsigned int cpu, int level, int flag)

@@ -510,7 +510,7 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
                 return -ENOENT;
         }
         retval = topology_get_acpi_cpu_tag(table, cpu, level, flag);
-        pr_debug("Topology Setup ACPI cpu %d, level %d ret = %d\n",
+        pr_debug("Topology Setup ACPI CPU %d, level %d ret = %d\n",
                  cpu, level, retval);
         acpi_put_table(table);
 

@@ -519,9 +519,9 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
 
 /**
  * acpi_find_last_cache_level() - Determines the number of cache levels for a PE
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
  *
- * Given a logical cpu number, returns the number of levels of cache represented
+ * Given a logical CPU number, returns the number of levels of cache represented
  * in the PPTT. Errors caused by lack of a PPTT table, or otherwise, return 0
  * indicating we didn't find any cache levels.
  *

@@ -534,7 +534,7 @@ int acpi_find_last_cache_level(unsigned int cpu)
         int number_of_levels = 0;
         acpi_status status;
 
-        pr_debug("Cache Setup find last level cpu=%d\n", cpu);
+        pr_debug("Cache Setup find last level CPU=%d\n", cpu);
 
         acpi_cpu_id = get_acpi_id_for_cpu(cpu);
         status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);

@@ -551,14 +551,14 @@ int acpi_find_last_cache_level(unsigned int cpu)
 
 /**
  * cache_setup_acpi() - Override CPU cache topology with data from the PPTT
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
  *
  * Updates the global cache info provided by cpu_get_cacheinfo()
  * when there are valid properties in the acpi_pptt_cache nodes. A
  * successful parse may not result in any updates if none of the
- * cache levels have any valid flags set. Futher, a unique value is
+ * cache levels have any valid flags set. Further, a unique value is
  * associated with each known CPU cache entry. This unique value
- * can be used to determine whether caches are shared between cpus.
+ * can be used to determine whether caches are shared between CPUs.
  *
  * Return: -ENOENT on failure to find table, or 0 on success
  */

@@ -567,7 +567,7 @@ int cache_setup_acpi(unsigned int cpu)
         struct acpi_table_header *table;
         acpi_status status;
 
-        pr_debug("Cache Setup ACPI cpu %d\n", cpu);
+        pr_debug("Cache Setup ACPI CPU %d\n", cpu);
 
         status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
         if (ACPI_FAILURE(status)) {

@@ -582,8 +582,8 @@ int cache_setup_acpi(unsigned int cpu)
 }
 
 /**
- * find_acpi_cpu_topology() - Determine a unique topology value for a given cpu
- * @cpu: Kernel logical cpu number
+ * find_acpi_cpu_topology() - Determine a unique topology value for a given CPU
+ * @cpu: Kernel logical CPU number
  * @level: The topological level for which we would like a unique ID
  *
  * Determine a topology unique ID for each thread/core/cluster/mc_grouping

@@ -596,7 +596,7 @@ int cache_setup_acpi(unsigned int cpu)
  * other levels beyond this use a generated value to uniquely identify
  * a topological feature.
  *
- * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
  * Otherwise returns a value which represents a unique topological feature.
  */
 int find_acpi_cpu_topology(unsigned int cpu, int level)

@@ -606,12 +606,12 @@ int find_acpi_cpu_topology(unsigned int cpu, int level)
 
 /**
  * find_acpi_cpu_cache_topology() - Determine a unique cache topology value
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
  * @level: The cache level for which we would like a unique ID
  *
  * Determine a unique ID for each unified cache in the system
  *
- * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
  * Otherwise returns a value which represents a unique topological feature.
  */
 int find_acpi_cpu_cache_topology(unsigned int cpu, int level)

@@ -643,17 +643,17 @@ int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
 
 
 /**
- * find_acpi_cpu_topology_package() - Determine a unique cpu package value
- * @cpu: Kernel logical cpu number
+ * find_acpi_cpu_topology_package() - Determine a unique CPU package value
+ * @cpu: Kernel logical CPU number
  *
- * Determine a topology unique package ID for the given cpu.
+ * Determine a topology unique package ID for the given CPU.
  * This ID can then be used to group peers, which will have matching ids.
  *
  * The search terminates when either a level is found with the PHYSICAL_PACKAGE
  * flag set or we reach a root node.
  *
- * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
- * Otherwise returns a value which represents the package for this cpu.
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
+ * Otherwise returns a value which represents the package for this CPU.
  */
 int find_acpi_cpu_topology_package(unsigned int cpu)
 {

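The kernel-doc above documents the lookup helpers without showing a caller, so here is a hedged sketch of how architecture topology code might consume find_acpi_cpu_topology_package(); the function name and the pr_info() reporting are illustrative only.

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

static void example_report_packages(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                int package_id = find_acpi_cpu_topology_package(cpu);

                if (package_id < 0)     /* -ENOENT: no PPTT, or CPU not found */
                        continue;

                pr_info("CPU%u belongs to package %d\n", cpu, package_id);
        }
}
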
@@ -763,18 +763,16 @@ acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
 }
 EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
 
-static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
-                                        struct acpi_device_wakeup *wakeup)
+static int acpi_bus_extract_wakeup_device_power_package(struct acpi_device *dev)
 {
+        acpi_handle handle = dev->handle;
+        struct acpi_device_wakeup *wakeup = &dev->wakeup;
         struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
         union acpi_object *package = NULL;
         union acpi_object *element = NULL;
         acpi_status status;
         int err = -ENODATA;
 
-        if (!wakeup)
-                return -EINVAL;
-
         INIT_LIST_HEAD(&wakeup->resources);
 
         /* _PRW */

@@ -848,9 +846,9 @@ static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
 static bool acpi_wakeup_gpe_init(struct acpi_device *device)
 {
         static const struct acpi_device_id button_device_ids[] = {
-                {"PNP0C0C", 0},
-                {"PNP0C0D", 0},
-                {"PNP0C0E", 0},
+                {"PNP0C0C", 0},         /* Power button */
+                {"PNP0C0D", 0},         /* Lid */
+                {"PNP0C0E", 0},         /* Sleep button */
                 {"", 0},
         };
         struct acpi_device_wakeup *wakeup = &device->wakeup;

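As context for the labels added above: ID tables like button_device_ids are normally matched with acpi_match_device_ids(), which returns 0 when the device matches one of the entries. A minimal sketch with a hypothetical helper name, not the scan.c code path itself:

#include <linux/acpi.h>

static bool example_is_wakeup_button(struct acpi_device *adev)
{
        static const struct acpi_device_id button_ids[] = {
                {"PNP0C0C", 0},         /* Power button */
                {"PNP0C0D", 0},         /* Lid */
                {"PNP0C0E", 0},         /* Sleep button */
                {"", 0},
        };

        /* acpi_match_device_ids() returns 0 on a match. */
        return !acpi_match_device_ids(adev, button_ids);
}
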
@@ -883,8 +881,7 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
         if (!acpi_has_method(device->handle, "_PRW"))
                 return;
 
-        err = acpi_bus_extract_wakeup_device_power_package(device->handle,
-                                                           &device->wakeup);
+        err = acpi_bus_extract_wakeup_device_power_package(device);
         if (err) {
                 dev_err(&device->dev, "_PRW evaluation error: %d\n", err);
                 return;

@@ -895,7 +892,7 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
         /*
          * Call _PSW/_DSW object to disable its ability to wake the sleeping
          * system for the ACPI device with the _PRW object.
-         * The _PSW object is depreciated in ACPI 3.0 and is replaced by _DSW.
+         * The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW.
          * So it is necessary to call _DSW object first. Only when it is not
          * present will the _PSW object used.
          */

@@ -28,7 +28,7 @@ EXPORT_SYMBOL(qdf2400_e44_present);
 
 /*
  * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
- * Detect them by examining the OEM fields in the SPCR header, similiar to PCI
+ * Detect them by examining the OEM fields in the SPCR header, similar to PCI
  * quirk detection in pci_mcfg.c.
  */
 static bool qdf2400_erratum_44_present(struct acpi_table_header *h)

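The comment above refers to matching on the OEM fields of the table header; those fields live in struct acpi_table_header as fixed-width, space-padded, non-NUL-terminated strings. A generic sketch of such a quirk check, using a made-up OEM ID rather than the real Qualcomm values:

#include <linux/acpi.h>
#include <linux/string.h>

static bool example_oem_quirk_present(struct acpi_table_header *h)
{
        /* oem_id is ACPI_OEM_ID_SIZE (6) bytes and not NUL-terminated. */
        if (memcmp(h->oem_id, "VENDOR", ACPI_OEM_ID_SIZE))
                return false;

        /* Narrow further by OEM table ID and/or revision as the quirk requires. */
        return h->oem_revision <= 1;
}
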
@@ -240,8 +240,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
  * On success returns sum of all matching entries for all proc handlers.
  * Otherwise, -ENODEV or -EINVAL is returned.
  */
-static int __init
-acpi_parse_entries_array(char *id, unsigned long table_size,
+static int __init acpi_parse_entries_array(char *id, unsigned long table_size,
                 struct acpi_table_header *table_header,
                 struct acpi_subtable_proc *proc, int proc_num,
                 unsigned int max_entries)

@@ -314,8 +313,7 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
         return errs ? -EINVAL : count;
 }
 
-int __init
-acpi_table_parse_entries_array(char *id,
+int __init acpi_table_parse_entries_array(char *id,
                         unsigned long table_size,
                         struct acpi_subtable_proc *proc, int proc_num,
                         unsigned int max_entries)

@@ -346,8 +344,7 @@ acpi_table_parse_entries_array(char *id,
         return count;
 }
 
-int __init
-acpi_table_parse_entries(char *id,
+int __init acpi_table_parse_entries(char *id,
                         unsigned long table_size,
                         int entry_id,
                         acpi_tbl_entry_handler handler,

@@ -362,8 +359,7 @@ acpi_table_parse_entries(char *id,
                         max_entries);
 }
 
-int __init
-acpi_table_parse_madt(enum acpi_madt_type id,
+int __init acpi_table_parse_madt(enum acpi_madt_type id,
                       acpi_tbl_entry_handler handler, unsigned int max_entries)
 {
         return acpi_table_parse_entries(ACPI_SIG_MADT,

@@ -725,8 +721,7 @@ static void *amlcode __attribute__ ((weakref("AmlCode")));
 static void *dsdt_amlcode __attribute__ ((weakref("dsdt_aml_code")));
 #endif
 
-acpi_status
-acpi_os_table_override(struct acpi_table_header *existing_table,
+acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
                        struct acpi_table_header **new_table)
 {
         if (!existing_table || !new_table)

@@ -788,7 +783,6 @@ static int __init acpi_parse_apic_instance(char *str)
 
         return 0;
 }
-
 early_param("acpi_apic_instance", acpi_parse_apic_instance);
 
 static int __init acpi_force_table_verification_setup(char *s)

@@ -797,7 +791,6 @@ static int __init acpi_force_table_verification_setup(char *s)
 
         return 0;
 }
-
 early_param("acpi_force_table_verification", acpi_force_table_verification_setup);
 
 static int __init acpi_force_32bit_fadt_addr(char *s)

@@ -807,5 +800,4 @@ static int __init acpi_force_32bit_fadt_addr(char *s)
 
         return 0;
 }
-
 early_param("acpi_force_32bit_fadt_addr", acpi_force_32bit_fadt_addr);

@@ -112,7 +112,7 @@ static int video_detect_force_none(const struct dmi_system_id *d)
 static const struct dmi_system_id video_detect_dmi_table[] = {
         /* On Samsung X360, the BIOS will set a flag (VDRV) if generic
          * ACPI backlight device is used. This flag will definitively break
-         * the backlight interface (even the vendor interface) untill next
+         * the backlight interface (even the vendor interface) until next
          * reboot. It's why we should prevent video.ko from being used here
          * and we can't rely on a later call to acpi_video_unregister().
          */

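The quirk described above is keyed off DMI identification: entries in a dmi_system_id table pair a callback with DMI_MATCH() criteria, and dmi_check_system() runs the callback for matching machines. A hedged sketch with illustrative strings and callback, not the exact Samsung X360 entry:

#include <linux/dmi.h>

static int example_force_vendor(const struct dmi_system_id *d)
{
        /* The real table would flip the backlight-selection policy here. */
        return 0;
}

static const struct dmi_system_id example_dmi_table[] = {
        {
                .callback = example_force_vendor,
                .ident = "Example laptop needing the vendor backlight",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE VENDOR"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "EXAMPLE MODEL"),
                },
        },
        { }
};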