hw/acpi/aml-build: Use existing CPU topology to build PPTT table
When the PPTT table is built, the CPU topology is re-calculated, but this is unnecessary because the CPU topology has already been populated in virt_possible_cpu_arch_ids() on the arm/virt machine. This reworks build_pptt() to avoid the re-calculation by reusing the existing IDs in ms->possible_cpus. Currently, the only user of build_pptt() is the arm/virt machine.

Signed-off-by: Gavin Shan <gshan@redhat.com>
Tested-by: Yanan Wang <wangyanan55@huawei.com>
Reviewed-by: Yanan Wang <wangyanan55@huawei.com>
Acked-by: Igor Mammedov <imammedo@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Message-id: 20220503140304.855514-7-gshan@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parent 4c18bc1923
commit ae9141d4a3
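The rework replaces the nested socket/cluster/core loops with a single walk over ms->possible_cpus that opens a new container node only when the topology ID at a given level changes; this relies on the IDs being sorted from the top level down, as the comment in the new code notes. A minimal standalone sketch of that pattern follows (the CpuEntry type and emit_node() helper are hypothetical stand-ins for illustration, not the QEMU helpers):

/*
 * Sketch only: walk a CPU list whose (socket, cluster, core) IDs are
 * sorted from the top level down and emit a container node the first
 * time each ID is seen, mirroring the shape of the new build_pptt().
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    int64_t socket_id;
    int64_t cluster_id;
    int64_t core_id;
} CpuEntry;                      /* hypothetical, for illustration only */

static void emit_node(const char *level, int64_t id)
{
    /* Stand-in for build_processor_hierarchy_node(): just log the node. */
    printf("new %s node: %lld\n", level, (long long)id);
}

int main(void)
{
    /* Two sockets, two clusters per socket, two cores per cluster. */
    CpuEntry cpus[] = {
        { 0, 0, 0 }, { 0, 0, 1 }, { 0, 1, 2 }, { 0, 1, 3 },
        { 1, 2, 4 }, { 1, 2, 5 }, { 1, 3, 6 }, { 1, 3, 7 },
    };
    int64_t socket_id = -1, cluster_id = -1, core_id = -1;
    size_t n;

    for (n = 0; n < sizeof(cpus) / sizeof(cpus[0]); n++) {
        if (cpus[n].socket_id != socket_id) {
            /* Sorted input guarantees each socket is visited exactly once. */
            assert(cpus[n].socket_id > socket_id);
            socket_id = cpus[n].socket_id;
            cluster_id = -1;
            core_id = -1;
            emit_node("socket", socket_id);
        }
        if (cpus[n].cluster_id != cluster_id) {
            assert(cpus[n].cluster_id > cluster_id);
            cluster_id = cpus[n].cluster_id;
            core_id = -1;
            emit_node("cluster", cluster_id);
        }
        if (cpus[n].core_id != core_id) {
            assert(cpus[n].core_id > core_id);
            core_id = cpus[n].core_id;
            emit_node("core", core_id);
        }
    }
    return 0;
}

If the IDs were not sorted, the asserts would trip (or, without them, duplicated containers would be created), which is exactly the assumption the new code documents.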
@@ -2002,86 +2002,71 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
                 const char *oem_id, const char *oem_table_id)
 {
     MachineClass *mc = MACHINE_GET_CLASS(ms);
-    GQueue *list = g_queue_new();
-    guint pptt_start = table_data->len;
-    guint parent_offset;
-    guint length, i;
-    int uid = 0;
-    int socket;
+    CPUArchIdList *cpus = ms->possible_cpus;
+    int64_t socket_id = -1, cluster_id = -1, core_id = -1;
+    uint32_t socket_offset = 0, cluster_offset = 0, core_offset = 0;
+    uint32_t pptt_start = table_data->len;
+    int n;
     AcpiTable table = { .sig = "PPTT", .rev = 2,
                         .oem_id = oem_id, .oem_table_id = oem_table_id };
 
     acpi_table_begin(&table, table_data);
 
-    for (socket = 0; socket < ms->smp.sockets; socket++) {
-        g_queue_push_tail(list,
-            GUINT_TO_POINTER(table_data->len - pptt_start));
-        build_processor_hierarchy_node(
-            table_data,
-            /*
-             * Physical package - represents the boundary
-             * of a physical package
-             */
-            (1 << 0),
-            0, socket, NULL, 0);
-    }
-
-    if (mc->smp_props.clusters_supported) {
-        length = g_queue_get_length(list);
-        for (i = 0; i < length; i++) {
-            int cluster;
-
-            parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-            for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
-                g_queue_push_tail(list,
-                    GUINT_TO_POINTER(table_data->len - pptt_start));
-                build_processor_hierarchy_node(
-                    table_data,
-                    (0 << 0), /* not a physical package */
-                    parent_offset, cluster, NULL, 0);
-            }
+    /*
+     * This works with the assumption that cpus[n].props.*_id has been
+     * sorted from top to down levels in mc->possible_cpu_arch_ids().
+     * Otherwise, the unexpected and duplicated containers will be
+     * created.
+     */
+    for (n = 0; n < cpus->len; n++) {
+        if (cpus->cpus[n].props.socket_id != socket_id) {
+            assert(cpus->cpus[n].props.socket_id > socket_id);
+            socket_id = cpus->cpus[n].props.socket_id;
+            cluster_id = -1;
+            core_id = -1;
+            socket_offset = table_data->len - pptt_start;
+            build_processor_hierarchy_node(table_data,
+                (1 << 0), /* Physical package */
+                0, socket_id, NULL, 0);
         }
-    }
 
-    length = g_queue_get_length(list);
-    for (i = 0; i < length; i++) {
-        int core;
-
-        parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-        for (core = 0; core < ms->smp.cores; core++) {
-            if (ms->smp.threads > 1) {
-                g_queue_push_tail(list,
-                    GUINT_TO_POINTER(table_data->len - pptt_start));
-                build_processor_hierarchy_node(
-                    table_data,
-                    (0 << 0), /* not a physical package */
-                    parent_offset, core, NULL, 0);
-            } else {
-                build_processor_hierarchy_node(
-                    table_data,
-                    (1 << 1) | /* ACPI Processor ID valid */
-                    (1 << 3), /* Node is a Leaf */
-                    parent_offset, uid++, NULL, 0);
+        if (mc->smp_props.clusters_supported) {
+            if (cpus->cpus[n].props.cluster_id != cluster_id) {
+                assert(cpus->cpus[n].props.cluster_id > cluster_id);
+                cluster_id = cpus->cpus[n].props.cluster_id;
+                core_id = -1;
+                cluster_offset = table_data->len - pptt_start;
+                build_processor_hierarchy_node(table_data,
+                    (0 << 0), /* Not a physical package */
+                    socket_offset, cluster_id, NULL, 0);
             }
+        } else {
+            cluster_offset = socket_offset;
         }
-    }
 
-    length = g_queue_get_length(list);
-    for (i = 0; i < length; i++) {
-        int thread;
-
-        parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-        for (thread = 0; thread < ms->smp.threads; thread++) {
-            build_processor_hierarchy_node(
-                table_data,
+        if (ms->smp.threads == 1) {
+            build_processor_hierarchy_node(table_data,
+                (1 << 1) | /* ACPI Processor ID valid */
+                (1 << 3), /* Node is a Leaf */
+                cluster_offset, n, NULL, 0);
+        } else {
+            if (cpus->cpus[n].props.core_id != core_id) {
+                assert(cpus->cpus[n].props.core_id > core_id);
+                core_id = cpus->cpus[n].props.core_id;
+                core_offset = table_data->len - pptt_start;
+                build_processor_hierarchy_node(table_data,
+                    (0 << 0), /* Not a physical package */
+                    cluster_offset, core_id, NULL, 0);
+            }
+
+            build_processor_hierarchy_node(table_data,
                 (1 << 1) | /* ACPI Processor ID valid */
                 (1 << 2) | /* Processor is a Thread */
                 (1 << 3), /* Node is a Leaf */
-                parent_offset, uid++, NULL, 0);
+                core_offset, n, NULL, 0);
         }
     }
 
-    g_queue_free(list);
     acpi_table_end(linker, &table);
 }
 
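For reference when reading the flag arguments in the diff: the (1 << n) values passed to build_processor_hierarchy_node() follow the ACPI PPTT processor hierarchy node flags, as the inline comments indicate. A purely illustrative set of names for those bits (an assumption for readability; these identifiers are not in the QEMU sources):

/* Illustrative names for the PPTT flag bits used above (not QEMU code). */
enum {
    PPTT_PHYSICAL_PACKAGE    = 1 << 0, /* node is a physical package        */
    PPTT_ACPI_PROC_ID_VALID  = 1 << 1, /* ACPI Processor ID field is valid  */
    PPTT_PROCESSOR_IS_THREAD = 1 << 2, /* node represents an SMT thread     */
    PPTT_NODE_IS_LEAF        = 1 << 3, /* node has no child nodes           */
};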