backends: Simplify host_memory_backend_memory_complete()

Return early if bc->alloc is NULL. De-indent the if() ladder.

Note, this avoids a pointless call to error_propagate() with
errp=NULL at the 'out:' label.

The change is trivial when reviewed with 'git diff --ignore-all-space'.

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Message-Id: <20231120213301.24349-16-philmd@linaro.org>
This commit is contained in:
Philippe Mathieu-Daudé 2023-11-20 13:49:30 +01:00
parent 2d7a1eb6e6
commit e199f7ad4d

View File

@ -328,83 +328,84 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
void *ptr; void *ptr;
uint64_t sz; uint64_t sz;
if (bc->alloc) { if (!bc->alloc) {
bc->alloc(backend, &local_err); return;
if (local_err) { }
goto out; bc->alloc(backend, &local_err);
} if (local_err) {
goto out;
}
ptr = memory_region_get_ram_ptr(&backend->mr); ptr = memory_region_get_ram_ptr(&backend->mr);
sz = memory_region_size(&backend->mr); sz = memory_region_size(&backend->mr);
if (backend->merge) { if (backend->merge) {
qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE); qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE);
} }
if (!backend->dump) { if (!backend->dump) {
qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP); qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP);
} }
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES); unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES);
/* lastbit == MAX_NODES means maxnode = 0 */ /* lastbit == MAX_NODES means maxnode = 0 */
unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1); unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1);
/* ensure policy won't be ignored in case memory is preallocated /* ensure policy won't be ignored in case memory is preallocated
* before mbind(). note: MPOL_MF_STRICT is ignored on hugepages so * before mbind(). note: MPOL_MF_STRICT is ignored on hugepages so
* this doesn't catch hugepage case. */ * this doesn't catch hugepage case. */
unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE; unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE;
int mode = backend->policy; int mode = backend->policy;
/* check for invalid host-nodes and policies and give more verbose /* check for invalid host-nodes and policies and give more verbose
* error messages than mbind(). */ * error messages than mbind(). */
if (maxnode && backend->policy == MPOL_DEFAULT) { if (maxnode && backend->policy == MPOL_DEFAULT) {
error_setg(errp, "host-nodes must be empty for policy default," error_setg(errp, "host-nodes must be empty for policy default,"
" or you should explicitly specify a policy other" " or you should explicitly specify a policy other"
" than default"); " than default");
return; return;
} else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) { } else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) {
error_setg(errp, "host-nodes must be set for policy %s", error_setg(errp, "host-nodes must be set for policy %s",
HostMemPolicy_str(backend->policy)); HostMemPolicy_str(backend->policy));
return; return;
} }
/* We can have up to MAX_NODES nodes, but we need to pass maxnode+1 /* We can have up to MAX_NODES nodes, but we need to pass maxnode+1
* as argument to mbind() due to an old Linux bug (feature?) which * as argument to mbind() due to an old Linux bug (feature?) which
* cuts off the last specified node. This means backend->host_nodes * cuts off the last specified node. This means backend->host_nodes
* must have MAX_NODES+1 bits available. * must have MAX_NODES+1 bits available.
*/ */
assert(sizeof(backend->host_nodes) >= assert(sizeof(backend->host_nodes) >=
BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long)); BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long));
assert(maxnode <= MAX_NODES); assert(maxnode <= MAX_NODES);
#ifdef HAVE_NUMA_HAS_PREFERRED_MANY #ifdef HAVE_NUMA_HAS_PREFERRED_MANY
if (mode == MPOL_PREFERRED && numa_has_preferred_many() > 0) { if (mode == MPOL_PREFERRED && numa_has_preferred_many() > 0) {
/* /*
* Replace with MPOL_PREFERRED_MANY otherwise the mbind() below * Replace with MPOL_PREFERRED_MANY otherwise the mbind() below
* silently picks the first node. * silently picks the first node.
*/ */
mode = MPOL_PREFERRED_MANY; mode = MPOL_PREFERRED_MANY;
} }
#endif #endif
if (maxnode && if (maxnode &&
mbind(ptr, sz, mode, backend->host_nodes, maxnode + 1, flags)) { mbind(ptr, sz, mode, backend->host_nodes, maxnode + 1, flags)) {
if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) { if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) {
error_setg_errno(errp, errno, error_setg_errno(errp, errno,
"cannot bind memory to host NUMA nodes"); "cannot bind memory to host NUMA nodes");
return; return;
}
} }
}
#endif #endif
/* Preallocate memory after the NUMA policy has been instantiated. /* Preallocate memory after the NUMA policy has been instantiated.
* This is necessary to guarantee memory is allocated with * This is necessary to guarantee memory is allocated with
* specified NUMA policy in place. * specified NUMA policy in place.
*/ */
if (backend->prealloc) { if (backend->prealloc) {
qemu_prealloc_mem(memory_region_get_fd(&backend->mr), ptr, sz, qemu_prealloc_mem(memory_region_get_fd(&backend->mr), ptr, sz,
backend->prealloc_threads, backend->prealloc_threads,
backend->prealloc_context, &local_err); backend->prealloc_context, &local_err);
if (local_err) { if (local_err) {
goto out; goto out;
}
} }
} }
out: out: