"Host Memory Backends" and "Memory devices" queue ("mem"):
 - virtio-mem fixes
 - Use new MPOL_PREFERRED_MANY mbind() policy for memory backends if
   possible
 -----BEGIN PGP SIGNATURE-----
 
 iQJFBAABCAAvFiEEG9nKrXNcTDpGDfzKTd4Q9wD/g1oFAmOyvlwRHGRhdmlkQHJl
 ZGhhdC5jb20ACgkQTd4Q9wD/g1o52Q//WdmLKrvMyr32crqxPQD6EjqAOUyjV0ps
 +5og4glIMb8+0Br4lIOOsZ71qYpBGCKHVAYr5FYCusYGBoNWMHWXzlYUB7RlMQcE
 VUZ1aJIzitR4GPul++j0rZxwip9WtDGOhXLoGbtQnpMdPNYx27bxG6fel4uZ/8gD
 R0jXnB4SHb4g4CvAp4xx+ffbtpWVGuGA8tPe4Hktsib8NFjBxrwoLDlb8+aqWuBN
 qyDMlhv1rbigVwTFkzzq0BVWMXjHWJ7Bfz9jh752u5fdn+ekEHoQ4pHHKb5CzbS4
 JOXUgozMUdroctZEcie0yCRYSjwUILoBo4h5XaaT3YEFlhOC33tVX2WXEAD7o7zF
 AGwFq39VWcodU5mRSMoGnAdsDS+nxbBER6uebQQb72HrtUn1p+afR9jxLeWbfOMp
 6mWw8/XXLLo3dlOUyn3HedBQP+iitFf13vPSUD2GAIP8gAb1DWWO61kpoP2lxNdK
 89kcKv42X8g53d19F8Ayv7kJdjxpeCczbrFhBT8D66OXJZQikp9NFBm9o4lIVVjs
 DMBHw08pT6UeQfktExTsrOAlezavicwqTCrC9zXvbTC31YFr8cwf7RaB3olQm+rA
 J19y3c6nsvniIyhG+yYLR8z9Worr5ydGinICZpDmeQGXb+qydne5W0+0WVL0k0HT
 iFZesSH2cak=
 =aVS3
 -----END PGP SIGNATURE-----

Merge tag 'mem-2023-01-02' of https://github.com/davidhildenbrand/qemu into staging

Hi,

"Host Memory Backends" and "Memory devices" queue ("mem"):
- virtio-mem fixes
- Use new MPOL_PREFERRED_MANY mbind() policy for memory backends if
  possible

# gpg: Signature made Mon 02 Jan 2023 11:22:04 GMT
# gpg:                using RSA key 1BD9CAAD735C4C3A460DFCCA4DDE10F700FF835A
# gpg:                issuer "david@redhat.com"
# gpg: Good signature from "David Hildenbrand <david@redhat.com>" [marginal]
# gpg:                 aka "David Hildenbrand <davidhildenbrand@gmail.com>" [full]
# gpg:                 aka "David Hildenbrand <hildenbr@in.tum.de>" [unknown]
# Primary key fingerprint: 1BD9 CAAD 735C 4C3A 460D  FCCA 4DDE 10F7 00FF 835A

* tag 'mem-2023-01-02' of https://github.com/davidhildenbrand/qemu:
  hostmem: Honor multiple preferred nodes if possible
  virtio-mem: Fix typo in function name
  virtio-mem: Fix the iterator variable in a vmem->rdl_list loop
  virtio-mem: Fix the bitmap index of the section offset

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell, 2023-01-05 14:05:21 +00:00, commit f8af61fa14
3 changed files with 31 additions and 11 deletions

backends/hostmem.c

@@ -23,7 +23,12 @@
 
 #ifdef CONFIG_NUMA
 #include <numaif.h>
+#include <numa.h>
 QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_DEFAULT != MPOL_DEFAULT);
+/*
+ * HOST_MEM_POLICY_PREFERRED may either translate to MPOL_PREFERRED or
+ * MPOL_PREFERRED_MANY, see comments further below.
+ */
 QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_PREFERRED != MPOL_PREFERRED);
 QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_BIND != MPOL_BIND);
 QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_INTERLEAVE != MPOL_INTERLEAVE);
@@ -346,6 +351,7 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
          * before mbind(). note: MPOL_MF_STRICT is ignored on hugepages so
          * this doesn't catch hugepage case. */
         unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE;
+        int mode = backend->policy;
 
         /* check for invalid host-nodes and policies and give more verbose
          * error messages than mbind(). */
@@ -369,9 +375,18 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
                BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long));
         assert(maxnode <= MAX_NODES);
 
+#ifdef HAVE_NUMA_HAS_PREFERRED_MANY
+        if (mode == MPOL_PREFERRED && numa_has_preferred_many() > 0) {
+            /*
+             * Replace with MPOL_PREFERRED_MANY otherwise the mbind() below
+             * silently picks the first node.
+             */
+            mode = MPOL_PREFERRED_MANY;
+        }
+#endif
+
         if (maxnode &&
-            mbind(ptr, sz, backend->policy, backend->host_nodes, maxnode + 1,
-                  flags)) {
+            mbind(ptr, sz, mode, backend->host_nodes, maxnode + 1, flags)) {
             if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) {
                 error_setg_errno(errp, errno,
                                  "cannot bind memory to host NUMA nodes");

hw/virtio/virtio-mem.c

@@ -207,7 +207,7 @@ static int virtio_mem_for_each_unplugged_range(const VirtIOMEM *vmem, void *arg,
  *
  * Returns false if the intersection is empty, otherwise returns true.
  */
-static bool virito_mem_intersect_memory_section(MemoryRegionSection *s,
+static bool virtio_mem_intersect_memory_section(MemoryRegionSection *s,
                                                 uint64_t offset, uint64_t size)
 {
     uint64_t start = MAX(s->offset_within_region, offset);
@@ -235,7 +235,7 @@ static int virtio_mem_for_each_plugged_section(const VirtIOMEM *vmem,
     uint64_t offset, size;
     int ret = 0;
 
-    first_bit = s->offset_within_region / vmem->bitmap_size;
+    first_bit = s->offset_within_region / vmem->block_size;
     first_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size, first_bit);
     while (first_bit < vmem->bitmap_size) {
         MemoryRegionSection tmp = *s;
@@ -245,7 +245,7 @@ static int virtio_mem_for_each_plugged_section(const VirtIOMEM *vmem,
                                       first_bit + 1) - 1;
         size = (last_bit - first_bit + 1) * vmem->block_size;
 
-        if (!virito_mem_intersect_memory_section(&tmp, offset, size)) {
+        if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
             break;
         }
         ret = cb(&tmp, arg);
@@ -267,7 +267,7 @@ static int virtio_mem_for_each_unplugged_section(const VirtIOMEM *vmem,
     uint64_t offset, size;
     int ret = 0;
 
-    first_bit = s->offset_within_region / vmem->bitmap_size;
+    first_bit = s->offset_within_region / vmem->block_size;
     first_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size, first_bit);
     while (first_bit < vmem->bitmap_size) {
         MemoryRegionSection tmp = *s;
@@ -277,7 +277,7 @@ static int virtio_mem_for_each_unplugged_section(const VirtIOMEM *vmem,
                                       first_bit + 1) - 1;
         size = (last_bit - first_bit + 1) * vmem->block_size;
 
-        if (!virito_mem_intersect_memory_section(&tmp, offset, size)) {
+        if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
             break;
         }
         ret = cb(&tmp, arg);
@@ -313,7 +313,7 @@ static void virtio_mem_notify_unplug(VirtIOMEM *vmem, uint64_t offset,
     QLIST_FOREACH(rdl, &vmem->rdl_list, next) {
         MemoryRegionSection tmp = *rdl->section;
 
-        if (!virito_mem_intersect_memory_section(&tmp, offset, size)) {
+        if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
             continue;
         }
         rdl->notify_discard(rdl, &tmp);
@@ -329,7 +329,7 @@ static int virtio_mem_notify_plug(VirtIOMEM *vmem, uint64_t offset,
     QLIST_FOREACH(rdl, &vmem->rdl_list, next) {
         MemoryRegionSection tmp = *rdl->section;
 
-        if (!virito_mem_intersect_memory_section(&tmp, offset, size)) {
+        if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
             continue;
         }
         ret = rdl->notify_populate(rdl, &tmp);
@@ -341,12 +341,12 @@ static int virtio_mem_notify_plug(VirtIOMEM *vmem, uint64_t offset,
     if (ret) {
         /* Notify all already-notified listeners. */
         QLIST_FOREACH(rdl2, &vmem->rdl_list, next) {
-            MemoryRegionSection tmp = *rdl->section;
+            MemoryRegionSection tmp = *rdl2->section;
 
             if (rdl2 == rdl) {
                 break;
             }
-            if (!virito_mem_intersect_memory_section(&tmp, offset, size)) {
+            if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
                 continue;
             }
             rdl2->notify_discard(rdl2, &tmp);
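
Of the three virtio-mem fixes above, two are mechanical (the virito_mem -> virtio_mem typo and using the inner iterator rdl2 rather than rdl when rolling back notifications); the bitmap one is easier to see with numbers. The plug bitmap holds one bit per device block, so a byte offset inside the region maps to a bit index by dividing by the block size, not by the bitmap size. A tiny standalone illustration with made-up sizes (not QEMU code):

/* Why the bit index is offset / block_size, not offset / bitmap_size. */
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t region_size = 1ULL << 30;               /* 1 GiB virtio-mem region  */
    uint64_t block_size = 2 * 1024 * 1024;           /* 2 MiB block granularity  */
    uint64_t bitmap_size = region_size / block_size; /* 512 bits, one per block  */
    uint64_t offset = 256 * 1024 * 1024;             /* 256 MiB into the region  */

    uint64_t good = offset / block_size;             /* 128: a valid bit index   */
    uint64_t bad = offset / bitmap_size;             /* 524288: way out of range */

    printf("fixed index: %" PRIu64 "\n", good);
    printf("buggy index: %" PRIu64 "\n", bad);
    assert(good < bitmap_size && bad >= bitmap_size);
    return 0;
}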

meson.build

@@ -1858,6 +1858,11 @@ config_host_data.set('CONFIG_LINUX_AIO', libaio.found())
 config_host_data.set('CONFIG_LINUX_IO_URING', linux_io_uring.found())
 config_host_data.set('CONFIG_LIBPMEM', libpmem.found())
 config_host_data.set('CONFIG_NUMA', numa.found())
+if numa.found()
+  config_host_data.set('HAVE_NUMA_HAS_PREFERRED_MANY',
+                       cc.has_function('numa_has_preferred_many',
+                                       dependencies: numa))
+endif
 config_host_data.set('CONFIG_OPENGL', opengl.found())
 config_host_data.set('CONFIG_PROFILER', get_option('profiler'))
 config_host_data.set('CONFIG_RBD', rbd.found())
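
The meson hunk defines HAVE_NUMA_HAS_PREFERRED_MANY only when cc.has_function() can link numa_has_preferred_many() against the numa dependency; conceptually the probe amounts to asking whether a translation unit like the one below compiles and links with -lnuma (an illustrative sketch, not the literal test program meson generates):

/*
 * Roughly what cc.has_function('numa_has_preferred_many', dependencies: numa)
 * checks: does libnuma declare and export the symbol?  The resulting macro
 * only says the library provides the function; the runtime return value
 * (> 0) still decides whether the kernel accepts MPOL_PREFERRED_MANY.
 */
#include <numa.h>

int main(void)
{
    /* Returns > 0 on kernels that support MPOL_PREFERRED_MANY. */
    return numa_has_preferred_many() > 0 ? 0 : 1;
}

The split matters because a build host with a new libnuma may still run on an older kernel, so the runtime numa_has_preferred_many() check in backends/hostmem.c stays necessary even when the macro is defined.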