softmmu/physmem: Don't use atomic operations in ram_block_discard_(disable|require)

We have users in migration context that don't hold the BQL (when
finishing migration). To prepare for further changes, use a dedicated mutex
instead of atomic operations. Keep using qatomic_read ("READ_ONCE") for the
functions that only extract the current state (e.g., used by
virtio-balloon), locking isn't necessary.

While at it, split up the counter into two variables to make it easier
to understand.

Suggested-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Pankaj Gupta <pankaj.gupta@cloud.ionos.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Dr. David Alan Gilbert <dgilbert@redhat.com>
Cc: Igor Mammedov <imammedo@redhat.com>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Auger Eric <eric.auger@redhat.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: teawater <teawaterz@linux.alibaba.com>
Cc: Marek Kedzierski <mkedzier@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20210413095531.25603-11-david@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
This commit is contained in:
David Hildenbrand 2021-04-13 11:55:28 +02:00 committed by Eduardo Habkost
parent 0fd7616e0f
commit 98da491dff

View File

@ -3684,56 +3684,64 @@ void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root)
}
}
/*
 * If positive, discarding RAM is disabled. If negative, discarding RAM is
 * required to work and cannot be disabled.
 *
 * NOTE(review): this combined atomic counter looks stale — it appears to be
 * superseded by the two split counters below after the mutex conversion.
 * Confirm no users remain and remove it.
 */
static int ram_block_discard_disabled;
/* Number of active users that require RAM discards to work (e.g., virtio-mem). */
static unsigned int ram_block_discard_required_cnt;
/* Number of active users that forbid RAM discards (e.g., vfio). */
static unsigned int ram_block_discard_disabled_cnt;
/* Protects the two counters above; callers may not hold the BQL. */
static QemuMutex ram_block_discard_disable_mutex;
/*
 * Acquire the discard-disable mutex, lazily initializing it on first use.
 * The g_once pattern makes the one-time init thread-safe without needing
 * an explicit setup hook at startup.
 */
static void ram_block_discard_disable_mutex_lock(void)
{
    static gsize init_done;

    if (g_once_init_enter(&init_done)) {
        qemu_mutex_init(&ram_block_discard_disable_mutex);
        g_once_init_leave(&init_done, 1);
    }
    qemu_mutex_lock(&ram_block_discard_disable_mutex);
}
/* Release the lazily-initialized discard-disable mutex. */
static void ram_block_discard_disable_mutex_unlock(void)
{
    qemu_mutex_unlock(&ram_block_discard_disable_mutex);
}
/*
 * Register (state == true) or unregister (state == false) a user that
 * needs RAM discards to stay disabled.
 *
 * Returns 0 on success, or -EBUSY when discarding RAM is currently
 * required by an active ram_block_discard_require(true) user -- the two
 * modes are mutually exclusive.
 *
 * Protected by a dedicated mutex rather than atomics/BQL, since callers
 * in migration context may not hold the BQL.
 */
int ram_block_discard_disable(bool state)
{
    int ret = 0;

    ram_block_discard_disable_mutex_lock();
    if (!state) {
        /* Drop one disabler registration. */
        ram_block_discard_disabled_cnt--;
    } else if (!ram_block_discard_required_cnt) {
        ram_block_discard_disabled_cnt++;
    } else {
        /* Someone actively requires discards to work -- refuse. */
        ret = -EBUSY;
    }
    ram_block_discard_disable_mutex_unlock();
    return ret;
}
/*
 * Register (state == true) or unregister (state == false) a user that
 * requires RAM discards to work.
 *
 * Returns 0 on success, or -EBUSY when discarding RAM is currently
 * disabled by an active ram_block_discard_disable(true) user -- the two
 * modes are mutually exclusive.
 *
 * Protected by a dedicated mutex rather than atomics/BQL, since callers
 * in migration context may not hold the BQL.
 */
int ram_block_discard_require(bool state)
{
    int ret = 0;

    ram_block_discard_disable_mutex_lock();
    if (!state) {
        /* Drop one requirer registration. */
        ram_block_discard_required_cnt--;
    } else if (!ram_block_discard_disabled_cnt) {
        ram_block_discard_required_cnt++;
    } else {
        /* Someone actively disabled discards -- refuse. */
        ret = -EBUSY;
    }
    ram_block_discard_disable_mutex_unlock();
    return ret;
}
/*
 * Query whether any users currently have RAM discards disabled.
 *
 * Lockless by design: a plain qatomic_read of the counter is sufficient
 * for callers (e.g., virtio-balloon) that only sample the current state.
 */
bool ram_block_discard_is_disabled(void)
{
    return qatomic_read(&ram_block_discard_disabled_cnt);
}
/*
 * Query whether any users currently require RAM discards to work.
 *
 * Lockless by design: a plain qatomic_read of the counter is sufficient
 * for callers that only sample the current state.
 */
bool ram_block_discard_is_required(void)
{
    return qatomic_read(&ram_block_discard_required_cnt);
}