/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "exec/memory.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/tcg.h"
#include "qemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"
#include "exec/address-spaces.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
unsigned int global_dirty_tracking;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};
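
/*
 * The helpers below do simple interval arithmetic on AddrRange.  They use
 * Int128 rather than a 64-bit type so that a range covering the full 2^64
 * address space can be represented without overflow (see the int128_2_64()
 * clip in generate_memory_topology()).  As a worked example, intersecting
 * [0x1000, +0x2000) with [0x2800, +0x1000) yields [0x2800, +0x800).
 */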

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };
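
/*
 * The macros below fan a single event out to the registered MemoryListeners.
 * MEMORY_LISTENER_CALL_GLOBAL() walks the global memory_listeners list, while
 * MEMORY_LISTENER_CALL() walks only the listeners attached to one
 * AddressSpace; the direction argument selects forward or reverse iteration
 * over the list (compare the Forward/Reverse choices at the call sites
 * further down in this file).
 */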

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) {     \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)
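
/*
 * These macros are used by the update paths below, e.g.
 * MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del) in
 * address_space_update_topology_pass() and the eventfd_add/eventfd_del
 * calls in address_space_add_del_ioeventfds().
 */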

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};
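
/*
 * Total order on MemoryRegionIoeventfd: by address, then size, then
 * match_data/data, then notifier.  address_space_add_del_ioeventfds()
 * below relies on both fd arrays being sorted by this order so that it
 * can compute their symmetric difference with a single merge-style walk.
 */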
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    if (int128_eq(a->addr.start, b->addr.start) &&
        (!int128_nz(a->addr.size) || !int128_nz(b->addr.size) ||
         (int128_eq(a->addr.size, b->addr.size) &&
          (a->match_data == b->match_data) &&
          ((a->match_data && (a->data == b->data)) || !a->match_data) &&
          (a->e == b->e))))
        return true;

    return false;
}

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return qatomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (qatomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}
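
/*
 * Two adjacent FlatRanges can be merged only if they are contiguous both in
 * the address space and in their offset into the backing MemoryRegion, and
 * all remaining attributes (dirty log mask, ROM-device mode, readonly,
 * nonvolatile) are identical.
 */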
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
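
/*
 * Endianness handling for MMIO accesses: memory_region_big_endian() says
 * whether the region's declared endianness is big-endian from the target's
 * point of view, and adjust_endianness() byte-swaps a value whenever the
 * MemOp of the access disagrees with the device endianness.
 */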

static bool memory_region_big_endian(MemoryRegion *mr)
{
#if TARGET_BIG_ENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
    if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
        switch (op & MO_SIZE) {
        case MO_8:
            break;
        case MO_16:
            *data = bswap16(*data);
            break;
        case MO_32:
            *data = bswap32(*data);
            break;
        case MO_64:
            *data = bswap64(*data);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
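
/*
 * The shift argument below is signed on purpose: when the requested access
 * is smaller than the device's implementation access size on a big-endian
 * region, access_with_adjusted_size() computes a negative bit offset, and
 * these helpers then shift in the opposite direction instead.
 */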

static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
                                     memory_region_name(mr));
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
                                     memory_region_name(mr));
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
                                      memory_region_name(mr));
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
                                      memory_region_name(mr));
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}
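
/*
 * Split one guest access into pieces the device implementation accepts
 * (clamped between access_size_min and access_size_max), invoke access_fn
 * for each piece, and assemble the result through the signed shift/mask
 * helpers above.  On a big-endian region the lowest-addressed piece holds
 * the most significant bits, hence the (size - access_size - i) * 8 bit
 * offset; little-endian regions simply use i * 8.
 */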
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      MemTxResult (*access_fn)
                                                  (MemoryRegion *mr,
                                                   hwaddr addr,
                                                   uint64_t *value,
                                                   unsigned size,
                                                   signed shift,
                                                   uint64_t mask,
                                                   MemTxAttrs attrs),
                                      MemoryRegion *mr,
                                      MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}
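
/*
 * Walk up the container chain to the root MemoryRegion and return the
 * AddressSpace, if any, that has that region as its root; returns NULL when
 * the region is not part of any address space.
 */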
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque)
{
    FlatRange *fr;

    assert(fv);
    assert(cb);

    FOR_EACH_FLAT_RANGE(fr, fv) {
        if (cb(fr->addr.start, fr->addr.size, fr->mr,
               fr->offset_in_region, opaque)) {
            break;
        }
    }
}
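
/*
 * Find the deepest region that covers everything @mr currently makes
 * visible, skipping trivial aliases and containers with a single enabled
 * child, so that address spaces whose roots resolve to the same region can
 * share a FlatView.  Returns NULL if nothing is enabled.
 */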
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way. This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}
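
/*
 * Return the current FlatView of @as with a reference taken.  The lookup
 * runs under the RCU read lock and retries when a concurrent writer has
 * replaced as->current_map and the old view's refcount already hit zero.
 */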
FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    RCU_READ_LOCK_GUARD();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    return view;
}
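
/*
 * Recompute the flat array of ioeventfds for @as from its current FlatView:
 * each MemoryRegion ioeventfd is shifted into absolute addresses and kept
 * only if it intersects the FlatRange it belongs to.  The old and new arrays
 * are then diffed by address_space_add_del_ioeventfds().
 */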
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    unsigned ioeventfd_max;
    MemoryRegionIoeventfd *ioeventfds;
    AddrRange tmp;
    unsigned i;

    /*
     * It is likely that the number of ioeventfds hasn't changed much, so use
     * the previous size as the starting value, with some headroom to avoid
     * gratuitous reallocations.
     */
    ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4);
    ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max);

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                if (ioeventfd_nb > ioeventfd_max) {
                    ioeventfd_max = MAX(ioeventfd_max * 2, 4);
                    ioeventfds = g_realloc(ioeventfds,
                            ioeventfd_max * sizeof(*ioeventfds));
                }
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}
2019-08-20 16:13:25 +02:00
|
|
|
/*
 * Notify the memory listeners about the coalesced IO change events of
 * range `cmr'. Only the part that has intersection of the specified
 * FlatRange will be sent.
 */
static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
                                           CoalescedMemoryRange *cmr, bool add)
{
    AddrRange tmp;

    tmp = addrrange_shift(cmr->addr,
                          int128_sub(fr->addr.start,
                                     int128_make64(fr->offset_in_region)));
    if (!addrrange_intersects(tmp, fr->addr)) {
        return;
    }
    tmp = addrrange_intersection(tmp, fr->addr);

    if (add) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    } else {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    }
}

static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    CoalescedMemoryRange *cmr;

    QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, false);
    }
}

static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, true);
    }
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}

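/*
 * Worked example (not part of the original file): suppose the old view
 * contains ranges [A][B] and the new view contains [A][C], where C starts
 * at the same address as B but differs in attributes.  The "!adding" pass,
 * which walks listeners in Reverse order, emits region_del (and coalesced
 * I/O removal) for B; the subsequent "adding" pass, walking Forward, emits
 * region_add plus coalesced I/O setup for C, and region_nop together with
 * any log_start/log_stop transitions for the unchanged range A.
 */
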
static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable. */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    qatomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

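/*
 * Illustrative sketch (not part of the original file): batching several
 * memory-map changes inside one transaction so that flatviews, listeners
 * and ioeventfds are rebuilt only once, at commit time.  "sysmem",
 * "dev_mr0", "dev_mr1" and the mapping address are hypothetical.
 */
#if 0
static void example_remap_device(MemoryRegion *sysmem,
                                 MemoryRegion *dev_mr0,
                                 MemoryRegion *dev_mr1)
{
    memory_region_transaction_begin();
    memory_region_del_subregion(sysmem, dev_mr0);
    memory_region_add_subregion(sysmem, 0xfe000000, dev_mr1);
    memory_region_transaction_commit();  /* single topology update here */
}
#endif
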
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

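/*
 * Illustrative sketch (not part of the original file): what the escaping
 * above produces.  Characters that are special in QOM paths ('/', '[', ']'
 * and '\') are rewritten as "\xNN" with lowercase hex digits; everything
 * else is copied verbatim.
 */
#if 0
static void example_escape(void)
{
    char *escaped = memory_region_escape_name("pci/bar[0]");
    /* escaped now reads "pci\x2fbar\x5b0\x5d" */
    g_free(escaped);
}
#endif
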
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr));
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

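/*
 * Illustrative sketch (not part of the original file): creating a pure
 * container region.  A region initialized this way has no RAM backing and
 * no I/O callbacks; it only exists to hold subregions.  The size and names
 * are made up.
 */
#if 0
static MemoryRegion example_container;

static void example_create_container(Object *owner)
{
    memory_region_init(&example_container, owner, "example-container",
                       0x100000);
}
#endif
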
static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    char *path = (char *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL);
    op->resolve = memory_region_resolve_container;

    object_property_add_uint64_ptr(OBJECT(mr), "addr",
                                   &mr->addr, OBJ_PROP_FLAG_READ);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

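/*
 * Rationale for the RAM device accessors below: a RAM device region, e.g. a
 * vfio mmap of device memory, is deliberately accessed through
 * MemoryRegionOps rather than by memcpy.  memcpy need not preserve the
 * width and alignment of the guest access and may use instructions
 * unsuitable for MMIO, so these accessors instead dereference the host
 * mapping with the exact 1/2/4/8-byte width requested, including unaligned
 * accesses, leaving it to the guest driver to respect the device's real
 * capabilities.
 */
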
static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    if (mr->ops->valid.accepts
        && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
                      ", size %u, region '%s', reason: rejected\n",
                      is_write ? "write" : "read",
                      addr, size, memory_region_name(mr));
        return false;
    }

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
                      ", size %u, region '%s', reason: unaligned\n",
                      is_write ? "write" : "read",
                      addr, size, memory_region_name(mr));
        return false;
    }

    /* Treat zero as compatibility all valid */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
                      ", size %u, region '%s', reason: invalid size "
                      "(min:%u max:%u)\n",
                      is_write ? "write" : "read",
                      addr, size, memory_region_name(mr),
                      mr->ops->valid.min_access_size,
                      mr->ops->valid.max_access_size);
        return false;
    }
    return true;
}

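/*
 * Illustrative sketch (not part of the original file): the kind of
 * MemoryRegionOps.valid specification that memory_region_access_valid()
 * enforces.  With these settings only aligned 2- or 4-byte accesses are
 * accepted; anything else is rejected and logged as a guest error.  The
 * ops name is hypothetical and the read/write callbacks are omitted.
 */
#if 0
static const MemoryRegionOps example_strict_ops = {
    /* .read / .write callbacks omitted in this sketch */
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 2,
        .max_access_size = 4,
        .unaligned = false,
    },
};
#endif
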
static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        MemOp op,
                                        MemTxAttrs attrs)
{
    unsigned size = memop_size(op);
    MemTxResult r;

    if (mr->alias) {
        return memory_region_dispatch_read(mr->alias,
                                           mr->alias_offset + addr,
                                           pval, op, attrs);
    }
    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, op);
    return r;
}

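/*
 * Illustrative sketch (not part of the original file): issuing a 4-byte
 * read directly against a region, much as the flatview dispatch code does.
 * "mr" and "addr" are assumed to come from a prior translation step.
 */
#if 0
static MemTxResult example_read32(MemoryRegion *mr, hwaddr addr,
                                  uint32_t *out)
{
    uint64_t val;
    MemTxResult r = memory_region_dispatch_read(mr, addr, &val, MO_32,
                                                MEMTXATTRS_UNSPECIFIED);
    *out = val;
    return r;
}
#endif
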
/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         MemOp op,
                                         MemTxAttrs attrs)
{
    unsigned size = memop_size(op);

    if (mr->alias) {
        return memory_region_dispatch_write(mr->alias,
                                            mr->alias_offset + addr,
                                            data, op, attrs);
    }
    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, op);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}

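/*
 * Illustrative sketch (not part of the original file): a minimal MMIO
 * region wired up with memory_region_init_io().  The device structure,
 * register layout, names and size are all hypothetical.
 */
#if 0
typedef struct ExampleDev {
    MemoryRegion iomem;
    uint32_t ctrl;
} ExampleDev;

static uint64_t example_dev_read(void *opaque, hwaddr addr, unsigned size)
{
    ExampleDev *d = opaque;

    return addr == 0 ? d->ctrl : 0;
}

static void example_dev_write(void *opaque, hwaddr addr,
                              uint64_t val, unsigned size)
{
    ExampleDev *d = opaque;

    if (addr == 0) {
        d->ctrl = (uint32_t)val;
    }
}

static const MemoryRegionOps example_dev_io_ops = {
    .read = example_dev_read,
    .write = example_dev_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void example_dev_init(ExampleDev *d, Object *owner)
{
    memory_region_init_io(&d->iomem, owner, &example_dev_io_ops, d,
                          "example-dev", 0x1000);
}
#endif
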
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp);
}

void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
|
|
|
|
Object *owner,
|
|
|
|
const char *name,
|
|
|
|
uint64_t size,
|
|
|
|
uint32_t ram_flags,
|
|
|
|
Error **errp)
|
2011-07-26 13:26:01 +02:00
|
|
|
{
|
2018-09-04 14:39:37 +02:00
|
|
|
Error *err = NULL;
|
2013-06-06 11:41:28 +02:00
|
|
|
memory_region_init(mr, owner, name, size);
|
2011-12-08 14:58:43 +01:00
|
|
|
mr->ram = true;
|
2011-07-26 13:26:06 +02:00
|
|
|
mr->terminates = true;
|
2011-08-08 18:58:48 +02:00
|
|
|
mr->destructor = memory_region_destructor_ram;
|
2021-05-10 13:43:19 +02:00
|
|
|
mr->ram_block = qemu_ram_alloc(size, ram_flags, mr, &err);
|
2018-09-04 14:39:37 +02:00
|
|
|
if (err) {
|
|
|
|
mr->size = int128_zero();
|
|
|
|
object_unparent(OBJECT(mr));
|
|
|
|
error_propagate(errp, err);
|
|
|
|
}
|
2014-05-14 11:43:19 +02:00
|
|
|
}
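
/*
 * Illustrative sketch (not part of this file): boards usually allocate
 * guest RAM through the migration-aware wrapper rather than calling the
 * _nomigrate variants directly; the vmstate name and size are hypothetical:
 *
 *     memory_region_init_ram(&s->ram, OBJECT(s), "board.ram",
 *                            128 * MiB, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0, &s->ram);
 */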

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

#ifdef CONFIG_POSIX
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      bool readonly,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = readonly;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->align = align;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path,
                                             readonly, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t ram_flags,
                                    int fd,
                                    ram_addr_t offset,
                                    Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset,
                                           false, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}
#endif
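
/*
 * Illustrative sketch (not part of this file): file-backed RAM is normally
 * created through a memory backend rather than by calling these helpers
 * directly, e.g. on the command line (the path is hypothetical):
 *
 *     -object memory-backend-file,id=mem0,size=4G,mem-path=/dev/hugepages,share=on
 *
 * which ends up in memory_region_init_ram_from_file() with the requested
 * size, alignment and ram_flags (share=on maps to RAM_SHARED).
 */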

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
    mr->destructor = memory_region_destructor_ram;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}
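
/*
 * Note (from the commit that introduced ram_device regions): accesses to a
 * ram_device region are routed through ram_device_mem_ops instead of being
 * treated as directly-accessible RAM, because memcpy()-style access to an
 * mmap'ed device BAR may use widths, alignments and CPU instructions that
 * are not safe for MMIO space.
 */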

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
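
/*
 * Illustrative sketch (not part of this file): an alias exposes a window
 * into another region, e.g. mirroring the low 1 MiB of RAM at a second
 * address (names and addresses are hypothetical):
 *
 *     memory_region_init_alias(&s->ram_low, OBJECT(s), "ram-low-alias",
 *                              &s->ram, 0, 1 * MiB);
 *     memory_region_add_subregion(get_system_memory(), 0xe0000000,
 *                                 &s->ram_low);
 */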

void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp);
    mr->readonly = true;
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    Error *err = NULL;
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, 0, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA noticeably.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            g_strdup(object_get_canonical_path_component(OBJECT(mr)));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

bool memory_region_is_protected(MemoryRegion *mr)
{
    return mr->ram && (mr->ram_block->flags & RAM_PROTECTED);
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    RAMBlock *rb = mr->ram_block;

    if (global_dirty_tracking && ((rb && qemu_ram_is_migratable(rb)) ||
                                  memory_region_is_iommu(mr))) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }

    if (tcg_enabled() && rb) {
        /* TCG only cares about dirty memory logging for RAM, not IOMMU.  */
        mask |= (1 << DIRTY_MEMORY_CODE);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}

static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
                                                   Error **errp)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    int ret = 0;

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        ret = imrc->notify_flag_changed(iommu_mr,
                                        iommu_mr->iommu_notify_flags,
                                        flags, errp);
    }

    if (!ret) {
        iommu_mr->iommu_notify_flags = flags;
    }
    return ret;
}

int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
                                           uint64_t page_size_mask,
                                           Error **errp)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    int ret = 0;

    if (imrc->iommu_set_page_size_mask) {
        ret = imrc->iommu_set_page_size_mask(iommu_mr, page_size_mask, errp);
    }
    return ret;
}

int memory_region_register_iommu_notifier(MemoryRegion *mr,
                                          IOMMUNotifier *n, Error **errp)
{
    IOMMUMemoryRegion *iommu_mr;
    int ret;

    if (mr->alias) {
        return memory_region_register_iommu_notifier(mr->alias, n, errp);
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    assert(n->iommu_idx >= 0 &&
           n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));

    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
    if (ret) {
        QLIST_REMOVE(n, node);
    }
    return ret;
}
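
/*
 * Illustrative sketch (not part of this file): a consumer such as vfio
 * initializes an IOMMUNotifier with iommu_notifier_init() and registers it
 * for a guest IOVA range; my_unmap_cb, start, end and iommu_idx are
 * hypothetical:
 *
 *     iommu_notifier_init(&n, my_unmap_cb, IOMMU_NOTIFIER_UNMAP,
 *                         start, end, iommu_idx);
 *     memory_region_register_iommu_notifier(mr, &n, &error_fatal);
 */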

uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr, NULL);
}

void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
                                    IOMMUTLBEvent *event)
{
    IOMMUTLBEntry *entry = &event->entry;
    hwaddr entry_end = entry->iova + entry->addr_mask;
    IOMMUTLBEntry tmp = *entry;

    if (event->type == IOMMU_NOTIFIER_UNMAP) {
        assert(entry->perm == IOMMU_NONE);
    }

    /*
     * Skip the notification if it does not overlap with the
     * registered range.
     */
    if (notifier->start > entry_end || notifier->end < entry->iova) {
        return;
    }

    if (notifier->notifier_flags & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        /* Crop (iova, addr_mask) to range */
        tmp.iova = MAX(tmp.iova, notifier->start);
        tmp.addr_mask = MIN(entry_end, notifier->end) - tmp.iova;
    } else {
        assert(entry->iova >= notifier->start && entry_end <= notifier->end);
    }

    if (event->type & notifier->notifier_flags) {
        notifier->notify(notifier, &tmp);
    }
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEvent event)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        if (iommu_notifier->iommu_idx == iommu_idx) {
            memory_region_notify_iommu_one(iommu_notifier, &event);
        }
    }
}
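
/*
 * Illustrative sketch (not part of this file): an IOMMU model reports a
 * mapping change by filling an IOMMUTLBEvent and calling
 * memory_region_notify_iommu(); the addresses are hypothetical:
 *
 *     IOMMUTLBEvent event = {
 *         .type = IOMMU_NOTIFIER_UNMAP,
 *         .entry = {
 *             .target_as = &address_space_memory,
 *             .iova = iova,
 *             .translated_addr = 0,
 *             .addr_mask = 0xfff,
 *             .perm = IOMMU_NONE,
 *         },
 *     };
 *     memory_region_notify_iommu(&s->iommu_mr, 0, event);
 */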

int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->get_attr) {
        return -EINVAL;
    }

    return imrc->get_attr(iommu_mr, attr, data);
}

int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->attrs_to_index) {
        return 0;
    }

    return imrc->attrs_to_index(iommu_mr, attrs);
}

int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->num_indexes) {
        return 1;
    }

    return imrc->num_indexes(iommu_mr);
}

RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr)
{
    if (!memory_region_is_mapped(mr) || !memory_region_is_ram(mr)) {
        return NULL;
    }
    return mr->rdm;
}

void memory_region_set_ram_discard_manager(MemoryRegion *mr,
                                           RamDiscardManager *rdm)
{
    g_assert(memory_region_is_ram(mr) && !memory_region_is_mapped(mr));
    g_assert(!rdm || !mr->rdm);
    mr->rdm = rdm;
}

uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
                                                 const MemoryRegion *mr)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->get_min_granularity);
    return rdmc->get_min_granularity(rdm, mr);
}

bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
                                      const MemoryRegionSection *section)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->is_populated);
    return rdmc->is_populated(rdm, section);
}

int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamPopulate replay_fn,
                                         void *opaque)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->replay_populated);
    return rdmc->replay_populated(rdm, section, replay_fn, opaque);
}

void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
                                          MemoryRegionSection *section,
                                          ReplayRamDiscard replay_fn,
                                          void *opaque)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->replay_discarded);
    rdmc->replay_discarded(rdm, section, replay_fn, opaque);
}

void ram_discard_manager_register_listener(RamDiscardManager *rdm,
                                           RamDiscardListener *rdl,
                                           MemoryRegionSection *section)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->register_listener);
    rdmc->register_listener(rdm, rdl, section);
}

void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->unregister_listener);
    rdmc->unregister_listener(rdm, rdl);
}
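
/*
 * Illustrative sketch (not part of this file): a consumer of a RAM region
 * with a RamDiscardManager (e.g. vfio with virtio-mem) initializes a
 * RamDiscardListener with its populate/discard callbacks and registers it
 * against the section it maps; the callback names are hypothetical:
 *
 *     ram_discard_listener_init(&rdl, my_notify_populate,
 *                               my_notify_discard, true);
 *     ram_discard_manager_register_listener(rdm, &rdl, section);
 */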

/* Called with rcu_read_lock held.  */
bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                          ram_addr_t *ram_addr, bool *read_only,
                          bool *mr_has_discard_manager)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    if (mr_has_discard_manager) {
        *mr_has_discard_manager = false;
    }
    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory, iotlb->translated_addr,
                                 &xlat, &len, writable, MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %" HWADDR_PRIx "", xlat);
        return false;
    } else if (memory_region_has_ram_discard_manager(mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
        MemoryRegionSection tmp = {
            .mr = mr,
            .offset_within_region = xlat,
            .size = int128_make64(len),
        };
        if (mr_has_discard_manager) {
            *mr_has_discard_manager = true;
        }
        /*
         * Malicious VMs can map memory into the IOMMU, which is expected
         * to remain discarded. vfio will pin all pages, populating memory.
         * Disallow that. vmstate priorities make sure any RamDiscardManager
         * was already restored before IOMMUs are restored.
         */
        if (!ram_discard_manager_is_populated(rdm, &tmp)) {
            error_report("iommu map to discarded memory (e.g., unplugged via"
                         " virtio-mem): %" HWADDR_PRIx "",
                         iotlb->translated_addr);
            return false;
        }
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return false;
    }

    if (vaddr) {
        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    }

    if (ram_addr) {
        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
    }

    if (read_only) {
        *read_only = !writable || mr->readonly;
    }

    return true;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

/*
 * If memory region `mr' is NULL, do global sync.  Otherwise, sync
 * dirty bitmap for the specified memory region.
 */
static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (listener->log_sync) {
            as = listener->address_space;
            view = address_space_get_flatview(as);
            FOR_EACH_FLAT_RANGE(fr, view) {
                if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
                    MemoryRegionSection mrs = section_from_flat_range(fr, view);
                    listener->log_sync(listener, &mrs);
                }
            }
            flatview_unref(view);
            trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 0);
        } else if (listener->log_sync_global) {
            /*
             * No matter whether MR is specified, what we can do here
             * is a global sync, because we are not capable of syncing
             * at a finer granularity.
             */
            listener->log_sync_global(listener);
            trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 1);
        }
    }
}

void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len)
{
    MemoryRegionSection mrs;
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;
    hwaddr sec_start, sec_end, sec_size;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_clear) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (!fr->dirty_log_mask || fr->mr != mr) {
                /*
                 * Clearing the dirty bitmap only applies to regions
                 * that have dirty logging enabled.
                 */
                continue;
            }

            mrs = section_from_flat_range(fr, view);

            sec_start = MAX(mrs.offset_within_region, start);
            sec_end = mrs.offset_within_region + int128_get64(mrs.size);
            sec_end = MIN(sec_end, start + len);

            if (sec_start >= sec_end) {
                /*
                 * If this memory region section has no intersection
                 * with the requested range, skip.
                 */
                continue;
            }

            /* Valid case; shrink the section if needed */
            mrs.offset_within_address_space +=
                sec_start - mrs.offset_within_region;
            mrs.offset_within_region = sec_start;
            sec_size = sec_end - sec_start;
            mrs.size = int128_make64(sec_size);
            listener->log_clear(listener, &mrs);
        }
        flatview_unref(view);
    }
}

DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    DirtyBitmapSnapshot *snapshot;
    assert(mr->ram_block);
    memory_region_sync_dirty_bitmap(mr);
    snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
    memory_global_after_dirty_log_sync();
    return snapshot;
}

bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}
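
/*
 * Illustrative sketch (not part of this file): display emulation uses the
 * snapshot API to find framebuffer rows that changed since the last scan;
 * fb_region, addr and stride are hypothetical:
 *
 *     DirtyBitmapSnapshot *snap =
 *         memory_region_snapshot_and_clear_dirty(fb_region, addr, size,
 *                                                DIRTY_MEMORY_VGA);
 *     if (memory_region_snapshot_get_dirty(fb_region, snap, addr, stride)) {
 *         ...redraw this scanline...
 *     }
 *     g_free(snap);
 */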

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
{
    if (mr->nonvolatile != nonvolatile) {
        memory_region_transaction_begin();
        mr->nonvolatile = nonvolatile;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}

int memory_region_get_fd(MemoryRegion *mr)
{
    RCU_READ_LOCK_GUARD();
    while (mr->alias) {
        mr = mr->alias;
    }
    return mr->ram_block->fd;
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    uint64_t offset = 0;

    RCU_READ_LOCK_GUARD();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    return qemu_map_ram_ptr(mr->ram_block, offset);
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}

void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size)
{
    if (mr->ram_block) {
        qemu_ram_msync(mr->ram_block, addr, size);
    }
}

void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
{
    /*
     * Might need to be extended to cover
     * different types of memory regions
     */
    if (mr->dirty_log_mask) {
        memory_region_msync(mr, addr, size);
    }
}

/*
 * Notify the proper memory listeners about the change to the newly
 * added/removed CoalescedMemoryRange.
 */
static void memory_region_update_coalesced_range(MemoryRegion *mr,
                                                 CoalescedMemoryRange *cmr,
                                                 bool add)
{
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                flat_range_coalesced_io_notify(fr, as, cmr, add);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr, cmr, true);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        memory_region_update_coalesced_range(mr, cmr, false);
        g_free(cmr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
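
/*
 * Illustrative sketch (not part of this file): virtio-style devices bind an
 * EventNotifier to a doorbell register so that a guest write wakes an
 * iothread without a heavyweight MMIO exit; the names are hypothetical:
 *
 *     event_notifier_init(&s->notifier, 0);
 *     memory_region_add_eventfd(&s->iomem, DOORBELL_OFFSET, 2,
 *                               true, vq_index, &s->notifier);
 */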

void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
|
2014-06-11 11:18:09 +02:00
|
|
|
static void memory_region_update_container_subregions(MemoryRegion *subregion)
|
2011-07-26 13:26:01 +02:00
|
|
|
{
|
2014-06-11 11:18:09 +02:00
|
|
|
MemoryRegion *mr = subregion->container;
|
2011-07-26 13:26:01 +02:00
|
|
|
MemoryRegion *other;
|
|
|
|
|
2012-08-23 13:02:30 +02:00
|
|
|
memory_region_transaction_begin();
|
|
|
|
|
2013-05-06 10:46:11 +02:00
|
|
|
memory_region_ref(subregion);
|
2011-07-26 13:26:01 +02:00
|
|
|
QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
|
|
|
|
if (subregion->priority >= other->priority) {
|
|
|
|
QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
|
|
|
|
done:
|
2012-11-05 16:45:56 +01:00
|
|
|
memory_region_update_pending |= mr->enabled && subregion->enabled;
|
2012-08-23 13:02:30 +02:00
|
|
|
memory_region_transaction_commit();
|
2011-07-26 13:26:01 +02:00
|
|
|
}
|
|
|
|
|
2014-06-06 08:14:44 +02:00
|
|
|
static void memory_region_add_subregion_common(MemoryRegion *mr,
|
|
|
|
hwaddr offset,
|
|
|
|
MemoryRegion *subregion)
|
|
|
|
{
|
2021-11-02 17:43:16 +01:00
|
|
|
MemoryRegion *alias;
|
|
|
|
|
2014-06-11 11:18:09 +02:00
|
|
|
assert(!subregion->container);
|
|
|
|
subregion->container = mr;
|
2021-11-02 17:43:16 +01:00
|
|
|
for (alias = subregion->alias; alias; alias = alias->alias) {
|
|
|
|
alias->mapped_via_alias++;
|
|
|
|
}
|
2014-06-06 08:14:44 +02:00
|
|
|
subregion->addr = offset;
|
2014-06-11 11:18:09 +02:00
|
|
|
memory_region_update_container_subregions(subregion);
|
2014-06-06 08:14:44 +02:00
|
|
|
}
|
2011-07-26 13:26:01 +02:00
|
|
|
|
|
|
|
void memory_region_add_subregion(MemoryRegion *mr,
|
2012-10-23 12:30:10 +02:00
|
|
|
hwaddr offset,
|
2011-07-26 13:26:01 +02:00
|
|
|
MemoryRegion *subregion)
|
|
|
|
{
|
|
|
|
subregion->priority = 0;
|
|
|
|
memory_region_add_subregion_common(mr, offset, subregion);
|
|
|
|
}
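/*
 * Illustrative sketch (not part of the original source): typical board code
 * maps a freshly initialized region into the system address space at
 * priority 0; names such as "my_ram" and "machine" are hypothetical:
 *
 *     static MemoryRegion my_ram;
 *     memory_region_init_ram(&my_ram, OBJECT(machine), "my.ram",
 *                            256 * MiB, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0x40000000, &my_ram);
 */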
|
|
|
|
|
|
|
|
void memory_region_add_subregion_overlap(MemoryRegion *mr,
|
2012-10-23 12:30:10 +02:00
|
|
|
hwaddr offset,
|
2011-07-26 13:26:01 +02:00
|
|
|
MemoryRegion *subregion,
|
2013-09-16 10:21:14 +02:00
|
|
|
int priority)
|
2011-07-26 13:26:01 +02:00
|
|
|
{
|
|
|
|
subregion->priority = priority;
|
|
|
|
memory_region_add_subregion_common(mr, offset, subregion);
|
|
|
|
}
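/*
 * Illustrative sketch (not part of the original source): when two subregions
 * intentionally overlap, the one mapped with the higher priority wins where
 * they intersect, e.g. an MMIO window overlaid on RAM (names hypothetical):
 *
 *     memory_region_add_subregion(sysmem, 0, &s->dram);
 *     memory_region_add_subregion_overlap(sysmem, 0xfee00000, &s->msi_mmio, 1);
 */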
|
|
|
|
|
|
|
|
void memory_region_del_subregion(MemoryRegion *mr,
|
|
|
|
MemoryRegion *subregion)
|
|
|
|
{
|
2021-11-02 17:43:16 +01:00
|
|
|
MemoryRegion *alias;
|
|
|
|
|
2012-08-23 13:02:30 +02:00
|
|
|
memory_region_transaction_begin();
|
2014-06-11 11:18:09 +02:00
|
|
|
assert(subregion->container == mr);
|
|
|
|
subregion->container = NULL;
|
2021-11-02 17:43:16 +01:00
|
|
|
for (alias = subregion->alias; alias; alias = alias->alias) {
|
|
|
|
alias->mapped_via_alias--;
|
|
|
|
assert(alias->mapped_via_alias >= 0);
|
|
|
|
}
|
2011-07-26 13:26:01 +02:00
|
|
|
QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
|
2013-05-06 10:46:11 +02:00
|
|
|
memory_region_unref(subregion);
|
2012-11-05 16:45:56 +01:00
|
|
|
memory_region_update_pending |= mr->enabled && subregion->enabled;
|
2012-08-23 13:02:30 +02:00
|
|
|
memory_region_transaction_commit();
|
2011-09-14 10:54:58 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
|
|
|
|
{
|
|
|
|
if (enabled == mr->enabled) {
|
|
|
|
return;
|
|
|
|
}
|
2012-08-23 13:02:30 +02:00
|
|
|
memory_region_transaction_begin();
|
2011-09-14 10:54:58 +02:00
|
|
|
mr->enabled = enabled;
|
2012-11-05 16:45:56 +01:00
|
|
|
memory_region_update_pending = true;
|
2012-08-23 13:02:30 +02:00
|
|
|
memory_region_transaction_commit();
|
2011-07-26 13:26:01 +02:00
|
|
|
}
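/*
 * Illustrative sketch (not part of the original source): guest-visible enable
 * bits are often applied inside one explicit transaction so that several
 * toggles collapse into a single topology update (field names hypothetical):
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(&s->smram, smram_enabled);
 *     memory_region_set_enabled(&s->open_mem, !smram_enabled);
 *     memory_region_transaction_commit();
 */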
|
2011-07-26 13:26:04 +02:00
|
|
|
|
2014-12-16 10:21:23 +01:00
|
|
|
void memory_region_set_size(MemoryRegion *mr, uint64_t size)
|
|
|
|
{
|
|
|
|
Int128 s = int128_make64(size);
|
|
|
|
|
|
|
|
if (size == UINT64_MAX) {
|
|
|
|
s = int128_2_64();
|
|
|
|
}
|
|
|
|
if (int128_eq(s, mr->size)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
memory_region_transaction_begin();
|
|
|
|
mr->size = s;
|
|
|
|
memory_region_update_pending = true;
|
|
|
|
memory_region_transaction_commit();
|
|
|
|
}
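/*
 * Illustrative sketch (not part of the original source): resizable regions
 * can be grown or shrunk at reset/plug time; UINT64_MAX is treated as a full
 * 2^64-byte size above, mirroring memory_region_init() (names hypothetical):
 *
 *     memory_region_set_size(&s->window, new_window_bytes);
 */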
|
|
|
|
|
2014-06-06 08:15:18 +02:00
|
|
|
static void memory_region_readd_subregion(MemoryRegion *mr)
|
2011-09-14 11:10:12 +02:00
|
|
|
{
|
2014-06-11 11:18:09 +02:00
|
|
|
MemoryRegion *container = mr->container;
|
2011-09-14 11:10:12 +02:00
|
|
|
|
2014-06-11 11:18:09 +02:00
|
|
|
if (container) {
|
2014-06-06 08:15:18 +02:00
|
|
|
memory_region_transaction_begin();
|
|
|
|
memory_region_ref(mr);
|
2014-06-11 11:18:09 +02:00
|
|
|
memory_region_del_subregion(container, mr);
|
2022-02-01 11:09:40 +01:00
|
|
|
memory_region_add_subregion_common(container, mr->addr, mr);
|
2014-06-06 08:15:18 +02:00
|
|
|
memory_region_unref(mr);
|
|
|
|
memory_region_transaction_commit();
|
2011-09-14 11:10:12 +02:00
|
|
|
}
|
2014-06-06 08:15:18 +02:00
|
|
|
}
|
2011-09-14 11:10:12 +02:00
|
|
|
|
2014-06-06 08:15:18 +02:00
|
|
|
void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
|
|
|
|
{
|
|
|
|
if (addr != mr->addr) {
|
|
|
|
mr->addr = addr;
|
|
|
|
memory_region_readd_subregion(mr);
|
|
|
|
}
|
2011-09-14 11:10:12 +02:00
|
|
|
}
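/*
 * Illustrative sketch (not part of the original source): moving a region,
 * e.g. when the guest reprograms a BAR-like base register, is a single
 * set_address() call; the readd above keeps the container and priority
 * unchanged (names hypothetical):
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_address(&dev->bar_mr, new_base);
 *     memory_region_set_enabled(&dev->bar_mr, bar_enabled);
 *     memory_region_transaction_commit();
 */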
|
|
|
|
|
2012-10-23 12:30:10 +02:00
|
|
|
void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
|
2011-12-04 18:16:50 +01:00
|
|
|
{
|
|
|
|
assert(mr->alias);
|
|
|
|
|
2012-08-23 13:02:30 +02:00
|
|
|
if (offset == mr->alias_offset) {
|
2011-12-04 18:16:50 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-08-23 13:02:30 +02:00
|
|
|
memory_region_transaction_begin();
|
|
|
|
mr->alias_offset = offset;
|
2012-11-05 16:45:56 +01:00
|
|
|
memory_region_update_pending |= mr->enabled;
|
2012-08-23 13:02:30 +02:00
|
|
|
memory_region_transaction_commit();
|
2011-12-04 18:16:50 +01:00
|
|
|
}
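/*
 * Illustrative sketch (not part of the original source): a banked window is
 * typically an alias whose offset slides when the guest writes a bank-select
 * register (names hypothetical):
 *
 *     memory_region_set_alias_offset(&s->bank_window, bank * 64 * KiB);
 */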
|
|
|
|
|
2014-10-31 17:38:37 +01:00
|
|
|
uint64_t memory_region_get_alignment(const MemoryRegion *mr)
|
|
|
|
{
|
|
|
|
return mr->align;
|
|
|
|
}
|
|
|
|
|
2011-12-08 14:00:18 +01:00
|
|
|
static int cmp_flatrange_addr(const void *addr_, const void *fr_)
|
|
|
|
{
|
|
|
|
const AddrRange *addr = addr_;
|
|
|
|
const FlatRange *fr = fr_;
|
|
|
|
|
|
|
|
if (int128_le(addrrange_end(*addr), fr->addr.start)) {
|
|
|
|
return -1;
|
|
|
|
} else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-05-06 10:26:13 +02:00
|
|
|
static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
|
2011-12-08 14:00:18 +01:00
|
|
|
{
|
2013-05-06 10:26:13 +02:00
|
|
|
return bsearch(&addr, view->ranges, view->nr,
|
2011-12-08 14:00:18 +01:00
|
|
|
sizeof(FlatRange), cmp_flatrange_addr);
|
|
|
|
}
|
|
|
|
|
2014-06-02 15:25:06 +02:00
|
|
|
bool memory_region_is_mapped(MemoryRegion *mr)
|
|
|
|
{
|
2021-11-02 17:43:16 +01:00
|
|
|
return !!mr->container || mr->mapped_via_alias;
|
2014-06-02 15:25:06 +02:00
|
|
|
}
|
|
|
|
|
2015-07-14 13:45:34 +02:00
|
|
|
/* Same as memory_region_find, but it does not add a reference to the
|
|
|
|
* returned region. It must be called from an RCU critical section.
|
|
|
|
*/
|
|
|
|
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
|
|
|
|
hwaddr addr, uint64_t size)
|
2011-12-08 14:00:18 +01:00
|
|
|
{
|
2013-05-27 10:08:27 +02:00
|
|
|
MemoryRegionSection ret = { .mr = NULL };
|
2013-05-07 15:48:28 +02:00
|
|
|
MemoryRegion *root;
|
|
|
|
AddressSpace *as;
|
|
|
|
AddrRange range;
|
2013-05-06 10:26:13 +02:00
|
|
|
FlatView *view;
|
2013-05-07 15:48:28 +02:00
|
|
|
FlatRange *fr;
|
|
|
|
|
|
|
|
addr += mr->addr;
|
2014-06-11 11:18:09 +02:00
|
|
|
for (root = mr; root->container; ) {
|
|
|
|
root = root->container;
|
2013-05-07 15:48:28 +02:00
|
|
|
addr += root->addr;
|
|
|
|
}
|
2011-12-08 14:00:18 +01:00
|
|
|
|
2013-05-07 15:48:28 +02:00
|
|
|
as = memory_region_to_address_space(root);
|
2014-06-02 15:25:06 +02:00
|
|
|
if (!as) {
|
|
|
|
return ret;
|
|
|
|
}
|
2013-05-07 15:48:28 +02:00
|
|
|
range = addrrange_make(int128_make64(addr), int128_make64(size));
|
2013-05-06 10:26:13 +02:00
|
|
|
|
2017-09-21 10:50:58 +02:00
|
|
|
view = address_space_to_flatview(as);
|
2013-05-06 10:26:13 +02:00
|
|
|
fr = flatview_lookup(view, range);
|
2011-12-08 14:00:18 +01:00
|
|
|
if (!fr) {
|
2015-07-14 13:45:34 +02:00
|
|
|
return ret;
|
2011-12-08 14:00:18 +01:00
|
|
|
}
|
|
|
|
|
2013-05-06 10:26:13 +02:00
|
|
|
while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
|
2011-12-08 14:00:18 +01:00
|
|
|
--fr;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret.mr = fr->mr;
|
2017-09-21 10:50:58 +02:00
|
|
|
ret.fv = view;
|
2011-12-08 14:00:18 +01:00
|
|
|
range = addrrange_intersection(range, fr->addr);
|
|
|
|
ret.offset_within_region = fr->offset_in_region;
|
|
|
|
ret.offset_within_region += int128_get64(int128_sub(range.start,
|
|
|
|
fr->addr.start));
|
2013-05-27 10:08:27 +02:00
|
|
|
ret.size = range.size;
|
2011-12-08 14:00:18 +01:00
|
|
|
ret.offset_within_address_space = int128_get64(range.start);
|
2012-02-08 16:01:23 +01:00
|
|
|
ret.readonly = fr->readonly;
|
2018-10-03 13:44:52 +02:00
|
|
|
ret.nonvolatile = fr->nonvolatile;
|
2015-07-14 13:45:34 +02:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
MemoryRegionSection memory_region_find(MemoryRegion *mr,
|
|
|
|
hwaddr addr, uint64_t size)
|
|
|
|
{
|
|
|
|
MemoryRegionSection ret;
|
2019-10-07 16:36:41 +02:00
|
|
|
RCU_READ_LOCK_GUARD();
|
2015-07-14 13:45:34 +02:00
|
|
|
ret = memory_region_find_rcu(mr, addr, size);
|
|
|
|
if (ret.mr) {
|
|
|
|
memory_region_ref(ret.mr);
|
|
|
|
}
|
2011-12-08 14:00:18 +01:00
|
|
|
return ret;
|
|
|
|
}
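/*
 * Illustrative sketch (not part of the original source): callers of
 * memory_region_find() own a reference on the returned region and must drop
 * it when done:
 *
 *     MemoryRegionSection sec = memory_region_find(get_system_memory(),
 *                                                  addr, 4);
 *     if (sec.mr) {
 *         ... use sec.offset_within_region, sec.size ...
 *         memory_region_unref(sec.mr);
 *     }
 */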
|
|
|
|
|
2021-04-13 11:55:20 +02:00
|
|
|
MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s)
|
|
|
|
{
|
|
|
|
MemoryRegionSection *tmp = g_new(MemoryRegionSection, 1);
|
|
|
|
|
|
|
|
*tmp = *s;
|
|
|
|
if (tmp->mr) {
|
|
|
|
memory_region_ref(tmp->mr);
|
|
|
|
}
|
|
|
|
if (tmp->fv) {
|
|
|
|
bool ret = flatview_ref(tmp->fv);
|
|
|
|
|
|
|
|
g_assert(ret);
|
|
|
|
}
|
|
|
|
return tmp;
|
|
|
|
}
|
|
|
|
|
|
|
|
void memory_region_section_free_copy(MemoryRegionSection *s)
|
|
|
|
{
|
|
|
|
if (s->fv) {
|
|
|
|
flatview_unref(s->fv);
|
|
|
|
}
|
|
|
|
if (s->mr) {
|
|
|
|
memory_region_unref(s->mr);
|
|
|
|
}
|
|
|
|
g_free(s);
|
|
|
|
}
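/*
 * Illustrative sketch (not part of the original source): new_copy/free_copy
 * are paired when a MemoryRegionSection must outlive the callback that
 * produced it:
 *
 *     MemoryRegionSection *copy = memory_region_section_new_copy(section);
 *     ... stash it for deferred processing ...
 *     memory_region_section_free_copy(copy);
 */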
|
|
|
|
|
2015-07-14 13:45:34 +02:00
|
|
|
bool memory_region_present(MemoryRegion *container, hwaddr addr)
|
|
|
|
{
|
|
|
|
MemoryRegion *mr;
|
|
|
|
|
2019-10-07 16:36:41 +02:00
|
|
|
RCU_READ_LOCK_GUARD();
|
2015-07-14 13:45:34 +02:00
|
|
|
mr = memory_region_find_rcu(container, addr, 1).mr;
|
|
|
|
return mr && mr != container;
|
|
|
|
}
|
|
|
|
|
2016-09-22 16:08:31 +02:00
|
|
|
void memory_global_dirty_log_sync(void)
|
2011-12-15 15:24:49 +01:00
|
|
|
{
|
2018-02-06 17:55:27 +01:00
|
|
|
memory_region_sync_dirty_bitmap(NULL);
|
2011-12-11 13:47:25 +01:00
|
|
|
}
|
|
|
|
|
2018-02-06 18:37:39 +01:00
|
|
|
void memory_global_after_dirty_log_sync(void)
|
|
|
|
{
|
|
|
|
MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
|
|
|
|
}
|
|
|
|
|
2022-02-07 13:30:19 +01:00
|
|
|
/*
|
|
|
|
* Dirty track stop flags that are postponed due to VM being stopped. Should
|
|
|
|
* only be used within vmstate_change hook.
|
|
|
|
*/
|
|
|
|
static unsigned int postponed_stop_flags;
|
2017-07-28 12:28:53 +02:00
|
|
|
static VMChangeStateEntry *vmstate_change;
|
2022-02-07 13:30:19 +01:00
|
|
|
static void memory_global_dirty_log_stop_postponed_run(void);
|
2017-07-28 12:28:53 +02:00
|
|
|
|
2021-06-29 18:01:19 +02:00
|
|
|
void memory_global_dirty_log_start(unsigned int flags)
|
2011-12-11 13:47:25 +01:00
|
|
|
{
|
2022-02-07 13:30:19 +01:00
|
|
|
unsigned int old_flags;
|
|
|
|
|
|
|
|
assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));
|
2021-11-30 09:00:28 +01:00
|
|
|
|
2017-07-28 12:28:53 +02:00
|
|
|
if (vmstate_change) {
|
2022-02-07 13:30:19 +01:00
|
|
|
/* If there is postponed stop(), operate on it first */
|
|
|
|
postponed_stop_flags &= ~flags;
|
|
|
|
memory_global_dirty_log_stop_postponed_run();
|
2017-07-28 12:28:53 +02:00
|
|
|
}
|
|
|
|
|
2022-02-07 13:30:19 +01:00
|
|
|
flags &= ~global_dirty_tracking;
|
|
|
|
if (!flags) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
old_flags = global_dirty_tracking;
|
2021-06-29 18:01:19 +02:00
|
|
|
global_dirty_tracking |= flags;
|
|
|
|
trace_global_dirty_changed(global_dirty_tracking);
|
2015-03-23 10:57:21 +01:00
|
|
|
|
2021-11-30 09:00:28 +01:00
|
|
|
if (!old_flags) {
|
|
|
|
MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
|
|
|
|
memory_region_transaction_begin();
|
|
|
|
memory_region_update_pending = true;
|
|
|
|
memory_region_transaction_commit();
|
|
|
|
}
|
2011-12-11 13:47:25 +01:00
|
|
|
}
|
|
|
|
|
2021-06-29 18:01:19 +02:00
|
|
|
static void memory_global_dirty_log_do_stop(unsigned int flags)
|
2011-12-11 13:47:25 +01:00
|
|
|
{
|
2021-06-29 18:01:19 +02:00
|
|
|
assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));
|
|
|
|
assert((global_dirty_tracking & flags) == flags);
|
|
|
|
global_dirty_tracking &= ~flags;
|
|
|
|
|
|
|
|
trace_global_dirty_changed(global_dirty_tracking);
|
2015-03-23 10:57:21 +01:00
|
|
|
|
2021-11-30 09:00:28 +01:00
|
|
|
if (!global_dirty_tracking) {
|
|
|
|
memory_region_transaction_begin();
|
|
|
|
memory_region_update_pending = true;
|
|
|
|
memory_region_transaction_commit();
|
|
|
|
MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
|
|
|
|
}
|
2011-12-11 13:47:25 +01:00
|
|
|
}
|
|
|
|
|
2022-02-07 13:30:19 +01:00
|
|
|
/*
|
|
|
|
* Execute the postponed dirty log stop operations if there is, then reset
|
|
|
|
* everything (including the flags and the vmstate change hook).
|
|
|
|
*/
|
|
|
|
static void memory_global_dirty_log_stop_postponed_run(void)
|
|
|
|
{
|
|
|
|
/* This must be called with the vmstate handler registered */
|
|
|
|
assert(vmstate_change);
|
|
|
|
|
|
|
|
/* Note: postponed_stop_flags can be cleared in log start routine */
|
|
|
|
if (postponed_stop_flags) {
|
|
|
|
memory_global_dirty_log_do_stop(postponed_stop_flags);
|
|
|
|
postponed_stop_flags = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
qemu_del_vm_change_state_handler(vmstate_change);
|
|
|
|
vmstate_change = NULL;
|
|
|
|
}
|
|
|
|
|
2021-01-11 16:20:20 +01:00
|
|
|
static void memory_vm_change_state_handler(void *opaque, bool running,
|
2017-07-28 12:28:53 +02:00
|
|
|
RunState state)
|
|
|
|
{
|
|
|
|
if (running) {
|
2022-02-07 13:30:19 +01:00
|
|
|
memory_global_dirty_log_stop_postponed_run();
|
2017-07-28 12:28:53 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-29 18:01:19 +02:00
|
|
|
void memory_global_dirty_log_stop(unsigned int flags)
|
2017-07-28 12:28:53 +02:00
|
|
|
{
|
|
|
|
if (!runstate_is_running()) {
|
2022-02-07 13:30:19 +01:00
|
|
|
/* Postpone the dirty log stop, e.g., to when VM starts again */
|
2017-07-28 12:28:53 +02:00
|
|
|
if (vmstate_change) {
|
2022-02-07 13:30:19 +01:00
|
|
|
/* Batch with previous postponed flags */
|
|
|
|
postponed_stop_flags |= flags;
|
|
|
|
} else {
|
|
|
|
postponed_stop_flags = flags;
|
|
|
|
vmstate_change = qemu_add_vm_change_state_handler(
|
|
|
|
memory_vm_change_state_handler, NULL);
|
2017-07-28 12:28:53 +02:00
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2021-06-29 18:01:19 +02:00
|
|
|
memory_global_dirty_log_do_stop(flags);
|
2017-07-28 12:28:53 +02:00
|
|
|
}
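/*
 * Illustrative sketch (not part of the original source): a dirty-tracking
 * user such as migration brackets its run with matching start/stop calls for
 * its own flag:
 *
 *     memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
 *     ... iterate, calling memory_global_dirty_log_sync() as needed ...
 *     memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
 *
 * If the VM is stopped, the stop is postponed via the vmstate hook above.
 */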
|
|
|
|
|
2011-12-11 13:47:25 +01:00
|
|
|
static void listener_add_address_space(MemoryListener *listener,
|
|
|
|
AddressSpace *as)
|
|
|
|
{
|
2013-05-06 10:26:13 +02:00
|
|
|
FlatView *view;
|
2011-12-11 13:47:25 +01:00
|
|
|
FlatRange *fr;
|
|
|
|
|
2015-11-02 09:23:52 +01:00
|
|
|
if (listener->begin) {
|
|
|
|
listener->begin(listener);
|
|
|
|
}
|
2021-06-29 18:01:19 +02:00
|
|
|
if (global_dirty_tracking) {
|
2012-10-02 16:39:57 +02:00
|
|
|
if (listener->log_global_start) {
|
|
|
|
listener->log_global_start(listener);
|
|
|
|
}
|
2011-12-11 13:47:25 +01:00
|
|
|
}
|
2012-10-02 16:39:57 +02:00
|
|
|
|
2013-05-06 11:57:21 +02:00
|
|
|
view = address_space_get_flatview(as);
|
2013-05-06 10:26:13 +02:00
|
|
|
FOR_EACH_FLAT_RANGE(fr, view) {
|
2017-10-16 16:43:02 +02:00
|
|
|
MemoryRegionSection section = section_from_flat_range(fr, view);
|
|
|
|
|
2012-10-02 16:39:57 +02:00
|
|
|
if (listener->region_add) {
|
|
|
|
listener->region_add(listener, &section);
|
|
|
|
}
|
2017-10-16 16:42:56 +02:00
|
|
|
if (fr->dirty_log_mask && listener->log_start) {
|
|
|
|
listener->log_start(listener, &section, 0, fr->dirty_log_mask);
|
|
|
|
}
|
2011-12-11 13:47:25 +01:00
|
|
|
}
|
2015-11-02 09:23:52 +01:00
|
|
|
if (listener->commit) {
|
|
|
|
listener->commit(listener);
|
|
|
|
}
|
2013-05-06 11:57:21 +02:00
|
|
|
flatview_unref(view);
|
2011-12-11 13:47:25 +01:00
|
|
|
}
|
|
|
|
|
2018-01-22 07:02:44 +01:00
|
|
|
static void listener_del_address_space(MemoryListener *listener,
|
|
|
|
AddressSpace *as)
|
|
|
|
{
|
|
|
|
FlatView *view;
|
|
|
|
FlatRange *fr;
|
|
|
|
|
|
|
|
if (listener->begin) {
|
|
|
|
listener->begin(listener);
|
|
|
|
}
|
|
|
|
view = address_space_get_flatview(as);
|
|
|
|
FOR_EACH_FLAT_RANGE(fr, view) {
|
|
|
|
MemoryRegionSection section = section_from_flat_range(fr, view);
|
|
|
|
|
|
|
|
if (fr->dirty_log_mask && listener->log_stop) {
|
|
|
|
listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
|
|
|
|
}
|
|
|
|
if (listener->region_del) {
|
|
|
|
listener->region_del(listener, &section);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (listener->commit) {
|
|
|
|
listener->commit(listener);
|
|
|
|
}
|
|
|
|
flatview_unref(view);
|
|
|
|
}
|
|
|
|
|
2016-09-22 16:11:54 +02:00
|
|
|
void memory_listener_register(MemoryListener *listener, AddressSpace *as)
|
2011-12-11 13:47:25 +01:00
|
|
|
{
|
2012-02-08 14:05:50 +01:00
|
|
|
MemoryListener *other = NULL;
|
|
|
|
|
2021-05-06 18:05:40 +02:00
|
|
|
/* Only one of them can be defined for a listener */
|
|
|
|
assert(!(listener->log_sync && listener->log_sync_global));
|
|
|
|
|
2016-09-22 16:11:54 +02:00
|
|
|
listener->address_space = as;
|
2012-02-08 14:05:50 +01:00
|
|
|
if (QTAILQ_EMPTY(&memory_listeners)
|
2018-12-06 13:10:34 +01:00
|
|
|
|| listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
|
2012-02-08 14:05:50 +01:00
|
|
|
QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
|
|
|
|
} else {
|
|
|
|
QTAILQ_FOREACH(other, &memory_listeners, link) {
|
|
|
|
if (listener->priority < other->priority) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
QTAILQ_INSERT_BEFORE(other, listener, link);
|
|
|
|
}
|
2012-10-02 15:28:50 +02:00
|
|
|
|
2016-09-22 16:23:06 +02:00
|
|
|
if (QTAILQ_EMPTY(&as->listeners)
|
2018-12-06 13:10:34 +01:00
|
|
|
|| listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
|
2016-09-22 16:23:06 +02:00
|
|
|
QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
|
|
|
|
} else {
|
|
|
|
QTAILQ_FOREACH(other, &as->listeners, link_as) {
|
|
|
|
if (listener->priority < other->priority) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
QTAILQ_INSERT_BEFORE(other, listener, link_as);
|
|
|
|
}
|
|
|
|
|
2016-09-22 16:11:54 +02:00
|
|
|
listener_add_address_space(listener, as);
|
2011-12-11 13:47:25 +01:00
|
|
|
}
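/*
 * Illustrative sketch (not part of the original source): a minimal listener
 * fills in only the callbacks it cares about and registers against one
 * address space (names hypothetical):
 *
 *     static void my_region_add(MemoryListener *l, MemoryRegionSection *s)
 *     {
 *         ...
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */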
|
|
|
|
|
|
|
|
void memory_listener_unregister(MemoryListener *listener)
|
|
|
|
{
|
2017-01-27 16:40:12 +01:00
|
|
|
if (!listener->address_space) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-01-22 07:02:44 +01:00
|
|
|
listener_del_address_space(listener, listener->address_space);
|
2012-02-08 14:05:50 +01:00
|
|
|
QTAILQ_REMOVE(&memory_listeners, listener, link);
|
2016-09-22 16:23:06 +02:00
|
|
|
QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
|
2017-01-27 16:40:12 +01:00
|
|
|
listener->address_space = NULL;
|
2011-12-15 15:24:49 +01:00
|
|
|
}
|
2011-12-08 14:00:18 +01:00
|
|
|
|
2019-06-21 11:27:33 +02:00
|
|
|
void address_space_remove_listeners(AddressSpace *as)
|
|
|
|
{
|
|
|
|
while (!QTAILQ_EMPTY(&as->listeners)) {
|
|
|
|
memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-04-29 18:25:51 +02:00
|
|
|
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
|
2011-07-26 13:26:04 +02:00
|
|
|
{
|
2015-02-11 15:21:04 +01:00
|
|
|
memory_region_ref(root);
|
2012-10-02 13:53:41 +02:00
|
|
|
as->root = root;
|
2017-09-21 10:51:05 +02:00
|
|
|
as->current_map = NULL;
|
2012-10-30 12:47:44 +01:00
|
|
|
as->ioeventfd_nb = 0;
|
|
|
|
as->ioeventfds = NULL;
|
2016-09-22 16:23:06 +02:00
|
|
|
QTAILQ_INIT(&as->listeners);
|
2012-10-02 15:28:50 +02:00
|
|
|
QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
|
2013-04-29 18:25:51 +02:00
|
|
|
as->name = g_strdup(name ? name : "anonymous");
|
2017-09-21 10:51:09 +02:00
|
|
|
address_space_update_topology(as);
|
|
|
|
address_space_update_ioeventfds(as);
|
2011-07-26 13:26:04 +02:00
|
|
|
}
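/*
 * Illustrative sketch (not part of the original source): a device that does
 * DMA through its own root region typically wraps it in a private address
 * space for its lifetime (names hypothetical):
 *
 *     address_space_init(&s->dma_as, &s->dma_root, "my-dma");
 *     ...
 *     address_space_destroy(&s->dma_as);
 */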
|
2011-07-26 13:26:08 +02:00
|
|
|
|
2013-05-17 12:37:03 +02:00
|
|
|
static void do_address_space_destroy(AddressSpace *as)
|
2012-10-07 12:59:55 +02:00
|
|
|
{
|
2016-09-22 16:23:06 +02:00
|
|
|
assert(QTAILQ_EMPTY(&as->listeners));
|
2014-05-30 20:59:00 +02:00
|
|
|
|
2013-05-06 11:57:21 +02:00
|
|
|
flatview_unref(as->current_map);
|
2013-04-29 18:25:51 +02:00
|
|
|
g_free(as->name);
|
2012-10-30 12:47:44 +01:00
|
|
|
g_free(as->ioeventfds);
|
2015-02-11 15:21:04 +01:00
|
|
|
memory_region_unref(as->root);
|
2012-10-07 12:59:55 +02:00
|
|
|
}
|
|
|
|
|
2013-05-17 12:37:03 +02:00
|
|
|
void address_space_destroy(AddressSpace *as)
|
|
|
|
{
|
2015-02-11 15:21:04 +01:00
|
|
|
MemoryRegion *root = as->root;
|
|
|
|
|
2013-05-17 12:37:03 +02:00
|
|
|
/* Flush out anything from MemoryListeners listening in on this */
|
|
|
|
memory_region_transaction_begin();
|
|
|
|
as->root = NULL;
|
|
|
|
memory_region_transaction_commit();
|
|
|
|
QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
|
|
|
|
|
|
|
|
/* At this point, as->dispatch and as->current_map are dummy
|
|
|
|
* entries that the guest should never use. Wait for the old
|
|
|
|
* values to expire before freeing the data.
|
|
|
|
*/
|
2015-02-11 15:21:04 +01:00
|
|
|
as->root = root;
|
2013-05-17 12:37:03 +02:00
|
|
|
call_rcu(as, do_address_space_destroy, rcu);
|
|
|
|
}
|
|
|
|
|
2017-01-16 09:40:04 +01:00
|
|
|
static const char *memory_region_type(MemoryRegion *mr)
|
|
|
|
{
|
2020-02-24 10:13:00 +01:00
|
|
|
if (mr->alias) {
|
|
|
|
return memory_region_type(mr->alias);
|
|
|
|
}
|
2017-01-16 09:40:04 +01:00
|
|
|
if (memory_region_is_ram_device(mr)) {
|
|
|
|
return "ramd";
|
|
|
|
} else if (memory_region_is_romd(mr)) {
|
|
|
|
return "romd";
|
|
|
|
} else if (memory_region_is_rom(mr)) {
|
|
|
|
return "rom";
|
|
|
|
} else if (memory_region_is_ram(mr)) {
|
|
|
|
return "ram";
|
|
|
|
} else {
|
|
|
|
return "i/o";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-09-11 22:22:05 +02:00
|
|
|
typedef struct MemoryRegionList MemoryRegionList;
|
|
|
|
|
|
|
|
struct MemoryRegionList {
|
|
|
|
const MemoryRegion *mr;
|
2017-09-03 18:33:04 +02:00
|
|
|
QTAILQ_ENTRY(MemoryRegionList) mrqueue;
|
2011-09-11 22:22:05 +02:00
|
|
|
};
|
|
|
|
|
2018-12-06 11:58:10 +01:00
|
|
|
typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;
|
2011-09-11 22:22:05 +02:00
|
|
|
|
2017-01-16 09:40:04 +01:00
|
|
|
#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
|
|
|
|
int128_sub((size), int128_one())) : 0)
|
|
|
|
#define MTREE_INDENT " "
|
|
|
|
|
2019-04-17 21:17:56 +02:00
|
|
|
static void mtree_expand_owner(const char *label, Object *obj)
|
2018-06-04 05:25:11 +02:00
|
|
|
{
|
|
|
|
DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);
|
|
|
|
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
|
2018-06-04 05:25:11 +02:00
|
|
|
if (dev && dev->id) {
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf(" id=%s", dev->id);
|
2018-06-04 05:25:11 +02:00
|
|
|
} else {
|
2020-05-05 17:29:10 +02:00
|
|
|
char *canonical_path = object_get_canonical_path(obj);
|
2018-06-04 05:25:11 +02:00
|
|
|
if (canonical_path) {
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf(" path=%s", canonical_path);
|
2018-06-04 05:25:11 +02:00
|
|
|
g_free(canonical_path);
|
|
|
|
} else {
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf(" type=%s", object_get_typename(obj));
|
2018-06-04 05:25:11 +02:00
|
|
|
}
|
|
|
|
}
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf("}");
|
2018-06-04 05:25:11 +02:00
|
|
|
}
|
|
|
|
|
2019-04-17 21:17:56 +02:00
|
|
|
static void mtree_print_mr_owner(const MemoryRegion *mr)
|
2018-06-04 05:25:11 +02:00
|
|
|
{
|
|
|
|
Object *owner = mr->owner;
|
|
|
|
Object *parent = memory_region_owner((MemoryRegion *)mr);
|
|
|
|
|
|
|
|
if (!owner && !parent) {
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf(" orphan");
|
2018-06-04 05:25:11 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (owner) {
|
2019-04-17 21:17:56 +02:00
|
|
|
mtree_expand_owner("owner", owner);
|
2018-06-04 05:25:11 +02:00
|
|
|
}
|
|
|
|
if (parent && parent != owner) {
|
2019-04-17 21:17:56 +02:00
|
|
|
mtree_expand_owner("parent", parent);
|
2018-06-04 05:25:11 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-17 21:17:56 +02:00
|
|
|
static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
|
2012-10-23 12:30:10 +02:00
|
|
|
hwaddr base,
|
2018-06-04 05:25:11 +02:00
|
|
|
MemoryRegionListHead *alias_print_queue,
|
memory: Make 'info mtree' not display disabled regions by default
We might have many disabled memory regions, making the 'info mtree'
output too verbose to be useful.
Remove the disabled regions in the default output, but allow the
monitor user to display them using the '-D' option.
[Before/After 'info mtree' dumps trimmed: the default output now omits the
many pam-ram/pam-pci/pam-rom alias lines marked "[disabled]" that previously
cluttered the system memory-region tree.]
The old behavior is preserved using 'info mtree -D'.
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2020-05-29 14:53:25 +02:00
|
|
|
bool owner, bool display_disabled)
|
2011-09-11 22:22:05 +02:00
|
|
|
{
|
2011-09-27 15:00:41 +02:00
|
|
|
MemoryRegionList *new_ml, *ml, *next_ml;
|
|
|
|
MemoryRegionListHead submr_print_queue;
|
2011-09-11 22:22:05 +02:00
|
|
|
const MemoryRegion *submr;
|
|
|
|
unsigned int i;
|
2017-03-14 13:56:27 +01:00
|
|
|
hwaddr cur_start, cur_end;
|
2011-09-11 22:22:05 +02:00
|
|
|
|
2015-04-08 12:57:11 +02:00
|
|
|
if (!mr) {
|
2011-09-11 22:22:05 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-03-14 13:56:27 +01:00
|
|
|
cur_start = base + mr->addr;
|
|
|
|
cur_end = cur_start + MR_SIZE(mr->size);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Try to detect overflow of memory region. This should never
|
|
|
|
* happen normally. When it happens, we dump something to warn the
|
|
|
|
* user who is observing this.
|
|
|
|
*/
|
|
|
|
if (cur_start < base || cur_end < cur_start) {
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf("[DETECTED OVERFLOW!] ");
|
2017-03-14 13:56:27 +01:00
|
|
|
}
|
|
|
|
|
2011-09-11 22:22:05 +02:00
|
|
|
if (mr->alias) {
|
|
|
|
MemoryRegionList *ml;
|
|
|
|
bool found = false;
|
|
|
|
|
|
|
|
/* check if the alias is already in the queue */
|
2017-09-03 18:33:04 +02:00
|
|
|
QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
|
2013-12-11 12:51:46 +01:00
|
|
|
if (ml->mr == mr->alias) {
|
2011-09-11 22:22:05 +02:00
|
|
|
found = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!found) {
|
|
|
|
ml = g_new(MemoryRegionList, 1);
|
|
|
|
ml->mr = mr->alias;
|
2017-09-03 18:33:04 +02:00
|
|
|
QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
|
2011-09-11 22:22:05 +02:00
|
|
|
}
|
2020-05-29 14:53:25 +02:00
|
|
|
if (mr->enabled || display_disabled) {
|
|
|
|
for (i = 0; i < level; i++) {
|
|
|
|
qemu_printf(MTREE_INDENT);
|
|
|
|
}
|
|
|
|
qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
|
|
|
|
" (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
|
|
|
|
"-" TARGET_FMT_plx "%s",
|
|
|
|
cur_start, cur_end,
|
|
|
|
mr->priority,
|
|
|
|
mr->nonvolatile ? "nv-" : "",
|
|
|
|
memory_region_type((MemoryRegion *)mr),
|
|
|
|
memory_region_name(mr),
|
|
|
|
memory_region_name(mr->alias),
|
|
|
|
mr->alias_offset,
|
|
|
|
mr->alias_offset + MR_SIZE(mr->size),
|
|
|
|
mr->enabled ? "" : " [disabled]");
|
|
|
|
if (owner) {
|
|
|
|
mtree_print_mr_owner(mr);
|
|
|
|
}
|
|
|
|
qemu_printf("\n");
|
2018-06-04 05:25:11 +02:00
|
|
|
}
|
2011-09-11 22:22:05 +02:00
|
|
|
} else {
|
2020-05-29 14:53:25 +02:00
|
|
|
if (mr->enabled || display_disabled) {
|
|
|
|
for (i = 0; i < level; i++) {
|
|
|
|
qemu_printf(MTREE_INDENT);
|
|
|
|
}
|
|
|
|
qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
|
|
|
|
" (prio %d, %s%s): %s%s",
|
|
|
|
cur_start, cur_end,
|
|
|
|
mr->priority,
|
|
|
|
mr->nonvolatile ? "nv-" : "",
|
|
|
|
memory_region_type((MemoryRegion *)mr),
|
|
|
|
memory_region_name(mr),
|
|
|
|
mr->enabled ? "" : " [disabled]");
|
|
|
|
if (owner) {
|
|
|
|
mtree_print_mr_owner(mr);
|
|
|
|
}
|
|
|
|
qemu_printf("\n");
|
2018-06-04 05:25:11 +02:00
|
|
|
}
|
2011-09-11 22:22:05 +02:00
|
|
|
}
|
2011-09-27 15:00:41 +02:00
|
|
|
|
|
|
|
QTAILQ_INIT(&submr_print_queue);
|
|
|
|
|
2011-09-11 22:22:05 +02:00
|
|
|
QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
|
2011-09-27 15:00:41 +02:00
|
|
|
new_ml = g_new(MemoryRegionList, 1);
|
|
|
|
new_ml->mr = submr;
|
2017-09-03 18:33:04 +02:00
|
|
|
QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
|
2011-09-27 15:00:41 +02:00
|
|
|
if (new_ml->mr->addr < ml->mr->addr ||
|
|
|
|
(new_ml->mr->addr == ml->mr->addr &&
|
|
|
|
new_ml->mr->priority > ml->mr->priority)) {
|
2017-09-03 18:33:04 +02:00
|
|
|
QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
|
2011-09-27 15:00:41 +02:00
|
|
|
new_ml = NULL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (new_ml) {
|
2017-09-03 18:33:04 +02:00
|
|
|
QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
|
2011-09-27 15:00:41 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-03 18:33:04 +02:00
|
|
|
QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
|
2019-04-17 21:17:56 +02:00
|
|
|
mtree_print_mr(ml->mr, level + 1, cur_start,
|
00000000000e8000-00000000000ebfff (prio 1, ram): alias pam-ram @pc.ram 00000000000e8000-00000000000ebfff [disabled]
00000000000e8000-00000000000ebfff (prio 1, ram): alias pam-pci @pc.ram 00000000000e8000-00000000000ebfff [disabled]
00000000000e8000-00000000000ebfff (prio 1, ram): alias pam-rom @pc.ram 00000000000e8000-00000000000ebfff [disabled]
00000000000e8000-00000000000ebfff (prio 1, i/o): alias pam-pci @pci 00000000000e8000-00000000000ebfff
00000000000ec000-00000000000effff (prio 1, ram): alias pam-ram @pc.ram 00000000000ec000-00000000000effff [disabled]
00000000000ec000-00000000000effff (prio 1, ram): alias pam-pci @pc.ram 00000000000ec000-00000000000effff [disabled]
00000000000ec000-00000000000effff (prio 1, ram): alias pam-rom @pc.ram 00000000000ec000-00000000000effff [disabled]
00000000000ec000-00000000000effff (prio 1, i/o): alias pam-pci @pci 00000000000ec000-00000000000effff
00000000000f0000-00000000000fffff (prio 1, ram): alias pam-ram @pc.ram 00000000000f0000-00000000000fffff [disabled]
00000000000f0000-00000000000fffff (prio 1, ram): alias pam-pci @pc.ram 00000000000f0000-00000000000fffff [disabled]
00000000000f0000-00000000000fffff (prio 1, ram): alias pam-rom @pc.ram 00000000000f0000-00000000000fffff [disabled]
00000000000f0000-00000000000fffff (prio 1, i/o): alias pam-pci @pci 00000000000f0000-00000000000fffff
00000000fec00000-00000000fec00fff (prio 0, i/o): ioapic
00000000fed00000-00000000fed003ff (prio 0, i/o): hpet
00000000fee00000-00000000feefffff (prio 4096, i/o): apic-msi
After:
(qemu) info mtree
memory-region: system
0000000000000000-ffffffffffffffff (prio 0, i/o): system
0000000000000000-0000000007ffffff (prio 0, ram): alias ram-below-4g @pc.ram 0000000000000000-0000000007ffffff
0000000000000000-ffffffffffffffff (prio -1, i/o): pci
00000000000a0000-00000000000bffff (prio 1, i/o): vga-lowmem
00000000000c0000-00000000000dffff (prio 1, rom): pc.rom
00000000000e0000-00000000000fffff (prio 1, rom): alias isa-bios @pc.bios 0000000000020000-000000000003ffff
00000000fffc0000-00000000ffffffff (prio 0, rom): pc.bios
00000000000a0000-00000000000bffff (prio 1, i/o): alias smram-region @pci 00000000000a0000-00000000000bffff
00000000000c0000-00000000000c3fff (prio 1, i/o): alias pam-pci @pci 00000000000c0000-00000000000c3fff
00000000000c4000-00000000000c7fff (prio 1, i/o): alias pam-pci @pci 00000000000c4000-00000000000c7fff
00000000000c8000-00000000000cbfff (prio 1, i/o): alias pam-pci @pci 00000000000c8000-00000000000cbfff
00000000000cc000-00000000000cffff (prio 1, i/o): alias pam-pci @pci 00000000000cc000-00000000000cffff
00000000000d0000-00000000000d3fff (prio 1, i/o): alias pam-pci @pci 00000000000d0000-00000000000d3fff
00000000000d4000-00000000000d7fff (prio 1, i/o): alias pam-pci @pci 00000000000d4000-00000000000d7fff
00000000000d8000-00000000000dbfff (prio 1, i/o): alias pam-pci @pci 00000000000d8000-00000000000dbfff
00000000000dc000-00000000000dffff (prio 1, i/o): alias pam-pci @pci 00000000000dc000-00000000000dffff
00000000000e0000-00000000000e3fff (prio 1, i/o): alias pam-pci @pci 00000000000e0000-00000000000e3fff
00000000000e4000-00000000000e7fff (prio 1, i/o): alias pam-pci @pci 00000000000e4000-00000000000e7fff
00000000000e8000-00000000000ebfff (prio 1, i/o): alias pam-pci @pci 00000000000e8000-00000000000ebfff
00000000000ec000-00000000000effff (prio 1, i/o): alias pam-pci @pci 00000000000ec000-00000000000effff
00000000000f0000-00000000000fffff (prio 1, i/o): alias pam-pci @pci 00000000000f0000-00000000000fffff
00000000fec00000-00000000fec00fff (prio 0, i/o): ioapic
00000000fed00000-00000000fed003ff (prio 0, i/o): hpet
00000000fee00000-00000000feefffff (prio 4096, i/o): apic-msi
The old behavior is preserved using 'info mtree -D'.
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2020-05-29 14:53:25 +02:00
|
|
|
alias_print_queue, owner, display_disabled);
|
2011-09-27 15:00:41 +02:00
|
|
|
}
|
|
|
|
|
2017-09-03 18:33:04 +02:00
|
|
|
QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
|
2011-09-27 15:00:41 +02:00
|
|
|
g_free(ml);
|
2011-09-11 22:22:05 +02:00
|
|
|
}
|
|
|
|
}
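The loop above keeps each region's subregions in a deterministic print order: ascending base address and, for equal addresses, descending priority, inserted into a QTAILQ before recursing (disabled regions are filtered by the mr->enabled || display_disabled test at the top, per the '-D' option described in the commit message). What follows is a minimal, hedged sketch of that ordering idiom, not the QEMU code itself; the Region type is a made-up stand-in for MemoryRegionList and it assumes qemu/queue.h from the QEMU tree.

/*
 * Sketch only: insertion sort into a QTAILQ by (addr ascending,
 * priority descending), mirroring the comparison used above.
 */
#include "qemu/osdep.h"
#include "qemu/queue.h"

typedef struct Region Region;
struct Region {
    uint64_t addr;                 /* base address of the subregion */
    int priority;                  /* higher priority wins on overlap */
    QTAILQ_ENTRY(Region) link;
};

typedef QTAILQ_HEAD(, Region) RegionHead;

static void region_queue_insert_sorted(RegionHead *head, Region *new_r)
{
    Region *r;

    QTAILQ_FOREACH(r, head, link) {
        if (new_r->addr < r->addr ||
            (new_r->addr == r->addr && new_r->priority > r->priority)) {
            QTAILQ_INSERT_BEFORE(r, new_r, link);
            return;
        }
    }
    QTAILQ_INSERT_TAIL(head, new_r, link);
}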
|
|
|
|
|
2017-09-21 10:51:06 +02:00
|
|
|
struct FlatViewInfo {
|
|
|
|
int counter;
|
|
|
|
bool dispatch_tree;
|
2018-06-04 05:25:11 +02:00
|
|
|
bool owner;
|
2019-06-14 03:52:37 +02:00
|
|
|
AccelClass *ac;
|
2017-09-21 10:51:06 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
static void mtree_print_flatview(gpointer key, gpointer value,
|
|
|
|
gpointer user_data)
|
2017-01-16 09:40:05 +01:00
|
|
|
{
|
2017-09-21 10:51:06 +02:00
|
|
|
FlatView *view = key;
|
|
|
|
GArray *fv_address_spaces = value;
|
|
|
|
struct FlatViewInfo *fvi = user_data;
|
2017-01-16 09:40:05 +01:00
|
|
|
FlatRange *range = &view->ranges[0];
|
|
|
|
MemoryRegion *mr;
|
|
|
|
int n = view->nr;
|
2017-09-21 10:51:06 +02:00
|
|
|
int i;
|
|
|
|
AddressSpace *as;
|
|
|
|
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf("FlatView #%d\n", fvi->counter);
|
2017-09-21 10:51:06 +02:00
|
|
|
++fvi->counter;
|
|
|
|
|
|
|
|
for (i = 0; i < fv_address_spaces->len; ++i) {
|
|
|
|
as = g_array_index(fv_address_spaces, AddressSpace*, i);
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf(" AS \"%s\", root: %s",
|
|
|
|
as->name, memory_region_name(as->root));
|
2017-09-21 10:51:06 +02:00
|
|
|
if (as->root->alias) {
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf(", alias %s", memory_region_name(as->root->alias));
|
2017-09-21 10:51:06 +02:00
|
|
|
}
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf("\n");
|
2017-09-21 10:51:06 +02:00
|
|
|
}
|
|
|
|
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf(" Root memory region: %s\n",
|
2017-09-21 10:51:06 +02:00
|
|
|
view->root ? memory_region_name(view->root) : "(none)");
|
2017-01-16 09:40:05 +01:00
|
|
|
|
|
|
|
if (n <= 0) {
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
|
2017-01-16 09:40:05 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (n--) {
|
|
|
|
mr = range->mr;
|
memory: show region offset and ROM/RAM type in "info mtree -f"
"info mtree -f" output is currently hard to use for large RAM regions, because
there is no hint as to what part of the region is being mapped. Add the offset
if it is nonzero.
Secondly, FlatView has a readonly field that can override the MemoryRegion
in the presence of aliases. Take it into account.
Together with this patch, this:
address-space (flat view): KVM-SMRAM
0000000000000000-00000000000bffff (prio 0, ram): pc.ram
00000000000c0000-00000000000c9fff (prio 0, ram): pc.ram
00000000000ca000-00000000000ccfff (prio 0, ram): pc.ram
00000000000cd000-00000000000ebfff (prio 0, ram): pc.ram
00000000000ec000-00000000000effff (prio 0, ram): pc.ram
00000000000f0000-00000000000fffff (prio 0, ram): pc.ram
0000000000100000-00000000bfffffff (prio 0, ram): pc.ram
00000000fd000000-00000000fdffffff (prio 1, ram): vga.vram
00000000febc0000-00000000febdffff (prio 1, i/o): e1000-mmio
00000000febf0400-00000000febf041f (prio 0, i/o): vga ioports remapped
00000000febf0500-00000000febf0515 (prio 0, i/o): bochs dispi interface
00000000febf0600-00000000febf0607 (prio 0, i/o): qemu extended regs
00000000fec00000-00000000fec00fff (prio 0, i/o): kvm-ioapic
00000000fed00000-00000000fed003ff (prio 0, i/o): hpet
00000000fee00000-00000000feefffff (prio 4096, i/o): kvm-apic-msi
00000000fffc0000-00000000ffffffff (prio 0, rom): pc.bios
0000000100000000-000000013fffffff (prio 0, ram): pc.ram
becomes this:
address-space (flat view): KVM-SMRAM
0000000000000000-00000000000bffff (prio 0, ram): pc.ram
00000000000c0000-00000000000c9fff (prio 0, rom): pc.ram @00000000000c0000
00000000000ca000-00000000000ccfff (prio 0, ram): pc.ram @00000000000ca000
00000000000cd000-00000000000ebfff (prio 0, rom): pc.ram @00000000000cd000
00000000000ec000-00000000000effff (prio 0, ram): pc.ram @00000000000ec000
00000000000f0000-00000000000fffff (prio 0, rom): pc.ram @00000000000f0000
0000000000100000-00000000bfffffff (prio 0, ram): pc.ram @0000000000100000
00000000fd000000-00000000fdffffff (prio 1, ram): vga.vram
00000000febc0000-00000000febdffff (prio 1, i/o): e1000-mmio
00000000febf0400-00000000febf041f (prio 0, i/o): vga ioports remapped
00000000febf0500-00000000febf0515 (prio 0, i/o): bochs dispi interface
00000000febf0600-00000000febf0607 (prio 0, i/o): qemu extended regs
00000000fec00000-00000000fec00fff (prio 0, i/o): kvm-ioapic
00000000fed00000-00000000fed003ff (prio 0, i/o): hpet
00000000fee00000-00000000feefffff (prio 4096, i/o): kvm-apic-msi
00000000fffc0000-00000000ffffffff (prio 0, rom): pc.bios
0000000100000000-000000013fffffff (prio 0, ram): pc.ram @00000000c0000000
This should make it easier to understand what's going on.
Cc: Peter Xu <peterx@redhat.com>
Cc: "William Tambe" <tambewilliam@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2017-03-02 22:49:41 +01:00
|
|
|
if (range->offset_in_region) {
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
|
|
|
|
" (prio %d, %s%s): %s @" TARGET_FMT_plx,
|
|
|
|
int128_get64(range->addr.start),
|
|
|
|
int128_get64(range->addr.start)
|
|
|
|
+ MR_SIZE(range->addr.size),
|
|
|
|
mr->priority,
|
|
|
|
range->nonvolatile ? "nv-" : "",
|
|
|
|
range->readonly ? "rom" : memory_region_type(mr),
|
|
|
|
memory_region_name(mr),
|
|
|
|
range->offset_in_region);
|
2017-03-02 22:49:41 +01:00
|
|
|
} else {
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
|
|
|
|
" (prio %d, %s%s): %s",
|
|
|
|
int128_get64(range->addr.start),
|
|
|
|
int128_get64(range->addr.start)
|
|
|
|
+ MR_SIZE(range->addr.size),
|
|
|
|
mr->priority,
|
|
|
|
range->nonvolatile ? "nv-" : "",
|
|
|
|
range->readonly ? "rom" : memory_region_type(mr),
|
|
|
|
memory_region_name(mr));
|
2017-03-02 22:49:41 +01:00
|
|
|
}
|
2018-06-04 05:25:11 +02:00
|
|
|
if (fvi->owner) {
|
2019-04-17 21:17:56 +02:00
|
|
|
mtree_print_mr_owner(mr);
|
2018-06-04 05:25:11 +02:00
|
|
|
}
|
2019-06-14 03:52:37 +02:00
|
|
|
|
|
|
|
if (fvi->ac) {
|
|
|
|
for (i = 0; i < fv_address_spaces->len; ++i) {
|
|
|
|
as = g_array_index(fv_address_spaces, AddressSpace*, i);
|
|
|
|
if (fvi->ac->has_memory(current_machine, as,
|
|
|
|
int128_get64(range->addr.start),
|
|
|
|
MR_SIZE(range->addr.size) + 1)) {
|
2019-11-13 11:50:03 +01:00
|
|
|
qemu_printf(" %s", fvi->ac->name);
|
2019-06-14 03:52:37 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf("\n");
|
2017-01-16 09:40:05 +01:00
|
|
|
range++;
|
|
|
|
}
|
|
|
|
|
2017-09-21 10:51:06 +02:00
|
|
|
#if !defined(CONFIG_USER_ONLY)
|
|
|
|
if (fvi->dispatch_tree && view->root) {
|
2019-04-17 21:17:56 +02:00
|
|
|
mtree_print_dispatch(view->dispatch, view->root);
|
2017-09-21 10:51:06 +02:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf("\n");
|
2017-09-21 10:51:06 +02:00
|
|
|
}
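mtree_print_flatview() is not called directly; it is handed to g_hash_table_foreach() as a GHFunc, so GLib invokes it once per (key, value) pair together with the caller's user_data pointer. A small, hedged sketch of that callback shape, with purely illustrative names and data:

#include <glib.h>

/* Illustrative GHFunc: the key/value types depend on how the table was
 * filled; here a string key maps to a GArray, loosely mirroring the
 * FlatView -> GArray-of-AddressSpace layout used above. */
static void print_entry(gpointer key, gpointer value, gpointer user_data)
{
    const char *name = key;
    GArray *items = value;
    int *counter = user_data;

    g_print("#%d: %s has %u item(s)\n", (*counter)++, name, items->len);
}

/* Typical call site:
 *     int counter = 0;
 *     g_hash_table_foreach(table, print_entry, &counter);
 */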
|
|
|
|
|
|
|
|
static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
|
|
|
|
gpointer user_data)
|
|
|
|
{
|
|
|
|
FlatView *view = key;
|
|
|
|
GArray *fv_address_spaces = value;
|
|
|
|
|
|
|
|
g_array_unref(fv_address_spaces);
|
2017-01-16 09:40:05 +01:00
|
|
|
flatview_unref(view);
|
2017-09-21 10:51:06 +02:00
|
|
|
|
|
|
|
return true;
|
2017-01-16 09:40:05 +01:00
|
|
|
}
|
|
|
|
|
2021-09-01 17:45:48 +02:00
|
|
|
static void mtree_info_flatview(bool dispatch_tree, bool owner)
|
2011-09-11 22:22:05 +02:00
|
|
|
{
|
2021-09-01 17:45:48 +02:00
|
|
|
struct FlatViewInfo fvi = {
|
|
|
|
.counter = 0,
|
|
|
|
.dispatch_tree = dispatch_tree,
|
|
|
|
.owner = owner,
|
|
|
|
};
|
2012-10-02 15:28:50 +02:00
|
|
|
AddressSpace *as;
|
2021-09-01 17:45:48 +02:00
|
|
|
FlatView *view;
|
|
|
|
GArray *fv_address_spaces;
|
|
|
|
GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
|
|
|
|
AccelClass *ac = ACCEL_GET_CLASS(current_accel());
|
2011-09-11 22:22:05 +02:00
|
|
|
|
2021-09-01 17:45:48 +02:00
|
|
|
if (ac->has_memory) {
|
|
|
|
fvi.ac = ac;
|
|
|
|
}
|
2017-09-21 10:51:06 +02:00
|
|
|
|
2021-09-01 17:45:48 +02:00
|
|
|
/* Gather all FVs in one table */
|
|
|
|
QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
|
|
|
|
view = address_space_get_flatview(as);
|
2017-09-21 10:51:06 +02:00
|
|
|
|
2021-09-01 17:45:48 +02:00
|
|
|
fv_address_spaces = g_hash_table_lookup(views, view);
|
|
|
|
if (!fv_address_spaces) {
|
|
|
|
fv_address_spaces = g_array_new(false, false, sizeof(as));
|
|
|
|
g_hash_table_insert(views, view, fv_address_spaces);
|
2017-01-16 09:40:05 +01:00
|
|
|
}
|
2017-09-21 10:51:06 +02:00
|
|
|
|
2021-09-01 17:45:48 +02:00
|
|
|
g_array_append_val(fv_address_spaces, as);
|
|
|
|
}
|
2017-09-21 10:51:06 +02:00
|
|
|
|
2021-09-01 17:45:48 +02:00
|
|
|
/* Print */
|
|
|
|
g_hash_table_foreach(views, mtree_print_flatview, &fvi);
|
2017-09-21 10:51:06 +02:00
|
|
|
|
2021-09-01 17:45:48 +02:00
|
|
|
/* Free */
|
|
|
|
g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
|
|
|
|
g_hash_table_unref(views);
|
|
|
|
}
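The function above gathers FlatViews in a pointer-keyed GHashTable (g_direct_hash/g_direct_equal) whose values are GArrays of AddressSpace pointers, then prints and frees everything through g_hash_table_foreach() and g_hash_table_foreach_remove(). A self-contained sketch of the same GLib grouping idiom follows; the data is illustrative only and the QEMU types are not used.

#include <glib.h>

static gboolean free_group(gpointer key, gpointer value, gpointer user_data)
{
    g_array_unref(value);   /* drop the per-key bucket */
    return TRUE;            /* returning TRUE removes the entry */
}

static void group_by_key(gpointer *keys, gpointer *items, guint n)
{
    GHashTable *groups = g_hash_table_new(g_direct_hash, g_direct_equal);
    guint i;

    for (i = 0; i < n; i++) {
        GArray *bucket = g_hash_table_lookup(groups, keys[i]);

        if (!bucket) {
            bucket = g_array_new(FALSE, FALSE, sizeof(gpointer));
            g_hash_table_insert(groups, keys[i], bucket);
        }
        g_array_append_val(bucket, items[i]);
    }

    /* consume the groups, e.g. g_hash_table_foreach(groups, cb, state); */

    g_hash_table_foreach_remove(groups, free_group, NULL);
    g_hash_table_unref(groups);
}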
|
|
|
|
|
memory: Have 'info mtree' remove duplicated Address Space information
Per Peter Maydell [*]:
'info mtree' monitor command was designed on the assumption that
there's really only one or two interesting address spaces, and
with more recent developments that's just not the case any more.
Similarly to how the FlatViews are sorted using a GHashTable,
sort the AddressSpace objects to remove the duplicates (address
spaces that share the same root MemoryRegion).
This drastically reduces the output of 'info mtree' on some boards.
Before:
$ (echo info mtree; echo q) \
| qemu-system-aarch64 -S -monitor stdio -M raspi3b \
| wc -l
423
After:
$ (echo info mtree; echo q) \
| qemu-system-aarch64 -S -monitor stdio -M raspi3b \
| wc -l
106
(qemu) info mtree
address-space: I/O
0000000000000000-000000000000ffff (prio 0, i/o): io
address-space: cpu-memory-0
address-space: cpu-memory-1
address-space: cpu-memory-2
address-space: cpu-memory-3
address-space: cpu-secure-memory-0
address-space: cpu-secure-memory-1
address-space: cpu-secure-memory-2
address-space: cpu-secure-memory-3
address-space: memory
0000000000000000-ffffffffffffffff (prio 0, i/o): system
0000000000000000-000000003fffffff (prio 0, ram): ram
000000003f000000-000000003fffffff (prio 1, i/o): bcm2835-peripherals
000000003f003000-000000003f00301f (prio 0, i/o): bcm2835-sys-timer
000000003f004000-000000003f004fff (prio -1000, i/o): bcm2835-txp
000000003f006000-000000003f006fff (prio 0, i/o): mphi
000000003f007000-000000003f007fff (prio 0, i/o): bcm2835-dma
000000003f00b200-000000003f00b3ff (prio 0, i/o): bcm2835-ic
000000003f00b400-000000003f00b43f (prio -1000, i/o): bcm2835-sp804
000000003f00b800-000000003f00bbff (prio 0, i/o): bcm2835-mbox
000000003f100000-000000003f1001ff (prio 0, i/o): bcm2835-powermgt
000000003f101000-000000003f102fff (prio 0, i/o): bcm2835-cprman
000000003f104000-000000003f10400f (prio 0, i/o): bcm2835-rng
000000003f200000-000000003f200fff (prio 0, i/o): bcm2835_gpio
000000003f201000-000000003f201fff (prio 0, i/o): pl011
000000003f202000-000000003f202fff (prio 0, i/o): bcm2835-sdhost
000000003f203000-000000003f2030ff (prio -1000, i/o): bcm2835-i2s
000000003f204000-000000003f20401f (prio -1000, i/o): bcm2835-spi0
000000003f205000-000000003f20501f (prio -1000, i/o): bcm2835-i2c0
000000003f20f000-000000003f20f07f (prio -1000, i/o): bcm2835-otp
000000003f212000-000000003f212007 (prio 0, i/o): bcm2835-thermal
000000003f214000-000000003f2140ff (prio -1000, i/o): bcm2835-spis
000000003f215000-000000003f2150ff (prio 0, i/o): bcm2835-aux
000000003f300000-000000003f3000ff (prio 0, i/o): sdhci
000000003f600000-000000003f6000ff (prio -1000, i/o): bcm2835-smi
000000003f804000-000000003f80401f (prio -1000, i/o): bcm2835-i2c1
000000003f805000-000000003f80501f (prio -1000, i/o): bcm2835-i2c2
000000003f900000-000000003f907fff (prio -1000, i/o): bcm2835-dbus
000000003f910000-000000003f917fff (prio -1000, i/o): bcm2835-ave0
000000003f980000-000000003f990fff (prio 0, i/o): dwc2
000000003f980000-000000003f980fff (prio 0, i/o): dwc2-io
000000003f981000-000000003f990fff (prio 0, i/o): dwc2-fifo
000000003fc00000-000000003fc00fff (prio -1000, i/o): bcm2835-v3d
000000003fe00000-000000003fe000ff (prio -1000, i/o): bcm2835-sdramc
000000003fe05000-000000003fe050ff (prio 0, i/o): bcm2835-dma-chan15
0000000040000000-00000000400000ff (prio 0, i/o): bcm2836-control
address-space: bcm2835-dma-memory
address-space: bcm2835-fb-memory
address-space: bcm2835-property-memory
address-space: dwc2
0000000000000000-00000000ffffffff (prio 0, i/o): bcm2835-gpu
0000000000000000-000000003fffffff (prio 0, ram): alias bcm2835-gpu-ram-alias[*] @ram 0000000000000000-000000003fffffff
0000000040000000-000000007fffffff (prio 0, ram): alias bcm2835-gpu-ram-alias[*] @ram 0000000000000000-000000003fffffff
000000007e000000-000000007effffff (prio 1, i/o): alias bcm2835-peripherals @bcm2835-peripherals 0000000000000000-0000000000ffffff
0000000080000000-00000000bfffffff (prio 0, ram): alias bcm2835-gpu-ram-alias[*] @ram 0000000000000000-000000003fffffff
00000000c0000000-00000000ffffffff (prio 0, ram): alias bcm2835-gpu-ram-alias[*] @ram 0000000000000000-000000003fffffff
address-space: bcm2835-mbox-memory
0000000000000000-000000000000008f (prio 0, i/o): bcm2835-mbox
0000000000000010-000000000000001f (prio 0, i/o): bcm2835-fb
0000000000000080-000000000000008f (prio 0, i/o): bcm2835-property
memory-region: ram
0000000000000000-000000003fffffff (prio 0, ram): ram
memory-region: bcm2835-peripherals
000000003f000000-000000003fffffff (prio 1, i/o): bcm2835-peripherals
000000003f003000-000000003f00301f (prio 0, i/o): bcm2835-sys-timer
000000003f004000-000000003f004fff (prio -1000, i/o): bcm2835-txp
000000003f006000-000000003f006fff (prio 0, i/o): mphi
000000003f007000-000000003f007fff (prio 0, i/o): bcm2835-dma
000000003f00b200-000000003f00b3ff (prio 0, i/o): bcm2835-ic
000000003f00b400-000000003f00b43f (prio -1000, i/o): bcm2835-sp804
000000003f00b800-000000003f00bbff (prio 0, i/o): bcm2835-mbox
000000003f100000-000000003f1001ff (prio 0, i/o): bcm2835-powermgt
000000003f101000-000000003f102fff (prio 0, i/o): bcm2835-cprman
000000003f104000-000000003f10400f (prio 0, i/o): bcm2835-rng
000000003f200000-000000003f200fff (prio 0, i/o): bcm2835_gpio
000000003f201000-000000003f201fff (prio 0, i/o): pl011
000000003f202000-000000003f202fff (prio 0, i/o): bcm2835-sdhost
000000003f203000-000000003f2030ff (prio -1000, i/o): bcm2835-i2s
000000003f204000-000000003f20401f (prio -1000, i/o): bcm2835-spi0
000000003f205000-000000003f20501f (prio -1000, i/o): bcm2835-i2c0
000000003f20f000-000000003f20f07f (prio -1000, i/o): bcm2835-otp
000000003f212000-000000003f212007 (prio 0, i/o): bcm2835-thermal
000000003f214000-000000003f2140ff (prio -1000, i/o): bcm2835-spis
000000003f215000-000000003f2150ff (prio 0, i/o): bcm2835-aux
000000003f300000-000000003f3000ff (prio 0, i/o): sdhci
000000003f600000-000000003f6000ff (prio -1000, i/o): bcm2835-smi
000000003f804000-000000003f80401f (prio -1000, i/o): bcm2835-i2c1
000000003f805000-000000003f80501f (prio -1000, i/o): bcm2835-i2c2
000000003f900000-000000003f907fff (prio -1000, i/o): bcm2835-dbus
000000003f910000-000000003f917fff (prio -1000, i/o): bcm2835-ave0
000000003f980000-000000003f990fff (prio 0, i/o): dwc2
000000003f980000-000000003f980fff (prio 0, i/o): dwc2-io
000000003f981000-000000003f990fff (prio 0, i/o): dwc2-fifo
000000003fc00000-000000003fc00fff (prio -1000, i/o): bcm2835-v3d
000000003fe00000-000000003fe000ff (prio -1000, i/o): bcm2835-sdramc
000000003fe05000-000000003fe050ff (prio 0, i/o): bcm2835-dma-chan15
(qemu) q
[*] https://www.mail-archive.com/qemu-devel@nongnu.org/msg829821.html
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-Id: <20210904231101.1071929-2-philmd@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
2021-08-20 12:34:14 +02:00
|
|
|
struct AddressSpaceInfo {
|
|
|
|
MemoryRegionListHead *ml_head;
|
|
|
|
bool owner;
|
|
|
|
bool disabled;
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Returns negative value if a < b; zero if a = b; positive value if a > b. */
|
|
|
|
static gint address_space_compare_name(gconstpointer a, gconstpointer b)
|
|
|
|
{
|
|
|
|
const AddressSpace *as_a = a;
|
|
|
|
const AddressSpace *as_b = b;
|
|
|
|
|
|
|
|
return g_strcmp0(as_a->name, as_b->name);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mtree_print_as_name(gpointer data, gpointer user_data)
|
|
|
|
{
|
|
|
|
AddressSpace *as = data;
|
|
|
|
|
|
|
|
qemu_printf("address-space: %s\n", as->name);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mtree_print_as(gpointer key, gpointer value, gpointer user_data)
|
|
|
|
{
|
|
|
|
MemoryRegion *mr = key;
|
|
|
|
GSList *as_same_root_mr_list = value;
|
|
|
|
struct AddressSpaceInfo *asi = user_data;
|
|
|
|
|
|
|
|
g_slist_foreach(as_same_root_mr_list, mtree_print_as_name, NULL);
|
|
|
|
mtree_print_mr(mr, 1, 0, asi->ml_head, asi->owner, asi->disabled);
|
|
|
|
qemu_printf("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
static gboolean mtree_info_as_free(gpointer key, gpointer value,
|
|
|
|
gpointer user_data)
|
|
|
|
{
|
|
|
|
GSList *as_same_root_mr_list = value;
|
|
|
|
|
|
|
|
g_slist_free(as_same_root_mr_list);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
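mtree_info_as() below follows the same grouping pattern as the FlatView code, but each bucket is a GSList kept sorted by address-space name as it is built, using g_slist_insert_sorted() with the g_strcmp0()-based comparator above. A hedged, minimal sketch of that idiom (the string data is illustrative only):

#include <glib.h>

static gint compare_names(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);    /* NULL-safe strcmp */
}

static GSList *insert_name_sorted(GSList *bucket, const char *name)
{
    /* g_slist_insert_sorted() returns the (possibly new) head of the list. */
    return g_slist_insert_sorted(bucket, (gpointer)name, compare_names);
}

/* Tear-down mirrors mtree_info_as_free() above: g_slist_free(bucket); */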
|
|
|
|
|
2021-09-01 17:45:48 +02:00
|
|
|
static void mtree_info_as(bool dispatch_tree, bool owner, bool disabled)
|
|
|
|
{
|
|
|
|
MemoryRegionListHead ml_head;
|
|
|
|
MemoryRegionList *ml, *ml2;
|
|
|
|
AddressSpace *as;
|
2021-08-20 12:34:14 +02:00
|
|
|
GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
|
|
|
|
GSList *as_same_root_mr_list;
|
|
|
|
struct AddressSpaceInfo asi = {
|
|
|
|
.ml_head = &ml_head,
|
|
|
|
.owner = owner,
|
|
|
|
.disabled = disabled,
|
|
|
|
};
|
2017-01-16 09:40:05 +01:00
|
|
|
|
2011-09-11 22:22:05 +02:00
|
|
|
QTAILQ_INIT(&ml_head);
|
|
|
|
|
2012-10-02 15:28:50 +02:00
|
|
|
QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
|
2021-08-20 12:34:14 +02:00
|
|
|
/* Create hashtable, key=AS root MR, value = list of AS */
|
|
|
|
as_same_root_mr_list = g_hash_table_lookup(views, as->root);
|
|
|
|
as_same_root_mr_list = g_slist_insert_sorted(as_same_root_mr_list, as,
|
|
|
|
address_space_compare_name);
|
|
|
|
g_hash_table_insert(views, as->root, as_same_root_mr_list);
|
2012-03-10 17:58:35 +01:00
|
|
|
}
|
|
|
|
|
2021-08-20 12:34:14 +02:00
|
|
|
/* print address spaces */
|
|
|
|
g_hash_table_foreach(views, mtree_print_as, &asi);
|
|
|
|
g_hash_table_foreach_remove(views, mtree_info_as_free, 0);
|
|
|
|
g_hash_table_unref(views);
|
|
|
|
|
2011-09-11 22:22:05 +02:00
|
|
|
/* print aliased regions */
|
2017-09-03 18:33:04 +02:00
|
|
|
QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
|
0000000000000000-ffffffffffffffff (prio -1, i/o): pci
00000000000a0000-00000000000bffff (prio 1, i/o): vga-lowmem
00000000000c0000-00000000000dffff (prio 1, rom): pc.rom
00000000000e0000-00000000000fffff (prio 1, rom): alias isa-bios @pc.bios 0000000000020000-000000000003ffff
00000000fffc0000-00000000ffffffff (prio 0, rom): pc.bios
00000000000a0000-00000000000bffff (prio 1, i/o): alias smram-region @pci 00000000000a0000-00000000000bffff
00000000000c0000-00000000000c3fff (prio 1, i/o): alias pam-pci @pci 00000000000c0000-00000000000c3fff
00000000000c4000-00000000000c7fff (prio 1, i/o): alias pam-pci @pci 00000000000c4000-00000000000c7fff
00000000000c8000-00000000000cbfff (prio 1, i/o): alias pam-pci @pci 00000000000c8000-00000000000cbfff
00000000000cc000-00000000000cffff (prio 1, i/o): alias pam-pci @pci 00000000000cc000-00000000000cffff
00000000000d0000-00000000000d3fff (prio 1, i/o): alias pam-pci @pci 00000000000d0000-00000000000d3fff
00000000000d4000-00000000000d7fff (prio 1, i/o): alias pam-pci @pci 00000000000d4000-00000000000d7fff
00000000000d8000-00000000000dbfff (prio 1, i/o): alias pam-pci @pci 00000000000d8000-00000000000dbfff
00000000000dc000-00000000000dffff (prio 1, i/o): alias pam-pci @pci 00000000000dc000-00000000000dffff
00000000000e0000-00000000000e3fff (prio 1, i/o): alias pam-pci @pci 00000000000e0000-00000000000e3fff
00000000000e4000-00000000000e7fff (prio 1, i/o): alias pam-pci @pci 00000000000e4000-00000000000e7fff
00000000000e8000-00000000000ebfff (prio 1, i/o): alias pam-pci @pci 00000000000e8000-00000000000ebfff
00000000000ec000-00000000000effff (prio 1, i/o): alias pam-pci @pci 00000000000ec000-00000000000effff
00000000000f0000-00000000000fffff (prio 1, i/o): alias pam-pci @pci 00000000000f0000-00000000000fffff
00000000fec00000-00000000fec00fff (prio 0, i/o): ioapic
00000000fed00000-00000000fed003ff (prio 0, i/o): hpet
00000000fee00000-00000000feefffff (prio 4096, i/o): apic-msi
The old behavior is preserved using 'info mtree -D'.
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2020-05-29 14:53:25 +02:00
|
|
|
mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled);
|
2019-04-17 21:17:56 +02:00
|
|
|
qemu_printf("\n");
|
2011-09-11 22:22:05 +02:00
|
|
|
}
|
|
|
|
|
2017-09-03 18:33:04 +02:00
|
|
|
QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
|
2011-11-13 11:00:55 +01:00
|
|
|
g_free(ml);
|
2011-09-11 22:22:05 +02:00
|
|
|
}
|
|
|
|
}
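The '-D' monitor option arrives here as the 'disabled' argument passed to mtree_print_mr() above. A minimal sketch of the filtering decision it enables, assuming direct access to mr->enabled as memory.c does internally (not the verbatim upstream check):

/* Sketch: decide whether a region should appear in 'info mtree' output. */
static bool mtree_should_print(const MemoryRegion *mr, bool display_disabled)
{
    /* By default, hide regions that are currently disabled; "-D" shows them. */
    return mr->enabled || display_disabled;
}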
|
2014-06-06 08:15:52 +02:00
|
|
|
|
2021-09-01 17:45:48 +02:00
|
|
|
void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
|
|
|
|
{
|
|
|
|
if (flatview) {
|
|
|
|
mtree_info_flatview(dispatch_tree, owner);
|
|
|
|
} else {
|
|
|
|
mtree_info_as(dispatch_tree, owner, disabled);
|
|
|
|
}
|
|
|
|
}
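For context, a sketch of how a monitor (HMP) handler could translate the 'info mtree' flags into this call; the helper name and QDict keys are assumptions, not the actual monitor code:

#include "qemu/osdep.h"
#include "exec/memory.h"
#include "monitor/monitor.h"
#include "qapi/qmp/qdict.h"

/* Hypothetical HMP glue: forward command flags to mtree_info(). */
static void demo_hmp_info_mtree(Monitor *mon, const QDict *qdict)
{
    bool flatview = qdict_get_try_bool(qdict, "flatview", false);
    bool dispatch_tree = qdict_get_try_bool(qdict, "dispatch_tree", false);
    bool owner = qdict_get_try_bool(qdict, "owner", false);
    bool disabled = qdict_get_try_bool(qdict, "disabled", false);

    mtree_info(flatview, dispatch_tree, owner, disabled);
}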
|
|
|
|
|
2017-07-07 16:42:51 +02:00
|
|
|
void memory_region_init_ram(MemoryRegion *mr,
|
2021-02-25 19:20:03 +01:00
|
|
|
Object *owner,
|
2017-07-07 16:42:51 +02:00
|
|
|
const char *name,
|
|
|
|
uint64_t size,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
DeviceState *owner_dev;
|
|
|
|
Error *err = NULL;
|
|
|
|
|
|
|
|
memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
|
|
|
|
if (err) {
|
|
|
|
error_propagate(errp, err);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
/* This will assert if owner is neither NULL nor a DeviceState.
|
|
|
|
* We only want the owner here for the purposes of defining a
|
|
|
|
* unique name for migration. TODO: Ideally we should implement
|
|
|
|
* a naming scheme for Objects which are not DeviceStates, in
|
|
|
|
* which case we can relax this restriction.
|
|
|
|
*/
|
|
|
|
owner_dev = DEVICE(owner);
|
|
|
|
vmstate_register_ram(mr, owner_dev);
|
|
|
|
}
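A usage sketch for this helper: a device that embeds a small, migratable RAM region. The "DemoDeviceState" type, the region name and the 64 KiB size are assumptions made for this example only:

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "exec/memory.h"
#include "hw/qdev-core.h"

/* Hypothetical device state embedding a RAM region (illustration only). */
typedef struct DemoDeviceState {
    DeviceState parent_obj;
    MemoryRegion ram;
} DemoDeviceState;

/* Sketch of a realize hook that creates migratable device RAM. */
static void demo_device_realize(DeviceState *dev, Error **errp)
{
    DemoDeviceState *s = (DemoDeviceState *)dev;

    /* Owner is a DeviceState, so vmstate_register_ram() gets a unique name. */
    memory_region_init_ram(&s->ram, OBJECT(dev), "demo-device.ram",
                           64 * KiB, errp);
}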
|
|
|
|
|
|
|
|
void memory_region_init_rom(MemoryRegion *mr,
|
2021-02-25 19:20:03 +01:00
|
|
|
Object *owner,
|
2017-07-07 16:42:51 +02:00
|
|
|
const char *name,
|
|
|
|
uint64_t size,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
DeviceState *owner_dev;
|
|
|
|
Error *err = NULL;
|
|
|
|
|
|
|
|
memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
|
|
|
|
if (err) {
|
|
|
|
error_propagate(errp, err);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
/* This will assert if owner is neither NULL nor a DeviceState.
|
|
|
|
* We only want the owner here for the purposes of defining a
|
|
|
|
* unique name for migration. TODO: Ideally we should implement
|
|
|
|
* a naming scheme for Objects which are not DeviceStates, in
|
|
|
|
* which case we can relax this restriction.
|
|
|
|
*/
|
|
|
|
owner_dev = DEVICE(owner);
|
|
|
|
vmstate_register_ram(mr, owner_dev);
|
|
|
|
}
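A compact usage sketch for the read-only variant; the region name and the 256 KiB size are illustrative:

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "exec/memory.h"

/* Hypothetical helper: create a migratable, read-only firmware region. */
static void demo_create_bios(MemoryRegion *bios, Object *owner, Error **errp)
{
    memory_region_init_rom(bios, owner, "demo.bios", 256 * KiB, errp);
}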
|
|
|
|
|
|
|
|
void memory_region_init_rom_device(MemoryRegion *mr,
|
2021-02-25 19:20:03 +01:00
|
|
|
Object *owner,
|
2017-07-07 16:42:51 +02:00
|
|
|
const MemoryRegionOps *ops,
|
|
|
|
void *opaque,
|
|
|
|
const char *name,
|
|
|
|
uint64_t size,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
DeviceState *owner_dev;
|
|
|
|
Error *err = NULL;
|
|
|
|
|
|
|
|
memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
|
|
|
|
name, size, &err);
|
|
|
|
if (err) {
|
|
|
|
error_propagate(errp, err);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
/* This will assert if owner is neither NULL nor a DeviceState.
|
|
|
|
* We only want the owner here for the purposes of defining a
|
|
|
|
* unique name for migration. TODO: Ideally we should implement
|
|
|
|
* a naming scheme for Objects which are not DeviceStates, in
|
|
|
|
* which case we can relax this restriction.
|
|
|
|
*/
|
|
|
|
owner_dev = DEVICE(owner);
|
|
|
|
vmstate_register_ram(mr, owner_dev);
|
|
|
|
}
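A usage sketch for the ROM-device variant: reads are served from the backing RAM while in ROMD mode, and writes always trap into the callbacks. The "demo_flash_*" names, the opaque state pointer and the 128 KiB size are assumptions for this example:

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "exec/memory.h"

/* Only used when ROMD mode is switched off; normal reads hit the backing RAM. */
static uint64_t demo_flash_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0;
}

/* Writes always trap here; a real device would run its programming logic. */
static void demo_flash_write(void *opaque, hwaddr addr,
                             uint64_t value, unsigned size)
{
}

static const MemoryRegionOps demo_flash_ops = {
    .read = demo_flash_read,
    .write = demo_flash_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Hypothetical helper wiring the ops and opaque state into the region. */
static void demo_flash_create(MemoryRegion *mr, Object *owner, void *state,
                              Error **errp)
{
    memory_region_init_rom_device(mr, owner, &demo_flash_ops, state,
                                  "demo.flash", 128 * KiB, errp);
}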
|
|
|
|
|
2020-10-23 17:07:34 +02:00
|
|
|
/*
|
|
|
|
* Support softmmu builds with CONFIG_FUZZ using a weak symbol and a stub for
|
|
|
|
* the fuzz_dma_read_cb callback
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_FUZZ
|
|
|
|
void __attribute__((weak)) fuzz_dma_read_cb(size_t addr,
|
|
|
|
size_t len,
|
2021-01-20 07:02:55 +01:00
|
|
|
MemoryRegion *mr)
|
2020-10-23 17:07:34 +02:00
|
|
|
{
|
|
|
|
}
|
|
|
|
#endif
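Because the stub above is declared weak, a fuzzer build can link in its own strong definition; a sketch of such an override (the body is an assumption):

/* Illustrative strong override; replaces the weak stub at link time. */
void fuzz_dma_read_cb(size_t addr, size_t len, MemoryRegion *mr)
{
    /*
     * A real fuzzer would record (addr, len, mr) and fill the buffer with
     * fuzzer-controlled data before the device consumes it.
     */
}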
|
|
|
|
|
2014-06-06 08:15:52 +02:00
|
|
|
static const TypeInfo memory_region_info = {
|
|
|
|
.parent = TYPE_OBJECT,
|
|
|
|
.name = TYPE_MEMORY_REGION,
|
2019-08-12 07:23:34 +02:00
|
|
|
.class_size = sizeof(MemoryRegionClass),
|
2014-06-06 08:15:52 +02:00
|
|
|
.instance_size = sizeof(MemoryRegion),
|
|
|
|
.instance_init = memory_region_initfn,
|
|
|
|
.instance_finalize = memory_region_finalize,
|
|
|
|
};
|
|
|
|
|
2017-07-11 05:56:19 +02:00
|
|
|
static const TypeInfo iommu_memory_region_info = {
|
|
|
|
.parent = TYPE_MEMORY_REGION,
|
|
|
|
.name = TYPE_IOMMU_MEMORY_REGION,
|
2017-07-11 05:56:20 +02:00
|
|
|
.class_size = sizeof(IOMMUMemoryRegionClass),
|
2017-07-11 05:56:19 +02:00
|
|
|
.instance_size = sizeof(IOMMUMemoryRegion),
|
|
|
|
.instance_init = iommu_memory_region_initfn,
|
2017-07-11 05:56:20 +02:00
|
|
|
.abstract = true,
|
2017-07-11 05:56:19 +02:00
|
|
|
};
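Because TYPE_IOMMU_MEMORY_REGION is abstract, users provide concrete subclasses. A minimal sketch of one, with an identity-mapping translate callback and 4 KiB pages; the type name and callback bodies are assumptions:

#include "qemu/osdep.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

/* Hypothetical translate hook: identity-map every 4 KiB page, read/write. */
static IOMMUTLBEntry demo_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
                                          IOMMUAccessFlags flag, int iommu_idx)
{
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr & ~(hwaddr)0xfff,
        .translated_addr = addr & ~(hwaddr)0xfff,   /* 1:1 mapping */
        .addr_mask = 0xfff,                         /* 4 KiB pages */
        .perm = IOMMU_RW,
    };
    return entry;
}

static void demo_iommu_mr_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = demo_iommu_translate;
}

static const TypeInfo demo_iommu_mr_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = "demo-iommu-memory-region",             /* assumed type name */
    .class_init = demo_iommu_mr_class_init,
};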
|
|
|
|
|
memory: Introduce RamDiscardManager for RAM memory regions
We have some special RAM memory regions (managed by virtio-mem), whereby
the guest agreed to only use selected memory ranges. "unused" parts are
discarded so they won't consume memory - to logically unplug these memory
ranges. Before the VM is allowed to use such logically unplugged memory
again, coordination with the hypervisor is required.
This results in "sparse" mmaps/RAMBlocks/memory regions, whereby only
coordinated parts are valid to be used/accessed by the VM.
In most cases, we don't care about that - e.g., in KVM, we simply have a
single KVM memory slot. However, in case of vfio, registering the
whole region with the kernel results in all pages getting pinned, and
therefore an unexpected high memory consumption - discarding of RAM in
that context is broken.
Let's introduce a way to coordinate discarding/populating memory within a
RAM memory region with such special consumers of RAM memory regions: they
can register as listeners and get updates on memory getting discarded and
populated. Using this machinery, vfio will be able to map only the
currently populated parts, resulting in discarded parts not getting pinned
and not consuming memory.
A RamDiscardManager has to be set for a memory region before it is
mapped, and cannot change while the memory region is mapped.
Note: At some point, we might want to let RAMBlock users (esp. vfio used
for nvme://) consume this interface as well. We'll need RAMBlock notifier
calls when a RAMBlock is getting mapped/unmapped (via the corresponding
memory region), so we can properly register a listener there as well.
Reviewed-by: Pankaj Gupta <pankaj.gupta@cloud.ionos.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Dr. David Alan Gilbert <dgilbert@redhat.com>
Cc: Igor Mammedov <imammedo@redhat.com>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Auger Eric <eric.auger@redhat.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: teawater <teawaterz@linux.alibaba.com>
Cc: Marek Kedzierski <mkedzier@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20210413095531.25603-2-david@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2021-04-13 11:55:19 +02:00
|
|
|
static const TypeInfo ram_discard_manager_info = {
|
|
|
|
.parent = TYPE_INTERFACE,
|
|
|
|
.name = TYPE_RAM_DISCARD_MANAGER,
|
|
|
|
.class_size = sizeof(RamDiscardManagerClass),
|
|
|
|
};
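A hedged sketch of how a consumer such as a vfio-style mapper might consult a RamDiscardManager before pinning a section; the accessor names follow this patch series and are listed here as assumptions:

#include "qemu/osdep.h"
#include "exec/memory.h"

/* Hypothetical check: only populated parts of an RDM-managed region get mapped. */
static bool demo_section_is_safe_to_map(MemoryRegionSection *section)
{
    RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);

    if (!rdm) {
        return true;    /* no discard coordination needed for this region */
    }
    return ram_discard_manager_is_populated(rdm, section);
}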
|
|
|
|
|
2014-06-06 08:15:52 +02:00
|
|
|
static void memory_register_types(void)
|
|
|
|
{
|
|
|
|
type_register_static(&memory_region_info);
|
2017-07-11 05:56:19 +02:00
|
|
|
type_register_static(&iommu_memory_region_info);
|
memory: Introduce RamDiscardManager for RAM memory regions
2021-04-13 11:55:19 +02:00
|
|
|
type_register_static(&ram_discard_manager_info);
|
2014-06-06 08:15:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
type_init(memory_register_types)
|