vhost+postcopy: Stash RAMBlock and offset
Stash the RAMBlock and offset for later use when looking up addresses.

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
parent
9bb3801994
commit
905125d0e2
@ -9,6 +9,7 @@ vhost_section(const char *name, int r) "%s:%d"
|
||||
# hw/virtio/vhost-user.c
|
||||
vhost_user_postcopy_listen(void) ""
|
||||
vhost_user_set_mem_table_postcopy(uint64_t client_addr, uint64_t qhva, int reply_i, int region_i) "client:0x%"PRIx64" for hva: 0x%"PRIx64" reply %d region %d"
|
||||
vhost_user_set_mem_table_withfd(int index, const char *name, uint64_t memory_size, uint64_t guest_phys_addr, uint64_t userspace_addr, uint64_t offset) "%d:%s: size:0x%"PRIx64" GPA:0x%"PRIx64" QVA/userspace:0x%"PRIx64" RB offset:0x%"PRIx64
|
||||
|
||||
# hw/virtio/virtio.c
|
||||
virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u"
|
||||
|
@ -175,6 +175,15 @@ struct vhost_user {
|
||||
NotifierWithReturn postcopy_notifier;
|
||||
struct PostCopyFD postcopy_fd;
|
||||
uint64_t postcopy_client_bases[VHOST_MEMORY_MAX_NREGIONS];
|
||||
/* Length of the region_rb and region_rb_offset arrays */
|
||||
size_t region_rb_len;
|
||||
/* RAMBlock associated with a given region */
|
||||
RAMBlock **region_rb;
|
||||
/* The offset from the start of the RAMBlock to the start of the
|
||||
* vhost region.
|
||||
*/
|
||||
ram_addr_t *region_rb_offset;
|
||||
|
||||
/* True once we've entered postcopy_listen */
|
||||
bool postcopy_listen;
|
||||
};
|
||||
@ -362,6 +371,17 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
|
||||
msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
|
||||
}
|
||||
|
||||
if (u->region_rb_len < dev->mem->nregions) {
|
||||
u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
|
||||
u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
|
||||
dev->mem->nregions);
|
||||
memset(&(u->region_rb[u->region_rb_len]), '\0',
|
||||
sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
|
||||
memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
|
||||
sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
|
||||
u->region_rb_len = dev->mem->nregions;
|
||||
}
|
||||
|
||||
for (i = 0; i < dev->mem->nregions; ++i) {
|
||||
struct vhost_memory_region *reg = dev->mem->regions + i;
|
||||
ram_addr_t offset;
|
||||
@ -372,6 +392,12 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
|
||||
&offset);
|
||||
fd = memory_region_get_fd(mr);
|
||||
if (fd > 0) {
|
||||
trace_vhost_user_set_mem_table_withfd(fd_num, mr->name,
|
||||
reg->memory_size,
|
||||
reg->guest_phys_addr,
|
||||
reg->userspace_addr, offset);
|
||||
u->region_rb_offset[i] = offset;
|
||||
u->region_rb[i] = mr->ram_block;
|
||||
msg.payload.memory.regions[fd_num].userspace_addr =
|
||||
reg->userspace_addr;
|
||||
msg.payload.memory.regions[fd_num].memory_size = reg->memory_size;
|
||||
@ -380,6 +406,9 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
|
||||
msg.payload.memory.regions[fd_num].mmap_offset = offset;
|
||||
assert(fd_num < VHOST_MEMORY_MAX_NREGIONS);
|
||||
fds[fd_num++] = fd;
|
||||
} else {
|
||||
u->region_rb_offset[i] = 0;
|
||||
u->region_rb[i] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1148,6 +1177,11 @@ static int vhost_user_cleanup(struct vhost_dev *dev)
|
||||
close(u->slave_fd);
|
||||
u->slave_fd = -1;
|
||||
}
|
||||
g_free(u->region_rb);
|
||||
u->region_rb = NULL;
|
||||
g_free(u->region_rb_offset);
|
||||
u->region_rb_offset = NULL;
|
||||
u->region_rb_len = 0;
|
||||
g_free(u);
|
||||
dev->opaque = 0;
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user