util/vfio-helpers: Rework the IOVA allocator to avoid IOVA reserved regions

Introduce the qemu_vfio_find_fixed/temp_iova helpers, which
respectively allocate IOVAs from the bottom/top of the usable
IOVA ranges, without picking addresses that fall within the
host's reserved IOVA windows. The allocation remains basic: if
the requested size is too big for the remainder of the current
usable IOVA range, we jump to the next one, leaving a hole in
the address map.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Message-id: 20200929085550.30926-3-eric.auger@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
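
For illustration, below is a minimal standalone sketch of the bottom-up
("fixed") strategy described above; the top-down "temp" allocator mirrors
it from the high end of the usable ranges. All names and range values in
the sketch (IOVARange, usable, find_fixed_iova, the window addresses) are
hypothetical and simplified; this is not the QEMU code itself:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t start;
    uint64_t end;    /* inclusive */
} IOVARange;

/* Usable IOVA ranges, i.e. the gaps between host-reserved windows.
 * The values are made up for the demo. */
static IOVARange usable[] = {
    { 0x00000, 0x0ffff },    /* 64 KiB below a reserved window */
    { 0x30000, 0x7ffff },    /* usable space above that window */
};
static const int nb_ranges = sizeof(usable) / sizeof(usable[0]);

static uint64_t low_water_mark;  /* next candidate IOVA, grows upward */

/* Return 0 and set *iova on success, -1 if no range can hold size. */
static int find_fixed_iova(uint64_t size, uint64_t *iova)
{
    for (int i = 0; i < nb_ranges; i++) {
        if (usable[i].end < low_water_mark) {
            continue;    /* this range is already fully consumed */
        }
        if (low_water_mark < usable[i].start) {
            /* Jump over a reserved window (and any unused tail of the
             * previous range): this is the hole left in the map. */
            low_water_mark = usable[i].start;
        }
        if (usable[i].end - low_water_mark + 1 >= size) {
            *iova = low_water_mark;
            low_water_mark += size;
            return 0;
        }
        /* Too big for the remainder of this range: try the next one. */
    }
    return -1;
}

int main(void)
{
    uint64_t iova = 0;

    if (find_fixed_iova(0x4000, &iova) == 0) {
        printf("first:  0x%" PRIx64 "\n", iova);    /* 0x0 */
    }
    /* 64 KiB no longer fits in the first range, so the allocator
     * skips to the second one, leaving [0x4000, 0xffff] unused. */
    if (find_fixed_iova(0x10000, &iova) == 0) {
        printf("second: 0x%" PRIx64 "\n", iova);    /* 0x30000 */
    }
    return 0;
}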

--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -667,6 +667,50 @@ static bool qemu_vfio_verify_mappings(QEMUVFIOState *s)
     return true;
 }
 
+static int
+qemu_vfio_find_fixed_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
+{
+    int i;
+
+    for (i = 0; i < s->nb_iova_ranges; i++) {
+        if (s->usable_iova_ranges[i].end < s->low_water_mark) {
+            continue;
+        }
+        s->low_water_mark =
+            MAX(s->low_water_mark, s->usable_iova_ranges[i].start);
+
+        if (s->usable_iova_ranges[i].end - s->low_water_mark + 1 >= size ||
+            s->usable_iova_ranges[i].end - s->low_water_mark + 1 == 0) {
+            *iova = s->low_water_mark;
+            s->low_water_mark += size;
+            return 0;
+        }
+    }
+    return -ENOMEM;
+}
+
+static int
+qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
+{
+    int i;
+
+    for (i = s->nb_iova_ranges - 1; i >= 0; i--) {
+        if (s->usable_iova_ranges[i].start > s->high_water_mark) {
+            continue;
+        }
+        s->high_water_mark =
+            MIN(s->high_water_mark, s->usable_iova_ranges[i].end + 1);
+
+        if (s->high_water_mark - s->usable_iova_ranges[i].start + 1 >= size ||
+            s->high_water_mark - s->usable_iova_ranges[i].start + 1 == 0) {
+            *iova = s->high_water_mark - size;
+            s->high_water_mark = *iova;
+            return 0;
+        }
+    }
+    return -ENOMEM;
+}
+
 /* Map [host, host + size) area into a contiguous IOVA address space, and store
  * the result in @iova if not NULL. The caller need to make sure the area is
  * aligned to page size, and mustn't overlap with existing mapping areas (split
@@ -693,7 +737,11 @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
             goto out;
         }
         if (!temporary) {
-            iova0 = s->low_water_mark;
+            if (qemu_vfio_find_fixed_iova(s, size, &iova0)) {
+                ret = -ENOMEM;
+                goto out;
+            }
+
             mapping = qemu_vfio_add_mapping(s, host, size, index + 1, iova0);
             if (!mapping) {
                 ret = -ENOMEM;
@@ -705,15 +753,16 @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
                 qemu_vfio_undo_mapping(s, mapping, NULL);
                 goto out;
             }
-            s->low_water_mark += size;
             qemu_vfio_dump_mappings(s);
         } else {
-            iova0 = s->high_water_mark - size;
+            if (qemu_vfio_find_temp_iova(s, size, &iova0)) {
+                ret = -ENOMEM;
+                goto out;
+            }
             ret = qemu_vfio_do_mapping(s, host, size, iova0);
             if (ret) {
                 goto out;
             }
-            s->high_water_mark -= size;
         }
     }
     if (iova) {
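
A side note on the "+ 1 == 0" term in both helpers (my reading; the
commit message does not spell this out): the width of a usable range is
computed as end - mark + 1 in 64-bit unsigned arithmetic, so a range
covering the entire 64-bit space would see that width wrap around to 0
and be wrongly rejected by the ">= size" test alone; the "== 0" clause
accepts such a range instead. A tiny hypothetical demonstration of the
wrap-around:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t end = UINT64_MAX;      /* range covers the whole space */
    uint64_t low_water_mark = 0;
    /* end - low_water_mark + 1 wraps to 0 in uint64_t arithmetic. */
    printf("%llu\n", (unsigned long long)(end - low_water_mark + 1));
    return 0;
}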