nfs: Handle failure for potentially large allocations
Some code in the block layer makes potentially huge allocations. Failure is not completely unexpected there, so avoid aborting qemu and handle out-of-memory situations gracefully.

This patch addresses the allocations in the nfs block driver.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Benoit Canet <benoit@irqsave.net>
This commit is contained in:
parent
4d5a3f888c
commit
2347dd7b68
@ -172,7 +172,11 @@ static int coroutine_fn nfs_co_writev(BlockDriverState *bs,
|
||||
|
||||
nfs_co_init_task(client, &task);
|
||||
|
||||
buf = g_malloc(nb_sectors * BDRV_SECTOR_SIZE);
|
||||
buf = g_try_malloc(nb_sectors * BDRV_SECTOR_SIZE);
|
||||
if (nb_sectors && buf == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
qemu_iovec_to_buf(iov, 0, buf, nb_sectors * BDRV_SECTOR_SIZE);
|
||||
|
||||
if (nfs_pwrite_async(client->context, client->fh,
|
||||
|
Loading…
x
Reference in New Issue
Block a user