block: Convert BB interface to byte-based discards
Change sector-based blk_discard(), blk_co_discard(), and blk_aio_discard() to instead be byte-based blk_pdiscard(), blk_co_pdiscard(), and blk_aio_pdiscard(). NBD gets a lot simpler now that ignoring the unaligned portion of a byte-based discard request is handled under the hood by the block layer.

Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 1468624988-423-6-git-send-email-eblake@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 1c6c4bb7f0 (parent 60ebac16bc)
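The caller-side conversion is mechanical: sector counts become byte counts by shifting with BDRV_SECTOR_BITS, as the mirror, Xen, and IDE hunks below do at each call site. A minimal before/after sketch, assuming QEMU's internal headers (the caller, callback, and opaque names are placeholders, not code from this commit):

    #include "qemu/osdep.h"
    #include "block/block.h"              /* BDRV_SECTOR_BITS */
    #include "sysemu/block-backend.h"     /* blk_aio_pdiscard() */

    /* Hypothetical caller: issue the same discard request before and
     * after the interface change. */
    static void issue_trim(BlockBackend *blk, int64_t sector_num, int nb_sectors,
                           BlockCompletionFunc *trim_done, void *opaque)
    {
        /* Old, sector-based call (removed by this commit):
         *   blk_aio_discard(blk, sector_num, nb_sectors, trim_done, opaque);
         */

        /* New, byte-based call: convert sectors to bytes at the call site. */
        blk_aio_pdiscard(blk, sector_num << BDRV_SECTOR_BITS,
                         nb_sectors << BDRV_SECTOR_BITS,
                         trim_done, opaque);
    }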
block/block-backend.c
@@ -1065,17 +1065,16 @@ BlockAIOCB *blk_aio_flush(BlockBackend *blk,
     return bdrv_aio_flush(blk_bs(blk), cb, opaque);
 }
 
-BlockAIOCB *blk_aio_discard(BlockBackend *blk,
-                            int64_t sector_num, int nb_sectors,
-                            BlockCompletionFunc *cb, void *opaque)
+BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
+                             int64_t offset, int count,
+                             BlockCompletionFunc *cb, void *opaque)
 {
-    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    int ret = blk_check_byte_request(blk, offset, count);
     if (ret < 0) {
         return blk_abort_aio_request(blk, cb, opaque, ret);
     }
 
-    return bdrv_aio_pdiscard(blk_bs(blk), sector_num << BDRV_SECTOR_BITS,
-                             nb_sectors << BDRV_SECTOR_BITS, cb, opaque);
+    return bdrv_aio_pdiscard(blk_bs(blk), offset, count, cb, opaque);
 }
 
 void blk_aio_cancel(BlockAIOCB *acb)
@@ -1107,15 +1106,14 @@ BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
     return bdrv_aio_ioctl(blk_bs(blk), req, buf, cb, opaque);
 }
 
-int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
+int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int count)
 {
-    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    int ret = blk_check_byte_request(blk, offset, count);
     if (ret < 0) {
         return ret;
     }
 
-    return bdrv_co_pdiscard(blk_bs(blk), sector_num << BDRV_SECTOR_BITS,
-                            nb_sectors << BDRV_SECTOR_BITS);
+    return bdrv_co_pdiscard(blk_bs(blk), offset, count);
 }
 
 int blk_co_flush(BlockBackend *blk)
@@ -1506,15 +1504,14 @@ int blk_truncate(BlockBackend *blk, int64_t offset)
     return bdrv_truncate(blk_bs(blk), offset);
 }
 
-int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
+int blk_pdiscard(BlockBackend *blk, int64_t offset, int count)
 {
-    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    int ret = blk_check_byte_request(blk, offset, count);
     if (ret < 0) {
         return ret;
     }
 
-    return bdrv_pdiscard(blk_bs(blk), sector_num << BDRV_SECTOR_BITS,
-                         nb_sectors << BDRV_SECTOR_BITS);
+    return bdrv_pdiscard(blk_bs(blk), offset, count);
 }
 
 int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
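With the BlockBackend wrappers above validating byte ranges via blk_check_byte_request() and passing them straight to bdrv_aio_pdiscard(), an asynchronous caller works directly in bytes. A hedged sketch of such a caller, assuming QEMU's internal headers (discard_done and start_discard are invented names for illustration):

    #include "qemu/osdep.h"
    #include "qemu/error-report.h"
    #include "sysemu/block-backend.h"

    /* Completion callback: ret is 0 on success or a negative errno. */
    static void discard_done(void *opaque, int ret)
    {
        if (ret < 0) {
            error_report("discard failed: %s", strerror(-ret));
        }
    }

    /* Kick off an asynchronous byte-based discard of [offset, offset + count). */
    static void start_discard(BlockBackend *blk, int64_t offset, int count)
    {
        blk_aio_pdiscard(blk, offset, count, discard_done, NULL);
    }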
block/mirror.c
@@ -303,8 +303,9 @@ static void mirror_do_zero_or_discard(MirrorBlockJob *s,
     s->in_flight++;
     s->sectors_in_flight += nb_sectors;
     if (is_discard) {
-        blk_aio_discard(s->target, sector_num, op->nb_sectors,
-                        mirror_write_complete, op);
+        blk_aio_pdiscard(s->target, sector_num << BDRV_SECTOR_BITS,
+                         op->nb_sectors << BDRV_SECTOR_BITS,
+                         mirror_write_complete, op);
     } else {
         blk_aio_pwrite_zeroes(s->target, sector_num * BDRV_SECTOR_SIZE,
                               op->nb_sectors * BDRV_SECTOR_SIZE,
hw/block/xen_disk.c
@@ -574,9 +574,10 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
     {
         struct blkif_request_discard *discard_req = (void *)&ioreq->req;
         ioreq->aio_inflight++;
-        blk_aio_discard(blkdev->blk,
-                        discard_req->sector_number, discard_req->nr_sectors,
-                        qemu_aio_complete, ioreq);
+        blk_aio_pdiscard(blkdev->blk,
+                         discard_req->sector_number << BDRV_SECTOR_BITS,
+                         discard_req->nr_sectors << BDRV_SECTOR_BITS,
+                         qemu_aio_complete, ioreq);
         break;
     }
     default:
hw/ide/core.c
@@ -423,8 +423,10 @@ static void ide_issue_trim_cb(void *opaque, int ret)
         }
 
         /* Got an entry! Submit and exit. */
-        iocb->aiocb = blk_aio_discard(iocb->blk, sector, count,
-                                      ide_issue_trim_cb, opaque);
+        iocb->aiocb = blk_aio_pdiscard(iocb->blk,
+                                       sector << BDRV_SECTOR_BITS,
+                                       count << BDRV_SECTOR_BITS,
+                                       ide_issue_trim_cb, opaque);
         return;
     }
 
hw/scsi/scsi-disk.c
@@ -1609,10 +1609,10 @@ static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
         goto done;
     }
 
-    r->req.aiocb = blk_aio_discard(s->qdev.conf.blk,
-                                   sector_num * (s->qdev.blocksize / 512),
-                                   nb_sectors * (s->qdev.blocksize / 512),
-                                   scsi_unmap_complete, data);
+    r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
+                                    sector_num * s->qdev.blocksize,
+                                    nb_sectors * s->qdev.blocksize,
+                                    scsi_unmap_complete, data);
     data->count--;
     data->inbuf += 16;
     return;
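The SCSI hunk also changes units: sector_num and nb_sectors in scsi_unmap_complete_noio() count logical blocks of s->qdev.blocksize bytes, so the old code first converted them to 512-byte sectors while the new code multiplies by the block size to get bytes directly. A standalone sanity check that both paths describe the same byte range, assuming a hypothetical 4 KiB logical block size and made-up block numbers:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const int64_t blocksize = 4096;            /* stands in for s->qdev.blocksize */
        const int64_t sector_num = 10, nb_sectors = 16;

        /* Old path: logical blocks -> 512-byte sectors, which the sector-based
         * API then shifted by BDRV_SECTOR_BITS (9) internally to reach bytes. */
        int64_t old_offset = (sector_num * (blocksize / 512)) << 9;
        int64_t old_count  = (nb_sectors * (blocksize / 512)) << 9;

        /* New path: logical blocks -> bytes in one step. */
        int64_t new_offset = sector_num * blocksize;
        int64_t new_count  = nb_sectors * blocksize;

        assert(old_offset == new_offset);   /* both 40960 */
        assert(old_count == new_count);     /* both 65536 */
        return 0;
    }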
include/sysemu/block-backend.h
@@ -139,15 +139,14 @@ BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                            BlockCompletionFunc *cb, void *opaque);
 BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                           BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *blk_aio_discard(BlockBackend *blk,
-                            int64_t sector_num, int nb_sectors,
-                            BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int count,
+                             BlockCompletionFunc *cb, void *opaque);
 void blk_aio_cancel(BlockAIOCB *acb);
 void blk_aio_cancel_async(BlockAIOCB *acb);
 int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
 BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                           BlockCompletionFunc *cb, void *opaque);
-int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors);
+int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int count);
 int blk_co_flush(BlockBackend *blk);
 int blk_flush(BlockBackend *blk);
 int blk_flush_all(void);
@@ -207,7 +206,7 @@ int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
 int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors);
 int blk_truncate(BlockBackend *blk, int64_t offset);
-int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors);
+int blk_pdiscard(BlockBackend *blk, int64_t offset, int count);
 int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                      int64_t pos, int size);
 int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size);
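As a usage note on the new prototypes, blk_pdiscard() and blk_co_pdiscard() take a byte offset and a byte count and return 0 on success or a negative errno. A minimal synchronous sketch, assuming QEMU's internal headers (discard_first_mib is an invented helper, not part of this commit):

    #include "qemu/osdep.h"
    #include "qemu/error-report.h"
    #include "sysemu/block-backend.h"

    /* Discard the first MiB of the backend's data, reporting any error. */
    static int discard_first_mib(BlockBackend *blk)
    {
        int ret = blk_pdiscard(blk, 0, 1 * 1024 * 1024);
        if (ret < 0) {
            error_report("discard failed: %s", strerror(-ret));
        }
        return ret;
    }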
nbd/server.c
@@ -1182,20 +1182,11 @@ static void nbd_trip(void *opaque)
         break;
     case NBD_CMD_TRIM:
         TRACE("Request type is TRIM");
-        /* Ignore unaligned head or tail, until block layer adds byte
-         * interface */
-        if (request.len >= BDRV_SECTOR_SIZE) {
-            request.len -= (request.from + request.len) % BDRV_SECTOR_SIZE;
-            ret = blk_co_discard(exp->blk,
-                                 DIV_ROUND_UP(request.from + exp->dev_offset,
-                                              BDRV_SECTOR_SIZE),
-                                 request.len / BDRV_SECTOR_SIZE);
-            if (ret < 0) {
-                LOG("discard failed");
-                reply.error = -ret;
-            }
-        } else {
-            TRACE("trim request too small, ignoring");
-        }
+        ret = blk_co_pdiscard(exp->blk, request.from + exp->dev_offset,
+                              request.len);
+        if (ret < 0) {
+            LOG("discard failed");
+            reply.error = -ret;
+        }
         if (nbd_co_send_reply(req, &reply, 0) < 0) {
             goto out;
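The lines removed from the NBD server were doing by hand the head/tail trimming that, per the commit message, the byte-based block layer entry points now handle internally. For reference, a self-contained illustration of that kind of clamping arithmetic (not a copy of any QEMU function):

    #include <inttypes.h>
    #include <stdio.h>

    /* Clamp [offset, offset + count) to align-byte boundaries by rounding the
     * head up and the tail down; returns the aligned length (0 if nothing
     * aligned remains) and stores the aligned start in *aligned_offset. */
    static int64_t clamp_discard(int64_t offset, int64_t count, int64_t align,
                                 int64_t *aligned_offset)
    {
        int64_t start = (offset + align - 1) / align * align;  /* round head up */
        int64_t end = (offset + count) / align * align;        /* round tail down */

        *aligned_offset = start;
        return end > start ? end - start : 0;
    }

    int main(void)
    {
        int64_t start;
        int64_t len = clamp_discard(700, 2000, 512, &start);
        printf("aligned: offset=%" PRId64 " len=%" PRId64 "\n", start, len);
        /* prints: aligned: offset=1024 len=1536 */
        return 0;
    }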
qemu-io-cmds.c
@@ -1696,8 +1696,7 @@ static int discard_f(BlockBackend *blk, int argc, char **argv)
     }
 
     gettimeofday(&t1, NULL);
-    ret = blk_discard(blk, offset >> BDRV_SECTOR_BITS,
-                      count >> BDRV_SECTOR_BITS);
+    ret = blk_pdiscard(blk, offset, count);
     gettimeofday(&t2, NULL);
 
     if (ret < 0) {