From 09aa97c78a784df2f781ff03b57b7dd6f1339edc Mon Sep 17 00:00:00 2001
From: Himanshu Jha
Date: Mon, 9 Oct 2017 03:00:28 +0530
Subject: [PATCH 1/3] skd: Use kmem_cache_free

Use kmem_cache_free instead of kfree for freeing memory previously
allocated with kmem_cache_zalloc/kmem_cache_alloc/kmem_cache_alloc_node;
an object obtained from a slab cache must be returned to that same
cache, not handed to the general-purpose allocator.

Signed-off-by: Himanshu Jha
Signed-off-by: Jens Axboe
---
 drivers/block/skd_main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 7cedb4295e9d..64d0fc17c174 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2604,7 +2604,7 @@ static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
 		return NULL;
 	*dma_handle = dma_map_single(dev, buf, s->size, dir);
 	if (dma_mapping_error(dev, *dma_handle)) {
-		kfree(buf);
+		kmem_cache_free(s, buf);
 		buf = NULL;
 	}
 	return buf;

From 639812a1ed9bf49ae2c026086fbf975339cd1eef Mon Sep 17 00:00:00 2001
From: Josef Bacik
Date: Mon, 9 Oct 2017 13:12:10 -0400
Subject: [PATCH 2/3] nbd: don't set the device size until we're connected

A user reported a regression when using the normal ioctl interface on
newer kernels. This happens because I was setting the device size
before the device was actually connected, which caused us to error out
and close everything down. This didn't happen with netlink because we
hold the device lock the whole time we're setting things up, but we
don't do that for the ioctl path. Fix the problem by not updating the
device size until the device has actually been started.

Cc: stable@vger.kernel.org
Fixes: 29eaadc ("nbd: stop using the bdev everywhere")
Signed-off-by: Josef Bacik
Signed-off-by: Jens Axboe
---
 drivers/block/nbd.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 883dfebd3014..baebbdfd74d5 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -243,7 +243,6 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
 	struct nbd_config *config = nbd->config;
 	config->blksize = blocksize;
 	config->bytesize = blocksize * nr_blocks;
-	nbd_size_update(nbd);
 }
 
 static void nbd_complete_rq(struct request *req)
@@ -1094,6 +1093,7 @@ static int nbd_start_device(struct nbd_device *nbd)
 		args->index = i;
 		queue_work(recv_workqueue, &args->work);
 	}
+	nbd_size_update(nbd);
 	return error;
 }
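To make the rule behind patch 1 concrete: memory obtained from a slab
cache has to be returned to that same cache with kmem_cache_free();
passing it to kfree() hands the object to the wrong allocator. Below is
a minimal sketch of the corrected pattern. It is illustrative only, not
part of the series: demo_cache, demo_alloc_dma() and the surrounding
module context are invented names standing in for the skd equivalents.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

/* "demo_cache" is a hypothetical slab cache created elsewhere, e.g.
 * demo_cache = kmem_cache_create("demo_cache", 512, 0, 0, NULL); */
static struct kmem_cache *demo_cache;

/*
 * Mirror of the skd_alloc_dma() pattern: allocate from a slab cache,
 * DMA-map the object, and undo the allocation on mapping failure with
 * kmem_cache_free() on the same cache -- never kfree().
 */
static void *demo_alloc_dma(struct device *dev, dma_addr_t *dma_handle)
{
	void *buf = kmem_cache_zalloc(demo_cache, GFP_KERNEL);

	if (!buf)
		return NULL;

	*dma_handle = dma_map_single(dev, buf, kmem_cache_size(demo_cache),
				     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_handle)) {
		/* came from demo_cache, so it goes back to demo_cache */
		kmem_cache_free(demo_cache, buf);
		buf = NULL;
	}
	return buf;
}

The one-line fix in skd_alloc_dma() is exactly this: the buffer came
from the cache s, so the error path must free it with
kmem_cache_free(s, buf).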
From ffe51f0142a291a957eebb9687cafb15f2b3fc14 Mon Sep 17 00:00:00 2001
From: Lukas Czerner
Date: Tue, 17 Oct 2017 08:43:09 -0600
Subject: [PATCH 3/3] fs: Avoid invalidation in interrupt context in
 dio_complete()

Currently we try to defer completion of async DIO to the process
context in case there are any mapped pages associated with the inode
so that we can invalidate the pages when the IO completes. However,
the check is racy and the pages can be mapped afterwards. If this
happens we might end up calling invalidate_inode_pages2_range() in
dio_complete() in interrupt context, which could sleep. This can be
reproduced by generic/451.

Fix this by explicitly passing dio_complete() the information about
whether it is allowed to invalidate pages in the current context.

Thanks to Eryu Guan for reporting this and Jan Kara for suggesting a
fix.

Fixes: 332391a9935d ("fs: Fix page cache inconsistency when mixing buffered and AIO DIO")
Reported-by: Eryu Guan
Reviewed-by: Jan Kara
Tested-by: Eryu Guan
Signed-off-by: Lukas Czerner
Signed-off-by: Jens Axboe
---
 fs/direct-io.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/fs/direct-io.c b/fs/direct-io.c
index 62cf812ed0e5..8106a8dddfab 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -44,6 +44,12 @@
  */
 #define DIO_PAGES	64
 
+/*
+ * Flags for dio_complete()
+ */
+#define DIO_COMPLETE_ASYNC		0x01	/* This is async IO */
+#define DIO_COMPLETE_INVALIDATE	0x02	/* Can invalidate pages */
+
 /*
  * This code generally works in units of "dio_blocks". A dio_block is
  * somewhere between the hard sector size and the filesystem block size. it
@@ -225,7 +231,7 @@ static inline struct page *dio_get_page(struct dio *dio,
  * filesystems can use it to hold additional state between get_block calls and
  * dio_complete.
  */
-static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
+static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
 {
 	loff_t offset = dio->iocb->ki_pos;
 	ssize_t transferred = 0;
@@ -266,7 +272,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
 	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
 	 * this invalidation fails, tough, the write still worked...
 	 */
-	if (ret > 0 && dio->op == REQ_OP_WRITE &&
+	if (flags & DIO_COMPLETE_INVALIDATE &&
+	    ret > 0 && dio->op == REQ_OP_WRITE &&
 	    dio->inode->i_mapping->nrpages) {
 		err = invalidate_inode_pages2_range(dio->inode->i_mapping,
 					offset >> PAGE_SHIFT,
@@ -285,7 +292,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
 	if (!(dio->flags & DIO_SKIP_DIO_COUNT))
 		inode_dio_end(dio->inode);
 
-	if (is_async) {
+	if (flags & DIO_COMPLETE_ASYNC) {
 		/*
 		 * generic_write_sync expects ki_pos to have been updated
 		 * already, but the submission path only does this for
@@ -306,7 +313,7 @@ static void dio_aio_complete_work(struct work_struct *work)
 {
 	struct dio *dio = container_of(work, struct dio, complete_work);
 
-	dio_complete(dio, 0, true);
+	dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
 }
 
 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
@@ -348,7 +355,7 @@ static void dio_bio_end_aio(struct bio *bio)
 			queue_work(dio->inode->i_sb->s_dio_done_wq,
 				   &dio->complete_work);
 		} else {
-			dio_complete(dio, 0, true);
+			dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
 		}
 	}
 }
@@ -1359,7 +1366,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	dio_await_completion(dio);
 
 	if (drop_refcount(dio) == 0) {
-		retval = dio_complete(dio, retval, false);
+		retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
 	} else
 		BUG_ON(retval != -EIOCBQUEUED);
 
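Patch 3 applies a common refactoring: what used to be a single bool
(is_async) plus an implicit, racy nrpages check becomes an explicit
flags bitmask, so each call site states exactly what its context
allows. The sketch below distils that idiom into standalone userspace
C; complete_io() and its flag names are invented here, standing in for
dio_complete() and DIO_COMPLETE_ASYNC/DIO_COMPLETE_INVALIDATE.

#include <stdio.h>

/* Each capability gets its own bit, like the DIO_COMPLETE_* flags. */
#define COMPLETE_ASYNC      0x01 /* completion runs outside the submitter */
#define COMPLETE_INVALIDATE 0x02 /* caller's context may sleep, so page
                                    invalidation is permitted */

static long complete_io(long ret, unsigned int flags)
{
	/* Only do potentially sleeping work if the caller allowed it. */
	if (flags & COMPLETE_INVALIDATE)
		printf("invalidating cached pages (may sleep)\n");
	if (flags & COMPLETE_ASYNC)
		printf("signalling async completion\n");
	return ret;
}

int main(void)
{
	/* Deferred work item, process context: both behaviours are safe. */
	complete_io(4096, COMPLETE_ASYNC | COMPLETE_INVALIDATE);
	/* Interrupt context: invalidation must never be requested here. */
	complete_io(4096, COMPLETE_ASYNC);
	/* Synchronous submitter: process context, but not async. */
	complete_io(4096, COMPLETE_INVALIDATE);
	return 0;
}

This mirrors the three call sites in the patch: dio_aio_complete_work()
runs from a workqueue in process context and passes both flags,
dio_bio_end_aio() runs in interrupt context and passes only
DIO_COMPLETE_ASYNC, and the synchronous path in
do_blockdev_direct_IO() passes only DIO_COMPLETE_INVALIDATE.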