2005-04-17 00:20:36 +02:00
|
|
|
/*
|
2006-06-09 06:48:12 +02:00
|
|
|
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
|
2005-11-02 04:58:39 +01:00
|
|
|
* All Rights Reserved.
|
2005-04-17 00:20:36 +02:00
|
|
|
*
|
2005-11-02 04:58:39 +01:00
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License as
|
2005-04-17 00:20:36 +02:00
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
2005-11-02 04:58:39 +01:00
|
|
|
* This program is distributed in the hope that it would be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
2005-04-17 00:20:36 +02:00
|
|
|
*
|
2005-11-02 04:58:39 +01:00
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; if not, write the Free Software Foundation,
|
|
|
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
2005-04-17 00:20:36 +02:00
|
|
|
*/
|
|
|
|
#include "xfs.h"
|
|
|
|
#include "xfs_fs.h"
|
2013-10-23 01:36:05 +02:00
|
|
|
#include "xfs_shared.h"
|
2013-10-23 01:50:10 +02:00
|
|
|
#include "xfs_format.h"
|
|
|
|
#include "xfs_log_format.h"
|
|
|
|
#include "xfs_trans_resv.h"
|
2005-04-17 00:20:36 +02:00
|
|
|
#include "xfs_sb.h"
|
|
|
|
#include "xfs_ag.h"
|
|
|
|
#include "xfs_mount.h"
|
|
|
|
#include "xfs_inode.h"
|
2005-11-02 04:38:42 +01:00
|
|
|
#include "xfs_btree.h"
|
2013-10-23 01:51:50 +02:00
|
|
|
#include "xfs_bmap_btree.h"
|
2005-04-17 00:20:36 +02:00
|
|
|
#include "xfs_bmap.h"
|
2013-08-12 12:49:42 +02:00
|
|
|
#include "xfs_bmap_util.h"
|
2005-04-17 00:20:36 +02:00
|
|
|
#include "xfs_error.h"
|
2013-10-23 01:51:50 +02:00
|
|
|
#include "xfs_trans.h"
|
2005-04-17 00:20:36 +02:00
|
|
|
#include "xfs_trans_space.h"
|
|
|
|
#include "xfs_iomap.h"
|
2009-12-15 00:14:59 +01:00
|
|
|
#include "xfs_trace.h"
|
2012-11-06 15:50:38 +01:00
|
|
|
#include "xfs_icache.h"
|
2013-10-23 01:51:50 +02:00
|
|
|
#include "xfs_quota.h"
|
2013-03-18 15:51:47 +01:00
|
|
|
#include "xfs_dquot_item.h"
|
|
|
|
#include "xfs_dquot.h"
|
2013-10-23 01:51:50 +02:00
|
|
|
#include "xfs_dinode.h"
|
2005-04-17 00:20:36 +02:00
|
|
|
|
|
|
|
|
|
|
|
/* Round @off down to the nearest write I/O alignment boundary. */
#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)
/* Maximum number of mappings asked for per delayed allocation write. */
#define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP
|
|
|
|
|
2006-01-11 05:28:28 +01:00
|
|
|
STATIC int
|
|
|
|
xfs_iomap_eof_align_last_fsb(
|
|
|
|
xfs_mount_t *mp,
|
2007-10-11 09:34:33 +02:00
|
|
|
xfs_inode_t *ip,
|
2006-01-11 05:28:28 +01:00
|
|
|
xfs_extlen_t extsize,
|
|
|
|
xfs_fileoff_t *last_fsb)
|
|
|
|
{
|
|
|
|
xfs_fileoff_t new_last_fsb = 0;
|
2011-12-18 21:00:05 +01:00
|
|
|
xfs_extlen_t align = 0;
|
2006-01-11 05:28:28 +01:00
|
|
|
int eof, error;
|
|
|
|
|
2011-12-18 21:00:05 +01:00
|
|
|
if (!XFS_IS_REALTIME_INODE(ip)) {
|
|
|
|
/*
|
|
|
|
* Round up the allocation request to a stripe unit
|
|
|
|
* (m_dalign) boundary if the file size is >= stripe unit
|
|
|
|
* size, and we are allocating past the allocation eof.
|
|
|
|
*
|
|
|
|
* If mounted with the "-o swalloc" option the alignment is
|
|
|
|
* increased from the strip unit size to the stripe width.
|
|
|
|
*/
|
|
|
|
if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
|
|
|
|
align = mp->m_swidth;
|
|
|
|
else if (mp->m_dalign)
|
|
|
|
align = mp->m_dalign;
|
|
|
|
|
2011-12-18 21:00:11 +01:00
|
|
|
if (align && XFS_ISIZE(ip) >= XFS_FSB_TO_B(mp, align))
|
2011-12-18 21:00:05 +01:00
|
|
|
new_last_fsb = roundup_64(*last_fsb, align);
|
|
|
|
}
|
2006-01-11 05:28:28 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Always round up the allocation request to an extent boundary
|
|
|
|
* (when file on a real-time subvolume or has di_extsize hint).
|
|
|
|
*/
|
|
|
|
if (extsize) {
|
|
|
|
if (new_last_fsb)
|
|
|
|
align = roundup_64(new_last_fsb, extsize);
|
|
|
|
else
|
|
|
|
align = extsize;
|
|
|
|
new_last_fsb = roundup_64(*last_fsb, align);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (new_last_fsb) {
|
2007-10-11 09:34:33 +02:00
|
|
|
error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
|
2006-01-11 05:28:28 +01:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
if (eof)
|
|
|
|
*last_fsb = new_last_fsb;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-09-28 03:03:20 +02:00
|
|
|
/*
 * Report a mapping that points at filesystem block zero on a non-realtime
 * inode -- a sign of on-disk corruption.  Logs the offending mapping and
 * returns EFSCORRUPTED (positive, per the error convention in this file)
 * for the caller to propagate.
 */
STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return EFSCORRUPTED;
}
|
|
|
|
|
2010-12-10 09:42:20 +01:00
|
|
|
/*
 * Allocate a real extent backing a direct I/O write of @count bytes at
 * @offset.  On success the resulting mapping is returned through @imap.
 * @nmaps non-zero indicates the caller passed in an existing mapping in
 * @imap which may bound the allocation for writes inside EOF.
 *
 * Called without the inode lock; takes and drops XFS_ILOCK_EXCL around the
 * transaction.
 */
int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	xfs_bmap_free_t	free_list;
	uint		qblocks, resblks, resrtextents;
	int		committed;
	int		error;

	/* Make sure the dquots are attached before reserving quota. */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		/* Writes past EOF may round up to stripe/extsize alignment. */
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return XFS_ERROR(error);
	} else {
		/* Inside EOF: don't allocate past the hole the caller found. */
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

	/*
	 * Reserve enough blocks to cover the request after it has been
	 * expanded to full extent size hint boundaries on both ends.
	 */
	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}

	if (unlikely(rt)) {
		/* Realtime data comes from rtextents; quota counts rt blocks. */
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Allocate and setup the transaction
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
				  resblks, resrtextents);
	/*
	 * Check for running out of space, note: need lock to return
	 */
	if (error) {
		xfs_trans_cancel(tp, 0);
		return XFS_ERROR(error);
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_bmap_init(&free_list, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				XFS_BMAPI_PREALLOC, &firstfsb, 0,
				imap, &nimaps, &free_list);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto out_bmap_cancel;
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = XFS_ERROR(ENOSPC);
		goto out_unlock;
	}

	/* Block zero on a non-realtime inode indicates corruption. */
	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	goto out_unlock;
}
|
|
|
|
|
2006-01-11 05:28:28 +01:00
|
|
|
/*
|
2009-04-06 18:49:12 +02:00
|
|
|
* If the caller is doing a write at the end of the file, then extend the
|
|
|
|
* allocation out to the file system's write iosize. We clean up any extra
|
|
|
|
* space left over when the file is closed in xfs_inactive().
|
2011-01-04 01:35:03 +01:00
|
|
|
*
|
|
|
|
* If we find we already have delalloc preallocation beyond EOF, don't do more
|
|
|
|
* preallocation as it it not needed.
|
2006-01-11 05:28:28 +01:00
|
|
|
*/
|
|
|
|
STATIC int
|
|
|
|
xfs_iomap_eof_want_preallocate(
|
|
|
|
xfs_mount_t *mp,
|
2007-10-11 09:34:33 +02:00
|
|
|
xfs_inode_t *ip,
|
2006-01-11 05:28:28 +01:00
|
|
|
xfs_off_t offset,
|
|
|
|
size_t count,
|
|
|
|
xfs_bmbt_irec_t *imap,
|
|
|
|
int nimaps,
|
|
|
|
int *prealloc)
|
|
|
|
{
|
|
|
|
xfs_fileoff_t start_fsb;
|
|
|
|
xfs_filblks_t count_fsb;
|
|
|
|
xfs_fsblock_t firstblock;
|
|
|
|
int n, error, imaps;
|
2011-01-04 01:35:03 +01:00
|
|
|
int found_delalloc = 0;
|
2006-01-11 05:28:28 +01:00
|
|
|
|
|
|
|
*prealloc = 0;
|
2011-12-18 21:00:11 +01:00
|
|
|
if (offset + count <= XFS_ISIZE(ip))
|
2006-01-11 05:28:28 +01:00
|
|
|
return 0;
|
|
|
|
|
xfs: don't use speculative prealloc for small files
Dedicated small file workloads have been seeing significant free
space fragmentation causing premature inode allocation failure
when large inode sizes are in use. A particular test case showed
that a workload that runs to a real ENOSPC on 256 byte inodes would
fail inode allocation with ENOSPC about about 80% full with 512 byte
inodes, and at about 50% full with 1024 byte inodes.
The same workload, when run with -o allocsize=4096 on 1024 byte
inodes would run to being 100% full before giving ENOSPC. That is,
no freespace fragmentation at all.
The issue was caused by the specific IO pattern the application had
- the framework it was using did not support direct IO, and so it
was emulating it by using fadvise(DONT_NEED). The result was that
the data was getting written back before the speculative prealloc
had been trimmed from memory by the close(), and so small single
block files were being allocated with 2 blocks, and then having one
truncated away. The result was lots of small 4k free space extents,
and hence each new 8k allocation would take another 8k from
contiguous free space and turn it into 4k of allocated space and 4k
of free space.
Hence inode allocation, which requires contiguous, aligned
allocation of 16k (256 byte inodes), 32k (512 byte inodes) or 64k
(1024 byte inodes) can fail to find sufficiently large freespace and
hence fail while there is still lots of free space available.
There's a simple fix for this, and one that has precendence in the
allocator code already - don't do speculative allocation unless the
size of the file is larger than a certain size. In this case, that
size is the minimum default preallocation size:
mp->m_writeio_blocks. And to keep with the concept of being nice to
people when the files are still relatively small, cap the prealloc
to mp->m_writeio_blocks until the file goes over a stripe unit is
size, at which point we'll fall back to the current behaviour based
on the last extent size.
This will effectively turn off speculative prealloc for very small
files, keep preallocation low for small files, and behave as it
currently does for any file larger than a stripe unit. This
completely avoids the freespace fragmentation problem this
particular IO pattern was causing.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
2013-06-27 08:04:48 +02:00
|
|
|
/*
|
|
|
|
* If the file is smaller than the minimum prealloc and we are using
|
|
|
|
* dynamic preallocation, don't do any preallocation at all as it is
|
|
|
|
* likely this is the only write to the file that is going to be done.
|
|
|
|
*/
|
|
|
|
if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
|
|
|
|
XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks))
|
|
|
|
return 0;
|
|
|
|
|
2006-01-11 05:28:28 +01:00
|
|
|
/*
|
|
|
|
* If there are any real blocks past eof, then don't
|
|
|
|
* do any speculative allocation.
|
|
|
|
*/
|
|
|
|
start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
|
2012-06-08 07:44:54 +02:00
|
|
|
count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
|
2006-01-11 05:28:28 +01:00
|
|
|
while (count_fsb > 0) {
|
|
|
|
imaps = nimaps;
|
2006-01-11 05:33:02 +01:00
|
|
|
firstblock = NULLFSBLOCK;
|
2011-09-18 22:40:45 +02:00
|
|
|
error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps,
|
|
|
|
0);
|
2006-01-11 05:28:28 +01:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
for (n = 0; n < imaps; n++) {
|
|
|
|
if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
|
|
|
|
(imap[n].br_startblock != DELAYSTARTBLOCK))
|
|
|
|
return 0;
|
|
|
|
start_fsb += imap[n].br_blockcount;
|
|
|
|
count_fsb -= imap[n].br_blockcount;
|
2011-01-04 01:35:03 +01:00
|
|
|
|
|
|
|
if (imap[n].br_startblock == DELAYSTARTBLOCK)
|
|
|
|
found_delalloc = 1;
|
2006-01-11 05:28:28 +01:00
|
|
|
}
|
|
|
|
}
|
2011-01-04 01:35:03 +01:00
|
|
|
if (!found_delalloc)
|
|
|
|
*prealloc = 1;
|
2006-01-11 05:28:28 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-02-11 06:05:01 +01:00
|
|
|
/*
|
|
|
|
* Determine the initial size of the preallocation. We are beyond the current
|
|
|
|
* EOF here, but we need to take into account whether this is a sparse write or
|
|
|
|
* an extending write when determining the preallocation size. Hence we need to
|
|
|
|
* look up the extent that ends at the current write offset and use the result
|
|
|
|
* to determine the preallocation size.
|
|
|
|
*
|
|
|
|
* If the extent is a hole, then preallocation is essentially disabled.
|
|
|
|
* Otherwise we take the size of the preceeding data extent as the basis for the
|
|
|
|
* preallocation size. If the size of the extent is greater than half the
|
|
|
|
* maximum extent length, then use the current offset as the basis. This ensures
|
|
|
|
* that for large files the preallocation size always extends to MAXEXTLEN
|
|
|
|
* rather than falling short due to things like stripe unit/width alignment of
|
|
|
|
* real extents.
|
|
|
|
*/
|
2013-02-24 20:04:37 +01:00
|
|
|
STATIC xfs_fsblock_t
|
2013-02-11 06:05:01 +01:00
|
|
|
xfs_iomap_eof_prealloc_initial_size(
|
|
|
|
struct xfs_mount *mp,
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
xfs_off_t offset,
|
|
|
|
xfs_bmbt_irec_t *imap,
|
|
|
|
int nimaps)
|
|
|
|
{
|
|
|
|
xfs_fileoff_t start_fsb;
|
|
|
|
int imaps = 1;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
ASSERT(nimaps >= imaps);
|
|
|
|
|
|
|
|
/* if we are using a specific prealloc size, return now */
|
|
|
|
if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
|
|
|
|
return 0;
|
|
|
|
|
xfs: don't use speculative prealloc for small files
Dedicated small file workloads have been seeing significant free
space fragmentation causing premature inode allocation failure
when large inode sizes are in use. A particular test case showed
that a workload that runs to a real ENOSPC on 256 byte inodes would
fail inode allocation with ENOSPC about about 80% full with 512 byte
inodes, and at about 50% full with 1024 byte inodes.
The same workload, when run with -o allocsize=4096 on 1024 byte
inodes would run to being 100% full before giving ENOSPC. That is,
no freespace fragmentation at all.
The issue was caused by the specific IO pattern the application had
- the framework it was using did not support direct IO, and so it
was emulating it by using fadvise(DONT_NEED). The result was that
the data was getting written back before the speculative prealloc
had been trimmed from memory by the close(), and so small single
block files were being allocated with 2 blocks, and then having one
truncated away. The result was lots of small 4k free space extents,
and hence each new 8k allocation would take another 8k from
contiguous free space and turn it into 4k of allocated space and 4k
of free space.
Hence inode allocation, which requires contiguous, aligned
allocation of 16k (256 byte inodes), 32k (512 byte inodes) or 64k
(1024 byte inodes) can fail to find sufficiently large freespace and
hence fail while there is still lots of free space available.
There's a simple fix for this, and one that has precendence in the
allocator code already - don't do speculative allocation unless the
size of the file is larger than a certain size. In this case, that
size is the minimum default preallocation size:
mp->m_writeio_blocks. And to keep with the concept of being nice to
people when the files are still relatively small, cap the prealloc
to mp->m_writeio_blocks until the file goes over a stripe unit is
size, at which point we'll fall back to the current behaviour based
on the last extent size.
This will effectively turn off speculative prealloc for very small
files, keep preallocation low for small files, and behave as it
currently does for any file larger than a stripe unit. This
completely avoids the freespace fragmentation problem this
particular IO pattern was causing.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
2013-06-27 08:04:48 +02:00
|
|
|
/* If the file is small, then use the minimum prealloc */
|
|
|
|
if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign))
|
|
|
|
return 0;
|
|
|
|
|
2013-02-11 06:05:01 +01:00
|
|
|
/*
|
|
|
|
* As we write multiple pages, the offset will always align to the
|
|
|
|
* start of a page and hence point to a hole at EOF. i.e. if the size is
|
|
|
|
* 4096 bytes, we only have one block at FSB 0, but XFS_B_TO_FSB(4096)
|
|
|
|
* will return FSB 1. Hence if there are blocks in the file, we want to
|
|
|
|
* point to the block prior to the EOF block and not the hole that maps
|
|
|
|
* directly at @offset.
|
|
|
|
*/
|
|
|
|
start_fsb = XFS_B_TO_FSB(mp, offset);
|
|
|
|
if (start_fsb)
|
|
|
|
start_fsb--;
|
|
|
|
error = xfs_bmapi_read(ip, start_fsb, 1, imap, &imaps, XFS_BMAPI_ENTIRE);
|
|
|
|
if (error)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ASSERT(imaps == 1);
|
|
|
|
if (imap[0].br_startblock == HOLESTARTBLOCK)
|
|
|
|
return 0;
|
|
|
|
if (imap[0].br_blockcount <= (MAXEXTLEN >> 1))
|
2013-02-19 16:24:41 +01:00
|
|
|
return imap[0].br_blockcount << 1;
|
2013-02-11 06:05:01 +01:00
|
|
|
return XFS_B_TO_FSB(mp, offset);
|
|
|
|
}
|
|
|
|
|
2013-03-18 15:51:47 +01:00
|
|
|
STATIC bool
|
|
|
|
xfs_quota_need_throttle(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
int type,
|
|
|
|
xfs_fsblock_t alloc_blocks)
|
|
|
|
{
|
|
|
|
struct xfs_dquot *dq = xfs_inode_dquot(ip, type);
|
|
|
|
|
|
|
|
if (!dq || !xfs_this_quota_on(ip->i_mount, type))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* no hi watermark, no throttle */
|
|
|
|
if (!dq->q_prealloc_hi_wmark)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* under the lo watermark, no throttle */
|
|
|
|
if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
STATIC void
|
|
|
|
xfs_quota_calc_throttle(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
int type,
|
|
|
|
xfs_fsblock_t *qblocks,
|
|
|
|
int *qshift)
|
|
|
|
{
|
|
|
|
int64_t freesp;
|
|
|
|
int shift = 0;
|
|
|
|
struct xfs_dquot *dq = xfs_inode_dquot(ip, type);
|
|
|
|
|
|
|
|
/* over hi wmark, squash the prealloc completely */
|
|
|
|
if (dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
|
|
|
|
*qblocks = 0;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
|
|
|
|
if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
|
|
|
|
shift = 2;
|
|
|
|
if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
|
|
|
|
shift += 2;
|
|
|
|
if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
|
|
|
|
shift += 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* only overwrite the throttle values if we are more aggressive */
|
|
|
|
if ((freesp >> shift) < (*qblocks >> *qshift)) {
|
|
|
|
*qblocks = freesp;
|
|
|
|
*qshift = shift;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-01-04 01:35:03 +01:00
|
|
|
/*
|
|
|
|
* If we don't have a user specified preallocation size, dynamically increase
|
|
|
|
* the preallocation size as the size of the file grows. Cap the maximum size
|
|
|
|
* at a single extent or less if the filesystem is near full. The closer the
|
|
|
|
* filesystem is to full, the smaller the maximum prealocation.
|
|
|
|
*/
|
|
|
|
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	xfs_fsblock_t		alloc_blocks = 0;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;

	/* Start from the dynamic estimate; 0 means "use the minimum". */
	alloc_blocks = xfs_iomap_eof_prealloc_initial_size(mp, ip, offset,
							   imap, nimaps);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported prealloc
	 * size, we round up first, apply appropriate throttling, round down and
	 * cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	/* Throttle based on global free space: each threshold doubles it. */
	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
	freesp = mp->m_sb.sb_fdblocks;
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size and provide a shift
	 * value to throttle with.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;

check_writeio:
	/* Never preallocate less than the configured minimum I/O size. */
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;

	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);

	return alloc_blocks;
}
|
|
|
|
|
2010-12-10 09:42:20 +01:00
|
|
|
/*
 * Reserve delayed-allocation blocks covering a buffered write of @count
 * bytes at @offset, optionally extending the reservation past EOF with
 * speculative preallocation.  The mapping covering the write is returned
 * in @ret_imap.  The caller must hold XFS_ILOCK_EXCL.
 */
int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*ret_imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_off_t	aligned_offset;
	xfs_fileoff_t	ioalign;
	xfs_extlen_t	extsz;
	int		nimaps;
	xfs_bmbt_irec_t	imap[XFS_WRITE_IMAPS];
	int		prealloc;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		return XFS_ERROR(error);

	extsz = xfs_get_extsz_hint(ip);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
				imap, XFS_WRITE_IMAPS, &prealloc);
	if (error)
		return error;

retry:
	if (prealloc) {
		xfs_fsblock_t	alloc_blocks;

		alloc_blocks = xfs_iomap_prealloc_size(mp, ip, offset, imap,
						       XFS_WRITE_IMAPS);

		/* Extend the reservation past EOF by the prealloc amount. */
		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + alloc_blocks;
	} else {
		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	}

	if (prealloc || extsz) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return error;
	}

	/*
	 * Make sure preallocation does not create extents beyond the range we
	 * actually support in this filesystem.
	 */
	if (last_fsb > XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes))
		last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);

	ASSERT(last_fsb > offset_fsb);

	nimaps = XFS_WRITE_IMAPS;
	error = xfs_bmapi_delay(ip, offset_fsb, last_fsb - offset_fsb,
				imap, &nimaps, XFS_BMAPI_ENTIRE);
	/* ENOSPC/EDQUOT may still have produced mappings; check below. */
	switch (error) {
	case 0:
	case ENOSPC:
	case EDQUOT:
		break;
	default:
		return XFS_ERROR(error);
	}

	/*
	 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. Retry
	 * without EOF preallocation.
	 */
	if (nimaps == 0) {
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc) {
			prealloc = 0;
			error = 0;
			goto retry;
		}
		return XFS_ERROR(error ? error : ENOSPC);
	}

	/* Block zero on a non-realtime inode indicates corruption. */
	if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
		return xfs_alert_fsblock_zero(ip, &imap[0]);

	/*
	 * Tag the inode as speculatively preallocated so we can reclaim this
	 * space on demand, if necessary.
	 */
	if (prealloc)
		xfs_inode_set_eofblocks_tag(ip);

	*ret_imap = imap[0];
	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Pass in a delayed allocate extent, convert it to real extents;
|
|
|
|
* return to the caller the extent we create which maps on top of
|
|
|
|
* the originating callers request.
|
|
|
|
*
|
|
|
|
* Called without a lock on the inode.
|
2007-11-23 06:29:11 +01:00
|
|
|
*
|
|
|
|
* We no longer bother to look at the incoming map - all we have to
|
|
|
|
* guarantee is that whatever we allocate fills the required range.
|
2005-04-17 00:20:36 +02:00
|
|
|
*/
|
2010-12-10 09:42:20 +01:00
|
|
|
/*
 * xfs_iomap_write_allocate - convert a delayed-allocation extent to real
 * blocks during writeback.
 *
 * @ip:     inode whose delalloc extent is being converted
 * @offset: byte offset the caller actually needs mapped
 * @imap:   in: the delalloc extent to convert; out: the real extent we
 *          allocated (overwritten once we start calling xfs_bmapi_write)
 *
 * Called without the ilock held; we take and drop XFS_ILOCK_EXCL around
 * each allocation transaction.  Loops allocating within the delalloc
 * range until the extent returned in *imap covers @offset, then returns
 * 0.  On failure returns a positive XFS errno; EAGAIN means the range
 * was truncated away underneath us and there is nothing left to convert.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_bmbt_irec_t *imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps, committed;
	int		error = 0;
	int		nres;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	/* Outer loop: walk the delalloc range until @offset is covered. */
	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */

		nimaps = 0;
		while (nimaps == 0) {
			/*
			 * XFS_TRANS_RESERVE lets this dip into reserved
			 * blocks: we are committed to writeback here and
			 * must not fail with ENOSPC after data was accepted.
			 */
			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
			tp->t_flags |= XFS_TRANS_RESERVE;
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
						  nres, 0);
			if (error) {
				/* Reservation failed: ilock never taken. */
				xfs_trans_cancel(tp, 0);
				return XFS_ERROR(error);
			}
			/* Lock order: reserve log space before taking ilock. */
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_bmap_init(&free_list, &first_block);

			/*
			 * it is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while. We have to be careful about truncates or hole
			 * punchs here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit an delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map we look for overlap with the desired
			 * range and abort as soon as we find it. Also, given
			 * that we only return a single map, having one beyond
			 * what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before block on the pages we
			 * currently have locked under writeback. Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(ip, &last_block,
							XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			/* Clamp the allocation to max(last extent, EOF). */
			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					/*
					 * Whole range truncated away while we
					 * were unlocked - nothing to allocate.
					 */
					error = EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb, 0,
						&first_block, 1,
						imap, &nimaps, &free_list);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list, &committed);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
			if (error)
				/* tp already freed by failed commit - only unlock. */
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the callers request
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			/* Success: the new extent covers the caller's offset. */
			XFS_STATS_INC(xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	/* Undo any queued extent frees, abort the dirty transaction. */
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}
|
|
|
|
|
|
|
|
/*
 * xfs_iomap_write_unwritten - convert a range of unwritten (preallocated)
 * extents to normal written extents after the data has been written, e.g.
 * at direct I/O or writeback completion.
 *
 * @ip:     inode containing the unwritten extents
 * @offset: byte offset of the start of the range to convert
 * @count:  byte length of the range
 *
 * Loops over the range one transaction per converted extent, logging the
 * in-core file size as it goes so a crash mid-conversion never exposes a
 * size beyond the converted data.  Returns 0 on success, positive XFS
 * errno on failure.
 */
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	xfs_bmap_free_t free_list;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		committed;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	/* Round the range out to whole filesystem blocks. */
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we open code the transaction allocation here
		 * to pass KM_NOFS--we can't risk to recursing back into
		 * the filesystem here as we might be asked to write out
		 * the same inode that we complete here and might deadlock
		 * on the iolock.
		 */
		/*
		 * sb_start_intwrite() + XFS_TRANS_FREEZE_PROT: take freeze
		 * protection manually since we bypassed xfs_trans_alloc().
		 */
		sb_start_intwrite(mp->m_super);
		tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
		tp->t_flags |= XFS_TRANS_RESERVE | XFS_TRANS_FREEZE_PROT;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
					  resblks, 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return XFS_ERROR(error);
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				  XFS_BMAPI_CONVERT, &firstfsb,
				  1, &imap, &nimaps, &free_list);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		/* xfs_new_eof() returns 0 if the size does not grow. */
		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return XFS_ERROR(error);

		/* Block 0 is only valid on realtime inodes - else corruption. */
		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		/* Advance past the extent we just converted. */
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	/* Abort the dirty transaction and drop the ilock taken above. */
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}
|