xfs: kill XBF_FS_MANAGED buffers

Filesystem-level managed buffers are buffers that have their
lifecycle controlled by the filesystem layer, not the buffer cache.
We currently cache these buffers, which makes cleanup and cache
walking somewhat troublesome. Convert the fs managed buffers to
uncached buffers obtained via xfs_buf_get_uncached(), and remove
the XBF_FS_MANAGED special cases from the buffer cache.
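
As an illustrative sketch of the resulting pattern (the helper name
below is made up; the xfs_buf_* calls and their arguments are the
ones visible in the xfs_readsb() hunks further down):

        /* hypothetical helper: read the superblock without caching it */
        static int
        read_sb_uncached(
                struct xfs_mount        *mp,
                unsigned int            sector_size)
        {
                struct xfs_buf          *bp;

                /* buffer comes straight from the device, never hashed */
                bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
                                           XFS_SB_DADDR, sector_size, 0);
                if (!bp)
                        return EIO;

                /* the filesystem now holds the only reference */
                mp->m_sb_bp = bp;
                xfs_buf_unlock(bp);
                return 0;
        }

Teardown is then an explicit lock-and-release of that private
reference (see the new xfs_freesb() below) rather than the
XBF_FS_MANAGED special cases in xfs_buf_rele() and xfs_wait_buftarg().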

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Alex Elder <aelder@sgi.com>
commit 26af655233 (parent ebad861b57)
Author:    Dave Chinner <dchinner@redhat.com>
Date:      2010-09-22 10:47:20 +10:00
Committer: Alex Elder <aelder@sgi.com>
3 changed files with 20 additions and 59 deletions


@@ -826,8 +826,6 @@ xfs_buf_rele(
                         atomic_inc(&bp->b_hold);
                         spin_unlock(&hash->bh_lock);
                         (*(bp->b_relse)) (bp);
-                } else if (bp->b_flags & XBF_FS_MANAGED) {
-                        spin_unlock(&hash->bh_lock);
                 } else {
                         ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
                         list_del_init(&bp->b_hash_list);
@@ -1433,26 +1431,16 @@ void
 xfs_wait_buftarg(
         xfs_buftarg_t *btp)
 {
-        xfs_buf_t *bp, *n;
         xfs_bufhash_t *hash;
         uint i;
         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
                 hash = &btp->bt_hash[i];
-again:
                 spin_lock(&hash->bh_lock);
-                list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
-                        ASSERT(btp == bp->b_target);
-                        if (!(bp->b_flags & XBF_FS_MANAGED)) {
-                                spin_unlock(&hash->bh_lock);
-                                /*
-                                 * Catch superblock reference count leaks
-                                 * immediately
-                                 */
-                                BUG_ON(bp->b_bn == 0);
-                                delay(100);
-                                goto again;
-                        }
+                while (!list_empty(&hash->bh_list)) {
+                        spin_unlock(&hash->bh_lock);
+                        delay(100);
+                        spin_lock(&hash->bh_lock);
                 }
                 spin_unlock(&hash->bh_lock);
         }


@@ -51,7 +51,6 @@ typedef enum {
 #define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
 #define XBF_DELWRI (1 << 6) /* buffer has dirty pages */
 #define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */
-#define XBF_FS_MANAGED (1 << 8) /* filesystem controls freeing memory */
 #define XBF_ORDERED (1 << 11)/* use ordered writes */
 #define XBF_READ_AHEAD (1 << 12)/* asynchronous read-ahead */
 #define XBF_LOG_BUFFER (1 << 13)/* this is a buffer used for the log */
@@ -104,7 +103,6 @@ typedef unsigned int xfs_buf_flags_t;
         { XBF_DONE, "DONE" }, \
         { XBF_DELWRI, "DELWRI" }, \
         { XBF_STALE, "STALE" }, \
-        { XBF_FS_MANAGED, "FS_MANAGED" }, \
         { XBF_ORDERED, "ORDERED" }, \
         { XBF_READ_AHEAD, "READ_AHEAD" }, \
         { XBF_LOCK, "LOCK" }, /* should never be set */\
@@ -279,8 +277,6 @@ extern void xfs_buf_terminate(void);
                 XFS_BUF_DONE(bp); \
         } while (0)
-#define XFS_BUF_UNMANAGE(bp) ((bp)->b_flags &= ~XBF_FS_MANAGED)
 #define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI)
 #define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp)
 #define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI)


@@ -646,7 +646,6 @@ int
 xfs_readsb(xfs_mount_t *mp, int flags)
 {
         unsigned int sector_size;
-        unsigned int extra_flags;
         xfs_buf_t *bp;
         int error;
@@ -659,28 +658,24 @@ xfs_readsb(xfs_mount_t *mp, int flags)
          * access to the superblock.
          */
         sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
-        extra_flags = XBF_LOCK | XBF_FS_MANAGED | XBF_MAPPED;
-        bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size),
-                          extra_flags);
-        if (!bp || XFS_BUF_ISERROR(bp)) {
-                xfs_fs_mount_cmn_err(flags, "SB read failed");
-                error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
-                goto fail;
+reread:
+        bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
+                                   XFS_SB_DADDR, sector_size, 0);
+        if (!bp) {
+                xfs_fs_mount_cmn_err(flags, "SB buffer read failed");
+                return EIO;
         }
-        ASSERT(XFS_BUF_ISBUSY(bp));
-        ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
         /*
          * Initialize the mount structure from the superblock.
          * But first do some basic consistency checking.
          */
         xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
         error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags);
         if (error) {
                 xfs_fs_mount_cmn_err(flags, "SB validate failed");
-                goto fail;
+                goto release_buf;
         }
         /*
@@ -691,7 +686,7 @@ xfs_readsb(xfs_mount_t *mp, int flags)
                         "device supports only %u byte sectors (not %u)",
                         sector_size, mp->m_sb.sb_sectsize);
                 error = ENOSYS;
-                goto fail;
+                goto release_buf;
         }
         /*
@@ -699,33 +694,20 @@ xfs_readsb(xfs_mount_t *mp, int flags)
          * re-read the superblock so the buffer is correctly sized.
          */
         if (sector_size < mp->m_sb.sb_sectsize) {
-                XFS_BUF_UNMANAGE(bp);
                 xfs_buf_relse(bp);
                 sector_size = mp->m_sb.sb_sectsize;
-                bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR,
-                                  BTOBB(sector_size), extra_flags);
-                if (!bp || XFS_BUF_ISERROR(bp)) {
-                        xfs_fs_mount_cmn_err(flags, "SB re-read failed");
-                        error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
-                        goto fail;
-                }
-                ASSERT(XFS_BUF_ISBUSY(bp));
-                ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+                goto reread;
         }
         /* Initialize per-cpu counters */
         xfs_icsb_reinit_counters(mp);
         mp->m_sb_bp = bp;
-        xfs_buf_relse(bp);
-        ASSERT(XFS_BUF_VALUSEMA(bp) > 0);
+        xfs_buf_unlock(bp);
         return 0;
-fail:
-        if (bp) {
-                XFS_BUF_UNMANAGE(bp);
-                xfs_buf_relse(bp);
-        }
+release_buf:
+        xfs_buf_relse(bp);
         return error;
 }
@@ -2005,18 +1987,13 @@ xfs_getsb(
  */
 void
 xfs_freesb(
-        xfs_mount_t *mp)
+        struct xfs_mount *mp)
 {
-        xfs_buf_t *bp;
+        struct xfs_buf *bp = mp->m_sb_bp;
-        /*
-         * Use xfs_getsb() so that the buffer will be locked
-         * when we call xfs_buf_relse().
-         */
-        bp = xfs_getsb(mp, 0);
-        XFS_BUF_UNMANAGE(bp);
-        xfs_buf_relse(bp);
+        xfs_buf_lock(bp);
         mp->m_sb_bp = NULL;
+        xfs_buf_relse(bp);
 }
 /*