[XFS] Add xfs_icsb_sync_counters_locked for when m_sb_lock already held

Add a new xfs_icsb_sync_counters_locked for the case where m_sb_lock
is already taken and add a flags argument to xfs_icsb_sync_counters so
that xfs_icsb_sync_counters_flags is not needed.

SGI-PV: 976035
SGI-Modid: xfs-linux-melb:xfs-kern:30917a

Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Christoph Hellwig 2008-04-22 17:34:37 +10:00 committed by Lachlan McIlroy
parent e8b0ebaa11
commit d4d90b577e
4 changed files with 14 additions and 20 deletions
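The change follows the usual kernel split between a *_locked helper that expects the caller to already hold the lock and an unlocked wrapper that takes the lock around it. The sketch below is only a minimal user-space illustration of that convention: the struct, the pthread mutex standing in for m_sb_lock, and the counter fields are invented for the example and are not the XFS code.

/*
 * Sketch of the locked/unlocked pairing this commit introduces.
 * Assumed/illustrative names: struct fake_mount, sync_counters(),
 * sync_counters_locked(); a pthread mutex plays the role of m_sb_lock.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_mount {
	pthread_mutex_t	sb_lock;	/* stand-in for m_sb_lock */
	long		icount;		/* incore superblock counter */
	long		percpu_icount;	/* pretend per-cpu aggregate */
};

/* Caller must already hold mp->sb_lock (the *_locked variant). */
static void sync_counters_locked(struct fake_mount *mp, int flags)
{
	(void)flags;			/* e.g. a lazy-count hint */
	mp->icount = mp->percpu_icount;	/* fold per-cpu value into the "superblock" */
}

/* Unlocked wrapper: takes the lock itself, then calls the locked variant. */
static void sync_counters(struct fake_mount *mp, int flags)
{
	pthread_mutex_lock(&mp->sb_lock);
	sync_counters_locked(mp, flags);
	pthread_mutex_unlock(&mp->sb_lock);
}

int main(void)
{
	struct fake_mount m = { .sb_lock = PTHREAD_MUTEX_INITIALIZER,
				.percpu_icount = 42 };

	/* Path that does not hold the lock, like xfs_fs_statfs(). */
	sync_counters(&m, 0);

	/* Path that already holds the lock, like xfs_reserve_blocks(). */
	pthread_mutex_lock(&m.sb_lock);
	sync_counters_locked(&m, 0);
	pthread_mutex_unlock(&m.sb_lock);

	printf("icount = %ld\n", m.icount);
	return 0;
}

With this split, a caller that already holds m_sb_lock uses the _locked variant instead of passing an "already locked" flag, while other callers let the wrapper take the lock, which is what makes xfs_icsb_sync_counters_flags unnecessary.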

View File

@@ -1181,7 +1181,7 @@ xfs_fs_statfs(
 	statp->f_fsid.val[0] = (u32)id;
 	statp->f_fsid.val[1] = (u32)(id >> 32);
-	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
+	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
 	spin_lock(&mp->m_sb_lock);
 	statp->f_bsize = sbp->sb_blocksize;

View File

@@ -462,7 +462,7 @@ xfs_fs_counts(
 	xfs_mount_t		*mp,
 	xfs_fsop_counts_t	*cnt)
 {
-	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
+	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
 	spin_lock(&mp->m_sb_lock);
 	cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
 	cnt->freertx = mp->m_sb.sb_frextents;
@@ -524,7 +524,7 @@ xfs_reserve_blocks(
 	 */
 retry:
 	spin_lock(&mp->m_sb_lock);
-	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED);
+	xfs_icsb_sync_counters_locked(mp, 0);
 	/*
 	 * If our previous reservation was larger than the current value,

View File

@@ -55,7 +55,6 @@ STATIC void xfs_unmountfs_wait(xfs_mount_t *);
 STATIC void	xfs_icsb_destroy_counters(xfs_mount_t *);
 STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
 						int, int);
-STATIC void	xfs_icsb_sync_counters(xfs_mount_t *);
 STATIC int	xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
 						int64_t, int);
 STATIC void	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
@@ -64,7 +63,6 @@ STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
 #define xfs_icsb_destroy_counters(mp)		do { } while (0)
 #define xfs_icsb_balance_counter(mp, a, b, c)	do { } while (0)
-#define xfs_icsb_sync_counters(mp)		do { } while (0)
 #define xfs_icsb_modify_counters(mp, a, b, c)	do { } while (0)
 #endif
@@ -1400,7 +1398,7 @@ xfs_log_sbcount(
 	if (!xfs_fs_writable(mp))
 		return 0;
-	xfs_icsb_sync_counters(mp);
+	xfs_icsb_sync_counters(mp, 0);
 	/*
 	 * we don't need to do this if we are updating the superblock
@@ -2278,38 +2276,33 @@ xfs_icsb_enable_counter(
 }
 void
-xfs_icsb_sync_counters_flags(
+xfs_icsb_sync_counters_locked(
 	xfs_mount_t	*mp,
 	int		flags)
 {
 	xfs_icsb_cnts_t	cnt;
-	/* Pass 1: lock all counters */
-	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
-		spin_lock(&mp->m_sb_lock);
 	xfs_icsb_count(mp, &cnt, flags);
-	/* Step 3: update mp->m_sb fields */
 	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
 		mp->m_sb.sb_icount = cnt.icsb_icount;
 	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
 		mp->m_sb.sb_ifree = cnt.icsb_ifree;
 	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
 		mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
-	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
-		spin_unlock(&mp->m_sb_lock);
 }
 /*
  * Accurate update of per-cpu counters to incore superblock
  */
-STATIC void
+void
 xfs_icsb_sync_counters(
-	xfs_mount_t	*mp)
+	xfs_mount_t	*mp,
+	int		flags)
 {
-	xfs_icsb_sync_counters_flags(mp, 0);
+	spin_lock(&mp->m_sb_lock);
+	xfs_icsb_sync_counters_locked(mp, flags);
+	spin_unlock(&mp->m_sb_lock);
 }
 /*

View File

@@ -211,12 +211,13 @@ typedef struct xfs_icsb_cnts {
 extern int	xfs_icsb_init_counters(struct xfs_mount *);
 extern void	xfs_icsb_reinit_counters(struct xfs_mount *);
-extern void	xfs_icsb_sync_counters_flags(struct xfs_mount *, int);
+extern void	xfs_icsb_sync_counters(struct xfs_mount *, int);
+extern void	xfs_icsb_sync_counters_locked(struct xfs_mount *, int);
 #else
 #define xfs_icsb_init_counters(mp)		(0)
 #define xfs_icsb_reinit_counters(mp)		do { } while (0)
-#define xfs_icsb_sync_counters_flags(mp, flags)	do { } while (0)
+#define xfs_icsb_sync_counters(mp, flags)	do { } while (0)
 #endif
 typedef struct xfs_ail {