Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-nmw

* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-nmw:
  GFS2: local functions should be static
  GFS2: We only need one ACL getting function
  GFS2: Fix multi-block allocation
  GFS2: decouple quota allocations from block allocations
  GFS2: split function rgblk_search
  GFS2: Fix up "off by one" in the previous patch
  GFS2: move toward a generic multi-block allocator
  GFS2: O_(D)SYNC support for fallocate
  GFS2: remove vestigial al_alloced
  GFS2: combine gfs2_alloc_block and gfs2_alloc_di
  GFS2: Add non-try locks back to get_local_rgrp
  GFS2: f_ra is always valid in dir readahead function
  GFS2: Fix very unlikely memory leak in ACL xattr code
  GFS2: More automated code analysis fixes
  GFS2: Add readahead to sequential directory traversal
  GFS2: Fix up REQ flags
Linus Torvalds 2012-01-08 13:07:54 -08:00
commit 1619ed8f60
19 changed files with 394 additions and 334 deletions
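
The common thread in these patches is an allocation API rework: struct gfs2_alloc is split into struct gfs2_qadata (quota holders) and struct gfs2_blkreserv (the rgrp reservation), gfs2_inplace_reserve() takes the requested block count as an argument instead of reading al_requested, and gfs2_alloc_block()/gfs2_alloc_di() are merged into gfs2_alloc_blocks(). A minimal sketch of the calling pattern the series converges on, pieced together from the hunks below; the wrapper function and its exact error paths are illustrative only, not part of the commit:

static int example_reserve_and_alloc(struct gfs2_inode *ip, unsigned int blocks)
{
	struct gfs2_qadata *qa;
	unsigned int n = blocks;	/* requested extent length (value/result) */
	u64 bn;
	int error;

	qa = gfs2_qadata_get(ip);		/* quota holders only, no rgrp state */
	if (!qa)
		return -ENOMEM;
	error = gfs2_quota_lock_check(ip);
	if (error)
		goto out_put;
	error = gfs2_inplace_reserve(ip, blocks);	/* allocates ip->i_res internally */
	if (error)
		goto out_qunlock;
	/* a transaction would be started here before allocating */
	error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);	/* data blocks, no dinode */
	gfs2_inplace_release(ip);		/* drops the rgrp glock, frees ip->i_res */
out_qunlock:
	gfs2_quota_unlock(ip);
out_put:
	gfs2_qadata_put(ip);
	return error;
}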

View File

@ -38,8 +38,9 @@ static const char *gfs2_acl_name(int type)
return NULL;
}
static struct posix_acl *gfs2_acl_get(struct gfs2_inode *ip, int type)
struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct posix_acl *acl;
const char *name;
char *data;
@ -67,11 +68,6 @@ static struct posix_acl *gfs2_acl_get(struct gfs2_inode *ip, int type)
return acl;
}
struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
{
return gfs2_acl_get(GFS2_I(inode), type);
}
static int gfs2_set_mode(struct inode *inode, umode_t mode)
{
int error = 0;
@ -125,7 +121,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode)
if (S_ISLNK(inode->i_mode))
return 0;
acl = gfs2_acl_get(dip, ACL_TYPE_DEFAULT);
acl = gfs2_get_acl(&dip->i_inode, ACL_TYPE_DEFAULT);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (!acl) {
@ -166,7 +162,7 @@ int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
unsigned int len;
int error;
acl = gfs2_acl_get(ip, ACL_TYPE_ACCESS);
acl = gfs2_get_acl(&ip->i_inode, ACL_TYPE_ACCESS);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (!acl)
@ -216,7 +212,7 @@ static int gfs2_xattr_system_get(struct dentry *dentry, const char *name,
if (type < 0)
return type;
acl = gfs2_acl_get(GFS2_I(inode), type);
acl = gfs2_get_acl(inode, type);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl == NULL)

View File

@ -615,7 +615,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
int alloc_required;
int error = 0;
struct gfs2_alloc *al = NULL;
struct gfs2_qadata *qa = NULL;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
struct page *page;
@ -639,8 +639,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
if (alloc_required) {
al = gfs2_alloc_get(ip);
if (!al) {
qa = gfs2_qadata_get(ip);
if (!qa) {
error = -ENOMEM;
goto out_unlock;
}
@ -649,8 +649,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
if (error)
goto out_alloc_put;
al->al_requested = data_blocks + ind_blocks;
error = gfs2_inplace_reserve(ip);
error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
if (error)
goto out_qunlock;
}
@ -711,7 +710,7 @@ out_trans_fail:
out_qunlock:
gfs2_quota_unlock(ip);
out_alloc_put:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
}
out_unlock:
if (&ip->i_inode == sdp->sd_rindex) {
@ -848,7 +847,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
struct buffer_head *dibh;
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_qadata *qa = ip->i_qadata;
unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
unsigned int to = from + len;
int ret;
@ -880,10 +879,11 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
brelse(dibh);
failed:
gfs2_trans_end(sdp);
if (al) {
if (ip->i_res)
gfs2_inplace_release(ip);
if (qa) {
gfs2_quota_unlock(ip);
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
}
if (inode == sdp->sd_rindex) {
gfs2_glock_dq(&m_ip->i_gh);
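
The new test on ip->i_res in the failed: path above works because i_res only exists while a block reservation is outstanding: gfs2_inplace_reserve() (further down in this diff) allocates it via gfs2_blkrsv_get() on entry and frees it again if the reservation fails, while gfs2_inplace_release() frees it on the success path. A hypothetical helper spelling out that contract:

static void example_release_if_reserved(struct gfs2_inode *ip)
{
	/* non-NULL ip->i_res means a reservation is outstanding */
	if (ip->i_res)
		gfs2_inplace_release(ip);	/* also frees ip->i_res */
}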

View File

@ -133,7 +133,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
and write it out to disk */
unsigned int n = 1;
error = gfs2_alloc_block(ip, &block, &n);
error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
if (error)
goto out_brelse;
if (isdir) {
@ -503,7 +503,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
do {
int error;
n = blks - alloced;
error = gfs2_alloc_block(ip, &bn, &n);
error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
if (error)
return error;
alloced += n;
@ -743,9 +743,6 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
else if (ip->i_depth)
revokes = sdp->sd_inptrs;
if (error)
return error;
memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
bstart = 0;
blen = 0;
@ -1044,7 +1041,7 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;
find_metapath(sdp, lblock, &mp, ip->i_height);
if (!gfs2_alloc_get(ip))
if (!gfs2_qadata_get(ip))
return -ENOMEM;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
@ -1064,7 +1061,7 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
gfs2_quota_unhold(ip);
out:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
return error;
}
@ -1166,21 +1163,20 @@ static int do_grow(struct inode *inode, u64 size)
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct buffer_head *dibh;
struct gfs2_alloc *al = NULL;
struct gfs2_qadata *qa = NULL;
int error;
if (gfs2_is_stuffed(ip) &&
(size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
al = gfs2_alloc_get(ip);
if (al == NULL)
qa = gfs2_qadata_get(ip);
if (qa == NULL)
return -ENOMEM;
error = gfs2_quota_lock_check(ip);
if (error)
goto do_grow_alloc_put;
al->al_requested = 1;
error = gfs2_inplace_reserve(ip);
error = gfs2_inplace_reserve(ip, 1);
if (error)
goto do_grow_qunlock;
}
@ -1189,7 +1185,7 @@ static int do_grow(struct inode *inode, u64 size)
if (error)
goto do_grow_release;
if (al) {
if (qa) {
error = gfs2_unstuff_dinode(ip, NULL);
if (error)
goto do_end_trans;
@ -1208,12 +1204,12 @@ static int do_grow(struct inode *inode, u64 size)
do_end_trans:
gfs2_trans_end(sdp);
do_grow_release:
if (al) {
if (qa) {
gfs2_inplace_release(ip);
do_grow_qunlock:
gfs2_quota_unlock(ip);
do_grow_alloc_put:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
}
return error;
}

View File

@ -76,6 +76,8 @@
#define IS_LEAF 1 /* Hashed (leaf) directory */
#define IS_DINODE 2 /* Linear (stuffed dinode block) directory */
#define MAX_RA_BLOCKS 32 /* max read-ahead blocks */
#define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
#define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1))
@ -821,7 +823,7 @@ static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh,
struct gfs2_dirent *dent;
struct qstr name = { .name = "", .len = 0, .hash = 0 };
error = gfs2_alloc_block(ip, &bn, &n);
error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
if (error)
return NULL;
bh = gfs2_meta_new(ip->i_gl, bn);
@ -1376,6 +1378,52 @@ out:
return error;
}
/**
* gfs2_dir_readahead - Issue read-ahead requests for leaf blocks.
*
* Note: we can't calculate each index like dir_e_read can because we don't
* have the leaf, and therefore we don't have the depth, and therefore we
* don't have the length. So we have to just read enough ahead to make up
* for the loss of information.
*/
static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index,
struct file_ra_state *f_ra)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_glock *gl = ip->i_gl;
struct buffer_head *bh;
u64 blocknr = 0, last;
unsigned count;
/* First check if we've already read-ahead for the whole range. */
if (index + MAX_RA_BLOCKS < f_ra->start)
return;
f_ra->start = max((pgoff_t)index, f_ra->start);
for (count = 0; count < MAX_RA_BLOCKS; count++) {
if (f_ra->start >= hsize) /* if exceeded the hash table */
break;
last = blocknr;
blocknr = be64_to_cpu(ip->i_hash_cache[f_ra->start]);
f_ra->start++;
if (blocknr == last)
continue;
bh = gfs2_getbuf(gl, blocknr, 1);
if (trylock_buffer(bh)) {
if (buffer_uptodate(bh)) {
unlock_buffer(bh);
brelse(bh);
continue;
}
bh->b_end_io = end_buffer_read_sync;
submit_bh(READA | REQ_META, bh);
continue;
}
brelse(bh);
}
}
/**
* dir_e_read - Reads the entries from a directory into a filldir buffer
@ -1388,7 +1436,7 @@ out:
*/
static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
filldir_t filldir)
filldir_t filldir, struct file_ra_state *f_ra)
{
struct gfs2_inode *dip = GFS2_I(inode);
u32 hsize, len = 0;
@ -1402,10 +1450,14 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
hash = gfs2_dir_offset2hash(*offset);
index = hash >> (32 - dip->i_depth);
if (dip->i_hash_cache == NULL)
f_ra->start = 0;
lp = gfs2_dir_get_hash_table(dip);
if (IS_ERR(lp))
return PTR_ERR(lp);
gfs2_dir_readahead(inode, hsize, index, f_ra);
while (index < hsize) {
error = gfs2_dir_read_leaf(inode, offset, opaque, filldir,
&copied, &depth,
@ -1423,7 +1475,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
}
int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
filldir_t filldir)
filldir_t filldir, struct file_ra_state *f_ra)
{
struct gfs2_inode *dip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
@ -1437,7 +1489,7 @@ int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
return 0;
if (dip->i_diskflags & GFS2_DIF_EXHASH)
return dir_e_read(inode, offset, opaque, filldir);
return dir_e_read(inode, offset, opaque, filldir, f_ra);
if (!gfs2_is_stuffed(dip)) {
gfs2_consist_inode(dip);
@ -1798,7 +1850,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
if (!ht)
return -ENOMEM;
if (!gfs2_alloc_get(dip)) {
if (!gfs2_qadata_get(dip)) {
error = -ENOMEM;
goto out;
}
@ -1887,7 +1939,7 @@ out_rlist:
gfs2_rlist_free(&rlist);
gfs2_quota_unhold(dip);
out_put:
gfs2_alloc_put(dip);
gfs2_qadata_put(dip);
out:
kfree(ht);
return error;

View File

@ -25,7 +25,7 @@ extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
const struct gfs2_inode *ip);
extern int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry);
extern int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
filldir_t filldir);
filldir_t filldir, struct file_ra_state *f_ra);
extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
const struct gfs2_inode *nip, unsigned int new_type);

View File

@ -99,6 +99,7 @@ static int gfs2_get_name(struct dentry *parent, char *name,
struct gfs2_holder gh;
u64 offset = 0;
int error;
struct file_ra_state f_ra = { .start = 0 };
if (!dir)
return -EINVAL;
@ -118,7 +119,7 @@ static int gfs2_get_name(struct dentry *parent, char *name,
if (error)
return error;
error = gfs2_dir_read(dir, &offset, &gnfd, get_name_filldir);
error = gfs2_dir_read(dir, &offset, &gnfd, get_name_filldir, &f_ra);
gfs2_glock_dq_uninit(&gh);

View File

@ -105,7 +105,7 @@ static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
return error;
}
error = gfs2_dir_read(dir, &offset, dirent, filldir);
error = gfs2_dir_read(dir, &offset, dirent, filldir, &file->f_ra);
gfs2_glock_dq_uninit(&d_gh);
@ -365,7 +365,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
u64 pos = page->index << PAGE_CACHE_SHIFT;
unsigned int data_blocks, ind_blocks, rblocks;
struct gfs2_holder gh;
struct gfs2_alloc *al;
struct gfs2_qadata *qa;
loff_t size;
int ret;
@ -393,16 +393,15 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
}
ret = -ENOMEM;
al = gfs2_alloc_get(ip);
if (al == NULL)
qa = gfs2_qadata_get(ip);
if (qa == NULL)
goto out_unlock;
ret = gfs2_quota_lock_check(ip);
if (ret)
goto out_alloc_put;
gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
al->al_requested = data_blocks + ind_blocks;
ret = gfs2_inplace_reserve(ip);
ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
if (ret)
goto out_quota_unlock;
@ -448,7 +447,7 @@ out_trans_fail:
out_quota_unlock:
gfs2_quota_unlock(ip);
out_alloc_put:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
out_unlock:
gfs2_glock_dq(&gh);
out:
@ -609,7 +608,7 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
struct inode *inode = mapping->host;
int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
struct gfs2_inode *ip = GFS2_I(inode);
int ret, ret1 = 0;
int ret = 0, ret1 = 0;
if (mapping->nrpages) {
ret1 = filemap_fdatawrite_range(mapping, start, end);
@ -750,8 +749,10 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
struct gfs2_inode *ip = GFS2_I(inode);
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
loff_t bytes, max_bytes;
struct gfs2_alloc *al;
struct gfs2_qadata *qa;
int error;
const loff_t pos = offset;
const loff_t count = len;
loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
loff_t max_chunk_size = UINT_MAX & bsize_mask;
@ -782,8 +783,8 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
while (len > 0) {
if (len < bytes)
bytes = len;
al = gfs2_alloc_get(ip);
if (!al) {
qa = gfs2_qadata_get(ip);
if (!qa) {
error = -ENOMEM;
goto out_unlock;
}
@ -795,8 +796,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
retry:
gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
al->al_requested = data_blocks + ind_blocks;
error = gfs2_inplace_reserve(ip);
error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
if (error) {
if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
bytes >>= 1;
@ -810,7 +810,6 @@ retry:
max_bytes = bytes;
calc_max_reserv(ip, (len > max_chunk_size)? max_chunk_size: len,
&max_bytes, &data_blocks, &ind_blocks);
al->al_requested = data_blocks + ind_blocks;
rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
RES_RG_HDR + gfs2_rg_blocks(ip);
@ -832,8 +831,11 @@ retry:
offset += max_bytes;
gfs2_inplace_release(ip);
gfs2_quota_unlock(ip);
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
}
if (error == 0)
error = generic_write_sync(file, pos, count);
goto out_unlock;
out_trans_fail:
@ -841,7 +843,7 @@ out_trans_fail:
out_qunlock:
gfs2_quota_unlock(ip);
out_alloc_put:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
out_unlock:
gfs2_glock_dq(&ip->i_gh);
out_uninit:

View File

@ -244,17 +244,16 @@ struct gfs2_glock {
#define GFS2_MIN_LVB_SIZE 32 /* Min size of LVB that gfs2 supports */
struct gfs2_alloc {
struct gfs2_qadata { /* quota allocation data */
/* Quota stuff */
struct gfs2_quota_data *al_qd[2*MAXQUOTAS];
struct gfs2_holder al_qd_ghs[2*MAXQUOTAS];
unsigned int al_qd_num;
struct gfs2_quota_data *qa_qd[2*MAXQUOTAS];
struct gfs2_holder qa_qd_ghs[2*MAXQUOTAS];
unsigned int qa_qd_num;
};
u32 al_requested; /* Filled in by caller of gfs2_inplace_reserve() */
u32 al_alloced; /* Filled in by gfs2_alloc_*() */
/* Filled in by gfs2_inplace_reserve() */
struct gfs2_holder al_rgd_gh;
struct gfs2_blkreserv {
u32 rs_requested; /* Filled in by caller of gfs2_inplace_reserve() */
struct gfs2_holder rs_rgd_gh; /* Filled in by gfs2_inplace_reserve() */
};
enum {
@ -275,7 +274,8 @@ struct gfs2_inode {
struct gfs2_glock *i_gl; /* Move into i_gh? */
struct gfs2_holder i_iopen_gh;
struct gfs2_holder i_gh; /* for prepare/commit_write only */
struct gfs2_alloc *i_alloc;
struct gfs2_qadata *i_qadata; /* quota allocation data */
struct gfs2_blkreserv *i_res; /* resource group block reservation */
struct gfs2_rgrpd *i_rgd;
u64 i_goal; /* goal block for allocations */
struct rw_semaphore i_rw_mutex;

View File

@ -389,12 +389,13 @@ static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
int error;
int dblocks = 1;
if (gfs2_alloc_get(dip) == NULL)
return -ENOMEM;
error = gfs2_rindex_update(sdp);
if (error)
fs_warn(sdp, "rindex update returns %d\n", error);
dip->i_alloc->al_requested = RES_DINODE;
error = gfs2_inplace_reserve(dip);
error = gfs2_inplace_reserve(dip, RES_DINODE);
if (error)
goto out;
@ -402,14 +403,13 @@ static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
if (error)
goto out_ipreserv;
error = gfs2_alloc_di(dip, no_addr, generation);
error = gfs2_alloc_blocks(dip, no_addr, &dblocks, 1, generation);
gfs2_trans_end(sdp);
out_ipreserv:
gfs2_inplace_release(dip);
out:
gfs2_alloc_put(dip);
return error;
}
@ -525,7 +525,7 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
int error;
munge_mode_uid_gid(dip, &mode, &uid, &gid);
if (!gfs2_alloc_get(dip))
if (!gfs2_qadata_get(dip))
return -ENOMEM;
error = gfs2_quota_lock(dip, uid, gid);
@ -547,7 +547,7 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
out_quota:
gfs2_quota_unlock(dip);
out:
gfs2_alloc_put(dip);
gfs2_qadata_put(dip);
return error;
}
@ -555,13 +555,13 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct gfs2_alloc *al;
struct gfs2_qadata *qa;
int alloc_required;
struct buffer_head *dibh;
int error;
al = gfs2_alloc_get(dip);
if (!al)
qa = gfs2_qadata_get(dip);
if (!qa)
return -ENOMEM;
error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
@ -576,9 +576,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
if (error)
goto fail_quota_locks;
al->al_requested = sdp->sd_max_dirres;
error = gfs2_inplace_reserve(dip);
error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres);
if (error)
goto fail_quota_locks;
@ -619,11 +617,11 @@ fail_quota_locks:
gfs2_quota_unlock(dip);
fail:
gfs2_alloc_put(dip);
gfs2_qadata_put(dip);
return error;
}
int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
static int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
void *fs_info)
{
const struct xattr *xattr;
@ -728,9 +726,12 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
brelse(bh);
gfs2_trans_end(sdp);
gfs2_inplace_release(dip);
/* Check if we reserved space in the rgrp. Function link_dinode may
not, depending on whether alloc is required. */
if (dip->i_res)
gfs2_inplace_release(dip);
gfs2_quota_unlock(dip);
gfs2_alloc_put(dip);
gfs2_qadata_put(dip);
mark_inode_dirty(inode);
gfs2_glock_dq_uninit_m(2, ghs);
d_instantiate(dentry, inode);
@ -875,8 +876,9 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
error = 0;
if (alloc_required) {
struct gfs2_alloc *al = gfs2_alloc_get(dip);
if (!al) {
struct gfs2_qadata *qa = gfs2_qadata_get(dip);
if (!qa) {
error = -ENOMEM;
goto out_gunlock;
}
@ -885,9 +887,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
if (error)
goto out_alloc;
al->al_requested = sdp->sd_max_dirres;
error = gfs2_inplace_reserve(dip);
error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres);
if (error)
goto out_gunlock_q;
@ -930,7 +930,7 @@ out_gunlock_q:
gfs2_quota_unlock(dip);
out_alloc:
if (alloc_required)
gfs2_alloc_put(dip);
gfs2_qadata_put(dip);
out_gunlock:
gfs2_glock_dq(ghs + 1);
out_child:
@ -1037,12 +1037,14 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
struct buffer_head *bh;
struct gfs2_holder ghs[3];
struct gfs2_rgrpd *rgd;
int error;
int error = -EROFS;
gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
if (!rgd)
goto out_inodes;
gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
@ -1088,12 +1090,13 @@ out_end_trans:
out_gunlock:
gfs2_glock_dq(ghs + 2);
out_rgrp:
gfs2_holder_uninit(ghs + 2);
gfs2_glock_dq(ghs + 1);
out_child:
gfs2_holder_uninit(ghs + 1);
gfs2_glock_dq(ghs);
out_parent:
gfs2_holder_uninit(ghs + 2);
out_inodes:
gfs2_holder_uninit(ghs + 1);
gfs2_holder_uninit(ghs);
return error;
}
@ -1350,8 +1353,9 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
error = 0;
if (alloc_required) {
struct gfs2_alloc *al = gfs2_alloc_get(ndip);
if (!al) {
struct gfs2_qadata *qa = gfs2_qadata_get(ndip);
if (!qa) {
error = -ENOMEM;
goto out_gunlock;
}
@ -1360,9 +1364,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
if (error)
goto out_alloc;
al->al_requested = sdp->sd_max_dirres;
error = gfs2_inplace_reserve(ndip);
error = gfs2_inplace_reserve(ndip, sdp->sd_max_dirres);
if (error)
goto out_gunlock_q;
@ -1423,7 +1425,7 @@ out_gunlock_q:
gfs2_quota_unlock(ndip);
out_alloc:
if (alloc_required)
gfs2_alloc_put(ndip);
gfs2_qadata_put(ndip);
out_gunlock:
while (x--) {
gfs2_glock_dq(ghs + x);
@ -1584,7 +1586,7 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
if (!(attr->ia_valid & ATTR_GID) || ogid == ngid)
ogid = ngid = NO_QUOTA_CHANGE;
if (!gfs2_alloc_get(ip))
if (!gfs2_qadata_get(ip))
return -ENOMEM;
error = gfs2_quota_lock(ip, nuid, ngid);
@ -1616,7 +1618,7 @@ out_end_trans:
out_gunlock_q:
gfs2_quota_unlock(ip);
out_alloc:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
return error;
}

View File

@ -626,7 +626,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
else
submit_bh(WRITE_FLUSH_FUA | REQ_META | REQ_PRIO, bh);
submit_bh(WRITE_FLUSH_FUA | REQ_META, bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh))

View File

@ -40,7 +40,8 @@ static void gfs2_init_inode_once(void *foo)
inode_init_once(&ip->i_inode);
init_rwsem(&ip->i_rw_mutex);
INIT_LIST_HEAD(&ip->i_trunc_list);
ip->i_alloc = NULL;
ip->i_qadata = NULL;
ip->i_res = NULL;
ip->i_hash_cache = NULL;
}

View File

@ -435,7 +435,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
if (buffer_uptodate(first_bh))
goto out;
if (!buffer_locked(first_bh))
ll_rw_block(READ_SYNC | REQ_META | REQ_PRIO, 1, &first_bh);
ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
dblock++;
extlen--;
@ -444,7 +444,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
bh = gfs2_getbuf(gl, dblock, CREATE);
if (!buffer_uptodate(bh) && !buffer_locked(bh))
ll_rw_block(READA, 1, &bh);
ll_rw_block(READA | REQ_META, 1, &bh);
brelse(bh);
dblock++;
extlen--;

View File

@ -224,7 +224,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
bio->bi_end_io = end_bio_io_page;
bio->bi_private = page;
submit_bio(READ_SYNC | REQ_META | REQ_PRIO, bio);
submit_bio(READ_SYNC | REQ_META, bio);
wait_on_page_locked(page);
bio_put(bio);
if (!PageUptodate(page)) {

View File

@ -494,11 +494,11 @@ static void qdsb_put(struct gfs2_quota_data *qd)
int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_quota_data **qd = al->al_qd;
struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data **qd = qa->qa_qd;
int error;
if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
if (gfs2_assert_warn(sdp, !qa->qa_qd_num) ||
gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
return -EIO;
@ -508,20 +508,20 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
if (error)
goto out;
al->al_qd_num++;
qa->qa_qd_num++;
qd++;
error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
if (error)
goto out;
al->al_qd_num++;
qa->qa_qd_num++;
qd++;
if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
error = qdsb_get(sdp, QUOTA_USER, uid, qd);
if (error)
goto out;
al->al_qd_num++;
qa->qa_qd_num++;
qd++;
}
@ -529,7 +529,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
if (error)
goto out;
al->al_qd_num++;
qa->qa_qd_num++;
qd++;
}
@ -542,16 +542,16 @@ out:
void gfs2_quota_unhold(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_qadata *qa = ip->i_qadata;
unsigned int x;
gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
for (x = 0; x < al->al_qd_num; x++) {
qdsb_put(al->al_qd[x]);
al->al_qd[x] = NULL;
for (x = 0; x < qa->qa_qd_num; x++) {
qdsb_put(qa->qa_qd[x]);
qa->qa_qd[x] = NULL;
}
al->al_qd_num = 0;
qa->qa_qd_num = 0;
}
static int sort_qd(const void *a, const void *b)
@ -712,7 +712,7 @@ get_a_page:
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh)) {
ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
ll_rw_block(READ | REQ_META, 1, &bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
goto unlock_out;
@ -762,7 +762,6 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
struct gfs2_quota_data *qd;
loff_t offset;
unsigned int nalloc = 0, blocks;
struct gfs2_alloc *al = NULL;
int error;
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
@ -792,26 +791,19 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
nalloc++;
}
al = gfs2_alloc_get(ip);
if (!al) {
error = -ENOMEM;
goto out_gunlock;
}
/*
* 1 blk for unstuffing inode if stuffed. We add this extra
* block to the reservation unconditionally. If the inode
* doesn't need unstuffing, the block will be released to the
* rgrp since it won't be allocated during the transaction
*/
al->al_requested = 1;
/* +3 in the end for unstuffing block, inode size update block
* and another block in case quota straddles page boundary and
* two blocks need to be updated instead of 1 */
blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
if (nalloc)
al->al_requested += nalloc * (data_blocks + ind_blocks);
error = gfs2_inplace_reserve(ip);
error = gfs2_inplace_reserve(ip, 1 +
(nalloc * (data_blocks + ind_blocks)));
if (error)
goto out_alloc;
@ -840,8 +832,6 @@ out_end_trans:
out_ipres:
gfs2_inplace_release(ip);
out_alloc:
gfs2_alloc_put(ip);
out_gunlock:
gfs2_glock_dq_uninit(&i_gh);
out:
while (qx--)
@ -925,7 +915,7 @@ fail:
int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data *qd;
unsigned int x;
int error = 0;
@ -938,15 +928,15 @@ int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
return 0;
sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
sort(qa->qa_qd, qa->qa_qd_num, sizeof(struct gfs2_quota_data *),
sort_qd, NULL);
for (x = 0; x < al->al_qd_num; x++) {
for (x = 0; x < qa->qa_qd_num; x++) {
int force = NO_FORCE;
qd = al->al_qd[x];
qd = qa->qa_qd[x];
if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
force = FORCE;
error = do_glock(qd, force, &al->al_qd_ghs[x]);
error = do_glock(qd, force, &qa->qa_qd_ghs[x]);
if (error)
break;
}
@ -955,7 +945,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
set_bit(GIF_QD_LOCKED, &ip->i_flags);
else {
while (x--)
gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
gfs2_glock_dq_uninit(&qa->qa_qd_ghs[x]);
gfs2_quota_unhold(ip);
}
@ -1000,7 +990,7 @@ static int need_sync(struct gfs2_quota_data *qd)
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data *qda[4];
unsigned int count = 0;
unsigned int x;
@ -1008,14 +998,14 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
goto out;
for (x = 0; x < al->al_qd_num; x++) {
for (x = 0; x < qa->qa_qd_num; x++) {
struct gfs2_quota_data *qd;
int sync;
qd = al->al_qd[x];
qd = qa->qa_qd[x];
sync = need_sync(qd);
gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
gfs2_glock_dq_uninit(&qa->qa_qd_ghs[x]);
if (sync && qd_trylock(qd))
qda[count++] = qd;
@ -1048,7 +1038,7 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data *qd;
s64 value;
unsigned int x;
@ -1060,8 +1050,8 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
return 0;
for (x = 0; x < al->al_qd_num; x++) {
qd = al->al_qd[x];
for (x = 0; x < qa->qa_qd_num; x++) {
qd = qa->qa_qd[x];
if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
(qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
@ -1099,7 +1089,7 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
u32 uid, u32 gid)
{
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data *qd;
unsigned int x;
@ -1108,8 +1098,8 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
if (ip->i_diskflags & GFS2_DIF_SYSTEM)
return;
for (x = 0; x < al->al_qd_num; x++) {
qd = al->al_qd[x];
for (x = 0; x < qa->qa_qd_num; x++) {
qd = qa->qa_qd[x];
if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
(qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
@ -1529,7 +1519,6 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
unsigned int data_blocks, ind_blocks;
unsigned int blocks = 0;
int alloc_required;
struct gfs2_alloc *al;
loff_t offset;
int error;
@ -1594,15 +1583,12 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
if (gfs2_is_stuffed(ip))
alloc_required = 1;
if (alloc_required) {
al = gfs2_alloc_get(ip);
if (al == NULL)
goto out_i;
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
&data_blocks, &ind_blocks);
blocks = al->al_requested = 1 + data_blocks + ind_blocks;
error = gfs2_inplace_reserve(ip);
blocks = 1 + data_blocks + ind_blocks;
error = gfs2_inplace_reserve(ip, blocks);
if (error)
goto out_alloc;
goto out_i;
blocks += gfs2_rg_blocks(ip);
}
@ -1617,11 +1603,8 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
gfs2_trans_end(sdp);
out_release:
if (alloc_required) {
if (alloc_required)
gfs2_inplace_release(ip);
out_alloc:
gfs2_alloc_put(ip);
}
out_i:
gfs2_glock_dq_uninit(&i_gh);
out_q:

View File

@ -65,8 +65,8 @@ static const char valid_change[16] = {
};
static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
unsigned char old_state, unsigned char new_state,
unsigned int *n);
unsigned char old_state,
struct gfs2_bitmap **rbi);
/**
* gfs2_setbit - Set a bit in the bitmaps
@ -860,22 +860,36 @@ fail:
}
/**
* gfs2_alloc_get - get the struct gfs2_alloc structure for an inode
* gfs2_qadata_get - get the struct gfs2_qadata structure for an inode
* @ip: the incore GFS2 inode structure
*
* Returns: the struct gfs2_alloc
* Returns: the struct gfs2_qadata
*/
struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
struct gfs2_qadata *gfs2_qadata_get(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
int error;
BUG_ON(ip->i_alloc != NULL);
ip->i_alloc = kzalloc(sizeof(struct gfs2_alloc), GFP_NOFS);
BUG_ON(ip->i_qadata != NULL);
ip->i_qadata = kzalloc(sizeof(struct gfs2_qadata), GFP_NOFS);
error = gfs2_rindex_update(sdp);
if (error)
fs_warn(sdp, "rindex update returns %d\n", error);
return ip->i_alloc;
return ip->i_qadata;
}
/**
* gfs2_blkrsv_get - get the struct gfs2_blkreserv structure for an inode
* @ip: the incore GFS2 inode structure
*
* Returns: the struct gfs2_qadata
*/
static struct gfs2_blkreserv *gfs2_blkrsv_get(struct gfs2_inode *ip)
{
BUG_ON(ip->i_res != NULL);
ip->i_res = kzalloc(sizeof(struct gfs2_blkreserv), GFP_NOFS);
return ip->i_res;
}
/**
@ -890,15 +904,20 @@ struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
static int try_rgrp_fit(const struct gfs2_rgrpd *rgd, const struct gfs2_inode *ip)
{
const struct gfs2_alloc *al = ip->i_alloc;
const struct gfs2_blkreserv *rs = ip->i_res;
if (rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
return 0;
if (rgd->rd_free_clone >= al->al_requested)
if (rgd->rd_free_clone >= rs->rs_requested)
return 1;
return 0;
}
static inline u32 gfs2_bi2rgd_blk(struct gfs2_bitmap *bi, u32 blk)
{
return (bi->bi_start * GFS2_NBBY) + blk;
}
/**
* try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
* @rgd: The rgrp
@ -912,20 +931,20 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
u32 goal = 0, block;
u64 no_addr;
struct gfs2_sbd *sdp = rgd->rd_sbd;
unsigned int n;
struct gfs2_glock *gl;
struct gfs2_inode *ip;
int error;
int found = 0;
struct gfs2_bitmap *bi;
while (goal < rgd->rd_data) {
down_write(&sdp->sd_log_flush_lock);
n = 1;
block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED,
GFS2_BLKST_UNLINKED, &n);
block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED, &bi);
up_write(&sdp->sd_log_flush_lock);
if (block == BFITNOENT)
break;
block = gfs2_bi2rgd_blk(bi, block);
/* rgblk_search can return a block < goal, so we need to
keep it marching forward. */
no_addr = block + rgd->rd_data0;
@ -977,8 +996,8 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *rgd, *begin = NULL;
struct gfs2_alloc *al = ip->i_alloc;
int error, rg_locked;
struct gfs2_blkreserv *rs = ip->i_res;
int error, rg_locked, flags = LM_FLAG_TRY;
int loops = 0;
if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal))
@ -997,7 +1016,7 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
error = 0;
} else {
error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
LM_FLAG_TRY, &al->al_rgd_gh);
flags, &rs->rs_rgd_gh);
}
switch (error) {
case 0:
@ -1008,12 +1027,14 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
if (rgd->rd_flags & GFS2_RDF_CHECK)
try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
if (!rg_locked)
gfs2_glock_dq_uninit(&al->al_rgd_gh);
gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
/* fall through */
case GLR_TRYFAILED:
rgd = gfs2_rgrpd_get_next(rgd);
if (rgd == begin)
if (rgd == begin) {
flags = 0;
loops++;
}
break;
default:
return error;
@ -1023,6 +1044,13 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
return -ENOSPC;
}
static void gfs2_blkrsv_put(struct gfs2_inode *ip)
{
BUG_ON(ip->i_res == NULL);
kfree(ip->i_res);
ip->i_res = NULL;
}
/**
* gfs2_inplace_reserve - Reserve space in the filesystem
* @ip: the inode to reserve space for
@ -1030,16 +1058,23 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
* Returns: errno
*/
int gfs2_inplace_reserve(struct gfs2_inode *ip)
int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_blkreserv *rs;
int error = 0;
u64 last_unlinked = NO_BLOCK;
int tries = 0;
if (gfs2_assert_warn(sdp, al->al_requested))
return -EINVAL;
rs = gfs2_blkrsv_get(ip);
if (!rs)
return -ENOMEM;
rs->rs_requested = requested;
if (gfs2_assert_warn(sdp, requested)) {
error = -EINVAL;
goto out;
}
do {
error = get_local_rgrp(ip, &last_unlinked);
@ -1056,6 +1091,9 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip)
gfs2_log_flush(sdp, NULL);
} while (tries++ < 3);
out:
if (error)
gfs2_blkrsv_put(ip);
return error;
}
@ -1068,10 +1106,11 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip)
void gfs2_inplace_release(struct gfs2_inode *ip)
{
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_blkreserv *rs = ip->i_res;
if (al->al_rgd_gh.gh_gl)
gfs2_glock_dq_uninit(&al->al_rgd_gh);
gfs2_blkrsv_put(ip);
if (rs->rs_rgd_gh.gh_gl)
gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
}
/**
@ -1108,39 +1147,35 @@ static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
}
/**
* rgblk_search - find a block in @old_state, change allocation
* state to @new_state
* rgblk_search - find a block in @state
* @rgd: the resource group descriptor
* @goal: the goal block within the RG (start here to search for avail block)
* @old_state: GFS2_BLKST_XXX the before-allocation state to find
* @new_state: GFS2_BLKST_XXX the after-allocation block state
* @n: The extent length
* @state: GFS2_BLKST_XXX the before-allocation state to find
* @dinode: TRUE if the first block we allocate is for a dinode
* @rbi: address of the pointer to the bitmap containing the block found
*
* Walk rgrp's bitmap to find bits that represent a block in @old_state.
* Add the found bitmap buffer to the transaction.
* Set the found bits to @new_state to change block's allocation state.
* Walk rgrp's bitmap to find bits that represent a block in @state.
*
* This function never fails, because we wouldn't call it unless we
* know (from reservation results, etc.) that a block is available.
*
* Scope of @goal and returned block is just within rgrp, not the whole
* filesystem.
* Scope of @goal is just within rgrp, not the whole filesystem.
* Scope of @returned block is just within bitmap, not the whole filesystem.
*
* Returns: the block number allocated
* Returns: the block number found relative to the bitmap rbi
*/
static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
unsigned char old_state, unsigned char new_state,
unsigned int *n)
unsigned char state,
struct gfs2_bitmap **rbi)
{
struct gfs2_bitmap *bi = NULL;
const u32 length = rgd->rd_length;
u32 blk = BFITNOENT;
unsigned int buf, x;
const unsigned int elen = *n;
const u8 *buffer = NULL;
*n = 0;
*rbi = NULL;
/* Find bitmap block that contains bits for goal block */
for (buf = 0; buf < length; buf++) {
bi = rgd->rd_bits + buf;
@ -1163,21 +1198,21 @@ do_search:
bi = rgd->rd_bits + buf;
if (test_bit(GBF_FULL, &bi->bi_flags) &&
(old_state == GFS2_BLKST_FREE))
(state == GFS2_BLKST_FREE))
goto skip;
/* The GFS2_BLKST_UNLINKED state doesn't apply to the clone
bitmaps, so we must search the originals for that. */
buffer = bi->bi_bh->b_data + bi->bi_offset;
WARN_ON(!buffer_uptodate(bi->bi_bh));
if (old_state != GFS2_BLKST_UNLINKED && bi->bi_clone)
if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
buffer = bi->bi_clone + bi->bi_offset;
blk = gfs2_bitfit(buffer, bi->bi_len, goal, old_state);
blk = gfs2_bitfit(buffer, bi->bi_len, goal, state);
if (blk != BFITNOENT)
break;
if ((goal == 0) && (old_state == GFS2_BLKST_FREE))
if ((goal == 0) && (state == GFS2_BLKST_FREE))
set_bit(GBF_FULL, &bi->bi_flags);
/* Try next bitmap block (wrap back to rgrp header if at end) */
@ -1187,16 +1222,37 @@ skip:
goal = 0;
}
if (blk == BFITNOENT)
return blk;
if (blk != BFITNOENT)
*rbi = bi;
*n = 1;
if (old_state == new_state)
goto out;
return blk;
}
/**
* gfs2_alloc_extent - allocate an extent from a given bitmap
* @rgd: the resource group descriptor
* @bi: the bitmap within the rgrp
* @blk: the block within the bitmap
* @dinode: TRUE if the first block we allocate is for a dinode
* @n: The extent length
*
* Add the found bitmap buffer to the transaction.
* Set the found bits to @new_state to change block's allocation state.
* Returns: starting block number of the extent (fs scope)
*/
static u64 gfs2_alloc_extent(struct gfs2_rgrpd *rgd, struct gfs2_bitmap *bi,
u32 blk, bool dinode, unsigned int *n)
{
const unsigned int elen = *n;
u32 goal;
const u8 *buffer = NULL;
*n = 0;
buffer = bi->bi_bh->b_data + bi->bi_offset;
gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset,
bi, blk, new_state);
bi, blk, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
(*n)++;
goal = blk;
while (*n < elen) {
goal++;
@ -1206,11 +1262,12 @@ skip:
GFS2_BLKST_FREE)
break;
gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset,
bi, goal, new_state);
bi, goal, GFS2_BLKST_USED);
(*n)++;
}
out:
return (bi->bi_start * GFS2_NBBY) + blk;
blk = gfs2_bi2rgd_blk(bi, blk);
rgd->rd_last_alloc = blk + *n - 1;
return rgd->rd_data0 + blk;
}
/**
@ -1298,121 +1355,93 @@ static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
}
/**
* gfs2_alloc_block - Allocate one or more blocks
* gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
* @ip: the inode to allocate the block for
* @bn: Used to return the starting block number
* @n: requested number of blocks/extent length (value/result)
* @ndata: requested number of blocks/extent length (value/result)
* @dinode: 1 if we're allocating a dinode block, else 0
* @generation: the generation number of the inode
*
* Returns: 0 or error
*/
int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n)
int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
bool dinode, u64 *generation)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head *dibh;
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_rgrpd *rgd;
u32 goal, blk;
u64 block;
unsigned int ndata;
u32 goal, blk; /* block, within the rgrp scope */
u64 block; /* block, within the file system scope */
int error;
struct gfs2_bitmap *bi;
/* Only happens if there is a bug in gfs2, return something distinctive
* to ensure that it is noticed.
*/
if (al == NULL)
if (ip->i_res == NULL)
return -ECANCELED;
rgd = ip->i_rgd;
if (rgrp_contains_block(rgd, ip->i_goal))
if (!dinode && rgrp_contains_block(rgd, ip->i_goal))
goal = ip->i_goal - rgd->rd_data0;
else
goal = rgd->rd_last_alloc;
blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED, n);
blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, &bi);
/* Since all blocks are reserved in advance, this shouldn't happen */
if (blk == BFITNOENT)
goto rgrp_error;
rgd->rd_last_alloc = blk;
block = rgd->rd_data0 + blk;
ip->i_goal = block + *n - 1;
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error == 0) {
struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
di->di_goal_meta = di->di_goal_data = cpu_to_be64(ip->i_goal);
brelse(dibh);
block = gfs2_alloc_extent(rgd, bi, blk, dinode, nblocks);
ndata = *nblocks;
if (dinode)
ndata--;
if (!dinode) {
ip->i_goal = block + ndata - 1;
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error == 0) {
struct gfs2_dinode *di =
(struct gfs2_dinode *)dibh->b_data;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
di->di_goal_meta = di->di_goal_data =
cpu_to_be64(ip->i_goal);
brelse(dibh);
}
}
if (rgd->rd_free < *n)
if (rgd->rd_free < *nblocks)
goto rgrp_error;
rgd->rd_free -= *n;
gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
al->al_alloced += *n;
gfs2_statfs_change(sdp, 0, -(s64)*n, 0);
gfs2_quota_change(ip, *n, ip->i_inode.i_uid, ip->i_inode.i_gid);
rgd->rd_free_clone -= *n;
trace_gfs2_block_alloc(ip, block, *n, GFS2_BLKST_USED);
*bn = block;
return 0;
rgrp_error:
gfs2_rgrp_error(rgd);
return -EIO;
}
/**
* gfs2_alloc_di - Allocate a dinode
* @dip: the directory that the inode is going in
* @bn: the block number which is allocated
* @generation: the generation number of the inode
*
* Returns: 0 on success or error
*/
int gfs2_alloc_di(struct gfs2_inode *dip, u64 *bn, u64 *generation)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct gfs2_alloc *al = dip->i_alloc;
struct gfs2_rgrpd *rgd = dip->i_rgd;
u32 blk;
u64 block;
unsigned int n = 1;
blk = rgblk_search(rgd, rgd->rd_last_alloc,
GFS2_BLKST_FREE, GFS2_BLKST_DINODE, &n);
/* Since all blocks are reserved in advance, this shouldn't happen */
if (blk == BFITNOENT)
goto rgrp_error;
rgd->rd_last_alloc = blk;
block = rgd->rd_data0 + blk;
if (rgd->rd_free == 0)
goto rgrp_error;
rgd->rd_free--;
rgd->rd_dinodes++;
*generation = rgd->rd_igeneration++;
if (*generation == 0)
rgd->rd_free -= *nblocks;
if (dinode) {
rgd->rd_dinodes++;
*generation = rgd->rd_igeneration++;
if (*generation == 0)
*generation = rgd->rd_igeneration++;
}
gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
al->al_alloced++;
gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
if (dinode)
gfs2_trans_add_unrevoke(sdp, block, 1);
gfs2_statfs_change(sdp, 0, -1, +1);
gfs2_trans_add_unrevoke(sdp, block, 1);
/*
* This needs reviewing to see why we cannot do the quota change
* at this point in the dinode case.
*/
if (ndata)
gfs2_quota_change(ip, ndata, ip->i_inode.i_uid,
ip->i_inode.i_gid);
rgd->rd_free_clone--;
trace_gfs2_block_alloc(dip, block, 1, GFS2_BLKST_DINODE);
rgd->rd_free_clone -= *nblocks;
trace_gfs2_block_alloc(ip, block, *nblocks,
dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
*bn = block;
return 0;
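
With gfs2_alloc_block() and gfs2_alloc_di() folded into one function, the difference between allocating a data/metadata extent and allocating a dinode is now carried entirely by the arguments. Two illustrative calls, modelled on the callers updated elsewhere in this diff (gfs2_unstuff_dinode, new_leaf and ea_alloc_blk pass dinode == 0; alloc_dinode passes 1); the local variables are just for the example:

	int error;

	/* data or metadata extent: dinode == 0, no generation wanted */
	unsigned int n = 1;	/* requested length in, allocated length out */
	u64 block;
	error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);

	/* dinode: a single block, dinode == 1, inode generation returned */
	unsigned int dblocks = 1;
	u64 no_addr, generation;
	error = gfs2_alloc_blocks(dip, &no_addr, &dblocks, 1, &generation);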

View File

@ -28,19 +28,19 @@ extern void gfs2_free_clones(struct gfs2_rgrpd *rgd);
extern int gfs2_rgrp_go_lock(struct gfs2_holder *gh);
extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
static inline void gfs2_alloc_put(struct gfs2_inode *ip)
extern struct gfs2_qadata *gfs2_qadata_get(struct gfs2_inode *ip);
static inline void gfs2_qadata_put(struct gfs2_inode *ip)
{
BUG_ON(ip->i_alloc == NULL);
kfree(ip->i_alloc);
ip->i_alloc = NULL;
BUG_ON(ip->i_qadata == NULL);
kfree(ip->i_qadata);
ip->i_qadata = NULL;
}
extern int gfs2_inplace_reserve(struct gfs2_inode *ip);
extern int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested);
extern void gfs2_inplace_release(struct gfs2_inode *ip);
extern int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n);
extern int gfs2_alloc_di(struct gfs2_inode *ip, u64 *bn, u64 *generation);
extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
bool dinode, u64 *generation);
extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);

View File

@ -1399,8 +1399,9 @@ static void gfs2_final_release_pages(struct gfs2_inode *ip)
static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_alloc *al;
struct gfs2_qadata *qa;
struct gfs2_rgrpd *rgd;
struct gfs2_holder gh;
int error;
if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
@ -1408,8 +1409,8 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
return -EIO;
}
al = gfs2_alloc_get(ip);
if (!al)
qa = gfs2_qadata_get(ip);
if (!qa)
return -ENOMEM;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
@ -1423,8 +1424,7 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
goto out_qs;
}
error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
&al->al_rgd_gh);
error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (error)
goto out_qs;
@ -1440,11 +1440,11 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
gfs2_trans_end(sdp);
out_rg_gunlock:
gfs2_glock_dq_uninit(&al->al_rgd_gh);
gfs2_glock_dq_uninit(&gh);
out_qs:
gfs2_quota_unhold(ip);
out:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
return error;
}

View File

@ -30,9 +30,9 @@ struct gfs2_glock;
* block, or all of the blocks in the rg, whichever is smaller */
static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip)
{
const struct gfs2_alloc *al = ip->i_alloc;
if (al->al_requested < ip->i_rgd->rd_length)
return al->al_requested + 1;
const struct gfs2_blkreserv *rs = ip->i_res;
if (rs->rs_requested < ip->i_rgd->rd_length)
return rs->rs_requested + 1;
return ip->i_rgd->rd_length;
}

View File

@ -321,11 +321,11 @@ static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea,
struct gfs2_ea_header *prev, int leave)
{
struct gfs2_alloc *al;
struct gfs2_qadata *qa;
int error;
al = gfs2_alloc_get(ip);
if (!al)
qa = gfs2_qadata_get(ip);
if (!qa)
return -ENOMEM;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
@ -336,7 +336,7 @@ static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
gfs2_quota_unhold(ip);
out_alloc:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
return error;
}
@ -549,9 +549,10 @@ int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
goto out;
error = gfs2_ea_get_copy(ip, &el, data, len);
if (error == 0)
error = len;
*ppdata = data;
if (error < 0)
kfree(data);
else
*ppdata = data;
out:
brelse(el.el_bh);
return error;
@ -609,7 +610,7 @@ static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
u64 block;
int error;
error = gfs2_alloc_block(ip, &block, &n);
error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
if (error)
return error;
gfs2_trans_add_unrevoke(sdp, block, 1);
@ -671,7 +672,7 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
int mh_size = sizeof(struct gfs2_meta_header);
unsigned int n = 1;
error = gfs2_alloc_block(ip, &block, &n);
error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
if (error)
return error;
gfs2_trans_add_unrevoke(sdp, block, 1);
@ -708,21 +709,19 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
unsigned int blks,
ea_skeleton_call_t skeleton_call, void *private)
{
struct gfs2_alloc *al;
struct gfs2_qadata *qa;
struct buffer_head *dibh;
int error;
al = gfs2_alloc_get(ip);
if (!al)
qa = gfs2_qadata_get(ip);
if (!qa)
return -ENOMEM;
error = gfs2_quota_lock_check(ip);
if (error)
goto out;
al->al_requested = blks;
error = gfs2_inplace_reserve(ip);
error = gfs2_inplace_reserve(ip, blks);
if (error)
goto out_gunlock_q;
@ -751,7 +750,7 @@ out_ipres:
out_gunlock_q:
gfs2_quota_unlock(ip);
out:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
return error;
}
@ -991,7 +990,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
} else {
u64 blk;
unsigned int n = 1;
error = gfs2_alloc_block(ip, &blk, &n);
error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL);
if (error)
return error;
gfs2_trans_add_unrevoke(sdp, blk, 1);
@ -1435,9 +1434,9 @@ out:
static int ea_dealloc_block(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_rgrpd *rgd;
struct buffer_head *dibh;
struct gfs2_holder gh;
int error;
rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr);
@ -1446,8 +1445,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
return -EIO;
}
error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
&al->al_rgd_gh);
error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (error)
return error;
@ -1471,7 +1469,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
gfs2_trans_end(sdp);
out_gunlock:
gfs2_glock_dq_uninit(&al->al_rgd_gh);
gfs2_glock_dq_uninit(&gh);
return error;
}
@ -1484,11 +1482,11 @@ out_gunlock:
int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
struct gfs2_alloc *al;
struct gfs2_qadata *qa;
int error;
al = gfs2_alloc_get(ip);
if (!al)
qa = gfs2_qadata_get(ip);
if (!qa)
return -ENOMEM;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
@ -1510,7 +1508,7 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip)
out_quota:
gfs2_quota_unhold(ip);
out_alloc:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
return error;
}