JFS: Whitespace cleanup and remove some dead code

Signed-off-by: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
Dave Kleikamp 2007-06-06 15:28:35 -05:00
parent 5ecd3100e6
commit f720e3ba55
23 changed files with 1061 additions and 1093 deletions

View File

@ -29,7 +29,7 @@
__u32 __x = (x); \
((__u32)( \
((__x & (__u32)0x000000ffUL) << 16) | \
(__x & (__u32)0x0000ff00UL) | \
(__x & (__u32)0x0000ff00UL) | \
((__x & (__u32)0x00ff0000UL) >> 16) )); \
})
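
The macro body above swaps the first and third bytes of a 24-bit quantity held in the low three bytes of a 32-bit word, leaving the middle byte in place. A standalone sketch of the same manipulation (the function name and the small test harness are illustrative, not taken from the JFS tree):

#include <stdint.h>
#include <stdio.h>

static uint32_t swab24(uint32_t x)
{
        return ((x & 0x000000ffu) << 16) |      /* low byte moves up    */
                (x & 0x0000ff00u)        |      /* middle byte stays    */
               ((x & 0x00ff0000u) >> 16);       /* high byte moves down */
}

int main(void)
{
        printf("%06x\n", swab24(0x123456));     /* prints 563412 */
        return 0;
}
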

View File

@ -19,23 +19,23 @@
#define _H_JFS_DINODE
/*
* jfs_dinode.h: on-disk inode manager
* jfs_dinode.h: on-disk inode manager
*/
#define INODESLOTSIZE 128
#define L2INODESLOTSIZE 7
#define log2INODESIZE 9 /* log2(bytes per dinode) */
#define INODESLOTSIZE 128
#define L2INODESLOTSIZE 7
#define log2INODESIZE 9 /* log2(bytes per dinode) */
/*
* on-disk inode : 512 bytes
* on-disk inode : 512 bytes
*
* note: align 64-bit fields on 8-byte boundary.
*/
struct dinode {
/*
* I. base area (128 bytes)
* ------------------------
* I. base area (128 bytes)
* ------------------------
*
* define generic/POSIX attributes
*/
@ -70,16 +70,16 @@ struct dinode {
__le32 di_acltype; /* 4: Type of ACL */
/*
* Extension Areas.
* Extension Areas.
*
* Historically, the inode was partitioned into 4 128-byte areas,
* the last 3 being defined as unions which could have multiple
* uses. The first 96 bytes had been completely unused until
* an index table was added to the directory. It is now more
* useful to describe the last 3/4 of the inode as a single
* union. We would probably be better off redesigning the
* entire structure from scratch, but we don't want to break
* commonality with OS/2's JFS at this time.
* Historically, the inode was partitioned into 4 128-byte areas,
* the last 3 being defined as unions which could have multiple
* uses. The first 96 bytes had been completely unused until
* an index table was added to the directory. It is now more
* useful to describe the last 3/4 of the inode as a single
* union. We would probably be better off redesigning the
* entire structure from scratch, but we don't want to break
* commonality with OS/2's JFS at this time.
*/
union {
struct {
@ -95,7 +95,7 @@ struct dinode {
} _dir; /* (384) */
#define di_dirtable u._dir._table
#define di_dtroot u._dir._dtroot
#define di_parent di_dtroot.header.idotdot
#define di_parent di_dtroot.header.idotdot
#define di_DASD di_dtroot.header.DASD
struct {
@ -127,14 +127,14 @@ struct dinode {
#define di_inlinedata u._file._u2._special._u
#define di_rdev u._file._u2._special._u._rdev
#define di_fastsymlink u._file._u2._special._u._fastsymlink
#define di_inlineea u._file._u2._special._inlineea
#define di_inlineea u._file._u2._special._inlineea
} u;
};
/* extended mode bits (on-disk inode di_mode) */
#define IFJOURNAL 0x00010000 /* journalled file */
#define ISPARSE 0x00020000 /* sparse file enabled */
#define INLINEEA 0x00040000 /* inline EA area free */
#define IFJOURNAL 0x00010000 /* journalled file */
#define ISPARSE 0x00020000 /* sparse file enabled */
#define INLINEEA 0x00040000 /* inline EA area free */
#define ISWAPFILE 0x00800000 /* file open for pager swap space */
/* more extended mode bits: attributes for OS/2 */

View File

@ -154,12 +154,12 @@ static const s8 budtab[256] = {
* the in-core descriptor is initialized from disk.
*
* PARAMETERS:
* ipbmap - pointer to in-core inode for the block map.
* ipbmap - pointer to in-core inode for the block map.
*
* RETURN VALUES:
* 0 - success
* -ENOMEM - insufficient memory
* -EIO - i/o error
* 0 - success
* -ENOMEM - insufficient memory
* -EIO - i/o error
*/
int dbMount(struct inode *ipbmap)
{
@ -232,11 +232,11 @@ int dbMount(struct inode *ipbmap)
* the memory for this descriptor is freed.
*
* PARAMETERS:
* ipbmap - pointer to in-core inode for the block map.
* ipbmap - pointer to in-core inode for the block map.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
* 0 - success
* -EIO - i/o error
*/
int dbUnmount(struct inode *ipbmap, int mounterror)
{
@ -320,13 +320,13 @@ int dbSync(struct inode *ipbmap)
* at a time.
*
* PARAMETERS:
* ip - pointer to in-core inode;
* blkno - starting block number to be freed.
* nblocks - number of blocks to be freed.
* ip - pointer to in-core inode;
* blkno - starting block number to be freed.
* nblocks - number of blocks to be freed.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
* 0 - success
* -EIO - i/o error
*/
int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
{
@ -395,23 +395,23 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
/*
* NAME: dbUpdatePMap()
*
* FUNCTION: update the allocation state (free or allocate) of the
* FUNCTION: update the allocation state (free or allocate) of the
* specified block range in the persistent block allocation map.
*
* the blocks will be updated in the persistent map one
* dmap at a time.
*
* PARAMETERS:
* ipbmap - pointer to in-core inode for the block map.
* free - 'true' if block range is to be freed from the persistent
* map; 'false' if it is to be allocated.
* blkno - starting block number of the range.
* nblocks - number of contiguous blocks in the range.
* tblk - transaction block;
* ipbmap - pointer to in-core inode for the block map.
* free - 'true' if block range is to be freed from the persistent
* map; 'false' if it is to be allocated.
* blkno - starting block number of the range.
* nblocks - number of contiguous blocks in the range.
* tblk - transaction block;
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
* 0 - success
* -EIO - i/o error
*/
int
dbUpdatePMap(struct inode *ipbmap,
@ -573,7 +573,7 @@ dbUpdatePMap(struct inode *ipbmap,
/*
* NAME: dbNextAG()
*
* FUNCTION: find the preferred allocation group for new allocations.
* FUNCTION: find the preferred allocation group for new allocations.
*
* Within the allocation groups, we maintain a preferred
* allocation group which consists of a group with at least
@ -589,10 +589,10 @@ dbUpdatePMap(struct inode *ipbmap,
* empty ags around for large allocations.
*
* PARAMETERS:
* ipbmap - pointer to in-core inode for the block map.
* ipbmap - pointer to in-core inode for the block map.
*
* RETURN VALUES:
* the preferred allocation group number.
* the preferred allocation group number.
*/
int dbNextAG(struct inode *ipbmap)
{
@ -656,7 +656,7 @@ unlock:
/*
* NAME: dbAlloc()
*
* FUNCTION: attempt to allocate a specified number of contiguous free
* FUNCTION: attempt to allocate a specified number of contiguous free
* blocks from the working allocation block map.
*
* the block allocation policy uses hints and a multi-step
@ -680,16 +680,16 @@ unlock:
* size or requests that specify no hint value.
*
* PARAMETERS:
* ip - pointer to in-core inode;
* hint - allocation hint.
* nblocks - number of contiguous blocks in the range.
* results - on successful return, set to the starting block number
* ip - pointer to in-core inode;
* hint - allocation hint.
* nblocks - number of contiguous blocks in the range.
* results - on successful return, set to the starting block number
* of the newly allocated contiguous range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*/
int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
{
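
The multi-step policy this comment begins to describe boils down to a cascade of increasingly wide searches: blocks right after the hint, blocks near the hint, the hint's allocation group, the preferred group, and finally anywhere in the map. The exact ordering is only partially visible in this hunk, so the following is a hedged, self-contained sketch of such a cascade; every name below is an illustrative stand-in, not the real dmap/AG API:

#include <errno.h>

typedef long long s64;

/* Trivial stand-ins so the sketch compiles; each returns 0 or -ENOSPC. */
static int try_adjacent(s64 hint, s64 n, s64 *r)  { (void)hint; (void)n; (void)r; return -ENOSPC; }
static int try_near_hint(s64 hint, s64 n, s64 *r) { (void)hint; (void)n; (void)r; return -ENOSPC; }
static int try_group(int agno, s64 n, s64 *r)     { (void)agno; (void)n; *r = 0; return 0; }
static int try_anywhere(s64 n, s64 *r)            { (void)n; *r = 0; return 0; }
static int group_of(s64 blkno)                    { (void)blkno; return 0; }
static int preferred_group(void)                  { return 0; }

static int alloc_sketch(s64 hint, s64 nblocks, s64 *results)
{
        int rc;

        if (hint) {
                /* 1. the blocks immediately following the hint */
                if ((rc = try_adjacent(hint + 1, nblocks, results)) != -ENOSPC)
                        return rc;
                /* 2. blocks near the hint, within the hint's dmap */
                if ((rc = try_near_hint(hint, nblocks, results)) != -ENOSPC)
                        return rc;
                /* 3. the allocation group containing the hint */
                if ((rc = try_group(group_of(hint), nblocks, results)) != -ENOSPC)
                        return rc;
        }
        /* 4. the preferred allocation group, then 5. anywhere in the map */
        if ((rc = try_group(preferred_group(), nblocks, results)) != -ENOSPC)
                return rc;
        return try_anywhere(nblocks, results);
}
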
@ -706,12 +706,6 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
/* assert that nblocks is valid */
assert(nblocks > 0);
#ifdef _STILL_TO_PORT
/* DASD limit check F226941 */
if (OVER_LIMIT(ip, nblocks))
return -ENOSPC;
#endif /* _STILL_TO_PORT */
/* get the log2 number of blocks to be allocated.
* if the number of blocks is not a log2 multiple,
* it will be rounded up to the next log2 multiple.
@ -720,7 +714,6 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
bmp = JFS_SBI(ip->i_sb)->bmap;
//retry: /* serialize w.r.t.extendfs() */
mapSize = bmp->db_mapsize;
/* the hint should be within the map */
@ -879,17 +872,17 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
/*
* NAME: dbAllocExact()
*
* FUNCTION: try to allocate the requested extent;
* FUNCTION: try to allocate the requested extent;
*
* PARAMETERS:
* ip - pointer to in-core inode;
* blkno - extent address;
* nblocks - extent length;
* ip - pointer to in-core inode;
* blkno - extent address;
* nblocks - extent length;
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*/
int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
{
@ -946,7 +939,7 @@ int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
/*
* NAME: dbReAlloc()
*
* FUNCTION: attempt to extend a current allocation by a specified
* FUNCTION: attempt to extend a current allocation by a specified
* number of blocks.
*
* this routine attempts to satisfy the allocation request
@ -959,21 +952,21 @@ int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
* number of blocks required.
*
* PARAMETERS:
* ip - pointer to in-core inode requiring allocation.
* blkno - starting block of the current allocation.
* nblocks - number of contiguous blocks within the current
* ip - pointer to in-core inode requiring allocation.
* blkno - starting block of the current allocation.
* nblocks - number of contiguous blocks within the current
* allocation.
* addnblocks - number of blocks to add to the allocation.
* results - on successful return, set to the starting block number
* addnblocks - number of blocks to add to the allocation.
* results - on successful return, set to the starting block number
* of the existing allocation if the existing allocation
* was extended in place or to a newly allocated contiguous
* range if the existing allocation could not be extended
* in place.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*/
int
dbReAlloc(struct inode *ip,
@ -1004,7 +997,7 @@ dbReAlloc(struct inode *ip,
/*
* NAME: dbExtend()
*
* FUNCTION: attempt to extend a current allocation by a specified
* FUNCTION: attempt to extend a current allocation by a specified
* number of blocks.
*
* this routine attempts to satisfy the allocation request
@ -1013,16 +1006,16 @@ dbReAlloc(struct inode *ip,
* immediately following the current allocation.
*
* PARAMETERS:
* ip - pointer to in-core inode requiring allocation.
* blkno - starting block of the current allocation.
* nblocks - number of contiguous blocks within the current
* ip - pointer to in-core inode requiring allocation.
* blkno - starting block of the current allocation.
* nblocks - number of contiguous blocks within the current
* allocation.
* addnblocks - number of blocks to add to the allocation.
* addnblocks - number of blocks to add to the allocation.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*/
static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
{
@ -1109,19 +1102,19 @@ static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
/*
* NAME: dbAllocNext()
*
* FUNCTION: attempt to allocate the blocks of the specified block
* FUNCTION: attempt to allocate the blocks of the specified block
* range within a dmap.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap.
* blkno - starting block number of the range.
* nblocks - number of contiguous free blocks of the range.
* bmp - pointer to bmap descriptor
* dp - pointer to dmap.
* blkno - starting block number of the range.
* nblocks - number of contiguous free blocks of the range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) held on entry/exit;
*/
@ -1233,7 +1226,7 @@ static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
/*
* NAME: dbAllocNear()
*
* FUNCTION: attempt to allocate a number of contiguous free blocks near
* FUNCTION: attempt to allocate a number of contiguous free blocks near
* a specified block (hint) within a dmap.
*
* starting with the dmap leaf that covers the hint, we'll
@ -1242,18 +1235,18 @@ static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
* the desired free space.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap.
* blkno - block number to allocate near.
* nblocks - actual number of contiguous free blocks desired.
* l2nb - log2 number of contiguous free blocks desired.
* results - on successful return, set to the starting block number
* bmp - pointer to bmap descriptor
* dp - pointer to dmap.
* blkno - block number to allocate near.
* nblocks - actual number of contiguous free blocks desired.
* l2nb - log2 number of contiguous free blocks desired.
* results - on successful return, set to the starting block number
* of the newly allocated range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) held on entry/exit;
*/
@ -1316,7 +1309,7 @@ dbAllocNear(struct bmap * bmp,
/*
* NAME: dbAllocAG()
*
* FUNCTION: attempt to allocate the specified number of contiguous
* FUNCTION: attempt to allocate the specified number of contiguous
* free blocks within the specified allocation group.
*
* unless the allocation group size is equal to the number
@ -1353,17 +1346,17 @@ dbAllocNear(struct bmap * bmp,
* the allocation group.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* bmp - pointer to bmap descriptor
* agno - allocation group number.
* nblocks - actual number of contiguous free blocks desired.
* l2nb - log2 number of contiguous free blocks desired.
* results - on successful return, set to the starting block number
* nblocks - actual number of contiguous free blocks desired.
* l2nb - log2 number of contiguous free blocks desired.
* results - on successful return, set to the starting block number
* of the newly allocated range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* note: IWRITE_LOCK(ipmap) held on entry/exit;
*/
@ -1546,7 +1539,7 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
/*
* NAME: dbAllocAny()
*
* FUNCTION: attempt to allocate the specified number of contiguous
* FUNCTION: attempt to allocate the specified number of contiguous
* free blocks anywhere in the file system.
*
* dbAllocAny() attempts to find the sufficient free space by
@ -1556,16 +1549,16 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
* desired free space is allocated.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* nblocks - actual number of contiguous free blocks desired.
* l2nb - log2 number of contiguous free blocks desired.
* results - on successful return, set to the starting block number
* bmp - pointer to bmap descriptor
* nblocks - actual number of contiguous free blocks desired.
* l2nb - log2 number of contiguous free blocks desired.
* results - on successful return, set to the starting block number
* of the newly allocated range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
*/
@ -1598,9 +1591,9 @@ static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results)
/*
* NAME: dbFindCtl()
*
* FUNCTION: starting at a specified dmap control page level and block
* FUNCTION: starting at a specified dmap control page level and block
* number, search down the dmap control levels for a range of
* contiguous free blocks large enough to satisfy an allocation
* contiguous free blocks large enough to satisfy an allocation
* request for the specified number of free blocks.
*
* if sufficient contiguous free blocks are found, this routine
@ -1609,17 +1602,17 @@ static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results)
* is sufficient in size.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* level - starting dmap control page level.
* l2nb - log2 number of contiguous free blocks desired.
* *blkno - on entry, starting block number for conducting the search.
* bmp - pointer to bmap descriptor
* level - starting dmap control page level.
* l2nb - log2 number of contiguous free blocks desired.
* *blkno - on entry, starting block number for conducting the search.
* on successful return, the first block within a dmap page
* that contains or starts a range of contiguous free blocks.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
*/
@ -1699,7 +1692,7 @@ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
/*
* NAME: dbAllocCtl()
*
* FUNCTION: attempt to allocate a specified number of contiguous
* FUNCTION: attempt to allocate a specified number of contiguous
* blocks starting within a specific dmap.
*
* this routine is called by higher level routines that search
@ -1726,18 +1719,18 @@ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
* first dmap (i.e. blkno).
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* nblocks - actual number of contiguous free blocks to allocate.
* l2nb - log2 number of contiguous free blocks to allocate.
* blkno - starting block number of the dmap to start the allocation
* bmp - pointer to bmap descriptor
* nblocks - actual number of contiguous free blocks to allocate.
* l2nb - log2 number of contiguous free blocks to allocate.
* blkno - starting block number of the dmap to start the allocation
* from.
* results - on successful return, set to the starting block number
* results - on successful return, set to the starting block number
* of the newly allocated range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
*/
@ -1870,7 +1863,7 @@ dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
/*
* NAME: dbAllocDmapLev()
*
* FUNCTION: attempt to allocate a specified number of contiguous blocks
* FUNCTION: attempt to allocate a specified number of contiguous blocks
* from a specified dmap.
*
* this routine checks if the contiguous blocks are available.
@ -1878,17 +1871,17 @@ dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
* returned.
*
* PARAMETERS:
* mp - pointer to bmap descriptor
* dp - pointer to dmap to attempt to allocate blocks from.
* l2nb - log2 number of contiguous block desired.
* nblocks - actual number of contiguous block desired.
* results - on successful return, set to the starting block number
* mp - pointer to bmap descriptor
* dp - pointer to dmap to attempt to allocate blocks from.
* l2nb - log2 number of contiguous block desired.
* nblocks - actual number of contiguous block desired.
* results - on successful return, set to the starting block number
* of the newly allocated range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap), e.g., from dbAlloc(), or
* IWRITE_LOCK(ipbmap), e.g., dbAllocCtl(), held on entry/exit;
@ -1933,7 +1926,7 @@ dbAllocDmapLev(struct bmap * bmp,
/*
* NAME: dbAllocDmap()
*
* FUNCTION: adjust the disk allocation map to reflect the allocation
* FUNCTION: adjust the disk allocation map to reflect the allocation
* of a specified block range within a dmap.
*
* this routine allocates the specified blocks from the dmap
@ -1946,14 +1939,14 @@ dbAllocDmapLev(struct bmap * bmp,
* covers this dmap.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to allocate the block range from.
* blkno - starting block number of the block to be allocated.
* nblocks - number of blocks to be allocated.
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to allocate the block range from.
* blkno - starting block number of the block to be allocated.
* nblocks - number of blocks to be allocated.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
* 0 - success
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
@ -1989,7 +1982,7 @@ static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
/*
* NAME: dbFreeDmap()
*
* FUNCTION: adjust the disk allocation map to reflect the allocation
* FUNCTION: adjust the disk allocation map to reflect the allocation
* of a specified block range within a dmap.
*
* this routine frees the specified blocks from the dmap through
@ -1997,18 +1990,18 @@ static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
* causes the maximum string of free blocks within the dmap to
* change (i.e. the value of the root of the dmap's dmtree), this
* routine will cause this change to be reflected up through the
* appropriate levels of the dmap control pages by a call to
* appropriate levels of the dmap control pages by a call to
* dbAdjCtl() for the L0 dmap control page that covers this dmap.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to free the block range from.
* blkno - starting block number of the block to be freed.
* nblocks - number of blocks to be freed.
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to free the block range from.
* blkno - starting block number of the block to be freed.
* nblocks - number of blocks to be freed.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
* 0 - success
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
@ -2055,7 +2048,7 @@ static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
/*
* NAME: dbAllocBits()
*
* FUNCTION: allocate a specified block range from a dmap.
* FUNCTION: allocate a specified block range from a dmap.
*
* this routine updates the dmap to reflect the working
* state allocation of the specified block range. it directly
@ -2065,10 +2058,10 @@ static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
* dmap's dmtree, as a whole, to reflect the allocated range.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to allocate bits from.
* blkno - starting block number of the bits to be allocated.
* nblocks - number of bits to be allocated.
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to allocate bits from.
* blkno - starting block number of the bits to be allocated.
* nblocks - number of bits to be allocated.
*
* RETURN VALUES: none
*
@ -2149,7 +2142,7 @@ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
* the allocated words.
*/
for (; nwords > 0; nwords -= nw) {
if (leaf[word] < BUDMIN) {
if (leaf[word] < BUDMIN) {
jfs_error(bmp->db_ipbmap->i_sb,
"dbAllocBits: leaf page "
"corrupt");
@ -2202,7 +2195,7 @@ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
/*
* NAME: dbFreeBits()
*
* FUNCTION: free a specified block range from a dmap.
* FUNCTION: free a specified block range from a dmap.
*
* this routine updates the dmap to reflect the working
* state allocation of the specified block range. it directly
@ -2212,10 +2205,10 @@ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
* dmtree, as a whole, to reflect the deallocated range.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to free bits from.
* blkno - starting block number of the bits to be freed.
* nblocks - number of bits to be freed.
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to free bits from.
* blkno - starting block number of the bits to be freed.
* nblocks - number of bits to be freed.
*
* RETURN VALUES: 0 for success
*
@ -2388,19 +2381,19 @@ static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
* the new root value and the next dmap control page level to
* be adjusted.
* PARAMETERS:
* bmp - pointer to bmap descriptor
* blkno - the first block of a block range within a dmap. it is
* bmp - pointer to bmap descriptor
* blkno - the first block of a block range within a dmap. it is
* the allocation or deallocation of this block range that
* requires the dmap control page to be adjusted.
* newval - the new value of the lower level dmap or dmap control
* newval - the new value of the lower level dmap or dmap control
* page root.
* alloc - 'true' if adjustment is due to an allocation.
* level - current level of dmap control page (i.e. L0, L1, L2) to
* alloc - 'true' if adjustment is due to an allocation.
* level - current level of dmap control page (i.e. L0, L1, L2) to
* be adjusted.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
* 0 - success
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
@ -2544,16 +2537,16 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
/*
* NAME: dbSplit()
*
* FUNCTION: update the leaf of a dmtree with a new value, splitting
* FUNCTION: update the leaf of a dmtree with a new value, splitting
* the leaf from the binary buddy system of the dmtree's
* leaves, as required.
*
* PARAMETERS:
* tp - pointer to the tree containing the leaf.
* leafno - the number of the leaf to be updated.
* splitsz - the size the binary buddy system starting at the leaf
* tp - pointer to the tree containing the leaf.
* leafno - the number of the leaf to be updated.
* splitsz - the size the binary buddy system starting at the leaf
* must be split to, specified as the log2 number of blocks.
* newval - the new value for the leaf.
* newval - the new value for the leaf.
*
* RETURN VALUES: none
*
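
The split described above is the classic binary-buddy operation: carving a smaller block out of a larger free one releases a free buddy at every intermediate size. A generic illustration, deliberately not tied to the dmtree encoding (here order[i] holds the log2 size of a free block starting at index i, or -1 when index i sits inside a larger free block):

static void buddy_split(signed char *order, int blkno, int k, int j)
{
        while (k > j) {
                k--;
                /* the upper half becomes its own free buddy of order k */
                order[blkno + (1 << k)] = (signed char) k;
        }
        order[blkno] = (signed char) j;         /* the surviving block is of order j */
}

For example, starting from a single free block of order 3 at index 0, buddy_split(order, 0, 3, 0) leaves free buddies of orders 2, 1 and 0 at indices 4, 2 and 1, plus the order-0 block kept at index 0.
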
@ -2600,7 +2593,7 @@ static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
/*
* NAME: dbBackSplit()
*
* FUNCTION: back split the binary buddy system of dmtree leaves
* FUNCTION: back split the binary buddy system of dmtree leaves
* that hold a specified leaf until the specified leaf
* starts its own binary buddy system.
*
@ -2617,8 +2610,8 @@ static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
* in which a previous join operation must be backed out.
*
* PARAMETERS:
* tp - pointer to the tree containing the leaf.
* leafno - the number of the leaf to be updated.
* tp - pointer to the tree containing the leaf.
* leafno - the number of the leaf to be updated.
*
* RETURN VALUES: none
*
@ -2692,14 +2685,14 @@ static int dbBackSplit(dmtree_t * tp, int leafno)
/*
* NAME: dbJoin()
*
* FUNCTION: update the leaf of a dmtree with a new value, joining
* FUNCTION: update the leaf of a dmtree with a new value, joining
* the leaf with other leaves of the dmtree into a multi-leaf
* binary buddy system, as required.
*
* PARAMETERS:
* tp - pointer to the tree containing the leaf.
* leafno - the number of the leaf to be updated.
* newval - the new value for the leaf.
* tp - pointer to the tree containing the leaf.
* leafno - the number of the leaf to be updated.
* newval - the new value for the leaf.
*
* RETURN VALUES: none
*/
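
Joining is the reverse of the split sketched after dbSplit() above: as long as the equal-sized buddy of a freed block is also free, the two merge and the check continues at the next size up. Again a generic illustration rather than the actual dmtree layout:

static int buddy_join(signed char *order, int nblocks, int blkno, int k)
{
        int buddy;

        while ((buddy = blkno ^ (1 << k)) < nblocks && order[buddy] == k) {
                order[buddy] = -1;              /* buddy absorbed into the merged block */
                if (buddy < blkno) {
                        order[blkno] = -1;
                        blkno = buddy;          /* merged block starts at the lower buddy */
                }
                k++;
        }
        order[blkno] = (signed char) k;         /* final order of the joined block */
        return blkno;
}
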
@ -2785,15 +2778,15 @@ static int dbJoin(dmtree_t * tp, int leafno, int newval)
/*
* NAME: dbAdjTree()
*
* FUNCTION: update a leaf of a dmtree with a new value, adjusting
* FUNCTION: update a leaf of a dmtree with a new value, adjusting
* the dmtree, as required, to reflect the new leaf value.
* the combination of any buddies must already be done before
* this is called.
*
* PARAMETERS:
* tp - pointer to the tree to be adjusted.
* leafno - the number of the leaf to be updated.
* newval - the new value for the leaf.
* tp - pointer to the tree to be adjusted.
* leafno - the number of the leaf to be updated.
* newval - the new value for the leaf.
*
* RETURN VALUES: none
*/
@ -2852,7 +2845,7 @@ static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
/*
* NAME: dbFindLeaf()
*
* FUNCTION: search a dmtree_t for sufficient free blocks, returning
* FUNCTION: search a dmtree_t for sufficient free blocks, returning
* the index of a leaf describing the free blocks if
* sufficient free blocks are found.
*
@ -2861,15 +2854,15 @@ static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
* free space.
*
* PARAMETERS:
* tp - pointer to the tree to be searched.
* l2nb - log2 number of free blocks to search for.
* tp - pointer to the tree to be searched.
* l2nb - log2 number of free blocks to search for.
* leafidx - return pointer to be set to the index of the leaf
* describing at least l2nb free blocks if sufficient
* free blocks are found.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient free blocks.
* 0 - success
* -ENOSPC - insufficient free blocks.
*/
static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
{
@ -2916,18 +2909,18 @@ static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
/*
* NAME: dbFindBits()
*
* FUNCTION: find a specified number of binary buddy free bits within a
* FUNCTION: find a specified number of binary buddy free bits within a
* dmap bitmap word value.
*
* this routine searches the bitmap value for (1 << l2nb) free
* bits at (1 << l2nb) alignments within the value.
*
* PARAMETERS:
* word - dmap bitmap word value.
* l2nb - number of free bits specified as a log2 number.
* word - dmap bitmap word value.
* l2nb - number of free bits specified as a log2 number.
*
* RETURN VALUES:
* starting bit number of free bits.
* starting bit number of free bits.
*/
static int dbFindBits(u32 word, int l2nb)
{
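
A self-contained version of the search described above, assuming the dmap convention that a set bit marks an allocated block, that bits are numbered from the most-significant end of the word, and that the caller only asks when a suitable run is known to exist (the -1 return is just a guard for standalone use):

#include <stdint.h>

static int find_free_bits(uint32_t word, int l2nb)
{
        int nb = 1 << l2nb;                     /* length of the run to find */

        if (nb >= 32)                           /* whole-word request */
                return (word == 0) ? 0 : -1;

        uint32_t mask = ((1u << nb) - 1) << (32 - nb);

        /* slide an nb-bit window across the word in aligned, nb-bit steps */
        for (int bitno = 0; mask != 0; bitno += nb, mask >>= nb)
                if ((word & mask) == 0)         /* every bit in the window is free */
                        return bitno;
        return -1;
}
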
@ -2963,14 +2956,14 @@ static int dbFindBits(u32 word, int l2nb)
/*
* NAME: dbMaxBud(u8 *cp)
*
* FUNCTION: determine the largest binary buddy string of free
* FUNCTION: determine the largest binary buddy string of free
* bits within 32-bits of the map.
*
* PARAMETERS:
* cp - pointer to the 32-bit value.
* cp - pointer to the 32-bit value.
*
* RETURN VALUES:
* largest binary buddy of free bits within a dmap word.
* largest binary buddy of free bits within a dmap word.
*/
static int dbMaxBud(u8 * cp)
{
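
The same free-bit convention gives a brute-force way to state what "largest binary buddy of free bits" means: the biggest l2nb for which an aligned all-free run of 2^l2nb bits exists. Presumably the real code answers this per byte via the budtab[] table declared near the top of this file; the sketch below only spells out the semantics:

#include <stdint.h>

static int max_free_buddy(uint32_t word)
{
        if (word == 0)
                return 5;                       /* the whole 32-bit word is free */

        for (int l2nb = 4; l2nb >= 0; l2nb--) { /* runs of 16, 8, 4, 2, 1 bits */
                int nb = 1 << l2nb;
                uint32_t mask = ((1u << nb) - 1) << (32 - nb);

                for (; mask != 0; mask >>= nb)
                        if ((word & mask) == 0) /* aligned, all-free run found */
                                return l2nb;
        }
        return -1;                              /* no free bit at all */
}
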
@ -3000,14 +2993,14 @@ static int dbMaxBud(u8 * cp)
/*
* NAME: cnttz(uint word)
*
* FUNCTION: determine the number of trailing zeros within a 32-bit
* FUNCTION: determine the number of trailing zeros within a 32-bit
* value.
*
* PARAMETERS:
* value - 32-bit value to be examined.
* value - 32-bit value to be examined.
*
* RETURN VALUES:
* count of trailing zeros
* count of trailing zeros
*/
static int cnttz(u32 word)
{
@ -3025,14 +3018,14 @@ static int cnttz(u32 word)
/*
* NAME: cntlz(u32 value)
*
* FUNCTION: determine the number of leading zeros within a 32-bit
* FUNCTION: determine the number of leading zeros within a 32-bit
* value.
*
* PARAMETERS:
* value - 32-bit value to be examined.
* value - 32-bit value to be examined.
*
* RETURN VALUES:
* count of leading zeros
* count of leading zeros
*/
static int cntlz(u32 value)
{
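
Portable sketches of the two counts described above (cnttz and cntlz); in practice one would usually reach for compiler or kernel helpers such as __builtin_ctz()/__builtin_clz() or __ffs()/fls() rather than open-coded loops:

#include <stdint.h>

static int count_trailing_zeros(uint32_t word)
{
        int n = 0;

        if (word == 0)
                return 32;
        while ((word & 1) == 0) {               /* strip zeros from the low end */
                word >>= 1;
                n++;
        }
        return n;
}

static int count_leading_zeros(uint32_t value)
{
        int n = 0;

        if (value == 0)
                return 32;
        while ((value & 0x80000000u) == 0) {    /* strip zeros from the high end */
                value <<= 1;
                n++;
        }
        return n;
}
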
@ -3050,14 +3043,14 @@ static int cntlz(u32 value)
* NAME: blkstol2(s64 nb)
*
* FUNCTION: convert a block count to its log2 value. if the block
* count is not a l2 multiple, it is rounded up to the next
* count is not a l2 multiple, it is rounded up to the next
* larger l2 multiple.
*
* PARAMETERS:
* nb - number of blocks
* nb - number of blocks
*
* RETURN VALUES:
* log2 number of blocks
* log2 number of blocks
*/
static int blkstol2(s64 nb)
{
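
A minimal sketch of the rounding described above: the result l2nb is the smallest value with (1 << l2nb) >= nb, so any block count that is not a power of two rounds up:

static int blocks_to_l2(long long nb)
{
        int l2nb = 0;

        /* first power of two >= nb (assumes 0 < nb <= 1LL << 62) */
        while ((1LL << l2nb) < nb)
                l2nb++;
        return l2nb;                            /* e.g. nb = 24 -> 5 (2^5 = 32) */
}
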
@ -3099,13 +3092,13 @@ static int blkstol2(s64 nb)
* at a time.
*
* PARAMETERS:
* ip - pointer to in-core inode;
* blkno - starting block number to be freed.
* nblocks - number of blocks to be freed.
* ip - pointer to in-core inode;
* blkno - starting block number to be freed.
* nblocks - number of blocks to be freed.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
* 0 - success
* -EIO - i/o error
*/
int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks)
{
@ -3278,10 +3271,10 @@ static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
* L2
* |
* L1---------------------------------L1
* | |
* L0---------L0---------L0 L0---------L0---------L0
* | | | | | |
* d0,...,dn d0,...,dn d0,...,dn d0,...,dn d0,...,dn d0,.,dm;
* | |
* L0---------L0---------L0 L0---------L0---------L0
* | | | | | |
* d0,...,dn d0,...,dn d0,...,dn d0,...,dn d0,...,dn d0,.,dm;
* L2L1L0d0,...,dnL0d0,...,dnL0d0,...,dnL1L0d0,...,dnL0d0,...,dnL0d0,..dm
*
* <---old---><----------------------------extend----------------------->
@ -3307,7 +3300,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
(long long) blkno, (long long) nblocks, (long long) newsize);
/*
* initialize bmap control page.
* initialize bmap control page.
*
* all the data in bmap control page should exclude
* the mkfs hidden dmap page.
@ -3330,7 +3323,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
bmp->db_numag += ((u32) newsize % (u32) bmp->db_agsize) ? 1 : 0;
/*
* reconfigure db_agfree[]
* reconfigure db_agfree[]
* from old AG configuration to new AG configuration;
*
* coalesce contiguous k (newAGSize/oldAGSize) AGs;
@ -3362,7 +3355,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
bmp->db_maxag = bmp->db_maxag / k;
/*
* extend bmap
* extend bmap
*
* update bit maps and corresponding level control pages;
* global control page db_nfree, db_agfree[agno], db_maxfreebud;
@ -3410,7 +3403,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
/* compute start L0 */
j = 0;
l1leaf = l1dcp->stree + CTLLEAFIND;
p += nbperpage; /* 1st L0 of L1.k */
p += nbperpage; /* 1st L0 of L1.k */
}
/*
@ -3548,7 +3541,7 @@ errout:
return -EIO;
/*
* finalize bmap control page
* finalize bmap control page
*/
finalize:
@ -3567,7 +3560,7 @@ void dbFinalizeBmap(struct inode *ipbmap)
int i, n;
/*
* finalize bmap control page
* finalize bmap control page
*/
//finalize:
/*
@ -3953,8 +3946,8 @@ static int dbGetL2AGSize(s64 nblocks)
* convert number of map pages to the zero origin top dmapctl level
*/
#define BMAPPGTOLEV(npages) \
(((npages) <= 3 + MAXL0PAGES) ? 0 \
: ((npages) <= 2 + MAXL1PAGES) ? 1 : 2)
(((npages) <= 3 + MAXL0PAGES) ? 0 : \
((npages) <= 2 + MAXL1PAGES) ? 1 : 2)
s64 dbMapFileSizeToMapSize(struct inode * ipbmap)
{
@ -3981,8 +3974,8 @@ s64 dbMapFileSizeToMapSize(struct inode * ipbmap)
factor =
(i == 2) ? MAXL1PAGES : ((i == 1) ? MAXL0PAGES : 1);
complete = (u32) npages / factor;
ndmaps += complete * ((i == 2) ? LPERCTL * LPERCTL
: ((i == 1) ? LPERCTL : 1));
ndmaps += complete * ((i == 2) ? LPERCTL * LPERCTL :
((i == 1) ? LPERCTL : 1));
/* pages in last/incomplete child */
npages = (u32) npages % factor;

View File

@ -83,7 +83,7 @@ static __inline signed char TREEMAX(signed char *cp)
* - 1 is added to account for the control page of the map.
*/
#define BLKTODMAP(b,s) \
((((b) >> 13) + ((b) >> 23) + ((b) >> 33) + 3 + 1) << (s))
((((b) >> 13) + ((b) >> 23) + ((b) >> 33) + 3 + 1) << (s))
/*
* convert disk block number to the logical block number of the LEVEL 0
@ -98,7 +98,7 @@ static __inline signed char TREEMAX(signed char *cp)
* - 1 is added to account for the control page of the map.
*/
#define BLKTOL0(b,s) \
(((((b) >> 23) << 10) + ((b) >> 23) + ((b) >> 33) + 2 + 1) << (s))
(((((b) >> 23) << 10) + ((b) >> 23) + ((b) >> 33) + 2 + 1) << (s))
/*
* convert disk block number to the logical block number of the LEVEL 1
@ -120,7 +120,7 @@ static __inline signed char TREEMAX(signed char *cp)
* at the specified level which describes the disk block.
*/
#define BLKTOCTL(b,s,l) \
(((l) == 2) ? 1 : ((l) == 1) ? BLKTOL1((b),(s)) : BLKTOL0((b),(s)))
(((l) == 2) ? 1 : ((l) == 1) ? BLKTOL1((b),(s)) : BLKTOL0((b),(s)))
/*
* convert aggregate map size to the zero origin dmapctl level of the
@ -145,27 +145,27 @@ static __inline signed char TREEMAX(signed char *cp)
* dmaptree must be consistent with dmapctl.
*/
struct dmaptree {
__le32 nleafs; /* 4: number of tree leafs */
__le32 l2nleafs; /* 4: l2 number of tree leafs */
__le32 leafidx; /* 4: index of first tree leaf */
__le32 height; /* 4: height of the tree */
__le32 nleafs; /* 4: number of tree leafs */
__le32 l2nleafs; /* 4: l2 number of tree leafs */
__le32 leafidx; /* 4: index of first tree leaf */
__le32 height; /* 4: height of the tree */
s8 budmin; /* 1: min l2 tree leaf value to combine */
s8 stree[TREESIZE]; /* TREESIZE: tree */
u8 pad[2]; /* 2: pad to word boundary */
}; /* - 360 - */
s8 stree[TREESIZE]; /* TREESIZE: tree */
u8 pad[2]; /* 2: pad to word boundary */
}; /* - 360 - */
/*
* dmap page per 8K blocks bitmap
*/
struct dmap {
__le32 nblocks; /* 4: num blks covered by this dmap */
__le32 nfree; /* 4: num of free blks in this dmap */
__le64 start; /* 8: starting blkno for this dmap */
struct dmaptree tree; /* 360: dmap tree */
u8 pad[1672]; /* 1672: pad to 2048 bytes */
__le32 wmap[LPERDMAP]; /* 1024: bits of the working map */
__le32 pmap[LPERDMAP]; /* 1024: bits of the persistent map */
}; /* - 4096 - */
__le32 nblocks; /* 4: num blks covered by this dmap */
__le32 nfree; /* 4: num of free blks in this dmap */
__le64 start; /* 8: starting blkno for this dmap */
struct dmaptree tree; /* 360: dmap tree */
u8 pad[1672]; /* 1672: pad to 2048 bytes */
__le32 wmap[LPERDMAP]; /* 1024: bits of the working map */
__le32 pmap[LPERDMAP]; /* 1024: bits of the persistent map */
}; /* - 4096 - */
/*
* disk map control page per level.
@ -173,14 +173,14 @@ struct dmap {
* dmapctl must be consistent with dmaptree.
*/
struct dmapctl {
__le32 nleafs; /* 4: number of tree leafs */
__le32 l2nleafs; /* 4: l2 number of tree leafs */
__le32 leafidx; /* 4: index of the first tree leaf */
__le32 height; /* 4: height of tree */
s8 budmin; /* 1: minimum l2 tree leaf value */
s8 stree[CTLTREESIZE]; /* CTLTREESIZE: dmapctl tree */
u8 pad[2714]; /* 2714: pad to 4096 */
}; /* - 4096 - */
__le32 nleafs; /* 4: number of tree leafs */
__le32 l2nleafs; /* 4: l2 number of tree leafs */
__le32 leafidx; /* 4: index of the first tree leaf */
__le32 height; /* 4: height of tree */
s8 budmin; /* 1: minimum l2 tree leaf value */
s8 stree[CTLTREESIZE]; /* CTLTREESIZE: dmapctl tree */
u8 pad[2714]; /* 2714: pad to 4096 */
}; /* - 4096 - */
/*
* common definition for dmaptree within dmap and dmapctl
@ -202,41 +202,41 @@ typedef union dmtree {
* on-disk aggregate disk allocation map descriptor.
*/
struct dbmap_disk {
__le64 dn_mapsize; /* 8: number of blocks in aggregate */
__le64 dn_nfree; /* 8: num free blks in aggregate map */
__le32 dn_l2nbperpage; /* 4: number of blks per page */
__le32 dn_numag; /* 4: total number of ags */
__le32 dn_maxlevel; /* 4: number of active ags */
__le32 dn_maxag; /* 4: max active alloc group number */
__le32 dn_agpref; /* 4: preferred alloc group (hint) */
__le32 dn_aglevel; /* 4: dmapctl level holding the AG */
__le32 dn_agheigth; /* 4: height in dmapctl of the AG */
__le32 dn_agwidth; /* 4: width in dmapctl of the AG */
__le32 dn_agstart; /* 4: start tree index at AG height */
__le32 dn_agl2size; /* 4: l2 num of blks per alloc group */
__le64 dn_agfree[MAXAG];/* 8*MAXAG: per AG free count */
__le64 dn_agsize; /* 8: num of blks per alloc group */
s8 dn_maxfreebud; /* 1: max free buddy system */
u8 pad[3007]; /* 3007: pad to 4096 */
}; /* - 4096 - */
__le64 dn_mapsize; /* 8: number of blocks in aggregate */
__le64 dn_nfree; /* 8: num free blks in aggregate map */
__le32 dn_l2nbperpage; /* 4: number of blks per page */
__le32 dn_numag; /* 4: total number of ags */
__le32 dn_maxlevel; /* 4: number of active ags */
__le32 dn_maxag; /* 4: max active alloc group number */
__le32 dn_agpref; /* 4: preferred alloc group (hint) */
__le32 dn_aglevel; /* 4: dmapctl level holding the AG */
__le32 dn_agheigth; /* 4: height in dmapctl of the AG */
__le32 dn_agwidth; /* 4: width in dmapctl of the AG */
__le32 dn_agstart; /* 4: start tree index at AG height */
__le32 dn_agl2size; /* 4: l2 num of blks per alloc group */
__le64 dn_agfree[MAXAG];/* 8*MAXAG: per AG free count */
__le64 dn_agsize; /* 8: num of blks per alloc group */
s8 dn_maxfreebud; /* 1: max free buddy system */
u8 pad[3007]; /* 3007: pad to 4096 */
}; /* - 4096 - */
struct dbmap {
s64 dn_mapsize; /* number of blocks in aggregate */
s64 dn_nfree; /* num free blks in aggregate map */
int dn_l2nbperpage; /* number of blks per page */
int dn_numag; /* total number of ags */
int dn_maxlevel; /* number of active ags */
int dn_maxag; /* max active alloc group number */
int dn_agpref; /* preferred alloc group (hint) */
int dn_aglevel; /* dmapctl level holding the AG */
int dn_agheigth; /* height in dmapctl of the AG */
int dn_agwidth; /* width in dmapctl of the AG */
int dn_agstart; /* start tree index at AG height */
int dn_agl2size; /* l2 num of blks per alloc group */
s64 dn_agfree[MAXAG]; /* per AG free count */
s64 dn_agsize; /* num of blks per alloc group */
signed char dn_maxfreebud; /* max free buddy system */
}; /* - 4096 - */
s64 dn_mapsize; /* number of blocks in aggregate */
s64 dn_nfree; /* num free blks in aggregate map */
int dn_l2nbperpage; /* number of blks per page */
int dn_numag; /* total number of ags */
int dn_maxlevel; /* number of active ags */
int dn_maxag; /* max active alloc group number */
int dn_agpref; /* preferred alloc group (hint) */
int dn_aglevel; /* dmapctl level holding the AG */
int dn_agheigth; /* height in dmapctl of the AG */
int dn_agwidth; /* width in dmapctl of the AG */
int dn_agstart; /* start tree index at AG height */
int dn_agl2size; /* l2 num of blks per alloc group */
s64 dn_agfree[MAXAG]; /* per AG free count */
s64 dn_agsize; /* num of blks per alloc group */
signed char dn_maxfreebud; /* max free buddy system */
}; /* - 4096 - */
/*
* in-memory aggregate disk allocation map descriptor.
*/

View File

@ -315,8 +315,8 @@ static inline void lock_index(tid_t tid, struct inode *ip, struct metapage * mp,
lv = &llck->lv[llck->index];
/*
* Linelock slot size is twice the size of directory table
* slot size. 512 entries per page.
* Linelock slot size is twice the size of directory table
* slot size. 512 entries per page.
*/
lv->offset = ((index - 2) & 511) >> 1;
lv->length = 1;
@ -615,7 +615,7 @@ int dtSearch(struct inode *ip, struct component_name * key, ino_t * data,
btstack->nsplit = 1;
/*
* search down tree from root:
* search down tree from root:
*
* between two consecutive entries of <Ki, Pi> and <Kj, Pj> of
* internal page, child page Pi contains entry with k, Ki <= K < Kj.
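
The invariant quoted above (child page Pi holds keys k with Ki <= K < Kj) means each internal page is descended by taking the last separator key that is less than or equal to the search key, with the leftmost key acting as minus infinity (the dtSplitRoot() comment further down makes the same point). A generic sketch, with integer keys standing in for the real name keys:

static int pick_child(const int *key, int nkeys, int K)
{
        int lo = 1, hi = nkeys - 1, child = 0;  /* key[0] is treated as -infinity */

        while (lo <= hi) {                      /* last index with key[index] <= K */
                int mid = (lo + hi) / 2;
                if (key[mid] <= K) {
                        child = mid;
                        lo = mid + 1;
                } else {
                        hi = mid - 1;
                }
        }
        return child;                           /* descend into this child page */
}
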
@ -659,7 +659,7 @@ int dtSearch(struct inode *ip, struct component_name * key, ino_t * data,
}
if (cmp == 0) {
/*
* search hit
* search hit
*/
/* search hit - leaf page:
* return the entry found
@ -723,7 +723,7 @@ int dtSearch(struct inode *ip, struct component_name * key, ino_t * data,
}
/*
* search miss
* search miss
*
* base is the smallest index with key (Kj) greater than
* search key (K) and may be zero or (maxindex + 1) index.
@ -834,7 +834,7 @@ int dtInsert(tid_t tid, struct inode *ip,
struct lv *lv;
/*
* retrieve search result
* retrieve search result
*
* dtSearch() returns (leaf page pinned, index at which to insert).
* n.b. dtSearch() may return index of (maxindex + 1) of
@ -843,7 +843,7 @@ int dtInsert(tid_t tid, struct inode *ip,
DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
/*
* insert entry for new key
* insert entry for new key
*/
if (DO_INDEX(ip)) {
if (JFS_IP(ip)->next_index == DIREND) {
@ -860,9 +860,9 @@ int dtInsert(tid_t tid, struct inode *ip,
data.leaf.ino = *fsn;
/*
* leaf page does not have enough room for new entry:
* leaf page does not have enough room for new entry:
*
* extend/split the leaf page;
* extend/split the leaf page;
*
* dtSplitUp() will insert the entry and unpin the leaf page.
*/
@ -877,9 +877,9 @@ int dtInsert(tid_t tid, struct inode *ip,
}
/*
* leaf page does have enough room for new entry:
* leaf page does have enough room for new entry:
*
* insert the new data entry into the leaf page;
* insert the new data entry into the leaf page;
*/
BT_MARK_DIRTY(mp, ip);
/*
@ -967,13 +967,13 @@ static int dtSplitUp(tid_t tid,
}
/*
* split leaf page
* split leaf page
*
* The split routines insert the new entry, and
* acquire txLock as appropriate.
*/
/*
* split root leaf page:
* split root leaf page:
*/
if (sp->header.flag & BT_ROOT) {
/*
@ -1012,7 +1012,7 @@ static int dtSplitUp(tid_t tid,
}
/*
* extend first leaf page
* extend first leaf page
*
* extend the 1st extent if less than buffer page size
* (dtExtendPage() returns leaf page unpinned)
@ -1068,7 +1068,7 @@ static int dtSplitUp(tid_t tid,
}
/*
* split leaf page <sp> into <sp> and a new right page <rp>.
* split leaf page <sp> into <sp> and a new right page <rp>.
*
* return <rp> pinned and its extent descriptor <rpxd>
*/
@ -1433,7 +1433,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
rp->header.freecnt = rp->header.maxslot - fsi;
/*
* sequential append at tail: append without split
* sequential append at tail: append without split
*
* If splitting the last page on a level because of appending
* an entry to it (skip is maxentry), it's likely that the access is
@ -1467,7 +1467,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
}
/*
* non-sequential insert (at possibly middle page)
* non-sequential insert (at possibly middle page)
*/
/*
@ -1508,7 +1508,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
left = 0;
/*
* compute fill factor for split pages
* compute fill factor for split pages
*
* <nxt> traces the next entry to move to rp
* <off> traces the next entry to stay in sp
@ -1551,7 +1551,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
/* <nxt> points to the 1st entry to move */
/*
* move entries to right page
* move entries to right page
*
* dtMoveEntry() initializes rp and reserves entry for insertion
*
@ -1677,7 +1677,7 @@ static int dtExtendPage(tid_t tid,
return (rc);
/*
* extend the extent
* extend the extent
*/
pxdlist = split->pxdlist;
pxd = &pxdlist->pxd[pxdlist->npxd];
@ -1722,7 +1722,7 @@ static int dtExtendPage(tid_t tid,
}
/*
* extend the page
* extend the page
*/
sp->header.self = *pxd;
@ -1739,9 +1739,6 @@ static int dtExtendPage(tid_t tid,
/* update buffer extent descriptor of extended page */
xlen = lengthPXD(pxd);
xsize = xlen << JFS_SBI(sb)->l2bsize;
#ifdef _STILL_TO_PORT
bmSetXD(smp, xaddr, xsize);
#endif /* _STILL_TO_PORT */
/*
* copy old stbl to new stbl at start of extended area
@ -1836,7 +1833,7 @@ static int dtExtendPage(tid_t tid,
}
/*
* update parent entry on the parent/root page
* update parent entry on the parent/root page
*/
/*
* acquire a transaction lock on the parent/root page
@ -1904,7 +1901,7 @@ static int dtSplitRoot(tid_t tid,
sp = &JFS_IP(ip)->i_dtroot;
/*
* allocate/initialize a single (right) child page
* allocate/initialize a single (right) child page
*
* N.B. at first split, a one (or two) block to fit new entry
* is allocated; at subsequent split, a full page is allocated;
@ -1943,7 +1940,7 @@ static int dtSplitRoot(tid_t tid,
rp->header.prev = 0;
/*
* move in-line root page into new right page extent
* move in-line root page into new right page extent
*/
/* linelock header + copied entries + new stbl (1st slot) in new page */
ASSERT(dtlck->index == 0);
@ -2016,7 +2013,7 @@ static int dtSplitRoot(tid_t tid,
dtInsertEntry(rp, split->index, split->key, split->data, &dtlck);
/*
* reset parent/root page
* reset parent/root page
*
* set the 1st entry offset to 0, which forces the left-most key
* at any level of the tree to be less than any search key.
@ -2102,7 +2099,7 @@ int dtDelete(tid_t tid,
dtpage_t *np;
/*
* search for the entry to delete:
* search for the entry to delete:
*
* dtSearch() returns (leaf page pinned, index at which to delete).
*/
@ -2253,7 +2250,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
int i;
/*
* keep the root leaf page which has become empty
* keep the root leaf page which has become empty
*/
if (BT_IS_ROOT(fmp)) {
/*
@ -2269,7 +2266,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
}
/*
* free the non-root leaf page
* free the non-root leaf page
*/
/*
* acquire a transaction lock on the page
@ -2299,7 +2296,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
discard_metapage(fmp);
/*
* propagate page deletion up the directory tree
* propagate page deletion up the directory tree
*
* If the delete from the parent page makes it empty,
* continue all the way up the tree.
@ -2440,10 +2437,10 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
#ifdef _NOTYET
/*
* NAME: dtRelocate()
* NAME: dtRelocate()
*
* FUNCTION: relocate dtpage (internal or leaf) of directory;
* This function is mainly used by defragfs utility.
* FUNCTION: relocate dtpage (internal or leaf) of directory;
* This function is mainly used by defragfs utility.
*/
int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
s64 nxaddr)
@ -2471,8 +2468,8 @@ int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
xlen);
/*
* 1. get the internal parent dtpage covering
* router entry for the target page to be relocated;
* 1. get the internal parent dtpage covering
* router entry for the target page to be relocated;
*/
rc = dtSearchNode(ip, lmxaddr, opxd, &btstack);
if (rc)
@ -2483,7 +2480,7 @@ int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
jfs_info("dtRelocate: parent router entry validated.");
/*
* 2. relocate the target dtpage
* 2. relocate the target dtpage
*/
/* read in the target page from src extent */
DT_GETPAGE(ip, oxaddr, mp, PSIZE, p, rc);
@ -2581,9 +2578,7 @@ int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
/* update the buffer extent descriptor of the dtpage */
xsize = xlen << JFS_SBI(ip->i_sb)->l2bsize;
#ifdef _STILL_TO_PORT
bmSetXD(mp, nxaddr, xsize);
#endif /* _STILL_TO_PORT */
/* unpin the relocated page */
DT_PUTPAGE(mp);
jfs_info("dtRelocate: target dtpage relocated.");
@ -2594,7 +2589,7 @@ int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
*/
/*
* 3. acquire maplock for the source extent to be freed;
* 3. acquire maplock for the source extent to be freed;
*/
/* for dtpage relocation, write a LOG_NOREDOPAGE record
* for the source dtpage (logredo() will init NoRedoPage
@ -2609,7 +2604,7 @@ int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
pxdlock->index = 1;
/*
* 4. update the parent router entry for relocation;
* 4. update the parent router entry for relocation;
*
* acquire tlck for the parent entry covering the target dtpage;
* write LOG_REDOPAGE to apply after image only;
@ -2637,7 +2632,7 @@ int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
* NAME: dtSearchNode()
*
* FUNCTION: Search for a dtpage containing a specified address
* This function is mainly used by defragfs utility.
* This function is mainly used by defragfs utility.
*
* NOTE: Search result on stack, the found page is pinned at exit.
* The result page must be an internal dtpage.
@ -2660,7 +2655,7 @@ static int dtSearchNode(struct inode *ip, s64 lmxaddr, pxd_t * kpxd,
BT_CLR(btstack); /* reset stack */
/*
* descend tree to the level with specified leftmost page
* descend tree to the level with specified leftmost page
*
* by convention, root bn = 0.
*/
@ -2699,7 +2694,7 @@ static int dtSearchNode(struct inode *ip, s64 lmxaddr, pxd_t * kpxd,
}
/*
* search each page at the current level
* search each page at the current level
*/
loop:
stbl = DT_GETSTBL(p);
@ -3044,9 +3039,9 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
if (DO_INDEX(ip)) {
/*
* persistent index is stored in directory entries.
* Special cases: 0 = .
* 1 = ..
* -1 = End of directory
* Special cases: 0 = .
* 1 = ..
* -1 = End of directory
*/
do_index = 1;
@ -3128,10 +3123,10 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
/*
* Legacy filesystem - OS/2 & Linux JFS < 0.3.6
*
* pn = index = 0: First entry "."
* pn = 0; index = 1: Second entry ".."
* pn > 0: Real entries, pn=1 -> leftmost page
* pn = index = -1: No more entries
* pn = index = 0: First entry "."
* pn = 0; index = 1: Second entry ".."
* pn > 0: Real entries, pn=1 -> leftmost page
* pn = index = -1: No more entries
*/
dtpos = filp->f_pos;
if (dtpos == 0) {
@ -3351,7 +3346,7 @@ static int dtReadFirst(struct inode *ip, struct btstack * btstack)
BT_CLR(btstack); /* reset stack */
/*
* descend leftmost path of the tree
* descend leftmost path of the tree
*
* by convention, root bn = 0.
*/
@ -4531,7 +4526,7 @@ int dtModify(tid_t tid, struct inode *ip,
struct ldtentry *entry;
/*
* search for the entry to modify:
* search for the entry to modify:
*
* dtSearch() returns (leaf page pinned, index at which to modify).
*/

View File

@ -35,7 +35,7 @@ typedef union {
/*
* entry segment/slot
* entry segment/slot
*
* an entry consists of type dependent head/only segment/slot and
* additional segments/slots linked vi next field;

View File

@ -34,8 +34,8 @@ static int extBrealloc(struct inode *, s64, s64, s64 *, s64 *);
#endif
static s64 extRoundDown(s64 nb);
#define DPD(a) (printk("(a): %d\n",(a)))
#define DPC(a) (printk("(a): %c\n",(a)))
#define DPD(a) (printk("(a): %d\n",(a)))
#define DPC(a) (printk("(a): %c\n",(a)))
#define DPL1(a) \
{ \
if ((a) >> 32) \
@ -51,19 +51,19 @@ static s64 extRoundDown(s64 nb);
printk("(a): %x\n",(a) << 32); \
}
#define DPD1(a) (printk("(a): %d ",(a)))
#define DPX(a) (printk("(a): %08x\n",(a)))
#define DPX1(a) (printk("(a): %08x ",(a)))
#define DPS(a) (printk("%s\n",(a)))
#define DPE(a) (printk("\nENTERING: %s\n",(a)))
#define DPE1(a) (printk("\nENTERING: %s",(a)))
#define DPS1(a) (printk(" %s ",(a)))
#define DPD1(a) (printk("(a): %d ",(a)))
#define DPX(a) (printk("(a): %08x\n",(a)))
#define DPX1(a) (printk("(a): %08x ",(a)))
#define DPS(a) (printk("%s\n",(a)))
#define DPE(a) (printk("\nENTERING: %s\n",(a)))
#define DPE1(a) (printk("\nENTERING: %s",(a)))
#define DPS1(a) (printk(" %s ",(a)))
/*
* NAME: extAlloc()
*
* FUNCTION: allocate an extent for a specified page range within a
* FUNCTION: allocate an extent for a specified page range within a
* file.
*
* PARAMETERS:
@ -78,9 +78,9 @@ static s64 extRoundDown(s64 nb);
* should be marked as allocated but not recorded.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
* -ENOSPC - insufficient disk resources.
* 0 - success
* -EIO - i/o error.
* -ENOSPC - insufficient disk resources.
*/
int
extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
@ -192,9 +192,9 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
#ifdef _NOTYET
/*
* NAME: extRealloc()
* NAME: extRealloc()
*
* FUNCTION: extend the allocation of a file extent containing a
* FUNCTION: extend the allocation of a file extent containing a
* partial back last page.
*
* PARAMETERS:
@ -207,9 +207,9 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
* should be marked as allocated but not recorded.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
* -ENOSPC - insufficient disk resources.
* 0 - success
* -EIO - i/o error.
* -ENOSPC - insufficient disk resources.
*/
int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
{
@ -345,9 +345,9 @@ exit:
/*
* NAME: extHint()
* NAME: extHint()
*
* FUNCTION: produce an extent allocation hint for a file offset.
* FUNCTION: produce an extent allocation hint for a file offset.
*
* PARAMETERS:
* ip - the inode of the file.
@ -356,8 +356,8 @@ exit:
* the hint.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
* 0 - success
* -EIO - i/o error.
*/
int extHint(struct inode *ip, s64 offset, xad_t * xp)
{
@ -387,7 +387,7 @@ int extHint(struct inode *ip, s64 offset, xad_t * xp)
lxdl.nlxd = 1;
lxdl.lxd = &lxd;
LXDoffset(&lxd, prev)
LXDlength(&lxd, nbperpage);
LXDlength(&lxd, nbperpage);
xadl.maxnxad = 1;
xadl.nxad = 0;
@ -397,11 +397,11 @@ int extHint(struct inode *ip, s64 offset, xad_t * xp)
if ((rc = xtLookupList(ip, &lxdl, &xadl, 0)))
return (rc);
/* check if not extent exists for the previous page.
/* check if no extent exists for the previous page.
* this is possible for sparse files.
*/
if (xadl.nxad == 0) {
// assert(ISSPARSE(ip));
// assert(ISSPARSE(ip));
return (0);
}
@ -410,28 +410,28 @@ int extHint(struct inode *ip, s64 offset, xad_t * xp)
*/
xp->flag &= XAD_NOTRECORDED;
if(xadl.nxad != 1 || lengthXAD(xp) != nbperpage) {
if(xadl.nxad != 1 || lengthXAD(xp) != nbperpage) {
jfs_error(ip->i_sb, "extHint: corrupt xtree");
return -EIO;
}
}
return (0);
}
/*
* NAME: extRecord()
* NAME: extRecord()
*
* FUNCTION: change a page within a file from not recorded to recorded.
* FUNCTION: change a page within a file from not recorded to recorded.
*
* PARAMETERS:
* ip - inode of the file.
* cp - cbuf of the file page.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
* -ENOSPC - insufficient disk resources.
* 0 - success
* -EIO - i/o error.
* -ENOSPC - insufficient disk resources.
*/
int extRecord(struct inode *ip, xad_t * xp)
{
@ -451,9 +451,9 @@ int extRecord(struct inode *ip, xad_t * xp)
#ifdef _NOTYET
/*
* NAME: extFill()
* NAME: extFill()
*
* FUNCTION: allocate disk space for a file page that represents
* FUNCTION: allocate disk space for a file page that represents
* a file hole.
*
* PARAMETERS:
@ -461,16 +461,16 @@ int extRecord(struct inode *ip, xad_t * xp)
* cp - cbuf of the file page represent the hole.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
* -ENOSPC - insufficient disk resources.
* 0 - success
* -EIO - i/o error.
* -ENOSPC - insufficient disk resources.
*/
int extFill(struct inode *ip, xad_t * xp)
{
int rc, nbperpage = JFS_SBI(ip->i_sb)->nbperpage;
s64 blkno = offsetXAD(xp) >> ip->i_blkbits;
// assert(ISSPARSE(ip));
// assert(ISSPARSE(ip));
/* initialize the extent allocation hint */
XADaddress(xp, 0);
@ -489,7 +489,7 @@ int extFill(struct inode *ip, xad_t * xp)
/*
* NAME: extBalloc()
*
* FUNCTION: allocate disk blocks to form an extent.
* FUNCTION: allocate disk blocks to form an extent.
*
* initially, we will try to allocate disk blocks for the
* requested size (nblocks). if this fails (nblocks
@ -513,9 +513,9 @@ int extFill(struct inode *ip, xad_t * xp)
* allocated block range.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
* -ENOSPC - insufficient disk resources.
* 0 - success
* -EIO - i/o error.
* -ENOSPC - insufficient disk resources.
*/
static int
extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
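The shrink-and-retry strategy the comment above describes can be sketched as follows. This is an illustration, not the extBalloc() body from this commit, and it assumes the usual dbAlloc(ip, hint, nblocks, &blkno) block-map interface returning 0 on success or -ENOSPC.

/* Sketch only: halve a power-of-2 request until the block map can satisfy it. */
static int ext_balloc_sketch(struct inode *ip, s64 hint, s64 *nblocks, s64 *blkno)
{
        s64 nb = extRoundDown(*nblocks);        /* start from a power-of-2 size */
        int rc;

        while ((rc = dbAlloc(ip, hint, nb, blkno)) == -ENOSPC) {
                if (nb == 1)                    /* nothing left to shrink */
                        return rc;
                nb >>= 1;                       /* halve and retry */
        }
        if (rc == 0)
                *nblocks = nb;                  /* report what was actually allocated */
        return rc;
}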
@ -580,7 +580,7 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
/*
* NAME: extBrealloc()
*
* FUNCTION: attempt to extend an extent's allocation.
* FUNCTION: attempt to extend an extent's allocation.
*
* Initially, we will try to extend the extent's allocation
* in place. If this fails, we'll try to move the extent
@ -597,8 +597,8 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
*
* PARAMETERS:
* ip - the inode of the file.
* blkno - starting block number of the extents current allocation.
* nblks - number of blocks within the extents current allocation.
* blkno - starting block number of the extents current allocation.
* nblks - number of blocks within the extents current allocation.
* newnblks - pointer to a s64 value. on entry, this value is the
* the new desired extent size (number of blocks). on
* successful exit, this value is set to the extent's actual
@ -606,9 +606,9 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
* newblkno - the starting block number of the extents new allocation.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
* -ENOSPC - insufficient disk resources.
* 0 - success
* -EIO - i/o error.
* -ENOSPC - insufficient disk resources.
*/
static int
extBrealloc(struct inode *ip,
@ -634,16 +634,16 @@ extBrealloc(struct inode *ip,
/*
* NAME: extRoundDown()
* NAME: extRoundDown()
*
* FUNCTION: round down a specified number of blocks to the next
* FUNCTION: round down a specified number of blocks to the next
* smallest power of 2 number.
*
* PARAMETERS:
* nb - the inode of the file.
*
* RETURN VALUES:
* next smallest power of 2 number.
* next smallest power of 2 number.
*/
static s64 extRoundDown(s64 nb)
{
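The body of extRoundDown() is not part of this hunk; a minimal standalone version of the round-down it documents (the largest power of 2 not exceeding nb, assuming nb >= 1) could look like this.

/* Sketch: round nb down to the next smallest power of 2 (assumes nb >= 1). */
static s64 round_down_pow2(s64 nb)
{
        s64 k = 1;

        while (k <= (nb >> 1))          /* stop once doubling k would exceed nb */
                k <<= 1;
        return k;
}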

View File

@ -34,9 +34,9 @@
#define JFS_UNICODE 0x00000001 /* unicode name */
/* mount time flags for error handling */
#define JFS_ERR_REMOUNT_RO 0x00000002 /* remount read-only */
#define JFS_ERR_CONTINUE 0x00000004 /* continue */
#define JFS_ERR_PANIC 0x00000008 /* panic */
#define JFS_ERR_REMOUNT_RO 0x00000002 /* remount read-only */
#define JFS_ERR_CONTINUE 0x00000004 /* continue */
#define JFS_ERR_PANIC 0x00000008 /* panic */
/* Quota support */
#define JFS_USRQUOTA 0x00000010
@ -83,7 +83,6 @@
/* case-insensitive name/directory support */
#define JFS_AIX 0x80000000 /* AIX support */
/* POSIX name/directory support - Never implemented*/
/*
* buffer cache configuration
@ -113,10 +112,10 @@
#define IDATASIZE 256 /* inode inline data size */
#define IXATTRSIZE 128 /* inode inline extended attribute size */
#define XTPAGE_SIZE 4096
#define log2_PAGESIZE 12
#define XTPAGE_SIZE 4096
#define log2_PAGESIZE 12
#define IAG_SIZE 4096
#define IAG_SIZE 4096
#define IAG_EXTENT_SIZE 4096
#define INOSPERIAG 4096 /* number of disk inodes per iag */
#define L2INOSPERIAG 12 /* l2 number of disk inodes per iag */

View File

@ -93,21 +93,21 @@ static int copy_from_dinode(struct dinode *, struct inode *);
static void copy_to_dinode(struct dinode *, struct inode *);
/*
* NAME: diMount()
* NAME: diMount()
*
* FUNCTION: initialize the incore inode map control structures for
* FUNCTION: initialize the incore inode map control structures for
* a fileset or aggregate init time.
*
* the inode map's control structure (dinomap) is
* brought in from disk and placed in virtual memory.
* the inode map's control structure (dinomap) is
* brought in from disk and placed in virtual memory.
*
* PARAMETERS:
* ipimap - pointer to inode map inode for the aggregate or fileset.
* ipimap - pointer to inode map inode for the aggregate or fileset.
*
* RETURN VALUES:
* 0 - success
* -ENOMEM - insufficient free virtual memory.
* -EIO - i/o error.
* 0 - success
* -ENOMEM - insufficient free virtual memory.
* -EIO - i/o error.
*/
int diMount(struct inode *ipimap)
{
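The diMount() body is elided between hunks. As a rough illustration of what the comment describes (bring the on-disk dinomap into a newly allocated in-core map), the flow is roughly the sketch below; the read_metapage() arguments follow the usual JFS metapage helpers but should be treated as assumptions, and copy_from_disk_dinomap() is a hypothetical stand-in for the le32-to-cpu field copies.

/* Sketch only, not the diMount() body from this commit. */
static int di_mount_sketch(struct inode *ipimap)
{
        struct inomap *imap;
        struct metapage *mp;

        imap = kmalloc(sizeof(struct inomap), GFP_KERNEL);
        if (!imap)
                return -ENOMEM;

        /* IMAPBLKNO (0) is the lblkno of the dinomap within the map inode */
        mp = read_metapage(ipimap,
                           IMAPBLKNO << JFS_SBI(ipimap->i_sb)->l2nbperpage,
                           PSIZE, 0);
        if (!mp) {
                kfree(imap);
                return -EIO;
        }

        /* copy_from_disk_dinomap(): hypothetical helper for the field copies */
        copy_from_disk_dinomap(&imap->im_imap, (struct dinomap_disk *) mp->data);
        imap->im_ipimap = ipimap;
        release_metapage(mp);
        return 0;
}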
@ -180,18 +180,18 @@ int diMount(struct inode *ipimap)
/*
* NAME: diUnmount()
* NAME: diUnmount()
*
* FUNCTION: write to disk the incore inode map control structures for
* FUNCTION: write to disk the incore inode map control structures for
* a fileset or aggregate at unmount time.
*
* PARAMETERS:
* ipimap - pointer to inode map inode for the aggregate or fileset.
* ipimap - pointer to inode map inode for the aggregate or fileset.
*
* RETURN VALUES:
* 0 - success
* -ENOMEM - insufficient free virtual memory.
* -EIO - i/o error.
* 0 - success
* -ENOMEM - insufficient free virtual memory.
* -EIO - i/o error.
*/
int diUnmount(struct inode *ipimap, int mounterror)
{
@ -274,9 +274,9 @@ int diSync(struct inode *ipimap)
/*
* NAME: diRead()
* NAME: diRead()
*
* FUNCTION: initialize an incore inode from disk.
* FUNCTION: initialize an incore inode from disk.
*
 * on entry, the specified incore inode should itself
* specify the disk inode number corresponding to the
@ -285,7 +285,7 @@ int diSync(struct inode *ipimap)
* this routine handles incore inode initialization for
* both "special" and "regular" inodes. special inodes
* are those required early in the mount process and
* require special handling since much of the file system
* require special handling since much of the file system
* is not yet initialized. these "special" inodes are
* identified by a NULL inode map inode pointer and are
* actually initialized by a call to diReadSpecial().
@ -298,12 +298,12 @@ int diSync(struct inode *ipimap)
* incore inode.
*
* PARAMETERS:
* ip - pointer to incore inode to be initialized from disk.
* ip - pointer to incore inode to be initialized from disk.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
* -ENOMEM - insufficient memory
* 0 - success
* -EIO - i/o error.
* -ENOMEM - insufficient memory
*
*/
int diRead(struct inode *ip)
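Before the hunks below, it may help to see the arithmetic diRead() relies on: an inode number splits into its iag, its extent within the iag, and its slot within the extent. INOSPERIAG/L2INOSPERIAG come from jfs_filsys.h (4096 per iag, shown earlier in this commit); the 32-inodes-per-extent figure is assumed from the same header, and the helper name is illustrative.

/* Sketch: locate a disk inode number inside the inode map geometry. */
static void locate_dinode(ino_t inum, int *iagno, int *extno, int *slot)
{
        *iagno = INOTOIAG(inum);                  /* inum >> L2INOSPERIAG */
        *extno = (inum & (INOSPERIAG - 1)) >> 5;  /* which of the 128 extents */
        *slot  = inum & 31;                       /* inode within its 32-inode extent */
}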
@ -410,26 +410,26 @@ int diRead(struct inode *ip)
/*
* NAME: diReadSpecial()
* NAME: diReadSpecial()
*
* FUNCTION: initialize a 'special' inode from disk.
* FUNCTION: initialize a 'special' inode from disk.
*
 * this routine handles aggregate level inodes. The
* inode cache cannot differentiate between the
* aggregate inodes and the filesystem inodes, so we
* handle these here. We don't actually use the aggregate
* inode map, since these inodes are at a fixed location
* inode map, since these inodes are at a fixed location
* and in some cases the aggregate inode map isn't initialized
* yet.
*
* PARAMETERS:
* sb - filesystem superblock
* sb - filesystem superblock
* inum - aggregate inode number
* secondary - 1 if secondary aggregate inode table
*
* RETURN VALUES:
* new inode - success
* NULL - i/o error.
* new inode - success
* NULL - i/o error.
*/
struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
{
@ -502,12 +502,12 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
}
/*
* NAME: diWriteSpecial()
* NAME: diWriteSpecial()
*
* FUNCTION: Write the special inode to disk
* FUNCTION: Write the special inode to disk
*
* PARAMETERS:
* ip - special inode
* ip - special inode
* secondary - 1 if secondary aggregate inode table
*
* RETURN VALUES: none
@ -554,9 +554,9 @@ void diWriteSpecial(struct inode *ip, int secondary)
}
/*
* NAME: diFreeSpecial()
* NAME: diFreeSpecial()
*
* FUNCTION: Free allocated space for special inode
* FUNCTION: Free allocated space for special inode
*/
void diFreeSpecial(struct inode *ip)
{
@ -572,9 +572,9 @@ void diFreeSpecial(struct inode *ip)
/*
* NAME: diWrite()
* NAME: diWrite()
*
* FUNCTION: write the on-disk inode portion of the in-memory inode
* FUNCTION: write the on-disk inode portion of the in-memory inode
* to its corresponding on-disk inode.
*
 * on entry, the specified incore inode should itself
@ -589,11 +589,11 @@ void diFreeSpecial(struct inode *ip)
*
* PARAMETERS:
 * tid - transaction id
* ip - pointer to incore inode to be written to the inode extent.
* ip - pointer to incore inode to be written to the inode extent.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
* 0 - success
* -EIO - i/o error.
*/
int diWrite(tid_t tid, struct inode *ip)
{
@ -730,7 +730,7 @@ int diWrite(tid_t tid, struct inode *ip)
ilinelock = (struct linelock *) & tlck->lock;
/*
* regular file: 16 byte (XAD slot) granularity
* regular file: 16 byte (XAD slot) granularity
*/
if (type & tlckXTREE) {
xtpage_t *p, *xp;
@ -755,7 +755,7 @@ int diWrite(tid_t tid, struct inode *ip)
xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
}
/*
* directory: 32 byte (directory entry slot) granularity
* directory: 32 byte (directory entry slot) granularity
*/
else if (type & tlckDTREE) {
dtpage_t *p, *xp;
@ -800,9 +800,8 @@ int diWrite(tid_t tid, struct inode *ip)
}
/*
* lock/copy inode base: 128 byte slot granularity
* lock/copy inode base: 128 byte slot granularity
*/
// baseDinode:
lv = & dilinelock->lv[dilinelock->index];
lv->offset = dioffset >> L2INODESLOTSIZE;
copy_to_dinode(dp, ip);
@ -813,17 +812,6 @@ int diWrite(tid_t tid, struct inode *ip)
lv->length = 1;
dilinelock->index++;
#ifdef _JFS_FASTDASD
/*
* We aren't logging changes to the DASD used in directory inodes,
* but we need to write them to disk. If we don't unmount cleanly,
* mount will recalculate the DASD used.
*/
if (S_ISDIR(ip->i_mode)
&& (ip->i_ipmnt->i_mntflag & JFS_DASD_ENABLED))
memcpy(&dp->di_DASD, &ip->i_DASD, sizeof(struct dasd));
#endif /* _JFS_FASTDASD */
/* release the buffer holding the updated on-disk inode.
* the buffer will be later written by commit processing.
*/
@ -834,9 +822,9 @@ int diWrite(tid_t tid, struct inode *ip)
/*
* NAME: diFree(ip)
* NAME: diFree(ip)
*
* FUNCTION: free a specified inode from the inode working map
* FUNCTION: free a specified inode from the inode working map
* for a fileset or aggregate.
*
* if the inode to be freed represents the first (only)
@ -865,11 +853,11 @@ int diWrite(tid_t tid, struct inode *ip)
* any updates and are held until all updates are complete.
*
* PARAMETERS:
* ip - inode to be freed.
* ip - inode to be freed.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
* 0 - success
* -EIO - i/o error.
*/
int diFree(struct inode *ip)
{
@ -964,8 +952,8 @@ int diFree(struct inode *ip)
return -EIO;
}
/*
* inode extent still has some inodes or below low water mark:
* keep the inode extent;
* inode extent still has some inodes or below low water mark:
* keep the inode extent;
*/
if (bitmap ||
imap->im_agctl[agno].numfree < 96 ||
@ -1047,12 +1035,12 @@ int diFree(struct inode *ip)
/*
* inode extent has become free and above low water mark:
* free the inode extent;
* inode extent has become free and above low water mark:
* free the inode extent;
*/
/*
* prepare to update iag list(s) (careful update step 1)
* prepare to update iag list(s) (careful update step 1)
*/
amp = bmp = cmp = dmp = NULL;
fwd = back = -1;
@ -1152,7 +1140,7 @@ int diFree(struct inode *ip)
invalidate_pxd_metapages(ip, freepxd);
/*
* update iag list(s) (careful update step 2)
* update iag list(s) (careful update step 2)
*/
/* add the iag to the ag extent free list if this is the
* first free extent for the iag.
@ -1338,20 +1326,20 @@ diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
/*
* NAME: diAlloc(pip,dir,ip)
* NAME: diAlloc(pip,dir,ip)
*
* FUNCTION: allocate a disk inode from the inode working map
* FUNCTION: allocate a disk inode from the inode working map
* for a fileset or aggregate.
*
* PARAMETERS:
* pip - pointer to incore inode for the parent inode.
* dir - 'true' if the new disk inode is for a directory.
* ip - pointer to a new inode
* pip - pointer to incore inode for the parent inode.
* dir - 'true' if the new disk inode is for a directory.
* ip - pointer to a new inode
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
int diAlloc(struct inode *pip, bool dir, struct inode *ip)
{
@ -1433,7 +1421,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
addext = (imap->im_agctl[agno].numfree < 32 && iagp->nfreeexts);
/*
* try to allocate from the IAG
* try to allocate from the IAG
*/
/* check if the inode may be allocated from the iag
* (i.e. the inode has free inodes or new extent can be added).
@ -1633,9 +1621,9 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
/*
* NAME: diAllocAG(imap,agno,dir,ip)
* NAME: diAllocAG(imap,agno,dir,ip)
*
* FUNCTION: allocate a disk inode from the allocation group.
* FUNCTION: allocate a disk inode from the allocation group.
*
* this routine first determines if a new extent of free
* inodes should be added for the allocation group, with
@ -1649,17 +1637,17 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
* PRE CONDITION: Already have the AG lock for this AG.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* agno - allocation group to allocate from.
* dir - 'true' if the new disk inode is for a directory.
* ip - pointer to the new inode to be filled in on successful return
* imap - pointer to inode map control structure.
* agno - allocation group to allocate from.
* dir - 'true' if the new disk inode is for a directory.
* ip - pointer to the new inode to be filled in on successful return
* with the disk inode number allocated, its extent address
* and the start of the ag.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int
diAllocAG(struct inomap * imap, int agno, bool dir, struct inode *ip)
@ -1709,9 +1697,9 @@ diAllocAG(struct inomap * imap, int agno, bool dir, struct inode *ip)
/*
* NAME: diAllocAny(imap,agno,dir,iap)
* NAME: diAllocAny(imap,agno,dir,iap)
*
* FUNCTION: allocate a disk inode from any other allocation group.
* FUNCTION: allocate a disk inode from any other allocation group.
*
* this routine is called when an allocation attempt within
* the primary allocation group has failed. if attempts to
@ -1719,17 +1707,17 @@ diAllocAG(struct inomap * imap, int agno, bool dir, struct inode *ip)
* specified primary group.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* agno - primary allocation group (to avoid).
* dir - 'true' if the new disk inode is for a directory.
* ip - pointer to a new inode to be filled in on successful return
* imap - pointer to inode map control structure.
* agno - primary allocation group (to avoid).
* dir - 'true' if the new disk inode is for a directory.
* ip - pointer to a new inode to be filled in on successful return
* with the disk inode number allocated, its extent address
* and the start of the ag.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int
diAllocAny(struct inomap * imap, int agno, bool dir, struct inode *ip)
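The fallback described above is essentially a sweep over every allocation group except the one that already failed. A schematic version (ignoring the AG locking the real code performs, and assuming im_maxag is the in-core AG count) is:

/* Sketch of the "try every other AG" sweep, not the diAllocAny() body. */
static int di_alloc_any_sketch(struct inomap *imap, int agno, bool dir,
                               struct inode *ip)
{
        int ag, rc;

        for (ag = 0; ag < imap->im_maxag; ag++) {   /* im_maxag: assumed accessor */
                if (ag == agno)
                        continue;                   /* primary AG already failed */
                rc = diAllocAG(imap, ag, dir, ip);
                if (rc != -ENOSPC)
                        return rc;                  /* success or hard error */
        }
        return -ENOSPC;
}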
@ -1772,9 +1760,9 @@ diAllocAny(struct inomap * imap, int agno, bool dir, struct inode *ip)
/*
* NAME: diAllocIno(imap,agno,ip)
* NAME: diAllocIno(imap,agno,ip)
*
* FUNCTION: allocate a disk inode from the allocation group's free
* FUNCTION: allocate a disk inode from the allocation group's free
* inode list, returning an error if this free list is
* empty (i.e. no iags on the list).
*
@ -1785,16 +1773,16 @@ diAllocAny(struct inomap * imap, int agno, bool dir, struct inode *ip)
* PRE CONDITION: Already have AG lock for this AG.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* agno - allocation group.
* ip - pointer to new inode to be filled in on successful return
* imap - pointer to inode map control structure.
* agno - allocation group.
* ip - pointer to new inode to be filled in on successful return
* with the disk inode number allocated, its extent address
* and the start of the ag.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int diAllocIno(struct inomap * imap, int agno, struct inode *ip)
{
@ -1890,7 +1878,7 @@ static int diAllocIno(struct inomap * imap, int agno, struct inode *ip)
/*
* NAME: diAllocExt(imap,agno,ip)
* NAME: diAllocExt(imap,agno,ip)
*
* FUNCTION: add a new extent of free inodes to an iag, allocating
* an inode from this extent to satisfy the current allocation
@ -1910,16 +1898,16 @@ static int diAllocIno(struct inomap * imap, int agno, struct inode *ip)
* for the purpose of satisfying this request.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* agno - allocation group number.
* ip - pointer to new inode to be filled in on successful return
* imap - pointer to inode map control structure.
* agno - allocation group number.
* ip - pointer to new inode to be filled in on successful return
* with the disk inode number allocated, its extent address
* and the start of the ag.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int diAllocExt(struct inomap * imap, int agno, struct inode *ip)
{
@ -2010,7 +1998,7 @@ static int diAllocExt(struct inomap * imap, int agno, struct inode *ip)
/*
* NAME: diAllocBit(imap,iagp,ino)
* NAME: diAllocBit(imap,iagp,ino)
*
* FUNCTION: allocate a backed inode from an iag.
*
@ -2030,14 +2018,14 @@ static int diAllocExt(struct inomap * imap, int agno, struct inode *ip)
* this AG. Must have read lock on imap inode.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* iagp - pointer to iag.
* ino - inode number to be allocated within the iag.
* imap - pointer to inode map control structure.
* iagp - pointer to iag.
* ino - inode number to be allocated within the iag.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int diAllocBit(struct inomap * imap, struct iag * iagp, int ino)
{
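The diAllocBit() body sits between hunks; the bitmap side of what its comment describes reduces to setting the inode's bit in the working map and, once the 32-inode word fills up, setting the extent's bit in the inode summary map. The sketch below leaves out locking, the persistent map, the free-list and counter updates, and assumes the MSB-first bit numbering used by the free-bit scan later in this file.

/* Sketch only: the wmap/inosmap updates implied by allocating one inode. */
static void mark_inode_allocated(struct iag *iagp, int ino)
{
        int extno = ino >> 5;                   /* 32 inodes per extent word */
        int bitno = ino & 31;

        iagp->wmap[extno] |= cpu_to_le32(0x80000000u >> bitno);

        /* extent now completely in use: flag it in the inode summary map */
        if (iagp->wmap[extno] == cpu_to_le32(0xffffffffu))
                iagp->inosmap[extno >> L2EXTSPERSUM] |=
                        cpu_to_le32(0x80000000u >> (extno & (EXTSPERSUM - 1)));
}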
@ -2144,11 +2132,11 @@ static int diAllocBit(struct inomap * imap, struct iag * iagp, int ino)
/*
* NAME: diNewExt(imap,iagp,extno)
* NAME: diNewExt(imap,iagp,extno)
*
* FUNCTION: initialize a new extent of inodes for an iag, allocating
* the first inode of the extent for use for the current
* allocation request.
* FUNCTION: initialize a new extent of inodes for an iag, allocating
* the first inode of the extent for use for the current
* allocation request.
*
* disk resources are allocated for the new extent of inodes
* and the inodes themselves are initialized to reflect their
@ -2177,14 +2165,14 @@ static int diAllocBit(struct inomap * imap, struct iag * iagp, int ino)
* this AG. Must have read lock on imap inode.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* iagp - pointer to iag.
* extno - extent number.
* imap - pointer to inode map control structure.
* iagp - pointer to iag.
* extno - extent number.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
{
@ -2430,7 +2418,7 @@ static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
/*
* NAME: diNewIAG(imap,iagnop,agno)
* NAME: diNewIAG(imap,iagnop,agno)
*
* FUNCTION: allocate a new iag for an allocation group.
*
@ -2443,16 +2431,16 @@ static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
* and returned to satisfy the request.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* iagnop - pointer to an iag number set with the number of the
* imap - pointer to inode map control structure.
* iagnop - pointer to an iag number set with the number of the
* newly allocated iag upon successful return.
* agno - allocation group number.
* agno - allocation group number.
* bpp - Buffer pointer to be filled in with new IAG's buffer
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*
* serialization:
* AG lock held on entry/exit;
@ -2461,7 +2449,7 @@ static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
*
* note: new iag transaction:
* . synchronously write iag;
* . write log of xtree and inode of imap;
* . write log of xtree and inode of imap;
* . commit;
* . synchronous write of xtree (right to left, bottom to top);
* . at start of logredo(): init in-memory imap with one additional iag page;
@ -2481,9 +2469,6 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
s64 xaddr = 0;
s64 blkno;
tid_t tid;
#ifdef _STILL_TO_PORT
xad_t xad;
#endif /* _STILL_TO_PORT */
struct inode *iplist[1];
/* pick up pointers to the inode map and mount inodes */
@ -2674,15 +2659,15 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
}
/*
* NAME: diIAGRead()
* NAME: diIAGRead()
*
* FUNCTION: get the buffer for the specified iag within a fileset
* FUNCTION: get the buffer for the specified iag within a fileset
* or aggregate inode map.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* iagno - iag number.
* bpp - point to buffer pointer to be filled in on successful
* imap - pointer to inode map control structure.
* iagno - iag number.
* bpp - point to buffer pointer to be filled in on successful
* exit.
*
* SERIALIZATION:
@ -2691,8 +2676,8 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
* the read lock is unnecessary.)
*
* RETURN VALUES:
* 0 - success.
* -EIO - i/o error.
* 0 - success.
* -EIO - i/o error.
*/
static int diIAGRead(struct inomap * imap, int iagno, struct metapage ** mpp)
{
@ -2712,17 +2697,17 @@ static int diIAGRead(struct inomap * imap, int iagno, struct metapage ** mpp)
}
/*
* NAME: diFindFree()
* NAME: diFindFree()
*
* FUNCTION: find the first free bit in a word starting at
* FUNCTION: find the first free bit in a word starting at
* the specified bit position.
*
* PARAMETERS:
* word - word to be examined.
* start - starting bit position.
* word - word to be examined.
* start - starting bit position.
*
* RETURN VALUES:
* bit position of first free bit in the word or 32 if
* bit position of first free bit in the word or 32 if
* no free bits were found.
*/
static int diFindFree(u32 word, int start)
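A direct rendering of the scan documented above, where bit 0 is taken to be the high-order bit (an assumption consistent with the 0x80000000-style masks used elsewhere in this file), a 0 bit means free, and 32 means no free bit at or after start:

/* Sketch of the free-bit scan; not the diFindFree() body from this commit. */
static int find_first_free_bit(u32 word, int start)
{
        int bitno;

        if (start >= 32)
                return 32;
        for (bitno = start, word <<= start; bitno < 32; bitno++, word <<= 1) {
                if ((word & 0x80000000u) == 0)
                        return bitno;           /* first zero (free) bit */
        }
        return 32;                              /* word is fully allocated */
}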
@ -2897,7 +2882,7 @@ int diExtendFS(struct inode *ipimap, struct inode *ipbmap)
atomic_read(&imap->im_numfree));
/*
* reconstruct imap
* reconstruct imap
*
* coalesce contiguous k (newAGSize/oldAGSize) AGs;
* i.e., (AGi, ..., AGj) where i = k*n and j = k*(n+1) - 1 to AGn;
@ -2913,7 +2898,7 @@ int diExtendFS(struct inode *ipimap, struct inode *ipbmap)
}
/*
* process each iag page of the map.
* process each iag page of the map.
*
* rebuild AG Free Inode List, AG Free Inode Extent List;
*/
@ -2932,7 +2917,7 @@ int diExtendFS(struct inode *ipimap, struct inode *ipbmap)
/* leave free iag in the free iag list */
if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
release_metapage(bp);
release_metapage(bp);
continue;
}
@ -3063,13 +3048,13 @@ static void duplicateIXtree(struct super_block *sb, s64 blkno,
}
/*
* NAME: copy_from_dinode()
* NAME: copy_from_dinode()
*
* FUNCTION: Copies inode info from disk inode to in-memory inode
* FUNCTION: Copies inode info from disk inode to in-memory inode
*
* RETURN VALUES:
* 0 - success
* -ENOMEM - insufficient memory
* 0 - success
* -ENOMEM - insufficient memory
*/
static int copy_from_dinode(struct dinode * dip, struct inode *ip)
{
@ -3151,9 +3136,9 @@ static int copy_from_dinode(struct dinode * dip, struct inode *ip)
}
/*
* NAME: copy_to_dinode()
* NAME: copy_to_dinode()
*
* FUNCTION: Copies inode info from in-memory inode to disk inode
* FUNCTION: Copies inode info from in-memory inode to disk inode
*/
static void copy_to_dinode(struct dinode * dip, struct inode *ip)
{

View File

@ -24,17 +24,17 @@
* jfs_imap.h: disk inode manager
*/
#define EXTSPERIAG 128 /* number of disk inode extent per iag */
#define IMAPBLKNO 0 /* lblkno of dinomap within inode map */
#define SMAPSZ 4 /* number of words per summary map */
#define EXTSPERIAG 128 /* number of disk inode extent per iag */
#define IMAPBLKNO 0 /* lblkno of dinomap within inode map */
#define SMAPSZ 4 /* number of words per summary map */
#define EXTSPERSUM 32 /* number of extents per summary map entry */
#define L2EXTSPERSUM 5 /* l2 number of extents per summary map */
#define PGSPERIEXT 4 /* number of 4K pages per dinode extent */
#define MAXIAGS ((1<<20)-1) /* maximum number of iags */
#define MAXAG 128 /* maximum number of allocation groups */
#define MAXIAGS ((1<<20)-1) /* maximum number of iags */
#define MAXAG 128 /* maximum number of allocation groups */
#define AMAPSIZE 512 /* bytes in the IAG allocation maps */
#define SMAPSIZE 16 /* bytes in the IAG summary maps */
#define AMAPSIZE 512 /* bytes in the IAG allocation maps */
#define SMAPSIZE 16 /* bytes in the IAG summary maps */
/* convert inode number to iag number */
#define INOTOIAG(ino) ((ino) >> L2INOSPERIAG)
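Worked example: with L2INOSPERIAG = 12 (4096 inodes per iag, per jfs_filsys.h above), inode number 5000 maps to iag 5000 >> 12 = 1, while inode 4095 is still in iag 0.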
@ -60,31 +60,31 @@
* inode allocation group page (per 4096 inodes of an AG)
*/
struct iag {
__le64 agstart; /* 8: starting block of ag */
__le32 iagnum; /* 4: inode allocation group number */
__le32 inofreefwd; /* 4: ag inode free list forward */
__le32 inofreeback; /* 4: ag inode free list back */
__le32 extfreefwd; /* 4: ag inode extent free list forward */
__le32 extfreeback; /* 4: ag inode extent free list back */
__le32 iagfree; /* 4: iag free list */
__le64 agstart; /* 8: starting block of ag */
__le32 iagnum; /* 4: inode allocation group number */
__le32 inofreefwd; /* 4: ag inode free list forward */
__le32 inofreeback; /* 4: ag inode free list back */
__le32 extfreefwd; /* 4: ag inode extent free list forward */
__le32 extfreeback; /* 4: ag inode extent free list back */
__le32 iagfree; /* 4: iag free list */
/* summary map: 1 bit per inode extent */
__le32 inosmap[SMAPSZ]; /* 16: sum map of mapwords w/ free inodes;
* note: this indicates free and backed
* inodes, if the extent is not backed the
* value will be 1. if the extent is
* backed but all inodes are being used the
* value will be 1. if the extent is
* backed but at least one of the inodes is
* free the value will be 0.
* note: this indicates free and backed
* inodes, if the extent is not backed the
* value will be 1. if the extent is
* backed but all inodes are being used the
* value will be 1. if the extent is
* backed but at least one of the inodes is
* free the value will be 0.
*/
__le32 extsmap[SMAPSZ]; /* 16: sum map of mapwords w/ free extents */
__le32 nfreeinos; /* 4: number of free inodes */
__le32 nfreeexts; /* 4: number of free extents */
__le32 nfreeinos; /* 4: number of free inodes */
__le32 nfreeexts; /* 4: number of free extents */
/* (72) */
u8 pad[1976]; /* 1976: pad to 2048 bytes */
/* allocation bit map: 1 bit per inode (0 - free, 1 - allocated) */
__le32 wmap[EXTSPERIAG]; /* 512: working allocation map */
__le32 wmap[EXTSPERIAG]; /* 512: working allocation map */
__le32 pmap[EXTSPERIAG]; /* 512: persistent allocation map */
pxd_t inoext[EXTSPERIAG]; /* 1024: inode extent addresses */
}; /* (4096) */
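Given the inosmap semantics spelled out above (a 0 summary bit means the extent is backed and still has a free inode), checking whether an extent is worth scanning is a single bit test. The sketch assumes the same MSB-first numbering as the other map words.

/* Sketch: does extent 'extno' of this iag hold at least one free, backed inode? */
static int iag_extent_has_free_inode(const struct iag *iagp, int extno)
{
        u32 word = le32_to_cpu(iagp->inosmap[extno >> L2EXTSPERSUM]);
        u32 mask = 0x80000000u >> (extno & (EXTSPERSUM - 1));   /* MSB-first: assumption */

        return (word & mask) == 0;
}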
@ -93,44 +93,44 @@ struct iag {
* per AG control information (in inode map control page)
*/
struct iagctl_disk {
__le32 inofree; /* 4: free inode list anchor */
__le32 extfree; /* 4: free extent list anchor */
__le32 numinos; /* 4: number of backed inodes */
__le32 numfree; /* 4: number of free inodes */
__le32 inofree; /* 4: free inode list anchor */
__le32 extfree; /* 4: free extent list anchor */
__le32 numinos; /* 4: number of backed inodes */
__le32 numfree; /* 4: number of free inodes */
}; /* (16) */
struct iagctl {
int inofree; /* free inode list anchor */
int extfree; /* free extent list anchor */
int numinos; /* number of backed inodes */
int numfree; /* number of free inodes */
int inofree; /* free inode list anchor */
int extfree; /* free extent list anchor */
int numinos; /* number of backed inodes */
int numfree; /* number of free inodes */
};
/*
* per fileset/aggregate inode map control page
*/
struct dinomap_disk {
__le32 in_freeiag; /* 4: free iag list anchor */
__le32 in_nextiag; /* 4: next free iag number */
__le32 in_numinos; /* 4: num of backed inodes */
__le32 in_freeiag; /* 4: free iag list anchor */
__le32 in_nextiag; /* 4: next free iag number */
__le32 in_numinos; /* 4: num of backed inodes */
__le32 in_numfree; /* 4: num of free backed inodes */
__le32 in_nbperiext; /* 4: num of blocks per inode extent */
__le32 in_l2nbperiext; /* 4: l2 of in_nbperiext */
__le32 in_diskblock; /* 4: for standalone test driver */
__le32 in_maxag; /* 4: for standalone test driver */
u8 pad[2016]; /* 2016: pad to 2048 */
__le32 in_l2nbperiext; /* 4: l2 of in_nbperiext */
__le32 in_diskblock; /* 4: for standalone test driver */
__le32 in_maxag; /* 4: for standalone test driver */
u8 pad[2016]; /* 2016: pad to 2048 */
struct iagctl_disk in_agctl[MAXAG]; /* 2048: AG control information */
}; /* (4096) */
struct dinomap {
int in_freeiag; /* free iag list anchor */
int in_nextiag; /* next free iag number */
int in_numinos; /* num of backed inodes */
int in_numfree; /* num of free backed inodes */
int in_freeiag; /* free iag list anchor */
int in_nextiag; /* next free iag number */
int in_numinos; /* num of backed inodes */
int in_numfree; /* num of free backed inodes */
int in_nbperiext; /* num of blocks per inode extent */
int in_l2nbperiext; /* l2 of in_nbperiext */
int in_diskblock; /* for standalone test driver */
int in_maxag; /* for standalone test driver */
int in_l2nbperiext; /* l2 of in_nbperiext */
int in_diskblock; /* for standalone test driver */
int in_maxag; /* for standalone test driver */
struct iagctl in_agctl[MAXAG]; /* AG control information */
};
@ -139,9 +139,9 @@ struct dinomap {
*/
struct inomap {
struct dinomap im_imap; /* 4096: inode allocation control */
struct inode *im_ipimap; /* 4: ptr to inode for imap */
struct mutex im_freelock; /* 4: iag free list lock */
struct mutex im_aglock[MAXAG]; /* 512: per AG locks */
struct inode *im_ipimap; /* 4: ptr to inode for imap */
struct mutex im_freelock; /* 4: iag free list lock */
struct mutex im_aglock[MAXAG]; /* 512: per AG locks */
u32 *im_DBGdimap;
atomic_t im_numinos; /* num of backed inodes */
atomic_t im_numfree; /* num of free backed inodes */

View File

@ -40,7 +40,7 @@ struct jfs_inode_info {
uint mode2; /* jfs-specific mode */
uint saved_uid; /* saved for uid mount option */
uint saved_gid; /* saved for gid mount option */
pxd_t ixpxd; /* inode extent descriptor */
pxd_t ixpxd; /* inode extent descriptor */
dxd_t acl; /* dxd describing acl */
dxd_t ea; /* dxd describing ea */
time_t otime; /* time created */
@ -190,7 +190,7 @@ struct jfs_sb_info {
uint gengen; /* inode generation generator*/
uint inostamp; /* shows inode belongs to fileset*/
/* Formerly in ipbmap */
/* Formerly in ipbmap */
struct bmap *bmap; /* incore bmap descriptor */
struct nls_table *nls_tab; /* current codepage */
struct inode *direct_inode; /* metadata inode */

View File

@ -244,7 +244,7 @@ int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
goto writeRecord;
/*
* initialize/update page/transaction recovery lsn
* initialize/update page/transaction recovery lsn
*/
lsn = log->lsn;
@ -263,7 +263,7 @@ int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
}
/*
* initialize/update lsn of tblock of the page
* initialize/update lsn of tblock of the page
*
* transaction inherits oldest lsn of pages associated
* with allocation/deallocation of resources (their
@ -307,7 +307,7 @@ int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
LOGSYNC_UNLOCK(log, flags);
/*
* write the log record
* write the log record
*/
writeRecord:
lsn = lmWriteRecord(log, tblk, lrd, tlck);
@ -372,7 +372,7 @@ lmWriteRecord(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
goto moveLrd;
/*
* move log record data
* move log record data
*/
/* retrieve source meta-data page to log */
if (tlck->flag & tlckPAGELOCK) {
@ -465,7 +465,7 @@ lmWriteRecord(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
}
/*
* move log record descriptor
* move log record descriptor
*/
moveLrd:
lrd->length = cpu_to_le16(len);
@ -574,7 +574,7 @@ static int lmNextPage(struct jfs_log * log)
LOGGC_LOCK(log);
/*
* write or queue the full page at the tail of write queue
* write or queue the full page at the tail of write queue
*/
/* get the tail tblk on commit queue */
if (list_empty(&log->cqueue))
@ -625,7 +625,7 @@ static int lmNextPage(struct jfs_log * log)
LOGGC_UNLOCK(log);
/*
* allocate/initialize next page
* allocate/initialize next page
*/
/* if log wraps, the first data page of log is 2
* (0 never used, 1 is superblock).
@ -953,7 +953,7 @@ static int lmLogSync(struct jfs_log * log, int hard_sync)
}
/*
* forward syncpt
* forward syncpt
*/
/* if last sync is same as last syncpt,
* invoke sync point forward processing to update sync.
@ -989,7 +989,7 @@ static int lmLogSync(struct jfs_log * log, int hard_sync)
lsn = log->lsn;
/*
* setup next syncpt trigger (SWAG)
* setup next syncpt trigger (SWAG)
*/
logsize = log->logsize;
@ -1000,11 +1000,11 @@ static int lmLogSync(struct jfs_log * log, int hard_sync)
if (more < 2 * LOGPSIZE) {
jfs_warn("\n ... Log Wrap ... Log Wrap ... Log Wrap ...\n");
/*
* log wrapping
* log wrapping
*
* option 1 - panic ? No.!
* option 2 - shutdown file systems
* associated with log ?
* associated with log ?
* option 3 - extend log ?
*/
/*
@ -1062,7 +1062,7 @@ void jfs_syncpt(struct jfs_log *log, int hard_sync)
/*
* NAME: lmLogOpen()
*
* FUNCTION: open the log on first open;
* FUNCTION: open the log on first open;
* insert filesystem in the active list of the log.
*
* PARAMETER: ipmnt - file system mount inode
@ -1113,7 +1113,7 @@ int lmLogOpen(struct super_block *sb)
init_waitqueue_head(&log->syncwait);
/*
* external log as separate logical volume
* external log as separate logical volume
*
* file systems to log may have n-to-1 relationship;
*/
@ -1155,7 +1155,7 @@ journal_found:
return 0;
/*
* unwind on error
* unwind on error
*/
shutdown: /* unwind lbmLogInit() */
list_del(&log->journal_list);
@ -1427,7 +1427,7 @@ int lmLogInit(struct jfs_log * log)
return 0;
/*
* unwind on error
* unwind on error
*/
errout30: /* release log page */
log->wqueue = NULL;
@ -1480,7 +1480,7 @@ int lmLogClose(struct super_block *sb)
if (test_bit(log_INLINELOG, &log->flag)) {
/*
* in-line log in host file system
* in-line log in host file system
*/
rc = lmLogShutdown(log);
kfree(log);
@ -1504,7 +1504,7 @@ int lmLogClose(struct super_block *sb)
goto out;
/*
* external log as separate logical volume
* external log as separate logical volume
*/
list_del(&log->journal_list);
bdev = log->bdev;
@ -1723,7 +1723,7 @@ int lmLogShutdown(struct jfs_log * log)
*
 * PARAMETER: log - pointer to log's inode.
* fsdev - kdev_t of filesystem.
* serial - pointer to returned log serial number
* serial - pointer to returned log serial number
* activate - insert/remove device from active list.
*
* RETURN: 0 - success
@ -1963,7 +1963,7 @@ static void lbmfree(struct lbuf * bp)
* FUNCTION: add a log buffer to the log redrive list
*
* PARAMETER:
* bp - log buffer
* bp - log buffer
*
* NOTES:
* Takes log_redrive_lock.
@ -2054,7 +2054,7 @@ static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag,
bp->l_flag = flag;
/*
* insert bp at tail of write queue associated with log
* insert bp at tail of write queue associated with log
*
* (request is either for bp already/currently at head of queue
* or new bp to be inserted at tail)
@ -2117,7 +2117,7 @@ static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag)
log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize));
/*
* initiate pageout of the page
* initiate pageout of the page
*/
lbmStartIO(bp);
}
@ -2128,7 +2128,7 @@ static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag)
*
* FUNCTION: Interface to DD strategy routine
*
* RETURN: none
* RETURN: none
*
* serialization: LCACHE_LOCK() is NOT held during log i/o;
*/
@ -2222,7 +2222,7 @@ static int lbmIODone(struct bio *bio, unsigned int bytes_done, int error)
bio_put(bio);
/*
* pagein completion
* pagein completion
*/
if (bp->l_flag & lbmREAD) {
bp->l_flag &= ~lbmREAD;
@ -2236,7 +2236,7 @@ static int lbmIODone(struct bio *bio, unsigned int bytes_done, int error)
}
/*
* pageout completion
* pageout completion
*
* the bp at the head of write queue has completed pageout.
*
@ -2302,7 +2302,7 @@ static int lbmIODone(struct bio *bio, unsigned int bytes_done, int error)
}
/*
* synchronous pageout:
* synchronous pageout:
*
* buffer has not necessarily been removed from write queue
* (e.g., synchronous write of partial-page with COMMIT):
@ -2316,7 +2316,7 @@ static int lbmIODone(struct bio *bio, unsigned int bytes_done, int error)
}
/*
* Group Commit pageout:
* Group Commit pageout:
*/
else if (bp->l_flag & lbmGC) {
LCACHE_UNLOCK(flags);
@ -2324,7 +2324,7 @@ static int lbmIODone(struct bio *bio, unsigned int bytes_done, int error)
}
/*
* asynchronous pageout:
* asynchronous pageout:
*
* buffer must have been removed from write queue:
* insert buffer at head of freelist where it can be recycled
@ -2375,7 +2375,7 @@ int jfsIOWait(void *arg)
* FUNCTION: format file system log
*
* PARAMETERS:
* log - volume log
* log - volume log
* logAddress - start address of log space in FS block
* logSize - length of log space in FS block;
*
@ -2407,16 +2407,16 @@ int lmLogFormat(struct jfs_log *log, s64 logAddress, int logSize)
npages = logSize >> sbi->l2nbperpage;
/*
* log space:
* log space:
*
* page 0 - reserved;
* page 1 - log superblock;
* page 2 - log data page: A SYNC log record is written
* into this page at logform time;
* into this page at logform time;
* pages 3-N - log data page: set to empty log data pages;
*/
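Worked example of this layout (with an assumed geometry of 4K log pages over 2K file-system blocks, i.e. l2nbperpage = 1): a logSize of 2048 FS blocks gives npages = 2048 >> 1 = 1024 log pages; page 0 is reserved, page 1 holds the log superblock, page 2 receives the initial SYNC record at logform time, and the loop further down that runs lspn from 0 to npages - 4 formats the remaining 1021 pages (3 through 1023) as empty data pages.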
/*
* init log superblock: log page 1
* init log superblock: log page 1
*/
logsuper = (struct logsuper *) bp->l_ldata;
@ -2436,7 +2436,7 @@ int lmLogFormat(struct jfs_log *log, s64 logAddress, int logSize)
goto exit;
/*
* init pages 2 to npages-1 as log data pages:
* init pages 2 to npages-1 as log data pages:
*
* log page sequence number (lpsn) initialization:
*
@ -2479,7 +2479,7 @@ int lmLogFormat(struct jfs_log *log, s64 logAddress, int logSize)
goto exit;
/*
* initialize succeeding log pages: lpsn = 0, 1, ..., (N-2)
* initialize succeeding log pages: lpsn = 0, 1, ..., (N-2)
*/
for (lspn = 0; lspn < npages - 3; lspn++) {
lp->h.page = lp->t.page = cpu_to_le32(lspn);
@ -2495,7 +2495,7 @@ int lmLogFormat(struct jfs_log *log, s64 logAddress, int logSize)
rc = 0;
exit:
/*
* finalize log
* finalize log
*/
/* release the buffer */
lbmFree(bp);

View File

@ -144,7 +144,7 @@ struct logpage {
*
* (this comment should be rewritten !)
* jfs uses only "after" log records (only a single writer is allowed
* in a page, pages are written to temporary paging space if
* in a page, pages are written to temporary paging space if
* if they must be written to disk before commit, and i/o is
* scheduled for modified pages to their home location after
* the log records containing the after values and the commit
@ -153,7 +153,7 @@ struct logpage {
*
* a log record consists of a data area of variable length followed by
* a descriptor of fixed size LOGRDSIZE bytes.
* the data area is rounded up to an integral number of 4-bytes and
* the data area is rounded up to an integral number of 4-bytes and
* must be no longer than LOGPSIZE.
* the descriptor is of size of multiple of 4-bytes and aligned on a
* 4-byte boundary.
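The 4-byte round-up mentioned here is the usual mask trick; as an illustration (not a macro from jfs_logmgr.h), a 13-byte data area would be padded to (13 + 3) & ~3 = 16 bytes before the fixed-size descriptor is appended.

/* Illustrative only: pad a log record data length to a 4-byte multiple. */
#define LOGREC_DATA_LEN(len)    (((len) + 3) & ~3)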
@ -215,13 +215,13 @@ struct lrd {
union {
/*
* COMMIT: commit
* COMMIT: commit
*
* transaction commit: no type-dependent information;
*/
/*
* REDOPAGE: after-image
* REDOPAGE: after-image
*
* apply after-image;
*
@ -236,7 +236,7 @@ struct lrd {
} redopage; /* (20) */
/*
* NOREDOPAGE: the page is freed
* NOREDOPAGE: the page is freed
*
* do not apply after-image records which precede this record
* in the log with the same page block number to this page.
@ -252,7 +252,7 @@ struct lrd {
} noredopage; /* (20) */
/*
* UPDATEMAP: update block allocation map
* UPDATEMAP: update block allocation map
*
* either in-line PXD,
* or out-of-line XADLIST;
@ -268,7 +268,7 @@ struct lrd {
} updatemap; /* (20) */
/*
* NOREDOINOEXT: the inode extent is freed
* NOREDOINOEXT: the inode extent is freed
*
* do not apply after-image records which precede this
* record in the log with the any of the 4 page block
@ -286,7 +286,7 @@ struct lrd {
} noredoinoext; /* (20) */
/*
* SYNCPT: log sync point
* SYNCPT: log sync point
*
* replay log upto syncpt address specified;
*/
@ -295,13 +295,13 @@ struct lrd {
} syncpt;
/*
* MOUNT: file system mount
* MOUNT: file system mount
*
* file system mount: no type-dependent information;
*/
/*
* ? FREEXTENT: free specified extent(s)
* ? FREEXTENT: free specified extent(s)
*
* free specified extent(s) from block allocation map
* N.B.: nextents should be length of data/sizeof(xad_t)
@ -314,7 +314,7 @@ struct lrd {
} freextent;
/*
* ? NOREDOFILE: this file is freed
* ? NOREDOFILE: this file is freed
*
* do not apply records which precede this record in the log
* with the same inode number.
@ -330,7 +330,7 @@ struct lrd {
} noredofile;
/*
* ? NEWPAGE:
* ? NEWPAGE:
*
* metadata type dependent
*/
@ -342,7 +342,7 @@ struct lrd {
} newpage;
/*
* ? DUMMY: filler
* ? DUMMY: filler
*
* no type-dependent information
*/

View File

@ -80,7 +80,7 @@ static int logMOUNT(struct super_block *sb);
*/
int jfs_mount(struct super_block *sb)
{
int rc = 0; /* Return code */
int rc = 0; /* Return code */
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct inode *ipaimap = NULL;
struct inode *ipaimap2 = NULL;
@ -169,7 +169,7 @@ int jfs_mount(struct super_block *sb)
sbi->ipaimap2 = NULL;
/*
* mount (the only/single) fileset
* mount (the only/single) fileset
*/
/*
* open fileset inode allocation map (aka fileset inode)
@ -195,7 +195,7 @@ int jfs_mount(struct super_block *sb)
goto out;
/*
* unwind on error
* unwind on error
*/
errout41: /* close fileset inode allocation map inode */
diFreeSpecial(ipimap);

View File

@ -18,7 +18,7 @@
*/
/*
* jfs_txnmgr.c: transaction manager
* jfs_txnmgr.c: transaction manager
*
* notes:
* transaction starts with txBegin() and ends with txCommit()
@ -60,7 +60,7 @@
#include "jfs_debug.h"
/*
* transaction management structures
* transaction management structures
*/
static struct {
int freetid; /* index of a free tid structure */
@ -103,19 +103,19 @@ module_param(nTxLock, int, 0);
MODULE_PARM_DESC(nTxLock,
"Number of transaction locks (max:65536)");
struct tblock *TxBlock; /* transaction block table */
static int TxLockLWM; /* Low water mark for number of txLocks used */
static int TxLockHWM; /* High water mark for number of txLocks used */
static int TxLockVHWM; /* Very High water mark */
struct tlock *TxLock; /* transaction lock table */
struct tblock *TxBlock; /* transaction block table */
static int TxLockLWM; /* Low water mark for number of txLocks used */
static int TxLockHWM; /* High water mark for number of txLocks used */
static int TxLockVHWM; /* Very High water mark */
struct tlock *TxLock; /* transaction lock table */
/*
* transaction management lock
* transaction management lock
*/
static DEFINE_SPINLOCK(jfsTxnLock);
#define TXN_LOCK() spin_lock(&jfsTxnLock)
#define TXN_UNLOCK() spin_unlock(&jfsTxnLock)
#define TXN_LOCK() spin_lock(&jfsTxnLock)
#define TXN_UNLOCK() spin_unlock(&jfsTxnLock)
#define LAZY_LOCK_INIT() spin_lock_init(&TxAnchor.LazyLock);
#define LAZY_LOCK(flags) spin_lock_irqsave(&TxAnchor.LazyLock, flags)
@ -148,7 +148,7 @@ static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
#define TXN_WAKEUP(event) wake_up_all(event)
/*
* statistics
* statistics
*/
static struct {
tid_t maxtid; /* 4: biggest tid ever used */
@ -181,8 +181,8 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
static void LogSyncRelease(struct metapage * mp);
/*
* transaction block/lock management
* ---------------------------------
* transaction block/lock management
* ---------------------------------
*/
/*
@ -227,9 +227,9 @@ static void txLockFree(lid_t lid)
}
/*
* NAME: txInit()
* NAME: txInit()
*
* FUNCTION: initialize transaction management structures
* FUNCTION: initialize transaction management structures
*
* RETURN:
*
@ -333,9 +333,9 @@ int txInit(void)
}
/*
* NAME: txExit()
* NAME: txExit()
*
* FUNCTION: clean up when module is unloaded
* FUNCTION: clean up when module is unloaded
*/
void txExit(void)
{
@ -346,12 +346,12 @@ void txExit(void)
}
/*
* NAME: txBegin()
* NAME: txBegin()
*
* FUNCTION: start a transaction.
* FUNCTION: start a transaction.
*
* PARAMETER: sb - superblock
* flag - force for nested tx;
* PARAMETER: sb - superblock
* flag - force for nested tx;
*
* RETURN: tid - transaction id
*
@ -447,13 +447,13 @@ tid_t txBegin(struct super_block *sb, int flag)
}
/*
* NAME: txBeginAnon()
* NAME: txBeginAnon()
*
* FUNCTION: start an anonymous transaction.
* FUNCTION: start an anonymous transaction.
* Blocks if logsync or available tlocks are low to prevent
* anonymous tlocks from depleting supply.
*
* PARAMETER: sb - superblock
* PARAMETER: sb - superblock
*
* RETURN: none
*/
@ -489,11 +489,11 @@ void txBeginAnon(struct super_block *sb)
}
/*
* txEnd()
* txEnd()
*
* function: free specified transaction block.
*
* logsync barrier processing:
* logsync barrier processing:
*
* serialization:
*/
@ -577,13 +577,13 @@ wakeup:
}
/*
* txLock()
* txLock()
*
* function: acquire a transaction lock on the specified <mp>
*
* parameter:
*
* return: transaction lock id
* return: transaction lock id
*
* serialization:
*/
@ -857,17 +857,17 @@ struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
}
/*
* NAME: txRelease()
* NAME: txRelease()
*
* FUNCTION: Release buffers associated with transaction locks, but don't
* FUNCTION: Release buffers associated with transaction locks, but don't
 * mark homeok yet. This allows other transactions to modify
* buffers, but won't let them go to disk until commit record
* actually gets written.
*
* PARAMETER:
* tblk -
* tblk -
*
* RETURN: Errors from subroutines.
* RETURN: Errors from subroutines.
*/
static void txRelease(struct tblock * tblk)
{
@ -896,10 +896,10 @@ static void txRelease(struct tblock * tblk)
}
/*
* NAME: txUnlock()
* NAME: txUnlock()
*
* FUNCTION: Initiates pageout of pages modified by tid in journalled
* objects and frees their lockwords.
* FUNCTION: Initiates pageout of pages modified by tid in journalled
* objects and frees their lockwords.
*/
static void txUnlock(struct tblock * tblk)
{
@ -983,10 +983,10 @@ static void txUnlock(struct tblock * tblk)
}
/*
* txMaplock()
* txMaplock()
*
* function: allocate a transaction lock for freed page/entry;
* for freed page, maplock is used as xtlock/dtlock type;
* for freed page, maplock is used as xtlock/dtlock type;
*/
struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
{
@ -1057,7 +1057,7 @@ struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
}
/*
* txLinelock()
* txLinelock()
*
* function: allocate a transaction lock for log vector list
*/
@ -1092,39 +1092,39 @@ struct linelock *txLinelock(struct linelock * tlock)
}
/*
* transaction commit management
* -----------------------------
* transaction commit management
* -----------------------------
*/
/*
* NAME: txCommit()
* NAME: txCommit()
*
* FUNCTION: commit the changes to the objects specified in
* clist. For journalled segments only the
* changes of the caller are committed, ie by tid.
* for non-journalled segments the data are flushed to
* disk and then the change to the disk inode and indirect
* blocks committed (so blocks newly allocated to the
* segment will be made a part of the segment atomically).
* FUNCTION: commit the changes to the objects specified in
* clist. For journalled segments only the
* changes of the caller are committed, ie by tid.
* for non-journalled segments the data are flushed to
* disk and then the change to the disk inode and indirect
* blocks committed (so blocks newly allocated to the
* segment will be made a part of the segment atomically).
*
* all of the segments specified in clist must be in
* one file system. no more than 6 segments are needed
* to handle all unix svcs.
* all of the segments specified in clist must be in
* one file system. no more than 6 segments are needed
* to handle all unix svcs.
*
* if the i_nlink field (i.e. disk inode link count)
* is zero, and the type of inode is a regular file or
* directory, or symbolic link , the inode is truncated
* to zero length. the truncation is committed but the
* VM resources are unaffected until it is closed (see
* iput and iclose).
* if the i_nlink field (i.e. disk inode link count)
* is zero, and the type of inode is a regular file or
* directory, or symbolic link , the inode is truncated
* to zero length. the truncation is committed but the
* VM resources are unaffected until it is closed (see
* iput and iclose).
*
* PARAMETER:
*
* RETURN:
*
* serialization:
* on entry the inode lock on each segment is assumed
* to be held.
* on entry the inode lock on each segment is assumed
* to be held.
*
* i/o error:
*/
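Putting the pieces of this transaction manager together, a caller's life cycle is roughly the sketch below. The txCommit()/txEnd() parameter lists are abbreviated from memory and should be treated as assumptions, and all error handling and the actual metadata edits are omitted.

/* Sketch of a typical journalled update; not code from this commit. */
static int update_one_inode(struct super_block *sb, struct inode *ip,
                            struct metapage *mp)
{
        struct inode *iplist[1] = { ip };
        tid_t tid;
        int rc;

        tid = txBegin(sb, 0);                      /* reserve a transaction block */
        txLock(tid, ip, mp, tlckXTREE | tlckGROW); /* lock the page being changed */

        /* ... modify the metapage and the in-memory inode here ... */

        rc = txCommit(tid, 1, iplist, 0);          /* log, force, update maps */
        txEnd(tid);                                /* free the transaction block */
        return rc;
}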
@ -1175,7 +1175,7 @@ int txCommit(tid_t tid, /* transaction identifier */
if ((flag & (COMMIT_FORCE | COMMIT_SYNC)) == 0)
tblk->xflag |= COMMIT_LAZY;
/*
* prepare non-journaled objects for commit
* prepare non-journaled objects for commit
*
* flush data pages of non-journaled file
* to prevent the file getting non-initialized disk blocks
@ -1186,7 +1186,7 @@ int txCommit(tid_t tid, /* transaction identifier */
cd.nip = nip;
/*
* acquire transaction lock on (on-disk) inodes
* acquire transaction lock on (on-disk) inodes
*
* update on-disk inode from in-memory inode
* acquiring transaction locks for AFTER records
@ -1262,7 +1262,7 @@ int txCommit(tid_t tid, /* transaction identifier */
}
/*
* write log records from transaction locks
* write log records from transaction locks
*
* txUpdateMap() resets XAD_NEW in XAD.
*/
@ -1294,7 +1294,7 @@ int txCommit(tid_t tid, /* transaction identifier */
!test_cflag(COMMIT_Nolink, tblk->u.ip)));
/*
* write COMMIT log record
* write COMMIT log record
*/
lrd->type = cpu_to_le16(LOG_COMMIT);
lrd->length = 0;
@ -1303,7 +1303,7 @@ int txCommit(tid_t tid, /* transaction identifier */
lmGroupCommit(log, tblk);
/*
* - transaction is now committed -
* - transaction is now committed -
*/
/*
@ -1314,11 +1314,11 @@ int txCommit(tid_t tid, /* transaction identifier */
txForce(tblk);
/*
* update allocation map.
* update allocation map.
*
* update inode allocation map and inode:
* free pager lock on memory object of inode if any.
* update block allocation map.
* update block allocation map.
*
* txUpdateMap() resets XAD_NEW in XAD.
*/
@ -1326,7 +1326,7 @@ int txCommit(tid_t tid, /* transaction identifier */
txUpdateMap(tblk);
/*
* free transaction locks and pageout/free pages
* free transaction locks and pageout/free pages
*/
txRelease(tblk);
@ -1335,7 +1335,7 @@ int txCommit(tid_t tid, /* transaction identifier */
/*
* reset in-memory object state
* reset in-memory object state
*/
for (k = 0; k < cd.nip; k++) {
ip = cd.iplist[k];
@ -1358,11 +1358,11 @@ int txCommit(tid_t tid, /* transaction identifier */
}
/*
* NAME: txLog()
* NAME: txLog()
*
* FUNCTION: Writes AFTER log records for all lines modified
* by tid for segments specified by inodes in comdata.
* Code assumes only WRITELOCKS are recorded in lockwords.
* FUNCTION: Writes AFTER log records for all lines modified
* by tid for segments specified by inodes in comdata.
* Code assumes only WRITELOCKS are recorded in lockwords.
*
* PARAMETERS:
*
@ -1421,12 +1421,12 @@ static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd)
}
/*
* diLog()
* diLog()
*
* function: log inode tlock and format maplock to update bmap;
* function: log inode tlock and format maplock to update bmap;
*/
static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck, struct commit * cd)
struct tlock * tlck, struct commit * cd)
{
int rc = 0;
struct metapage *mp;
@ -1442,7 +1442,7 @@ static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
pxd = &lrd->log.redopage.pxd;
/*
* inode after image
* inode after image
*/
if (tlck->type & tlckENTRY) {
/* log after-image for logredo(): */
@ -1456,7 +1456,7 @@ static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
tlck->flag |= tlckWRITEPAGE;
} else if (tlck->type & tlckFREE) {
/*
* free inode extent
* free inode extent
*
* (pages of the freed inode extent have been invalidated and
* a maplock for free of the extent has been formatted at
@ -1498,7 +1498,7 @@ static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
jfs_err("diLog: UFO type tlck:0x%p", tlck);
#ifdef _JFS_WIP
/*
* alloc/free external EA extent
* alloc/free external EA extent
*
* a maplock for txUpdateMap() to update bPWMAP for alloc/free
* of the extent has been formatted at txLock() time;
@ -1534,9 +1534,9 @@ static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
}
/*
* dataLog()
* dataLog()
*
* function: log data tlock
* function: log data tlock
*/
static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck)
@ -1580,9 +1580,9 @@ static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
}
/*
* dtLog()
* dtLog()
*
* function: log dtree tlock and format maplock to update bmap;
* function: log dtree tlock and format maplock to update bmap;
*/
static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck)
@ -1603,10 +1603,10 @@ static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
/*
* page extension via relocation: entry insertion;
* page extension in-place: entry insertion;
* new right page from page split, reinitialized in-line
* root from root page split: entry insertion;
* page extension via relocation: entry insertion;
* page extension in-place: entry insertion;
* new right page from page split, reinitialized in-line
* root from root page split: entry insertion;
*/
if (tlck->type & (tlckNEW | tlckEXTEND)) {
/* log after-image of the new page for logredo():
@ -1641,8 +1641,8 @@ static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
}
/*
* entry insertion/deletion,
* sibling page link update (old right page before split);
* entry insertion/deletion,
* sibling page link update (old right page before split);
*/
if (tlck->type & (tlckENTRY | tlckRELINK)) {
/* log after-image for logredo(): */
@ -1658,11 +1658,11 @@ static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
}
/*
* page deletion: page has been invalidated
* page relocation: source extent
* page deletion: page has been invalidated
* page relocation: source extent
*
* a maplock for free of the page has been formatted
* at txLock() time);
* a maplock for free of the page has been formatted
* at txLock() time);
*/
if (tlck->type & (tlckFREE | tlckRELOCATE)) {
/* log LOG_NOREDOPAGE of the deleted page for logredo()
@ -1683,9 +1683,9 @@ static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
}
/*
* xtLog()
* xtLog()
*
* function: log xtree tlock and format maplock to update bmap;
* function: log xtree tlock and format maplock to update bmap;
*/
static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck)
@ -1725,8 +1725,8 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
xadlock = (struct xdlistlock *) maplock;
/*
* entry insertion/extension;
* sibling page link update (old right page before split);
* entry insertion/extension;
* sibling page link update (old right page before split);
*/
if (tlck->type & (tlckNEW | tlckGROW | tlckRELINK)) {
/* log after-image for logredo():
@ -1801,7 +1801,7 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
}
/*
* page deletion: file deletion/truncation (ref. xtTruncate())
* page deletion: file deletion/truncation (ref. xtTruncate())
*
* (page will be invalidated after log is written and bmap
* is updated from the page);
@ -1908,13 +1908,13 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
}
/*
* page/entry truncation: file truncation (ref. xtTruncate())
* page/entry truncation: file truncation (ref. xtTruncate())
*
* |----------+------+------+---------------|
* |          |      |
* |          |      hwm - hwm before truncation
* |          next - truncation point
* lwm - lwm before truncation
* |----------+------+------+---------------|
* |          |      |
* |          |      hwm - hwm before truncation
* |          next - truncation point
* lwm - lwm before truncation
* header ?
*/
if (tlck->type & tlckTRUNCATE) {
@ -1937,7 +1937,7 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
twm = xtlck->twm.offset;
/*
* write log records
* write log records
*/
/* log after-image for logredo():
*
@ -1997,7 +1997,7 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
}
/*
* format maplock(s) for txUpdateMap() to update bmap
* format maplock(s) for txUpdateMap() to update bmap
*/
maplock->index = 0;
@ -2069,9 +2069,9 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
}
/*
* mapLog()
* mapLog()
*
* function: log from maplock of freed data extents;
* function: log from maplock of freed data extents;
*/
static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck)
@ -2081,7 +2081,7 @@ static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
pxd_t *pxd;
/*
* page relocation: free the source page extent
* page relocation: free the source page extent
*
* a maplock for txUpdateMap() for free of the page
* has been formatted at txLock() time saving the src
@ -2155,10 +2155,10 @@ static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
}
/*
* txEA()
* txEA()
*
* function: acquire maplock for EA/ACL extents or
* set COMMIT_INLINE flag;
* function: acquire maplock for EA/ACL extents or
* set COMMIT_INLINE flag;
*/
void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
{
@ -2207,10 +2207,10 @@ void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
}
/*
* txForce()
* txForce()
*
* function: synchronously write pages locked by transaction
* after txLog() but before txUpdateMap();
* after txLog() but before txUpdateMap();
*/
static void txForce(struct tblock * tblk)
{
@ -2273,10 +2273,10 @@ static void txForce(struct tblock * tblk)
}
/*
* txUpdateMap()
* txUpdateMap()
*
* function: update persistent allocation map (and working map
* if appropriate);
* function: update persistent allocation map (and working map
* if appropriate);
*
* parameter:
*/
@ -2298,7 +2298,7 @@ static void txUpdateMap(struct tblock * tblk)
/*
* update block allocation map
* update block allocation map
*
* update allocation state in pmap (and wmap) and
* update lsn of the pmap page;
@ -2382,7 +2382,7 @@ static void txUpdateMap(struct tblock * tblk)
}
}
/*
* update inode allocation map
* update inode allocation map
*
* update allocation state in pmap and
* update lsn of the pmap page;
@ -2407,24 +2407,24 @@ static void txUpdateMap(struct tblock * tblk)
}
/*
* txAllocPMap()
* txAllocPMap()
*
* function: allocate from persistent map;
*
* parameter:
* ipbmap -
* malock -
* xad list:
* pxd:
* ipbmap -
* malock -
* xad list:
* pxd:
*
* maptype -
* allocate from persistent map;
* free from persistent map;
* (e.g., tmp file - free from working map at release
* of last reference);
* free from persistent and working map;
* maptype -
* allocate from persistent map;
* free from persistent map;
* (e.g., tmp file - free from working map at release
* of last reference);
* free from persistent and working map;
*
* lsn - log sequence number;
* lsn - log sequence number;
*/
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
struct tblock * tblk)
@ -2478,9 +2478,9 @@ static void txAllocPMap(struct inode *ip, struct maplock * maplock,
}
/*
* txFreeMap()
* txFreeMap()
*
* function: free from persistent and/or working map;
* function: free from persistent and/or working map;
*
* todo: optimization
*/
@ -2579,9 +2579,9 @@ void txFreeMap(struct inode *ip,
}
/*
* txFreelock()
* txFreelock()
*
* function: remove tlock from inode anonymous locklist
* function: remove tlock from inode anonymous locklist
*/
void txFreelock(struct inode *ip)
{
@ -2619,7 +2619,7 @@ void txFreelock(struct inode *ip)
}
/*
* txAbort()
* txAbort()
*
* function: abort tx before commit;
*
@ -2679,7 +2679,7 @@ void txAbort(tid_t tid, int dirty)
}
/*
* txLazyCommit(void)
* txLazyCommit(void)
*
* All transactions except those changing ipimap (COMMIT_FORCE) are
* processed by this routine. This ensures that the inode and block
@ -2728,7 +2728,7 @@ static void txLazyCommit(struct tblock * tblk)
}
/*
* jfs_lazycommit(void)
* jfs_lazycommit(void)
*
* To be run as a kernel daemon. If lbmIODone is called in an interrupt
* context, or where blocking is not wanted, this routine will process
@ -2913,7 +2913,7 @@ void txResume(struct super_block *sb)
}
/*
* jfs_sync(void)
* jfs_sync(void)
*
* To be run as a kernel daemon. This is awakened when tlocks run low.
* We write any inodes that have anonymous tlocks so they will become

View File

@ -94,7 +94,7 @@ extern struct tblock *TxBlock; /* transaction block table */
*/
struct tlock {
lid_t next; /* 2: index next lockword on tid locklist
* next lockword on freelist
* next lockword on freelist
*/
tid_t tid; /* 2: transaction id holding lock */

View File

@ -21,7 +21,7 @@
/*
* jfs_types.h:
*
* basic type/utility definitions
* basic type/utility definitions
*
* note: this header file must be the 1st include file
* of the JFS include list in all JFS .c files.
@ -54,8 +54,8 @@ struct timestruc_t {
*/
#define LEFTMOSTONE 0x80000000
#define HIGHORDER 0x80000000u /* high order bit on */
#define ONES 0xffffffffu /* all bits on */
#define HIGHORDER 0x80000000u /* high order bit on */
#define ONES 0xffffffffu /* all bits on */
/*
* logical xd (lxd)
@ -148,7 +148,7 @@ typedef struct {
#define sizeDXD(dxd) le32_to_cpu((dxd)->size)
/*
* directory entry argument
* directory entry argument
*/
struct component_name {
int namlen;
@ -160,14 +160,14 @@ struct component_name {
* DASD limit information - stored in directory inode
*/
struct dasd {
u8 thresh; /* Alert Threshold (in percent) */
u8 delta; /* Alert Threshold delta (in percent) */
u8 thresh; /* Alert Threshold (in percent) */
u8 delta; /* Alert Threshold delta (in percent) */
u8 rsrvd1;
u8 limit_hi; /* DASD limit (in logical blocks) */
__le32 limit_lo; /* DASD limit (in logical blocks) */
u8 limit_hi; /* DASD limit (in logical blocks) */
__le32 limit_lo; /* DASD limit (in logical blocks) */
u8 rsrvd2[3];
u8 used_hi; /* DASD usage (in logical blocks) */
__le32 used_lo; /* DASD usage (in logical blocks) */
u8 used_hi; /* DASD usage (in logical blocks) */
__le32 used_lo; /* DASD usage (in logical blocks) */
};
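/*
 * Illustration only (hypothetical helper, not JFS code): the limit and
 * usage values above are each split into an 8-bit high part and a 32-bit
 * little-endian low part, forming a 40-bit block count.  Assuming the
 * __le32 low word has already been converted to CPU byte order, the two
 * halves combine like this (DASDLIMIT() below presumably does the same
 * for limit_hi/limit_lo):
 */
static inline unsigned long long dasd_blocks40(unsigned char hi,
					       unsigned int lo_cpu)
{
	/* hi supplies bits 32..39, lo supplies bits 0..31 */
	return ((unsigned long long) hi << 32) | lo_cpu;
}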
#define DASDLIMIT(dasdp) \

View File

@ -60,7 +60,7 @@ int jfs_umount(struct super_block *sb)
jfs_info("UnMount JFS: sb:0x%p", sb);
/*
* update superblock and close log
* update superblock and close log
*
* if mounted read-write and log based recovery was enabled
*/

View File

@ -16,7 +16,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* jfs_xtree.c: extent allocation descriptor B+-tree manager
* jfs_xtree.c: extent allocation descriptor B+-tree manager
*/
#include <linux/fs.h>
@ -32,30 +32,30 @@
/*
* xtree local flag
*/
#define XT_INSERT 0x00000001
#define XT_INSERT 0x00000001
/*
* xtree key/entry comparison: extent offset
* xtree key/entry comparison: extent offset
*
* return:
* -1: k < start of extent
* 0: start_of_extent <= k <= end_of_extent
* 1: k > end_of_extent
* -1: k < start of extent
* 0: start_of_extent <= k <= end_of_extent
* 1: k > end_of_extent
*/
#define XT_CMP(CMP, K, X, OFFSET64)\
{\
OFFSET64 = offsetXAD(X);\
(CMP) = ((K) >= OFFSET64 + lengthXAD(X)) ? 1 :\
((K) < OFFSET64) ? -1 : 0;\
OFFSET64 = offsetXAD(X);\
(CMP) = ((K) >= OFFSET64 + lengthXAD(X)) ? 1 :\
((K) < OFFSET64) ? -1 : 0;\
}
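/*
 * Hypothetical standalone sketch of the XT_CMP rule above, using plain
 * integers instead of xad_t (not part of this header): for an extent
 * covering offsets [xoff, xoff + xlen), e.g. xoff = 100, xlen = 8,
 * keys 100..107 yield 0, 99 yields -1, 108 yields 1.
 */
static inline int xt_cmp_sketch(long long k, long long xoff, int xlen)
{
	if (k < xoff)
		return -1;		/* key below the extent  */
	if (k >= xoff + xlen)
		return 1;		/* key past the extent   */
	return 0;			/* key inside the extent */
}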
/* write a xad entry */
#define XT_PUTENTRY(XAD, FLAG, OFF, LEN, ADDR)\
{\
(XAD)->flag = (FLAG);\
XADoffset((XAD), (OFF));\
XADlength((XAD), (LEN));\
XADaddress((XAD), (ADDR));\
(XAD)->flag = (FLAG);\
XADoffset((XAD), (OFF));\
XADlength((XAD), (LEN));\
XADaddress((XAD), (ADDR));\
}
#define XT_PAGE(IP, MP) BT_PAGE(IP, MP, xtpage_t, i_xtroot)
@ -76,13 +76,13 @@
MP = NULL;\
RC = -EIO;\
}\
}\
}\
}
/* for consistency */
#define XT_PUTPAGE(MP) BT_PUTPAGE(MP)
#define XT_GETSEARCH(IP, LEAF, BN, MP, P, INDEX) \
#define XT_GETSEARCH(IP, LEAF, BN, MP, P, INDEX) \
BT_GETSEARCH(IP, LEAF, BN, MP, xtpage_t, P, INDEX, i_xtroot)
/* xtree entry parameter descriptor */
struct xtsplit {
@ -97,7 +97,7 @@ struct xtsplit {
/*
* statistics
* statistics
*/
#ifdef CONFIG_JFS_STATISTICS
static struct {
@ -136,7 +136,7 @@ static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * fp);
#endif /* _STILL_TO_PORT */
/*
* xtLookup()
* xtLookup()
*
* function: map a single page into a physical extent;
*/
@ -179,7 +179,7 @@ int xtLookup(struct inode *ip, s64 lstart,
}
/*
* compute the physical extent covering logical extent
* compute the physical extent covering logical extent
*
* N.B. search may have failed (e.g., hole in sparse file),
* and returned the index of the next entry.
@ -220,27 +220,27 @@ int xtLookup(struct inode *ip, s64 lstart,
/*
* xtLookupList()
* xtLookupList()
*
* function: map a single logical extent into a list of physical extents;
*
* parameter:
* struct inode *ip,
* struct lxdlist *lxdlist, lxd list (in)
* struct xadlist *xadlist, xad list (in/out)
* int flag)
* struct inode *ip,
* struct lxdlist *lxdlist, lxd list (in)
* struct xadlist *xadlist, xad list (in/out)
* int flag)
*
* coverage of lxd by xad under assumption of
* . lxd's are ordered and disjoint.
* . xad's are ordered and disjoint.
*
* return:
* 0: success
* 0: success
*
* note: a page being written (even a single byte) is backed fully,
* except the last page which is only backed with blocks
* required to cover the last byte;
* the extent backing a page is fully contained within an xad;
* except the last page which is only backed with blocks
* required to cover the last byte;
* the extent backing a page is fully contained within an xad;
*/
int xtLookupList(struct inode *ip, struct lxdlist * lxdlist,
struct xadlist * xadlist, int flag)
@ -284,7 +284,7 @@ int xtLookupList(struct inode *ip, struct lxdlist * lxdlist,
return rc;
/*
* compute the physical extent covering logical extent
* compute the physical extent covering logical extent
*
* N.B. search may have failed (e.g., hole in sparse file),
* and returned the index of the next entry.
@ -343,7 +343,7 @@ int xtLookupList(struct inode *ip, struct lxdlist * lxdlist,
if (lstart >= size)
goto mapend;
/* compare with the current xad */
/* compare with the current xad */
goto compare1;
}
/* lxd is covered by xad */
@ -430,7 +430,7 @@ int xtLookupList(struct inode *ip, struct lxdlist * lxdlist,
/*
* lxd is partially covered by xad
*/
else { /* (xend < lend) */
else { /* (xend < lend) */
/*
* get next xad
@ -477,22 +477,22 @@ int xtLookupList(struct inode *ip, struct lxdlist * lxdlist,
/*
* xtSearch()
* xtSearch()
*
* function: search for the xad entry covering specified offset.
* function: search for the xad entry covering specified offset.
*
* parameters:
* ip - file object;
* xoff - extent offset;
* nextp - address of next extent (if any) for search miss
* cmpp - comparison result:
* btstack - traverse stack;
* flag - search process flag (XT_INSERT);
* ip - file object;
* xoff - extent offset;
* nextp - address of next extent (if any) for search miss
* cmpp - comparison result:
* btstack - traverse stack;
* flag - search process flag (XT_INSERT);
*
* returns:
* btstack contains (bn, index) of search path traversed to the entry.
* *cmpp is set to result of comparison with the entry returned.
* the page containing the entry is pinned at exit.
* btstack contains (bn, index) of search path traversed to the entry.
* *cmpp is set to result of comparison with the entry returned.
* the page containing the entry is pinned at exit.
*/
static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp,
int *cmpp, struct btstack * btstack, int flag)
@ -517,7 +517,7 @@ static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp,
btstack->nsplit = 0;
/*
* search down tree from root:
* search down tree from root:
*
* between two consecutive entries of <Ki, Pi> and <Kj, Pj> of
* internal page, child page Pi contains entry with k, Ki <= K < Kj.
@ -642,7 +642,7 @@ static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp,
XT_CMP(cmp, xoff, &p->xad[index], t64);
if (cmp == 0) {
/*
* search hit
* search hit
*/
/* search hit - leaf page:
* return the entry found
@ -692,7 +692,7 @@ static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp,
}
/*
* search miss
* search miss
*
* base is the smallest index with key (Kj) greater than
* search key (K) and may be zero or maxentry index.
@ -773,22 +773,22 @@ static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp,
}
/*
* xtInsert()
* xtInsert()
*
* function:
*
* parameter:
* tid - transaction id;
* ip - file object;
* xflag - extent flag (XAD_NOTRECORDED):
* xoff - extent offset;
* xlen - extent length;
* xaddrp - extent address pointer (in/out):
* if (*xaddrp)
* caller allocated data extent at *xaddrp;
* else
* allocate data extent and return its xaddr;
* flag -
* tid - transaction id;
* ip - file object;
* xflag - extent flag (XAD_NOTRECORDED):
* xoff - extent offset;
* xlen - extent length;
* xaddrp - extent address pointer (in/out):
* if (*xaddrp)
* caller allocated data extent at *xaddrp;
* else
* allocate data extent and return its xaddr;
* flag -
*
* return:
*/
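/*
 * Hypothetical caller fragment for the two xaddrp modes above (assumes
 * the parameter order listed in this comment; tid, ip, xoff, xlen and
 * blkno come from the enclosing transaction context; error handling
 * elided; not part of this change):
 *
 *	s64 xaddr = 0;		(zero: let xtInsert() allocate the extent)
 *	rc = xtInsert(tid, ip, 0, xoff, xlen, &xaddr, 0);
 *	(on success, xaddr holds the newly allocated extent address)
 *
 *	s64 prealloc = blkno;	(non-zero: caller already allocated it)
 *	rc = xtInsert(tid, ip, 0, xoff, xlen, &prealloc, 0);
 */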
@ -813,7 +813,7 @@ int xtInsert(tid_t tid, /* transaction id */
jfs_info("xtInsert: nxoff:0x%lx nxlen:0x%x", (ulong) xoff, xlen);
/*
* search for the entry location at which to insert:
* search for the entry location at which to insert:
*
* xtFastSearch() and xtSearch() both return (leaf page
* pinned, index at which to insert).
@ -853,13 +853,13 @@ int xtInsert(tid_t tid, /* transaction id */
}
/*
* insert entry for new extent
* insert entry for new extent
*/
xflag |= XAD_NEW;
/*
* if the leaf page is full, split the page and
* propagate up the router entry for the new page from split
* if the leaf page is full, split the page and
* propagate up the router entry for the new page from split
*
* The xtSplitUp() will insert the entry and unpin the leaf page.
*/
@ -886,7 +886,7 @@ int xtInsert(tid_t tid, /* transaction id */
}
/*
* insert the new entry into the leaf page
* insert the new entry into the leaf page
*/
/*
* acquire a transaction lock on the leaf page;
@ -930,16 +930,16 @@ int xtInsert(tid_t tid, /* transaction id */
/*
* xtSplitUp()
* xtSplitUp()
*
* function:
* split full pages as propagating insertion up the tree
* split full pages as propagating insertion up the tree
*
* parameter:
* tid - transaction id;
* ip - file object;
* split - entry parameter descriptor;
* btstack - traverse stack from xtSearch()
* tid - transaction id;
* ip - file object;
* split - entry parameter descriptor;
* btstack - traverse stack from xtSearch()
*
* return:
*/
@ -1199,22 +1199,22 @@ xtSplitUp(tid_t tid,
/*
* xtSplitPage()
* xtSplitPage()
*
* function:
* split a full non-root page into
* original/split/left page and new right page
* i.e., the original/split page remains as left page.
* split a full non-root page into
* original/split/left page and new right page
* i.e., the original/split page remains as left page.
*
* parameter:
* int tid,
* struct inode *ip,
* struct xtsplit *split,
* struct metapage **rmpp,
* u64 *rbnp,
* int tid,
* struct inode *ip,
* struct xtsplit *split,
* struct metapage **rmpp,
* u64 *rbnp,
*
* return:
* Pointer to page in which to insert or NULL on error.
* Pointer to page in which to insert or NULL on error.
*/
static int
xtSplitPage(tid_t tid, struct inode *ip,
@ -1248,9 +1248,9 @@ xtSplitPage(tid_t tid, struct inode *ip,
rbn = addressPXD(pxd);
/* Allocate blocks to quota. */
if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
rc = -EDQUOT;
goto clean_up;
if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
rc = -EDQUOT;
goto clean_up;
}
quota_allocation += lengthPXD(pxd);
@ -1304,7 +1304,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
skip = split->index;
/*
* sequential append at tail (after last entry of last page)
* sequential append at tail (after last entry of last page)
*
* if splitting the last page on a level because of appending
* an entry to it (skip is maxentry), it's likely that the access is
@ -1342,7 +1342,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
}
/*
* non-sequential insert (at possibly middle page)
* non-sequential insert (at possibly middle page)
*/
/*
@ -1465,25 +1465,24 @@ xtSplitPage(tid_t tid, struct inode *ip,
/*
* xtSplitRoot()
* xtSplitRoot()
*
* function:
* split the full root page into
* original/root/split page and new right page
* i.e., root remains fixed in tree anchor (inode) and
* the root is copied to a single new right child page
* since root page << non-root page, and
* the split root page contains a single entry for the
* new right child page.
* split the full root page into original/root/split page and new
* right page
* i.e., root remains fixed in tree anchor (inode) and the root is
* copied to a single new right child page since root page <<
* non-root page, and the split root page contains a single entry
* for the new right child page.
*
* parameter:
* int tid,
* struct inode *ip,
* struct xtsplit *split,
* struct metapage **rmpp)
* int tid,
* struct inode *ip,
* struct xtsplit *split,
* struct metapage **rmpp)
*
* return:
* Pointer to page in which to insert or NULL on error.
* Pointer to page in which to insert or NULL on error.
*/
static int
xtSplitRoot(tid_t tid,
@ -1505,7 +1504,7 @@ xtSplitRoot(tid_t tid,
INCREMENT(xtStat.split);
/*
* allocate a single (right) child page
* allocate a single (right) child page
*/
pxdlist = split->pxdlist;
pxd = &pxdlist->pxd[pxdlist->npxd];
@ -1573,7 +1572,7 @@ xtSplitRoot(tid_t tid,
}
/*
* reset the root
* reset the root
*
* init root with the single entry for the new right page
* set the 1st entry offset to 0, which forces the left-most key
@ -1610,7 +1609,7 @@ xtSplitRoot(tid_t tid,
/*
* xtExtend()
* xtExtend()
*
* function: extend in-place;
*
@ -1677,7 +1676,7 @@ int xtExtend(tid_t tid, /* transaction id */
goto extendOld;
/*
* extent overflow: insert entry for new extent
* extent overflow: insert entry for new extent
*/
//insertNew:
xoff = offsetXAD(xad) + MAXXLEN;
@ -1685,8 +1684,8 @@ int xtExtend(tid_t tid, /* transaction id */
nextindex = le16_to_cpu(p->header.nextindex);
/*
* if the leaf page is full, insert the new entry and
* propagate up the router entry for the new page from split
* if the leaf page is full, insert the new entry and
* propagate up the router entry for the new page from split
*
* The xtSplitUp() will insert the entry and unpin the leaf page.
*/
@ -1731,7 +1730,7 @@ int xtExtend(tid_t tid, /* transaction id */
}
}
/*
* insert the new entry into the leaf page
* insert the new entry into the leaf page
*/
else {
/* insert the new entry: mark the entry NEW */
@ -1771,11 +1770,11 @@ int xtExtend(tid_t tid, /* transaction id */
#ifdef _NOTYET
/*
* xtTailgate()
* xtTailgate()
*
* function: split existing 'tail' extent
* (split offset >= start offset of tail extent), and
* relocate and extend the split tail half;
* (split offset >= start offset of tail extent), and
* relocate and extend the split tail half;
*
* note: existing extent may or may not have been committed.
* caller is responsible for pager buffer cache update, and
@ -1804,7 +1803,7 @@ int xtTailgate(tid_t tid, /* transaction id */
/*
printf("xtTailgate: nxoff:0x%lx nxlen:0x%x nxaddr:0x%lx\n",
(ulong)xoff, xlen, (ulong)xaddr);
(ulong)xoff, xlen, (ulong)xaddr);
*/
/* there must exist extent to be tailgated */
@ -1842,18 +1841,18 @@ printf("xtTailgate: nxoff:0x%lx nxlen:0x%x nxaddr:0x%lx\n",
xad = &p->xad[index];
/*
printf("xtTailgate: xoff:0x%lx xlen:0x%x xaddr:0x%lx\n",
(ulong)offsetXAD(xad), lengthXAD(xad), (ulong)addressXAD(xad));
(ulong)offsetXAD(xad), lengthXAD(xad), (ulong)addressXAD(xad));
*/
if ((llen = xoff - offsetXAD(xad)) == 0)
goto updateOld;
/*
* partially replace extent: insert entry for new extent
* partially replace extent: insert entry for new extent
*/
//insertNew:
/*
* if the leaf page is full, insert the new entry and
* propagate up the router entry for the new page from split
* if the leaf page is full, insert the new entry and
* propagate up the router entry for the new page from split
*
* The xtSplitUp() will insert the entry and unpin the leaf page.
*/
@ -1898,7 +1897,7 @@ printf("xtTailgate: xoff:0x%lx xlen:0x%x xaddr:0x%lx\n",
}
}
/*
* insert the new entry into the leaf page
* insert the new entry into the leaf page
*/
else {
/* insert the new entry: mark the entry NEW */
@ -1955,17 +1954,17 @@ printf("xtTailgate: xoff:0x%lx xlen:0x%x xaddr:0x%lx\n",
#endif /* _NOTYET */
/*
* xtUpdate()
* xtUpdate()
*
* function: update XAD;
*
* update extent for allocated_but_not_recorded or
* compressed extent;
* update extent for allocated_but_not_recorded or
* compressed extent;
*
* parameter:
* nxad - new XAD;
* logical extent of the specified XAD must be completely
* contained by an existing XAD;
* nxad - new XAD;
* logical extent of the specified XAD must be completely
* contained by an existing XAD;
*/
int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
{ /* new XAD */
@ -2416,19 +2415,19 @@ printf("xtUpdate.updateLeft.split p:0x%p\n", p);
/*
* xtAppend()
* xtAppend()
*
* function: grow in append mode from contiguous region specified;
*
* parameter:
* tid - transaction id;
* ip - file object;
* xflag - extent flag:
* xoff - extent offset;
* maxblocks - max extent length;
* xlen - extent length (in/out);
* xaddrp - extent address pointer (in/out):
* flag -
* tid - transaction id;
* ip - file object;
* xflag - extent flag:
* xoff - extent offset;
* maxblocks - max extent length;
* xlen - extent length (in/out);
* xaddrp - extent address pointer (in/out):
* flag -
*
* return:
*/
@ -2460,7 +2459,7 @@ int xtAppend(tid_t tid, /* transaction id */
(ulong) xoff, maxblocks, xlen, (ulong) xaddr);
/*
* search for the entry location at which to insert:
* search for the entry location at which to insert:
*
* xtFastSearch() and xtSearch() both return (leaf page
* pinned, index at which to insert).
@ -2482,13 +2481,13 @@ int xtAppend(tid_t tid, /* transaction id */
xlen = min(xlen, (int)(next - xoff));
//insert:
/*
* insert entry for new extent
* insert entry for new extent
*/
xflag |= XAD_NEW;
/*
* if the leaf page is full, split the page and
* propagate up the router entry for the new page from split
* if the leaf page is full, split the page and
* propagate up the router entry for the new page from split
*
* The xtSplitUp() will insert the entry and unpin the leaf page.
*/
@ -2545,7 +2544,7 @@ int xtAppend(tid_t tid, /* transaction id */
return 0;
/*
* insert the new entry into the leaf page
* insert the new entry into the leaf page
*/
insertLeaf:
/*
@ -2589,17 +2588,17 @@ int xtAppend(tid_t tid, /* transaction id */
/* - TBD for defragmentation/reorganization -
*
* xtDelete()
* xtDelete()
*
* function:
* delete the entry with the specified key.
* delete the entry with the specified key.
*
* N.B.: whole extent of the entry is assumed to be deleted.
* N.B.: whole extent of the entry is assumed to be deleted.
*
* parameter:
*
* return:
* ENOENT: if the entry is not found.
* ENOENT: if the entry is not found.
*
* exception:
*/
@ -2665,10 +2664,10 @@ int xtDelete(tid_t tid, struct inode *ip, s64 xoff, s32 xlen, int flag)
/* - TBD for defragmentation/reorganization -
*
* xtDeleteUp()
* xtDeleteUp()
*
* function:
* free empty pages as propagating deletion up the tree
* free empty pages as propagating deletion up the tree
*
* parameter:
*
@ -2815,15 +2814,15 @@ xtDeleteUp(tid_t tid, struct inode *ip,
/*
* NAME: xtRelocate()
* NAME: xtRelocate()
*
* FUNCTION: relocate xtpage or data extent of regular file;
* This function is mainly used by defragfs utility.
* FUNCTION: relocate xtpage or data extent of regular file;
* This function is mainly used by defragfs utility.
*
* NOTE: This routine does not have the logic to handle
* uncommitted allocated extents. The caller should call
* txCommit() to commit all the allocations before calling
* this routine.
* NOTE: This routine does not have the logic to handle
* uncommitted allocated extents. The caller should call
* txCommit() to commit all the allocations before calling
* this routine.
*/
int
xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
@ -2865,8 +2864,8 @@ xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
xtype, (ulong) xoff, xlen, (ulong) oxaddr, (ulong) nxaddr);
/*
* 1. get and validate the parent xtpage/xad entry
* covering the source extent to be relocated;
* 1. get and validate the parent xtpage/xad entry
* covering the source extent to be relocated;
*/
if (xtype == DATAEXT) {
/* search in leaf entry */
@ -2910,7 +2909,7 @@ xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
jfs_info("xtRelocate: parent xad entry validated.");
/*
* 2. relocate the extent
* 2. relocate the extent
*/
if (xtype == DATAEXT) {
/* if the extent is allocated-but-not-recorded
@ -2923,7 +2922,7 @@ xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
XT_PUTPAGE(pmp);
/*
* cmRelocate()
* cmRelocate()
*
* copy target data pages to be relocated;
*
@ -2945,8 +2944,8 @@ xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
pno = offset >> CM_L2BSIZE;
npages = (nbytes + (CM_BSIZE - 1)) >> CM_L2BSIZE;
/*
npages = ((offset + nbytes - 1) >> CM_L2BSIZE) -
(offset >> CM_L2BSIZE) + 1;
npages = ((offset + nbytes - 1) >> CM_L2BSIZE) -
(offset >> CM_L2BSIZE) + 1;
*/
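/*
 * Worked example with hypothetical numbers (assuming CM_L2BSIZE is 12,
 * i.e. 4 KiB cache blocks): for offset = 6144 and nbytes = 4096 the
 * active formula above gives npages = 8191 >> 12 = 1, while the
 * commented-out variant would give (10239 >> 12) - (6144 >> 12) + 1 = 2,
 * since the copy straddles a cache-block boundary when offset is not
 * block-aligned.
 */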
sxaddr = oxaddr;
dxaddr = nxaddr;
@ -2981,7 +2980,7 @@ xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
jfs_info("xtRelocate: target data extent relocated.");
} else { /* (xtype == XTPAGE) */
} else { /* (xtype == XTPAGE) */
/*
* read in the target xtpage from the source extent;
@ -3026,16 +3025,14 @@ xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
*/
if (lmp) {
BT_MARK_DIRTY(lmp, ip);
tlck =
txLock(tid, ip, lmp, tlckXTREE | tlckRELINK);
tlck = txLock(tid, ip, lmp, tlckXTREE | tlckRELINK);
lp->header.next = cpu_to_le64(nxaddr);
XT_PUTPAGE(lmp);
}
if (rmp) {
BT_MARK_DIRTY(rmp, ip);
tlck =
txLock(tid, ip, rmp, tlckXTREE | tlckRELINK);
tlck = txLock(tid, ip, rmp, tlckXTREE | tlckRELINK);
rp->header.prev = cpu_to_le64(nxaddr);
XT_PUTPAGE(rmp);
}
@ -3062,7 +3059,7 @@ xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
* scan may be skipped by commit() and logredo();
*/
BT_MARK_DIRTY(mp, ip);
/* tlckNEW init xtlck->lwm.offset = XTENTRYSTART; */
/* tlckNEW init xtlck->lwm.offset = XTENTRYSTART; */
tlck = txLock(tid, ip, mp, tlckXTREE | tlckNEW);
xtlck = (struct xtlock *) & tlck->lock;
@ -3084,7 +3081,7 @@ xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
}
/*
* 3. acquire maplock for the source extent to be freed;
* 3. acquire maplock for the source extent to be freed;
*
* acquire a maplock saving the src relocated extent address;
* to free of the extent at commit time;
@ -3105,7 +3102,7 @@ xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
* is no buffer associated with this lock since the buffer
* has been redirected to the target location.
*/
else /* (xtype == XTPAGE) */
else /* (xtype == XTPAGE) */
tlck = txMaplock(tid, ip, tlckMAP | tlckRELOCATE);
pxdlock = (struct pxd_lock *) & tlck->lock;
@ -3115,7 +3112,7 @@ xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
pxdlock->index = 1;
/*
* 4. update the parent xad entry for relocation;
* 4. update the parent xad entry for relocation;
*
* acquire tlck for the parent entry with XAD_NEW as entry
* update which will write LOG_REDOPAGE and update bmap for
@ -3143,22 +3140,22 @@ xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
/*
* xtSearchNode()
* xtSearchNode()
*
* function: search for the internal xad entry covering specified extent.
* This function is mainly used by defragfs utility.
* function: search for the internal xad entry covering specified extent.
* This function is mainly used by defragfs utility.
*
* parameters:
* ip - file object;
* xad - extent to find;
* cmpp - comparison result:
* btstack - traverse stack;
* flag - search process flag;
* ip - file object;
* xad - extent to find;
* cmpp - comparison result:
* btstack - traverse stack;
* flag - search process flag;
*
* returns:
* btstack contains (bn, index) of search path traversed to the entry.
* *cmpp is set to result of comparison with the entry returned.
* the page containing the entry is pinned at exit.
* btstack contains (bn, index) of search path traversed to the entry.
* *cmpp is set to result of comparison with the entry returned.
* the page containing the entry is pinned at exit.
*/
static int xtSearchNode(struct inode *ip, xad_t * xad, /* required XAD entry */
int *cmpp, struct btstack * btstack, int flag)
@ -3181,7 +3178,7 @@ static int xtSearchNode(struct inode *ip, xad_t * xad, /* required XAD entry */
xaddr = addressXAD(xad);
/*
* search down tree from root:
* search down tree from root:
*
* between two consecutive entries of <Ki, Pi> and <Kj, Pj> of
* internal page, child page Pi contains entry with k, Ki <= K < Kj.
@ -3217,7 +3214,7 @@ static int xtSearchNode(struct inode *ip, xad_t * xad, /* required XAD entry */
XT_CMP(cmp, xoff, &p->xad[index], t64);
if (cmp == 0) {
/*
* search hit
* search hit
*
* verify for exact match;
*/
@ -3245,7 +3242,7 @@ static int xtSearchNode(struct inode *ip, xad_t * xad, /* required XAD entry */
}
/*
* search miss - non-leaf page:
* search miss - non-leaf page:
*
* base is the smallest index with key (Kj) greater than
* search key (K) and may be zero or maxentry index.
@ -3268,15 +3265,15 @@ static int xtSearchNode(struct inode *ip, xad_t * xad, /* required XAD entry */
/*
* xtRelink()
* xtRelink()
*
* function:
* link around a freed page.
* link around a freed page.
*
* Parameter:
* int tid,
* struct inode *ip,
* xtpage_t *p)
* int tid,
* struct inode *ip,
* xtpage_t *p)
*
* returns:
*/
@ -3338,7 +3335,7 @@ static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * p)
/*
* xtInitRoot()
* xtInitRoot()
*
* initialize file root (inline in inode)
*/
@ -3385,42 +3382,42 @@ void xtInitRoot(tid_t tid, struct inode *ip)
#define MAX_TRUNCATE_LEAVES 50
/*
* xtTruncate()
* xtTruncate()
*
* function:
* traverse for truncation logging backward bottom up;
* terminate at the last extent entry at the current subtree
* root page covering new down size.
* truncation may occur within the last extent entry.
* traverse for truncation logging backward bottom up;
* terminate at the last extent entry at the current subtree
* root page covering new down size.
* truncation may occur within the last extent entry.
*
* parameter:
* int tid,
* struct inode *ip,
* s64 newsize,
* int type) {PWMAP, PMAP, WMAP; DELETE, TRUNCATE}
* int tid,
* struct inode *ip,
* s64 newsize,
* int type) {PWMAP, PMAP, WMAP; DELETE, TRUNCATE}
*
* return:
*
* note:
* PWMAP:
* 1. truncate (non-COMMIT_NOLINK file)
* by jfs_truncate() or jfs_open(O_TRUNC):
* xtree is updated;
* PWMAP:
* 1. truncate (non-COMMIT_NOLINK file)
* by jfs_truncate() or jfs_open(O_TRUNC):
* xtree is updated;
* 2. truncate index table of directory when last entry removed
* map update via tlock at commit time;
* PMAP:
* map update via tlock at commit time;
* PMAP:
* Call xtTruncate_pmap instead
* WMAP:
* 1. remove (free zero link count) on last reference release
* (pmap has been freed at commit zero link count);
* 2. truncate (COMMIT_NOLINK file, i.e., tmp file):
* xtree is updated;
* map update directly at truncation time;
* WMAP:
* 1. remove (free zero link count) on last reference release
* (pmap has been freed at commit zero link count);
* 2. truncate (COMMIT_NOLINK file, i.e., tmp file):
* xtree is updated;
* map update directly at truncation time;
*
* if (DELETE)
* no LOG_NOREDOPAGE is required (NOREDOFILE is sufficient);
* else if (TRUNCATE)
* must write LOG_NOREDOPAGE for deleted index page;
* if (DELETE)
* no LOG_NOREDOPAGE is required (NOREDOFILE is sufficient);
* else if (TRUNCATE)
* must write LOG_NOREDOPAGE for deleted index page;
*
* pages may already have been tlocked by anonymous transactions
* during file growth (i.e., write) before truncation;
@ -3493,7 +3490,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
* retained in the new sized file.
* if type is PMAP, the data and index pages are NOT
* freed, and the data and index blocks are NOT freed
* from working map.
* from working map.
* (this will allow continued access of data/index of
* temporary file (zerolink count file truncated to zero-length)).
*/
@ -3542,7 +3539,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
goto getChild;
/*
* leaf page
* leaf page
*/
freed = 0;
@ -3916,7 +3913,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
}
/*
* internal page: go down to child page of current entry
* internal page: go down to child page of current entry
*/
getChild:
/* save current parent entry for the child page */
@ -3965,7 +3962,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
/*
* xtTruncate_pmap()
* xtTruncate_pmap()
*
* function:
* Perform truncate to zero length for a deleted file, leaving the
@ -3974,9 +3971,9 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
* is committed to disk.
*
* parameter:
* tid_t tid,
* struct inode *ip,
* s64 committed_size)
* tid_t tid,
* struct inode *ip,
* s64 committed_size)
*
* return: new committed size
*
@ -4050,7 +4047,7 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
}
/*
* leaf page
* leaf page
*/
if (++locked_leaves > MAX_TRUNCATE_LEAVES) {
@ -4062,7 +4059,7 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
xoff = offsetXAD(xad);
xlen = lengthXAD(xad);
XT_PUTPAGE(mp);
return (xoff + xlen) << JFS_SBI(ip->i_sb)->l2bsize;
return (xoff + xlen) << JFS_SBI(ip->i_sb)->l2bsize;
}
tlck = txLock(tid, ip, mp, tlckXTREE);
tlck->type = tlckXTREE | tlckFREE;
@ -4099,8 +4096,7 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
*/
tlck = txLock(tid, ip, mp, tlckXTREE);
xtlck = (struct xtlock *) & tlck->lock;
xtlck->hwm.offset =
le16_to_cpu(p->header.nextindex) - 1;
xtlck->hwm.offset = le16_to_cpu(p->header.nextindex) - 1;
tlck->type = tlckXTREE | tlckFREE;
XT_PUTPAGE(mp);
@ -4118,7 +4114,7 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
else
index--;
/*
* internal page: go down to child page of current entry
* internal page: go down to child page of current entry
*/
getChild:
/* save current parent entry for the child page */

View File

@ -19,14 +19,14 @@
#define _H_JFS_XTREE
/*
* jfs_xtree.h: extent allocation descriptor B+-tree manager
* jfs_xtree.h: extent allocation descriptor B+-tree manager
*/
#include "jfs_btree.h"
/*
* extent allocation descriptor (xad)
* extent allocation descriptor (xad)
*/
typedef struct xad {
unsigned flag:8; /* 1: flag */
@ -38,30 +38,30 @@ typedef struct xad {
__le32 addr2; /* 4: address in unit of fsblksize */
} xad_t; /* (16) */
#define MAXXLEN ((1 << 24) - 1)
#define MAXXLEN ((1 << 24) - 1)
#define XTSLOTSIZE 16
#define L2XTSLOTSIZE 4
#define XTSLOTSIZE 16
#define L2XTSLOTSIZE 4
/* xad_t field construction */
#define XADoffset(xad, offset64)\
{\
(xad)->off1 = ((u64)offset64) >> 32;\
(xad)->off2 = __cpu_to_le32((offset64) & 0xffffffff);\
(xad)->off1 = ((u64)offset64) >> 32;\
(xad)->off2 = __cpu_to_le32((offset64) & 0xffffffff);\
}
#define XADaddress(xad, address64)\
{\
(xad)->addr1 = ((u64)address64) >> 32;\
(xad)->addr2 = __cpu_to_le32((address64) & 0xffffffff);\
(xad)->addr1 = ((u64)address64) >> 32;\
(xad)->addr2 = __cpu_to_le32((address64) & 0xffffffff);\
}
#define XADlength(xad, length32) (xad)->len = __cpu_to_le24(length32)
#define XADlength(xad, length32) (xad)->len = __cpu_to_le24(length32)
/* xad_t field extraction */
#define offsetXAD(xad)\
( ((s64)((xad)->off1)) << 32 | __le32_to_cpu((xad)->off2))
( ((s64)((xad)->off1)) << 32 | __le32_to_cpu((xad)->off2))
#define addressXAD(xad)\
( ((s64)((xad)->addr1)) << 32 | __le32_to_cpu((xad)->addr2))
#define lengthXAD(xad) __le24_to_cpu((xad)->len)
( ((s64)((xad)->addr1)) << 32 | __le32_to_cpu((xad)->addr2))
#define lengthXAD(xad) __le24_to_cpu((xad)->len)
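/*
 * Scale of these fields, assuming 4 KiB filesystem blocks (the block
 * size is not fixed by this header):
 *
 *	24-bit length:  MAXXLEN = 2^24 - 1 = 16,777,215 blocks,
 *	                so one xad maps just under 64 GiB of data;
 *	40-bit offset/address:  up to 2^40 blocks, i.e. 4 PiB.
 */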
/* xad list */
struct xadlist {
@ -71,22 +71,22 @@ struct xadlist {
};
/* xad_t flags */
#define XAD_NEW 0x01 /* new */
#define XAD_EXTENDED 0x02 /* extended */
#define XAD_COMPRESSED 0x04 /* compressed with recorded length */
#define XAD_NEW 0x01 /* new */
#define XAD_EXTENDED 0x02 /* extended */
#define XAD_COMPRESSED 0x04 /* compressed with recorded length */
#define XAD_NOTRECORDED 0x08 /* allocated but not recorded */
#define XAD_COW 0x10 /* copy-on-write */
#define XAD_COW 0x10 /* copy-on-write */
/* possible values for maxentry */
#define XTROOTINITSLOT_DIR 6
#define XTROOTINITSLOT 10
#define XTROOTMAXSLOT 18
#define XTPAGEMAXSLOT 256
#define XTENTRYSTART 2
#define XTROOTINITSLOT_DIR 6
#define XTROOTINITSLOT 10
#define XTROOTMAXSLOT 18
#define XTPAGEMAXSLOT 256
#define XTENTRYSTART 2
/*
* xtree page:
* xtree page:
*/
typedef union {
struct xtheader {
@ -106,7 +106,7 @@ typedef union {
} xtpage_t;
/*
* external declaration
* external declaration
*/
extern int xtLookup(struct inode *ip, s64 lstart, s64 llen,
int *pflag, s64 * paddr, int *plen, int flag);
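/*
 * Hypothetical caller sketch for the declaration above (ip and lblock
 * assumed from context; error handling elided; not taken from the JFS
 * sources):
 *
 *	int pflag, plen, rc;
 *	s64 paddr;
 *
 *	rc = xtLookup(ip, lblock, 1, &pflag, &paddr, &plen, 0);
 *	if (rc == 0 && plen > 0)
 *		(logical block lblock is backed by physical block paddr)
 */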

View File

@ -328,7 +328,7 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode)
* dentry - child directory dentry
*
* RETURN: -EINVAL - if name is . or ..
* -EINVAL - if . or .. exist but are invalid.
* -EINVAL - if . or .. exist but are invalid.
* errors from subroutines
*
* note:
@ -517,7 +517,7 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
inode_dec_link_count(ip);
/*
* commit zero link count object
* commit zero link count object
*/
if (ip->i_nlink == 0) {
assert(!test_cflag(COMMIT_Nolink, ip));
@ -596,7 +596,7 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
/*
* NAME: commitZeroLink()
*
* FUNCTION: for non-directory, called by jfs_remove(),
* FUNCTION: for non-directory, called by jfs_remove(),
* truncate a regular file, directory or symbolic
* link to zero length. return 0 if type is not
* one of these.
@ -676,7 +676,7 @@ static s64 commitZeroLink(tid_t tid, struct inode *ip)
/*
* NAME: jfs_free_zero_link()
*
* FUNCTION: for non-directory, called by iClose(),
* FUNCTION: for non-directory, called by iClose(),
* free resources of a file from cache and WORKING map
* for a file previously committed with zero link count
* while associated with a pager object,
@ -855,12 +855,12 @@ static int jfs_link(struct dentry *old_dentry,
* NAME: jfs_symlink(dip, dentry, name)
*
* FUNCTION: creates a symbolic link to <symlink> by name <name>
* in directory <dip>
* in directory <dip>
*
* PARAMETER: dip - parent directory vnode
* dentry - dentry of symbolic link
* name - the path name of the existing object
* that will be the source of the link
* PARAMETER: dip - parent directory vnode
* dentry - dentry of symbolic link
* name - the path name of the existing object
* that will be the source of the link
*
* RETURN: errors from subroutines
*
@ -1052,9 +1052,9 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
/*
* NAME: jfs_rename
* NAME: jfs_rename
*
* FUNCTION: rename a file or directory
* FUNCTION: rename a file or directory
*/
static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
@ -1331,9 +1331,9 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
/*
* NAME: jfs_mknod
* NAME: jfs_mknod
*
* FUNCTION: Create a special file (device)
* FUNCTION: Create a special file (device)
*/
static int jfs_mknod(struct inode *dir, struct dentry *dentry,
int mode, dev_t rdev)

View File

@ -29,17 +29,17 @@
#include "jfs_txnmgr.h"
#include "jfs_debug.h"
#define BITSPERPAGE (PSIZE << 3)
#define L2MEGABYTE 20
#define MEGABYTE (1 << L2MEGABYTE)
#define MEGABYTE32 (MEGABYTE << 5)
#define BITSPERPAGE (PSIZE << 3)
#define L2MEGABYTE 20
#define MEGABYTE (1 << L2MEGABYTE)
#define MEGABYTE32 (MEGABYTE << 5)
/* convert block number to bmap file page number */
#define BLKTODMAPN(b)\
(((b) >> 13) + ((b) >> 23) + ((b) >> 33) + 3 + 1)
(((b) >> 13) + ((b) >> 23) + ((b) >> 33) + 3 + 1)
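/*
 * Worked example of BLKTODMAPN() with hypothetical numbers (assuming
 * 4 KiB aggregate blocks, so b = 2^23 blocks is a 32 GiB aggregate):
 *
 *	b >> 13 = 1024, b >> 23 = 1, b >> 33 = 0
 *	BLKTODMAPN(b) = 1024 + 1 + 0 + 3 + 1 = 1029 bmap file pages
 */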
/*
* jfs_extendfs()
* jfs_extendfs()
*
* function: extend file system;
*
@ -48,9 +48,9 @@
* workspace space
*
* input:
* new LVSize: in LV blocks (required)
* new LogSize: in LV blocks (optional)
* new FSSize: in LV blocks (optional)
* new LVSize: in LV blocks (required)
* new LogSize: in LV blocks (optional)
* new FSSize: in LV blocks (optional)
*
* new configuration:
* 1. set new LogSize as specified or default from new LVSize;
@ -125,8 +125,8 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
}
/*
* reconfigure LV spaces
* ---------------------
* reconfigure LV spaces
* ---------------------
*
* validate new size, or, if not specified, determine new size
*/
@ -198,7 +198,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
log_formatted = 1;
}
/*
* quiesce file system
* quiesce file system
*
* (prepare to move the inline log and to prevent map update)
*
@ -270,8 +270,8 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
}
/*
* extend block allocation map
* ---------------------------
* extend block allocation map
* ---------------------------
*
* extendfs() for new extension, retry after crash recovery;
*
@ -283,7 +283,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
* s_size: aggregate size in physical blocks;
*/
/*
* compute the new block allocation map configuration
* compute the new block allocation map configuration
*
* map dinode:
* di_size: map file size in bytes;
@ -301,7 +301,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
newNpages = BLKTODMAPN(t64) + 1;
/*
* extend map from current map (WITHOUT growing mapfile)
* extend map from current map (WITHOUT growing mapfile)
*
* map new extension with unmapped part of the last partial
* dmap page, if applicable, and extra page(s) allocated
@ -341,8 +341,8 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
XSize -= nblocks;
/*
* grow map file to cover remaining extension
* and/or one extra dmap page for next extendfs();
* grow map file to cover remaining extension
* and/or one extra dmap page for next extendfs();
*
* allocate new map pages and its backing blocks, and
* update map file xtree
@ -422,8 +422,8 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
dbFinalizeBmap(ipbmap);
/*
* update inode allocation map
* ---------------------------
* update inode allocation map
* ---------------------------
*
* move iag lists from old to new iag;
* agstart field is not updated for logredo() to reconstruct
@ -442,8 +442,8 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
}
/*
* finalize
* --------
* finalize
* --------
*
* extension is committed when on-disk super block is
* updated with new descriptors: logredo will recover
@ -480,7 +480,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
diFreeSpecial(ipbmap2);
/*
* update superblock
* update superblock
*/
if ((rc = readSuper(sb, &bh)))
goto error_out;
@ -530,7 +530,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
resume:
/*
* resume file system transactions
* resume file system transactions
*/
txResume(sb);

View File

@ -63,9 +63,9 @@
*
* On-disk:
*
* FEALISTs are stored on disk using blocks allocated by dbAlloc() and
* written directly. An EA list may be in-lined in the inode if there is
* sufficient room available.
* FEALISTs are stored on disk using blocks allocated by dbAlloc() and
* written directly. An EA list may be in-lined in the inode if there is
* sufficient room available.
*/
struct ea_buffer {