Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs
* 'for-linus' of git://oss.sgi.com/xfs/xfs: (49 commits)
  xfs: add size update tracepoint to IO completion
  xfs: convert AIL cursors to use struct list_head
  xfs: remove confusing ail cursor wrapper
  xfs: use a cursor for bulk AIL insertion
  xfs: failure mapping nfs fh to inode should return ESTALE
  xfs: Remove the second parameter to xfs_sb_count()
  xfs: remove the dead XFS_DABUF_DEBUG code
  xfs: remove leftovers of the old btree tracing code
  xfs: remove the dead QUOTADEBUG code
  xfs: remove the unused xfs_buf_delwri_sort function
  xfs: remove wrappers around b_iodone
  xfs: remove wrappers around b_fspriv
  xfs: add a proper transaction pointer to struct xfs_buf
  xfs: factor out xfs_da_grow_inode_int
  xfs: factor out xfs_dir2_leaf_find_stale
  xfs: cleanup struct xfs_dir2_free
  xfs: reshuffle dir2 headers
  xfs: start periodic workers later
  Revert "xfs: fix filesystsem freeze race in xfs_trans_alloc"
  xfs: remove variables that serve no purpose in xfs_alloc_ag_vextent_exact()
  ...
This commit is contained in:
commit c1f792a5bf
@@ -88,8 +88,6 @@ xfs-y += xfs_alloc.o \
xfs_vnodeops.o \
xfs_rw.o

xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o

# Objects in linux/
xfs-y += $(addprefix $(XFS_LINUX)/, \
kmem.o \
@@ -264,7 +264,7 @@ xfs_set_mode(struct inode *inode, mode_t mode)
iattr.ia_mode = mode;
iattr.ia_ctime = current_fs_time(inode->i_sb);

error = -xfs_setattr(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
error = -xfs_setattr_nonsize(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
}

return error;
@@ -181,6 +181,7 @@ xfs_setfilesize(

isize = xfs_ioend_new_eof(ioend);
if (isize) {
trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
ip->i_d.di_size = isize;
xfs_mark_inode_dirty(ip);
}

@@ -894,11 +895,6 @@ out_invalidate:
* For unwritten space on the page we need to start the conversion to
* regular allocated space.
* For any other dirty buffer heads on the page we should flush them.
*
* If we detect that a transaction would be required to flush the page, we
* have to check the process flags first, if we are already in a transaction
* or disk I/O during allocations is off, we need to fail the writepage and
* redirty the page.
*/
STATIC int
xfs_vm_writepage(

@@ -906,7 +902,6 @@ xfs_vm_writepage(
struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
int delalloc, unwritten;
struct buffer_head *bh, *head;
struct xfs_bmbt_irec imap;
xfs_ioend_t *ioend = NULL, *iohead = NULL;

@@ -938,15 +933,10 @@ xfs_vm_writepage(
goto redirty;

/*
* We need a transaction if there are delalloc or unwritten buffers
* on the page.
*
* If we need a transaction and the process flags say we are already
* in a transaction, or no IO is allowed then mark the page dirty
* again and leave the page as is.
* Given that we do not allow direct reclaim to call us, we should
* never be called while in a filesystem transaction.
*/
xfs_count_page_state(page, &delalloc, &unwritten);
if ((current->flags & PF_FSTRANS) && (delalloc || unwritten))
if (WARN_ON(current->flags & PF_FSTRANS))
goto redirty;

/* Is this page beyond the end of the file? */

@@ -970,7 +960,7 @@ xfs_vm_writepage(
offset = page_offset(page);
type = IO_OVERWRITE;

if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
if (wbc->sync_mode == WB_SYNC_NONE)
nonblocking = 1;

do {
@@ -499,16 +499,14 @@ found:
spin_unlock(&pag->pag_buf_lock);
xfs_perag_put(pag);

if (xfs_buf_cond_lock(bp)) {
/* failed, so wait for the lock if requested. */
if (!(flags & XBF_TRYLOCK)) {
xfs_buf_lock(bp);
XFS_STATS_INC(xb_get_locked_waited);
} else {
if (!xfs_buf_trylock(bp)) {
if (flags & XBF_TRYLOCK) {
xfs_buf_rele(bp);
XFS_STATS_INC(xb_busy_locked);
return NULL;
}
xfs_buf_lock(bp);
XFS_STATS_INC(xb_get_locked_waited);
}

/*

@@ -594,10 +592,8 @@ _xfs_buf_read(
ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);

bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
XBF_READ_AHEAD | _XBF_RUN_QUEUES);
bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
XBF_READ_AHEAD | _XBF_RUN_QUEUES);
bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | XBF_READ_AHEAD);
bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

status = xfs_buf_iorequest(bp);
if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))

@@ -681,7 +677,6 @@ xfs_buf_read_uncached(
return NULL;

/* set up the buffer for a read IO */
xfs_buf_lock(bp);
XFS_BUF_SET_ADDR(bp, daddr);
XFS_BUF_READ(bp);
XFS_BUF_BUSY(bp);

@@ -816,8 +811,6 @@ xfs_buf_get_uncached(
goto fail_free_mem;
}

xfs_buf_unlock(bp);

trace_xfs_buf_get_uncached(bp, _RET_IP_);
return bp;

@@ -896,8 +889,8 @@ xfs_buf_rele(
* to push on stale inode buffers.
*/
int
xfs_buf_cond_lock(
xfs_buf_t *bp)
xfs_buf_trylock(
struct xfs_buf *bp)
{
int locked;

@@ -907,15 +900,8 @@ xfs_buf_cond_lock(
else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
xfs_log_force(bp->b_target->bt_mount, 0);

trace_xfs_buf_cond_lock(bp, _RET_IP_);
return locked ? 0 : -EBUSY;
}

int
xfs_buf_lock_value(
xfs_buf_t *bp)
{
return bp->b_sema.count;
trace_xfs_buf_trylock(bp, _RET_IP_);
return locked;
}

/*

@@ -929,7 +915,7 @@ xfs_buf_lock_value(
*/
void
xfs_buf_lock(
xfs_buf_t *bp)
struct xfs_buf *bp)
{
trace_xfs_buf_lock(bp, _RET_IP_);

@@ -950,7 +936,7 @@ xfs_buf_lock(
*/
void
xfs_buf_unlock(
xfs_buf_t *bp)
struct xfs_buf *bp)
{
if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
atomic_inc(&bp->b_hold);

@@ -1121,7 +1107,7 @@ xfs_bioerror_relse(
XFS_BUF_UNDELAYWRITE(bp);
XFS_BUF_DONE(bp);
XFS_BUF_STALE(bp);
XFS_BUF_CLR_IODONE_FUNC(bp);
bp->b_iodone = NULL;
if (!(fl & XBF_ASYNC)) {
/*
* Mark b_error and B_ERROR _both_.

@@ -1223,23 +1209,21 @@ _xfs_buf_ioapply(
total_nr_pages = bp->b_page_count;
map_i = 0;

if (bp->b_flags & XBF_ORDERED) {
ASSERT(!(bp->b_flags & XBF_READ));
rw = WRITE_FLUSH_FUA;
} else if (bp->b_flags & XBF_LOG_BUFFER) {
ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
bp->b_flags &= ~_XBF_RUN_QUEUES;
rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
} else if (bp->b_flags & _XBF_RUN_QUEUES) {
ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
bp->b_flags &= ~_XBF_RUN_QUEUES;
rw = (bp->b_flags & XBF_WRITE) ? WRITE_META : READ_META;
if (bp->b_flags & XBF_WRITE) {
if (bp->b_flags & XBF_SYNCIO)
rw = WRITE_SYNC;
else
rw = WRITE;
if (bp->b_flags & XBF_FUA)
rw |= REQ_FUA;
if (bp->b_flags & XBF_FLUSH)
rw |= REQ_FLUSH;
} else if (bp->b_flags & XBF_READ_AHEAD) {
rw = READA;
} else {
rw = (bp->b_flags & XBF_WRITE) ? WRITE :
(bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
rw = READ;
}

next_chunk:
atomic_inc(&bp->b_io_remaining);
nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);

@@ -1694,15 +1678,14 @@ xfs_buf_delwri_split(
list_for_each_entry_safe(bp, n, dwq, b_list) {
ASSERT(bp->b_flags & XBF_DELWRI);

if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
if (!XFS_BUF_ISPINNED(bp) && xfs_buf_trylock(bp)) {
if (!force &&
time_before(jiffies, bp->b_queuetime + age)) {
xfs_buf_unlock(bp);
break;
}

bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
_XBF_RUN_QUEUES);
bp->b_flags &= ~(XBF_DELWRI | _XBF_DELWRI_Q);
bp->b_flags |= XBF_WRITE;
list_move_tail(&bp->b_list, list);
trace_xfs_buf_delwri_split(bp, _RET_IP_);

@@ -1738,14 +1721,6 @@ xfs_buf_cmp(
return 0;
}

void
xfs_buf_delwri_sort(
xfs_buftarg_t *target,
struct list_head *list)
{
list_sort(NULL, list, xfs_buf_cmp);
}

STATIC int
xfsbufd(
void *data)
@ -46,43 +46,46 @@ typedef enum {
|
||||
|
||||
#define XBF_READ (1 << 0) /* buffer intended for reading from device */
|
||||
#define XBF_WRITE (1 << 1) /* buffer intended for writing to device */
|
||||
#define XBF_MAPPED (1 << 2) /* buffer mapped (b_addr valid) */
|
||||
#define XBF_READ_AHEAD (1 << 2) /* asynchronous read-ahead */
|
||||
#define XBF_MAPPED (1 << 3) /* buffer mapped (b_addr valid) */
|
||||
#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */
|
||||
#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
|
||||
#define XBF_DELWRI (1 << 6) /* buffer has dirty pages */
|
||||
#define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */
|
||||
#define XBF_ORDERED (1 << 11)/* use ordered writes */
|
||||
#define XBF_READ_AHEAD (1 << 12)/* asynchronous read-ahead */
|
||||
#define XBF_LOG_BUFFER (1 << 13)/* this is a buffer used for the log */
|
||||
|
||||
/* I/O hints for the BIO layer */
|
||||
#define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */
|
||||
#define XBF_FUA (1 << 11)/* force cache write through mode */
|
||||
#define XBF_FLUSH (1 << 12)/* flush the disk cache before a write */
|
||||
|
||||
/* flags used only as arguments to access routines */
|
||||
#define XBF_LOCK (1 << 14)/* lock requested */
|
||||
#define XBF_TRYLOCK (1 << 15)/* lock requested, but do not wait */
|
||||
#define XBF_DONT_BLOCK (1 << 16)/* do not block in current thread */
|
||||
#define XBF_LOCK (1 << 15)/* lock requested */
|
||||
#define XBF_TRYLOCK (1 << 16)/* lock requested, but do not wait */
|
||||
#define XBF_DONT_BLOCK (1 << 17)/* do not block in current thread */
|
||||
|
||||
/* flags used only internally */
|
||||
#define _XBF_PAGES (1 << 18)/* backed by refcounted pages */
|
||||
#define _XBF_RUN_QUEUES (1 << 19)/* run block device task queue */
|
||||
#define _XBF_KMEM (1 << 20)/* backed by heap memory */
|
||||
#define _XBF_DELWRI_Q (1 << 21)/* buffer on delwri queue */
|
||||
#define _XBF_PAGES (1 << 20)/* backed by refcounted pages */
|
||||
#define _XBF_KMEM (1 << 21)/* backed by heap memory */
|
||||
#define _XBF_DELWRI_Q (1 << 22)/* buffer on delwri queue */
|
||||
|
||||
typedef unsigned int xfs_buf_flags_t;
|
||||
|
||||
#define XFS_BUF_FLAGS \
|
||||
{ XBF_READ, "READ" }, \
|
||||
{ XBF_WRITE, "WRITE" }, \
|
||||
{ XBF_READ_AHEAD, "READ_AHEAD" }, \
|
||||
{ XBF_MAPPED, "MAPPED" }, \
|
||||
{ XBF_ASYNC, "ASYNC" }, \
|
||||
{ XBF_DONE, "DONE" }, \
|
||||
{ XBF_DELWRI, "DELWRI" }, \
|
||||
{ XBF_STALE, "STALE" }, \
|
||||
{ XBF_ORDERED, "ORDERED" }, \
|
||||
{ XBF_READ_AHEAD, "READ_AHEAD" }, \
|
||||
{ XBF_SYNCIO, "SYNCIO" }, \
|
||||
{ XBF_FUA, "FUA" }, \
|
||||
{ XBF_FLUSH, "FLUSH" }, \
|
||||
{ XBF_LOCK, "LOCK" }, /* should never be set */\
|
||||
{ XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\
|
||||
{ XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\
|
||||
{ _XBF_PAGES, "PAGES" }, \
|
||||
{ _XBF_RUN_QUEUES, "RUN_QUEUES" }, \
|
||||
{ _XBF_KMEM, "KMEM" }, \
|
||||
{ _XBF_DELWRI_Q, "DELWRI_Q" }
|
||||
|
||||
@ -91,11 +94,6 @@ typedef enum {
|
||||
XBT_FORCE_FLUSH = 1,
|
||||
} xfs_buftarg_flags_t;
|
||||
|
||||
typedef struct xfs_bufhash {
|
||||
struct list_head bh_list;
|
||||
spinlock_t bh_lock;
|
||||
} xfs_bufhash_t;
|
||||
|
||||
typedef struct xfs_buftarg {
|
||||
dev_t bt_dev;
|
||||
struct block_device *bt_bdev;
|
||||
@ -151,7 +149,7 @@ typedef struct xfs_buf {
|
||||
xfs_buf_iodone_t b_iodone; /* I/O completion function */
|
||||
struct completion b_iowait; /* queue for I/O waiters */
|
||||
void *b_fspriv;
|
||||
void *b_fspriv2;
|
||||
struct xfs_trans *b_transp;
|
||||
struct page **b_pages; /* array of page pointers */
|
||||
struct page *b_page_array[XB_PAGES]; /* inline pages */
|
||||
unsigned long b_queuetime; /* time buffer was queued */
|
||||
@ -192,10 +190,11 @@ extern void xfs_buf_free(xfs_buf_t *);
|
||||
extern void xfs_buf_rele(xfs_buf_t *);
|
||||
|
||||
/* Locking and Unlocking Buffers */
|
||||
extern int xfs_buf_cond_lock(xfs_buf_t *);
|
||||
extern int xfs_buf_lock_value(xfs_buf_t *);
|
||||
extern int xfs_buf_trylock(xfs_buf_t *);
|
||||
extern void xfs_buf_lock(xfs_buf_t *);
|
||||
extern void xfs_buf_unlock(xfs_buf_t *);
|
||||
#define xfs_buf_islocked(bp) \
|
||||
((bp)->b_sema.count <= 0)
|
||||
|
||||
/* Buffer Read and Write Routines */
|
||||
extern int xfs_bwrite(struct xfs_mount *mp, struct xfs_buf *bp);
|
||||
@ -234,8 +233,9 @@ extern void xfs_buf_terminate(void);
|
||||
|
||||
|
||||
#define XFS_BUF_BFLAGS(bp) ((bp)->b_flags)
|
||||
#define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \
|
||||
~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))
|
||||
#define XFS_BUF_ZEROFLAGS(bp) \
|
||||
((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI| \
|
||||
XBF_SYNCIO|XBF_FUA|XBF_FLUSH))
|
||||
|
||||
void xfs_buf_stale(struct xfs_buf *bp);
|
||||
#define XFS_BUF_STALE(bp) xfs_buf_stale(bp);
|
||||
@ -267,10 +267,6 @@ void xfs_buf_stale(struct xfs_buf *bp);
|
||||
#define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC)
|
||||
#define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC)
|
||||
|
||||
#define XFS_BUF_ORDERED(bp) ((bp)->b_flags |= XBF_ORDERED)
|
||||
#define XFS_BUF_UNORDERED(bp) ((bp)->b_flags &= ~XBF_ORDERED)
|
||||
#define XFS_BUF_ISORDERED(bp) ((bp)->b_flags & XBF_ORDERED)
|
||||
|
||||
#define XFS_BUF_HOLD(bp) xfs_buf_hold(bp)
|
||||
#define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ)
|
||||
#define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ)
|
||||
@ -280,14 +276,6 @@ void xfs_buf_stale(struct xfs_buf *bp);
|
||||
#define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE)
|
||||
#define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE)
|
||||
|
||||
#define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone)
|
||||
#define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func))
|
||||
#define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL)
|
||||
|
||||
#define XFS_BUF_FSPRIVATE(bp, type) ((type)(bp)->b_fspriv)
|
||||
#define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val))
|
||||
#define XFS_BUF_FSPRIVATE2(bp, type) ((type)(bp)->b_fspriv2)
|
||||
#define XFS_BUF_SET_FSPRIVATE2(bp, val) ((bp)->b_fspriv2 = (void*)(val))
|
||||
#define XFS_BUF_SET_START(bp) do { } while (0)
|
||||
|
||||
#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr)
|
||||
@ -313,10 +301,6 @@ xfs_buf_set_ref(
|
||||
|
||||
#define XFS_BUF_ISPINNED(bp) atomic_read(&((bp)->b_pin_count))
|
||||
|
||||
#define XFS_BUF_VALUSEMA(bp) xfs_buf_lock_value(bp)
|
||||
#define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0)
|
||||
#define XFS_BUF_VSEMA(bp) xfs_buf_unlock(bp)
|
||||
#define XFS_BUF_PSEMA(bp,x) xfs_buf_lock(bp)
|
||||
#define XFS_BUF_FINISH_IOWAIT(bp) complete(&bp->b_iowait);
|
||||
|
||||
#define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target))
|
||||
|
@ -151,14 +151,14 @@ xfs_nfs_get_inode(
|
||||
* We don't use ESTALE directly down the chain to not
|
||||
* confuse applications using bulkstat that expect EINVAL.
|
||||
*/
|
||||
if (error == EINVAL)
|
||||
if (error == EINVAL || error == ENOENT)
|
||||
error = ESTALE;
|
||||
return ERR_PTR(-error);
|
||||
}
|
||||
|
||||
if (ip->i_d.di_gen != generation) {
|
||||
IRELE(ip);
|
||||
return ERR_PTR(-ENOENT);
|
||||
return ERR_PTR(-ESTALE);
|
||||
}
|
||||
|
||||
return VFS_I(ip);
|
||||
|
@ -944,7 +944,7 @@ xfs_file_fallocate(
|
||||
|
||||
iattr.ia_valid = ATTR_SIZE;
|
||||
iattr.ia_size = new_size;
|
||||
error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
|
||||
error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK);
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
|
@ -39,6 +39,7 @@
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_utils.h"
|
||||
#include "xfs_vnodeops.h"
|
||||
#include "xfs_inode_item.h"
|
||||
#include "xfs_trace.h"
|
||||
|
||||
#include <linux/capability.h>
|
||||
@ -497,12 +498,442 @@ xfs_vn_getattr(
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_setattr_nonsize(
|
||||
struct xfs_inode *ip,
|
||||
struct iattr *iattr,
|
||||
int flags)
|
||||
{
|
||||
xfs_mount_t *mp = ip->i_mount;
|
||||
struct inode *inode = VFS_I(ip);
|
||||
int mask = iattr->ia_valid;
|
||||
xfs_trans_t *tp;
|
||||
int error;
|
||||
uid_t uid = 0, iuid = 0;
|
||||
gid_t gid = 0, igid = 0;
|
||||
struct xfs_dquot *udqp = NULL, *gdqp = NULL;
|
||||
struct xfs_dquot *olddquot1 = NULL, *olddquot2 = NULL;
|
||||
|
||||
trace_xfs_setattr(ip);
|
||||
|
||||
if (mp->m_flags & XFS_MOUNT_RDONLY)
|
||||
return XFS_ERROR(EROFS);
|
||||
|
||||
if (XFS_FORCED_SHUTDOWN(mp))
|
||||
return XFS_ERROR(EIO);
|
||||
|
||||
error = -inode_change_ok(inode, iattr);
|
||||
if (error)
|
||||
return XFS_ERROR(error);
|
||||
|
||||
ASSERT((mask & ATTR_SIZE) == 0);
|
||||
|
||||
/*
|
||||
* If disk quotas is on, we make sure that the dquots do exist on disk,
|
||||
* before we start any other transactions. Trying to do this later
|
||||
* is messy. We don't care to take a readlock to look at the ids
|
||||
* in inode here, because we can't hold it across the trans_reserve.
|
||||
* If the IDs do change before we take the ilock, we're covered
|
||||
* because the i_*dquot fields will get updated anyway.
|
||||
*/
|
||||
if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) {
|
||||
uint qflags = 0;
|
||||
|
||||
if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) {
|
||||
uid = iattr->ia_uid;
|
||||
qflags |= XFS_QMOPT_UQUOTA;
|
||||
} else {
|
||||
uid = ip->i_d.di_uid;
|
||||
}
|
||||
if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {
|
||||
gid = iattr->ia_gid;
|
||||
qflags |= XFS_QMOPT_GQUOTA;
|
||||
} else {
|
||||
gid = ip->i_d.di_gid;
|
||||
}
|
||||
|
||||
/*
|
||||
* We take a reference when we initialize udqp and gdqp,
|
||||
* so it is important that we never blindly double trip on
|
||||
* the same variable. See xfs_create() for an example.
|
||||
*/
|
||||
ASSERT(udqp == NULL);
|
||||
ASSERT(gdqp == NULL);
|
||||
error = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip),
|
||||
qflags, &udqp, &gdqp);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
|
||||
error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
|
||||
if (error)
|
||||
goto out_dqrele;
|
||||
|
||||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
|
||||
/*
|
||||
* Change file ownership. Must be the owner or privileged.
|
||||
*/
|
||||
if (mask & (ATTR_UID|ATTR_GID)) {
|
||||
/*
|
||||
* These IDs could have changed since we last looked at them.
|
||||
* But, we're assured that if the ownership did change
|
||||
* while we didn't have the inode locked, inode's dquot(s)
|
||||
* would have changed also.
|
||||
*/
|
||||
iuid = ip->i_d.di_uid;
|
||||
igid = ip->i_d.di_gid;
|
||||
gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;
|
||||
uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;
|
||||
|
||||
/*
|
||||
* Do a quota reservation only if uid/gid is actually
|
||||
* going to change.
|
||||
*/
|
||||
if (XFS_IS_QUOTA_RUNNING(mp) &&
|
||||
((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
|
||||
(XFS_IS_GQUOTA_ON(mp) && igid != gid))) {
|
||||
ASSERT(tp);
|
||||
error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
|
||||
capable(CAP_FOWNER) ?
|
||||
XFS_QMOPT_FORCE_RES : 0);
|
||||
if (error) /* out of quota */
|
||||
goto out_trans_cancel;
|
||||
}
|
||||
}
|
||||
|
||||
xfs_trans_ijoin(tp, ip);
|
||||
|
||||
/*
|
||||
* Change file ownership. Must be the owner or privileged.
|
||||
*/
|
||||
if (mask & (ATTR_UID|ATTR_GID)) {
|
||||
/*
|
||||
* CAP_FSETID overrides the following restrictions:
|
||||
*
|
||||
* The set-user-ID and set-group-ID bits of a file will be
|
||||
* cleared upon successful return from chown()
|
||||
*/
|
||||
if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
|
||||
!capable(CAP_FSETID))
|
||||
ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
|
||||
|
||||
/*
|
||||
* Change the ownerships and register quota modifications
|
||||
* in the transaction.
|
||||
*/
|
||||
if (iuid != uid) {
|
||||
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) {
|
||||
ASSERT(mask & ATTR_UID);
|
||||
ASSERT(udqp);
|
||||
olddquot1 = xfs_qm_vop_chown(tp, ip,
|
||||
&ip->i_udquot, udqp);
|
||||
}
|
||||
ip->i_d.di_uid = uid;
|
||||
inode->i_uid = uid;
|
||||
}
|
||||
if (igid != gid) {
|
||||
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
|
||||
ASSERT(!XFS_IS_PQUOTA_ON(mp));
|
||||
ASSERT(mask & ATTR_GID);
|
||||
ASSERT(gdqp);
|
||||
olddquot2 = xfs_qm_vop_chown(tp, ip,
|
||||
&ip->i_gdquot, gdqp);
|
||||
}
|
||||
ip->i_d.di_gid = gid;
|
||||
inode->i_gid = gid;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Change file access modes.
|
||||
*/
|
||||
if (mask & ATTR_MODE) {
|
||||
umode_t mode = iattr->ia_mode;
|
||||
|
||||
if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
|
||||
mode &= ~S_ISGID;
|
||||
|
||||
ip->i_d.di_mode &= S_IFMT;
|
||||
ip->i_d.di_mode |= mode & ~S_IFMT;
|
||||
|
||||
inode->i_mode &= S_IFMT;
|
||||
inode->i_mode |= mode & ~S_IFMT;
|
||||
}
|
||||
|
||||
/*
|
||||
* Change file access or modified times.
|
||||
*/
|
||||
if (mask & ATTR_ATIME) {
|
||||
inode->i_atime = iattr->ia_atime;
|
||||
ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
|
||||
ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
|
||||
ip->i_update_core = 1;
|
||||
}
|
||||
if (mask & ATTR_CTIME) {
|
||||
inode->i_ctime = iattr->ia_ctime;
|
||||
ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
|
||||
ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
|
||||
ip->i_update_core = 1;
|
||||
}
|
||||
if (mask & ATTR_MTIME) {
|
||||
inode->i_mtime = iattr->ia_mtime;
|
||||
ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
|
||||
ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
|
||||
ip->i_update_core = 1;
|
||||
}
|
||||
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
|
||||
XFS_STATS_INC(xs_ig_attrchg);
|
||||
|
||||
if (mp->m_flags & XFS_MOUNT_WSYNC)
|
||||
xfs_trans_set_sync(tp);
|
||||
error = xfs_trans_commit(tp, 0);
|
||||
|
||||
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||||
|
||||
/*
|
||||
* Release any dquot(s) the inode had kept before chown.
|
||||
*/
|
||||
xfs_qm_dqrele(olddquot1);
|
||||
xfs_qm_dqrele(olddquot2);
|
||||
xfs_qm_dqrele(udqp);
|
||||
xfs_qm_dqrele(gdqp);
|
||||
|
||||
if (error)
|
||||
return XFS_ERROR(error);
|
||||
|
||||
/*
|
||||
* XXX(hch): Updating the ACL entries is not atomic vs the i_mode
|
||||
* update. We could avoid this with linked transactions
|
||||
* and passing down the transaction pointer all the way
|
||||
* to attr_set. No previous user of the generic
|
||||
* Posix ACL code seems to care about this issue either.
|
||||
*/
|
||||
if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) {
|
||||
error = -xfs_acl_chmod(inode);
|
||||
if (error)
|
||||
return XFS_ERROR(error);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_trans_cancel:
|
||||
xfs_trans_cancel(tp, 0);
|
||||
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||||
out_dqrele:
|
||||
xfs_qm_dqrele(udqp);
|
||||
xfs_qm_dqrele(gdqp);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Truncate file. Must have write permission and not be a directory.
|
||||
*/
|
||||
int
|
||||
xfs_setattr_size(
|
||||
struct xfs_inode *ip,
|
||||
struct iattr *iattr,
|
||||
int flags)
|
||||
{
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
struct inode *inode = VFS_I(ip);
|
||||
int mask = iattr->ia_valid;
|
||||
struct xfs_trans *tp;
|
||||
int error;
|
||||
uint lock_flags;
|
||||
uint commit_flags = 0;
|
||||
|
||||
trace_xfs_setattr(ip);
|
||||
|
||||
if (mp->m_flags & XFS_MOUNT_RDONLY)
|
||||
return XFS_ERROR(EROFS);
|
||||
|
||||
if (XFS_FORCED_SHUTDOWN(mp))
|
||||
return XFS_ERROR(EIO);
|
||||
|
||||
error = -inode_change_ok(inode, iattr);
|
||||
if (error)
|
||||
return XFS_ERROR(error);
|
||||
|
||||
ASSERT(S_ISREG(ip->i_d.di_mode));
|
||||
ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
|
||||
ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID|
|
||||
ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
|
||||
|
||||
lock_flags = XFS_ILOCK_EXCL;
|
||||
if (!(flags & XFS_ATTR_NOLOCK))
|
||||
lock_flags |= XFS_IOLOCK_EXCL;
|
||||
xfs_ilock(ip, lock_flags);
|
||||
|
||||
/*
|
||||
* Short circuit the truncate case for zero length files.
|
||||
*/
|
||||
if (iattr->ia_size == 0 &&
|
||||
ip->i_size == 0 && ip->i_d.di_nextents == 0) {
|
||||
if (!(mask & (ATTR_CTIME|ATTR_MTIME)))
|
||||
goto out_unlock;
|
||||
|
||||
/*
|
||||
* Use the regular setattr path to update the timestamps.
|
||||
*/
|
||||
xfs_iunlock(ip, lock_flags);
|
||||
iattr->ia_valid &= ~ATTR_SIZE;
|
||||
return xfs_setattr_nonsize(ip, iattr, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Make sure that the dquots are attached to the inode.
|
||||
*/
|
||||
error = xfs_qm_dqattach_locked(ip, 0);
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
|
||||
/*
|
||||
* Now we can make the changes. Before we join the inode to the
|
||||
* transaction, take care of the part of the truncation that must be
|
||||
* done without the inode lock. This needs to be done before joining
|
||||
* the inode to the transaction, because the inode cannot be unlocked
|
||||
* once it is a part of the transaction.
|
||||
*/
|
||||
if (iattr->ia_size > ip->i_size) {
|
||||
/*
|
||||
* Do the first part of growing a file: zero any data in the
|
||||
* last block that is beyond the old EOF. We need to do this
|
||||
* before the inode is joined to the transaction to modify
|
||||
* i_size.
|
||||
*/
|
||||
error = xfs_zero_eof(ip, iattr->ia_size, ip->i_size);
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
}
|
||||
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||||
lock_flags &= ~XFS_ILOCK_EXCL;
|
||||
|
||||
/*
|
||||
* We are going to log the inode size change in this transaction so
|
||||
* any previous writes that are beyond the on disk EOF and the new
|
||||
* EOF that have not been written out need to be written here. If we
|
||||
* do not write the data out, we expose ourselves to the null files
|
||||
* problem.
|
||||
*
|
||||
* Only flush from the on disk size to the smaller of the in memory
|
||||
* file size or the new size as that's the range we really care about
|
||||
* here and prevents waiting for other data not within the range we
|
||||
* care about here.
|
||||
*/
|
||||
if (ip->i_size != ip->i_d.di_size && iattr->ia_size > ip->i_d.di_size) {
|
||||
error = xfs_flush_pages(ip, ip->i_d.di_size, iattr->ia_size,
|
||||
XBF_ASYNC, FI_NONE);
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/*
|
||||
* Wait for all I/O to complete.
|
||||
*/
|
||||
xfs_ioend_wait(ip);
|
||||
|
||||
error = -block_truncate_page(inode->i_mapping, iattr->ia_size,
|
||||
xfs_get_blocks);
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
|
||||
error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
|
||||
XFS_TRANS_PERM_LOG_RES,
|
||||
XFS_ITRUNCATE_LOG_COUNT);
|
||||
if (error)
|
||||
goto out_trans_cancel;
|
||||
|
||||
truncate_setsize(inode, iattr->ia_size);
|
||||
|
||||
commit_flags = XFS_TRANS_RELEASE_LOG_RES;
|
||||
lock_flags |= XFS_ILOCK_EXCL;
|
||||
|
||||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
|
||||
xfs_trans_ijoin(tp, ip);
|
||||
|
||||
/*
|
||||
* Only change the c/mtime if we are changing the size or we are
|
||||
* explicitly asked to change it. This handles the semantic difference
|
||||
* between truncate() and ftruncate() as implemented in the VFS.
|
||||
*
|
||||
* The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
|
||||
* special case where we need to update the times despite not having
|
||||
* these flags set. For all other operations the VFS set these flags
|
||||
* explicitly if it wants a timestamp update.
|
||||
*/
|
||||
if (iattr->ia_size != ip->i_size &&
|
||||
(!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
|
||||
iattr->ia_ctime = iattr->ia_mtime =
|
||||
current_fs_time(inode->i_sb);
|
||||
mask |= ATTR_CTIME | ATTR_MTIME;
|
||||
}
|
||||
|
||||
if (iattr->ia_size > ip->i_size) {
|
||||
ip->i_d.di_size = iattr->ia_size;
|
||||
ip->i_size = iattr->ia_size;
|
||||
} else if (iattr->ia_size <= ip->i_size ||
|
||||
(iattr->ia_size == 0 && ip->i_d.di_nextents)) {
|
||||
error = xfs_itruncate_data(&tp, ip, iattr->ia_size);
|
||||
if (error)
|
||||
goto out_trans_abort;
|
||||
|
||||
/*
|
||||
* Truncated "down", so we're removing references to old data
|
||||
* here - if we delay flushing for a long time, we expose
|
||||
* ourselves unduly to the notorious NULL files problem. So,
|
||||
* we mark this inode and flush it when the file is closed,
|
||||
* and do not wait the usual (long) time for writeout.
|
||||
*/
|
||||
xfs_iflags_set(ip, XFS_ITRUNCATED);
|
||||
}
|
||||
|
||||
if (mask & ATTR_CTIME) {
|
||||
inode->i_ctime = iattr->ia_ctime;
|
||||
ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
|
||||
ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
|
||||
ip->i_update_core = 1;
|
||||
}
|
||||
if (mask & ATTR_MTIME) {
|
||||
inode->i_mtime = iattr->ia_mtime;
|
||||
ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
|
||||
ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
|
||||
ip->i_update_core = 1;
|
||||
}
|
||||
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
|
||||
XFS_STATS_INC(xs_ig_attrchg);
|
||||
|
||||
if (mp->m_flags & XFS_MOUNT_WSYNC)
|
||||
xfs_trans_set_sync(tp);
|
||||
|
||||
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
|
||||
out_unlock:
|
||||
if (lock_flags)
|
||||
xfs_iunlock(ip, lock_flags);
|
||||
return error;
|
||||
|
||||
out_trans_abort:
|
||||
commit_flags |= XFS_TRANS_ABORT;
|
||||
out_trans_cancel:
|
||||
xfs_trans_cancel(tp, commit_flags);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_vn_setattr(
|
||||
struct dentry *dentry,
|
||||
struct iattr *iattr)
|
||||
{
|
||||
return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0);
|
||||
if (iattr->ia_valid & ATTR_SIZE)
|
||||
return -xfs_setattr_size(XFS_I(dentry->d_inode), iattr, 0);
|
||||
return -xfs_setattr_nonsize(XFS_I(dentry->d_inode), iattr, 0);
|
||||
}
|
||||
|
||||
#define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
|
||||
|
@ -33,7 +33,6 @@
|
||||
#endif
|
||||
|
||||
#include <xfs_types.h>
|
||||
#include <xfs_arch.h>
|
||||
|
||||
#include <kmem.h>
|
||||
#include <mrlock.h>
|
||||
@ -88,6 +87,12 @@
|
||||
#include <xfs_buf.h>
|
||||
#include <xfs_message.h>
|
||||
|
||||
#ifdef __BIG_ENDIAN
|
||||
#define XFS_NATIVE_HOST 1
|
||||
#else
|
||||
#undef XFS_NATIVE_HOST
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Feature macros (disable/enable)
|
||||
*/
|
||||
|
@ -33,7 +33,6 @@
|
||||
#include "xfs_dinode.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_btree.h"
|
||||
#include "xfs_btree_trace.h"
|
||||
#include "xfs_ialloc.h"
|
||||
#include "xfs_bmap.h"
|
||||
#include "xfs_rtalloc.h"
|
||||
@ -1412,37 +1411,35 @@ xfs_fs_fill_super(
|
||||
sb->s_time_gran = 1;
|
||||
set_posix_acl_flag(sb);
|
||||
|
||||
error = xfs_syncd_init(mp);
|
||||
if (error)
|
||||
goto out_filestream_unmount;
|
||||
|
||||
xfs_inode_shrinker_register(mp);
|
||||
|
||||
error = xfs_mountfs(mp);
|
||||
if (error)
|
||||
goto out_syncd_stop;
|
||||
goto out_filestream_unmount;
|
||||
|
||||
error = xfs_syncd_init(mp);
|
||||
if (error)
|
||||
goto out_unmount;
|
||||
|
||||
root = igrab(VFS_I(mp->m_rootip));
|
||||
if (!root) {
|
||||
error = ENOENT;
|
||||
goto fail_unmount;
|
||||
goto out_syncd_stop;
|
||||
}
|
||||
if (is_bad_inode(root)) {
|
||||
error = EINVAL;
|
||||
goto fail_vnrele;
|
||||
goto out_syncd_stop;
|
||||
}
|
||||
sb->s_root = d_alloc_root(root);
|
||||
if (!sb->s_root) {
|
||||
error = ENOMEM;
|
||||
goto fail_vnrele;
|
||||
goto out_iput;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_syncd_stop:
|
||||
xfs_inode_shrinker_unregister(mp);
|
||||
xfs_syncd_stop(mp);
|
||||
out_filestream_unmount:
|
||||
xfs_inode_shrinker_unregister(mp);
|
||||
xfs_filestream_unmount(mp);
|
||||
out_free_sb:
|
||||
xfs_freesb(mp);
|
||||
@ -1456,17 +1453,12 @@ xfs_fs_fill_super(
|
||||
out:
|
||||
return -error;
|
||||
|
||||
fail_vnrele:
|
||||
if (sb->s_root) {
|
||||
dput(sb->s_root);
|
||||
sb->s_root = NULL;
|
||||
} else {
|
||||
iput(root);
|
||||
}
|
||||
|
||||
fail_unmount:
|
||||
xfs_inode_shrinker_unregister(mp);
|
||||
out_iput:
|
||||
iput(root);
|
||||
out_syncd_stop:
|
||||
xfs_syncd_stop(mp);
|
||||
out_unmount:
|
||||
xfs_inode_shrinker_unregister(mp);
|
||||
|
||||
/*
|
||||
* Blow away any referenced inode in the filestreams cache.
|
||||
|
@ -359,14 +359,12 @@ xfs_quiesce_data(
|
||||
{
|
||||
int error, error2 = 0;
|
||||
|
||||
/* push non-blocking */
|
||||
xfs_sync_data(mp, 0);
|
||||
xfs_qm_sync(mp, SYNC_TRYLOCK);
|
||||
|
||||
/* push and block till complete */
|
||||
xfs_sync_data(mp, SYNC_WAIT);
|
||||
xfs_qm_sync(mp, SYNC_WAIT);
|
||||
|
||||
/* force out the newly dirtied log buffers */
|
||||
xfs_log_force(mp, XFS_LOG_SYNC);
|
||||
|
||||
/* write superblock and hoover up shutdown errors */
|
||||
error = xfs_sync_fsdata(mp);
|
||||
|
||||
@ -436,7 +434,7 @@ xfs_quiesce_attr(
|
||||
WARN_ON(atomic_read(&mp->m_active_trans) != 0);
|
||||
|
||||
/* Push the superblock and write an unmount record */
|
||||
error = xfs_log_sbcount(mp, 1);
|
||||
error = xfs_log_sbcount(mp);
|
||||
if (error)
|
||||
xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
|
||||
"Frozen image may not be consistent.");
|
||||
|
@ -21,14 +21,6 @@
|
||||
struct xfs_mount;
|
||||
struct xfs_perag;
|
||||
|
||||
typedef struct xfs_sync_work {
|
||||
struct list_head w_list;
|
||||
struct xfs_mount *w_mount;
|
||||
void *w_data; /* syncer routine argument */
|
||||
void (*w_syncer)(struct xfs_mount *, void *);
|
||||
struct completion *w_completion;
|
||||
} xfs_sync_work_t;
|
||||
|
||||
#define SYNC_WAIT 0x0001 /* wait for i/o to complete */
|
||||
#define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */
|
||||
|
||||
|
@ -293,7 +293,7 @@ DECLARE_EVENT_CLASS(xfs_buf_class,
|
||||
__entry->buffer_length = bp->b_buffer_length;
|
||||
__entry->hold = atomic_read(&bp->b_hold);
|
||||
__entry->pincount = atomic_read(&bp->b_pin_count);
|
||||
__entry->lockval = xfs_buf_lock_value(bp);
|
||||
__entry->lockval = bp->b_sema.count;
|
||||
__entry->flags = bp->b_flags;
|
||||
__entry->caller_ip = caller_ip;
|
||||
),
|
||||
@ -323,7 +323,7 @@ DEFINE_BUF_EVENT(xfs_buf_bawrite);
|
||||
DEFINE_BUF_EVENT(xfs_buf_bdwrite);
|
||||
DEFINE_BUF_EVENT(xfs_buf_lock);
|
||||
DEFINE_BUF_EVENT(xfs_buf_lock_done);
|
||||
DEFINE_BUF_EVENT(xfs_buf_cond_lock);
|
||||
DEFINE_BUF_EVENT(xfs_buf_trylock);
|
||||
DEFINE_BUF_EVENT(xfs_buf_unlock);
|
||||
DEFINE_BUF_EVENT(xfs_buf_iowait);
|
||||
DEFINE_BUF_EVENT(xfs_buf_iowait_done);
|
||||
@ -366,7 +366,7 @@ DECLARE_EVENT_CLASS(xfs_buf_flags_class,
|
||||
__entry->flags = flags;
|
||||
__entry->hold = atomic_read(&bp->b_hold);
|
||||
__entry->pincount = atomic_read(&bp->b_pin_count);
|
||||
__entry->lockval = xfs_buf_lock_value(bp);
|
||||
__entry->lockval = bp->b_sema.count;
|
||||
__entry->caller_ip = caller_ip;
|
||||
),
|
||||
TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
|
||||
@ -409,7 +409,7 @@ TRACE_EVENT(xfs_buf_ioerror,
|
||||
__entry->buffer_length = bp->b_buffer_length;
|
||||
__entry->hold = atomic_read(&bp->b_hold);
|
||||
__entry->pincount = atomic_read(&bp->b_pin_count);
|
||||
__entry->lockval = xfs_buf_lock_value(bp);
|
||||
__entry->lockval = bp->b_sema.count;
|
||||
__entry->error = error;
|
||||
__entry->flags = bp->b_flags;
|
||||
__entry->caller_ip = caller_ip;
|
||||
@ -454,7 +454,7 @@ DECLARE_EVENT_CLASS(xfs_buf_item_class,
|
||||
__entry->buf_flags = bip->bli_buf->b_flags;
|
||||
__entry->buf_hold = atomic_read(&bip->bli_buf->b_hold);
|
||||
__entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count);
|
||||
__entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf);
|
||||
__entry->buf_lockval = bip->bli_buf->b_sema.count;
|
||||
__entry->li_desc = bip->bli_item.li_desc;
|
||||
__entry->li_flags = bip->bli_item.li_flags;
|
||||
),
|
||||
@ -998,7 +998,8 @@ DECLARE_EVENT_CLASS(xfs_simple_io_class,
|
||||
TP_STRUCT__entry(
|
||||
__field(dev_t, dev)
|
||||
__field(xfs_ino_t, ino)
|
||||
__field(loff_t, size)
|
||||
__field(loff_t, isize)
|
||||
__field(loff_t, disize)
|
||||
__field(loff_t, new_size)
|
||||
__field(loff_t, offset)
|
||||
__field(size_t, count)
|
||||
@ -1006,16 +1007,18 @@ DECLARE_EVENT_CLASS(xfs_simple_io_class,
|
||||
TP_fast_assign(
|
||||
__entry->dev = VFS_I(ip)->i_sb->s_dev;
|
||||
__entry->ino = ip->i_ino;
|
||||
__entry->size = ip->i_d.di_size;
|
||||
__entry->isize = ip->i_size;
|
||||
__entry->disize = ip->i_d.di_size;
|
||||
__entry->new_size = ip->i_new_size;
|
||||
__entry->offset = offset;
|
||||
__entry->count = count;
|
||||
),
|
||||
TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
|
||||
TP_printk("dev %d:%d ino 0x%llx isize 0x%llx disize 0x%llx new_size 0x%llx "
|
||||
"offset 0x%llx count %zd",
|
||||
MAJOR(__entry->dev), MINOR(__entry->dev),
|
||||
__entry->ino,
|
||||
__entry->size,
|
||||
__entry->isize,
|
||||
__entry->disize,
|
||||
__entry->new_size,
|
||||
__entry->offset,
|
||||
__entry->count)
|
||||
@ -1028,40 +1031,7 @@ DEFINE_EVENT(xfs_simple_io_class, name, \
|
||||
DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
|
||||
DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
|
||||
DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
|
||||
|
||||
|
||||
TRACE_EVENT(xfs_itruncate_start,
|
||||
TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size, int flag,
|
||||
xfs_off_t toss_start, xfs_off_t toss_finish),
|
||||
TP_ARGS(ip, new_size, flag, toss_start, toss_finish),
|
||||
TP_STRUCT__entry(
|
||||
__field(dev_t, dev)
|
||||
__field(xfs_ino_t, ino)
|
||||
__field(xfs_fsize_t, size)
|
||||
__field(xfs_fsize_t, new_size)
|
||||
__field(xfs_off_t, toss_start)
|
||||
__field(xfs_off_t, toss_finish)
|
||||
__field(int, flag)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->dev = VFS_I(ip)->i_sb->s_dev;
|
||||
__entry->ino = ip->i_ino;
|
||||
__entry->size = ip->i_d.di_size;
|
||||
__entry->new_size = new_size;
|
||||
__entry->toss_start = toss_start;
|
||||
__entry->toss_finish = toss_finish;
|
||||
__entry->flag = flag;
|
||||
),
|
||||
TP_printk("dev %d:%d ino 0x%llx %s size 0x%llx new_size 0x%llx "
|
||||
"toss start 0x%llx toss finish 0x%llx",
|
||||
MAJOR(__entry->dev), MINOR(__entry->dev),
|
||||
__entry->ino,
|
||||
__print_flags(__entry->flag, "|", XFS_ITRUNC_FLAGS),
|
||||
__entry->size,
|
||||
__entry->new_size,
|
||||
__entry->toss_start,
|
||||
__entry->toss_finish)
|
||||
);
|
||||
DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize);
|
||||
|
||||
DECLARE_EVENT_CLASS(xfs_itrunc_class,
|
||||
TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),
|
||||
@ -1089,8 +1059,8 @@ DECLARE_EVENT_CLASS(xfs_itrunc_class,
|
||||
DEFINE_EVENT(xfs_itrunc_class, name, \
|
||||
TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
|
||||
TP_ARGS(ip, new_size))
|
||||
DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_start);
|
||||
DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_end);
|
||||
DEFINE_ITRUNC_EVENT(xfs_itruncate_data_start);
|
||||
DEFINE_ITRUNC_EVENT(xfs_itruncate_data_end);
|
||||
|
||||
TRACE_EVENT(xfs_pagecache_inval,
|
||||
TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish),
|
||||
|
@ -220,7 +220,7 @@ xfs_qm_adjust_dqtimers(
|
||||
{
|
||||
ASSERT(d->d_id);
|
||||
|
||||
#ifdef QUOTADEBUG
|
||||
#ifdef DEBUG
|
||||
if (d->d_blk_hardlimit)
|
||||
ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
|
||||
be64_to_cpu(d->d_blk_hardlimit));
|
||||
@ -231,6 +231,7 @@ xfs_qm_adjust_dqtimers(
|
||||
ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
|
||||
be64_to_cpu(d->d_rtb_hardlimit));
|
||||
#endif
|
||||
|
||||
if (!d->d_btimer) {
|
||||
if ((d->d_blk_softlimit &&
|
||||
(be64_to_cpu(d->d_bcount) >=
|
||||
@ -318,7 +319,7 @@ xfs_qm_init_dquot_blk(
|
||||
|
||||
ASSERT(tp);
|
||||
ASSERT(XFS_BUF_ISBUSY(bp));
|
||||
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
|
||||
ASSERT(xfs_buf_islocked(bp));
|
||||
|
||||
d = (xfs_dqblk_t *)XFS_BUF_PTR(bp);
|
||||
|
||||
@ -534,7 +535,7 @@ xfs_qm_dqtobp(
|
||||
}
|
||||
|
||||
ASSERT(XFS_BUF_ISBUSY(bp));
|
||||
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
|
||||
ASSERT(xfs_buf_islocked(bp));
|
||||
|
||||
/*
|
||||
* calculate the location of the dquot inside the buffer.
|
||||
@ -622,7 +623,7 @@ xfs_qm_dqread(
|
||||
* brelse it because we have the changes incore.
|
||||
*/
|
||||
ASSERT(XFS_BUF_ISBUSY(bp));
|
||||
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
|
||||
ASSERT(xfs_buf_islocked(bp));
|
||||
xfs_trans_brelse(tp, bp);
|
||||
|
||||
return (error);
|
||||
@ -1423,45 +1424,6 @@ xfs_qm_dqpurge(
|
||||
}
|
||||
|
||||
|
||||
#ifdef QUOTADEBUG
|
||||
void
|
||||
xfs_qm_dqprint(xfs_dquot_t *dqp)
|
||||
{
|
||||
struct xfs_mount *mp = dqp->q_mount;
|
||||
|
||||
xfs_debug(mp, "-----------KERNEL DQUOT----------------");
|
||||
xfs_debug(mp, "---- dquotID = %d",
|
||||
(int)be32_to_cpu(dqp->q_core.d_id));
|
||||
xfs_debug(mp, "---- type = %s", DQFLAGTO_TYPESTR(dqp));
|
||||
xfs_debug(mp, "---- fs = 0x%p", dqp->q_mount);
|
||||
xfs_debug(mp, "---- blkno = 0x%x", (int) dqp->q_blkno);
|
||||
xfs_debug(mp, "---- boffset = 0x%x", (int) dqp->q_bufoffset);
|
||||
xfs_debug(mp, "---- blkhlimit = %Lu (0x%x)",
|
||||
be64_to_cpu(dqp->q_core.d_blk_hardlimit),
|
||||
(int)be64_to_cpu(dqp->q_core.d_blk_hardlimit));
|
||||
xfs_debug(mp, "---- blkslimit = %Lu (0x%x)",
|
||||
be64_to_cpu(dqp->q_core.d_blk_softlimit),
|
||||
(int)be64_to_cpu(dqp->q_core.d_blk_softlimit));
|
||||
xfs_debug(mp, "---- inohlimit = %Lu (0x%x)",
|
||||
be64_to_cpu(dqp->q_core.d_ino_hardlimit),
|
||||
(int)be64_to_cpu(dqp->q_core.d_ino_hardlimit));
|
||||
xfs_debug(mp, "---- inoslimit = %Lu (0x%x)",
|
||||
be64_to_cpu(dqp->q_core.d_ino_softlimit),
|
||||
(int)be64_to_cpu(dqp->q_core.d_ino_softlimit));
|
||||
xfs_debug(mp, "---- bcount = %Lu (0x%x)",
|
||||
be64_to_cpu(dqp->q_core.d_bcount),
|
||||
(int)be64_to_cpu(dqp->q_core.d_bcount));
|
||||
xfs_debug(mp, "---- icount = %Lu (0x%x)",
|
||||
be64_to_cpu(dqp->q_core.d_icount),
|
||||
(int)be64_to_cpu(dqp->q_core.d_icount));
|
||||
xfs_debug(mp, "---- btimer = %d",
|
||||
(int)be32_to_cpu(dqp->q_core.d_btimer));
|
||||
xfs_debug(mp, "---- itimer = %d",
|
||||
(int)be32_to_cpu(dqp->q_core.d_itimer));
|
||||
xfs_debug(mp, "---------------------------");
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Give the buffer a little push if it is incore and
|
||||
* wait on the flush lock.
|
||||
|
@ -116,12 +116,6 @@ static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
|
||||
(XFS_IS_UQUOTA_ON((d)->q_mount)) : \
|
||||
(XFS_IS_OQUOTA_ON((d)->q_mount))))
|
||||
|
||||
#ifdef QUOTADEBUG
|
||||
extern void xfs_qm_dqprint(xfs_dquot_t *);
|
||||
#else
|
||||
#define xfs_qm_dqprint(a)
|
||||
#endif
|
||||
|
||||
extern void xfs_qm_dqdestroy(xfs_dquot_t *);
|
||||
extern int xfs_qm_dqflush(xfs_dquot_t *, uint);
|
||||
extern int xfs_qm_dqpurge(xfs_dquot_t *);
|
||||
|
@ -67,32 +67,6 @@ static struct shrinker xfs_qm_shaker = {
|
||||
.seeks = DEFAULT_SEEKS,
|
||||
};
|
||||
|
||||
#ifdef DEBUG
|
||||
extern struct mutex qcheck_lock;
|
||||
#endif
|
||||
|
||||
#ifdef QUOTADEBUG
|
||||
static void
|
||||
xfs_qm_dquot_list_print(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
xfs_dquot_t *dqp;
|
||||
int i = 0;
|
||||
|
||||
list_for_each_entry(dqp, &mp->m_quotainfo->qi_dqlist_lock, qi_mplist) {
|
||||
xfs_debug(mp, " %d. \"%d (%s)\" "
|
||||
"bcnt = %lld, icnt = %lld, refs = %d",
|
||||
i++, be32_to_cpu(dqp->q_core.d_id),
|
||||
DQFLAGTO_TYPESTR(dqp),
|
||||
(long long)be64_to_cpu(dqp->q_core.d_bcount),
|
||||
(long long)be64_to_cpu(dqp->q_core.d_icount),
|
||||
dqp->q_nrefs);
|
||||
}
|
||||
}
|
||||
#else
|
||||
static void xfs_qm_dquot_list_print(struct xfs_mount *mp) { }
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Initialize the XQM structure.
|
||||
* Note that there is not one quota manager per file system.
|
||||
@ -165,9 +139,6 @@ xfs_Gqm_init(void)
|
||||
atomic_set(&xqm->qm_totaldquots, 0);
|
||||
xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO;
|
||||
xqm->qm_nrefs = 0;
|
||||
#ifdef DEBUG
|
||||
mutex_init(&qcheck_lock);
|
||||
#endif
|
||||
return xqm;
|
||||
|
||||
out_free_udqhash:
|
||||
@ -204,9 +175,6 @@ xfs_qm_destroy(
|
||||
mutex_lock(&xqm->qm_dqfrlist_lock);
|
||||
list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) {
|
||||
xfs_dqlock(dqp);
|
||||
#ifdef QUOTADEBUG
|
||||
xfs_debug(dqp->q_mount, "FREELIST destroy 0x%p", dqp);
|
||||
#endif
|
||||
list_del_init(&dqp->q_freelist);
|
||||
xfs_Gqm->qm_dqfrlist_cnt--;
|
||||
xfs_dqunlock(dqp);
|
||||
@ -214,9 +182,6 @@ xfs_qm_destroy(
|
||||
}
|
||||
mutex_unlock(&xqm->qm_dqfrlist_lock);
|
||||
mutex_destroy(&xqm->qm_dqfrlist_lock);
|
||||
#ifdef DEBUG
|
||||
mutex_destroy(&qcheck_lock);
|
||||
#endif
|
||||
kmem_free(xqm);
|
||||
}
|
||||
|
||||
@ -409,11 +374,6 @@ xfs_qm_mount_quotas(
|
||||
xfs_warn(mp, "Failed to initialize disk quotas.");
|
||||
return;
|
||||
}
|
||||
|
||||
#ifdef QUOTADEBUG
|
||||
if (XFS_IS_QUOTA_ON(mp))
|
||||
xfs_qm_internalqcheck(mp);
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
@ -866,8 +826,8 @@ xfs_qm_dqattach_locked(
|
||||
}
|
||||
|
||||
done:
|
||||
#ifdef QUOTADEBUG
|
||||
if (! error) {
|
||||
#ifdef DEBUG
|
||||
if (!error) {
|
||||
if (XFS_IS_UQUOTA_ON(mp))
|
||||
ASSERT(ip->i_udquot);
|
||||
if (XFS_IS_OQUOTA_ON(mp))
|
||||
@ -1733,8 +1693,6 @@ xfs_qm_quotacheck(
|
||||
mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD);
|
||||
mp->m_qflags |= flags;
|
||||
|
||||
xfs_qm_dquot_list_print(mp);
|
||||
|
||||
error_return:
|
||||
if (error) {
|
||||
xfs_warn(mp,
|
||||
@ -2096,9 +2054,6 @@ xfs_qm_write_sb_changes(
|
||||
xfs_trans_t *tp;
|
||||
int error;
|
||||
|
||||
#ifdef QUOTADEBUG
|
||||
xfs_notice(mp, "Writing superblock quota changes");
|
||||
#endif
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
|
||||
if ((error = xfs_trans_reserve(tp, 0,
|
||||
mp->m_sb.sb_sectsize + 128, 0,
|
||||
|
@ -163,10 +163,4 @@ extern int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *);
|
||||
extern int xfs_qm_scall_quotaon(xfs_mount_t *, uint);
|
||||
extern int xfs_qm_scall_quotaoff(xfs_mount_t *, uint);
|
||||
|
||||
#ifdef DEBUG
|
||||
extern int xfs_qm_internalqcheck(xfs_mount_t *);
|
||||
#else
|
||||
#define xfs_qm_internalqcheck(mp) (0)
|
||||
#endif
|
||||
|
||||
#endif /* __XFS_QM_H__ */
|
||||
|
@ -263,7 +263,7 @@ xfs_qm_scall_trunc_qfile(
|
||||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
xfs_trans_ijoin(tp, ip);
|
||||
|
||||
error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, 1);
|
||||
error = xfs_itruncate_data(&tp, ip, 0);
|
||||
if (error) {
|
||||
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
|
||||
XFS_TRANS_ABORT);
|
||||
@ -622,7 +622,6 @@ xfs_qm_scall_setqlim(
|
||||
xfs_trans_log_dquot(tp, dqp);
|
||||
|
||||
error = xfs_trans_commit(tp, 0);
|
||||
xfs_qm_dqprint(dqp);
|
||||
xfs_qm_dqrele(dqp);
|
||||
|
||||
out_unlock:
|
||||
@ -657,7 +656,6 @@ xfs_qm_scall_getquota(
|
||||
xfs_qm_dqput(dqp);
|
||||
return XFS_ERROR(ENOENT);
|
||||
}
|
||||
/* xfs_qm_dqprint(dqp); */
|
||||
/*
|
||||
* Convert the disk dquot to the exportable format
|
||||
*/
|
||||
@ -906,354 +904,3 @@ xfs_qm_dqrele_all_inodes(
|
||||
ASSERT(mp->m_quotainfo);
|
||||
xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags);
|
||||
}
|
||||
|
||||
/*------------------------------------------------------------------------*/
|
||||
#ifdef DEBUG
|
||||
/*
|
||||
* This contains all the test functions for XFS disk quotas.
|
||||
* Currently it does a quota accounting check. ie. it walks through
|
||||
* all inodes in the file system, calculating the dquot accounting fields,
|
||||
* and prints out any inconsistencies.
|
||||
*/
|
||||
xfs_dqhash_t *qmtest_udqtab;
|
||||
xfs_dqhash_t *qmtest_gdqtab;
|
||||
int qmtest_hashmask;
|
||||
int qmtest_nfails;
|
||||
struct mutex qcheck_lock;
|
||||
|
||||
#define DQTEST_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \
|
||||
(__psunsigned_t)(id)) & \
|
||||
(qmtest_hashmask - 1))
|
||||
|
||||
#define DQTEST_HASH(mp, id, type) ((type & XFS_DQ_USER) ? \
|
||||
(qmtest_udqtab + \
|
||||
DQTEST_HASHVAL(mp, id)) : \
|
||||
(qmtest_gdqtab + \
|
||||
DQTEST_HASHVAL(mp, id)))
|
||||
|
||||
#define DQTEST_LIST_PRINT(l, NXT, title) \
|
||||
{ \
|
||||
xfs_dqtest_t *dqp; int i = 0;\
|
||||
xfs_debug(NULL, "%s (#%d)", title, (int) (l)->qh_nelems); \
|
||||
for (dqp = (xfs_dqtest_t *)(l)->qh_next; dqp != NULL; \
|
||||
dqp = (xfs_dqtest_t *)dqp->NXT) { \
|
||||
xfs_debug(dqp->q_mount, \
|
||||
" %d. \"%d (%s)\" bcnt = %d, icnt = %d", \
|
||||
++i, dqp->d_id, DQFLAGTO_TYPESTR(dqp), \
|
||||
dqp->d_bcount, dqp->d_icount); } \
|
||||
}
|
||||
|
||||
typedef struct dqtest {
|
||||
uint dq_flags; /* various flags (XFS_DQ_*) */
|
||||
struct list_head q_hashlist;
|
||||
xfs_dqhash_t *q_hash; /* the hashchain header */
|
||||
xfs_mount_t *q_mount; /* filesystem this relates to */
|
||||
xfs_dqid_t d_id; /* user id or group id */
|
||||
xfs_qcnt_t d_bcount; /* # disk blocks owned by the user */
|
||||
xfs_qcnt_t d_icount; /* # inodes owned by the user */
|
||||
} xfs_dqtest_t;
|
||||
|
||||
STATIC void
|
||||
xfs_qm_hashinsert(xfs_dqhash_t *h, xfs_dqtest_t *dqp)
|
||||
{
|
||||
list_add(&dqp->q_hashlist, &h->qh_list);
|
||||
h->qh_version++;
|
||||
h->qh_nelems++;
|
||||
}
|
||||
STATIC void
|
||||
xfs_qm_dqtest_print(
|
||||
struct xfs_mount *mp,
|
||||
struct dqtest *d)
|
||||
{
|
||||
xfs_debug(mp, "-----------DQTEST DQUOT----------------");
|
||||
xfs_debug(mp, "---- dquot ID = %d", d->d_id);
|
||||
xfs_debug(mp, "---- fs = 0x%p", d->q_mount);
|
||||
xfs_debug(mp, "---- bcount = %Lu (0x%x)",
|
||||
d->d_bcount, (int)d->d_bcount);
|
||||
xfs_debug(mp, "---- icount = %Lu (0x%x)",
|
||||
d->d_icount, (int)d->d_icount);
|
||||
xfs_debug(mp, "---------------------------");
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xfs_qm_dqtest_failed(
|
||||
xfs_dqtest_t *d,
|
||||
xfs_dquot_t *dqp,
|
||||
char *reason,
|
||||
xfs_qcnt_t a,
|
||||
xfs_qcnt_t b,
|
||||
int error)
|
||||
{
|
||||
qmtest_nfails++;
|
||||
if (error)
|
||||
xfs_debug(dqp->q_mount,
|
||||
"quotacheck failed id=%d, err=%d\nreason: %s",
|
||||
d->d_id, error, reason);
|
||||
else
|
||||
xfs_debug(dqp->q_mount,
|
||||
"quotacheck failed id=%d (%s) [%d != %d]",
|
||||
d->d_id, reason, (int)a, (int)b);
|
||||
xfs_qm_dqtest_print(dqp->q_mount, d);
|
||||
if (dqp)
|
||||
xfs_qm_dqprint(dqp);
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_dqtest_cmp2(
|
||||
xfs_dqtest_t *d,
|
||||
xfs_dquot_t *dqp)
|
||||
{
|
||||
int err = 0;
|
||||
if (be64_to_cpu(dqp->q_core.d_icount) != d->d_icount) {
|
||||
xfs_qm_dqtest_failed(d, dqp, "icount mismatch",
|
||||
be64_to_cpu(dqp->q_core.d_icount),
|
||||
d->d_icount, 0);
|
||||
err++;
|
||||
}
|
||||
if (be64_to_cpu(dqp->q_core.d_bcount) != d->d_bcount) {
|
||||
xfs_qm_dqtest_failed(d, dqp, "bcount mismatch",
|
||||
be64_to_cpu(dqp->q_core.d_bcount),
|
||||
d->d_bcount, 0);
|
||||
err++;
|
||||
}
|
||||
if (dqp->q_core.d_blk_softlimit &&
|
||||
be64_to_cpu(dqp->q_core.d_bcount) >=
|
||||
be64_to_cpu(dqp->q_core.d_blk_softlimit)) {
|
||||
if (!dqp->q_core.d_btimer && dqp->q_core.d_id) {
|
||||
xfs_debug(dqp->q_mount,
|
||||
"%d [%s] BLK TIMER NOT STARTED",
|
||||
d->d_id, DQFLAGTO_TYPESTR(d));
|
||||
err++;
|
||||
}
|
||||
}
|
||||
if (dqp->q_core.d_ino_softlimit &&
|
||||
be64_to_cpu(dqp->q_core.d_icount) >=
|
||||
be64_to_cpu(dqp->q_core.d_ino_softlimit)) {
|
||||
if (!dqp->q_core.d_itimer && dqp->q_core.d_id) {
|
||||
xfs_debug(dqp->q_mount,
|
||||
"%d [%s] INO TIMER NOT STARTED",
|
||||
d->d_id, DQFLAGTO_TYPESTR(d));
|
||||
err++;
|
||||
}
|
||||
}
|
||||
#ifdef QUOTADEBUG
|
||||
if (!err) {
|
||||
xfs_debug(dqp->q_mount, "%d [%s] qchecked",
|
||||
d->d_id, DQFLAGTO_TYPESTR(d));
|
||||
}
|
||||
#endif
|
||||
return (err);
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xfs_dqtest_cmp(
|
||||
xfs_dqtest_t *d)
|
||||
{
|
||||
xfs_dquot_t *dqp;
|
||||
int error;
|
||||
|
||||
/* xfs_qm_dqtest_print(d); */
|
||||
if ((error = xfs_qm_dqget(d->q_mount, NULL, d->d_id, d->dq_flags, 0,
|
||||
&dqp))) {
|
||||
xfs_qm_dqtest_failed(d, NULL, "dqget failed", 0, 0, error);
|
||||
return;
|
||||
}
|
||||
xfs_dqtest_cmp2(d, dqp);
|
||||
xfs_qm_dqput(dqp);
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_qm_internalqcheck_dqget(
|
||||
xfs_mount_t *mp,
|
||||
xfs_dqid_t id,
|
||||
uint type,
|
||||
xfs_dqtest_t **O_dq)
|
||||
{
|
||||
xfs_dqtest_t *d;
|
	xfs_dqhash_t	*h;

	h = DQTEST_HASH(mp, id, type);
	list_for_each_entry(d, &h->qh_list, q_hashlist) {
		if (d->d_id == id && mp == d->q_mount) {
			*O_dq = d;
			return (0);
		}
	}
	d = kmem_zalloc(sizeof(xfs_dqtest_t), KM_SLEEP);
	d->dq_flags = type;
	d->d_id = id;
	d->q_mount = mp;
	d->q_hash = h;
	INIT_LIST_HEAD(&d->q_hashlist);
	xfs_qm_hashinsert(h, d);
	*O_dq = d;
	return (0);
}

STATIC void
xfs_qm_internalqcheck_get_dquots(
	xfs_mount_t	*mp,
	xfs_dqid_t	uid,
	xfs_dqid_t	projid,
	xfs_dqid_t	gid,
	xfs_dqtest_t	**ud,
	xfs_dqtest_t	**gd)
{
	if (XFS_IS_UQUOTA_ON(mp))
		xfs_qm_internalqcheck_dqget(mp, uid, XFS_DQ_USER, ud);
	if (XFS_IS_GQUOTA_ON(mp))
		xfs_qm_internalqcheck_dqget(mp, gid, XFS_DQ_GROUP, gd);
	else if (XFS_IS_PQUOTA_ON(mp))
		xfs_qm_internalqcheck_dqget(mp, projid, XFS_DQ_PROJ, gd);
}


STATIC void
xfs_qm_internalqcheck_dqadjust(
	xfs_inode_t	*ip,
	xfs_dqtest_t	*d)
{
	d->d_icount++;
	d->d_bcount += (xfs_qcnt_t)ip->i_d.di_nblocks;
}

STATIC int
xfs_qm_internalqcheck_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* bulkstat result code */
{
	xfs_inode_t	*ip;
	xfs_dqtest_t	*ud, *gd;
	uint		lock_flags;
	boolean_t	ipreleased;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
		*res = BULKSTAT_RV_NOTHING;
		xfs_debug(mp, "%s: ino=%llu, uqino=%llu, gqino=%llu\n",
			__func__, (unsigned long long) ino,
			(unsigned long long) mp->m_sb.sb_uquotino,
			(unsigned long long) mp->m_sb.sb_gquotino);
		return XFS_ERROR(EINVAL);
	}
	ipreleased = B_FALSE;
 again:
	lock_flags = XFS_ILOCK_SHARED;
	if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip))) {
		*res = BULKSTAT_RV_NOTHING;
		return (error);
	}

	/*
	 * This inode can have blocks after eof which can get released
	 * when we send it to inactive. Since we don't check the dquot
	 * until the after all our calculations are done, we must get rid
	 * of those now.
	 */
	if (! ipreleased) {
		xfs_iunlock(ip, lock_flags);
		IRELE(ip);
		ipreleased = B_TRUE;
		goto again;
	}
	xfs_qm_internalqcheck_get_dquots(mp,
					(xfs_dqid_t) ip->i_d.di_uid,
					(xfs_dqid_t) xfs_get_projid(ip),
					(xfs_dqid_t) ip->i_d.di_gid,
					&ud, &gd);
	if (XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ud);
		xfs_qm_internalqcheck_dqadjust(ip, ud);
	}
	if (XFS_IS_OQUOTA_ON(mp)) {
		ASSERT(gd);
		xfs_qm_internalqcheck_dqadjust(ip, gd);
	}
	xfs_iunlock(ip, lock_flags);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return (0);
}


/* PRIVATE, debugging */
int
xfs_qm_internalqcheck(
	xfs_mount_t	*mp)
{
	xfs_ino_t	lastino;
	int		done, count;
	int		i;
	int		error;

	lastino = 0;
	qmtest_hashmask = 32;
	count = 5;
	done = 0;
	qmtest_nfails = 0;

	if (! XFS_IS_QUOTA_ON(mp))
		return XFS_ERROR(ESRCH);

	xfs_log_force(mp, XFS_LOG_SYNC);
	XFS_bflush(mp->m_ddev_targp);
	xfs_log_force(mp, XFS_LOG_SYNC);
	XFS_bflush(mp->m_ddev_targp);

	mutex_lock(&qcheck_lock);
	/* There should be absolutely no quota activity while this
	   is going on. */
	qmtest_udqtab = kmem_zalloc(qmtest_hashmask *
				    sizeof(xfs_dqhash_t), KM_SLEEP);
	qmtest_gdqtab = kmem_zalloc(qmtest_hashmask *
				    sizeof(xfs_dqhash_t), KM_SLEEP);
	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				 xfs_qm_internalqcheck_adjust,
				 0, NULL, &done);
		if (error) {
			xfs_debug(mp, "Bulkstat returned error 0x%x", error);
			break;
		}
	} while (!done);

	xfs_debug(mp, "Checking results against system dquots");
	for (i = 0; i < qmtest_hashmask; i++) {
		xfs_dqtest_t	*d, *n;
		xfs_dqhash_t	*h;

		h = &qmtest_udqtab[i];
		list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) {
			xfs_dqtest_cmp(d);
			kmem_free(d);
		}
		h = &qmtest_gdqtab[i];
		list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) {
			xfs_dqtest_cmp(d);
			kmem_free(d);
		}
	}

	if (qmtest_nfails) {
		xfs_debug(mp, "******** quotacheck failed ********");
		xfs_debug(mp, "failures = %d", qmtest_nfails);
	} else {
		xfs_debug(mp, "******** quotacheck successful! ********");
	}
	kmem_free(qmtest_udqtab);
	kmem_free(qmtest_gdqtab);
	mutex_unlock(&qcheck_lock);
	return (qmtest_nfails);
}

#endif /* DEBUG */
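For readers unfamiliar with this debug-only quotacheck: it forces the log, walks every inode with xfs_bulkstat() to tally usage into private shadow hash tables, and then compares those shadow counters against the live dquots. A minimal userspace sketch of that tally-then-compare idea follows; every type and name in it is hypothetical, none of it is XFS API.

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a dquot's counters: one table holds the "live"
 * values, the other is the shadow table the check fills in from scratch. */
struct qcounters {
	unsigned long long icount;	/* inodes charged to this id */
	unsigned long long bcount;	/* blocks charged to this id */
};

/* Hypothetical per-inode record, the moral equivalent of one bulkstat entry. */
struct inode_rec {
	unsigned int id;		/* owning user id */
	unsigned long long nblocks;	/* blocks used by the inode */
};

#define NIDS 4

/* Pass 1: walk every inode and tally its usage into the shadow table. */
static void tally(const struct inode_rec *inodes, int n, struct qcounters *shadow)
{
	for (int i = 0; i < n; i++) {
		shadow[inodes[i].id].icount++;
		shadow[inodes[i].id].bcount += inodes[i].nblocks;
	}
}

/* Pass 2: compare shadow counters against the live ones; count mismatches. */
static int compare(const struct qcounters *live, const struct qcounters *shadow)
{
	int nfails = 0;

	for (int id = 0; id < NIDS; id++) {
		if (live[id].icount != shadow[id].icount ||
		    live[id].bcount != shadow[id].bcount) {
			printf("id %d: live (%llu,%llu) vs shadow (%llu,%llu)\n",
			       id, live[id].icount, live[id].bcount,
			       shadow[id].icount, shadow[id].bcount);
			nfails++;
		}
	}
	return nfails;
}

int main(void)
{
	struct inode_rec inodes[] = { {0, 8}, {1, 4}, {1, 16}, {3, 2} };
	struct qcounters live[NIDS] = { {1, 8}, {2, 20}, {0, 0}, {1, 2} };
	struct qcounters shadow[NIDS] = { {0, 0} };

	tally(inodes, sizeof(inodes) / sizeof(inodes[0]), shadow);
	return compare(live, shadow) ? EXIT_FAILURE : EXIT_SUCCESS;
}
```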
@@ -59,7 +59,7 @@ xfs_trans_dqjoin(
	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);

	/*
	 * Initialize i_transp so we can later determine if this dquot is
	 * Initialize d_transp so we can later determine if this dquot is
	 * associated with this transaction.
	 */
	dqp->q_transp = tp;
@ -387,18 +387,18 @@ xfs_trans_apply_dquot_deltas(
|
||||
qtrx->qt_delbcnt_delta;
|
||||
totalrtbdelta = qtrx->qt_rtbcount_delta +
|
||||
qtrx->qt_delrtb_delta;
|
||||
#ifdef QUOTADEBUG
|
||||
#ifdef DEBUG
|
||||
if (totalbdelta < 0)
|
||||
ASSERT(be64_to_cpu(d->d_bcount) >=
|
||||
(xfs_qcnt_t) -totalbdelta);
|
||||
-totalbdelta);
|
||||
|
||||
if (totalrtbdelta < 0)
|
||||
ASSERT(be64_to_cpu(d->d_rtbcount) >=
|
||||
(xfs_qcnt_t) -totalrtbdelta);
|
||||
-totalrtbdelta);
|
||||
|
||||
if (qtrx->qt_icount_delta < 0)
|
||||
ASSERT(be64_to_cpu(d->d_icount) >=
|
||||
(xfs_qcnt_t) -qtrx->qt_icount_delta);
|
||||
-qtrx->qt_icount_delta);
|
||||
#endif
|
||||
if (totalbdelta)
|
||||
be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);
|
||||
@ -642,11 +642,6 @@ xfs_trans_dqresv(
|
||||
((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
|
||||
(XFS_IS_OQUOTA_ENFORCED(dqp->q_mount) &&
|
||||
(XFS_QM_ISPDQ(dqp) || XFS_QM_ISGDQ(dqp))))) {
|
||||
#ifdef QUOTADEBUG
|
||||
xfs_debug(mp,
|
||||
"BLK Res: nblks=%ld + resbcount=%Ld > hardlimit=%Ld?",
|
||||
nblks, *resbcountp, hardlimit);
|
||||
#endif
|
||||
if (nblks > 0) {
|
||||
/*
|
||||
* dquot is locked already. See if we'd go over the
|
||||
|
@ -22,7 +22,6 @@
|
||||
#define STATIC
|
||||
#define DEBUG 1
|
||||
#define XFS_BUF_LOCK_TRACKING 1
|
||||
/* #define QUOTADEBUG 1 */
|
||||
#endif
|
||||
|
||||
#include <linux-2.6/xfs_linux.h>
|
||||
|
@ -570,9 +570,7 @@ xfs_alloc_ag_vextent_exact(
|
||||
xfs_agblock_t tbno; /* start block of trimmed extent */
|
||||
xfs_extlen_t tlen; /* length of trimmed extent */
|
||||
xfs_agblock_t tend; /* end block of trimmed extent */
|
||||
xfs_agblock_t end; /* end of allocated extent */
|
||||
int i; /* success/failure of operation */
|
||||
xfs_extlen_t rlen; /* length of returned extent */
|
||||
|
||||
ASSERT(args->alignment == 1);
|
||||
|
||||
@ -625,18 +623,16 @@ xfs_alloc_ag_vextent_exact(
|
||||
*
|
||||
* Fix the length according to mod and prod if given.
|
||||
*/
|
||||
end = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen);
|
||||
args->len = end - args->agbno;
|
||||
args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
|
||||
- args->agbno;
|
||||
xfs_alloc_fix_len(args);
|
||||
if (!xfs_alloc_fix_minleft(args))
|
||||
goto not_found;
|
||||
|
||||
rlen = args->len;
|
||||
ASSERT(args->agbno + rlen <= tend);
|
||||
end = args->agbno + rlen;
|
||||
ASSERT(args->agbno + args->len <= tend);
|
||||
|
||||
/*
|
||||
* We are allocating agbno for rlen [agbno .. end]
|
||||
* We are allocating agbno for args->len
|
||||
* Allocate/initialize a cursor for the by-size btree.
|
||||
*/
|
||||
cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
|
||||
@ -2127,7 +2123,7 @@ xfs_read_agf(
|
||||
* Validate the magic number of the agf block.
|
||||
*/
|
||||
agf_ok =
|
||||
be32_to_cpu(agf->agf_magicnum) == XFS_AGF_MAGIC &&
|
||||
agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
|
||||
XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
|
||||
be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
|
||||
be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
|
||||
|
@ -31,7 +31,6 @@
|
||||
#include "xfs_dinode.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_btree.h"
|
||||
#include "xfs_btree_trace.h"
|
||||
#include "xfs_alloc.h"
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_trace.h"
|
||||
@ -311,72 +310,6 @@ xfs_allocbt_recs_inorder(
|
||||
}
|
||||
#endif /* DEBUG */
|
||||
|
||||
#ifdef XFS_BTREE_TRACE
|
||||
ktrace_t *xfs_allocbt_trace_buf;
|
||||
|
||||
STATIC void
|
||||
xfs_allocbt_trace_enter(
|
||||
struct xfs_btree_cur *cur,
|
||||
const char *func,
|
||||
char *s,
|
||||
int type,
|
||||
int line,
|
||||
__psunsigned_t a0,
|
||||
__psunsigned_t a1,
|
||||
__psunsigned_t a2,
|
||||
__psunsigned_t a3,
|
||||
__psunsigned_t a4,
|
||||
__psunsigned_t a5,
|
||||
__psunsigned_t a6,
|
||||
__psunsigned_t a7,
|
||||
__psunsigned_t a8,
|
||||
__psunsigned_t a9,
|
||||
__psunsigned_t a10)
|
||||
{
|
||||
ktrace_enter(xfs_allocbt_trace_buf, (void *)(__psint_t)type,
|
||||
(void *)func, (void *)s, NULL, (void *)cur,
|
||||
(void *)a0, (void *)a1, (void *)a2, (void *)a3,
|
||||
(void *)a4, (void *)a5, (void *)a6, (void *)a7,
|
||||
(void *)a8, (void *)a9, (void *)a10);
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xfs_allocbt_trace_cursor(
|
||||
struct xfs_btree_cur *cur,
|
||||
__uint32_t *s0,
|
||||
__uint64_t *l0,
|
||||
__uint64_t *l1)
|
||||
{
|
||||
*s0 = cur->bc_private.a.agno;
|
||||
*l0 = cur->bc_rec.a.ar_startblock;
|
||||
*l1 = cur->bc_rec.a.ar_blockcount;
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xfs_allocbt_trace_key(
|
||||
struct xfs_btree_cur *cur,
|
||||
union xfs_btree_key *key,
|
||||
__uint64_t *l0,
|
||||
__uint64_t *l1)
|
||||
{
|
||||
*l0 = be32_to_cpu(key->alloc.ar_startblock);
|
||||
*l1 = be32_to_cpu(key->alloc.ar_blockcount);
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xfs_allocbt_trace_record(
|
||||
struct xfs_btree_cur *cur,
|
||||
union xfs_btree_rec *rec,
|
||||
__uint64_t *l0,
|
||||
__uint64_t *l1,
|
||||
__uint64_t *l2)
|
||||
{
|
||||
*l0 = be32_to_cpu(rec->alloc.ar_startblock);
|
||||
*l1 = be32_to_cpu(rec->alloc.ar_blockcount);
|
||||
*l2 = 0;
|
||||
}
|
||||
#endif /* XFS_BTREE_TRACE */
|
||||
|
||||
static const struct xfs_btree_ops xfs_allocbt_ops = {
|
||||
.rec_len = sizeof(xfs_alloc_rec_t),
|
||||
.key_len = sizeof(xfs_alloc_key_t),
|
||||
@ -393,18 +326,10 @@ static const struct xfs_btree_ops xfs_allocbt_ops = {
|
||||
.init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
|
||||
.init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
|
||||
.key_diff = xfs_allocbt_key_diff,
|
||||
|
||||
#ifdef DEBUG
|
||||
.keys_inorder = xfs_allocbt_keys_inorder,
|
||||
.recs_inorder = xfs_allocbt_recs_inorder,
|
||||
#endif
|
||||
|
||||
#ifdef XFS_BTREE_TRACE
|
||||
.trace_enter = xfs_allocbt_trace_enter,
|
||||
.trace_cursor = xfs_allocbt_trace_cursor,
|
||||
.trace_key = xfs_allocbt_trace_key,
|
||||
.trace_record = xfs_allocbt_trace_record,
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
@ -427,13 +352,16 @@ xfs_allocbt_init_cursor(
|
||||
|
||||
cur->bc_tp = tp;
|
||||
cur->bc_mp = mp;
|
||||
cur->bc_nlevels = be32_to_cpu(agf->agf_levels[btnum]);
|
||||
cur->bc_btnum = btnum;
|
||||
cur->bc_blocklog = mp->m_sb.sb_blocklog;
|
||||
|
||||
cur->bc_ops = &xfs_allocbt_ops;
|
||||
if (btnum == XFS_BTNUM_CNT)
|
||||
|
||||
if (btnum == XFS_BTNUM_CNT) {
|
||||
cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
|
||||
cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
|
||||
} else {
|
||||
cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
|
||||
}
|
||||
|
||||
cur->bc_private.a.agbp = agbp;
|
||||
cur->bc_private.a.agno = agno;
|
||||
|
@ -1,136 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it would be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
#ifndef __XFS_ARCH_H__
|
||||
#define __XFS_ARCH_H__
|
||||
|
||||
#ifndef XFS_BIG_INUMS
|
||||
# error XFS_BIG_INUMS must be defined true or false
|
||||
#endif
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
#ifdef __BIG_ENDIAN
|
||||
#define XFS_NATIVE_HOST 1
|
||||
#else
|
||||
#undef XFS_NATIVE_HOST
|
||||
#endif
|
||||
|
||||
#else /* __KERNEL__ */
|
||||
|
||||
#if __BYTE_ORDER == __BIG_ENDIAN
|
||||
#define XFS_NATIVE_HOST 1
|
||||
#else
|
||||
#undef XFS_NATIVE_HOST
|
||||
#endif
|
||||
|
||||
#ifdef XFS_NATIVE_HOST
|
||||
#define cpu_to_be16(val) ((__force __be16)(__u16)(val))
|
||||
#define cpu_to_be32(val) ((__force __be32)(__u32)(val))
|
||||
#define cpu_to_be64(val) ((__force __be64)(__u64)(val))
|
||||
#define be16_to_cpu(val) ((__force __u16)(__be16)(val))
|
||||
#define be32_to_cpu(val) ((__force __u32)(__be32)(val))
|
||||
#define be64_to_cpu(val) ((__force __u64)(__be64)(val))
|
||||
#else
|
||||
#define cpu_to_be16(val) ((__force __be16)__swab16((__u16)(val)))
|
||||
#define cpu_to_be32(val) ((__force __be32)__swab32((__u32)(val)))
|
||||
#define cpu_to_be64(val) ((__force __be64)__swab64((__u64)(val)))
|
||||
#define be16_to_cpu(val) (__swab16((__force __u16)(__be16)(val)))
|
||||
#define be32_to_cpu(val) (__swab32((__force __u32)(__be32)(val)))
|
||||
#define be64_to_cpu(val) (__swab64((__force __u64)(__be64)(val)))
|
||||
#endif
|
||||
|
||||
static inline void be16_add_cpu(__be16 *a, __s16 b)
|
||||
{
|
||||
*a = cpu_to_be16(be16_to_cpu(*a) + b);
|
||||
}
|
||||
|
||||
static inline void be32_add_cpu(__be32 *a, __s32 b)
|
||||
{
|
||||
*a = cpu_to_be32(be32_to_cpu(*a) + b);
|
||||
}
|
||||
|
||||
static inline void be64_add_cpu(__be64 *a, __s64 b)
|
||||
{
|
||||
*a = cpu_to_be64(be64_to_cpu(*a) + b);
|
||||
}
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
/*
|
||||
* get and set integers from potentially unaligned locations
|
||||
*/
|
||||
|
||||
#define INT_GET_UNALIGNED_16_BE(pointer) \
|
||||
((__u16)((((__u8*)(pointer))[0] << 8) | (((__u8*)(pointer))[1])))
|
||||
#define INT_SET_UNALIGNED_16_BE(pointer,value) \
|
||||
{ \
|
||||
((__u8*)(pointer))[0] = (((value) >> 8) & 0xff); \
|
||||
((__u8*)(pointer))[1] = (((value) ) & 0xff); \
|
||||
}
|
||||
|
||||
/*
 * In directories inode numbers are stored as unaligned arrays of unsigned
 * 8bit integers on disk.
 *
 * For v1 directories or v2 directories that contain inode numbers that
 * do not fit into 32bit the array has eight members, but the first member
 * is always zero:
 *
 * |unused|48-55|40-47|32-39|24-31|16-23| 8-15| 0- 7|
 *
 * For v2 directories that only contain entries with inode numbers that fit
 * into 32bits a four-member array is used:
 *
 * |24-31|16-23| 8-15| 0- 7|
 */
|
||||
|
||||
#define XFS_GET_DIR_INO4(di) \
|
||||
(((__u32)(di).i[0] << 24) | ((di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
|
||||
|
||||
#define XFS_PUT_DIR_INO4(from, di) \
|
||||
do { \
|
||||
(di).i[0] = (((from) & 0xff000000ULL) >> 24); \
|
||||
(di).i[1] = (((from) & 0x00ff0000ULL) >> 16); \
|
||||
(di).i[2] = (((from) & 0x0000ff00ULL) >> 8); \
|
||||
(di).i[3] = ((from) & 0x000000ffULL); \
|
||||
} while (0)
|
||||
|
||||
#define XFS_DI_HI(di) \
|
||||
(((__u32)(di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
|
||||
#define XFS_DI_LO(di) \
|
||||
(((__u32)(di).i[4] << 24) | ((di).i[5] << 16) | ((di).i[6] << 8) | ((di).i[7]))
|
||||
|
||||
#define XFS_GET_DIR_INO8(di) \
|
||||
(((xfs_ino_t)XFS_DI_LO(di) & 0xffffffffULL) | \
|
||||
((xfs_ino_t)XFS_DI_HI(di) << 32))
|
||||
|
||||
#define XFS_PUT_DIR_INO8(from, di) \
|
||||
do { \
|
||||
(di).i[0] = 0; \
|
||||
(di).i[1] = (((from) & 0x00ff000000000000ULL) >> 48); \
|
||||
(di).i[2] = (((from) & 0x0000ff0000000000ULL) >> 40); \
|
||||
(di).i[3] = (((from) & 0x000000ff00000000ULL) >> 32); \
|
||||
(di).i[4] = (((from) & 0x00000000ff000000ULL) >> 24); \
|
||||
(di).i[5] = (((from) & 0x0000000000ff0000ULL) >> 16); \
|
||||
(di).i[6] = (((from) & 0x000000000000ff00ULL) >> 8); \
|
||||
(di).i[7] = ((from) & 0x00000000000000ffULL); \
|
||||
} while (0)
|
||||
|
||||
#endif /* __XFS_ARCH_H__ */
|
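The XFS_GET_DIR_INO4/XFS_PUT_DIR_INO4 macros in the removed xfs_arch.h above pack an inode number into an unaligned array of big-endian bytes, as described in the comment. A standalone sketch of the same round trip, using local equivalents of the macros rather than any kernel header, looks like this:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the on-disk type: four unaligned bytes, big-endian. */
typedef struct { uint8_t i[4]; } dir_ino4_t;

/* Equivalent to XFS_GET_DIR_INO4 above: most significant byte comes first. */
#define GET_DIR_INO4(di) \
	(((uint32_t)(di).i[0] << 24) | ((di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))

/* Equivalent to XFS_PUT_DIR_INO4 above: split the value into four bytes. */
#define PUT_DIR_INO4(from, di) \
	do { \
		(di).i[0] = (uint8_t)(((from) >> 24) & 0xff); \
		(di).i[1] = (uint8_t)(((from) >> 16) & 0xff); \
		(di).i[2] = (uint8_t)(((from) >>  8) & 0xff); \
		(di).i[3] = (uint8_t)((from) & 0xff); \
	} while (0)

int main(void)
{
	dir_ino4_t di;
	uint32_t ino = 0x00a1b2c3;	/* an inode number that fits in 32 bits */

	PUT_DIR_INO4(ino, di);

	/* The byte order on "disk" is fixed, regardless of host endianness. */
	printf("bytes: %02x %02x %02x %02x\n", di.i[0], di.i[1], di.i[2], di.i[3]);
	assert(GET_DIR_INO4(di) == ino);
	return 0;
}
```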
@@ -822,17 +822,21 @@ xfs_attr_inactive(xfs_inode_t *dp)
	error = xfs_attr_root_inactive(&trans, dp);
	if (error)
		goto out;

	/*
	 * signal synchronous inactive transactions unless this
	 * is a synchronous mount filesystem in which case we
	 * know that we're here because we've been called out of
	 * xfs_inactive which means that the last reference is gone
	 * and the unlink transaction has already hit the disk so
	 * async inactive transactions are safe.
	 * Signal synchronous inactive transactions unless this is a
	 * synchronous mount filesystem in which case we know that we're here
	 * because we've been called out of xfs_inactive which means that the
	 * last reference is gone and the unlink transaction has already hit
	 * the disk so async inactive transactions are safe.
	 */
	if ((error = xfs_itruncate_finish(&trans, dp, 0LL, XFS_ATTR_FORK,
					 (!(mp->m_flags & XFS_MOUNT_WSYNC)
					  ? 1 : 0))))
	if (!(mp->m_flags & XFS_MOUNT_WSYNC)) {
		if (dp->i_d.di_anextents > 0)
			xfs_trans_set_sync(trans);
	}

	error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
	if (error)
		goto out;

	/*
@ -1199,7 +1203,7 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context)
|
||||
return XFS_ERROR(error);
|
||||
ASSERT(bp != NULL);
|
||||
leaf = bp->data;
|
||||
if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_ATTR_LEAF_MAGIC)) {
|
||||
if (unlikely(leaf->hdr.info.magic != cpu_to_be16(XFS_ATTR_LEAF_MAGIC))) {
|
||||
XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW,
|
||||
context->dp->i_mount, leaf);
|
||||
xfs_da_brelse(NULL, bp);
|
||||
@ -1606,9 +1610,8 @@ xfs_attr_node_removename(xfs_da_args_t *args)
|
||||
XFS_ATTR_FORK);
|
||||
if (error)
|
||||
goto out;
|
||||
ASSERT(be16_to_cpu(((xfs_attr_leafblock_t *)
|
||||
bp->data)->hdr.info.magic)
|
||||
== XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT((((xfs_attr_leafblock_t *)bp->data)->hdr.info.magic) ==
|
||||
cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
|
||||
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
|
||||
xfs_bmap_init(args->flist, args->firstblock);
|
||||
@ -1873,11 +1876,11 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
|
||||
return(XFS_ERROR(EFSCORRUPTED));
|
||||
}
|
||||
node = bp->data;
|
||||
if (be16_to_cpu(node->hdr.info.magic)
|
||||
== XFS_ATTR_LEAF_MAGIC)
|
||||
if (node->hdr.info.magic ==
|
||||
cpu_to_be16(XFS_ATTR_LEAF_MAGIC))
|
||||
break;
|
||||
if (unlikely(be16_to_cpu(node->hdr.info.magic)
|
||||
!= XFS_DA_NODE_MAGIC)) {
|
||||
if (unlikely(node->hdr.info.magic !=
|
||||
cpu_to_be16(XFS_DA_NODE_MAGIC))) {
|
||||
XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
|
||||
XFS_ERRLEVEL_LOW,
|
||||
context->dp->i_mount,
|
||||
@ -1912,8 +1915,8 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
|
||||
*/
|
||||
for (;;) {
|
||||
leaf = bp->data;
|
||||
if (unlikely(be16_to_cpu(leaf->hdr.info.magic)
|
||||
!= XFS_ATTR_LEAF_MAGIC)) {
|
||||
if (unlikely(leaf->hdr.info.magic !=
|
||||
cpu_to_be16(XFS_ATTR_LEAF_MAGIC))) {
|
||||
XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)",
|
||||
XFS_ERRLEVEL_LOW,
|
||||
context->dp->i_mount, leaf);
|
||||
|
@ -731,7 +731,7 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp)
|
||||
int bytes, i;
|
||||
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
|
||||
entry = &leaf->entries[0];
|
||||
bytes = sizeof(struct xfs_attr_sf_hdr);
|
||||
@ -777,7 +777,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
|
||||
ASSERT(bp != NULL);
|
||||
memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount));
|
||||
leaf = (xfs_attr_leafblock_t *)tmpbuffer;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
memset(bp->data, 0, XFS_LBSIZE(dp->i_mount));
|
||||
|
||||
/*
|
||||
@ -872,7 +872,7 @@ xfs_attr_leaf_to_node(xfs_da_args_t *args)
|
||||
goto out;
|
||||
node = bp1->data;
|
||||
leaf = bp2->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
/* both on-disk, don't endian-flip twice */
|
||||
node->btree[0].hashval =
|
||||
leaf->entries[be16_to_cpu(leaf->hdr.count)-1 ].hashval;
|
||||
@ -997,7 +997,7 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args)
|
||||
int tablesize, entsize, sum, tmp, i;
|
||||
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
ASSERT((args->index >= 0)
|
||||
&& (args->index <= be16_to_cpu(leaf->hdr.count)));
|
||||
hdr = &leaf->hdr;
|
||||
@ -1070,7 +1070,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
|
||||
int tmp, i;
|
||||
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
hdr = &leaf->hdr;
|
||||
ASSERT((mapindex >= 0) && (mapindex < XFS_ATTR_LEAF_MAPSIZE));
|
||||
ASSERT((args->index >= 0) && (args->index <= be16_to_cpu(hdr->count)));
|
||||
@ -1256,8 +1256,8 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
|
||||
ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC);
|
||||
leaf1 = blk1->bp->data;
|
||||
leaf2 = blk2->bp->data;
|
||||
ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
args = state->args;
|
||||
|
||||
/*
|
||||
@ -1533,7 +1533,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
|
||||
*/
|
||||
blk = &state->path.blk[ state->path.active-1 ];
|
||||
info = blk->bp->data;
|
||||
ASSERT(be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
leaf = (xfs_attr_leafblock_t *)info;
|
||||
count = be16_to_cpu(leaf->hdr.count);
|
||||
bytes = sizeof(xfs_attr_leaf_hdr_t) +
|
||||
@ -1596,7 +1596,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
|
||||
bytes = state->blocksize - (state->blocksize>>2);
|
||||
bytes -= be16_to_cpu(leaf->hdr.usedbytes);
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
count += be16_to_cpu(leaf->hdr.count);
|
||||
bytes -= be16_to_cpu(leaf->hdr.usedbytes);
|
||||
bytes -= count * sizeof(xfs_attr_leaf_entry_t);
|
||||
@ -1650,7 +1650,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
|
||||
xfs_mount_t *mp;
|
||||
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
hdr = &leaf->hdr;
|
||||
mp = args->trans->t_mountp;
|
||||
ASSERT((be16_to_cpu(hdr->count) > 0)
|
||||
@ -1813,8 +1813,8 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
|
||||
ASSERT(save_blk->magic == XFS_ATTR_LEAF_MAGIC);
|
||||
drop_leaf = drop_blk->bp->data;
|
||||
save_leaf = save_blk->bp->data;
|
||||
ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(drop_leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
ASSERT(save_leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
drop_hdr = &drop_leaf->hdr;
|
||||
save_hdr = &save_leaf->hdr;
|
||||
|
||||
@ -1915,7 +1915,7 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
|
||||
xfs_dahash_t hashval;
|
||||
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
ASSERT(be16_to_cpu(leaf->hdr.count)
|
||||
< (XFS_LBSIZE(args->dp->i_mount)/8));
|
||||
|
||||
@ -2019,7 +2019,7 @@ xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args)
|
||||
xfs_attr_leaf_name_remote_t *name_rmt;
|
||||
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
ASSERT(be16_to_cpu(leaf->hdr.count)
|
||||
< (XFS_LBSIZE(args->dp->i_mount)/8));
|
||||
ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
|
||||
@ -2087,8 +2087,8 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
|
||||
/*
|
||||
* Set up environment.
|
||||
*/
|
||||
ASSERT(be16_to_cpu(leaf_s->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(be16_to_cpu(leaf_d->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf_s->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
ASSERT(leaf_d->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
hdr_s = &leaf_s->hdr;
|
||||
hdr_d = &leaf_d->hdr;
|
||||
ASSERT((be16_to_cpu(hdr_s->count) > 0) &&
|
||||
@ -2222,8 +2222,8 @@ xfs_attr_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp)
|
||||
|
||||
leaf1 = leaf1_bp->data;
|
||||
leaf2 = leaf2_bp->data;
|
||||
ASSERT((be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC) &&
|
||||
(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC));
|
||||
ASSERT((leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) &&
|
||||
(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)));
|
||||
if ((be16_to_cpu(leaf1->hdr.count) > 0) &&
|
||||
(be16_to_cpu(leaf2->hdr.count) > 0) &&
|
||||
((be32_to_cpu(leaf2->entries[0].hashval) <
|
||||
@ -2246,7 +2246,7 @@ xfs_attr_leaf_lasthash(xfs_dabuf_t *bp, int *count)
|
||||
xfs_attr_leafblock_t *leaf;
|
||||
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
if (count)
|
||||
*count = be16_to_cpu(leaf->hdr.count);
|
||||
if (!leaf->hdr.count)
|
||||
@ -2265,7 +2265,7 @@ xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index)
|
||||
xfs_attr_leaf_name_remote_t *name_rmt;
|
||||
int size;
|
||||
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
if (leaf->entries[index].flags & XFS_ATTR_LOCAL) {
|
||||
name_loc = xfs_attr_leaf_name_local(leaf, index);
|
||||
size = xfs_attr_leaf_entsize_local(name_loc->namelen,
|
||||
@ -2451,7 +2451,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args)
|
||||
ASSERT(bp != NULL);
|
||||
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
|
||||
ASSERT(args->index >= 0);
|
||||
entry = &leaf->entries[ args->index ];
|
||||
@ -2515,7 +2515,7 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args)
|
||||
ASSERT(bp != NULL);
|
||||
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
|
||||
ASSERT(args->index >= 0);
|
||||
entry = &leaf->entries[ args->index ];
|
||||
@ -2585,13 +2585,13 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args)
|
||||
}
|
||||
|
||||
leaf1 = bp1->data;
|
||||
ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
ASSERT(args->index < be16_to_cpu(leaf1->hdr.count));
|
||||
ASSERT(args->index >= 0);
|
||||
entry1 = &leaf1->entries[ args->index ];
|
||||
|
||||
leaf2 = bp2->data;
|
||||
ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
ASSERT(args->index2 < be16_to_cpu(leaf2->hdr.count));
|
||||
ASSERT(args->index2 >= 0);
|
||||
entry2 = &leaf2->entries[ args->index2 ];
|
||||
@ -2689,9 +2689,9 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
|
||||
* This is a depth-first traversal!
|
||||
*/
|
||||
info = bp->data;
|
||||
if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) {
|
||||
if (info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
|
||||
error = xfs_attr_node_inactive(trans, dp, bp, 1);
|
||||
} else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) {
|
||||
} else if (info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) {
|
||||
error = xfs_attr_leaf_inactive(trans, dp, bp);
|
||||
} else {
|
||||
error = XFS_ERROR(EIO);
|
||||
@ -2739,7 +2739,7 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
|
||||
}
|
||||
|
||||
node = bp->data;
|
||||
ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
|
||||
ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
|
||||
parent_blkno = xfs_da_blkno(bp); /* save for re-read later */
|
||||
count = be16_to_cpu(node->hdr.count);
|
||||
if (!count) {
|
||||
@ -2773,10 +2773,10 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
|
||||
* Invalidate the subtree, however we have to.
|
||||
*/
|
||||
info = child_bp->data;
|
||||
if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) {
|
||||
if (info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
|
||||
error = xfs_attr_node_inactive(trans, dp,
|
||||
child_bp, level+1);
|
||||
} else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) {
|
||||
} else if (info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) {
|
||||
error = xfs_attr_leaf_inactive(trans, dp,
|
||||
child_bp);
|
||||
} else {
|
||||
@ -2836,7 +2836,7 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
|
||||
int error, count, size, tmp, i;
|
||||
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
|
||||
/*
|
||||
* Count the number of "remote" value extents.
|
||||
|
@ -29,15 +29,11 @@
|
||||
#include "xfs_bmap_btree.h"
|
||||
#include "xfs_alloc_btree.h"
|
||||
#include "xfs_ialloc_btree.h"
|
||||
#include "xfs_dir2_sf.h"
|
||||
#include "xfs_dinode.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_btree.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_itable.h"
|
||||
#include "xfs_dir2_data.h"
|
||||
#include "xfs_dir2_leaf.h"
|
||||
#include "xfs_dir2_block.h"
|
||||
#include "xfs_inode_item.h"
|
||||
#include "xfs_extfree_item.h"
|
||||
#include "xfs_alloc.h"
|
||||
@ -94,6 +90,7 @@ xfs_bmap_add_attrfork_local(
|
||||
*/
|
||||
STATIC int /* error */
|
||||
xfs_bmap_add_extent_delay_real(
|
||||
struct xfs_trans *tp, /* transaction pointer */
|
||||
xfs_inode_t *ip, /* incore inode pointer */
|
||||
xfs_extnum_t *idx, /* extent number to update/insert */
|
||||
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
|
||||
@ -439,6 +436,7 @@ xfs_bmap_add_attrfork_local(
|
||||
*/
|
||||
STATIC int /* error */
|
||||
xfs_bmap_add_extent(
|
||||
struct xfs_trans *tp, /* transaction pointer */
|
||||
xfs_inode_t *ip, /* incore inode pointer */
|
||||
xfs_extnum_t *idx, /* extent number to update/insert */
|
||||
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
|
||||
@ -524,7 +522,7 @@ xfs_bmap_add_extent(
|
||||
if (cur)
|
||||
ASSERT(cur->bc_private.b.flags &
|
||||
XFS_BTCUR_BPRV_WASDEL);
|
||||
error = xfs_bmap_add_extent_delay_real(ip,
|
||||
error = xfs_bmap_add_extent_delay_real(tp, ip,
|
||||
idx, &cur, new, &da_new,
|
||||
first, flist, &logflags);
|
||||
} else {
|
||||
@ -561,7 +559,7 @@ xfs_bmap_add_extent(
|
||||
int tmp_logflags; /* partial log flag return val */
|
||||
|
||||
ASSERT(cur == NULL);
|
||||
error = xfs_bmap_extents_to_btree(ip->i_transp, ip, first,
|
||||
error = xfs_bmap_extents_to_btree(tp, ip, first,
|
||||
flist, &cur, da_old > 0, &tmp_logflags, whichfork);
|
||||
logflags |= tmp_logflags;
|
||||
if (error)
|
||||
@ -604,6 +602,7 @@ done:
|
||||
*/
|
||||
STATIC int /* error */
|
||||
xfs_bmap_add_extent_delay_real(
|
||||
struct xfs_trans *tp, /* transaction pointer */
|
||||
xfs_inode_t *ip, /* incore inode pointer */
|
||||
xfs_extnum_t *idx, /* extent number to update/insert */
|
||||
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
|
||||
@ -901,7 +900,7 @@ xfs_bmap_add_extent_delay_real(
|
||||
}
|
||||
if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
|
||||
ip->i_d.di_nextents > ip->i_df.if_ext_max) {
|
||||
error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
|
||||
error = xfs_bmap_extents_to_btree(tp, ip,
|
||||
first, flist, &cur, 1, &tmp_rval,
|
||||
XFS_DATA_FORK);
|
||||
rval |= tmp_rval;
|
||||
@ -984,7 +983,7 @@ xfs_bmap_add_extent_delay_real(
|
||||
}
|
||||
if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
|
||||
ip->i_d.di_nextents > ip->i_df.if_ext_max) {
|
||||
error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
|
||||
error = xfs_bmap_extents_to_btree(tp, ip,
|
||||
first, flist, &cur, 1, &tmp_rval,
|
||||
XFS_DATA_FORK);
|
||||
rval |= tmp_rval;
|
||||
@ -1052,7 +1051,7 @@ xfs_bmap_add_extent_delay_real(
|
||||
}
|
||||
if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
|
||||
ip->i_d.di_nextents > ip->i_df.if_ext_max) {
|
||||
error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
|
||||
error = xfs_bmap_extents_to_btree(tp, ip,
|
||||
first, flist, &cur, 1, &tmp_rval,
|
||||
XFS_DATA_FORK);
|
||||
rval |= tmp_rval;
|
||||
@ -2871,8 +2870,8 @@ xfs_bmap_del_extent(
|
||||
len = del->br_blockcount;
|
||||
do_div(bno, mp->m_sb.sb_rextsize);
|
||||
do_div(len, mp->m_sb.sb_rextsize);
|
||||
if ((error = xfs_rtfree_extent(ip->i_transp, bno,
|
||||
(xfs_extlen_t)len)))
|
||||
error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
|
||||
if (error)
|
||||
goto done;
|
||||
do_fx = 0;
|
||||
nblks = len * mp->m_sb.sb_rextsize;
|
||||
@ -4080,7 +4079,7 @@ xfs_bmap_sanity_check(
|
||||
{
|
||||
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
|
||||
|
||||
if (be32_to_cpu(block->bb_magic) != XFS_BMAP_MAGIC ||
|
||||
if (block->bb_magic != cpu_to_be32(XFS_BMAP_MAGIC) ||
|
||||
be16_to_cpu(block->bb_level) != level ||
|
||||
be16_to_cpu(block->bb_numrecs) == 0 ||
|
||||
be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
|
||||
@ -4662,7 +4661,7 @@ xfs_bmapi(
|
||||
if (!wasdelay && (flags & XFS_BMAPI_PREALLOC))
|
||||
got.br_state = XFS_EXT_UNWRITTEN;
|
||||
}
|
||||
error = xfs_bmap_add_extent(ip, &lastx, &cur, &got,
|
||||
error = xfs_bmap_add_extent(tp, ip, &lastx, &cur, &got,
|
||||
firstblock, flist, &tmp_logflags,
|
||||
whichfork);
|
||||
logflags |= tmp_logflags;
|
||||
@ -4763,7 +4762,7 @@ xfs_bmapi(
|
||||
mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
|
||||
? XFS_EXT_NORM
|
||||
: XFS_EXT_UNWRITTEN;
|
||||
error = xfs_bmap_add_extent(ip, &lastx, &cur, mval,
|
||||
error = xfs_bmap_add_extent(tp, ip, &lastx, &cur, mval,
|
||||
firstblock, flist, &tmp_logflags,
|
||||
whichfork);
|
||||
logflags |= tmp_logflags;
|
||||
@ -5117,7 +5116,7 @@ xfs_bunmapi(
|
||||
del.br_blockcount = mod;
|
||||
}
|
||||
del.br_state = XFS_EXT_UNWRITTEN;
|
||||
error = xfs_bmap_add_extent(ip, &lastx, &cur, &del,
|
||||
error = xfs_bmap_add_extent(tp, ip, &lastx, &cur, &del,
|
||||
firstblock, flist, &logflags,
|
||||
XFS_DATA_FORK);
|
||||
if (error)
|
||||
@ -5175,18 +5174,18 @@ xfs_bunmapi(
|
||||
}
|
||||
prev.br_state = XFS_EXT_UNWRITTEN;
|
||||
lastx--;
|
||||
error = xfs_bmap_add_extent(ip, &lastx, &cur,
|
||||
&prev, firstblock, flist, &logflags,
|
||||
XFS_DATA_FORK);
|
||||
error = xfs_bmap_add_extent(tp, ip, &lastx,
|
||||
&cur, &prev, firstblock, flist,
|
||||
&logflags, XFS_DATA_FORK);
|
||||
if (error)
|
||||
goto error0;
|
||||
goto nodelete;
|
||||
} else {
|
||||
ASSERT(del.br_state == XFS_EXT_NORM);
|
||||
del.br_state = XFS_EXT_UNWRITTEN;
|
||||
error = xfs_bmap_add_extent(ip, &lastx, &cur,
|
||||
&del, firstblock, flist, &logflags,
|
||||
XFS_DATA_FORK);
|
||||
error = xfs_bmap_add_extent(tp, ip, &lastx,
|
||||
&cur, &del, firstblock, flist,
|
||||
&logflags, XFS_DATA_FORK);
|
||||
if (error)
|
||||
goto error0;
|
||||
goto nodelete;
|
||||
|
@ -33,7 +33,6 @@
|
||||
#include "xfs_inode_item.h"
|
||||
#include "xfs_alloc.h"
|
||||
#include "xfs_btree.h"
|
||||
#include "xfs_btree_trace.h"
|
||||
#include "xfs_itable.h"
|
||||
#include "xfs_bmap.h"
|
||||
#include "xfs_error.h"
|
||||
@ -425,10 +424,10 @@ xfs_bmbt_to_bmdr(
|
||||
xfs_bmbt_key_t *tkp;
|
||||
__be64 *tpp;
|
||||
|
||||
ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC);
|
||||
ASSERT(be64_to_cpu(rblock->bb_u.l.bb_leftsib) == NULLDFSBNO);
|
||||
ASSERT(be64_to_cpu(rblock->bb_u.l.bb_rightsib) == NULLDFSBNO);
|
||||
ASSERT(be16_to_cpu(rblock->bb_level) > 0);
|
||||
ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC));
|
||||
ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO));
|
||||
ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO));
|
||||
ASSERT(rblock->bb_level != 0);
|
||||
dblock->bb_level = rblock->bb_level;
|
||||
dblock->bb_numrecs = rblock->bb_numrecs;
|
||||
dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0);
|
||||
@ -732,95 +731,6 @@ xfs_bmbt_recs_inorder(
|
||||
}
|
||||
#endif /* DEBUG */
|
||||
|
||||
#ifdef XFS_BTREE_TRACE
|
||||
ktrace_t *xfs_bmbt_trace_buf;
|
||||
|
||||
STATIC void
|
||||
xfs_bmbt_trace_enter(
|
||||
struct xfs_btree_cur *cur,
|
||||
const char *func,
|
||||
char *s,
|
||||
int type,
|
||||
int line,
|
||||
__psunsigned_t a0,
|
||||
__psunsigned_t a1,
|
||||
__psunsigned_t a2,
|
||||
__psunsigned_t a3,
|
||||
__psunsigned_t a4,
|
||||
__psunsigned_t a5,
|
||||
__psunsigned_t a6,
|
||||
__psunsigned_t a7,
|
||||
__psunsigned_t a8,
|
||||
__psunsigned_t a9,
|
||||
__psunsigned_t a10)
|
||||
{
|
||||
struct xfs_inode *ip = cur->bc_private.b.ip;
|
||||
int whichfork = cur->bc_private.b.whichfork;
|
||||
|
||||
ktrace_enter(xfs_bmbt_trace_buf,
|
||||
(void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
|
||||
(void *)func, (void *)s, (void *)ip, (void *)cur,
|
||||
(void *)a0, (void *)a1, (void *)a2, (void *)a3,
|
||||
(void *)a4, (void *)a5, (void *)a6, (void *)a7,
|
||||
(void *)a8, (void *)a9, (void *)a10);
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xfs_bmbt_trace_cursor(
|
||||
struct xfs_btree_cur *cur,
|
||||
__uint32_t *s0,
|
||||
__uint64_t *l0,
|
||||
__uint64_t *l1)
|
||||
{
|
||||
struct xfs_bmbt_rec_host r;
|
||||
|
||||
xfs_bmbt_set_all(&r, &cur->bc_rec.b);
|
||||
|
||||
*s0 = (cur->bc_nlevels << 24) |
|
||||
(cur->bc_private.b.flags << 16) |
|
||||
cur->bc_private.b.allocated;
|
||||
*l0 = r.l0;
|
||||
*l1 = r.l1;
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xfs_bmbt_trace_key(
|
||||
struct xfs_btree_cur *cur,
|
||||
union xfs_btree_key *key,
|
||||
__uint64_t *l0,
|
||||
__uint64_t *l1)
|
||||
{
|
||||
*l0 = be64_to_cpu(key->bmbt.br_startoff);
|
||||
*l1 = 0;
|
||||
}
|
||||
|
||||
/* Endian flipping versions of the bmbt extraction functions */
|
||||
STATIC void
|
||||
xfs_bmbt_disk_get_all(
|
||||
xfs_bmbt_rec_t *r,
|
||||
xfs_bmbt_irec_t *s)
|
||||
{
|
||||
__xfs_bmbt_get_all(get_unaligned_be64(&r->l0),
|
||||
get_unaligned_be64(&r->l1), s);
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xfs_bmbt_trace_record(
|
||||
struct xfs_btree_cur *cur,
|
||||
union xfs_btree_rec *rec,
|
||||
__uint64_t *l0,
|
||||
__uint64_t *l1,
|
||||
__uint64_t *l2)
|
||||
{
|
||||
struct xfs_bmbt_irec irec;
|
||||
|
||||
xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
|
||||
*l0 = irec.br_startoff;
|
||||
*l1 = irec.br_startblock;
|
||||
*l2 = irec.br_blockcount;
|
||||
}
|
||||
#endif /* XFS_BTREE_TRACE */
|
||||
|
||||
static const struct xfs_btree_ops xfs_bmbt_ops = {
|
||||
.rec_len = sizeof(xfs_bmbt_rec_t),
|
||||
.key_len = sizeof(xfs_bmbt_key_t),
|
||||
@ -837,18 +747,10 @@ static const struct xfs_btree_ops xfs_bmbt_ops = {
|
||||
.init_rec_from_cur = xfs_bmbt_init_rec_from_cur,
|
||||
.init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur,
|
||||
.key_diff = xfs_bmbt_key_diff,
|
||||
|
||||
#ifdef DEBUG
|
||||
.keys_inorder = xfs_bmbt_keys_inorder,
|
||||
.recs_inorder = xfs_bmbt_recs_inorder,
|
||||
#endif
|
||||
|
||||
#ifdef XFS_BTREE_TRACE
|
||||
.trace_enter = xfs_bmbt_trace_enter,
|
||||
.trace_cursor = xfs_bmbt_trace_cursor,
|
||||
.trace_key = xfs_bmbt_trace_key,
|
||||
.trace_record = xfs_bmbt_trace_record,
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -32,7 +32,6 @@
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_inode_item.h"
|
||||
#include "xfs_btree.h"
|
||||
#include "xfs_btree_trace.h"
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_trace.h"
|
||||
|
||||
@ -66,11 +65,11 @@ xfs_btree_check_lblock(
|
||||
be16_to_cpu(block->bb_numrecs) <=
|
||||
cur->bc_ops->get_maxrecs(cur, level) &&
|
||||
block->bb_u.l.bb_leftsib &&
|
||||
(be64_to_cpu(block->bb_u.l.bb_leftsib) == NULLDFSBNO ||
|
||||
(block->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO) ||
|
||||
XFS_FSB_SANITY_CHECK(mp,
|
||||
be64_to_cpu(block->bb_u.l.bb_leftsib))) &&
|
||||
block->bb_u.l.bb_rightsib &&
|
||||
(be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO ||
|
||||
(block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO) ||
|
||||
XFS_FSB_SANITY_CHECK(mp,
|
||||
be64_to_cpu(block->bb_u.l.bb_rightsib)));
|
||||
if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp,
|
||||
@ -105,10 +104,10 @@ xfs_btree_check_sblock(
|
||||
be16_to_cpu(block->bb_level) == level &&
|
||||
be16_to_cpu(block->bb_numrecs) <=
|
||||
cur->bc_ops->get_maxrecs(cur, level) &&
|
||||
(be32_to_cpu(block->bb_u.s.bb_leftsib) == NULLAGBLOCK ||
|
||||
(block->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) ||
|
||||
be32_to_cpu(block->bb_u.s.bb_leftsib) < agflen) &&
|
||||
block->bb_u.s.bb_leftsib &&
|
||||
(be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK ||
|
||||
(block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK) ||
|
||||
be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) &&
|
||||
block->bb_u.s.bb_rightsib;
|
||||
if (unlikely(XFS_TEST_ERROR(!sblock_ok, cur->bc_mp,
|
||||
@ -511,9 +510,9 @@ xfs_btree_islastblock(
|
||||
block = xfs_btree_get_block(cur, level, &bp);
|
||||
xfs_btree_check_block(cur, block, level, bp);
|
||||
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
|
||||
return be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO;
|
||||
return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO);
|
||||
else
|
||||
return be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK;
|
||||
return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -777,14 +776,14 @@ xfs_btree_setbuf(
|
||||
|
||||
b = XFS_BUF_TO_BLOCK(bp);
|
||||
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
|
||||
if (be64_to_cpu(b->bb_u.l.bb_leftsib) == NULLDFSBNO)
|
||||
if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO))
|
||||
cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
|
||||
if (be64_to_cpu(b->bb_u.l.bb_rightsib) == NULLDFSBNO)
|
||||
if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO))
|
||||
cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
|
||||
} else {
|
||||
if (be32_to_cpu(b->bb_u.s.bb_leftsib) == NULLAGBLOCK)
|
||||
if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK))
|
||||
cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
|
||||
if (be32_to_cpu(b->bb_u.s.bb_rightsib) == NULLAGBLOCK)
|
||||
if (b->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
|
||||
cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
|
||||
}
|
||||
}
|
||||
@ -795,9 +794,9 @@ xfs_btree_ptr_is_null(
|
||||
union xfs_btree_ptr *ptr)
|
||||
{
|
||||
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
|
||||
return be64_to_cpu(ptr->l) == NULLDFSBNO;
|
||||
return ptr->l == cpu_to_be64(NULLDFSBNO);
|
||||
else
|
||||
return be32_to_cpu(ptr->s) == NULLAGBLOCK;
|
||||
return ptr->s == cpu_to_be32(NULLAGBLOCK);
|
||||
}
|
||||
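This hunk shows the endianness idiom that recurs throughout the series: instead of converting the on-disk value with be32_to_cpu()/be64_to_cpu() on every comparison, the constant is converted once with cpu_to_be32()/cpu_to_be64(), which the compiler can usually fold at build time, so the disk value itself is never swapped. A small userspace illustration is below, using the POSIX htonl()/ntohl() helpers as stand-ins for the kernel byte-order functions; the magic value matches XFS_AGF_MAGIC ("XAGF"), but the rest is made up.

```c
#include <arpa/inet.h>	/* htonl/ntohl: userspace stand-ins for cpu_to_be32/be32_to_cpu */
#include <stdint.h>
#include <stdio.h>

#define MAGIC 0x58414746u	/* ASCII "XAGF" */

int main(void)
{
	/* Pretend this was read straight off disk: big-endian, never swapped. */
	uint32_t disk_magic = htonl(MAGIC);

	/* Old style: swap the on-disk value on every check. */
	int ok_old = (ntohl(disk_magic) == MAGIC);

	/* New style: swap the constant instead; htonl(MAGIC) can typically be
	 * folded at compile time, and disk_magic is compared as-is. */
	int ok_new = (disk_magic == htonl(MAGIC));

	printf("old=%d new=%d\n", ok_old, ok_new);	/* both print 1 */
	return !(ok_old && ok_new);
}
```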
|
||||
STATIC void
|
||||
@ -923,12 +922,12 @@ xfs_btree_ptr_to_daddr(
|
||||
union xfs_btree_ptr *ptr)
|
||||
{
|
||||
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
|
||||
ASSERT(be64_to_cpu(ptr->l) != NULLDFSBNO);
|
||||
ASSERT(ptr->l != cpu_to_be64(NULLDFSBNO));
|
||||
|
||||
return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l));
|
||||
} else {
|
||||
ASSERT(cur->bc_private.a.agno != NULLAGNUMBER);
|
||||
ASSERT(be32_to_cpu(ptr->s) != NULLAGBLOCK);
|
||||
ASSERT(ptr->s != cpu_to_be32(NULLAGBLOCK));
|
||||
|
||||
return XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno,
|
||||
be32_to_cpu(ptr->s));
|
||||
|
@ -199,25 +199,6 @@ struct xfs_btree_ops {
|
||||
union xfs_btree_rec *r1,
|
||||
union xfs_btree_rec *r2);
|
||||
#endif
|
||||
|
||||
/* btree tracing */
|
||||
#ifdef XFS_BTREE_TRACE
|
||||
void (*trace_enter)(struct xfs_btree_cur *, const char *,
|
||||
char *, int, int, __psunsigned_t,
|
||||
__psunsigned_t, __psunsigned_t,
|
||||
__psunsigned_t, __psunsigned_t,
|
||||
__psunsigned_t, __psunsigned_t,
|
||||
__psunsigned_t, __psunsigned_t,
|
||||
__psunsigned_t, __psunsigned_t);
|
||||
void (*trace_cursor)(struct xfs_btree_cur *, __uint32_t *,
|
||||
__uint64_t *, __uint64_t *);
|
||||
void (*trace_key)(struct xfs_btree_cur *,
|
||||
union xfs_btree_key *, __uint64_t *,
|
||||
__uint64_t *);
|
||||
void (*trace_record)(struct xfs_btree_cur *,
|
||||
union xfs_btree_rec *, __uint64_t *,
|
||||
__uint64_t *, __uint64_t *);
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -452,4 +433,23 @@ static inline int xfs_btree_get_level(struct xfs_btree_block *block)
	(XFS_FSB_TO_AGNO(mp, fsb) < mp->m_sb.sb_agcount && \
	 XFS_FSB_TO_AGBNO(mp, fsb) < mp->m_sb.sb_agblocks)

/*
 * Trace hooks. Currently not implemented as they need to be ported
 * over to the generic tracing functionality, which is some effort.
 *
 * i,j = integer (32 bit)
 * b = btree block buffer (xfs_buf_t)
 * p = btree ptr
 * r = btree record
 * k = btree key
 */
#define XFS_BTREE_TRACE_ARGBI(c, b, i)
#define XFS_BTREE_TRACE_ARGBII(c, b, i, j)
#define XFS_BTREE_TRACE_ARGI(c, i)
#define XFS_BTREE_TRACE_ARGIPK(c, i, p, s)
#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r)
#define XFS_BTREE_TRACE_ARGIK(c, i, k)
#define XFS_BTREE_TRACE_ARGR(c, r)
#define XFS_BTREE_TRACE_CURSOR(c, t)

#endif /* __XFS_BTREE_H__ */
|
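The empty XFS_BTREE_TRACE_* macros added above are the usual C pattern for trace hooks that compile away entirely until a real backend exists. A generic sketch of that pattern, with invented names and a printf backend guarded by a build-time define, might look like:

```c
#include <stdio.h>

/* Build with -DMYLIB_TRACE to get real trace output; otherwise every TRACE_*
 * use expands to a do-nothing statement and costs nothing at run time. */
#ifdef MYLIB_TRACE
#define TRACE_ENTER(func)	printf("enter %s\n", (func))
#define TRACE_ARGI(func, i)	printf("%s: arg=%d\n", (func), (i))
#else
#define TRACE_ENTER(func)	do { } while (0)
#define TRACE_ARGI(func, i)	do { } while (0)
#endif

static int square(int x)
{
	TRACE_ENTER(__func__);
	TRACE_ARGI(__func__, x);
	return x * x;
}

int main(void)
{
	printf("%d\n", square(7));	/* trace lines appear only with -DMYLIB_TRACE */
	return 0;
}
```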
||||
|
@ -1,249 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2008 Silicon Graphics, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it would be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
#include "xfs.h"
|
||||
#include "xfs_types.h"
|
||||
#include "xfs_inum.h"
|
||||
#include "xfs_bmap_btree.h"
|
||||
#include "xfs_alloc_btree.h"
|
||||
#include "xfs_ialloc_btree.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_btree.h"
|
||||
#include "xfs_btree_trace.h"
|
||||
|
||||
STATIC void
|
||||
xfs_btree_trace_ptr(
|
||||
struct xfs_btree_cur *cur,
|
||||
union xfs_btree_ptr ptr,
|
||||
__psunsigned_t *high,
|
||||
__psunsigned_t *low)
|
||||
{
|
||||
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
|
||||
__u64 val = be64_to_cpu(ptr.l);
|
||||
*high = val >> 32;
|
||||
*low = (int)val;
|
||||
} else {
|
||||
*high = 0;
|
||||
*low = be32_to_cpu(ptr.s);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a trace buffer entry for arguments, for a buffer & 1 integer arg.
|
||||
*/
|
||||
void
|
||||
xfs_btree_trace_argbi(
|
||||
const char *func,
|
||||
struct xfs_btree_cur *cur,
|
||||
struct xfs_buf *b,
|
||||
int i,
|
||||
int line)
|
||||
{
|
||||
cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGBI,
|
||||
line, (__psunsigned_t)b, i, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a trace buffer entry for arguments, for a buffer & 2 integer args.
|
||||
*/
|
||||
void
|
||||
xfs_btree_trace_argbii(
|
||||
const char *func,
|
||||
struct xfs_btree_cur *cur,
|
||||
struct xfs_buf *b,
|
||||
int i0,
|
||||
int i1,
|
||||
int line)
|
||||
{
|
||||
cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGBII,
|
||||
line, (__psunsigned_t)b, i0, i1, 0, 0, 0, 0,
|
||||
0, 0, 0, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a trace buffer entry for arguments, for 3 block-length args
|
||||
* and an integer arg.
|
||||
*/
|
||||
void
|
||||
xfs_btree_trace_argfffi(
|
||||
const char *func,
|
||||
struct xfs_btree_cur *cur,
|
||||
xfs_dfiloff_t o,
|
||||
xfs_dfsbno_t b,
|
||||
xfs_dfilblks_t i,
|
||||
int j,
|
||||
int line)
|
||||
{
|
||||
cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGFFFI,
|
||||
line,
|
||||
o >> 32, (int)o,
|
||||
b >> 32, (int)b,
|
||||
i >> 32, (int)i,
|
||||
(int)j, 0, 0, 0, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a trace buffer entry for arguments, for one integer arg.
|
||||
*/
|
||||
void
|
||||
xfs_btree_trace_argi(
|
||||
const char *func,
|
||||
struct xfs_btree_cur *cur,
|
||||
int i,
|
||||
int line)
|
||||
{
|
||||
cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGI,
|
||||
line, i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a trace buffer entry for arguments, for int, fsblock, key.
|
||||
*/
|
||||
void
|
||||
xfs_btree_trace_argipk(
|
||||
const char *func,
|
||||
struct xfs_btree_cur *cur,
|
||||
int i,
|
||||
union xfs_btree_ptr ptr,
|
||||
union xfs_btree_key *key,
|
||||
int line)
|
||||
{
|
||||
__psunsigned_t high, low;
|
||||
__uint64_t l0, l1;
|
||||
|
||||
xfs_btree_trace_ptr(cur, ptr, &high, &low);
|
||||
cur->bc_ops->trace_key(cur, key, &l0, &l1);
|
||||
cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGIPK,
|
||||
line, i, high, low,
|
||||
l0 >> 32, (int)l0,
|
||||
l1 >> 32, (int)l1,
|
||||
0, 0, 0, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a trace buffer entry for arguments, for int, fsblock, rec.
|
||||
*/
|
||||
void
|
||||
xfs_btree_trace_argipr(
|
||||
const char *func,
|
||||
struct xfs_btree_cur *cur,
|
||||
int i,
|
||||
union xfs_btree_ptr ptr,
|
||||
union xfs_btree_rec *rec,
|
||||
int line)
|
||||
{
|
||||
__psunsigned_t high, low;
|
||||
__uint64_t l0, l1, l2;
|
||||
|
||||
xfs_btree_trace_ptr(cur, ptr, &high, &low);
|
||||
cur->bc_ops->trace_record(cur, rec, &l0, &l1, &l2);
|
||||
cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGIPR,
|
||||
line, i,
|
||||
high, low,
|
||||
l0 >> 32, (int)l0,
|
||||
l1 >> 32, (int)l1,
|
||||
l2 >> 32, (int)l2,
|
||||
0, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a trace buffer entry for arguments, for int, key.
|
||||
*/
|
||||
void
|
||||
xfs_btree_trace_argik(
|
||||
const char *func,
|
||||
struct xfs_btree_cur *cur,
|
||||
int i,
|
||||
union xfs_btree_key *key,
|
||||
int line)
|
||||
{
|
||||
__uint64_t l0, l1;
|
||||
|
||||
cur->bc_ops->trace_key(cur, key, &l0, &l1);
|
||||
cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGIK,
|
||||
line, i,
|
||||
l0 >> 32, (int)l0,
|
||||
l1 >> 32, (int)l1,
|
||||
0, 0, 0, 0, 0, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a trace buffer entry for arguments, for record.
|
||||
*/
|
||||
void
|
||||
xfs_btree_trace_argr(
|
||||
const char *func,
|
||||
struct xfs_btree_cur *cur,
|
||||
union xfs_btree_rec *rec,
|
||||
int line)
|
||||
{
|
||||
__uint64_t l0, l1, l2;
|
||||
|
||||
cur->bc_ops->trace_record(cur, rec, &l0, &l1, &l2);
|
||||
cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGR,
|
||||
line,
|
||||
l0 >> 32, (int)l0,
|
||||
l1 >> 32, (int)l1,
|
||||
l2 >> 32, (int)l2,
|
||||
0, 0, 0, 0, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a trace buffer entry for the cursor/operation.
|
||||
*/
|
||||
void
|
||||
xfs_btree_trace_cursor(
|
||||
const char *func,
|
||||
struct xfs_btree_cur *cur,
|
||||
int type,
|
||||
int line)
|
||||
{
|
||||
__uint32_t s0;
|
||||
__uint64_t l0, l1;
|
||||
char *s;
|
||||
|
||||
switch (type) {
|
||||
case XBT_ARGS:
|
||||
s = "args";
|
||||
break;
|
||||
case XBT_ENTRY:
|
||||
s = "entry";
|
||||
break;
|
||||
case XBT_ERROR:
|
||||
s = "error";
|
||||
break;
|
||||
case XBT_EXIT:
|
||||
s = "exit";
|
||||
break;
|
||||
default:
|
||||
s = "unknown";
|
||||
break;
|
||||
}
|
||||
|
||||
cur->bc_ops->trace_cursor(cur, &s0, &l0, &l1);
|
||||
cur->bc_ops->trace_enter(cur, func, s, XFS_BTREE_KTRACE_CUR, line,
|
||||
s0,
|
||||
l0 >> 32, (int)l0,
|
||||
l1 >> 32, (int)l1,
|
||||
(__psunsigned_t)cur->bc_bufs[0],
|
||||
(__psunsigned_t)cur->bc_bufs[1],
|
||||
(__psunsigned_t)cur->bc_bufs[2],
|
||||
(__psunsigned_t)cur->bc_bufs[3],
|
||||
(cur->bc_ptrs[0] << 16) | cur->bc_ptrs[1],
|
||||
(cur->bc_ptrs[2] << 16) | cur->bc_ptrs[3]);
|
||||
}
|
@ -1,99 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2008 Silicon Graphics, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it would be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
#ifndef __XFS_BTREE_TRACE_H__
|
||||
#define __XFS_BTREE_TRACE_H__
|
||||
|
||||
struct xfs_btree_cur;
|
||||
struct xfs_buf;
|
||||
|
||||
|
||||
/*
|
||||
* Trace hooks.
|
||||
* i,j = integer (32 bit)
|
||||
* b = btree block buffer (xfs_buf_t)
|
||||
* p = btree ptr
|
||||
* r = btree record
|
||||
* k = btree key
|
||||
*/
|
||||
|
||||
#ifdef XFS_BTREE_TRACE
|
||||
|
||||
/*
|
||||
* Trace buffer entry types.
|
||||
*/
|
||||
#define XFS_BTREE_KTRACE_ARGBI 1
|
||||
#define XFS_BTREE_KTRACE_ARGBII 2
|
||||
#define XFS_BTREE_KTRACE_ARGFFFI 3
|
||||
#define XFS_BTREE_KTRACE_ARGI 4
|
||||
#define XFS_BTREE_KTRACE_ARGIPK 5
|
||||
#define XFS_BTREE_KTRACE_ARGIPR 6
|
||||
#define XFS_BTREE_KTRACE_ARGIK 7
|
||||
#define XFS_BTREE_KTRACE_ARGR 8
|
||||
#define XFS_BTREE_KTRACE_CUR 9
|
||||
|
||||
/*
|
||||
* Sub-types for cursor traces.
|
||||
*/
|
||||
#define XBT_ARGS 0
|
||||
#define XBT_ENTRY 1
|
||||
#define XBT_ERROR 2
|
||||
#define XBT_EXIT 3
|
||||
|
||||
void xfs_btree_trace_argbi(const char *, struct xfs_btree_cur *,
|
||||
struct xfs_buf *, int, int);
|
||||
void xfs_btree_trace_argbii(const char *, struct xfs_btree_cur *,
|
||||
struct xfs_buf *, int, int, int);
|
||||
void xfs_btree_trace_argi(const char *, struct xfs_btree_cur *, int, int);
|
||||
void xfs_btree_trace_argipk(const char *, struct xfs_btree_cur *, int,
|
||||
union xfs_btree_ptr, union xfs_btree_key *, int);
|
||||
void xfs_btree_trace_argipr(const char *, struct xfs_btree_cur *, int,
|
||||
union xfs_btree_ptr, union xfs_btree_rec *, int);
|
||||
void xfs_btree_trace_argik(const char *, struct xfs_btree_cur *, int,
|
||||
union xfs_btree_key *, int);
|
||||
void xfs_btree_trace_argr(const char *, struct xfs_btree_cur *,
|
||||
union xfs_btree_rec *, int);
|
||||
void xfs_btree_trace_cursor(const char *, struct xfs_btree_cur *, int, int);
|
||||
|
||||
#define XFS_BTREE_TRACE_ARGBI(c, b, i) \
|
||||
xfs_btree_trace_argbi(__func__, c, b, i, __LINE__)
|
||||
#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) \
|
||||
xfs_btree_trace_argbii(__func__, c, b, i, j, __LINE__)
|
||||
#define XFS_BTREE_TRACE_ARGI(c, i) \
|
||||
xfs_btree_trace_argi(__func__, c, i, __LINE__)
|
||||
#define XFS_BTREE_TRACE_ARGIPK(c, i, p, k) \
|
||||
xfs_btree_trace_argipk(__func__, c, i, p, k, __LINE__)
|
||||
#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r) \
|
||||
xfs_btree_trace_argipr(__func__, c, i, p, r, __LINE__)
|
||||
#define XFS_BTREE_TRACE_ARGIK(c, i, k) \
|
||||
xfs_btree_trace_argik(__func__, c, i, k, __LINE__)
|
||||
#define XFS_BTREE_TRACE_ARGR(c, r) \
|
||||
xfs_btree_trace_argr(__func__, c, r, __LINE__)
|
||||
#define XFS_BTREE_TRACE_CURSOR(c, t) \
|
||||
xfs_btree_trace_cursor(__func__, c, t, __LINE__)
|
||||
#else
|
||||
#define XFS_BTREE_TRACE_ARGBI(c, b, i)
|
||||
#define XFS_BTREE_TRACE_ARGBII(c, b, i, j)
|
||||
#define XFS_BTREE_TRACE_ARGI(c, i)
|
||||
#define XFS_BTREE_TRACE_ARGIPK(c, i, p, s)
|
||||
#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r)
|
||||
#define XFS_BTREE_TRACE_ARGIK(c, i, k)
|
||||
#define XFS_BTREE_TRACE_ARGR(c, r)
|
||||
#define XFS_BTREE_TRACE_CURSOR(c, t)
|
||||
#endif /* XFS_BTREE_TRACE */
|
||||
|
||||
#endif /* __XFS_BTREE_TRACE_H__ */
|
@ -90,13 +90,11 @@ xfs_buf_item_flush_log_debug(
|
||||
uint first,
|
||||
uint last)
|
||||
{
|
||||
xfs_buf_log_item_t *bip;
|
||||
xfs_buf_log_item_t *bip = bp->b_fspriv;
|
||||
uint nbytes;
|
||||
|
||||
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
|
||||
if ((bip == NULL) || (bip->bli_item.li_type != XFS_LI_BUF)) {
|
||||
if (bip == NULL || (bip->bli_item.li_type != XFS_LI_BUF))
|
||||
return;
|
||||
}
|
||||
|
||||
ASSERT(bip->bli_logged != NULL);
|
||||
nbytes = last - first + 1;
|
||||
@ -408,7 +406,7 @@ xfs_buf_item_unpin(
|
||||
int stale = bip->bli_flags & XFS_BLI_STALE;
|
||||
int freed;
|
||||
|
||||
ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
|
||||
ASSERT(bp->b_fspriv == bip);
|
||||
ASSERT(atomic_read(&bip->bli_refcount) > 0);
|
||||
|
||||
trace_xfs_buf_item_unpin(bip);
|
||||
@ -420,7 +418,7 @@ xfs_buf_item_unpin(
|
||||
|
||||
if (freed && stale) {
|
||||
ASSERT(bip->bli_flags & XFS_BLI_STALE);
|
||||
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
|
||||
ASSERT(xfs_buf_islocked(bp));
|
||||
ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
|
||||
ASSERT(XFS_BUF_ISSTALE(bp));
|
||||
ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
|
||||
@ -443,7 +441,7 @@ xfs_buf_item_unpin(
|
||||
* Since the transaction no longer refers to the buffer,
|
||||
* the buffer should no longer refer to the transaction.
|
||||
*/
|
||||
XFS_BUF_SET_FSPRIVATE2(bp, NULL);
|
||||
bp->b_transp = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -454,13 +452,13 @@ xfs_buf_item_unpin(
|
||||
*/
|
||||
if (bip->bli_flags & XFS_BLI_STALE_INODE) {
|
||||
xfs_buf_do_callbacks(bp);
|
||||
XFS_BUF_SET_FSPRIVATE(bp, NULL);
|
||||
XFS_BUF_CLR_IODONE_FUNC(bp);
|
||||
bp->b_fspriv = NULL;
|
||||
bp->b_iodone = NULL;
|
||||
} else {
|
||||
spin_lock(&ailp->xa_lock);
|
||||
xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
|
||||
xfs_buf_item_relse(bp);
|
||||
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL);
|
||||
ASSERT(bp->b_fspriv == NULL);
|
||||
}
|
||||
xfs_buf_relse(bp);
|
||||
}
|
||||
@ -483,7 +481,7 @@ xfs_buf_item_trylock(
|
||||
|
||||
if (XFS_BUF_ISPINNED(bp))
|
||||
return XFS_ITEM_PINNED;
|
||||
if (!XFS_BUF_CPSEMA(bp))
|
||||
if (!xfs_buf_trylock(bp))
|
||||
return XFS_ITEM_LOCKED;
|
||||
|
||||
/* take a reference to the buffer. */
|
||||
@ -525,7 +523,7 @@ xfs_buf_item_unlock(
|
||||
uint hold;
|
||||
|
||||
/* Clear the buffer's association with this transaction. */
|
||||
XFS_BUF_SET_FSPRIVATE2(bp, NULL);
|
||||
bp->b_transp = NULL;
|
||||
|
||||
/*
|
||||
* If this is a transaction abort, don't return early. Instead, allow
|
||||
@ -684,7 +682,7 @@ xfs_buf_item_init(
|
||||
xfs_buf_t *bp,
|
||||
xfs_mount_t *mp)
|
||||
{
|
||||
xfs_log_item_t *lip;
|
||||
xfs_log_item_t *lip = bp->b_fspriv;
|
||||
xfs_buf_log_item_t *bip;
|
||||
int chunks;
|
||||
int map_size;
|
||||
@ -696,12 +694,8 @@ xfs_buf_item_init(
|
||||
* nothing to do here so return.
|
||||
*/
|
||||
ASSERT(bp->b_target->bt_mount == mp);
|
||||
if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
|
||||
lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
|
||||
if (lip->li_type == XFS_LI_BUF) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (lip != NULL && lip->li_type == XFS_LI_BUF)
|
||||
return;
|
||||
|
||||
/*
|
||||
* chunks is the number of XFS_BLF_CHUNK size pieces
|
||||
@ -740,11 +734,9 @@ xfs_buf_item_init(
|
||||
* Put the buf item into the list of items attached to the
|
||||
* buffer at the front.
|
||||
*/
|
||||
if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
|
||||
bip->bli_item.li_bio_list =
|
||||
XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
|
||||
}
|
||||
XFS_BUF_SET_FSPRIVATE(bp, bip);
|
||||
if (bp->b_fspriv)
|
||||
bip->bli_item.li_bio_list = bp->b_fspriv;
|
||||
bp->b_fspriv = bip;
|
||||
}
|
||||
|
||||
|
||||
@ -876,12 +868,11 @@ xfs_buf_item_relse(
|
||||
|
||||
trace_xfs_buf_item_relse(bp, _RET_IP_);
|
||||
|
||||
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
|
||||
XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list);
|
||||
if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) &&
|
||||
(XFS_BUF_IODONE_FUNC(bp) != NULL)) {
|
||||
XFS_BUF_CLR_IODONE_FUNC(bp);
|
||||
}
|
||||
bip = bp->b_fspriv;
|
||||
bp->b_fspriv = bip->bli_item.li_bio_list;
|
||||
if (bp->b_fspriv == NULL)
|
||||
bp->b_iodone = NULL;
|
||||
|
||||
xfs_buf_rele(bp);
|
||||
xfs_buf_item_free(bip);
|
||||
}
|
||||
@ -905,20 +896,20 @@ xfs_buf_attach_iodone(
	xfs_log_item_t	*head_lip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
	ASSERT(xfs_buf_islocked(bp));

	lip->li_cb = cb;
	if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
		head_lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
	head_lip = bp->b_fspriv;
	if (head_lip) {
		lip->li_bio_list = head_lip->li_bio_list;
		head_lip->li_bio_list = lip;
	} else {
		XFS_BUF_SET_FSPRIVATE(bp, lip);
		bp->b_fspriv = lip;
	}

	ASSERT((XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks) ||
	       (XFS_BUF_IODONE_FUNC(bp) == NULL));
	XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);
	bp->b_iodone = xfs_buf_iodone_callbacks;
}
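Several of the buffer-item hunks in this commit drop the XFS_BUF_FSPRIVATE/XFS_BUF_SET_FSPRIVATE and XFS_BUF_IODONE_FUNC wrapper macros in favour of touching b_fspriv and b_iodone directly. The attach pattern itself is just pushing a log item onto the front of a singly linked callback list hung off the buffer. Below is a stripped-down, user-space sketch of that pattern; the demo_* struct and field names are simplified stand-ins, not the kernel definitions.

#include <stdio.h>
#include <stddef.h>

struct demo_log_item {
	struct demo_log_item	*li_bio_list;	/* next item attached to the buffer */
	void			(*li_cb)(struct demo_log_item *); /* per-item callback */
	int			id;
};

struct demo_buf {
	struct demo_log_item	*b_fspriv;	/* head of the attached item list */
	void			(*b_iodone)(struct demo_buf *); /* completion handler */
};

static void demo_item_done(struct demo_log_item *lip)
{
	printf("callback for item %d\n", lip->id);
}

/* run and unhook every attached item, in the spirit of xfs_buf_do_callbacks() */
static void demo_buf_do_callbacks(struct demo_buf *bp)
{
	struct demo_log_item *lip;

	while ((lip = bp->b_fspriv) != NULL) {
		bp->b_fspriv = lip->li_bio_list;	/* pop the head item */
		lip->li_bio_list = NULL;		/* clear its next pointer */
		lip->li_cb(lip);
	}
}

/* push an item onto the front of the list, in the spirit of xfs_buf_attach_iodone() */
static void demo_buf_attach_iodone(struct demo_buf *bp, struct demo_log_item *lip)
{
	lip->li_cb = demo_item_done;
	lip->li_bio_list = bp->b_fspriv;
	bp->b_fspriv = lip;
	bp->b_iodone = demo_buf_do_callbacks;
}

int main(void)
{
	struct demo_buf bp = { NULL, NULL };
	struct demo_log_item a = { NULL, NULL, 1 }, b = { NULL, NULL, 2 };

	demo_buf_attach_iodone(&bp, &a);
	demo_buf_attach_iodone(&bp, &b);
	bp.b_iodone(&bp);	/* runs item 2 then item 1 (list is LIFO) */
	return 0;
}

The real code keeps the same LIFO list; the commit only changes how the head pointer and the iodone handler are reached, not how the list behaves.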
/*
|
||||
@ -939,8 +930,8 @@ xfs_buf_do_callbacks(
|
||||
{
|
||||
struct xfs_log_item *lip;
|
||||
|
||||
while ((lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *)) != NULL) {
|
||||
XFS_BUF_SET_FSPRIVATE(bp, lip->li_bio_list);
|
||||
while ((lip = bp->b_fspriv) != NULL) {
|
||||
bp->b_fspriv = lip->li_bio_list;
|
||||
ASSERT(lip->li_cb != NULL);
|
||||
/*
|
||||
* Clear the next pointer so we don't have any
|
||||
@ -1007,7 +998,7 @@ xfs_buf_iodone_callbacks(
|
||||
XFS_BUF_DONE(bp);
|
||||
XFS_BUF_SET_START(bp);
|
||||
}
|
||||
ASSERT(XFS_BUF_IODONE_FUNC(bp));
|
||||
ASSERT(bp->b_iodone != NULL);
|
||||
trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
|
||||
xfs_buf_relse(bp);
|
||||
return;
|
||||
@ -1026,8 +1017,8 @@ xfs_buf_iodone_callbacks(
|
||||
|
||||
do_callbacks:
|
||||
xfs_buf_do_callbacks(bp);
|
||||
XFS_BUF_SET_FSPRIVATE(bp, NULL);
|
||||
XFS_BUF_CLR_IODONE_FUNC(bp);
|
||||
bp->b_fspriv = NULL;
|
||||
bp->b_iodone = NULL;
|
||||
xfs_buf_ioend(bp, 0);
|
||||
}
|
||||
|
||||
|
@ -24,11 +24,12 @@
|
||||
#include "xfs_trans.h"
|
||||
#include "xfs_sb.h"
|
||||
#include "xfs_ag.h"
|
||||
#include "xfs_dir2.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_da_btree.h"
|
||||
#include "xfs_bmap_btree.h"
|
||||
#include "xfs_dir2_sf.h"
|
||||
#include "xfs_dir2.h"
|
||||
#include "xfs_dir2_format.h"
|
||||
#include "xfs_dir2_priv.h"
|
||||
#include "xfs_dinode.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_inode_item.h"
|
||||
@ -36,10 +37,6 @@
|
||||
#include "xfs_bmap.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_attr_leaf.h"
|
||||
#include "xfs_dir2_data.h"
|
||||
#include "xfs_dir2_leaf.h"
|
||||
#include "xfs_dir2_block.h"
|
||||
#include "xfs_dir2_node.h"
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_trace.h"
|
||||
|
||||
@ -89,7 +86,7 @@ STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
|
||||
*/
|
||||
STATIC uint xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count);
|
||||
STATIC int xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp);
|
||||
STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra);
|
||||
STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps);
|
||||
STATIC int xfs_da_blk_unlink(xfs_da_state_t *state,
|
||||
xfs_da_state_blk_t *drop_blk,
|
||||
xfs_da_state_blk_t *save_blk);
|
||||
@ -321,11 +318,11 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
|
||||
ASSERT(bp != NULL);
|
||||
node = bp->data;
|
||||
oldroot = blk1->bp->data;
|
||||
if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC) {
|
||||
if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
|
||||
size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
|
||||
(char *)oldroot);
|
||||
} else {
|
||||
ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
|
||||
ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
|
||||
leaf = (xfs_dir2_leaf_t *)oldroot;
|
||||
size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
|
||||
(char *)leaf);
|
||||
@ -352,7 +349,7 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
|
||||
node->hdr.count = cpu_to_be16(2);
|
||||
|
||||
#ifdef DEBUG
|
||||
if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC) {
|
||||
if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
|
||||
ASSERT(blk1->blkno >= mp->m_dirleafblk &&
|
||||
blk1->blkno < mp->m_dirfreeblk);
|
||||
ASSERT(blk2->blkno >= mp->m_dirleafblk &&
|
||||
@ -384,7 +381,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
|
||||
int useextra;
|
||||
|
||||
node = oldblk->bp->data;
|
||||
ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
|
||||
ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
|
||||
|
||||
/*
|
||||
* With V2 dirs the extra block is data or freespace.
|
||||
@ -483,8 +480,8 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
		node1 = node2;
		node2 = tmpnode;
	}
	ASSERT(be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC);
	ASSERT(be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC);
	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;
	if (count == 0)
		return;
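Many hunks in this file make the same substitution: be16_to_cpu(field) == CONSTANT becomes field == cpu_to_be16(CONSTANT). The two forms test the same thing, but when the constant side is the one converted, the byte swap is folded at compile time and the on-disk field is compared as-is. A minimal user-space sketch of the idea, using htons/ntohs as stand-ins for the kernel's cpu_to_be16/be16_to_cpu and a made-up magic value:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htons/ntohs stand in for cpu_to_be16/be16_to_cpu */

#define DEMO_NODE_MAGIC	0xfebe	/* made-up magic value for the sketch */

struct demo_blkinfo {
	uint16_t magic;		/* stored big-endian on disk */
};

int main(void)
{
	struct demo_blkinfo info;

	info.magic = htons(DEMO_NODE_MAGIC);	/* simulate an on-disk header */

	/* old style: convert the field for every comparison */
	int old_way = (ntohs(info.magic) == DEMO_NODE_MAGIC);

	/* new style: convert the constant once; the compiler folds it */
	int new_way = (info.magic == htons(DEMO_NODE_MAGIC));

	printf("old=%d new=%d\n", old_way, new_way);	/* both print 1 */
	return 0;
}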
@ -578,7 +575,7 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
|
||||
int tmp;
|
||||
|
||||
node = oldblk->bp->data;
|
||||
ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
|
||||
ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
|
||||
ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
|
||||
ASSERT(newblk->blkno != 0);
|
||||
if (state->args->whichfork == XFS_DATA_FORK)
|
||||
@ -714,7 +711,7 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
|
||||
ASSERT(args != NULL);
|
||||
ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
|
||||
oldroot = root_blk->bp->data;
|
||||
ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC);
|
||||
ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
|
||||
ASSERT(!oldroot->hdr.info.forw);
|
||||
ASSERT(!oldroot->hdr.info.back);
|
||||
|
||||
@ -737,10 +734,10 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
|
||||
ASSERT(bp != NULL);
|
||||
blkinfo = bp->data;
|
||||
if (be16_to_cpu(oldroot->hdr.level) == 1) {
|
||||
ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DIR2_LEAFN_MAGIC ||
|
||||
be16_to_cpu(blkinfo->magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(blkinfo->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
|
||||
blkinfo->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
} else {
|
||||
ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DA_NODE_MAGIC);
|
||||
ASSERT(blkinfo->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
|
||||
}
|
||||
ASSERT(!blkinfo->forw);
|
||||
ASSERT(!blkinfo->back);
|
||||
@ -776,7 +773,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
|
||||
*/
|
||||
blk = &state->path.blk[ state->path.active-1 ];
|
||||
info = blk->bp->data;
|
||||
ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC);
|
||||
ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
|
||||
node = (xfs_da_intnode_t *)info;
|
||||
count = be16_to_cpu(node->hdr.count);
|
||||
if (count > (state->node_ents >> 1)) {
|
||||
@ -836,7 +833,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
|
||||
count -= state->node_ents >> 2;
|
||||
count -= be16_to_cpu(node->hdr.count);
|
||||
node = bp->data;
|
||||
ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
|
||||
ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
|
||||
count -= be16_to_cpu(node->hdr.count);
|
||||
xfs_da_brelse(state->args->trans, bp);
|
||||
if (count >= 0)
|
||||
@ -911,7 +908,7 @@ xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
|
||||
}
|
||||
for (blk--, level--; level >= 0; blk--, level--) {
|
||||
node = blk->bp->data;
|
||||
ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
|
||||
ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
|
||||
btree = &node->btree[ blk->index ];
|
||||
if (be32_to_cpu(btree->hashval) == lasthash)
|
||||
break;
|
||||
@ -979,8 +976,8 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
|
||||
|
||||
drop_node = drop_blk->bp->data;
|
||||
save_node = save_blk->bp->data;
|
||||
ASSERT(be16_to_cpu(drop_node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
|
||||
ASSERT(be16_to_cpu(save_node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
|
||||
ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
|
||||
ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
|
||||
tp = state->args->trans;
|
||||
|
||||
/*
|
||||
@ -1278,8 +1275,8 @@ xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
|
||||
|
||||
node1 = node1_bp->data;
|
||||
node2 = node2_bp->data;
|
||||
ASSERT((be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC) &&
|
||||
(be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC));
|
||||
ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
|
||||
node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
|
||||
if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
|
||||
((be32_to_cpu(node2->btree[0].hashval) <
|
||||
be32_to_cpu(node1->btree[0].hashval)) ||
|
||||
@ -1299,7 +1296,7 @@ xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
|
||||
xfs_da_intnode_t *node;
|
||||
|
||||
node = bp->data;
|
||||
ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
|
||||
ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
|
||||
if (count)
|
||||
*count = be16_to_cpu(node->hdr.count);
|
||||
if (!node->hdr.count)
|
||||
@ -1412,7 +1409,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
|
||||
for (blk = &path->blk[level]; level >= 0; blk--, level--) {
|
||||
ASSERT(blk->bp != NULL);
|
||||
node = blk->bp->data;
|
||||
ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
|
||||
ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
|
||||
if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
|
||||
blk->index++;
|
||||
blkno = be32_to_cpu(node->btree[blk->index].before);
|
||||
@ -1451,9 +1448,9 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
|
||||
return(error);
|
||||
ASSERT(blk->bp != NULL);
|
||||
info = blk->bp->data;
|
||||
ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC ||
|
||||
be16_to_cpu(info->magic) == XFS_DIR2_LEAFN_MAGIC ||
|
||||
be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC);
|
||||
ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
|
||||
info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
|
||||
info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
|
||||
blk->magic = be16_to_cpu(info->magic);
|
||||
if (blk->magic == XFS_DA_NODE_MAGIC) {
|
||||
node = (xfs_da_intnode_t *)info;
|
||||
@ -1546,79 +1543,62 @@ const struct xfs_nameops xfs_default_nameops = {
|
||||
.compname = xfs_da_compname
|
||||
};
|
||||
|
||||
/*
|
||||
* Add a block to the btree ahead of the file.
|
||||
* Return the new block number to the caller.
|
||||
*/
|
||||
int
|
||||
xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
|
||||
xfs_da_grow_inode_int(
|
||||
struct xfs_da_args *args,
|
||||
xfs_fileoff_t *bno,
|
||||
int count)
|
||||
{
|
||||
xfs_fileoff_t bno, b;
|
||||
xfs_bmbt_irec_t map;
|
||||
xfs_bmbt_irec_t *mapp;
|
||||
xfs_inode_t *dp;
|
||||
int nmap, error, w, count, c, got, i, mapi;
|
||||
xfs_trans_t *tp;
|
||||
xfs_mount_t *mp;
|
||||
xfs_drfsbno_t nblks;
|
||||
struct xfs_trans *tp = args->trans;
|
||||
struct xfs_inode *dp = args->dp;
|
||||
int w = args->whichfork;
|
||||
xfs_drfsbno_t nblks = dp->i_d.di_nblocks;
|
||||
struct xfs_bmbt_irec map, *mapp;
|
||||
int nmap, error, got, i, mapi;
|
||||
|
||||
dp = args->dp;
|
||||
mp = dp->i_mount;
|
||||
w = args->whichfork;
|
||||
tp = args->trans;
|
||||
nblks = dp->i_d.di_nblocks;
|
||||
|
||||
/*
|
||||
* For new directories adjust the file offset and block count.
|
||||
*/
|
||||
if (w == XFS_DATA_FORK) {
|
||||
bno = mp->m_dirleafblk;
|
||||
count = mp->m_dirblkfsbs;
|
||||
} else {
|
||||
bno = 0;
|
||||
count = 1;
|
||||
}
|
||||
/*
|
||||
* Find a spot in the file space to put the new block.
|
||||
*/
|
||||
if ((error = xfs_bmap_first_unused(tp, dp, count, &bno, w)))
|
||||
error = xfs_bmap_first_unused(tp, dp, count, bno, w);
|
||||
if (error)
|
||||
return error;
|
||||
if (w == XFS_DATA_FORK)
|
||||
ASSERT(bno >= mp->m_dirleafblk && bno < mp->m_dirfreeblk);
|
||||
|
||||
/*
|
||||
* Try mapping it in one filesystem block.
|
||||
*/
|
||||
nmap = 1;
|
||||
ASSERT(args->firstblock != NULL);
|
||||
if ((error = xfs_bmapi(tp, dp, bno, count,
|
||||
error = xfs_bmapi(tp, dp, *bno, count,
|
||||
xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|
|
||||
XFS_BMAPI_CONTIG,
|
||||
args->firstblock, args->total, &map, &nmap,
|
||||
args->flist))) {
|
||||
args->flist);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
ASSERT(nmap <= 1);
|
||||
if (nmap == 1) {
|
||||
mapp = ↦
|
||||
mapi = 1;
|
||||
}
|
||||
/*
|
||||
* If we didn't get it and the block might work if fragmented,
|
||||
* try without the CONTIG flag. Loop until we get it all.
|
||||
*/
|
||||
else if (nmap == 0 && count > 1) {
|
||||
} else if (nmap == 0 && count > 1) {
|
||||
xfs_fileoff_t b;
|
||||
int c;
|
||||
|
||||
/*
|
||||
* If we didn't get it and the block might work if fragmented,
|
||||
* try without the CONTIG flag. Loop until we get it all.
|
||||
*/
|
||||
mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
|
||||
for (b = bno, mapi = 0; b < bno + count; ) {
|
||||
for (b = *bno, mapi = 0; b < *bno + count; ) {
|
||||
nmap = MIN(XFS_BMAP_MAX_NMAP, count);
|
||||
c = (int)(bno + count - b);
|
||||
if ((error = xfs_bmapi(tp, dp, b, c,
|
||||
c = (int)(*bno + count - b);
|
||||
error = xfs_bmapi(tp, dp, b, c,
|
||||
xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|
|
||||
XFS_BMAPI_METADATA,
|
||||
args->firstblock, args->total,
|
||||
&mapp[mapi], &nmap, args->flist))) {
|
||||
kmem_free(mapp);
|
||||
return error;
|
||||
}
|
||||
&mapp[mapi], &nmap, args->flist);
|
||||
if (error)
|
||||
goto out_free_map;
|
||||
if (nmap < 1)
|
||||
break;
|
||||
mapi += nmap;
|
||||
@ -1629,24 +1609,53 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
|
||||
mapi = 0;
|
||||
mapp = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Count the blocks we got, make sure it matches the total.
|
||||
*/
|
||||
for (i = 0, got = 0; i < mapi; i++)
|
||||
got += mapp[i].br_blockcount;
|
||||
if (got != count || mapp[0].br_startoff != bno ||
|
||||
if (got != count || mapp[0].br_startoff != *bno ||
|
||||
mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
|
||||
bno + count) {
|
||||
if (mapp != &map)
|
||||
kmem_free(mapp);
|
||||
return XFS_ERROR(ENOSPC);
|
||||
*bno + count) {
|
||||
error = XFS_ERROR(ENOSPC);
|
||||
goto out_free_map;
|
||||
}
|
||||
if (mapp != &map)
|
||||
kmem_free(mapp);
|
||||
|
||||
/* account for newly allocated blocks in reserved blocks total */
|
||||
args->total -= dp->i_d.di_nblocks - nblks;
|
||||
*new_blkno = (xfs_dablk_t)bno;
|
||||
return 0;
|
||||
|
||||
out_free_map:
|
||||
if (mapp != &map)
|
||||
kmem_free(mapp);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			count;
	int			error;

	if (args->whichfork == XFS_DATA_FORK) {
		bno = args->dp->i_mount->m_dirleafblk;
		count = args->dp->i_mount->m_dirblkfsbs;
	} else {
		bno = 0;
		count = 1;
	}

	error = xfs_da_grow_inode_int(args, &bno, count);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}
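The hunk above is the "factor out xfs_da_grow_inode_int" change from this series: the allocation loop moves into a shared helper, and xfs_da_grow_inode becomes a thin wrapper that only chooses the starting offset and block count for the fork. A toy sketch of the same split follows; the fake allocator stands in for xfs_bmap_first_unused/xfs_bmapi, and all demo_* names, the fork encoding, and the numbers are simplified for illustration only.

#include <stdio.h>

/* pretend "file space": the next free offset, per fork (0 = data, 1 = attr) */
static long next_free[2];

/* shared helper: find space at or above *bno and map count blocks there,
 * in the spirit of xfs_da_grow_inode_int() */
static int demo_grow_inode_int(int whichfork, long *bno, int count)
{
	if (count <= 0)
		return -1;			/* stand-in for an error return */
	if (next_free[whichfork] > *bno)
		*bno = next_free[whichfork];	/* first free spot past the request */
	next_free[whichfork] = *bno + count;
	return 0;
}

/* thin wrapper: pick offset/count for the fork, in the spirit of xfs_da_grow_inode() */
static int demo_grow_inode(int whichfork, long *new_blkno)
{
	long	bno;
	int	count;
	int	error;

	if (whichfork == 0) {		/* "data fork": directory blocks */
		bno = 100;		/* stand-in for m_dirleafblk */
		count = 4;		/* stand-in for m_dirblkfsbs */
	} else {			/* "attr fork": a single block */
		bno = 0;
		count = 1;
	}

	error = demo_grow_inode_int(whichfork, &bno, count);
	if (!error)
		*new_blkno = bno;
	return error;
}

int main(void)
{
	long blkno;

	if (!demo_grow_inode(0, &blkno))
		printf("new dir block at %ld\n", blkno);	/* prints 100 */
	return 0;
}

The payoff of the split, visible later in the diff, is that xfs_dir2_grow_inode can call the same helper instead of carrying its own copy of the mapping loop.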
/*
|
||||
@ -1704,12 +1713,12 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
|
||||
/*
|
||||
* Get values from the moved block.
|
||||
*/
|
||||
if (be16_to_cpu(dead_info->magic) == XFS_DIR2_LEAFN_MAGIC) {
|
||||
if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
|
||||
dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
|
||||
dead_level = 0;
|
||||
dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
|
||||
} else {
|
||||
ASSERT(be16_to_cpu(dead_info->magic) == XFS_DA_NODE_MAGIC);
|
||||
ASSERT(dead_info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
|
||||
dead_node = (xfs_da_intnode_t *)dead_info;
|
||||
dead_level = be16_to_cpu(dead_node->hdr.level);
|
||||
dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
|
||||
@ -1768,8 +1777,8 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
|
||||
if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
|
||||
goto done;
|
||||
par_node = par_buf->data;
|
||||
if (unlikely(
|
||||
be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC ||
|
||||
if (unlikely(par_node->hdr.info.magic !=
|
||||
cpu_to_be16(XFS_DA_NODE_MAGIC) ||
|
||||
(level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
|
||||
XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
|
||||
XFS_ERRLEVEL_LOW, mp);
|
||||
@ -1820,7 +1829,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
|
||||
par_node = par_buf->data;
|
||||
if (unlikely(
|
||||
be16_to_cpu(par_node->hdr.level) != level ||
|
||||
be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC)) {
|
||||
par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
|
||||
XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
|
||||
XFS_ERRLEVEL_LOW, mp);
|
||||
error = XFS_ERROR(EFSCORRUPTED);
|
||||
@ -1930,8 +1939,7 @@ xfs_da_do_buf(
|
||||
xfs_daddr_t *mappedbnop,
|
||||
xfs_dabuf_t **bpp,
|
||||
int whichfork,
|
||||
int caller,
|
||||
inst_t *ra)
|
||||
int caller)
|
||||
{
|
||||
xfs_buf_t *bp = NULL;
|
||||
xfs_buf_t **bplist;
|
||||
@ -2070,25 +2078,22 @@ xfs_da_do_buf(
|
||||
* Build a dabuf structure.
|
||||
*/
|
||||
if (bplist) {
|
||||
rbp = xfs_da_buf_make(nbplist, bplist, ra);
|
||||
rbp = xfs_da_buf_make(nbplist, bplist);
|
||||
} else if (bp)
|
||||
rbp = xfs_da_buf_make(1, &bp, ra);
|
||||
rbp = xfs_da_buf_make(1, &bp);
|
||||
else
|
||||
rbp = NULL;
|
||||
/*
|
||||
* For read_buf, check the magic number.
|
||||
*/
|
||||
if (caller == 1) {
|
||||
xfs_dir2_data_t *data;
|
||||
xfs_dir2_free_t *free;
|
||||
xfs_da_blkinfo_t *info;
|
||||
xfs_dir2_data_hdr_t *hdr = rbp->data;
|
||||
xfs_dir2_free_t *free = rbp->data;
|
||||
xfs_da_blkinfo_t *info = rbp->data;
|
||||
uint magic, magic1;
|
||||
|
||||
info = rbp->data;
|
||||
data = rbp->data;
|
||||
free = rbp->data;
|
||||
magic = be16_to_cpu(info->magic);
|
||||
magic1 = be32_to_cpu(data->hdr.magic);
|
||||
magic1 = be32_to_cpu(hdr->magic);
|
||||
if (unlikely(
|
||||
XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
|
||||
(magic != XFS_ATTR_LEAF_MAGIC) &&
|
||||
@ -2096,7 +2101,7 @@ xfs_da_do_buf(
|
||||
(magic != XFS_DIR2_LEAFN_MAGIC) &&
|
||||
(magic1 != XFS_DIR2_BLOCK_MAGIC) &&
|
||||
(magic1 != XFS_DIR2_DATA_MAGIC) &&
|
||||
(be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC),
|
||||
(free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
|
||||
mp, XFS_ERRTAG_DA_READ_BUF,
|
||||
XFS_RANDOM_DA_READ_BUF))) {
|
||||
trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
|
||||
@ -2143,8 +2148,7 @@ xfs_da_get_buf(
|
||||
xfs_dabuf_t **bpp,
|
||||
int whichfork)
|
||||
{
|
||||
return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0,
|
||||
(inst_t *)__return_address);
|
||||
return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2159,8 +2163,7 @@ xfs_da_read_buf(
|
||||
xfs_dabuf_t **bpp,
|
||||
int whichfork)
|
||||
{
|
||||
return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1,
|
||||
(inst_t *)__return_address);
|
||||
return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2176,8 +2179,7 @@ xfs_da_reada_buf(
|
||||
xfs_daddr_t rval;
|
||||
|
||||
rval = -1;
|
||||
if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3,
|
||||
(inst_t *)__return_address))
|
||||
if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3))
|
||||
return -1;
|
||||
else
|
||||
return rval;
|
||||
@ -2235,17 +2237,12 @@ xfs_da_state_free(xfs_da_state_t *state)
|
||||
kmem_zone_free(xfs_da_state_zone, state);
|
||||
}
|
||||
|
||||
#ifdef XFS_DABUF_DEBUG
|
||||
xfs_dabuf_t *xfs_dabuf_global_list;
|
||||
static DEFINE_SPINLOCK(xfs_dabuf_global_lock);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Create a dabuf.
|
||||
*/
|
||||
/* ARGSUSED */
|
||||
STATIC xfs_dabuf_t *
|
||||
xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra)
|
||||
xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
|
||||
{
|
||||
xfs_buf_t *bp;
|
||||
xfs_dabuf_t *dabuf;
|
||||
@ -2257,11 +2254,6 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra)
|
||||
else
|
||||
dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS);
|
||||
dabuf->dirty = 0;
|
||||
#ifdef XFS_DABUF_DEBUG
|
||||
dabuf->ra = ra;
|
||||
dabuf->target = XFS_BUF_TARGET(bps[0]);
|
||||
dabuf->blkno = XFS_BUF_ADDR(bps[0]);
|
||||
#endif
|
||||
if (nbuf == 1) {
|
||||
dabuf->nbuf = 1;
|
||||
bp = bps[0];
|
||||
@ -2281,23 +2273,6 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra)
|
||||
XFS_BUF_COUNT(bp));
|
||||
}
|
||||
}
|
||||
#ifdef XFS_DABUF_DEBUG
|
||||
{
|
||||
xfs_dabuf_t *p;
|
||||
|
||||
spin_lock(&xfs_dabuf_global_lock);
|
||||
for (p = xfs_dabuf_global_list; p; p = p->next) {
|
||||
ASSERT(p->blkno != dabuf->blkno ||
|
||||
p->target != dabuf->target);
|
||||
}
|
||||
dabuf->prev = NULL;
|
||||
if (xfs_dabuf_global_list)
|
||||
xfs_dabuf_global_list->prev = dabuf;
|
||||
dabuf->next = xfs_dabuf_global_list;
|
||||
xfs_dabuf_global_list = dabuf;
|
||||
spin_unlock(&xfs_dabuf_global_lock);
|
||||
}
|
||||
#endif
|
||||
return dabuf;
|
||||
}
|
||||
|
||||
@ -2333,25 +2308,12 @@ xfs_da_buf_done(xfs_dabuf_t *dabuf)
|
||||
ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
|
||||
if (dabuf->dirty)
|
||||
xfs_da_buf_clean(dabuf);
|
||||
if (dabuf->nbuf > 1)
|
||||
if (dabuf->nbuf > 1) {
|
||||
kmem_free(dabuf->data);
|
||||
#ifdef XFS_DABUF_DEBUG
|
||||
{
|
||||
spin_lock(&xfs_dabuf_global_lock);
|
||||
if (dabuf->prev)
|
||||
dabuf->prev->next = dabuf->next;
|
||||
else
|
||||
xfs_dabuf_global_list = dabuf->next;
|
||||
if (dabuf->next)
|
||||
dabuf->next->prev = dabuf->prev;
|
||||
spin_unlock(&xfs_dabuf_global_lock);
|
||||
}
|
||||
memset(dabuf, 0, XFS_DA_BUF_SIZE(dabuf->nbuf));
|
||||
#endif
|
||||
if (dabuf->nbuf == 1)
|
||||
kmem_zone_free(xfs_dabuf_zone, dabuf);
|
||||
else
|
||||
kmem_free(dabuf);
|
||||
} else {
|
||||
kmem_zone_free(xfs_dabuf_zone, dabuf);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -145,22 +145,11 @@ typedef struct xfs_dabuf {
|
||||
short dirty; /* data needs to be copied back */
|
||||
short bbcount; /* how large is data in bbs */
|
||||
void *data; /* pointer for buffers' data */
|
||||
#ifdef XFS_DABUF_DEBUG
|
||||
inst_t *ra; /* return address of caller to make */
|
||||
struct xfs_dabuf *next; /* next in global chain */
|
||||
struct xfs_dabuf *prev; /* previous in global chain */
|
||||
struct xfs_buftarg *target; /* device for buffer */
|
||||
xfs_daddr_t blkno; /* daddr first in bps[0] */
|
||||
#endif
|
||||
struct xfs_buf *bps[1]; /* actually nbuf of these */
|
||||
} xfs_dabuf_t;
|
||||
#define XFS_DA_BUF_SIZE(n) \
|
||||
(sizeof(xfs_dabuf_t) + sizeof(struct xfs_buf *) * ((n) - 1))
|
||||
|
||||
#ifdef XFS_DABUF_DEBUG
|
||||
extern xfs_dabuf_t *xfs_dabuf_global_list;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Storage for holding state during Btree searches and split/join ops.
|
||||
*
|
||||
@ -248,6 +237,8 @@ int xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
|
||||
* Utility routines.
|
||||
*/
|
||||
int xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno);
|
||||
int xfs_da_grow_inode_int(struct xfs_da_args *args, xfs_fileoff_t *bno,
|
||||
int count);
|
||||
int xfs_da_get_buf(struct xfs_trans *trans, struct xfs_inode *dp,
|
||||
xfs_dablk_t bno, xfs_daddr_t mappedbno,
|
||||
xfs_dabuf_t **bp, int whichfork);
|
||||
|
@ -24,20 +24,17 @@
|
||||
#include "xfs_trans.h"
|
||||
#include "xfs_sb.h"
|
||||
#include "xfs_ag.h"
|
||||
#include "xfs_dir2.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_da_btree.h"
|
||||
#include "xfs_bmap_btree.h"
|
||||
#include "xfs_alloc_btree.h"
|
||||
#include "xfs_dir2_sf.h"
|
||||
#include "xfs_dinode.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_inode_item.h"
|
||||
#include "xfs_bmap.h"
|
||||
#include "xfs_dir2_data.h"
|
||||
#include "xfs_dir2_leaf.h"
|
||||
#include "xfs_dir2_block.h"
|
||||
#include "xfs_dir2_node.h"
|
||||
#include "xfs_dir2.h"
|
||||
#include "xfs_dir2_format.h"
|
||||
#include "xfs_dir2_priv.h"
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_vnodeops.h"
|
||||
#include "xfs_trace.h"
|
||||
@ -122,15 +119,15 @@ int
|
||||
xfs_dir_isempty(
|
||||
xfs_inode_t *dp)
|
||||
{
|
||||
xfs_dir2_sf_t *sfp;
|
||||
xfs_dir2_sf_hdr_t *sfp;
|
||||
|
||||
ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
|
||||
if (dp->i_d.di_size == 0) /* might happen during shutdown. */
|
||||
return 1;
|
||||
if (dp->i_d.di_size > XFS_IFORK_DSIZE(dp))
|
||||
return 0;
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
return !sfp->hdr.count;
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
return !sfp->count;
|
||||
}
|
|
||||
@ -500,129 +497,34 @@ xfs_dir_canenter(
|
||||
|
||||
/*
|
||||
* Add a block to the directory.
|
||||
* This routine is for data and free blocks, not leaf/node blocks
|
||||
* which are handled by xfs_da_grow_inode.
|
||||
*
|
||||
* This routine is for data and free blocks, not leaf/node blocks which are
|
||||
* handled by xfs_da_grow_inode.
|
||||
*/
|
||||
int
|
||||
xfs_dir2_grow_inode(
|
||||
xfs_da_args_t *args,
|
||||
int space, /* v2 dir's space XFS_DIR2_xxx_SPACE */
|
||||
xfs_dir2_db_t *dbp) /* out: block number added */
|
||||
struct xfs_da_args *args,
|
||||
int space, /* v2 dir's space XFS_DIR2_xxx_SPACE */
|
||||
xfs_dir2_db_t *dbp) /* out: block number added */
|
||||
{
|
||||
xfs_fileoff_t bno; /* directory offset of new block */
|
||||
int count; /* count of filesystem blocks */
|
||||
xfs_inode_t *dp; /* incore directory inode */
|
||||
int error;
|
||||
int got; /* blocks actually mapped */
|
||||
int i;
|
||||
xfs_bmbt_irec_t map; /* single structure for bmap */
|
||||
int mapi; /* mapping index */
|
||||
xfs_bmbt_irec_t *mapp; /* bmap mapping structure(s) */
|
||||
xfs_mount_t *mp;
|
||||
int nmap; /* number of bmap entries */
|
||||
xfs_trans_t *tp;
|
||||
xfs_drfsbno_t nblks;
|
||||
struct xfs_inode *dp = args->dp;
|
||||
struct xfs_mount *mp = dp->i_mount;
|
||||
xfs_fileoff_t bno; /* directory offset of new block */
|
||||
int count; /* count of filesystem blocks */
|
||||
int error;
|
||||
|
||||
trace_xfs_dir2_grow_inode(args, space);
|
||||
|
||||
dp = args->dp;
|
||||
tp = args->trans;
|
||||
mp = dp->i_mount;
|
||||
nblks = dp->i_d.di_nblocks;
|
||||
/*
|
||||
* Set lowest possible block in the space requested.
|
||||
*/
|
||||
bno = XFS_B_TO_FSBT(mp, space * XFS_DIR2_SPACE_SIZE);
|
||||
count = mp->m_dirblkfsbs;
|
||||
/*
|
||||
* Find the first hole for our block.
|
||||
*/
|
||||
if ((error = xfs_bmap_first_unused(tp, dp, count, &bno, XFS_DATA_FORK)))
|
||||
|
||||
error = xfs_da_grow_inode_int(args, &bno, count);
|
||||
if (error)
|
||||
return error;
|
||||
nmap = 1;
|
||||
ASSERT(args->firstblock != NULL);
|
||||
/*
|
||||
* Try mapping the new block contiguously (one extent).
|
||||
*/
|
||||
if ((error = xfs_bmapi(tp, dp, bno, count,
|
||||
XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
|
||||
args->firstblock, args->total, &map, &nmap,
|
||||
args->flist)))
|
||||
return error;
|
||||
ASSERT(nmap <= 1);
|
||||
if (nmap == 1) {
|
||||
mapp = ↦
|
||||
mapi = 1;
|
||||
}
|
||||
/*
|
||||
* Didn't work and this is a multiple-fsb directory block.
|
||||
* Try again with contiguous flag turned on.
|
||||
*/
|
||||
else if (nmap == 0 && count > 1) {
|
||||
xfs_fileoff_t b; /* current file offset */
|
||||
|
||||
/*
|
||||
* Space for maximum number of mappings.
|
||||
*/
|
||||
mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
|
||||
/*
|
||||
* Iterate until we get to the end of our block.
|
||||
*/
|
||||
for (b = bno, mapi = 0; b < bno + count; ) {
|
||||
int c; /* current fsb count */
|
||||
|
||||
/*
|
||||
* Can't map more than MAX_NMAP at once.
|
||||
*/
|
||||
nmap = MIN(XFS_BMAP_MAX_NMAP, count);
|
||||
c = (int)(bno + count - b);
|
||||
if ((error = xfs_bmapi(tp, dp, b, c,
|
||||
XFS_BMAPI_WRITE|XFS_BMAPI_METADATA,
|
||||
args->firstblock, args->total,
|
||||
&mapp[mapi], &nmap, args->flist))) {
|
||||
kmem_free(mapp);
|
||||
return error;
|
||||
}
|
||||
if (nmap < 1)
|
||||
break;
|
||||
/*
|
||||
* Add this bunch into our table, go to the next offset.
|
||||
*/
|
||||
mapi += nmap;
|
||||
b = mapp[mapi - 1].br_startoff +
|
||||
mapp[mapi - 1].br_blockcount;
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Didn't work.
|
||||
*/
|
||||
else {
|
||||
mapi = 0;
|
||||
mapp = NULL;
|
||||
}
|
||||
/*
|
||||
* See how many fsb's we got.
|
||||
*/
|
||||
for (i = 0, got = 0; i < mapi; i++)
|
||||
got += mapp[i].br_blockcount;
|
||||
/*
|
||||
* Didn't get enough fsb's, or the first/last block's are wrong.
|
||||
*/
|
||||
if (got != count || mapp[0].br_startoff != bno ||
|
||||
mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
|
||||
bno + count) {
|
||||
if (mapp != &map)
|
||||
kmem_free(mapp);
|
||||
return XFS_ERROR(ENOSPC);
|
||||
}
|
||||
/*
|
||||
* Done with the temporary mapping table.
|
||||
*/
|
||||
if (mapp != &map)
|
||||
kmem_free(mapp);
|
||||
|
||||
/* account for newly allocated blocks in reserved blocks total */
|
||||
args->total -= dp->i_d.di_nblocks - nblks;
|
||||
*dbp = xfs_dir2_da_to_db(mp, (xfs_dablk_t)bno);
|
||||
|
||||
/*
|
||||
@ -634,7 +536,7 @@ xfs_dir2_grow_inode(
|
||||
size = XFS_FSB_TO_B(mp, bno + count);
|
||||
if (size > dp->i_d.di_size) {
|
||||
dp->i_d.di_size = size;
|
||||
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
|
||||
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
@ -16,49 +16,14 @@
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
#ifndef __XFS_DIR2_H__
|
||||
#define __XFS_DIR2_H__
|
||||
#define __XFS_DIR2_H__
|
||||
|
||||
struct uio;
|
||||
struct xfs_dabuf;
|
||||
struct xfs_da_args;
|
||||
struct xfs_dir2_put_args;
|
||||
struct xfs_bmap_free;
|
||||
struct xfs_da_args;
|
||||
struct xfs_inode;
|
||||
struct xfs_mount;
|
||||
struct xfs_trans;
|
||||
|
||||
/*
 * Directory version 2.
 * There are 4 possible formats:
 *	shortform
 *	single block - data with embedded leaf at the end
 *	multiple data blocks, single leaf+freeindex block
 *	data blocks, node&leaf blocks (btree), freeindex blocks
 *
 * The shortform format is in xfs_dir2_sf.h.
 * The single block format is in xfs_dir2_block.h.
 * The data block format is in xfs_dir2_data.h.
 * The leaf and freeindex block formats are in xfs_dir2_leaf.h.
 * Node blocks are the same as the other version, in xfs_da_btree.h.
 */

/*
 * Byte offset in data block and shortform entry.
 */
typedef	__uint16_t	xfs_dir2_data_off_t;
#define	NULLDATAOFF	0xffffU
typedef uint		xfs_dir2_data_aoff_t;	/* argument form */

/*
 * Directory block number (logical dirblk in file)
 */
typedef	__uint32_t	xfs_dir2_db_t;

/*
 * Byte offset in a directory.
 */
typedef	xfs_off_t	xfs_dir2_off_t;

extern struct xfs_name xfs_name_dotdot;
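The typedefs above distinguish an offset within a directory block (xfs_dir2_data_off_t), a logical directory block number (xfs_dir2_db_t), and a flat byte offset in the directory (xfs_dir2_off_t); later hunks combine a block number and an in-block offset into a single 32-bit "dataptr" via helpers such as xfs_dir2_db_off_to_dataptr. The sketch below shows only the packing idea with a made-up 4096-byte block size and demo_* helper names; the real conversions depend on the mount's directory block size and encoding.

#include <stdio.h>
#include <stdint.h>

#define DEMO_DIRBLK_SIZE	4096u	/* made-up directory block size */
#define DEMO_DIRBLK_SHIFT	12u	/* log2 of the block size */

/* pack (logical block, byte offset within the block) into one 32-bit cookie */
static uint32_t demo_db_off_to_dataptr(uint32_t db, uint32_t off)
{
	return (db << DEMO_DIRBLK_SHIFT) | (off & (DEMO_DIRBLK_SIZE - 1));
}

/* unpack the cookie again */
static void demo_dataptr_to_db_off(uint32_t dataptr, uint32_t *db, uint32_t *off)
{
	*db = dataptr >> DEMO_DIRBLK_SHIFT;
	*off = dataptr & (DEMO_DIRBLK_SIZE - 1);
}

int main(void)
{
	uint32_t db, off;
	uint32_t ptr = demo_db_off_to_dataptr(3, 200);

	demo_dataptr_to_db_off(ptr, &db, &off);
	printf("block=%u offset=%u\n", db, off);	/* block=3 offset=200 */
	return 0;
}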
/*
|
||||
@ -86,21 +51,10 @@ extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
|
||||
struct xfs_bmap_free *flist, xfs_extlen_t tot);
|
||||
extern int xfs_dir_canenter(struct xfs_trans *tp, struct xfs_inode *dp,
|
||||
struct xfs_name *name, uint resblks);
|
||||
extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino);
|
||||
|
||||
/*
|
||||
* Utility routines for v2 directories.
|
||||
* Direct call from the bmap code, bypassing the generic directory layer.
|
||||
*/
|
||||
extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
|
||||
xfs_dir2_db_t *dbp);
|
||||
extern int xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp,
|
||||
int *vp);
|
||||
extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp,
|
||||
int *vp);
|
||||
extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
|
||||
struct xfs_dabuf *bp);
|
||||
|
||||
extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
|
||||
const unsigned char *name, int len);
|
||||
extern int xfs_dir2_sf_to_block(struct xfs_da_args *args);
|
||||
|
||||
#endif /* __XFS_DIR2_H__ */
|
||||
|
@ -23,17 +23,14 @@
|
||||
#include "xfs_trans.h"
|
||||
#include "xfs_sb.h"
|
||||
#include "xfs_ag.h"
|
||||
#include "xfs_dir2.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_da_btree.h"
|
||||
#include "xfs_bmap_btree.h"
|
||||
#include "xfs_dir2_sf.h"
|
||||
#include "xfs_dinode.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_inode_item.h"
|
||||
#include "xfs_dir2_data.h"
|
||||
#include "xfs_dir2_leaf.h"
|
||||
#include "xfs_dir2_block.h"
|
||||
#include "xfs_dir2_format.h"
|
||||
#include "xfs_dir2_priv.h"
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_trace.h"
|
||||
|
||||
@ -67,7 +64,7 @@ xfs_dir2_block_addname(
|
||||
xfs_da_args_t *args) /* directory op arguments */
|
||||
{
|
||||
xfs_dir2_data_free_t *bf; /* bestfree table in block */
|
||||
xfs_dir2_block_t *block; /* directory block structure */
|
||||
xfs_dir2_data_hdr_t *hdr; /* block header */
|
||||
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
|
||||
xfs_dabuf_t *bp; /* buffer for block */
|
||||
xfs_dir2_block_tail_t *btp; /* block tail */
|
||||
@ -105,13 +102,13 @@ xfs_dir2_block_addname(
|
||||
return error;
|
||||
}
|
||||
ASSERT(bp != NULL);
|
||||
block = bp->data;
|
||||
hdr = bp->data;
|
||||
/*
|
||||
* Check the magic number, corrupted if wrong.
|
||||
*/
|
||||
if (unlikely(be32_to_cpu(block->hdr.magic) != XFS_DIR2_BLOCK_MAGIC)) {
|
||||
if (unlikely(hdr->magic != cpu_to_be32(XFS_DIR2_BLOCK_MAGIC))) {
|
||||
XFS_CORRUPTION_ERROR("xfs_dir2_block_addname",
|
||||
XFS_ERRLEVEL_LOW, mp, block);
|
||||
XFS_ERRLEVEL_LOW, mp, hdr);
|
||||
xfs_da_brelse(tp, bp);
|
||||
return XFS_ERROR(EFSCORRUPTED);
|
||||
}
|
||||
@ -119,8 +116,8 @@ xfs_dir2_block_addname(
|
||||
/*
|
||||
* Set up pointers to parts of the block.
|
||||
*/
|
||||
bf = block->hdr.bestfree;
|
||||
btp = xfs_dir2_block_tail_p(mp, block);
|
||||
bf = hdr->bestfree;
|
||||
btp = xfs_dir2_block_tail_p(mp, hdr);
|
||||
blp = xfs_dir2_block_leaf_p(btp);
|
||||
/*
|
||||
* No stale entries? Need space for entry and new leaf.
|
||||
@ -133,7 +130,7 @@ xfs_dir2_block_addname(
|
||||
/*
|
||||
* Data object just before the first leaf entry.
|
||||
*/
|
||||
enddup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
|
||||
enddup = (xfs_dir2_data_unused_t *)((char *)hdr + be16_to_cpu(*tagp));
|
||||
/*
|
||||
* If it's not free then can't do this add without cleaning up:
|
||||
* the space before the first leaf entry needs to be free so it
|
||||
@ -146,7 +143,7 @@ xfs_dir2_block_addname(
|
||||
*/
|
||||
else {
|
||||
dup = (xfs_dir2_data_unused_t *)
|
||||
((char *)block + be16_to_cpu(bf[0].offset));
|
||||
((char *)hdr + be16_to_cpu(bf[0].offset));
|
||||
if (dup == enddup) {
|
||||
/*
|
||||
* It is the biggest freespace, is it too small
|
||||
@ -159,7 +156,7 @@ xfs_dir2_block_addname(
|
||||
*/
|
||||
if (be16_to_cpu(bf[1].length) >= len)
|
||||
dup = (xfs_dir2_data_unused_t *)
|
||||
((char *)block +
|
||||
((char *)hdr +
|
||||
be16_to_cpu(bf[1].offset));
|
||||
else
|
||||
dup = NULL;
|
||||
@ -182,7 +179,7 @@ xfs_dir2_block_addname(
|
||||
*/
|
||||
else if (be16_to_cpu(bf[0].length) >= len) {
|
||||
dup = (xfs_dir2_data_unused_t *)
|
||||
((char *)block + be16_to_cpu(bf[0].offset));
|
||||
((char *)hdr + be16_to_cpu(bf[0].offset));
|
||||
compact = 0;
|
||||
}
|
||||
/*
|
||||
@ -196,7 +193,7 @@ xfs_dir2_block_addname(
|
||||
/*
|
||||
* Data object just before the first leaf entry.
|
||||
*/
|
||||
dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
|
||||
dup = (xfs_dir2_data_unused_t *)((char *)hdr + be16_to_cpu(*tagp));
|
||||
/*
|
||||
* If it's not free then the data will go where the
|
||||
* leaf data starts now, if it works at all.
|
||||
@ -255,7 +252,8 @@ xfs_dir2_block_addname(
|
||||
highstale = lfloghigh = -1;
|
||||
fromidx >= 0;
|
||||
fromidx--) {
|
||||
if (be32_to_cpu(blp[fromidx].address) == XFS_DIR2_NULL_DATAPTR) {
|
||||
if (blp[fromidx].address ==
|
||||
cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) {
|
||||
if (highstale == -1)
|
||||
highstale = toidx;
|
||||
else {
|
||||
@ -272,7 +270,7 @@ xfs_dir2_block_addname(
|
||||
lfloghigh -= be32_to_cpu(btp->stale) - 1;
|
||||
be32_add_cpu(&btp->count, -(be32_to_cpu(btp->stale) - 1));
|
||||
xfs_dir2_data_make_free(tp, bp,
|
||||
(xfs_dir2_data_aoff_t)((char *)blp - (char *)block),
|
||||
(xfs_dir2_data_aoff_t)((char *)blp - (char *)hdr),
|
||||
(xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)),
|
||||
&needlog, &needscan);
|
||||
blp += be32_to_cpu(btp->stale) - 1;
|
||||
@ -282,7 +280,7 @@ xfs_dir2_block_addname(
|
||||
* This needs to happen before the next call to use_free.
|
||||
*/
|
||||
if (needscan) {
|
||||
xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
|
||||
xfs_dir2_data_freescan(mp, hdr, &needlog);
|
||||
needscan = 0;
|
||||
}
|
||||
}
|
||||
@ -318,7 +316,7 @@ xfs_dir2_block_addname(
|
||||
*/
|
||||
xfs_dir2_data_use_free(tp, bp, enddup,
|
||||
(xfs_dir2_data_aoff_t)
|
||||
((char *)enddup - (char *)block + be16_to_cpu(enddup->length) -
|
||||
((char *)enddup - (char *)hdr + be16_to_cpu(enddup->length) -
|
||||
sizeof(*blp)),
|
||||
(xfs_dir2_data_aoff_t)sizeof(*blp),
|
||||
&needlog, &needscan);
|
||||
@ -331,8 +329,7 @@ xfs_dir2_block_addname(
|
||||
* This needs to happen before the next call to use_free.
|
||||
*/
|
||||
if (needscan) {
|
||||
xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block,
|
||||
&needlog);
|
||||
xfs_dir2_data_freescan(mp, hdr, &needlog);
|
||||
needscan = 0;
|
||||
}
|
||||
/*
|
||||
@ -353,12 +350,14 @@ xfs_dir2_block_addname(
|
||||
else {
|
||||
for (lowstale = mid;
|
||||
lowstale >= 0 &&
|
||||
be32_to_cpu(blp[lowstale].address) != XFS_DIR2_NULL_DATAPTR;
|
||||
blp[lowstale].address !=
|
||||
cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
|
||||
lowstale--)
|
||||
continue;
|
||||
for (highstale = mid + 1;
|
||||
highstale < be32_to_cpu(btp->count) &&
|
||||
be32_to_cpu(blp[highstale].address) != XFS_DIR2_NULL_DATAPTR &&
|
||||
blp[highstale].address !=
|
||||
cpu_to_be32(XFS_DIR2_NULL_DATAPTR) &&
|
||||
(lowstale < 0 || mid - lowstale > highstale - mid);
|
||||
highstale++)
|
||||
continue;
|
||||
@ -397,13 +396,13 @@ xfs_dir2_block_addname(
|
||||
*/
|
||||
blp[mid].hashval = cpu_to_be32(args->hashval);
|
||||
blp[mid].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
|
||||
(char *)dep - (char *)block));
|
||||
(char *)dep - (char *)hdr));
|
||||
xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh);
|
||||
/*
|
||||
* Mark space for the data entry used.
|
||||
*/
|
||||
xfs_dir2_data_use_free(tp, bp, dup,
|
||||
(xfs_dir2_data_aoff_t)((char *)dup - (char *)block),
|
||||
(xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr),
|
||||
(xfs_dir2_data_aoff_t)len, &needlog, &needscan);
|
||||
/*
|
||||
* Create the new data entry.
|
||||
@ -412,12 +411,12 @@ xfs_dir2_block_addname(
|
||||
dep->namelen = args->namelen;
|
||||
memcpy(dep->name, args->name, args->namelen);
|
||||
tagp = xfs_dir2_data_entry_tag_p(dep);
|
||||
*tagp = cpu_to_be16((char *)dep - (char *)block);
|
||||
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
|
||||
/*
|
||||
* Clean up the bestfree array and log the header, tail, and entry.
|
||||
*/
|
||||
if (needscan)
|
||||
xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
|
||||
xfs_dir2_data_freescan(mp, hdr, &needlog);
|
||||
if (needlog)
|
||||
xfs_dir2_data_log_header(tp, bp);
|
||||
xfs_dir2_block_log_tail(tp, bp);
|
||||
@ -437,7 +436,7 @@ xfs_dir2_block_getdents(
|
||||
xfs_off_t *offset,
|
||||
filldir_t filldir)
|
||||
{
|
||||
xfs_dir2_block_t *block; /* directory block structure */
|
||||
xfs_dir2_data_hdr_t *hdr; /* block header */
|
||||
xfs_dabuf_t *bp; /* buffer for block */
|
||||
xfs_dir2_block_tail_t *btp; /* block tail */
|
||||
xfs_dir2_data_entry_t *dep; /* block data entry */
|
||||
@ -470,13 +469,13 @@ xfs_dir2_block_getdents(
|
||||
* We'll skip entries before this.
|
||||
*/
|
||||
wantoff = xfs_dir2_dataptr_to_off(mp, *offset);
|
||||
block = bp->data;
|
||||
hdr = bp->data;
|
||||
xfs_dir2_data_check(dp, bp);
|
||||
/*
|
||||
* Set up values for the loop.
|
||||
*/
|
||||
btp = xfs_dir2_block_tail_p(mp, block);
|
||||
ptr = (char *)block->u;
|
||||
btp = xfs_dir2_block_tail_p(mp, hdr);
|
||||
ptr = (char *)(hdr + 1);
|
||||
endptr = (char *)xfs_dir2_block_leaf_p(btp);
|
||||
|
||||
/*
|
||||
@ -502,11 +501,11 @@ xfs_dir2_block_getdents(
|
||||
/*
|
||||
* The entry is before the desired starting point, skip it.
|
||||
*/
|
||||
if ((char *)dep - (char *)block < wantoff)
|
||||
if ((char *)dep - (char *)hdr < wantoff)
|
||||
continue;
|
||||
|
||||
cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
|
||||
(char *)dep - (char *)block);
|
||||
(char *)dep - (char *)hdr);
|
||||
|
||||
/*
|
||||
* If it didn't fit, set the final offset to here & return.
|
||||
@ -540,17 +539,14 @@ xfs_dir2_block_log_leaf(
|
||||
int first, /* index of first logged leaf */
|
||||
int last) /* index of last logged leaf */
|
||||
{
|
||||
xfs_dir2_block_t *block; /* directory block structure */
|
||||
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
|
||||
xfs_dir2_block_tail_t *btp; /* block tail */
|
||||
xfs_mount_t *mp; /* filesystem mount point */
|
||||
xfs_dir2_data_hdr_t *hdr = bp->data;
|
||||
xfs_dir2_leaf_entry_t *blp;
|
||||
xfs_dir2_block_tail_t *btp;
|
||||
|
||||
mp = tp->t_mountp;
|
||||
block = bp->data;
|
||||
btp = xfs_dir2_block_tail_p(mp, block);
|
||||
btp = xfs_dir2_block_tail_p(tp->t_mountp, hdr);
|
||||
blp = xfs_dir2_block_leaf_p(btp);
|
||||
xfs_da_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)block),
|
||||
(uint)((char *)&blp[last + 1] - (char *)block - 1));
|
||||
xfs_da_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)hdr),
|
||||
(uint)((char *)&blp[last + 1] - (char *)hdr - 1));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -561,15 +557,12 @@ xfs_dir2_block_log_tail(
|
||||
xfs_trans_t *tp, /* transaction structure */
|
||||
xfs_dabuf_t *bp) /* block buffer */
|
||||
{
|
||||
xfs_dir2_block_t *block; /* directory block structure */
|
||||
xfs_dir2_block_tail_t *btp; /* block tail */
|
||||
xfs_mount_t *mp; /* filesystem mount point */
|
||||
xfs_dir2_data_hdr_t *hdr = bp->data;
|
||||
xfs_dir2_block_tail_t *btp;
|
||||
|
||||
mp = tp->t_mountp;
|
||||
block = bp->data;
|
||||
btp = xfs_dir2_block_tail_p(mp, block);
|
||||
xfs_da_log_buf(tp, bp, (uint)((char *)btp - (char *)block),
|
||||
(uint)((char *)(btp + 1) - (char *)block - 1));
|
||||
btp = xfs_dir2_block_tail_p(tp->t_mountp, hdr);
|
||||
xfs_da_log_buf(tp, bp, (uint)((char *)btp - (char *)hdr),
|
||||
(uint)((char *)(btp + 1) - (char *)hdr - 1));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -580,7 +573,7 @@ int /* error */
|
||||
xfs_dir2_block_lookup(
|
||||
xfs_da_args_t *args) /* dir lookup arguments */
|
||||
{
|
||||
xfs_dir2_block_t *block; /* block structure */
|
||||
xfs_dir2_data_hdr_t *hdr; /* block header */
|
||||
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
|
||||
xfs_dabuf_t *bp; /* block buffer */
|
||||
xfs_dir2_block_tail_t *btp; /* block tail */
|
||||
@ -600,14 +593,14 @@ xfs_dir2_block_lookup(
|
||||
return error;
|
||||
dp = args->dp;
|
||||
mp = dp->i_mount;
|
||||
block = bp->data;
|
||||
hdr = bp->data;
|
||||
xfs_dir2_data_check(dp, bp);
|
||||
btp = xfs_dir2_block_tail_p(mp, block);
|
||||
btp = xfs_dir2_block_tail_p(mp, hdr);
|
||||
blp = xfs_dir2_block_leaf_p(btp);
|
||||
/*
|
||||
* Get the offset from the leaf entry, to point to the data.
|
||||
*/
|
||||
dep = (xfs_dir2_data_entry_t *)((char *)block +
|
||||
dep = (xfs_dir2_data_entry_t *)((char *)hdr +
|
||||
xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
|
||||
/*
|
||||
* Fill in inode number, CI name if appropriate, release the block.
|
||||
@ -628,7 +621,7 @@ xfs_dir2_block_lookup_int(
|
||||
int *entno) /* returned entry number */
|
||||
{
|
||||
xfs_dir2_dataptr_t addr; /* data entry address */
|
||||
xfs_dir2_block_t *block; /* block structure */
|
||||
xfs_dir2_data_hdr_t *hdr; /* block header */
|
||||
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
|
||||
xfs_dabuf_t *bp; /* block buffer */
|
||||
xfs_dir2_block_tail_t *btp; /* block tail */
|
||||
@ -654,9 +647,9 @@ xfs_dir2_block_lookup_int(
|
||||
return error;
|
||||
}
|
||||
ASSERT(bp != NULL);
|
||||
block = bp->data;
|
||||
hdr = bp->data;
|
||||
xfs_dir2_data_check(dp, bp);
|
||||
btp = xfs_dir2_block_tail_p(mp, block);
|
||||
btp = xfs_dir2_block_tail_p(mp, hdr);
|
||||
blp = xfs_dir2_block_leaf_p(btp);
|
||||
/*
|
||||
* Loop doing a binary search for our hash value.
|
||||
@ -694,7 +687,7 @@ xfs_dir2_block_lookup_int(
|
||||
* Get pointer to the entry from the leaf.
|
||||
*/
|
||||
dep = (xfs_dir2_data_entry_t *)
|
||||
((char *)block + xfs_dir2_dataptr_to_off(mp, addr));
|
||||
((char *)hdr + xfs_dir2_dataptr_to_off(mp, addr));
|
||||
/*
|
||||
* Compare name and if it's an exact match, return the index
|
||||
* and buffer. If it's the first case-insensitive match, store
|
||||
@ -733,7 +726,7 @@ int /* error */
|
||||
xfs_dir2_block_removename(
|
||||
xfs_da_args_t *args) /* directory operation args */
|
||||
{
|
||||
xfs_dir2_block_t *block; /* block structure */
|
||||
xfs_dir2_data_hdr_t *hdr; /* block header */
|
||||
xfs_dir2_leaf_entry_t *blp; /* block leaf pointer */
|
||||
xfs_dabuf_t *bp; /* block buffer */
|
||||
xfs_dir2_block_tail_t *btp; /* block tail */
|
||||
@ -760,20 +753,20 @@ xfs_dir2_block_removename(
|
||||
dp = args->dp;
|
||||
tp = args->trans;
|
||||
mp = dp->i_mount;
|
||||
block = bp->data;
|
||||
btp = xfs_dir2_block_tail_p(mp, block);
|
||||
hdr = bp->data;
|
||||
btp = xfs_dir2_block_tail_p(mp, hdr);
|
||||
blp = xfs_dir2_block_leaf_p(btp);
|
||||
/*
|
||||
* Point to the data entry using the leaf entry.
|
||||
*/
|
||||
dep = (xfs_dir2_data_entry_t *)
|
||||
((char *)block + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
|
||||
((char *)hdr + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
|
||||
/*
|
||||
* Mark the data entry's space free.
|
||||
*/
|
||||
needlog = needscan = 0;
|
||||
xfs_dir2_data_make_free(tp, bp,
|
||||
(xfs_dir2_data_aoff_t)((char *)dep - (char *)block),
|
||||
(xfs_dir2_data_aoff_t)((char *)dep - (char *)hdr),
|
||||
xfs_dir2_data_entsize(dep->namelen), &needlog, &needscan);
|
||||
/*
|
||||
* Fix up the block tail.
|
||||
@ -789,15 +782,15 @@ xfs_dir2_block_removename(
|
||||
* Fix up bestfree, log the header if necessary.
|
||||
*/
|
||||
if (needscan)
|
||||
xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
|
||||
xfs_dir2_data_freescan(mp, hdr, &needlog);
|
||||
if (needlog)
|
||||
xfs_dir2_data_log_header(tp, bp);
|
||||
xfs_dir2_data_check(dp, bp);
|
||||
/*
|
||||
* See if the size as a shortform is good enough.
|
||||
*/
|
||||
if ((size = xfs_dir2_block_sfsize(dp, block, &sfh)) >
|
||||
XFS_IFORK_DSIZE(dp)) {
|
||||
size = xfs_dir2_block_sfsize(dp, hdr, &sfh);
|
||||
if (size > XFS_IFORK_DSIZE(dp)) {
|
||||
xfs_da_buf_done(bp);
|
||||
return 0;
|
||||
}
|
||||
@ -815,7 +808,7 @@ int /* error */
|
||||
xfs_dir2_block_replace(
|
||||
xfs_da_args_t *args) /* directory operation args */
|
||||
{
|
||||
xfs_dir2_block_t *block; /* block structure */
|
||||
xfs_dir2_data_hdr_t *hdr; /* block header */
|
||||
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
|
||||
xfs_dabuf_t *bp; /* block buffer */
|
||||
xfs_dir2_block_tail_t *btp; /* block tail */
|
||||
@ -836,14 +829,14 @@ xfs_dir2_block_replace(
|
||||
}
|
||||
dp = args->dp;
|
||||
mp = dp->i_mount;
|
||||
block = bp->data;
|
||||
btp = xfs_dir2_block_tail_p(mp, block);
|
||||
hdr = bp->data;
|
||||
btp = xfs_dir2_block_tail_p(mp, hdr);
|
||||
blp = xfs_dir2_block_leaf_p(btp);
|
||||
/*
|
||||
* Point to the data entry we need to change.
|
||||
*/
|
||||
dep = (xfs_dir2_data_entry_t *)
|
||||
((char *)block + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
|
||||
((char *)hdr + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
|
||||
ASSERT(be64_to_cpu(dep->inumber) != args->inumber);
|
||||
/*
|
||||
* Change the inode number to the new value.
|
||||
@ -882,7 +875,7 @@ xfs_dir2_leaf_to_block(
|
||||
xfs_dabuf_t *dbp) /* data buffer */
|
||||
{
|
||||
__be16 *bestsp; /* leaf bests table */
|
||||
xfs_dir2_block_t *block; /* block structure */
|
||||
xfs_dir2_data_hdr_t *hdr; /* block header */
|
||||
xfs_dir2_block_tail_t *btp; /* block tail */
|
||||
xfs_inode_t *dp; /* incore directory inode */
|
||||
xfs_dir2_data_unused_t *dup; /* unused data entry */
|
||||
@ -906,7 +899,7 @@ xfs_dir2_leaf_to_block(
|
||||
tp = args->trans;
|
||||
mp = dp->i_mount;
|
||||
leaf = lbp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
|
||||
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
|
||||
/*
|
||||
* If there are data blocks other than the first one, take this
|
||||
@ -917,7 +910,7 @@ xfs_dir2_leaf_to_block(
|
||||
while (dp->i_d.di_size > mp->m_dirblksize) {
|
||||
bestsp = xfs_dir2_leaf_bests_p(ltp);
|
||||
if (be16_to_cpu(bestsp[be32_to_cpu(ltp->bestcount) - 1]) ==
|
||||
mp->m_dirblksize - (uint)sizeof(block->hdr)) {
|
||||
mp->m_dirblksize - (uint)sizeof(*hdr)) {
|
||||
if ((error =
|
||||
xfs_dir2_leaf_trim_data(args, lbp,
|
||||
(xfs_dir2_db_t)(be32_to_cpu(ltp->bestcount) - 1))))
|
||||
@ -935,18 +928,18 @@ xfs_dir2_leaf_to_block(
|
||||
XFS_DATA_FORK))) {
|
||||
goto out;
|
||||
}
|
||||
block = dbp->data;
|
||||
ASSERT(be32_to_cpu(block->hdr.magic) == XFS_DIR2_DATA_MAGIC);
|
||||
hdr = dbp->data;
|
||||
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC));
|
||||
/*
|
||||
* Size of the "leaf" area in the block.
|
||||
*/
|
||||
size = (uint)sizeof(block->tail) +
|
||||
size = (uint)sizeof(xfs_dir2_block_tail_t) +
|
||||
(uint)sizeof(*lep) * (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale));
|
||||
/*
|
||||
* Look at the last data entry.
|
||||
*/
|
||||
tagp = (__be16 *)((char *)block + mp->m_dirblksize) - 1;
|
||||
dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
|
||||
tagp = (__be16 *)((char *)hdr + mp->m_dirblksize) - 1;
|
||||
dup = (xfs_dir2_data_unused_t *)((char *)hdr + be16_to_cpu(*tagp));
|
||||
/*
|
||||
* If it's not free or is too short we can't do it.
|
||||
*/
|
||||
@ -958,7 +951,7 @@ xfs_dir2_leaf_to_block(
|
||||
/*
|
||||
* Start converting it to block form.
|
||||
*/
|
||||
block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
|
||||
hdr->magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
|
||||
needlog = 1;
|
||||
needscan = 0;
|
||||
/*
|
||||
@ -969,7 +962,7 @@ xfs_dir2_leaf_to_block(
|
||||
/*
|
||||
* Initialize the block tail.
|
||||
*/
|
||||
btp = xfs_dir2_block_tail_p(mp, block);
|
||||
btp = xfs_dir2_block_tail_p(mp, hdr);
|
||||
btp->count = cpu_to_be32(be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale));
|
||||
btp->stale = 0;
|
||||
xfs_dir2_block_log_tail(tp, dbp);
|
||||
@ -978,7 +971,8 @@ xfs_dir2_leaf_to_block(
|
||||
*/
|
||||
lep = xfs_dir2_block_leaf_p(btp);
|
||||
for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) {
|
||||
if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR)
|
||||
if (leaf->ents[from].address ==
|
||||
cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
|
||||
continue;
|
||||
lep[to++] = leaf->ents[from];
|
||||
}
|
||||
@ -988,7 +982,7 @@ xfs_dir2_leaf_to_block(
|
||||
* Scan the bestfree if we need it and log the data block header.
|
||||
*/
|
||||
if (needscan)
|
||||
xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
|
||||
xfs_dir2_data_freescan(mp, hdr, &needlog);
|
||||
if (needlog)
|
||||
xfs_dir2_data_log_header(tp, dbp);
|
||||
/*
|
||||
@ -1002,8 +996,8 @@ xfs_dir2_leaf_to_block(
|
||||
/*
|
||||
* Now see if the resulting block can be shrunken to shortform.
|
||||
*/
|
||||
if ((size = xfs_dir2_block_sfsize(dp, block, &sfh)) >
|
||||
XFS_IFORK_DSIZE(dp)) {
|
||||
size = xfs_dir2_block_sfsize(dp, hdr, &sfh);
|
||||
if (size > XFS_IFORK_DSIZE(dp)) {
|
||||
error = 0;
|
||||
goto out;
|
||||
}
|
||||
@ -1024,12 +1018,10 @@ xfs_dir2_sf_to_block(
|
||||
xfs_da_args_t *args) /* operation arguments */
|
||||
{
|
||||
xfs_dir2_db_t blkno; /* dir-relative block # (0) */
|
||||
xfs_dir2_block_t *block; /* block structure */
|
||||
xfs_dir2_data_hdr_t *hdr; /* block header */
|
||||
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
|
||||
xfs_dabuf_t *bp; /* block buffer */
|
||||
xfs_dir2_block_tail_t *btp; /* block tail pointer */
|
||||
char *buf; /* sf buffer */
|
||||
int buf_len;
|
||||
xfs_dir2_data_entry_t *dep; /* data entry pointer */
|
||||
xfs_inode_t *dp; /* incore directory inode */
|
||||
int dummy; /* trash */
|
||||
@ -1043,7 +1035,8 @@ xfs_dir2_sf_to_block(
|
||||
int newoffset; /* offset from current entry */
|
||||
int offset; /* target block offset */
|
||||
xfs_dir2_sf_entry_t *sfep; /* sf entry pointer */
|
||||
xfs_dir2_sf_t *sfp; /* shortform structure */
|
||||
xfs_dir2_sf_hdr_t *oldsfp; /* old shortform header */
|
||||
xfs_dir2_sf_hdr_t *sfp; /* shortform header */
|
||||
__be16 *tagp; /* end of data entry */
|
||||
xfs_trans_t *tp; /* transaction pointer */
|
||||
struct xfs_name name;
|
||||
@ -1061,32 +1054,30 @@ xfs_dir2_sf_to_block(
|
||||
ASSERT(XFS_FORCED_SHUTDOWN(mp));
|
||||
return XFS_ERROR(EIO);
|
||||
}
|
||||
|
||||
oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
|
||||
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
|
||||
ASSERT(dp->i_df.if_u1.if_data != NULL);
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
|
||||
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(oldsfp->i8count));
|
||||
|
||||
/*
|
||||
* Copy the directory into the stack buffer.
|
||||
* Copy the directory into a temporary buffer.
|
||||
* Then pitch the incore inode data so we can make extents.
|
||||
*/
|
||||
sfp = kmem_alloc(dp->i_df.if_bytes, KM_SLEEP);
|
||||
memcpy(sfp, oldsfp, dp->i_df.if_bytes);
|
||||
|
||||
buf_len = dp->i_df.if_bytes;
|
||||
buf = kmem_alloc(buf_len, KM_SLEEP);
|
||||
|
||||
memcpy(buf, sfp, buf_len);
|
||||
xfs_idata_realloc(dp, -buf_len, XFS_DATA_FORK);
|
||||
xfs_idata_realloc(dp, -dp->i_df.if_bytes, XFS_DATA_FORK);
|
||||
dp->i_d.di_size = 0;
|
||||
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
|
||||
/*
|
||||
* Reset pointer - old sfp is gone.
|
||||
*/
|
||||
sfp = (xfs_dir2_sf_t *)buf;
|
||||
|
||||
/*
|
||||
* Add block 0 to the inode.
|
||||
*/
|
||||
error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, &blkno);
|
||||
if (error) {
|
||||
kmem_free(buf);
|
||||
kmem_free(sfp);
|
||||
return error;
|
||||
}
|
||||
/*
|
||||
@ -1094,21 +1085,21 @@ xfs_dir2_sf_to_block(
|
||||
*/
|
||||
error = xfs_dir2_data_init(args, blkno, &bp);
|
||||
if (error) {
|
||||
kmem_free(buf);
|
||||
kmem_free(sfp);
|
||||
return error;
|
||||
}
|
||||
block = bp->data;
|
||||
block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
|
||||
hdr = bp->data;
|
||||
hdr->magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
|
||||
/*
|
||||
* Compute size of block "tail" area.
|
||||
*/
|
||||
i = (uint)sizeof(*btp) +
|
||||
(sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t);
|
||||
(sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t);
|
||||
/*
|
||||
* The whole thing is initialized to free by the init routine.
|
||||
* Say we're using the leaf and tail area.
|
||||
*/
|
||||
dup = (xfs_dir2_data_unused_t *)block->u;
|
||||
dup = (xfs_dir2_data_unused_t *)(hdr + 1);
|
||||
needlog = needscan = 0;
|
||||
xfs_dir2_data_use_free(tp, bp, dup, mp->m_dirblksize - i, i, &needlog,
|
||||
&needscan);
|
||||
@ -1116,50 +1107,51 @@ xfs_dir2_sf_to_block(
|
||||
/*
|
||||
* Fill in the tail.
|
||||
*/
|
||||
btp = xfs_dir2_block_tail_p(mp, block);
|
||||
btp->count = cpu_to_be32(sfp->hdr.count + 2); /* ., .. */
|
||||
btp = xfs_dir2_block_tail_p(mp, hdr);
|
||||
btp->count = cpu_to_be32(sfp->count + 2); /* ., .. */
|
||||
btp->stale = 0;
|
||||
blp = xfs_dir2_block_leaf_p(btp);
|
||||
endoffset = (uint)((char *)blp - (char *)block);
|
||||
endoffset = (uint)((char *)blp - (char *)hdr);
|
||||
/*
|
||||
* Remove the freespace, we'll manage it.
|
||||
*/
|
||||
xfs_dir2_data_use_free(tp, bp, dup,
|
||||
(xfs_dir2_data_aoff_t)((char *)dup - (char *)block),
|
||||
(xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr),
|
||||
be16_to_cpu(dup->length), &needlog, &needscan);
|
||||
/*
|
||||
* Create entry for .
|
||||
*/
|
||||
dep = (xfs_dir2_data_entry_t *)
|
||||
((char *)block + XFS_DIR2_DATA_DOT_OFFSET);
|
||||
((char *)hdr + XFS_DIR2_DATA_DOT_OFFSET);
|
||||
dep->inumber = cpu_to_be64(dp->i_ino);
|
||||
dep->namelen = 1;
|
||||
dep->name[0] = '.';
|
||||
tagp = xfs_dir2_data_entry_tag_p(dep);
|
||||
*tagp = cpu_to_be16((char *)dep - (char *)block);
|
||||
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
|
||||
xfs_dir2_data_log_entry(tp, bp, dep);
|
||||
blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot);
|
||||
blp[0].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
|
||||
(char *)dep - (char *)block));
|
||||
(char *)dep - (char *)hdr));
|
||||
/*
|
||||
* Create entry for ..
|
||||
*/
|
||||
dep = (xfs_dir2_data_entry_t *)
|
||||
((char *)block + XFS_DIR2_DATA_DOTDOT_OFFSET);
|
||||
dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent));
|
||||
((char *)hdr + XFS_DIR2_DATA_DOTDOT_OFFSET);
|
||||
dep->inumber = cpu_to_be64(xfs_dir2_sf_get_parent_ino(sfp));
|
||||
dep->namelen = 2;
|
||||
dep->name[0] = dep->name[1] = '.';
|
||||
tagp = xfs_dir2_data_entry_tag_p(dep);
|
||||
*tagp = cpu_to_be16((char *)dep - (char *)block);
|
||||
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
|
||||
xfs_dir2_data_log_entry(tp, bp, dep);
|
||||
blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot);
|
||||
blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
|
||||
(char *)dep - (char *)block));
|
||||
(char *)dep - (char *)hdr));
|
||||
offset = XFS_DIR2_DATA_FIRST_OFFSET;
|
||||
/*
|
||||
* Loop over existing entries, stuff them in.
|
||||
*/
|
||||
if ((i = 0) == sfp->hdr.count)
|
||||
i = 0;
|
||||
if (!sfp->count)
|
||||
sfep = NULL;
|
||||
else
|
||||
sfep = xfs_dir2_sf_firstentry(sfp);
|
||||
@ -1179,43 +1171,40 @@ xfs_dir2_sf_to_block(
|
||||
* There should be a hole here, make one.
|
||||
*/
|
||||
if (offset < newoffset) {
|
||||
dup = (xfs_dir2_data_unused_t *)
|
||||
((char *)block + offset);
|
||||
dup = (xfs_dir2_data_unused_t *)((char *)hdr + offset);
|
||||
dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
|
||||
dup->length = cpu_to_be16(newoffset - offset);
|
||||
*xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16(
|
||||
((char *)dup - (char *)block));
|
||||
((char *)dup - (char *)hdr));
|
||||
xfs_dir2_data_log_unused(tp, bp, dup);
|
||||
(void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block,
|
||||
dup, &dummy);
|
||||
xfs_dir2_data_freeinsert(hdr, dup, &dummy);
|
||||
offset += be16_to_cpu(dup->length);
|
||||
continue;
|
||||
}
|
||||
/*
|
||||
* Copy a real entry.
|
||||
*/
|
||||
dep = (xfs_dir2_data_entry_t *)((char *)block + newoffset);
|
||||
dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp,
|
||||
xfs_dir2_sf_inumberp(sfep)));
|
||||
dep = (xfs_dir2_data_entry_t *)((char *)hdr + newoffset);
|
||||
dep->inumber = cpu_to_be64(xfs_dir2_sfe_get_ino(sfp, sfep));
|
||||
dep->namelen = sfep->namelen;
|
||||
memcpy(dep->name, sfep->name, dep->namelen);
|
||||
tagp = xfs_dir2_data_entry_tag_p(dep);
|
||||
*tagp = cpu_to_be16((char *)dep - (char *)block);
|
||||
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
|
||||
xfs_dir2_data_log_entry(tp, bp, dep);
|
||||
name.name = sfep->name;
|
||||
name.len = sfep->namelen;
|
||||
blp[2 + i].hashval = cpu_to_be32(mp->m_dirnameops->
|
||||
hashname(&name));
|
||||
blp[2 + i].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
|
||||
(char *)dep - (char *)block));
|
||||
offset = (int)((char *)(tagp + 1) - (char *)block);
|
||||
if (++i == sfp->hdr.count)
|
||||
(char *)dep - (char *)hdr));
|
||||
offset = (int)((char *)(tagp + 1) - (char *)hdr);
|
||||
if (++i == sfp->count)
|
||||
sfep = NULL;
|
||||
else
|
||||
sfep = xfs_dir2_sf_nextentry(sfp, sfep);
|
||||
}
|
||||
/* Done with the temporary buffer */
|
||||
kmem_free(buf);
|
||||
kmem_free(sfp);
|
||||
/*
|
||||
* Sort the leaf entries by hash value.
|
||||
*/
|
||||
|
@ -1,92 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it would be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
#ifndef __XFS_DIR2_BLOCK_H__
|
||||
#define __XFS_DIR2_BLOCK_H__
|
||||
|
||||
/*
|
||||
* xfs_dir2_block.h
|
||||
* Directory version 2, single block format structures
|
||||
*/
|
||||
|
||||
struct uio;
|
||||
struct xfs_dabuf;
|
||||
struct xfs_da_args;
|
||||
struct xfs_dir2_data_hdr;
|
||||
struct xfs_dir2_leaf_entry;
|
||||
struct xfs_inode;
|
||||
struct xfs_mount;
|
||||
struct xfs_trans;
|
||||
|
||||
/*
|
||||
* The single block format is as follows:
|
||||
* xfs_dir2_data_hdr_t structure
|
||||
* xfs_dir2_data_entry_t and xfs_dir2_data_unused_t structures
|
||||
* xfs_dir2_leaf_entry_t structures
|
||||
* xfs_dir2_block_tail_t structure
|
||||
*/
|
||||
|
||||
#define XFS_DIR2_BLOCK_MAGIC 0x58443242 /* XD2B: for one block dirs */
|
||||
|
||||
typedef struct xfs_dir2_block_tail {
|
||||
__be32 count; /* count of leaf entries */
|
||||
__be32 stale; /* count of stale lf entries */
|
||||
} xfs_dir2_block_tail_t;
|
||||
|
||||
/*
|
||||
* Generic single-block structure, for xfs_db.
|
||||
*/
|
||||
typedef struct xfs_dir2_block {
|
||||
xfs_dir2_data_hdr_t hdr; /* magic XFS_DIR2_BLOCK_MAGIC */
|
||||
xfs_dir2_data_union_t u[1];
|
||||
xfs_dir2_leaf_entry_t leaf[1];
|
||||
xfs_dir2_block_tail_t tail;
|
||||
} xfs_dir2_block_t;
|
||||
|
||||
/*
|
||||
* Pointer to the leaf header embedded in a data block (1-block format)
|
||||
*/
|
||||
static inline xfs_dir2_block_tail_t *
|
||||
xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_block_t *block)
|
||||
{
|
||||
return (((xfs_dir2_block_tail_t *)
|
||||
((char *)(block) + (mp)->m_dirblksize)) - 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Pointer to the leaf entries embedded in a data block (1-block format)
|
||||
*/
|
||||
static inline struct xfs_dir2_leaf_entry *
|
||||
xfs_dir2_block_leaf_p(xfs_dir2_block_tail_t *btp)
|
||||
{
|
||||
return ((struct xfs_dir2_leaf_entry *)btp) - be32_to_cpu(btp->count);
|
||||
}
|
||||
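These two helpers carry the pointer arithmetic of the one-block format: the tail lives in the last bytes of the directory block, and the leaf entries are the array that ends immediately before it. As a reading aid only (not part of the patch), the calling pattern used throughout xfs_dir2_leaf_to_block() and xfs_dir2_sf_to_block() above looks roughly like this, with mp the usual mount pointer and hdr assumed to point at a block whose magic is XFS_DIR2_BLOCK_MAGIC:

	xfs_dir2_block_tail_t	*btp;
	xfs_dir2_leaf_entry_t	*lep;

	btp = xfs_dir2_block_tail_p(mp, hdr);	/* tail occupies the last sizeof(*btp) bytes */
	lep = xfs_dir2_block_leaf_p(btp);	/* btp->count entries ending right at btp */
	/* lep[0 .. be32_to_cpu(btp->count) - 1] are the hashval/address pairs */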
|
||||
/*
|
||||
* Function declarations.
|
||||
*/
|
||||
extern int xfs_dir2_block_addname(struct xfs_da_args *args);
|
||||
extern int xfs_dir2_block_getdents(struct xfs_inode *dp, void *dirent,
|
||||
xfs_off_t *offset, filldir_t filldir);
|
||||
extern int xfs_dir2_block_lookup(struct xfs_da_args *args);
|
||||
extern int xfs_dir2_block_removename(struct xfs_da_args *args);
|
||||
extern int xfs_dir2_block_replace(struct xfs_da_args *args);
|
||||
extern int xfs_dir2_leaf_to_block(struct xfs_da_args *args,
|
||||
struct xfs_dabuf *lbp, struct xfs_dabuf *dbp);
|
||||
extern int xfs_dir2_sf_to_block(struct xfs_da_args *args);
|
||||
|
||||
#endif /* __XFS_DIR2_BLOCK_H__ */
|
@ -23,18 +23,18 @@
|
||||
#include "xfs_trans.h"
|
||||
#include "xfs_sb.h"
|
||||
#include "xfs_ag.h"
|
||||
#include "xfs_dir2.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_da_btree.h"
|
||||
#include "xfs_bmap_btree.h"
|
||||
#include "xfs_dir2_sf.h"
|
||||
#include "xfs_dinode.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_dir2_data.h"
|
||||
#include "xfs_dir2_leaf.h"
|
||||
#include "xfs_dir2_block.h"
|
||||
#include "xfs_dir2_format.h"
|
||||
#include "xfs_dir2_priv.h"
|
||||
#include "xfs_error.h"
|
||||
|
||||
STATIC xfs_dir2_data_free_t *
|
||||
xfs_dir2_data_freefind(xfs_dir2_data_hdr_t *hdr, xfs_dir2_data_unused_t *dup);
|
||||
|
||||
#ifdef DEBUG
|
||||
/*
|
||||
* Check the consistency of the data block.
|
||||
@ -50,7 +50,7 @@ xfs_dir2_data_check(
|
||||
xfs_dir2_data_free_t *bf; /* bestfree table */
|
||||
xfs_dir2_block_tail_t *btp=NULL; /* block tail */
|
||||
int count; /* count of entries found */
|
||||
xfs_dir2_data_t *d; /* data block pointer */
|
||||
xfs_dir2_data_hdr_t *hdr; /* data block header */
|
||||
xfs_dir2_data_entry_t *dep; /* data entry */
|
||||
xfs_dir2_data_free_t *dfp; /* bestfree entry */
|
||||
xfs_dir2_data_unused_t *dup; /* unused entry */
|
||||
@ -66,17 +66,19 @@ xfs_dir2_data_check(
|
||||
struct xfs_name name;
|
||||
|
||||
mp = dp->i_mount;
|
||||
d = bp->data;
|
||||
ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
|
||||
be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
|
||||
bf = d->hdr.bestfree;
|
||||
p = (char *)d->u;
|
||||
if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) {
|
||||
btp = xfs_dir2_block_tail_p(mp, (xfs_dir2_block_t *)d);
|
||||
hdr = bp->data;
|
||||
bf = hdr->bestfree;
|
||||
p = (char *)(hdr + 1);
|
||||
|
||||
if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) {
|
||||
btp = xfs_dir2_block_tail_p(mp, hdr);
|
||||
lep = xfs_dir2_block_leaf_p(btp);
|
||||
endp = (char *)lep;
|
||||
} else
|
||||
endp = (char *)d + mp->m_dirblksize;
|
||||
} else {
|
||||
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC));
|
||||
endp = (char *)hdr + mp->m_dirblksize;
|
||||
}
|
||||
|
||||
count = lastfree = freeseen = 0;
|
||||
/*
|
||||
* Account for zero bestfree entries.
|
||||
@ -108,8 +110,8 @@ xfs_dir2_data_check(
|
||||
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
|
||||
ASSERT(lastfree == 0);
|
||||
ASSERT(be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) ==
|
||||
(char *)dup - (char *)d);
|
||||
dfp = xfs_dir2_data_freefind(d, dup);
|
||||
(char *)dup - (char *)hdr);
|
||||
dfp = xfs_dir2_data_freefind(hdr, dup);
|
||||
if (dfp) {
|
||||
i = (int)(dfp - bf);
|
||||
ASSERT((freeseen & (1 << i)) == 0);
|
||||
@ -132,13 +134,13 @@ xfs_dir2_data_check(
|
||||
ASSERT(dep->namelen != 0);
|
||||
ASSERT(xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)) == 0);
|
||||
ASSERT(be16_to_cpu(*xfs_dir2_data_entry_tag_p(dep)) ==
|
||||
(char *)dep - (char *)d);
|
||||
(char *)dep - (char *)hdr);
|
||||
count++;
|
||||
lastfree = 0;
|
||||
if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) {
|
||||
if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) {
|
||||
addr = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
|
||||
(xfs_dir2_data_aoff_t)
|
||||
((char *)dep - (char *)d));
|
||||
((char *)dep - (char *)hdr));
|
||||
name.name = dep->name;
|
||||
name.len = dep->namelen;
|
||||
hash = mp->m_dirnameops->hashname(&name);
|
||||
@ -155,9 +157,10 @@ xfs_dir2_data_check(
|
||||
* Need to have seen all the entries and all the bestfree slots.
|
||||
*/
|
||||
ASSERT(freeseen == 7);
|
||||
if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) {
|
||||
if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) {
|
||||
for (i = stale = 0; i < be32_to_cpu(btp->count); i++) {
|
||||
if (be32_to_cpu(lep[i].address) == XFS_DIR2_NULL_DATAPTR)
|
||||
if (lep[i].address ==
|
||||
cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
|
||||
stale++;
|
||||
if (i > 0)
|
||||
ASSERT(be32_to_cpu(lep[i].hashval) >= be32_to_cpu(lep[i - 1].hashval));
|
||||
@ -172,9 +175,9 @@ xfs_dir2_data_check(
|
||||
* Given a data block and an unused entry from that block,
|
||||
* return the bestfree entry if any that corresponds to it.
|
||||
*/
|
||||
xfs_dir2_data_free_t *
|
||||
STATIC xfs_dir2_data_free_t *
|
||||
xfs_dir2_data_freefind(
|
||||
xfs_dir2_data_t *d, /* data block */
|
||||
xfs_dir2_data_hdr_t *hdr, /* data block */
|
||||
xfs_dir2_data_unused_t *dup) /* data unused entry */
|
||||
{
|
||||
xfs_dir2_data_free_t *dfp; /* bestfree entry */
|
||||
@ -184,17 +187,17 @@ xfs_dir2_data_freefind(
|
||||
int seenzero; /* saw a 0 bestfree entry */
|
||||
#endif
|
||||
|
||||
off = (xfs_dir2_data_aoff_t)((char *)dup - (char *)d);
|
||||
off = (xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr);
|
||||
#if defined(DEBUG) && defined(__KERNEL__)
|
||||
/*
|
||||
* Validate some consistency in the bestfree table.
|
||||
* Check order, non-overlapping entries, and if we find the
|
||||
* one we're looking for it has to be exact.
|
||||
*/
|
||||
ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
|
||||
be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
|
||||
for (dfp = &d->hdr.bestfree[0], seenzero = matched = 0;
|
||||
dfp < &d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT];
|
||||
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
|
||||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
|
||||
for (dfp = &hdr->bestfree[0], seenzero = matched = 0;
|
||||
dfp < &hdr->bestfree[XFS_DIR2_DATA_FD_COUNT];
|
||||
dfp++) {
|
||||
if (!dfp->offset) {
|
||||
ASSERT(!dfp->length);
|
||||
@ -210,7 +213,7 @@ xfs_dir2_data_freefind(
|
||||
else
|
||||
ASSERT(be16_to_cpu(dfp->offset) + be16_to_cpu(dfp->length) <= off);
|
||||
ASSERT(matched || be16_to_cpu(dfp->length) >= be16_to_cpu(dup->length));
|
||||
if (dfp > &d->hdr.bestfree[0])
|
||||
if (dfp > &hdr->bestfree[0])
|
||||
ASSERT(be16_to_cpu(dfp[-1].length) >= be16_to_cpu(dfp[0].length));
|
||||
}
|
||||
#endif
|
||||
@ -219,13 +222,13 @@ xfs_dir2_data_freefind(
|
||||
* it can't be there since they're sorted.
|
||||
*/
|
||||
if (be16_to_cpu(dup->length) <
|
||||
be16_to_cpu(d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length))
|
||||
be16_to_cpu(hdr->bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length))
|
||||
return NULL;
|
||||
/*
|
||||
* Look at the three bestfree entries for our guy.
|
||||
*/
|
||||
for (dfp = &d->hdr.bestfree[0];
|
||||
dfp < &d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT];
|
||||
for (dfp = &hdr->bestfree[0];
|
||||
dfp < &hdr->bestfree[XFS_DIR2_DATA_FD_COUNT];
|
||||
dfp++) {
|
||||
if (!dfp->offset)
|
||||
return NULL;
|
||||
@ -243,7 +246,7 @@ xfs_dir2_data_freefind(
|
||||
*/
|
||||
xfs_dir2_data_free_t * /* entry inserted */
|
||||
xfs_dir2_data_freeinsert(
|
||||
xfs_dir2_data_t *d, /* data block pointer */
|
||||
xfs_dir2_data_hdr_t *hdr, /* data block pointer */
|
||||
xfs_dir2_data_unused_t *dup, /* unused space */
|
||||
int *loghead) /* log the data header (out) */
|
||||
{
|
||||
@ -251,12 +254,13 @@ xfs_dir2_data_freeinsert(
|
||||
xfs_dir2_data_free_t new; /* new bestfree entry */
|
||||
|
||||
#ifdef __KERNEL__
|
||||
ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
|
||||
be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
|
||||
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
|
||||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
|
||||
#endif
|
||||
dfp = d->hdr.bestfree;
|
||||
dfp = hdr->bestfree;
|
||||
new.length = dup->length;
|
||||
new.offset = cpu_to_be16((char *)dup - (char *)d);
|
||||
new.offset = cpu_to_be16((char *)dup - (char *)hdr);
|
||||
|
||||
/*
|
||||
* Insert at position 0, 1, or 2; or not at all.
|
||||
*/
|
||||
@ -286,36 +290,36 @@ xfs_dir2_data_freeinsert(
|
||||
*/
|
||||
STATIC void
|
||||
xfs_dir2_data_freeremove(
|
||||
xfs_dir2_data_t *d, /* data block pointer */
|
||||
xfs_dir2_data_hdr_t *hdr, /* data block header */
|
||||
xfs_dir2_data_free_t *dfp, /* bestfree entry pointer */
|
||||
int *loghead) /* out: log data header */
|
||||
{
|
||||
#ifdef __KERNEL__
|
||||
ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
|
||||
be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
|
||||
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
|
||||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
|
||||
#endif
|
||||
/*
|
||||
* It's the first entry, slide the next 2 up.
|
||||
*/
|
||||
if (dfp == &d->hdr.bestfree[0]) {
|
||||
d->hdr.bestfree[0] = d->hdr.bestfree[1];
|
||||
d->hdr.bestfree[1] = d->hdr.bestfree[2];
|
||||
if (dfp == &hdr->bestfree[0]) {
|
||||
hdr->bestfree[0] = hdr->bestfree[1];
|
||||
hdr->bestfree[1] = hdr->bestfree[2];
|
||||
}
|
||||
/*
|
||||
* It's the second entry, slide the 3rd entry up.
|
||||
*/
|
||||
else if (dfp == &d->hdr.bestfree[1])
|
||||
d->hdr.bestfree[1] = d->hdr.bestfree[2];
|
||||
else if (dfp == &hdr->bestfree[1])
|
||||
hdr->bestfree[1] = hdr->bestfree[2];
|
||||
/*
|
||||
* Must be the last entry.
|
||||
*/
|
||||
else
|
||||
ASSERT(dfp == &d->hdr.bestfree[2]);
|
||||
ASSERT(dfp == &hdr->bestfree[2]);
|
||||
/*
|
||||
* Clear the 3rd entry, must be zero now.
|
||||
*/
|
||||
d->hdr.bestfree[2].length = 0;
|
||||
d->hdr.bestfree[2].offset = 0;
|
||||
hdr->bestfree[2].length = 0;
|
||||
hdr->bestfree[2].offset = 0;
|
||||
*loghead = 1;
|
||||
}
|
||||
|
||||
@ -325,7 +329,7 @@ xfs_dir2_data_freeremove(
|
||||
void
|
||||
xfs_dir2_data_freescan(
|
||||
xfs_mount_t *mp, /* filesystem mount point */
|
||||
xfs_dir2_data_t *d, /* data block pointer */
|
||||
xfs_dir2_data_hdr_t *hdr, /* data block header */
|
||||
int *loghead) /* out: log data header */
|
||||
{
|
||||
xfs_dir2_block_tail_t *btp; /* block tail */
|
||||
@ -335,23 +339,23 @@ xfs_dir2_data_freescan(
|
||||
char *p; /* current entry pointer */
|
||||
|
||||
#ifdef __KERNEL__
|
||||
ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
|
||||
be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
|
||||
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
|
||||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
|
||||
#endif
|
||||
/*
|
||||
* Start by clearing the table.
|
||||
*/
|
||||
memset(d->hdr.bestfree, 0, sizeof(d->hdr.bestfree));
|
||||
memset(hdr->bestfree, 0, sizeof(hdr->bestfree));
|
||||
*loghead = 1;
|
||||
/*
|
||||
* Set up pointers.
|
||||
*/
|
||||
p = (char *)d->u;
|
||||
if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) {
|
||||
btp = xfs_dir2_block_tail_p(mp, (xfs_dir2_block_t *)d);
|
||||
p = (char *)(hdr + 1);
|
||||
if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) {
|
||||
btp = xfs_dir2_block_tail_p(mp, hdr);
|
||||
endp = (char *)xfs_dir2_block_leaf_p(btp);
|
||||
} else
|
||||
endp = (char *)d + mp->m_dirblksize;
|
||||
endp = (char *)hdr + mp->m_dirblksize;
|
||||
/*
|
||||
* Loop over the block's entries.
|
||||
*/
|
||||
@ -361,9 +365,9 @@ xfs_dir2_data_freescan(
|
||||
* If it's a free entry, insert it.
|
||||
*/
|
||||
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
|
||||
ASSERT((char *)dup - (char *)d ==
|
||||
ASSERT((char *)dup - (char *)hdr ==
|
||||
be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)));
|
||||
xfs_dir2_data_freeinsert(d, dup, loghead);
|
||||
xfs_dir2_data_freeinsert(hdr, dup, loghead);
|
||||
p += be16_to_cpu(dup->length);
|
||||
}
|
||||
/*
|
||||
@ -371,7 +375,7 @@ xfs_dir2_data_freescan(
|
||||
*/
|
||||
else {
|
||||
dep = (xfs_dir2_data_entry_t *)p;
|
||||
ASSERT((char *)dep - (char *)d ==
|
||||
ASSERT((char *)dep - (char *)hdr ==
|
||||
be16_to_cpu(*xfs_dir2_data_entry_tag_p(dep)));
|
||||
p += xfs_dir2_data_entsize(dep->namelen);
|
||||
}
|
||||
@ -389,7 +393,7 @@ xfs_dir2_data_init(
|
||||
xfs_dabuf_t **bpp) /* output block buffer */
|
||||
{
|
||||
xfs_dabuf_t *bp; /* block buffer */
|
||||
xfs_dir2_data_t *d; /* pointer to block */
|
||||
xfs_dir2_data_hdr_t *hdr; /* data block header */
|
||||
xfs_inode_t *dp; /* incore directory inode */
|
||||
xfs_dir2_data_unused_t *dup; /* unused entry pointer */
|
||||
int error; /* error return value */
|
||||
@ -410,26 +414,28 @@ xfs_dir2_data_init(
|
||||
return error;
|
||||
}
|
||||
ASSERT(bp != NULL);
|
||||
|
||||
/*
|
||||
* Initialize the header.
|
||||
*/
|
||||
d = bp->data;
|
||||
d->hdr.magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
|
||||
d->hdr.bestfree[0].offset = cpu_to_be16(sizeof(d->hdr));
|
||||
hdr = bp->data;
|
||||
hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
|
||||
hdr->bestfree[0].offset = cpu_to_be16(sizeof(*hdr));
|
||||
for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) {
|
||||
d->hdr.bestfree[i].length = 0;
|
||||
d->hdr.bestfree[i].offset = 0;
|
||||
hdr->bestfree[i].length = 0;
|
||||
hdr->bestfree[i].offset = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up an unused entry for the block's body.
|
||||
*/
|
||||
dup = &d->u[0].unused;
|
||||
dup = (xfs_dir2_data_unused_t *)(hdr + 1);
|
||||
dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
|
||||
|
||||
t=mp->m_dirblksize - (uint)sizeof(d->hdr);
|
||||
d->hdr.bestfree[0].length = cpu_to_be16(t);
|
||||
t = mp->m_dirblksize - (uint)sizeof(*hdr);
|
||||
hdr->bestfree[0].length = cpu_to_be16(t);
|
||||
dup->length = cpu_to_be16(t);
|
||||
*xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16((char *)dup - (char *)d);
|
||||
*xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16((char *)dup - (char *)hdr);
|
||||
/*
|
||||
* Log it and return it.
|
||||
*/
|
||||
@ -448,14 +454,14 @@ xfs_dir2_data_log_entry(
|
||||
xfs_dabuf_t *bp, /* block buffer */
|
||||
xfs_dir2_data_entry_t *dep) /* data entry pointer */
|
||||
{
|
||||
xfs_dir2_data_t *d; /* data block pointer */
|
||||
xfs_dir2_data_hdr_t *hdr = bp->data;
|
||||
|
||||
d = bp->data;
|
||||
ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
|
||||
be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
|
||||
xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)d),
|
||||
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
|
||||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
|
||||
|
||||
xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)hdr),
|
||||
(uint)((char *)(xfs_dir2_data_entry_tag_p(dep) + 1) -
|
||||
(char *)d - 1));
|
||||
(char *)hdr - 1));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -466,13 +472,12 @@ xfs_dir2_data_log_header(
|
||||
xfs_trans_t *tp, /* transaction pointer */
|
||||
xfs_dabuf_t *bp) /* block buffer */
|
||||
{
|
||||
xfs_dir2_data_t *d; /* data block pointer */
|
||||
xfs_dir2_data_hdr_t *hdr = bp->data;
|
||||
|
||||
d = bp->data;
|
||||
ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
|
||||
be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
|
||||
xfs_da_log_buf(tp, bp, (uint)((char *)&d->hdr - (char *)d),
|
||||
(uint)(sizeof(d->hdr) - 1));
|
||||
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
|
||||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
|
||||
|
||||
xfs_da_log_buf(tp, bp, 0, sizeof(*hdr) - 1);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -484,23 +489,23 @@ xfs_dir2_data_log_unused(
|
||||
xfs_dabuf_t *bp, /* block buffer */
|
||||
xfs_dir2_data_unused_t *dup) /* data unused pointer */
|
||||
{
|
||||
xfs_dir2_data_t *d; /* data block pointer */
|
||||
xfs_dir2_data_hdr_t *hdr = bp->data;
|
||||
|
||||
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
|
||||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
|
||||
|
||||
d = bp->data;
|
||||
ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
|
||||
be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
|
||||
/*
|
||||
* Log the first part of the unused entry.
|
||||
*/
|
||||
xfs_da_log_buf(tp, bp, (uint)((char *)dup - (char *)d),
|
||||
xfs_da_log_buf(tp, bp, (uint)((char *)dup - (char *)hdr),
|
||||
(uint)((char *)&dup->length + sizeof(dup->length) -
|
||||
1 - (char *)d));
|
||||
1 - (char *)hdr));
|
||||
/*
|
||||
* Log the end (tag) of the unused entry.
|
||||
*/
|
||||
xfs_da_log_buf(tp, bp,
|
||||
(uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)d),
|
||||
(uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)d +
|
||||
(uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)hdr),
|
||||
(uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)hdr +
|
||||
sizeof(xfs_dir2_data_off_t) - 1));
|
||||
}
|
||||
|
||||
@ -517,7 +522,7 @@ xfs_dir2_data_make_free(
|
||||
int *needlogp, /* out: log header */
|
||||
int *needscanp) /* out: regen bestfree */
|
||||
{
|
||||
xfs_dir2_data_t *d; /* data block pointer */
|
||||
xfs_dir2_data_hdr_t *hdr; /* data block pointer */
|
||||
xfs_dir2_data_free_t *dfp; /* bestfree pointer */
|
||||
char *endptr; /* end of data area */
|
||||
xfs_mount_t *mp; /* filesystem mount point */
|
||||
@ -527,28 +532,29 @@ xfs_dir2_data_make_free(
|
||||
xfs_dir2_data_unused_t *prevdup; /* unused entry before us */
|
||||
|
||||
mp = tp->t_mountp;
|
||||
d = bp->data;
|
||||
hdr = bp->data;
|
||||
|
||||
/*
|
||||
* Figure out where the end of the data area is.
|
||||
*/
|
||||
if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC)
|
||||
endptr = (char *)d + mp->m_dirblksize;
|
||||
if (hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC))
|
||||
endptr = (char *)hdr + mp->m_dirblksize;
|
||||
else {
|
||||
xfs_dir2_block_tail_t *btp; /* block tail */
|
||||
|
||||
ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
|
||||
btp = xfs_dir2_block_tail_p(mp, (xfs_dir2_block_t *)d);
|
||||
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
|
||||
btp = xfs_dir2_block_tail_p(mp, hdr);
|
||||
endptr = (char *)xfs_dir2_block_leaf_p(btp);
|
||||
}
|
||||
/*
|
||||
* If this isn't the start of the block, then back up to
|
||||
* the previous entry and see if it's free.
|
||||
*/
|
||||
if (offset > sizeof(d->hdr)) {
|
||||
if (offset > sizeof(*hdr)) {
|
||||
__be16 *tagp; /* tag just before us */
|
||||
|
||||
tagp = (__be16 *)((char *)d + offset) - 1;
|
||||
prevdup = (xfs_dir2_data_unused_t *)((char *)d + be16_to_cpu(*tagp));
|
||||
tagp = (__be16 *)((char *)hdr + offset) - 1;
|
||||
prevdup = (xfs_dir2_data_unused_t *)((char *)hdr + be16_to_cpu(*tagp));
|
||||
if (be16_to_cpu(prevdup->freetag) != XFS_DIR2_DATA_FREE_TAG)
|
||||
prevdup = NULL;
|
||||
} else
|
||||
@ -557,9 +563,9 @@ xfs_dir2_data_make_free(
|
||||
* If this isn't the end of the block, see if the entry after
|
||||
* us is free.
|
||||
*/
|
||||
if ((char *)d + offset + len < endptr) {
|
||||
if ((char *)hdr + offset + len < endptr) {
|
||||
postdup =
|
||||
(xfs_dir2_data_unused_t *)((char *)d + offset + len);
|
||||
(xfs_dir2_data_unused_t *)((char *)hdr + offset + len);
|
||||
if (be16_to_cpu(postdup->freetag) != XFS_DIR2_DATA_FREE_TAG)
|
||||
postdup = NULL;
|
||||
} else
|
||||
@ -576,21 +582,21 @@ xfs_dir2_data_make_free(
|
||||
/*
|
||||
* See if prevdup and/or postdup are in bestfree table.
|
||||
*/
|
||||
dfp = xfs_dir2_data_freefind(d, prevdup);
|
||||
dfp2 = xfs_dir2_data_freefind(d, postdup);
|
||||
dfp = xfs_dir2_data_freefind(hdr, prevdup);
|
||||
dfp2 = xfs_dir2_data_freefind(hdr, postdup);
|
||||
/*
|
||||
* We need a rescan unless there are exactly 2 free entries
|
||||
* namely our two. Then we know what's happening, otherwise
|
||||
* since the third bestfree is there, there might be more
|
||||
* entries.
|
||||
*/
|
||||
needscan = (d->hdr.bestfree[2].length != 0);
|
||||
needscan = (hdr->bestfree[2].length != 0);
|
||||
/*
|
||||
* Fix up the new big freespace.
|
||||
*/
|
||||
be16_add_cpu(&prevdup->length, len + be16_to_cpu(postdup->length));
|
||||
*xfs_dir2_data_unused_tag_p(prevdup) =
|
||||
cpu_to_be16((char *)prevdup - (char *)d);
|
||||
cpu_to_be16((char *)prevdup - (char *)hdr);
|
||||
xfs_dir2_data_log_unused(tp, bp, prevdup);
|
||||
if (!needscan) {
|
||||
/*
|
||||
@ -600,18 +606,18 @@ xfs_dir2_data_make_free(
|
||||
* Remove entry 1 first then entry 0.
|
||||
*/
|
||||
ASSERT(dfp && dfp2);
|
||||
if (dfp == &d->hdr.bestfree[1]) {
|
||||
dfp = &d->hdr.bestfree[0];
|
||||
if (dfp == &hdr->bestfree[1]) {
|
||||
dfp = &hdr->bestfree[0];
|
||||
ASSERT(dfp2 == dfp);
|
||||
dfp2 = &d->hdr.bestfree[1];
|
||||
dfp2 = &hdr->bestfree[1];
|
||||
}
|
||||
xfs_dir2_data_freeremove(d, dfp2, needlogp);
|
||||
xfs_dir2_data_freeremove(d, dfp, needlogp);
|
||||
xfs_dir2_data_freeremove(hdr, dfp2, needlogp);
|
||||
xfs_dir2_data_freeremove(hdr, dfp, needlogp);
|
||||
/*
|
||||
* Now insert the new entry.
|
||||
*/
|
||||
dfp = xfs_dir2_data_freeinsert(d, prevdup, needlogp);
|
||||
ASSERT(dfp == &d->hdr.bestfree[0]);
|
||||
dfp = xfs_dir2_data_freeinsert(hdr, prevdup, needlogp);
|
||||
ASSERT(dfp == &hdr->bestfree[0]);
|
||||
ASSERT(dfp->length == prevdup->length);
|
||||
ASSERT(!dfp[1].length);
|
||||
ASSERT(!dfp[2].length);
|
||||
@ -621,10 +627,10 @@ xfs_dir2_data_make_free(
|
||||
* The entry before us is free, merge with it.
|
||||
*/
|
||||
else if (prevdup) {
|
||||
dfp = xfs_dir2_data_freefind(d, prevdup);
|
||||
dfp = xfs_dir2_data_freefind(hdr, prevdup);
|
||||
be16_add_cpu(&prevdup->length, len);
|
||||
*xfs_dir2_data_unused_tag_p(prevdup) =
|
||||
cpu_to_be16((char *)prevdup - (char *)d);
|
||||
cpu_to_be16((char *)prevdup - (char *)hdr);
|
||||
xfs_dir2_data_log_unused(tp, bp, prevdup);
|
||||
/*
|
||||
* If the previous entry was in the table, the new entry
|
||||
@ -632,27 +638,27 @@ xfs_dir2_data_make_free(
|
||||
* the old one and add the new one.
|
||||
*/
|
||||
if (dfp) {
|
||||
xfs_dir2_data_freeremove(d, dfp, needlogp);
|
||||
(void)xfs_dir2_data_freeinsert(d, prevdup, needlogp);
|
||||
xfs_dir2_data_freeremove(hdr, dfp, needlogp);
|
||||
xfs_dir2_data_freeinsert(hdr, prevdup, needlogp);
|
||||
}
|
||||
/*
|
||||
* Otherwise we need a scan if the new entry is big enough.
|
||||
*/
|
||||
else {
|
||||
needscan = be16_to_cpu(prevdup->length) >
|
||||
be16_to_cpu(d->hdr.bestfree[2].length);
|
||||
be16_to_cpu(hdr->bestfree[2].length);
|
||||
}
|
||||
}
|
||||
/*
|
||||
* The following entry is free, merge with it.
|
||||
*/
|
||||
else if (postdup) {
|
||||
dfp = xfs_dir2_data_freefind(d, postdup);
|
||||
newdup = (xfs_dir2_data_unused_t *)((char *)d + offset);
|
||||
dfp = xfs_dir2_data_freefind(hdr, postdup);
|
||||
newdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset);
|
||||
newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
|
||||
newdup->length = cpu_to_be16(len + be16_to_cpu(postdup->length));
|
||||
*xfs_dir2_data_unused_tag_p(newdup) =
|
||||
cpu_to_be16((char *)newdup - (char *)d);
|
||||
cpu_to_be16((char *)newdup - (char *)hdr);
|
||||
xfs_dir2_data_log_unused(tp, bp, newdup);
|
||||
/*
|
||||
* If the following entry was in the table, the new entry
|
||||
@ -660,28 +666,28 @@ xfs_dir2_data_make_free(
|
||||
* the old one and add the new one.
|
||||
*/
|
||||
if (dfp) {
|
||||
xfs_dir2_data_freeremove(d, dfp, needlogp);
|
||||
(void)xfs_dir2_data_freeinsert(d, newdup, needlogp);
|
||||
xfs_dir2_data_freeremove(hdr, dfp, needlogp);
|
||||
xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
|
||||
}
|
||||
/*
|
||||
* Otherwise we need a scan if the new entry is big enough.
|
||||
*/
|
||||
else {
|
||||
needscan = be16_to_cpu(newdup->length) >
|
||||
be16_to_cpu(d->hdr.bestfree[2].length);
|
||||
be16_to_cpu(hdr->bestfree[2].length);
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Neither neighbor is free. Make a new entry.
|
||||
*/
|
||||
else {
|
||||
newdup = (xfs_dir2_data_unused_t *)((char *)d + offset);
|
||||
newdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset);
|
||||
newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
|
||||
newdup->length = cpu_to_be16(len);
|
||||
*xfs_dir2_data_unused_tag_p(newdup) =
|
||||
cpu_to_be16((char *)newdup - (char *)d);
|
||||
cpu_to_be16((char *)newdup - (char *)hdr);
|
||||
xfs_dir2_data_log_unused(tp, bp, newdup);
|
||||
(void)xfs_dir2_data_freeinsert(d, newdup, needlogp);
|
||||
xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
|
||||
}
|
||||
*needscanp = needscan;
|
||||
}
|
||||
@ -699,7 +705,7 @@ xfs_dir2_data_use_free(
|
||||
int *needlogp, /* out: need to log header */
|
||||
int *needscanp) /* out: need regen bestfree */
|
||||
{
|
||||
xfs_dir2_data_t *d; /* data block */
|
||||
xfs_dir2_data_hdr_t *hdr; /* data block header */
|
||||
xfs_dir2_data_free_t *dfp; /* bestfree pointer */
|
||||
int matchback; /* matches end of freespace */
|
||||
int matchfront; /* matches start of freespace */
|
||||
@ -708,24 +714,24 @@ xfs_dir2_data_use_free(
|
||||
xfs_dir2_data_unused_t *newdup2; /* another new unused entry */
|
||||
int oldlen; /* old unused entry's length */
|
||||
|
||||
d = bp->data;
|
||||
ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
|
||||
be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
|
||||
hdr = bp->data;
|
||||
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
|
||||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
|
||||
ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG);
|
||||
ASSERT(offset >= (char *)dup - (char *)d);
|
||||
ASSERT(offset + len <= (char *)dup + be16_to_cpu(dup->length) - (char *)d);
|
||||
ASSERT((char *)dup - (char *)d == be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)));
|
||||
ASSERT(offset >= (char *)dup - (char *)hdr);
|
||||
ASSERT(offset + len <= (char *)dup + be16_to_cpu(dup->length) - (char *)hdr);
|
||||
ASSERT((char *)dup - (char *)hdr == be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)));
|
||||
/*
|
||||
* Look up the entry in the bestfree table.
|
||||
*/
|
||||
dfp = xfs_dir2_data_freefind(d, dup);
|
||||
dfp = xfs_dir2_data_freefind(hdr, dup);
|
||||
oldlen = be16_to_cpu(dup->length);
|
||||
ASSERT(dfp || oldlen <= be16_to_cpu(d->hdr.bestfree[2].length));
|
||||
ASSERT(dfp || oldlen <= be16_to_cpu(hdr->bestfree[2].length));
|
||||
/*
|
||||
* Check for alignment with front and back of the entry.
|
||||
*/
|
||||
matchfront = (char *)dup - (char *)d == offset;
|
||||
matchback = (char *)dup + oldlen - (char *)d == offset + len;
|
||||
matchfront = (char *)dup - (char *)hdr == offset;
|
||||
matchback = (char *)dup + oldlen - (char *)hdr == offset + len;
|
||||
ASSERT(*needscanp == 0);
|
||||
needscan = 0;
|
||||
/*
|
||||
@ -734,9 +740,9 @@ xfs_dir2_data_use_free(
|
||||
*/
|
||||
if (matchfront && matchback) {
|
||||
if (dfp) {
|
||||
needscan = (d->hdr.bestfree[2].offset != 0);
|
||||
needscan = (hdr->bestfree[2].offset != 0);
|
||||
if (!needscan)
|
||||
xfs_dir2_data_freeremove(d, dfp, needlogp);
|
||||
xfs_dir2_data_freeremove(hdr, dfp, needlogp);
|
||||
}
|
||||
}
|
||||
/*
|
||||
@ -744,27 +750,27 @@ xfs_dir2_data_use_free(
|
||||
* Make a new entry with the remaining freespace.
|
||||
*/
|
||||
else if (matchfront) {
|
||||
newdup = (xfs_dir2_data_unused_t *)((char *)d + offset + len);
|
||||
newdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset + len);
|
||||
newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
|
||||
newdup->length = cpu_to_be16(oldlen - len);
|
||||
*xfs_dir2_data_unused_tag_p(newdup) =
|
||||
cpu_to_be16((char *)newdup - (char *)d);
|
||||
cpu_to_be16((char *)newdup - (char *)hdr);
|
||||
xfs_dir2_data_log_unused(tp, bp, newdup);
|
||||
/*
|
||||
* If it was in the table, remove it and add the new one.
|
||||
*/
|
||||
if (dfp) {
|
||||
xfs_dir2_data_freeremove(d, dfp, needlogp);
|
||||
dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp);
|
||||
xfs_dir2_data_freeremove(hdr, dfp, needlogp);
|
||||
dfp = xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
|
||||
ASSERT(dfp != NULL);
|
||||
ASSERT(dfp->length == newdup->length);
|
||||
ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)d);
|
||||
ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)hdr);
|
||||
/*
|
||||
* If we got inserted at the last slot,
|
||||
* that means we don't know if there was a better
|
||||
* choice for the last slot, or not. Rescan.
|
||||
*/
|
||||
needscan = dfp == &d->hdr.bestfree[2];
|
||||
needscan = dfp == &hdr->bestfree[2];
|
||||
}
|
||||
}
|
||||
/*
|
||||
@ -773,25 +779,25 @@ xfs_dir2_data_use_free(
|
||||
*/
|
||||
else if (matchback) {
|
||||
newdup = dup;
|
||||
newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup);
|
||||
newdup->length = cpu_to_be16(((char *)hdr + offset) - (char *)newdup);
|
||||
*xfs_dir2_data_unused_tag_p(newdup) =
|
||||
cpu_to_be16((char *)newdup - (char *)d);
|
||||
cpu_to_be16((char *)newdup - (char *)hdr);
|
||||
xfs_dir2_data_log_unused(tp, bp, newdup);
|
||||
/*
|
||||
* If it was in the table, remove it and add the new one.
|
||||
*/
|
||||
if (dfp) {
|
||||
xfs_dir2_data_freeremove(d, dfp, needlogp);
|
||||
dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp);
|
||||
xfs_dir2_data_freeremove(hdr, dfp, needlogp);
|
||||
dfp = xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
|
||||
ASSERT(dfp != NULL);
|
||||
ASSERT(dfp->length == newdup->length);
|
||||
ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)d);
|
||||
ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)hdr);
|
||||
/*
|
||||
* If we got inserted at the last slot,
|
||||
* that means we don't know if there was a better
|
||||
* choice for the last slot, or not. Rescan.
|
||||
*/
|
||||
needscan = dfp == &d->hdr.bestfree[2];
|
||||
needscan = dfp == &hdr->bestfree[2];
|
||||
}
|
||||
}
|
||||
/*
|
||||
@ -800,15 +806,15 @@ xfs_dir2_data_use_free(
|
||||
*/
|
||||
else {
|
||||
newdup = dup;
|
||||
newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup);
|
||||
newdup->length = cpu_to_be16(((char *)hdr + offset) - (char *)newdup);
|
||||
*xfs_dir2_data_unused_tag_p(newdup) =
|
||||
cpu_to_be16((char *)newdup - (char *)d);
|
||||
cpu_to_be16((char *)newdup - (char *)hdr);
|
||||
xfs_dir2_data_log_unused(tp, bp, newdup);
|
||||
newdup2 = (xfs_dir2_data_unused_t *)((char *)d + offset + len);
|
||||
newdup2 = (xfs_dir2_data_unused_t *)((char *)hdr + offset + len);
|
||||
newdup2->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
|
||||
newdup2->length = cpu_to_be16(oldlen - len - be16_to_cpu(newdup->length));
|
||||
*xfs_dir2_data_unused_tag_p(newdup2) =
|
||||
cpu_to_be16((char *)newdup2 - (char *)d);
|
||||
cpu_to_be16((char *)newdup2 - (char *)hdr);
|
||||
xfs_dir2_data_log_unused(tp, bp, newdup2);
|
||||
/*
|
||||
* If the old entry was in the table, we need to scan
|
||||
@ -819,13 +825,12 @@ xfs_dir2_data_use_free(
|
||||
* the 2 new will work.
|
||||
*/
|
||||
if (dfp) {
|
||||
needscan = (d->hdr.bestfree[2].length != 0);
|
||||
needscan = (hdr->bestfree[2].length != 0);
|
||||
if (!needscan) {
|
||||
xfs_dir2_data_freeremove(d, dfp, needlogp);
|
||||
(void)xfs_dir2_data_freeinsert(d, newdup,
|
||||
needlogp);
|
||||
(void)xfs_dir2_data_freeinsert(d, newdup2,
|
||||
needlogp);
|
||||
xfs_dir2_data_freeremove(hdr, dfp, needlogp);
|
||||
xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
|
||||
xfs_dir2_data_freeinsert(hdr, newdup2,
|
||||
needlogp);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,184 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2000,2005 Silicon Graphics, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it would be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
#ifndef __XFS_DIR2_DATA_H__
|
||||
#define __XFS_DIR2_DATA_H__
|
||||
|
||||
/*
|
||||
* Directory format 2, data block structures.
|
||||
*/
|
||||
|
||||
struct xfs_dabuf;
|
||||
struct xfs_da_args;
|
||||
struct xfs_inode;
|
||||
struct xfs_trans;
|
||||
|
||||
/*
|
||||
* Constants.
|
||||
*/
|
||||
#define XFS_DIR2_DATA_MAGIC 0x58443244 /* XD2D: for multiblock dirs */
|
||||
#define XFS_DIR2_DATA_ALIGN_LOG 3 /* i.e., 8 bytes */
|
||||
#define XFS_DIR2_DATA_ALIGN (1 << XFS_DIR2_DATA_ALIGN_LOG)
|
||||
#define XFS_DIR2_DATA_FREE_TAG 0xffff
|
||||
#define XFS_DIR2_DATA_FD_COUNT 3
|
||||
|
||||
/*
|
||||
* Directory address space divided into sections,
|
||||
* spaces separated by 32GB.
|
||||
*/
|
||||
#define XFS_DIR2_SPACE_SIZE (1ULL << (32 + XFS_DIR2_DATA_ALIGN_LOG))
|
||||
#define XFS_DIR2_DATA_SPACE 0
|
||||
#define XFS_DIR2_DATA_OFFSET (XFS_DIR2_DATA_SPACE * XFS_DIR2_SPACE_SIZE)
|
||||
#define XFS_DIR2_DATA_FIRSTDB(mp) \
|
||||
xfs_dir2_byte_to_db(mp, XFS_DIR2_DATA_OFFSET)
|
||||
|
||||
/*
|
||||
* Offsets of . and .. in data space (always block 0)
|
||||
*/
|
||||
#define XFS_DIR2_DATA_DOT_OFFSET \
|
||||
((xfs_dir2_data_aoff_t)sizeof(xfs_dir2_data_hdr_t))
|
||||
#define XFS_DIR2_DATA_DOTDOT_OFFSET \
|
||||
(XFS_DIR2_DATA_DOT_OFFSET + xfs_dir2_data_entsize(1))
|
||||
#define XFS_DIR2_DATA_FIRST_OFFSET \
|
||||
(XFS_DIR2_DATA_DOTDOT_OFFSET + xfs_dir2_data_entsize(2))
|
||||
|
||||
/*
|
||||
* Structures.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Describe a free area in the data block.
|
||||
* The freespace will be formatted as a xfs_dir2_data_unused_t.
|
||||
*/
|
||||
typedef struct xfs_dir2_data_free {
|
||||
__be16 offset; /* start of freespace */
|
||||
__be16 length; /* length of freespace */
|
||||
} xfs_dir2_data_free_t;
|
||||
|
||||
/*
|
||||
* Header for the data blocks.
|
||||
* Always at the beginning of a directory-sized block.
|
||||
* The code knows that XFS_DIR2_DATA_FD_COUNT is 3.
|
||||
*/
|
||||
typedef struct xfs_dir2_data_hdr {
|
||||
__be32 magic; /* XFS_DIR2_DATA_MAGIC */
|
||||
/* or XFS_DIR2_BLOCK_MAGIC */
|
||||
xfs_dir2_data_free_t bestfree[XFS_DIR2_DATA_FD_COUNT];
|
||||
} xfs_dir2_data_hdr_t;
|
||||
|
||||
/*
|
||||
* Active entry in a data block. Aligned to 8 bytes.
|
||||
* Tag appears as the last 2 bytes.
|
||||
*/
|
||||
typedef struct xfs_dir2_data_entry {
|
||||
__be64 inumber; /* inode number */
|
||||
__u8 namelen; /* name length */
|
||||
__u8 name[1]; /* name bytes, no null */
|
||||
/* variable offset */
|
||||
__be16 tag; /* starting offset of us */
|
||||
} xfs_dir2_data_entry_t;
|
||||
|
||||
/*
|
||||
* Unused entry in a data block. Aligned to 8 bytes.
|
||||
* Tag appears as the last 2 bytes.
|
||||
*/
|
||||
typedef struct xfs_dir2_data_unused {
|
||||
__be16 freetag; /* XFS_DIR2_DATA_FREE_TAG */
|
||||
__be16 length; /* total free length */
|
||||
/* variable offset */
|
||||
__be16 tag; /* starting offset of us */
|
||||
} xfs_dir2_data_unused_t;
|
||||
|
||||
typedef union {
|
||||
xfs_dir2_data_entry_t entry;
|
||||
xfs_dir2_data_unused_t unused;
|
||||
} xfs_dir2_data_union_t;
|
||||
|
||||
/*
|
||||
* Generic data block structure, for xfs_db.
|
||||
*/
|
||||
typedef struct xfs_dir2_data {
|
||||
xfs_dir2_data_hdr_t hdr; /* magic XFS_DIR2_DATA_MAGIC */
|
||||
xfs_dir2_data_union_t u[1];
|
||||
} xfs_dir2_data_t;
|
||||
|
||||
/*
|
||||
* Macros.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Size of a data entry.
|
||||
*/
|
||||
static inline int xfs_dir2_data_entsize(int n)
|
||||
{
|
||||
return (int)roundup(offsetof(xfs_dir2_data_entry_t, name[0]) + (n) + \
|
||||
(uint)sizeof(xfs_dir2_data_off_t), XFS_DIR2_DATA_ALIGN);
|
||||
}
|
||||
|
||||
/*
|
||||
* Pointer to an entry's tag word.
|
||||
*/
|
||||
static inline __be16 *
|
||||
xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep)
|
||||
{
|
||||
return (__be16 *)((char *)dep +
|
||||
xfs_dir2_data_entsize(dep->namelen) - sizeof(__be16));
|
||||
}
|
||||
|
||||
/*
|
||||
* Pointer to a freespace's tag word.
|
||||
*/
|
||||
static inline __be16 *
|
||||
xfs_dir2_data_unused_tag_p(xfs_dir2_data_unused_t *dup)
|
||||
{
|
||||
return (__be16 *)((char *)dup +
|
||||
be16_to_cpu(dup->length) - sizeof(__be16));
|
||||
}
|
||||
|
||||
/*
|
||||
* Function declarations.
|
||||
*/
|
||||
#ifdef DEBUG
|
||||
extern void xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_dabuf *bp);
|
||||
#else
|
||||
#define xfs_dir2_data_check(dp,bp)
|
||||
#endif
|
||||
extern xfs_dir2_data_free_t *xfs_dir2_data_freefind(xfs_dir2_data_t *d,
|
||||
xfs_dir2_data_unused_t *dup);
|
||||
extern xfs_dir2_data_free_t *xfs_dir2_data_freeinsert(xfs_dir2_data_t *d,
|
||||
xfs_dir2_data_unused_t *dup, int *loghead);
|
||||
extern void xfs_dir2_data_freescan(struct xfs_mount *mp, xfs_dir2_data_t *d,
|
||||
int *loghead);
|
||||
extern int xfs_dir2_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno,
|
||||
struct xfs_dabuf **bpp);
|
||||
extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_dabuf *bp,
|
||||
xfs_dir2_data_entry_t *dep);
|
||||
extern void xfs_dir2_data_log_header(struct xfs_trans *tp,
|
||||
struct xfs_dabuf *bp);
|
||||
extern void xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_dabuf *bp,
|
||||
xfs_dir2_data_unused_t *dup);
|
||||
extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_dabuf *bp,
|
||||
xfs_dir2_data_aoff_t offset,
|
||||
xfs_dir2_data_aoff_t len, int *needlogp,
|
||||
int *needscanp);
|
||||
extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_dabuf *bp,
|
||||
xfs_dir2_data_unused_t *dup,
|
||||
xfs_dir2_data_aoff_t offset,
|
||||
xfs_dir2_data_aoff_t len, int *needlogp,
|
||||
int *needscanp);
|
||||
|
||||
#endif /* __XFS_DIR2_DATA_H__ */
|
597
fs/xfs/xfs_dir2_format.h
Normal file
@ -0,0 +1,597 @
|
||||
/*
|
||||
* Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it would be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
#ifndef __XFS_DIR2_FORMAT_H__
|
||||
#define __XFS_DIR2_FORMAT_H__
|
||||
|
||||
/*
|
||||
* Directory version 2.
|
||||
*
|
||||
* There are 4 possible formats:
|
||||
* - shortform - embedded into the inode
|
||||
* - single block - data with embedded leaf at the end
|
||||
* - multiple data blocks, single leaf+freeindex block
|
||||
* - data blocks, node and leaf blocks (btree), freeindex blocks
|
||||
*
|
||||
* Note: many node blocks structures and constants are shared with the attr
|
||||
* code and defined in xfs_da_btree.h.
|
||||
*/
|
||||
|
||||
#define XFS_DIR2_BLOCK_MAGIC 0x58443242 /* XD2B: single block dirs */
|
||||
#define XFS_DIR2_DATA_MAGIC 0x58443244 /* XD2D: multiblock dirs */
|
||||
#define XFS_DIR2_FREE_MAGIC 0x58443246 /* XD2F: free index blocks */
|
||||
|
||||
/*
|
||||
* Byte offset in data block and shortform entry.
|
||||
*/
|
||||
typedef __uint16_t xfs_dir2_data_off_t;
|
||||
#define NULLDATAOFF 0xffffU
|
||||
typedef uint xfs_dir2_data_aoff_t; /* argument form */
|
||||
|
||||
/*
|
||||
* Normalized offset (in a data block) of the entry, really xfs_dir2_data_off_t.
|
||||
* Only need 16 bits, this is the byte offset into the single block form.
|
||||
*/
|
||||
typedef struct { __uint8_t i[2]; } __arch_pack xfs_dir2_sf_off_t;
|
||||
|
||||
/*
|
||||
* Offset in data space of a data entry.
|
||||
*/
|
||||
typedef __uint32_t xfs_dir2_dataptr_t;
|
||||
#define XFS_DIR2_MAX_DATAPTR ((xfs_dir2_dataptr_t)0xffffffff)
|
||||
#define XFS_DIR2_NULL_DATAPTR ((xfs_dir2_dataptr_t)0)
|
||||
|
||||
/*
|
||||
* Byte offset in a directory.
|
||||
*/
|
||||
typedef xfs_off_t xfs_dir2_off_t;
|
||||
|
||||
/*
|
||||
* Directory block number (logical dirblk in file)
|
||||
*/
|
||||
typedef __uint32_t xfs_dir2_db_t;
|
||||
|
||||
/*
|
||||
* Inode number stored as 8 8-bit values.
|
||||
*/
|
||||
typedef struct { __uint8_t i[8]; } xfs_dir2_ino8_t;
|
||||
|
||||
/*
|
||||
* Inode number stored as 4 8-bit values.
|
||||
* Works a lot of the time, when all the inode numbers in a directory
|
||||
* fit in 32 bits.
|
||||
*/
|
||||
typedef struct { __uint8_t i[4]; } xfs_dir2_ino4_t;
|
||||
|
||||
typedef union {
|
||||
xfs_dir2_ino8_t i8;
|
||||
xfs_dir2_ino4_t i4;
|
||||
} xfs_dir2_inou_t;
|
||||
#define XFS_DIR2_MAX_SHORT_INUM ((xfs_ino_t)0xffffffffULL)
|
||||
|
||||
/*
|
||||
* Directory layout when stored internal to an inode.
|
||||
*
|
||||
* Small directories are packed as tightly as possible so as to fit into the
|
||||
* literal area of the inode. These "shortform" directories consist of a
|
||||
* single xfs_dir2_sf_hdr header followed by zero or more xfs_dir2_sf_entry
|
||||
 * structures. Due to the different inode number storage size and the variable
|
||||
 * length name field in the xfs_dir2_sf_entry all these structures are
|
||||
* variable length, and the accessors in this file should be used to iterate
|
||||
* over them.
|
||||
*/
|
||||
typedef struct xfs_dir2_sf_hdr {
|
||||
__uint8_t count; /* count of entries */
|
||||
__uint8_t i8count; /* count of 8-byte inode #s */
|
||||
xfs_dir2_inou_t parent; /* parent dir inode number */
|
||||
} __arch_pack xfs_dir2_sf_hdr_t;
|
||||
|
||||
typedef struct xfs_dir2_sf_entry {
|
||||
__u8 namelen; /* actual name length */
|
||||
xfs_dir2_sf_off_t offset; /* saved offset */
|
||||
__u8 name[]; /* name, variable size */
|
||||
/*
|
||||
* A xfs_dir2_ino8_t or xfs_dir2_ino4_t follows here, at a
|
||||
* variable offset after the name.
|
||||
*/
|
||||
} __arch_pack xfs_dir2_sf_entry_t;
|
||||
|
||||
static inline int xfs_dir2_sf_hdr_size(int i8count)
|
||||
{
|
||||
return sizeof(struct xfs_dir2_sf_hdr) -
|
||||
(i8count == 0) *
|
||||
(sizeof(xfs_dir2_ino8_t) - sizeof(xfs_dir2_ino4_t));
|
||||
}
|
||||
|
||||
static inline xfs_dir2_data_aoff_t
|
||||
xfs_dir2_sf_get_offset(xfs_dir2_sf_entry_t *sfep)
|
||||
{
|
||||
return get_unaligned_be16(&sfep->offset.i);
|
||||
}
|
||||
|
||||
static inline void
|
||||
xfs_dir2_sf_put_offset(xfs_dir2_sf_entry_t *sfep, xfs_dir2_data_aoff_t off)
|
||||
{
|
||||
put_unaligned_be16(off, &sfep->offset.i);
|
||||
}
|
||||
|
||||
static inline int
|
||||
xfs_dir2_sf_entsize(struct xfs_dir2_sf_hdr *hdr, int len)
|
||||
{
|
||||
return sizeof(struct xfs_dir2_sf_entry) + /* namelen + offset */
|
||||
len + /* name */
|
||||
(hdr->i8count ? /* ino */
|
||||
sizeof(xfs_dir2_ino8_t) :
|
||||
sizeof(xfs_dir2_ino4_t));
|
||||
}
|
||||
|
||||
static inline struct xfs_dir2_sf_entry *
|
||||
xfs_dir2_sf_firstentry(struct xfs_dir2_sf_hdr *hdr)
|
||||
{
|
||||
return (struct xfs_dir2_sf_entry *)
|
||||
((char *)hdr + xfs_dir2_sf_hdr_size(hdr->i8count));
|
||||
}
|
||||
|
||||
static inline struct xfs_dir2_sf_entry *
|
||||
xfs_dir2_sf_nextentry(struct xfs_dir2_sf_hdr *hdr,
|
||||
struct xfs_dir2_sf_entry *sfep)
|
||||
{
|
||||
return (struct xfs_dir2_sf_entry *)
|
||||
((char *)sfep + xfs_dir2_sf_entsize(hdr, sfep->namelen));
|
||||
}
|
||||
|
||||
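Because each shortform entry's size depends on its name length and on whether the directory stores 4- or 8-byte inode numbers (the packed header itself is 10 bytes when i8count is set and 6 bytes otherwise), the accessors above are the only safe way to walk the entries. A minimal sketch of the iteration pattern, illustrative rather than part of the patch; xfs_dir2_sfe_get_ino() is the per-entry inode helper referenced elsewhere in this series:

	/* sfp points at the xfs_dir2_sf_hdr in the inode's data fork */
	struct xfs_dir2_sf_entry	*sfep;
	int				i;

	sfep = xfs_dir2_sf_firstentry(sfp);
	for (i = 0; i < sfp->count; i++) {
		/* sfep->name is sfep->namelen bytes long, not NUL terminated */
		/* xfs_dir2_sfe_get_ino(sfp, sfep) returns the entry's inode number */
		sfep = xfs_dir2_sf_nextentry(sfp, sfep);
	}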
|
||||
/*
|
||||
* Data block structures.
|
||||
*
|
||||
* A pure data block looks like the following drawing on disk:
|
||||
*
|
||||
* +-------------------------------------------------+
|
||||
* | xfs_dir2_data_hdr_t |
|
||||
* +-------------------------------------------------+
|
||||
* | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t |
|
||||
* | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t |
|
||||
* | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t |
|
||||
* | ... |
|
||||
* +-------------------------------------------------+
|
||||
* | unused space |
|
||||
* +-------------------------------------------------+
|
||||
*
|
||||
* As all the entries are variable size structures the accessors below should
|
||||
* be used to iterate over them.
|
||||
*
|
||||
* In addition to the pure data blocks for the data and node formats,
|
||||
* most structures are also used for the combined data/freespace "block"
|
||||
* format below.
|
||||
*/
|
||||
|
||||
#define XFS_DIR2_DATA_ALIGN_LOG 3 /* i.e., 8 bytes */
|
||||
#define XFS_DIR2_DATA_ALIGN (1 << XFS_DIR2_DATA_ALIGN_LOG)
|
||||
#define XFS_DIR2_DATA_FREE_TAG 0xffff
|
||||
#define XFS_DIR2_DATA_FD_COUNT 3
|
||||
|
||||
/*
|
||||
* Directory address space divided into sections,
|
||||
* spaces separated by 32GB.
|
||||
*/
|
||||
#define XFS_DIR2_SPACE_SIZE (1ULL << (32 + XFS_DIR2_DATA_ALIGN_LOG))
|
||||
#define XFS_DIR2_DATA_SPACE 0
|
||||
#define XFS_DIR2_DATA_OFFSET (XFS_DIR2_DATA_SPACE * XFS_DIR2_SPACE_SIZE)
|
||||
#define XFS_DIR2_DATA_FIRSTDB(mp) \
|
||||
xfs_dir2_byte_to_db(mp, XFS_DIR2_DATA_OFFSET)
|
||||
|
||||
/*
|
||||
* Offsets of . and .. in data space (always block 0)
|
||||
*/
|
||||
#define XFS_DIR2_DATA_DOT_OFFSET \
|
||||
((xfs_dir2_data_aoff_t)sizeof(struct xfs_dir2_data_hdr))
|
||||
#define XFS_DIR2_DATA_DOTDOT_OFFSET \
|
||||
(XFS_DIR2_DATA_DOT_OFFSET + xfs_dir2_data_entsize(1))
|
||||
#define XFS_DIR2_DATA_FIRST_OFFSET \
|
||||
(XFS_DIR2_DATA_DOTDOT_OFFSET + xfs_dir2_data_entsize(2))
|
||||
|
||||
/*
|
||||
* Describe a free area in the data block.
|
||||
*
|
||||
* The freespace will be formatted as a xfs_dir2_data_unused_t.
|
||||
*/
|
||||
typedef struct xfs_dir2_data_free {
|
||||
__be16 offset; /* start of freespace */
|
||||
__be16 length; /* length of freespace */
|
||||
} xfs_dir2_data_free_t;
|
||||
|
||||
/*
|
||||
* Header for the data blocks.
|
||||
*
|
||||
* The code knows that XFS_DIR2_DATA_FD_COUNT is 3.
|
||||
*/
|
||||
typedef struct xfs_dir2_data_hdr {
|
||||
__be32 magic; /* XFS_DIR2_DATA_MAGIC or */
|
||||
/* XFS_DIR2_BLOCK_MAGIC */
|
||||
xfs_dir2_data_free_t bestfree[XFS_DIR2_DATA_FD_COUNT];
|
||||
} xfs_dir2_data_hdr_t;
|
||||
|
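The bestfree array is the block's summary of its three largest free regions, kept in order of decreasing length, with unused slots holding zero offset and length; xfs_dir2_data_freeinsert() and xfs_dir2_data_freeremove() above maintain that order, and the DEBUG code in xfs_dir2_data_freefind() asserts it. Purely as an illustration of the invariant (not code from the patch):

	for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++)
		ASSERT(be16_to_cpu(hdr->bestfree[i - 1].length) >=
		       be16_to_cpu(hdr->bestfree[i].length));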
||||
/*
|
||||
* Active entry in a data block.
|
||||
*
|
||||
* Aligned to 8 bytes. After the variable length name field there is a
|
||||
* 2 byte tag field, which can be accessed using xfs_dir2_data_entry_tag_p.
|
||||
*/
|
||||
typedef struct xfs_dir2_data_entry {
|
||||
__be64 inumber; /* inode number */
|
||||
__u8 namelen; /* name length */
|
||||
__u8 name[]; /* name bytes, no null */
|
||||
/* __be16 tag; */ /* starting offset of us */
|
||||
} xfs_dir2_data_entry_t;
|
||||
|
||||
/*
|
||||
* Unused entry in a data block.
|
||||
*
|
||||
* Aligned to 8 bytes. Tag appears as the last 2 bytes and must be accessed
|
||||
* using xfs_dir2_data_unused_tag_p.
|
||||
*/
|
||||
typedef struct xfs_dir2_data_unused {
|
||||
__be16 freetag; /* XFS_DIR2_DATA_FREE_TAG */
|
||||
__be16 length; /* total free length */
|
||||
/* variable offset */
|
||||
__be16 tag; /* starting offset of us */
|
||||
} xfs_dir2_data_unused_t;
|
||||
|
||||
/*
|
||||
* Size of a data entry.
|
||||
*/
|
||||
static inline int xfs_dir2_data_entsize(int n)
|
||||
{
|
||||
return (int)roundup(offsetof(struct xfs_dir2_data_entry, name[0]) + n +
|
||||
(uint)sizeof(xfs_dir2_data_off_t), XFS_DIR2_DATA_ALIGN);
|
||||
}
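
For illustration only, not part of the patch: a minimal standalone C sketch of the size arithmetic above, assuming the on-disk layout shown here (8-byte inumber, 1-byte namelen, 2-byte tag, 16-byte data block header) and 8-byte alignment. Under those assumptions entsize(1) and entsize(2) both come to 16 bytes, which is where the "." and ".." offsets defined earlier fall out.

/*
 * Standalone sketch, not kernel code: mirrors xfs_dir2_data_entsize()
 * to show how entry sizes and the "." / ".." offsets are derived.
 * Layout assumptions: 8-byte inumber + 1-byte namelen + name + 2-byte tag,
 * rounded up to 8 bytes; 16-byte data block header.
 */
#include <stdio.h>

#define DATA_ALIGN	8	/* XFS_DIR2_DATA_ALIGN */
#define HDR_SIZE	16	/* 4-byte magic + 3 * 4-byte bestfree */

static int entsize(int namelen)
{
	int raw = 8 + 1 + namelen + 2;	/* inumber + namelen + name + tag */

	return (raw + DATA_ALIGN - 1) & ~(DATA_ALIGN - 1);
}

int main(void)
{
	int dot = HDR_SIZE;			/* "." entry offset */
	int dotdot = dot + entsize(1);		/* ".." entry offset */
	int first = dotdot + entsize(2);	/* first real entry */

	printf("entsize(1)=%d entsize(2)=%d\n", entsize(1), entsize(2));
	printf("dot=%d dotdot=%d first=%d\n", dot, dotdot, first);
	return 0;
}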

/*
 * Pointer to an entry's tag word.
 */
static inline __be16 *
xfs_dir2_data_entry_tag_p(struct xfs_dir2_data_entry *dep)
{
	return (__be16 *)((char *)dep +
		xfs_dir2_data_entsize(dep->namelen) - sizeof(__be16));
}

/*
 * Pointer to a freespace's tag word.
 */
static inline __be16 *
xfs_dir2_data_unused_tag_p(struct xfs_dir2_data_unused *dup)
{
	return (__be16 *)((char *)dup +
			be16_to_cpu(dup->length) - sizeof(__be16));
}

/*
 * Leaf block structures.
 *
 * A pure leaf block looks like the following drawing on disk:
 *
 *    +---------------------------+
 *    | xfs_dir2_leaf_hdr_t       |
 *    +---------------------------+
 *    | xfs_dir2_leaf_entry_t     |
 *    | xfs_dir2_leaf_entry_t     |
 *    | xfs_dir2_leaf_entry_t     |
 *    | xfs_dir2_leaf_entry_t     |
 *    | ...                       |
 *    +---------------------------+
 *    | xfs_dir2_data_off_t       |
 *    | xfs_dir2_data_off_t       |
 *    | xfs_dir2_data_off_t       |
 *    | ...                       |
 *    +---------------------------+
 *    | xfs_dir2_leaf_tail_t      |
 *    +---------------------------+
 *
 * The xfs_dir2_data_off_t members (bests) and tail are at the end of the block
 * for single-leaf (magic = XFS_DIR2_LEAF1_MAGIC) blocks only, but not present
 * for directories with separate leaf nodes and free space blocks
 * (magic = XFS_DIR2_LEAFN_MAGIC).
 *
 * As all the entries are variable size structures the accessors below should
 * be used to iterate over them.
 */

/*
 * Offset of the leaf/node space.  First block in this space
 * is the btree root.
 */
#define XFS_DIR2_LEAF_SPACE	1
#define XFS_DIR2_LEAF_OFFSET	(XFS_DIR2_LEAF_SPACE * XFS_DIR2_SPACE_SIZE)
#define XFS_DIR2_LEAF_FIRSTDB(mp)	\
	xfs_dir2_byte_to_db(mp, XFS_DIR2_LEAF_OFFSET)

/*
 * Leaf block header.
 */
typedef struct xfs_dir2_leaf_hdr {
	xfs_da_blkinfo_t	info;		/* header for da routines */
	__be16			count;		/* count of entries */
	__be16			stale;		/* count of stale entries */
} xfs_dir2_leaf_hdr_t;

/*
 * Leaf block entry.
 */
typedef struct xfs_dir2_leaf_entry {
	__be32			hashval;	/* hash value of name */
	__be32			address;	/* address of data entry */
} xfs_dir2_leaf_entry_t;

/*
 * Leaf block tail.
 */
typedef struct xfs_dir2_leaf_tail {
	__be32			bestcount;
} xfs_dir2_leaf_tail_t;

/*
 * Leaf block.
 */
typedef struct xfs_dir2_leaf {
	xfs_dir2_leaf_hdr_t	hdr;		/* leaf header */
	xfs_dir2_leaf_entry_t	ents[];		/* entries */
} xfs_dir2_leaf_t;

/*
 * DB blocks here are logical directory block numbers, not filesystem blocks.
 */

static inline int xfs_dir2_max_leaf_ents(struct xfs_mount *mp)
{
	return (mp->m_dirblksize - (uint)sizeof(struct xfs_dir2_leaf_hdr)) /
		(uint)sizeof(struct xfs_dir2_leaf_entry);
}

/*
 * Get address of the bestcount field in the single-leaf block.
 */
static inline struct xfs_dir2_leaf_tail *
xfs_dir2_leaf_tail_p(struct xfs_mount *mp, struct xfs_dir2_leaf *lp)
{
	return (struct xfs_dir2_leaf_tail *)
		((char *)lp + mp->m_dirblksize -
		  sizeof(struct xfs_dir2_leaf_tail));
}

/*
 * Get address of the bests array in the single-leaf block.
 */
static inline __be16 *
xfs_dir2_leaf_bests_p(struct xfs_dir2_leaf_tail *ltp)
{
	return (__be16 *)ltp - be32_to_cpu(ltp->bestcount);
}

/*
 * Convert dataptr to byte in file space
 */
static inline xfs_dir2_off_t
xfs_dir2_dataptr_to_byte(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
{
	return (xfs_dir2_off_t)dp << XFS_DIR2_DATA_ALIGN_LOG;
}

/*
 * Convert byte in file space to dataptr.  It had better be aligned.
 */
static inline xfs_dir2_dataptr_t
xfs_dir2_byte_to_dataptr(struct xfs_mount *mp, xfs_dir2_off_t by)
{
	return (xfs_dir2_dataptr_t)(by >> XFS_DIR2_DATA_ALIGN_LOG);
}

/*
 * Convert byte in space to (DB) block
 */
static inline xfs_dir2_db_t
xfs_dir2_byte_to_db(struct xfs_mount *mp, xfs_dir2_off_t by)
{
	return (xfs_dir2_db_t)
		(by >> (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog));
}

/*
 * Convert dataptr to a block number
 */
static inline xfs_dir2_db_t
xfs_dir2_dataptr_to_db(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
{
	return xfs_dir2_byte_to_db(mp, xfs_dir2_dataptr_to_byte(mp, dp));
}

/*
 * Convert byte in space to offset in a block
 */
static inline xfs_dir2_data_aoff_t
xfs_dir2_byte_to_off(struct xfs_mount *mp, xfs_dir2_off_t by)
{
	return (xfs_dir2_data_aoff_t)(by &
		((1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)) - 1));
}

/*
 * Convert dataptr to a byte offset in a block
 */
static inline xfs_dir2_data_aoff_t
xfs_dir2_dataptr_to_off(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
{
	return xfs_dir2_byte_to_off(mp, xfs_dir2_dataptr_to_byte(mp, dp));
}

/*
 * Convert block and offset to byte in space
 */
static inline xfs_dir2_off_t
xfs_dir2_db_off_to_byte(struct xfs_mount *mp, xfs_dir2_db_t db,
			xfs_dir2_data_aoff_t o)
{
	return ((xfs_dir2_off_t)db <<
		(mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)) + o;
}

/*
 * Convert block (DB) to block (dablk)
 */
static inline xfs_dablk_t
xfs_dir2_db_to_da(struct xfs_mount *mp, xfs_dir2_db_t db)
{
	return (xfs_dablk_t)(db << mp->m_sb.sb_dirblklog);
}

/*
 * Convert byte in space to (DA) block
 */
static inline xfs_dablk_t
xfs_dir2_byte_to_da(struct xfs_mount *mp, xfs_dir2_off_t by)
{
	return xfs_dir2_db_to_da(mp, xfs_dir2_byte_to_db(mp, by));
}

/*
 * Convert block and offset to dataptr
 */
static inline xfs_dir2_dataptr_t
xfs_dir2_db_off_to_dataptr(struct xfs_mount *mp, xfs_dir2_db_t db,
			   xfs_dir2_data_aoff_t o)
{
	return xfs_dir2_byte_to_dataptr(mp, xfs_dir2_db_off_to_byte(mp, db, o));
}

/*
 * Convert block (dablk) to block (DB)
 */
static inline xfs_dir2_db_t
xfs_dir2_da_to_db(struct xfs_mount *mp, xfs_dablk_t da)
{
	return (xfs_dir2_db_t)(da >> mp->m_sb.sb_dirblklog);
}

/*
 * Convert block (dablk) to byte offset in space
 */
static inline xfs_dir2_off_t
xfs_dir2_da_to_byte(struct xfs_mount *mp, xfs_dablk_t da)
{
	return xfs_dir2_db_off_to_byte(mp, xfs_dir2_da_to_db(mp, da), 0);
}

/*
 * Free space block definitions for the node format.
 */

/*
 * Offset of the freespace index.
 */
#define XFS_DIR2_FREE_SPACE	2
#define XFS_DIR2_FREE_OFFSET	(XFS_DIR2_FREE_SPACE * XFS_DIR2_SPACE_SIZE)
#define XFS_DIR2_FREE_FIRSTDB(mp)	\
	xfs_dir2_byte_to_db(mp, XFS_DIR2_FREE_OFFSET)

typedef struct xfs_dir2_free_hdr {
	__be32			magic;		/* XFS_DIR2_FREE_MAGIC */
	__be32			firstdb;	/* db of first entry */
	__be32			nvalid;		/* count of valid entries */
	__be32			nused;		/* count of used entries */
} xfs_dir2_free_hdr_t;

typedef struct xfs_dir2_free {
	xfs_dir2_free_hdr_t	hdr;		/* block header */
	__be16			bests[];	/* best free counts */
						/* unused entries are -1 */
} xfs_dir2_free_t;

static inline int xfs_dir2_free_max_bests(struct xfs_mount *mp)
{
	return (mp->m_dirblksize - sizeof(struct xfs_dir2_free_hdr)) /
		sizeof(xfs_dir2_data_off_t);
}

/*
 * Convert data space db to the corresponding free db.
 */
static inline xfs_dir2_db_t
xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db)
{
	return XFS_DIR2_FREE_FIRSTDB(mp) + db / xfs_dir2_free_max_bests(mp);
}

/*
 * Convert data space db to the corresponding index in a free db.
 */
static inline int
xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db)
{
	return db % xfs_dir2_free_max_bests(mp);
}
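
For illustration only, not part of the patch: a standalone sketch of the mapping above from a data block number to a free-index block and a slot within it. The 4096-byte directory block geometry, and the 2040 bests per free block it implies, are assumptions for the example, not values taken from the patch.

/*
 * Standalone sketch, not kernel code: how a data block number maps to a
 * free block (fdb) and an index inside it (fdindex), as xfs_dir2_db_to_fdb()
 * and xfs_dir2_db_to_fdindex() do. Geometry is assumed: 4096-byte directory
 * blocks, so sb_blocklog + sb_dirblklog = 12 and the free header is 16 bytes.
 */
#include <stdio.h>

#define MAX_BESTS	((4096 - 16) / 2)	/* 2-byte bests after the header */
#define FREE_FIRSTDB	((2ULL << 35) >> 12)	/* free space starts at 64GB    */

int main(void)
{
	unsigned long long db = 123456;		/* hypothetical data block */

	printf("fdb = %llu, fdindex = %llu\n",
	       FREE_FIRSTDB + db / MAX_BESTS, db % MAX_BESTS);
	return 0;
}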

/*
 * Single block format.
 *
 * The single block format looks like the following drawing on disk:
 *
 *    +-------------------------------------------------+
 *    | xfs_dir2_data_hdr_t                             |
 *    +-------------------------------------------------+
 *    | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t |
 *    | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t |
 *    | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t :
 *    | ...                                             |
 *    +-------------------------------------------------+
 *    | unused space                                    |
 *    +-------------------------------------------------+
 *    | ...                                             |
 *    | xfs_dir2_leaf_entry_t                           |
 *    | xfs_dir2_leaf_entry_t                           |
 *    +-------------------------------------------------+
 *    | xfs_dir2_block_tail_t                           |
 *    +-------------------------------------------------+
 *
 * As all the entries are variable size structures the accessors below should
 * be used to iterate over them.
 */

typedef struct xfs_dir2_block_tail {
	__be32		count;			/* count of leaf entries */
	__be32		stale;			/* count of stale lf entries */
} xfs_dir2_block_tail_t;

/*
 * Pointer to the leaf header embedded in a data block (1-block format)
 */
static inline struct xfs_dir2_block_tail *
xfs_dir2_block_tail_p(struct xfs_mount *mp, struct xfs_dir2_data_hdr *hdr)
{
	return ((struct xfs_dir2_block_tail *)
		((char *)hdr + mp->m_dirblksize)) - 1;
}

/*
 * Pointer to the leaf entries embedded in a data block (1-block format)
 */
static inline struct xfs_dir2_leaf_entry *
xfs_dir2_block_leaf_p(struct xfs_dir2_block_tail *btp)
{
	return ((struct xfs_dir2_leaf_entry *)btp) - be32_to_cpu(btp->count);
}

#endif	/* __XFS_DIR2_FORMAT_H__ */
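
For illustration only, not part of the patch: a standalone sketch that round-trips a (block, offset) pair through the byte-space and dataptr encodings this header defines. The geometry (4096-byte blocks, sb_dirblklog = 0) and the block/offset values are assumptions for the example.

/*
 * Standalone sketch, not kernel code: composes the conversions above.
 * xfs_dir2_db_off_to_byte() packs (db, off) into the 64-bit byte space,
 * xfs_dir2_byte_to_dataptr() shifts out the 8-byte alignment so the result
 * fits a 32-bit leaf address, and the reverse helpers recover db and off.
 */
#include <assert.h>
#include <stdio.h>
#include <stdint.h>

#define BLKLOG		12	/* assumed sb_blocklog + sb_dirblklog */
#define ALIGN_LOG	3	/* XFS_DIR2_DATA_ALIGN_LOG            */

int main(void)
{
	uint64_t db = 42, off = 1056;		/* hypothetical, off is 8-byte aligned */
	uint64_t byte = (db << BLKLOG) + off;	/* like xfs_dir2_db_off_to_byte()      */
	uint32_t dataptr = byte >> ALIGN_LOG;	/* like xfs_dir2_byte_to_dataptr()     */

	/* ...and back again, as the lookup paths do. */
	assert(((uint64_t)dataptr << ALIGN_LOG) >> BLKLOG == db);
	assert((((uint64_t)dataptr << ALIGN_LOG) & ((1 << BLKLOG) - 1)) == off);
	printf("byte=%llu dataptr=%u\n", (unsigned long long)byte, dataptr);
	return 0;
}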
@ -24,18 +24,14 @@
|
||||
#include "xfs_trans.h"
|
||||
#include "xfs_sb.h"
|
||||
#include "xfs_ag.h"
|
||||
#include "xfs_dir2.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_da_btree.h"
|
||||
#include "xfs_bmap_btree.h"
|
||||
#include "xfs_dir2_sf.h"
|
||||
#include "xfs_dinode.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_bmap.h"
|
||||
#include "xfs_dir2_data.h"
|
||||
#include "xfs_dir2_leaf.h"
|
||||
#include "xfs_dir2_block.h"
|
||||
#include "xfs_dir2_node.h"
|
||||
#include "xfs_dir2_format.h"
|
||||
#include "xfs_dir2_priv.h"
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_trace.h"
|
||||
|
||||
@ -64,7 +60,7 @@ xfs_dir2_block_to_leaf(
|
||||
{
|
||||
__be16 *bestsp; /* leaf's bestsp entries */
|
||||
xfs_dablk_t blkno; /* leaf block's bno */
|
||||
xfs_dir2_block_t *block; /* block structure */
|
||||
xfs_dir2_data_hdr_t *hdr; /* block header */
|
||||
xfs_dir2_leaf_entry_t *blp; /* block's leaf entries */
|
||||
xfs_dir2_block_tail_t *btp; /* block's tail */
|
||||
xfs_inode_t *dp; /* incore directory inode */
|
||||
@ -101,9 +97,9 @@ xfs_dir2_block_to_leaf(
|
||||
}
|
||||
ASSERT(lbp != NULL);
|
||||
leaf = lbp->data;
|
||||
block = dbp->data;
|
||||
hdr = dbp->data;
|
||||
xfs_dir2_data_check(dp, dbp);
|
||||
btp = xfs_dir2_block_tail_p(mp, block);
|
||||
btp = xfs_dir2_block_tail_p(mp, hdr);
|
||||
blp = xfs_dir2_block_leaf_p(btp);
|
||||
/*
|
||||
* Set the counts in the leaf header.
|
||||
@ -123,23 +119,23 @@ xfs_dir2_block_to_leaf(
|
||||
* tail be free.
|
||||
*/
|
||||
xfs_dir2_data_make_free(tp, dbp,
|
||||
(xfs_dir2_data_aoff_t)((char *)blp - (char *)block),
|
||||
(xfs_dir2_data_aoff_t)((char *)block + mp->m_dirblksize -
|
||||
(xfs_dir2_data_aoff_t)((char *)blp - (char *)hdr),
|
||||
(xfs_dir2_data_aoff_t)((char *)hdr + mp->m_dirblksize -
|
||||
(char *)blp),
|
||||
&needlog, &needscan);
|
||||
/*
|
||||
* Fix up the block header, make it a data block.
|
||||
*/
|
||||
block->hdr.magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
|
||||
hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
|
||||
if (needscan)
|
||||
xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
|
||||
xfs_dir2_data_freescan(mp, hdr, &needlog);
|
||||
/*
|
||||
* Set up leaf tail and bests table.
|
||||
*/
|
||||
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
|
||||
ltp->bestcount = cpu_to_be32(1);
|
||||
bestsp = xfs_dir2_leaf_bests_p(ltp);
|
||||
bestsp[0] = block->hdr.bestfree[0].length;
|
||||
bestsp[0] = hdr->bestfree[0].length;
|
||||
/*
|
||||
* Log the data header and leaf bests table.
|
||||
*/
|
||||
@ -152,6 +148,131 @@ xfs_dir2_block_to_leaf(
|
||||
return 0;
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xfs_dir2_leaf_find_stale(
|
||||
struct xfs_dir2_leaf *leaf,
|
||||
int index,
|
||||
int *lowstale,
|
||||
int *highstale)
|
||||
{
|
||||
/*
|
||||
* Find the first stale entry before our index, if any.
|
||||
*/
|
||||
for (*lowstale = index - 1; *lowstale >= 0; --*lowstale) {
|
||||
if (leaf->ents[*lowstale].address ==
|
||||
cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Find the first stale entry at or after our index, if any.
|
||||
* Stop if the result would require moving more entries than using
|
||||
* lowstale.
|
||||
*/
|
||||
for (*highstale = index;
|
||||
*highstale < be16_to_cpu(leaf->hdr.count);
|
||||
++*highstale) {
|
||||
if (leaf->ents[*highstale].address ==
|
||||
cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
|
||||
break;
|
||||
if (*lowstale >= 0 && index - *lowstale <= *highstale - index)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
struct xfs_dir2_leaf_entry *
|
||||
xfs_dir2_leaf_find_entry(
|
||||
xfs_dir2_leaf_t *leaf, /* leaf structure */
|
||||
int index, /* leaf table position */
|
||||
int compact, /* need to compact leaves */
|
||||
int lowstale, /* index of prev stale leaf */
|
||||
int highstale, /* index of next stale leaf */
|
||||
int *lfloglow, /* low leaf logging index */
|
||||
int *lfloghigh) /* high leaf logging index */
|
||||
{
|
||||
if (!leaf->hdr.stale) {
|
||||
xfs_dir2_leaf_entry_t *lep; /* leaf entry table pointer */
|
||||
|
||||
/*
|
||||
* Now we need to make room to insert the leaf entry.
|
||||
*
|
||||
* If there are no stale entries, just insert a hole at index.
|
||||
*/
|
||||
lep = &leaf->ents[index];
|
||||
if (index < be16_to_cpu(leaf->hdr.count))
|
||||
memmove(lep + 1, lep,
|
||||
(be16_to_cpu(leaf->hdr.count) - index) *
|
||||
sizeof(*lep));
|
||||
|
||||
/*
|
||||
* Record low and high logging indices for the leaf.
|
||||
*/
|
||||
*lfloglow = index;
|
||||
*lfloghigh = be16_to_cpu(leaf->hdr.count);
|
||||
be16_add_cpu(&leaf->hdr.count, 1);
|
||||
return lep;
|
||||
}
|
||||
|
||||
/*
|
||||
* There are stale entries.
|
||||
*
|
||||
* We will use one of them for the new entry. It's probably not at
|
||||
* the right location, so we'll have to shift some up or down first.
|
||||
*
|
||||
* If we didn't compact before, we need to find the nearest stale
|
||||
* entries before and after our insertion point.
|
||||
*/
|
||||
if (compact == 0)
|
||||
xfs_dir2_leaf_find_stale(leaf, index, &lowstale, &highstale);
|
||||
|
||||
/*
|
||||
* If the low one is better, use it.
|
||||
*/
|
||||
if (lowstale >= 0 &&
|
||||
(highstale == be16_to_cpu(leaf->hdr.count) ||
|
||||
index - lowstale - 1 < highstale - index)) {
|
||||
ASSERT(index - lowstale - 1 >= 0);
|
||||
ASSERT(leaf->ents[lowstale].address ==
|
||||
cpu_to_be32(XFS_DIR2_NULL_DATAPTR));
|
||||
|
||||
/*
|
||||
* Copy entries up to cover the stale entry and make room
|
||||
* for the new entry.
|
||||
*/
|
||||
if (index - lowstale - 1 > 0) {
|
||||
memmove(&leaf->ents[lowstale],
|
||||
&leaf->ents[lowstale + 1],
|
||||
(index - lowstale - 1) *
|
||||
sizeof(xfs_dir2_leaf_entry_t));
|
||||
}
|
||||
*lfloglow = MIN(lowstale, *lfloglow);
|
||||
*lfloghigh = MAX(index - 1, *lfloghigh);
|
||||
be16_add_cpu(&leaf->hdr.stale, -1);
|
||||
return &leaf->ents[index - 1];
|
||||
}
|
||||
|
||||
/*
|
||||
* The high one is better, so use that one.
|
||||
*/
|
||||
ASSERT(highstale - index >= 0);
|
||||
ASSERT(leaf->ents[highstale].address ==
|
||||
cpu_to_be32(XFS_DIR2_NULL_DATAPTR));
|
||||
|
||||
/*
|
||||
* Copy entries down to cover the stale entry and make room for the
|
||||
* new entry.
|
||||
*/
|
||||
if (highstale - index > 0) {
|
||||
memmove(&leaf->ents[index + 1],
|
||||
&leaf->ents[index],
|
||||
(highstale - index) * sizeof(xfs_dir2_leaf_entry_t));
|
||||
}
|
||||
*lfloglow = MIN(index, *lfloglow);
|
||||
*lfloghigh = MAX(highstale, *lfloghigh);
|
||||
be16_add_cpu(&leaf->hdr.stale, -1);
|
||||
return &leaf->ents[index];
|
||||
}
|
||||
|
||||
/*
|
||||
* Add an entry to a leaf form directory.
|
||||
*/
|
||||
@ -161,7 +282,7 @@ xfs_dir2_leaf_addname(
|
||||
{
|
||||
__be16 *bestsp; /* freespace table in leaf */
|
||||
int compact; /* need to compact leaves */
|
||||
xfs_dir2_data_t *data; /* data block structure */
|
||||
xfs_dir2_data_hdr_t *hdr; /* data block header */
|
||||
xfs_dabuf_t *dbp; /* data block buffer */
|
||||
xfs_dir2_data_entry_t *dep; /* data block entry */
|
||||
xfs_inode_t *dp; /* incore directory inode */
|
||||
@ -225,7 +346,7 @@ xfs_dir2_leaf_addname(
|
||||
continue;
|
||||
i = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address));
|
||||
ASSERT(i < be32_to_cpu(ltp->bestcount));
|
||||
ASSERT(be16_to_cpu(bestsp[i]) != NULLDATAOFF);
|
||||
ASSERT(bestsp[i] != cpu_to_be16(NULLDATAOFF));
|
||||
if (be16_to_cpu(bestsp[i]) >= length) {
|
||||
use_block = i;
|
||||
break;
|
||||
@ -239,7 +360,8 @@ xfs_dir2_leaf_addname(
|
||||
/*
|
||||
* Remember a block we see that's missing.
|
||||
*/
|
||||
if (be16_to_cpu(bestsp[i]) == NULLDATAOFF && use_block == -1)
|
||||
if (bestsp[i] == cpu_to_be16(NULLDATAOFF) &&
|
||||
use_block == -1)
|
||||
use_block = i;
|
||||
else if (be16_to_cpu(bestsp[i]) >= length) {
|
||||
use_block = i;
|
||||
@ -250,14 +372,17 @@ xfs_dir2_leaf_addname(
|
||||
/*
|
||||
* How many bytes do we need in the leaf block?
|
||||
*/
|
||||
needbytes =
|
||||
(leaf->hdr.stale ? 0 : (uint)sizeof(leaf->ents[0])) +
|
||||
(use_block != -1 ? 0 : (uint)sizeof(leaf->bests[0]));
|
||||
needbytes = 0;
|
||||
if (!leaf->hdr.stale)
|
||||
needbytes += sizeof(xfs_dir2_leaf_entry_t);
|
||||
if (use_block == -1)
|
||||
needbytes += sizeof(xfs_dir2_data_off_t);
|
||||
|
||||
/*
|
||||
* Now kill use_block if it refers to a missing block, so we
|
||||
* can use it as an indication of allocation needed.
|
||||
*/
|
||||
if (use_block != -1 && be16_to_cpu(bestsp[use_block]) == NULLDATAOFF)
|
||||
if (use_block != -1 && bestsp[use_block] == cpu_to_be16(NULLDATAOFF))
|
||||
use_block = -1;
|
||||
/*
|
||||
* If we don't have enough free bytes but we can make enough
|
||||
@ -369,8 +494,8 @@ xfs_dir2_leaf_addname(
|
||||
*/
|
||||
else
|
||||
xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block);
|
||||
data = dbp->data;
|
||||
bestsp[use_block] = data->hdr.bestfree[0].length;
|
||||
hdr = dbp->data;
|
||||
bestsp[use_block] = hdr->bestfree[0].length;
|
||||
grown = 1;
|
||||
}
|
||||
/*
|
||||
@ -384,7 +509,7 @@ xfs_dir2_leaf_addname(
|
||||
xfs_da_brelse(tp, lbp);
|
||||
return error;
|
||||
}
|
||||
data = dbp->data;
|
||||
hdr = dbp->data;
|
||||
grown = 0;
|
||||
}
|
||||
xfs_dir2_data_check(dp, dbp);
|
||||
@ -392,14 +517,14 @@ xfs_dir2_leaf_addname(
|
||||
* Point to the biggest freespace in our data block.
|
||||
*/
|
||||
dup = (xfs_dir2_data_unused_t *)
|
||||
((char *)data + be16_to_cpu(data->hdr.bestfree[0].offset));
|
||||
((char *)hdr + be16_to_cpu(hdr->bestfree[0].offset));
|
||||
ASSERT(be16_to_cpu(dup->length) >= length);
|
||||
needscan = needlog = 0;
|
||||
/*
|
||||
* Mark the initial part of our freespace in use for the new entry.
|
||||
*/
|
||||
xfs_dir2_data_use_free(tp, dbp, dup,
|
||||
(xfs_dir2_data_aoff_t)((char *)dup - (char *)data), length,
|
||||
(xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr), length,
|
||||
&needlog, &needscan);
|
||||
/*
|
||||
* Initialize our new entry (at last).
|
||||
@ -409,12 +534,12 @@ xfs_dir2_leaf_addname(
|
||||
dep->namelen = args->namelen;
|
||||
memcpy(dep->name, args->name, dep->namelen);
|
||||
tagp = xfs_dir2_data_entry_tag_p(dep);
|
||||
*tagp = cpu_to_be16((char *)dep - (char *)data);
|
||||
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
|
||||
/*
|
||||
* Need to scan fix up the bestfree table.
|
||||
*/
|
||||
if (needscan)
|
||||
xfs_dir2_data_freescan(mp, data, &needlog);
|
||||
xfs_dir2_data_freescan(mp, hdr, &needlog);
|
||||
/*
|
||||
* Need to log the data block's header.
|
||||
*/
|
||||
@ -425,107 +550,15 @@ xfs_dir2_leaf_addname(
|
||||
* If the bests table needs to be changed, do it.
|
||||
* Log the change unless we've already done that.
|
||||
*/
|
||||
if (be16_to_cpu(bestsp[use_block]) != be16_to_cpu(data->hdr.bestfree[0].length)) {
|
||||
bestsp[use_block] = data->hdr.bestfree[0].length;
|
||||
if (be16_to_cpu(bestsp[use_block]) != be16_to_cpu(hdr->bestfree[0].length)) {
|
||||
bestsp[use_block] = hdr->bestfree[0].length;
|
||||
if (!grown)
|
||||
xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block);
|
||||
}
|
||||
/*
|
||||
* Now we need to make room to insert the leaf entry.
|
||||
* If there are no stale entries, we just insert a hole at index.
|
||||
*/
|
||||
if (!leaf->hdr.stale) {
|
||||
/*
|
||||
* lep is still good as the index leaf entry.
|
||||
*/
|
||||
if (index < be16_to_cpu(leaf->hdr.count))
|
||||
memmove(lep + 1, lep,
|
||||
(be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep));
|
||||
/*
|
||||
* Record low and high logging indices for the leaf.
|
||||
*/
|
||||
lfloglow = index;
|
||||
lfloghigh = be16_to_cpu(leaf->hdr.count);
|
||||
be16_add_cpu(&leaf->hdr.count, 1);
|
||||
}
|
||||
/*
|
||||
* There are stale entries.
|
||||
* We will use one of them for the new entry.
|
||||
* It's probably not at the right location, so we'll have to
|
||||
* shift some up or down first.
|
||||
*/
|
||||
else {
|
||||
/*
|
||||
* If we didn't compact before, we need to find the nearest
|
||||
* stale entries before and after our insertion point.
|
||||
*/
|
||||
if (compact == 0) {
|
||||
/*
|
||||
* Find the first stale entry before the insertion
|
||||
* point, if any.
|
||||
*/
|
||||
for (lowstale = index - 1;
|
||||
lowstale >= 0 &&
|
||||
be32_to_cpu(leaf->ents[lowstale].address) !=
|
||||
XFS_DIR2_NULL_DATAPTR;
|
||||
lowstale--)
|
||||
continue;
|
||||
/*
|
||||
* Find the next stale entry at or after the insertion
|
||||
* point, if any. Stop if we go so far that the
|
||||
* lowstale entry would be better.
|
||||
*/
|
||||
for (highstale = index;
|
||||
highstale < be16_to_cpu(leaf->hdr.count) &&
|
||||
be32_to_cpu(leaf->ents[highstale].address) !=
|
||||
XFS_DIR2_NULL_DATAPTR &&
|
||||
(lowstale < 0 ||
|
||||
index - lowstale - 1 >= highstale - index);
|
||||
highstale++)
|
||||
continue;
|
||||
}
|
||||
/*
|
||||
* If the low one is better, use it.
|
||||
*/
|
||||
if (lowstale >= 0 &&
|
||||
(highstale == be16_to_cpu(leaf->hdr.count) ||
|
||||
index - lowstale - 1 < highstale - index)) {
|
||||
ASSERT(index - lowstale - 1 >= 0);
|
||||
ASSERT(be32_to_cpu(leaf->ents[lowstale].address) ==
|
||||
XFS_DIR2_NULL_DATAPTR);
|
||||
/*
|
||||
* Copy entries up to cover the stale entry
|
||||
* and make room for the new entry.
|
||||
*/
|
||||
if (index - lowstale - 1 > 0)
|
||||
memmove(&leaf->ents[lowstale],
|
||||
&leaf->ents[lowstale + 1],
|
||||
(index - lowstale - 1) * sizeof(*lep));
|
||||
lep = &leaf->ents[index - 1];
|
||||
lfloglow = MIN(lowstale, lfloglow);
|
||||
lfloghigh = MAX(index - 1, lfloghigh);
|
||||
}
|
||||
/*
|
||||
* The high one is better, so use that one.
|
||||
*/
|
||||
else {
|
||||
ASSERT(highstale - index >= 0);
|
||||
ASSERT(be32_to_cpu(leaf->ents[highstale].address) ==
|
||||
XFS_DIR2_NULL_DATAPTR);
|
||||
/*
|
||||
* Copy entries down to cover the stale entry
|
||||
* and make room for the new entry.
|
||||
*/
|
||||
if (highstale - index > 0)
|
||||
memmove(&leaf->ents[index + 1],
|
||||
&leaf->ents[index],
|
||||
(highstale - index) * sizeof(*lep));
|
||||
lep = &leaf->ents[index];
|
||||
lfloglow = MIN(index, lfloglow);
|
||||
lfloghigh = MAX(highstale, lfloghigh);
|
||||
}
|
||||
be16_add_cpu(&leaf->hdr.stale, -1);
|
||||
}
|
||||
|
||||
lep = xfs_dir2_leaf_find_entry(leaf, index, compact, lowstale,
|
||||
highstale, &lfloglow, &lfloghigh);
|
||||
|
||||
/*
|
||||
* Fill in the new leaf entry.
|
||||
*/
|
||||
@ -562,7 +595,7 @@ xfs_dir2_leaf_check(
|
||||
|
||||
leaf = bp->data;
|
||||
mp = dp->i_mount;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
|
||||
/*
|
||||
* This value is not restrictive enough.
|
||||
* Should factor in the size of the bests table as well.
|
||||
@ -582,7 +615,7 @@ xfs_dir2_leaf_check(
|
||||
if (i + 1 < be16_to_cpu(leaf->hdr.count))
|
||||
ASSERT(be32_to_cpu(leaf->ents[i].hashval) <=
|
||||
be32_to_cpu(leaf->ents[i + 1].hashval));
|
||||
if (be32_to_cpu(leaf->ents[i].address) == XFS_DIR2_NULL_DATAPTR)
|
||||
if (leaf->ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
|
||||
stale++;
|
||||
}
|
||||
ASSERT(be16_to_cpu(leaf->hdr.stale) == stale);
|
||||
@ -611,7 +644,8 @@ xfs_dir2_leaf_compact(
|
||||
* Compress out the stale entries in place.
|
||||
*/
|
||||
for (from = to = 0, loglow = -1; from < be16_to_cpu(leaf->hdr.count); from++) {
|
||||
if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR)
|
||||
if (leaf->ents[from].address ==
|
||||
cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
|
||||
continue;
|
||||
/*
|
||||
* Only actually copy the entries that are different.
|
||||
@ -663,24 +697,9 @@ xfs_dir2_leaf_compact_x1(
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.stale) > 1);
|
||||
index = *indexp;
|
||||
/*
|
||||
* Find the first stale entry before our index, if any.
|
||||
*/
|
||||
for (lowstale = index - 1;
|
||||
lowstale >= 0 &&
|
||||
be32_to_cpu(leaf->ents[lowstale].address) != XFS_DIR2_NULL_DATAPTR;
|
||||
lowstale--)
|
||||
continue;
|
||||
/*
|
||||
* Find the first stale entry at or after our index, if any.
|
||||
* Stop if the answer would be worse than lowstale.
|
||||
*/
|
||||
for (highstale = index;
|
||||
highstale < be16_to_cpu(leaf->hdr.count) &&
|
||||
be32_to_cpu(leaf->ents[highstale].address) != XFS_DIR2_NULL_DATAPTR &&
|
||||
(lowstale < 0 || index - lowstale > highstale - index);
|
||||
highstale++)
|
||||
continue;
|
||||
|
||||
xfs_dir2_leaf_find_stale(leaf, index, &lowstale, &highstale);
|
||||
|
||||
/*
|
||||
* Pick the better of lowstale and highstale.
|
||||
*/
|
||||
@ -701,7 +720,8 @@ xfs_dir2_leaf_compact_x1(
|
||||
if (index == from)
|
||||
newindex = to;
|
||||
if (from != keepstale &&
|
||||
be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) {
|
||||
leaf->ents[from].address ==
|
||||
cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) {
|
||||
if (from == to)
|
||||
*lowlogp = to;
|
||||
continue;
|
||||
@ -760,7 +780,7 @@ xfs_dir2_leaf_getdents(
|
||||
int byteoff; /* offset in current block */
|
||||
xfs_dir2_db_t curdb; /* db for current block */
|
||||
xfs_dir2_off_t curoff; /* current overall offset */
|
||||
xfs_dir2_data_t *data; /* data block structure */
|
||||
xfs_dir2_data_hdr_t *hdr; /* data block header */
|
||||
xfs_dir2_data_entry_t *dep; /* data entry */
|
||||
xfs_dir2_data_unused_t *dup; /* unused entry */
|
||||
int error = 0; /* error return value */
|
||||
@ -1018,23 +1038,23 @@ xfs_dir2_leaf_getdents(
|
||||
else if (curoff > newoff)
|
||||
ASSERT(xfs_dir2_byte_to_db(mp, curoff) ==
|
||||
curdb);
|
||||
data = bp->data;
|
||||
hdr = bp->data;
|
||||
xfs_dir2_data_check(dp, bp);
|
||||
/*
|
||||
* Find our position in the block.
|
||||
*/
|
||||
ptr = (char *)&data->u;
|
||||
ptr = (char *)(hdr + 1);
|
||||
byteoff = xfs_dir2_byte_to_off(mp, curoff);
|
||||
/*
|
||||
* Skip past the header.
|
||||
*/
|
||||
if (byteoff == 0)
|
||||
curoff += (uint)sizeof(data->hdr);
|
||||
curoff += (uint)sizeof(*hdr);
|
||||
/*
|
||||
* Skip past entries until we reach our offset.
|
||||
*/
|
||||
else {
|
||||
while ((char *)ptr - (char *)data < byteoff) {
|
||||
while ((char *)ptr - (char *)hdr < byteoff) {
|
||||
dup = (xfs_dir2_data_unused_t *)ptr;
|
||||
|
||||
if (be16_to_cpu(dup->freetag)
|
||||
@ -1055,8 +1075,8 @@ xfs_dir2_leaf_getdents(
|
||||
curoff =
|
||||
xfs_dir2_db_off_to_byte(mp,
|
||||
xfs_dir2_byte_to_db(mp, curoff),
|
||||
(char *)ptr - (char *)data);
|
||||
if (ptr >= (char *)data + mp->m_dirblksize) {
|
||||
(char *)ptr - (char *)hdr);
|
||||
if (ptr >= (char *)hdr + mp->m_dirblksize) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
@ -1179,7 +1199,7 @@ xfs_dir2_leaf_log_bests(
|
||||
xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */
|
||||
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
|
||||
ltp = xfs_dir2_leaf_tail_p(tp->t_mountp, leaf);
|
||||
firstb = xfs_dir2_leaf_bests_p(ltp) + first;
|
||||
lastb = xfs_dir2_leaf_bests_p(ltp) + last;
|
||||
@ -1202,8 +1222,8 @@ xfs_dir2_leaf_log_ents(
|
||||
xfs_dir2_leaf_t *leaf; /* leaf structure */
|
||||
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC ||
|
||||
be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) ||
|
||||
leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
|
||||
firstlep = &leaf->ents[first];
|
||||
lastlep = &leaf->ents[last];
|
||||
xfs_da_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf),
|
||||
@ -1221,8 +1241,8 @@ xfs_dir2_leaf_log_header(
|
||||
xfs_dir2_leaf_t *leaf; /* leaf structure */
|
||||
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC ||
|
||||
be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) ||
|
||||
leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
|
||||
xfs_da_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf),
|
||||
(uint)(sizeof(leaf->hdr) - 1));
|
||||
}
|
||||
@ -1241,7 +1261,7 @@ xfs_dir2_leaf_log_tail(
|
||||
|
||||
mp = tp->t_mountp;
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
|
||||
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
|
||||
xfs_da_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf),
|
||||
(uint)(mp->m_dirblksize - 1));
|
||||
@ -1437,7 +1457,7 @@ xfs_dir2_leaf_removename(
|
||||
xfs_da_args_t *args) /* operation arguments */
|
||||
{
|
||||
__be16 *bestsp; /* leaf block best freespace */
|
||||
xfs_dir2_data_t *data; /* data block structure */
|
||||
xfs_dir2_data_hdr_t *hdr; /* data block header */
|
||||
xfs_dir2_db_t db; /* data block number */
|
||||
xfs_dabuf_t *dbp; /* data block buffer */
|
||||
xfs_dir2_data_entry_t *dep; /* data entry structure */
|
||||
@ -1467,7 +1487,7 @@ xfs_dir2_leaf_removename(
|
||||
tp = args->trans;
|
||||
mp = dp->i_mount;
|
||||
leaf = lbp->data;
|
||||
data = dbp->data;
|
||||
hdr = dbp->data;
|
||||
xfs_dir2_data_check(dp, dbp);
|
||||
/*
|
||||
* Point to the leaf entry, use that to point to the data entry.
|
||||
@ -1475,9 +1495,9 @@ xfs_dir2_leaf_removename(
|
||||
lep = &leaf->ents[index];
|
||||
db = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address));
|
||||
dep = (xfs_dir2_data_entry_t *)
|
||||
((char *)data + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address)));
|
||||
((char *)hdr + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address)));
|
||||
needscan = needlog = 0;
|
||||
oldbest = be16_to_cpu(data->hdr.bestfree[0].length);
|
||||
oldbest = be16_to_cpu(hdr->bestfree[0].length);
|
||||
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
|
||||
bestsp = xfs_dir2_leaf_bests_p(ltp);
|
||||
ASSERT(be16_to_cpu(bestsp[db]) == oldbest);
|
||||
@ -1485,7 +1505,7 @@ xfs_dir2_leaf_removename(
|
||||
* Mark the former data entry unused.
|
||||
*/
|
||||
xfs_dir2_data_make_free(tp, dbp,
|
||||
(xfs_dir2_data_aoff_t)((char *)dep - (char *)data),
|
||||
(xfs_dir2_data_aoff_t)((char *)dep - (char *)hdr),
|
||||
xfs_dir2_data_entsize(dep->namelen), &needlog, &needscan);
|
||||
/*
|
||||
* We just mark the leaf entry stale by putting a null in it.
|
||||
@ -1499,23 +1519,23 @@ xfs_dir2_leaf_removename(
|
||||
* log the data block header if necessary.
|
||||
*/
|
||||
if (needscan)
|
||||
xfs_dir2_data_freescan(mp, data, &needlog);
|
||||
xfs_dir2_data_freescan(mp, hdr, &needlog);
|
||||
if (needlog)
|
||||
xfs_dir2_data_log_header(tp, dbp);
|
||||
/*
|
||||
* If the longest freespace in the data block has changed,
|
||||
* put the new value in the bests table and log that.
|
||||
*/
|
||||
if (be16_to_cpu(data->hdr.bestfree[0].length) != oldbest) {
|
||||
bestsp[db] = data->hdr.bestfree[0].length;
|
||||
if (be16_to_cpu(hdr->bestfree[0].length) != oldbest) {
|
||||
bestsp[db] = hdr->bestfree[0].length;
|
||||
xfs_dir2_leaf_log_bests(tp, lbp, db, db);
|
||||
}
|
||||
xfs_dir2_data_check(dp, dbp);
|
||||
/*
|
||||
* If the data block is now empty then get rid of the data block.
|
||||
*/
|
||||
if (be16_to_cpu(data->hdr.bestfree[0].length) ==
|
||||
mp->m_dirblksize - (uint)sizeof(data->hdr)) {
|
||||
if (be16_to_cpu(hdr->bestfree[0].length) ==
|
||||
mp->m_dirblksize - (uint)sizeof(*hdr)) {
|
||||
ASSERT(db != mp->m_dirdatablk);
|
||||
if ((error = xfs_dir2_shrink_inode(args, db, dbp))) {
|
||||
/*
|
||||
@ -1542,7 +1562,7 @@ xfs_dir2_leaf_removename(
|
||||
* Look for the last active entry (i).
|
||||
*/
|
||||
for (i = db - 1; i > 0; i--) {
|
||||
if (be16_to_cpu(bestsp[i]) != NULLDATAOFF)
|
||||
if (bestsp[i] != cpu_to_be16(NULLDATAOFF))
|
||||
break;
|
||||
}
|
||||
/*
|
||||
@ -1686,9 +1706,6 @@ xfs_dir2_leaf_trim_data(
|
||||
xfs_dir2_db_t db) /* data block number */
|
||||
{
|
||||
__be16 *bestsp; /* leaf bests table */
|
||||
#ifdef DEBUG
|
||||
xfs_dir2_data_t *data; /* data block structure */
|
||||
#endif
|
||||
xfs_dabuf_t *dbp; /* data block buffer */
|
||||
xfs_inode_t *dp; /* incore directory inode */
|
||||
int error; /* error return value */
|
||||
@ -1707,20 +1724,21 @@ xfs_dir2_leaf_trim_data(
|
||||
XFS_DATA_FORK))) {
|
||||
return error;
|
||||
}
|
||||
#ifdef DEBUG
|
||||
data = dbp->data;
|
||||
ASSERT(be32_to_cpu(data->hdr.magic) == XFS_DIR2_DATA_MAGIC);
|
||||
#endif
|
||||
/* this seems to be an error
|
||||
* data is only valid if DEBUG is defined?
|
||||
* RMC 09/08/1999
|
||||
*/
|
||||
|
||||
leaf = lbp->data;
|
||||
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
|
||||
ASSERT(be16_to_cpu(data->hdr.bestfree[0].length) ==
|
||||
mp->m_dirblksize - (uint)sizeof(data->hdr));
|
||||
|
||||
#ifdef DEBUG
|
||||
{
|
||||
struct xfs_dir2_data_hdr *hdr = dbp->data;
|
||||
|
||||
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC));
|
||||
ASSERT(be16_to_cpu(hdr->bestfree[0].length) ==
|
||||
mp->m_dirblksize - (uint)sizeof(*hdr));
|
||||
ASSERT(db == be32_to_cpu(ltp->bestcount) - 1);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Get rid of the data block.
|
||||
*/
|
||||
@ -1740,6 +1758,20 @@ xfs_dir2_leaf_trim_data(
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
xfs_dir2_leaf_size(
|
||||
struct xfs_dir2_leaf_hdr *hdr,
|
||||
int counts)
|
||||
{
|
||||
int entries;
|
||||
|
||||
entries = be16_to_cpu(hdr->count) - be16_to_cpu(hdr->stale);
|
||||
return sizeof(xfs_dir2_leaf_hdr_t) +
|
||||
entries * sizeof(xfs_dir2_leaf_entry_t) +
|
||||
counts * sizeof(xfs_dir2_data_off_t) +
|
||||
sizeof(xfs_dir2_leaf_tail_t);
|
||||
}
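
For illustration only, not part of the patch: a standalone sketch of the fit check that xfs_dir2_node_to_leaf() performs with the xfs_dir2_leaf_size() helper above. The structure sizes mirror the on-disk definitions (16-byte leaf header, 8-byte leaf entry, 2-byte best, 4-byte tail); the counts and block size are hypothetical.

/*
 * Standalone sketch, not kernel code: the node-to-leaf conversion only
 * proceeds when the live leaf entries, the bests array and the tail all
 * fit in a single directory block.
 */
#include <stdio.h>

int main(void)
{
	int dirblksize = 4096;			/* example m_dirblksize       */
	int count = 400, stale = 25;		/* example leaf hdr counts    */
	int nvalid = 300;			/* example free hdr nvalid    */
	int need = 16 +				/* leaf header                */
		   (count - stale) * 8 +	/* live leaf entries          */
		   nvalid * 2 +			/* bests array                */
		   4;				/* leaf tail                  */

	printf("need=%d -> %s\n", need,
	       need > dirblksize ? "stay in node form" : "convert to leaf1");
	return 0;
}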
|
||||
|
||||
/*
|
||||
* Convert node form directory to leaf form directory.
|
||||
* The root of the node form dir needs to already be a LEAFN block.
|
||||
@ -1810,7 +1842,7 @@ xfs_dir2_node_to_leaf(
|
||||
return 0;
|
||||
lbp = state->path.blk[0].bp;
|
||||
leaf = lbp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
|
||||
/*
|
||||
* Read the freespace block.
|
||||
*/
|
||||
@ -1819,20 +1851,19 @@ xfs_dir2_node_to_leaf(
|
||||
return error;
|
||||
}
|
||||
free = fbp->data;
|
||||
ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
|
||||
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
|
||||
ASSERT(!free->hdr.firstdb);
|
||||
|
||||
/*
|
||||
* Now see if the leafn and free data will fit in a leaf1.
|
||||
* If not, release the buffer and give up.
|
||||
*/
|
||||
if ((uint)sizeof(leaf->hdr) +
|
||||
(be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale)) * (uint)sizeof(leaf->ents[0]) +
|
||||
be32_to_cpu(free->hdr.nvalid) * (uint)sizeof(leaf->bests[0]) +
|
||||
(uint)sizeof(leaf->tail) >
|
||||
mp->m_dirblksize) {
|
||||
if (xfs_dir2_leaf_size(&leaf->hdr, be32_to_cpu(free->hdr.nvalid)) >
|
||||
mp->m_dirblksize) {
|
||||
xfs_da_brelse(tp, fbp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the leaf has any stale entries in it, compress them out.
|
||||
* The compact routine will log the header.
|
||||
@ -1851,7 +1882,7 @@ xfs_dir2_node_to_leaf(
|
||||
* Set up the leaf bests table.
|
||||
*/
|
||||
memcpy(xfs_dir2_leaf_bests_p(ltp), free->bests,
|
||||
be32_to_cpu(ltp->bestcount) * sizeof(leaf->bests[0]));
|
||||
be32_to_cpu(ltp->bestcount) * sizeof(xfs_dir2_data_off_t));
|
||||
xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1);
|
||||
xfs_dir2_leaf_log_tail(tp, lbp);
|
||||
xfs_dir2_leaf_check(dp, lbp);
|
||||
|
@ -1,253 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it would be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
#ifndef __XFS_DIR2_LEAF_H__
|
||||
#define __XFS_DIR2_LEAF_H__
|
||||
|
||||
struct uio;
|
||||
struct xfs_dabuf;
|
||||
struct xfs_da_args;
|
||||
struct xfs_inode;
|
||||
struct xfs_mount;
|
||||
struct xfs_trans;
|
||||
|
||||
/*
|
||||
* Offset of the leaf/node space. First block in this space
|
||||
* is the btree root.
|
||||
*/
|
||||
#define XFS_DIR2_LEAF_SPACE 1
|
||||
#define XFS_DIR2_LEAF_OFFSET (XFS_DIR2_LEAF_SPACE * XFS_DIR2_SPACE_SIZE)
|
||||
#define XFS_DIR2_LEAF_FIRSTDB(mp) \
|
||||
xfs_dir2_byte_to_db(mp, XFS_DIR2_LEAF_OFFSET)
|
||||
|
||||
/*
|
||||
* Offset in data space of a data entry.
|
||||
*/
|
||||
typedef __uint32_t xfs_dir2_dataptr_t;
|
||||
#define XFS_DIR2_MAX_DATAPTR ((xfs_dir2_dataptr_t)0xffffffff)
|
||||
#define XFS_DIR2_NULL_DATAPTR ((xfs_dir2_dataptr_t)0)
|
||||
|
||||
/*
|
||||
* Leaf block header.
|
||||
*/
|
||||
typedef struct xfs_dir2_leaf_hdr {
|
||||
xfs_da_blkinfo_t info; /* header for da routines */
|
||||
__be16 count; /* count of entries */
|
||||
__be16 stale; /* count of stale entries */
|
||||
} xfs_dir2_leaf_hdr_t;
|
||||
|
||||
/*
|
||||
* Leaf block entry.
|
||||
*/
|
||||
typedef struct xfs_dir2_leaf_entry {
|
||||
__be32 hashval; /* hash value of name */
|
||||
__be32 address; /* address of data entry */
|
||||
} xfs_dir2_leaf_entry_t;
|
||||
|
||||
/*
|
||||
* Leaf block tail.
|
||||
*/
|
||||
typedef struct xfs_dir2_leaf_tail {
|
||||
__be32 bestcount;
|
||||
} xfs_dir2_leaf_tail_t;
|
||||
|
||||
/*
|
||||
* Leaf block.
|
||||
* bests and tail are at the end of the block for single-leaf only
|
||||
* (magic = XFS_DIR2_LEAF1_MAGIC not XFS_DIR2_LEAFN_MAGIC).
|
||||
*/
|
||||
typedef struct xfs_dir2_leaf {
|
||||
xfs_dir2_leaf_hdr_t hdr; /* leaf header */
|
||||
xfs_dir2_leaf_entry_t ents[1]; /* entries */
|
||||
/* ... */
|
||||
xfs_dir2_data_off_t bests[1]; /* best free counts */
|
||||
xfs_dir2_leaf_tail_t tail; /* leaf tail */
|
||||
} xfs_dir2_leaf_t;
|
||||
|
||||
/*
|
||||
* DB blocks here are logical directory block numbers, not filesystem blocks.
|
||||
*/
|
||||
|
||||
static inline int xfs_dir2_max_leaf_ents(struct xfs_mount *mp)
|
||||
{
|
||||
return (int)(((mp)->m_dirblksize - (uint)sizeof(xfs_dir2_leaf_hdr_t)) /
|
||||
(uint)sizeof(xfs_dir2_leaf_entry_t));
|
||||
}
|
||||
|
||||
/*
|
||||
* Get address of the bestcount field in the single-leaf block.
|
||||
*/
|
||||
static inline xfs_dir2_leaf_tail_t *
|
||||
xfs_dir2_leaf_tail_p(struct xfs_mount *mp, xfs_dir2_leaf_t *lp)
|
||||
{
|
||||
return (xfs_dir2_leaf_tail_t *)
|
||||
((char *)(lp) + (mp)->m_dirblksize -
|
||||
(uint)sizeof(xfs_dir2_leaf_tail_t));
|
||||
}
|
||||
|
||||
/*
|
||||
* Get address of the bests array in the single-leaf block.
|
||||
*/
|
||||
static inline __be16 *
|
||||
xfs_dir2_leaf_bests_p(xfs_dir2_leaf_tail_t *ltp)
|
||||
{
|
||||
return (__be16 *)ltp - be32_to_cpu(ltp->bestcount);
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert dataptr to byte in file space
|
||||
*/
|
||||
static inline xfs_dir2_off_t
|
||||
xfs_dir2_dataptr_to_byte(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
|
||||
{
|
||||
return (xfs_dir2_off_t)(dp) << XFS_DIR2_DATA_ALIGN_LOG;
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert byte in file space to dataptr. It had better be aligned.
|
||||
*/
|
||||
static inline xfs_dir2_dataptr_t
|
||||
xfs_dir2_byte_to_dataptr(struct xfs_mount *mp, xfs_dir2_off_t by)
|
||||
{
|
||||
return (xfs_dir2_dataptr_t)((by) >> XFS_DIR2_DATA_ALIGN_LOG);
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert byte in space to (DB) block
|
||||
*/
|
||||
static inline xfs_dir2_db_t
|
||||
xfs_dir2_byte_to_db(struct xfs_mount *mp, xfs_dir2_off_t by)
|
||||
{
|
||||
return (xfs_dir2_db_t)((by) >> \
|
||||
((mp)->m_sb.sb_blocklog + (mp)->m_sb.sb_dirblklog));
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert dataptr to a block number
|
||||
*/
|
||||
static inline xfs_dir2_db_t
|
||||
xfs_dir2_dataptr_to_db(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
|
||||
{
|
||||
return xfs_dir2_byte_to_db(mp, xfs_dir2_dataptr_to_byte(mp, dp));
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert byte in space to offset in a block
|
||||
*/
|
||||
static inline xfs_dir2_data_aoff_t
|
||||
xfs_dir2_byte_to_off(struct xfs_mount *mp, xfs_dir2_off_t by)
|
||||
{
|
||||
return (xfs_dir2_data_aoff_t)((by) & \
|
||||
((1 << ((mp)->m_sb.sb_blocklog + (mp)->m_sb.sb_dirblklog)) - 1));
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert dataptr to a byte offset in a block
|
||||
*/
|
||||
static inline xfs_dir2_data_aoff_t
|
||||
xfs_dir2_dataptr_to_off(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
|
||||
{
|
||||
return xfs_dir2_byte_to_off(mp, xfs_dir2_dataptr_to_byte(mp, dp));
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert block and offset to byte in space
|
||||
*/
|
||||
static inline xfs_dir2_off_t
|
||||
xfs_dir2_db_off_to_byte(struct xfs_mount *mp, xfs_dir2_db_t db,
|
||||
xfs_dir2_data_aoff_t o)
|
||||
{
|
||||
return ((xfs_dir2_off_t)(db) << \
|
||||
((mp)->m_sb.sb_blocklog + (mp)->m_sb.sb_dirblklog)) + (o);
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert block (DB) to block (dablk)
|
||||
*/
|
||||
static inline xfs_dablk_t
|
||||
xfs_dir2_db_to_da(struct xfs_mount *mp, xfs_dir2_db_t db)
|
||||
{
|
||||
return (xfs_dablk_t)((db) << (mp)->m_sb.sb_dirblklog);
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert byte in space to (DA) block
|
||||
*/
|
||||
static inline xfs_dablk_t
|
||||
xfs_dir2_byte_to_da(struct xfs_mount *mp, xfs_dir2_off_t by)
|
||||
{
|
||||
return xfs_dir2_db_to_da(mp, xfs_dir2_byte_to_db(mp, by));
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert block and offset to dataptr
|
||||
*/
|
||||
static inline xfs_dir2_dataptr_t
|
||||
xfs_dir2_db_off_to_dataptr(struct xfs_mount *mp, xfs_dir2_db_t db,
|
||||
xfs_dir2_data_aoff_t o)
|
||||
{
|
||||
return xfs_dir2_byte_to_dataptr(mp, xfs_dir2_db_off_to_byte(mp, db, o));
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert block (dablk) to block (DB)
|
||||
*/
|
||||
static inline xfs_dir2_db_t
|
||||
xfs_dir2_da_to_db(struct xfs_mount *mp, xfs_dablk_t da)
|
||||
{
|
||||
return (xfs_dir2_db_t)((da) >> (mp)->m_sb.sb_dirblklog);
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert block (dablk) to byte offset in space
|
||||
*/
|
||||
static inline xfs_dir2_off_t
|
||||
xfs_dir2_da_to_byte(struct xfs_mount *mp, xfs_dablk_t da)
|
||||
{
|
||||
return xfs_dir2_db_off_to_byte(mp, xfs_dir2_da_to_db(mp, da), 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Function declarations.
|
||||
*/
|
||||
extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args,
|
||||
struct xfs_dabuf *dbp);
|
||||
extern int xfs_dir2_leaf_addname(struct xfs_da_args *args);
|
||||
extern void xfs_dir2_leaf_compact(struct xfs_da_args *args,
|
||||
struct xfs_dabuf *bp);
|
||||
extern void xfs_dir2_leaf_compact_x1(struct xfs_dabuf *bp, int *indexp,
|
||||
int *lowstalep, int *highstalep,
|
||||
int *lowlogp, int *highlogp);
|
||||
extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, void *dirent,
|
||||
size_t bufsize, xfs_off_t *offset,
|
||||
filldir_t filldir);
|
||||
extern int xfs_dir2_leaf_init(struct xfs_da_args *args, xfs_dir2_db_t bno,
|
||||
struct xfs_dabuf **bpp, int magic);
|
||||
extern void xfs_dir2_leaf_log_ents(struct xfs_trans *tp, struct xfs_dabuf *bp,
|
||||
int first, int last);
|
||||
extern void xfs_dir2_leaf_log_header(struct xfs_trans *tp,
|
||||
struct xfs_dabuf *bp);
|
||||
extern int xfs_dir2_leaf_lookup(struct xfs_da_args *args);
|
||||
extern int xfs_dir2_leaf_removename(struct xfs_da_args *args);
|
||||
extern int xfs_dir2_leaf_replace(struct xfs_da_args *args);
|
||||
extern int xfs_dir2_leaf_search_hash(struct xfs_da_args *args,
|
||||
struct xfs_dabuf *lbp);
|
||||
extern int xfs_dir2_leaf_trim_data(struct xfs_da_args *args,
|
||||
struct xfs_dabuf *lbp, xfs_dir2_db_t db);
|
||||
extern int xfs_dir2_node_to_leaf(struct xfs_da_state *state);
|
||||
|
||||
#endif /* __XFS_DIR2_LEAF_H__ */
|
@ -23,18 +23,14 @@
|
||||
#include "xfs_trans.h"
|
||||
#include "xfs_sb.h"
|
||||
#include "xfs_ag.h"
|
||||
#include "xfs_dir2.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_da_btree.h"
|
||||
#include "xfs_bmap_btree.h"
|
||||
#include "xfs_dir2_sf.h"
|
||||
#include "xfs_dinode.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_bmap.h"
|
||||
#include "xfs_dir2_data.h"
|
||||
#include "xfs_dir2_leaf.h"
|
||||
#include "xfs_dir2_block.h"
|
||||
#include "xfs_dir2_node.h"
|
||||
#include "xfs_dir2_format.h"
|
||||
#include "xfs_dir2_priv.h"
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_trace.h"
|
||||
|
||||
@ -73,7 +69,7 @@ xfs_dir2_free_log_bests(
|
||||
xfs_dir2_free_t *free; /* freespace structure */
|
||||
|
||||
free = bp->data;
|
||||
ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
|
||||
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
|
||||
xfs_da_log_buf(tp, bp,
|
||||
(uint)((char *)&free->bests[first] - (char *)free),
|
||||
(uint)((char *)&free->bests[last] - (char *)free +
|
||||
@ -91,7 +87,7 @@ xfs_dir2_free_log_header(
|
||||
xfs_dir2_free_t *free; /* freespace structure */
|
||||
|
||||
free = bp->data;
|
||||
ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
|
||||
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
|
||||
xfs_da_log_buf(tp, bp, (uint)((char *)&free->hdr - (char *)free),
|
||||
(uint)(sizeof(xfs_dir2_free_hdr_t) - 1));
|
||||
}
|
||||
@ -244,89 +240,13 @@ xfs_dir2_leafn_add(
|
||||
lfloglow = be16_to_cpu(leaf->hdr.count);
|
||||
lfloghigh = -1;
|
||||
}
|
||||
/*
|
||||
* No stale entries, just insert a space for the new entry.
|
||||
*/
|
||||
if (!leaf->hdr.stale) {
|
||||
lep = &leaf->ents[index];
|
||||
if (index < be16_to_cpu(leaf->hdr.count))
|
||||
memmove(lep + 1, lep,
|
||||
(be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep));
|
||||
lfloglow = index;
|
||||
lfloghigh = be16_to_cpu(leaf->hdr.count);
|
||||
be16_add_cpu(&leaf->hdr.count, 1);
|
||||
}
|
||||
/*
|
||||
* There are stale entries. We'll use one for the new entry.
|
||||
*/
|
||||
else {
|
||||
/*
|
||||
* If we didn't do a compact then we need to figure out
|
||||
* which stale entry will be used.
|
||||
*/
|
||||
if (compact == 0) {
|
||||
/*
|
||||
* Find first stale entry before our insertion point.
|
||||
*/
|
||||
for (lowstale = index - 1;
|
||||
lowstale >= 0 &&
|
||||
be32_to_cpu(leaf->ents[lowstale].address) !=
|
||||
XFS_DIR2_NULL_DATAPTR;
|
||||
lowstale--)
|
||||
continue;
|
||||
/*
|
||||
* Find next stale entry after insertion point.
|
||||
* Stop looking if the answer would be worse than
|
||||
* lowstale already found.
|
||||
*/
|
||||
for (highstale = index;
|
||||
highstale < be16_to_cpu(leaf->hdr.count) &&
|
||||
be32_to_cpu(leaf->ents[highstale].address) !=
|
||||
XFS_DIR2_NULL_DATAPTR &&
|
||||
(lowstale < 0 ||
|
||||
index - lowstale - 1 >= highstale - index);
|
||||
highstale++)
|
||||
continue;
|
||||
}
|
||||
/*
|
||||
* Using the low stale entry.
|
||||
* Shift entries up toward the stale slot.
|
||||
*/
|
||||
if (lowstale >= 0 &&
|
||||
(highstale == be16_to_cpu(leaf->hdr.count) ||
|
||||
index - lowstale - 1 < highstale - index)) {
|
||||
ASSERT(be32_to_cpu(leaf->ents[lowstale].address) ==
|
||||
XFS_DIR2_NULL_DATAPTR);
|
||||
ASSERT(index - lowstale - 1 >= 0);
|
||||
if (index - lowstale - 1 > 0)
|
||||
memmove(&leaf->ents[lowstale],
|
||||
&leaf->ents[lowstale + 1],
|
||||
(index - lowstale - 1) * sizeof(*lep));
|
||||
lep = &leaf->ents[index - 1];
|
||||
lfloglow = MIN(lowstale, lfloglow);
|
||||
lfloghigh = MAX(index - 1, lfloghigh);
|
||||
}
|
||||
/*
|
||||
* Using the high stale entry.
|
||||
* Shift entries down toward the stale slot.
|
||||
*/
|
||||
else {
|
||||
ASSERT(be32_to_cpu(leaf->ents[highstale].address) ==
|
||||
XFS_DIR2_NULL_DATAPTR);
|
||||
ASSERT(highstale - index >= 0);
|
||||
if (highstale - index > 0)
|
||||
memmove(&leaf->ents[index + 1],
|
||||
&leaf->ents[index],
|
||||
(highstale - index) * sizeof(*lep));
|
||||
lep = &leaf->ents[index];
|
||||
lfloglow = MIN(index, lfloglow);
|
||||
lfloghigh = MAX(highstale, lfloghigh);
|
||||
}
|
||||
be16_add_cpu(&leaf->hdr.stale, -1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Insert the new entry, log everything.
|
||||
*/
|
||||
lep = xfs_dir2_leaf_find_entry(leaf, index, compact, lowstale,
|
||||
highstale, &lfloglow, &lfloghigh);
|
||||
|
||||
lep->hashval = cpu_to_be32(args->hashval);
|
||||
lep->address = cpu_to_be32(xfs_dir2_db_off_to_dataptr(mp,
|
||||
args->blkno, args->index));
|
||||
@ -352,14 +272,14 @@ xfs_dir2_leafn_check(
|
||||
|
||||
leaf = bp->data;
|
||||
mp = dp->i_mount;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
|
||||
ASSERT(be16_to_cpu(leaf->hdr.count) <= xfs_dir2_max_leaf_ents(mp));
|
||||
for (i = stale = 0; i < be16_to_cpu(leaf->hdr.count); i++) {
|
||||
if (i + 1 < be16_to_cpu(leaf->hdr.count)) {
|
||||
ASSERT(be32_to_cpu(leaf->ents[i].hashval) <=
|
||||
be32_to_cpu(leaf->ents[i + 1].hashval));
|
||||
}
|
||||
if (be32_to_cpu(leaf->ents[i].address) == XFS_DIR2_NULL_DATAPTR)
|
||||
if (leaf->ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
|
||||
stale++;
|
||||
}
|
||||
ASSERT(be16_to_cpu(leaf->hdr.stale) == stale);
|
||||
@ -378,7 +298,7 @@ xfs_dir2_leafn_lasthash(
|
||||
xfs_dir2_leaf_t *leaf; /* leaf structure */
|
||||
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
|
||||
if (count)
|
||||
*count = be16_to_cpu(leaf->hdr.count);
|
||||
if (!leaf->hdr.count)
|
||||
@ -417,7 +337,7 @@ xfs_dir2_leafn_lookup_for_addname(
|
||||
tp = args->trans;
|
||||
mp = dp->i_mount;
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
|
||||
#ifdef __KERNEL__
|
||||
ASSERT(be16_to_cpu(leaf->hdr.count) > 0);
|
||||
#endif
|
||||
@ -434,7 +354,7 @@ xfs_dir2_leafn_lookup_for_addname(
|
||||
curbp = state->extrablk.bp;
|
||||
curfdb = state->extrablk.blkno;
|
||||
free = curbp->data;
|
||||
ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
|
||||
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
|
||||
}
|
||||
length = xfs_dir2_data_entsize(args->namelen);
|
||||
/*
|
||||
@ -488,7 +408,7 @@ xfs_dir2_leafn_lookup_for_addname(
|
||||
ASSERT(be32_to_cpu(free->hdr.magic) ==
|
||||
XFS_DIR2_FREE_MAGIC);
|
||||
ASSERT((be32_to_cpu(free->hdr.firstdb) %
|
||||
XFS_DIR2_MAX_FREE_BESTS(mp)) == 0);
|
||||
xfs_dir2_free_max_bests(mp)) == 0);
|
||||
ASSERT(be32_to_cpu(free->hdr.firstdb) <= curdb);
|
||||
ASSERT(curdb < be32_to_cpu(free->hdr.firstdb) +
|
||||
be32_to_cpu(free->hdr.nvalid));
|
||||
@ -500,7 +420,8 @@ xfs_dir2_leafn_lookup_for_addname(
|
||||
/*
|
||||
* If it has room, return it.
|
||||
*/
|
||||
if (unlikely(be16_to_cpu(free->bests[fi]) == NULLDATAOFF)) {
|
||||
if (unlikely(free->bests[fi] ==
|
||||
cpu_to_be16(NULLDATAOFF))) {
|
||||
XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int",
|
||||
XFS_ERRLEVEL_LOW, mp);
|
||||
if (curfdb != newfdb)
|
||||
@ -561,7 +482,7 @@ xfs_dir2_leafn_lookup_for_entry(
|
||||
tp = args->trans;
|
||||
mp = dp->i_mount;
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
|
||||
#ifdef __KERNEL__
|
||||
ASSERT(be16_to_cpu(leaf->hdr.count) > 0);
|
||||
#endif
|
||||
@ -742,7 +663,8 @@ xfs_dir2_leafn_moveents(
|
||||
int i; /* temp leaf index */
|
||||
|
||||
for (i = start_s, stale = 0; i < start_s + count; i++) {
|
||||
if (be32_to_cpu(leaf_s->ents[i].address) == XFS_DIR2_NULL_DATAPTR)
|
||||
if (leaf_s->ents[i].address ==
|
||||
cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
|
||||
stale++;
|
||||
}
|
||||
} else
|
||||
@ -789,8 +711,8 @@ xfs_dir2_leafn_order(
|
||||
|
||||
leaf1 = leaf1_bp->data;
|
||||
leaf2 = leaf2_bp->data;
|
||||
ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
|
||||
ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
|
||||
ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
|
||||
ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
|
||||
if (be16_to_cpu(leaf1->hdr.count) > 0 &&
|
||||
be16_to_cpu(leaf2->hdr.count) > 0 &&
|
||||
(be32_to_cpu(leaf2->ents[0].hashval) < be32_to_cpu(leaf1->ents[0].hashval) ||
|
||||
@ -918,7 +840,7 @@ xfs_dir2_leafn_remove(
|
||||
xfs_da_state_blk_t *dblk, /* data block */
|
||||
int *rval) /* resulting block needs join */
|
||||
{
|
||||
xfs_dir2_data_t *data; /* data block structure */
|
||||
xfs_dir2_data_hdr_t *hdr; /* data block header */
|
||||
xfs_dir2_db_t db; /* data block number */
|
||||
xfs_dabuf_t *dbp; /* data block buffer */
|
||||
xfs_dir2_data_entry_t *dep; /* data block entry */
|
||||
@ -938,7 +860,7 @@ xfs_dir2_leafn_remove(
|
||||
tp = args->trans;
|
||||
mp = dp->i_mount;
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
|
||||
/*
|
||||
* Point to the entry we're removing.
|
||||
*/
|
||||
@ -963,9 +885,9 @@ xfs_dir2_leafn_remove(
|
||||
* in the data block in case it changes.
|
||||
*/
|
||||
dbp = dblk->bp;
|
||||
data = dbp->data;
|
||||
dep = (xfs_dir2_data_entry_t *)((char *)data + off);
|
||||
longest = be16_to_cpu(data->hdr.bestfree[0].length);
|
||||
hdr = dbp->data;
|
||||
dep = (xfs_dir2_data_entry_t *)((char *)hdr + off);
|
||||
longest = be16_to_cpu(hdr->bestfree[0].length);
|
||||
needlog = needscan = 0;
|
||||
xfs_dir2_data_make_free(tp, dbp, off,
|
||||
xfs_dir2_data_entsize(dep->namelen), &needlog, &needscan);
|
||||
@ -974,7 +896,7 @@ xfs_dir2_leafn_remove(
|
||||
* Log the data block header if needed.
|
||||
*/
|
||||
if (needscan)
|
||||
xfs_dir2_data_freescan(mp, data, &needlog);
|
||||
xfs_dir2_data_freescan(mp, hdr, &needlog);
|
||||
if (needlog)
|
||||
xfs_dir2_data_log_header(tp, dbp);
|
||||
xfs_dir2_data_check(dp, dbp);
|
||||
@ -982,7 +904,7 @@ xfs_dir2_leafn_remove(
|
||||
* If the longest data block freespace changes, need to update
|
||||
* the corresponding freeblock entry.
|
||||
*/
|
||||
if (longest < be16_to_cpu(data->hdr.bestfree[0].length)) {
|
||||
if (longest < be16_to_cpu(hdr->bestfree[0].length)) {
|
||||
int error; /* error return value */
|
||||
xfs_dabuf_t *fbp; /* freeblock buffer */
|
||||
xfs_dir2_db_t fdb; /* freeblock block number */
|
||||
@ -1000,27 +922,27 @@ xfs_dir2_leafn_remove(
|
||||
return error;
|
||||
}
|
||||
free = fbp->data;
|
||||
ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
|
||||
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
|
||||
ASSERT(be32_to_cpu(free->hdr.firstdb) ==
|
||||
XFS_DIR2_MAX_FREE_BESTS(mp) *
|
||||
xfs_dir2_free_max_bests(mp) *
|
||||
(fdb - XFS_DIR2_FREE_FIRSTDB(mp)));
|
||||
/*
|
||||
* Calculate which entry we need to fix.
|
||||
*/
|
||||
findex = xfs_dir2_db_to_fdindex(mp, db);
|
||||
longest = be16_to_cpu(data->hdr.bestfree[0].length);
|
||||
longest = be16_to_cpu(hdr->bestfree[0].length);
|
||||
/*
|
||||
* If the data block is now empty we can get rid of it
|
||||
* (usually).
|
||||
*/
|
||||
if (longest == mp->m_dirblksize - (uint)sizeof(data->hdr)) {
|
||||
if (longest == mp->m_dirblksize - (uint)sizeof(*hdr)) {
|
||||
/*
|
||||
* Try to punch out the data block.
|
||||
*/
|
||||
error = xfs_dir2_shrink_inode(args, db, dbp);
|
||||
if (error == 0) {
|
||||
dblk->bp = NULL;
|
||||
data = NULL;
|
||||
hdr = NULL;
|
||||
}
|
||||
/*
|
||||
* We can get ENOSPC if there's no space reservation.
|
||||
@ -1036,7 +958,7 @@ xfs_dir2_leafn_remove(
|
||||
* If we got rid of the data block, we can eliminate that entry
|
||||
* in the free block.
|
||||
*/
|
||||
if (data == NULL) {
|
||||
if (hdr == NULL) {
|
||||
/*
|
||||
* One less used entry in the free table.
|
||||
*/
|
||||
@ -1052,7 +974,8 @@ xfs_dir2_leafn_remove(
|
||||
int i; /* free entry index */
|
||||
|
||||
for (i = findex - 1;
|
||||
i >= 0 && be16_to_cpu(free->bests[i]) == NULLDATAOFF;
|
||||
i >= 0 &&
|
||||
free->bests[i] == cpu_to_be16(NULLDATAOFF);
|
||||
i--)
|
||||
continue;
|
||||
free->hdr.nvalid = cpu_to_be32(i + 1);
|
||||
@ -1209,7 +1132,7 @@ xfs_dir2_leafn_toosmall(
|
||||
*/
|
||||
blk = &state->path.blk[state->path.active - 1];
|
||||
info = blk->bp->data;
|
||||
ASSERT(be16_to_cpu(info->magic) == XFS_DIR2_LEAFN_MAGIC);
|
||||
ASSERT(info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
|
||||
leaf = (xfs_dir2_leaf_t *)info;
|
||||
count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale);
|
||||
bytes = (uint)sizeof(leaf->hdr) + count * (uint)sizeof(leaf->ents[0]);
|
||||
@ -1268,7 +1191,7 @@ xfs_dir2_leafn_toosmall(
|
||||
count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale);
|
||||
bytes = state->blocksize - (state->blocksize >> 2);
|
||||
leaf = bp->data;
|
||||
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
|
||||
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
|
||||
count += be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale);
|
||||
bytes -= count * (uint)sizeof(leaf->ents[0]);
|
||||
/*
|
||||
@ -1327,8 +1250,8 @@ xfs_dir2_leafn_unbalance(
|
||||
ASSERT(save_blk->magic == XFS_DIR2_LEAFN_MAGIC);
|
||||
drop_leaf = drop_blk->bp->data;
|
||||
save_leaf = save_blk->bp->data;
|
||||
ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
|
||||
ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
|
||||
ASSERT(drop_leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
|
||||
ASSERT(save_leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
|
||||
/*
|
||||
* If there are any stale leaf entries, take this opportunity
|
||||
* to purge them.
|
||||
@ -1432,7 +1355,7 @@ xfs_dir2_node_addname_int(
|
||||
xfs_da_args_t *args, /* operation arguments */
|
||||
xfs_da_state_blk_t *fblk) /* optional freespace block */
|
||||
{
|
||||
xfs_dir2_data_t *data; /* data block structure */
|
||||
xfs_dir2_data_hdr_t *hdr; /* data block header */
|
||||
xfs_dir2_db_t dbno; /* data block number */
|
||||
xfs_dabuf_t *dbp; /* data block buffer */
|
||||
xfs_dir2_data_entry_t *dep; /* data entry pointer */
|
||||
@ -1469,7 +1392,7 @@ xfs_dir2_node_addname_int(
|
||||
*/
|
||||
ifbno = fblk->blkno;
|
||||
free = fbp->data;
|
||||
ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
|
||||
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
|
||||
findex = fblk->index;
|
||||
/*
|
||||
* This means the free entry showed that the data block had
|
||||
@ -1553,7 +1476,7 @@ xfs_dir2_node_addname_int(
|
||||
continue;
|
||||
}
|
||||
free = fbp->data;
|
||||
ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
|
||||
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
|
||||
findex = 0;
|
||||
}
|
||||
/*
|
||||
@ -1680,12 +1603,12 @@ xfs_dir2_node_addname_int(
|
||||
free->hdr.magic = cpu_to_be32(XFS_DIR2_FREE_MAGIC);
|
||||
free->hdr.firstdb = cpu_to_be32(
|
||||
(fbno - XFS_DIR2_FREE_FIRSTDB(mp)) *
|
||||
XFS_DIR2_MAX_FREE_BESTS(mp));
|
||||
xfs_dir2_free_max_bests(mp));
|
||||
free->hdr.nvalid = 0;
|
||||
free->hdr.nused = 0;
|
||||
} else {
|
||||
free = fbp->data;
|
||||
ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
|
||||
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1697,7 +1620,7 @@ xfs_dir2_node_addname_int(
|
||||
* freespace block, extend that table.
|
||||
*/
|
||||
if (findex >= be32_to_cpu(free->hdr.nvalid)) {
|
||||
ASSERT(findex < XFS_DIR2_MAX_FREE_BESTS(mp));
|
||||
ASSERT(findex < xfs_dir2_free_max_bests(mp));
|
||||
free->hdr.nvalid = cpu_to_be32(findex + 1);
|
||||
/*
|
||||
* Tag new entry so nused will go up.
|
||||
@ -1708,7 +1631,7 @@ xfs_dir2_node_addname_int(
|
||||
* If this entry was for an empty data block
|
||||
* (this should always be true) then update the header.
|
||||
*/
|
||||
if (be16_to_cpu(free->bests[findex]) == NULLDATAOFF) {
|
||||
if (free->bests[findex] == cpu_to_be16(NULLDATAOFF)) {
|
||||
be32_add_cpu(&free->hdr.nused, 1);
|
||||
xfs_dir2_free_log_header(tp, fbp);
|
||||
}
|
||||
@ -1717,8 +1640,8 @@ xfs_dir2_node_addname_int(
|
||||
* We haven't allocated the data entry yet so this will
|
||||
* change again.
|
||||
*/
|
||||
data = dbp->data;
|
||||
free->bests[findex] = data->hdr.bestfree[0].length;
|
||||
hdr = dbp->data;
|
||||
free->bests[findex] = hdr->bestfree[0].length;
|
||||
logfree = 1;
|
||||
}
|
||||
/*
|
||||
@ -1743,21 +1666,21 @@ xfs_dir2_node_addname_int(
|
||||
xfs_da_buf_done(fbp);
|
||||
return error;
|
||||
}
|
||||
data = dbp->data;
|
||||
hdr = dbp->data;
|
||||
logfree = 0;
|
||||
}
|
||||
ASSERT(be16_to_cpu(data->hdr.bestfree[0].length) >= length);
|
||||
ASSERT(be16_to_cpu(hdr->bestfree[0].length) >= length);
|
||||
/*
|
||||
* Point to the existing unused space.
|
||||
*/
|
||||
dup = (xfs_dir2_data_unused_t *)
|
||||
((char *)data + be16_to_cpu(data->hdr.bestfree[0].offset));
|
||||
((char *)hdr + be16_to_cpu(hdr->bestfree[0].offset));
|
||||
needscan = needlog = 0;
|
||||
/*
|
||||
* Mark the first part of the unused space, inuse for us.
|
||||
*/
|
||||
xfs_dir2_data_use_free(tp, dbp, dup,
|
||||
(xfs_dir2_data_aoff_t)((char *)dup - (char *)data), length,
|
||||
(xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr), length,
|
||||
&needlog, &needscan);
|
||||
/*
|
||||
* Fill in the new entry and log it.
|
||||
@ -1767,13 +1690,13 @@ xfs_dir2_node_addname_int(
|
||||
dep->namelen = args->namelen;
|
||||
memcpy(dep->name, args->name, dep->namelen);
|
||||
tagp = xfs_dir2_data_entry_tag_p(dep);
|
||||
*tagp = cpu_to_be16((char *)dep - (char *)data);
|
||||
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
|
||||
xfs_dir2_data_log_entry(tp, dbp, dep);
|
||||
/*
|
||||
* Rescan the block for bestfree if needed.
|
||||
*/
|
||||
if (needscan)
|
||||
xfs_dir2_data_freescan(mp, data, &needlog);
|
||||
xfs_dir2_data_freescan(mp, hdr, &needlog);
|
||||
/*
|
||||
* Log the data block header if needed.
|
||||
*/
|
||||
@ -1782,8 +1705,8 @@ xfs_dir2_node_addname_int(
|
||||
/*
|
||||
* If the freespace entry is now wrong, update it.
|
||||
*/
|
||||
if (be16_to_cpu(free->bests[findex]) != be16_to_cpu(data->hdr.bestfree[0].length)) {
|
||||
free->bests[findex] = data->hdr.bestfree[0].length;
|
||||
if (be16_to_cpu(free->bests[findex]) != be16_to_cpu(hdr->bestfree[0].length)) {
|
||||
free->bests[findex] = hdr->bestfree[0].length;
|
||||
logfree = 1;
|
||||
}
|
||||
/*
|
||||
@ -1933,7 +1856,7 @@ xfs_dir2_node_replace(
|
||||
xfs_da_args_t *args) /* operation arguments */
|
||||
{
|
||||
xfs_da_state_blk_t *blk; /* leaf block */
|
||||
xfs_dir2_data_t *data; /* data block structure */
|
||||
xfs_dir2_data_hdr_t *hdr; /* data block header */
|
||||
xfs_dir2_data_entry_t *dep; /* data entry changed */
|
||||
int error; /* error return value */
|
||||
int i; /* btree level */
|
||||
@ -1977,10 +1900,10 @@ xfs_dir2_node_replace(
|
||||
/*
|
||||
* Point to the data entry.
|
||||
*/
|
||||
data = state->extrablk.bp->data;
|
||||
ASSERT(be32_to_cpu(data->hdr.magic) == XFS_DIR2_DATA_MAGIC);
|
||||
hdr = state->extrablk.bp->data;
|
||||
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC));
|
||||
dep = (xfs_dir2_data_entry_t *)
|
||||
((char *)data +
|
||||
((char *)hdr +
|
||||
xfs_dir2_dataptr_to_off(state->mp, be32_to_cpu(lep->address)));
|
||||
ASSERT(inum != be64_to_cpu(dep->inumber));
|
||||
/*
|
||||
@ -2044,7 +1967,7 @@ xfs_dir2_node_trim_free(
|
||||
return 0;
|
||||
}
|
||||
free = bp->data;
|
||||
ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
|
||||
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
|
||||
/*
|
||||
* If there are used entries, there's nothing to do.
|
||||
*/
@ -1,100 +0,0 @@
/*
 * Copyright (c) 2000,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __XFS_DIR2_NODE_H__
#define __XFS_DIR2_NODE_H__

/*
 * Directory version 2, btree node format structures
 */

struct uio;
struct xfs_dabuf;
struct xfs_da_args;
struct xfs_da_state;
struct xfs_da_state_blk;
struct xfs_inode;
struct xfs_trans;

/*
 * Offset of the freespace index.
 */
#define	XFS_DIR2_FREE_SPACE	2
#define	XFS_DIR2_FREE_OFFSET	(XFS_DIR2_FREE_SPACE * XFS_DIR2_SPACE_SIZE)
#define	XFS_DIR2_FREE_FIRSTDB(mp)	\
	xfs_dir2_byte_to_db(mp, XFS_DIR2_FREE_OFFSET)

#define	XFS_DIR2_FREE_MAGIC	0x58443246	/* XD2F */

typedef struct xfs_dir2_free_hdr {
	__be32		magic;		/* XFS_DIR2_FREE_MAGIC */
	__be32		firstdb;	/* db of first entry */
	__be32		nvalid;		/* count of valid entries */
	__be32		nused;		/* count of used entries */
} xfs_dir2_free_hdr_t;

typedef struct xfs_dir2_free {
	xfs_dir2_free_hdr_t	hdr;	/* block header */
	__be16		bests[1];	/* best free counts */
					/* unused entries are -1 */
} xfs_dir2_free_t;

#define	XFS_DIR2_MAX_FREE_BESTS(mp)	\
	(((mp)->m_dirblksize - (uint)sizeof(xfs_dir2_free_hdr_t)) / \
	 (uint)sizeof(xfs_dir2_data_off_t))

/*
 * Convert data space db to the corresponding free db.
 */
static inline xfs_dir2_db_t
xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db)
{
	return (XFS_DIR2_FREE_FIRSTDB(mp) + (db) / XFS_DIR2_MAX_FREE_BESTS(mp));
}

/*
 * Convert data space db to the corresponding index in a free db.
 */
static inline int
xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db)
{
	return ((db) % XFS_DIR2_MAX_FREE_BESTS(mp));
}

extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args,
	struct xfs_dabuf *lbp);
extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_dabuf *bp, int *count);
extern int xfs_dir2_leafn_lookup_int(struct xfs_dabuf *bp,
	struct xfs_da_args *args, int *indexp,
	struct xfs_da_state *state);
extern int xfs_dir2_leafn_order(struct xfs_dabuf *leaf1_bp,
	struct xfs_dabuf *leaf2_bp);
extern int xfs_dir2_leafn_split(struct xfs_da_state *state,
	struct xfs_da_state_blk *oldblk,
	struct xfs_da_state_blk *newblk);
extern int xfs_dir2_leafn_toosmall(struct xfs_da_state *state, int *action);
extern void xfs_dir2_leafn_unbalance(struct xfs_da_state *state,
	struct xfs_da_state_blk *drop_blk,
	struct xfs_da_state_blk *save_blk);
extern int xfs_dir2_node_addname(struct xfs_da_args *args);
extern int xfs_dir2_node_lookup(struct xfs_da_args *args);
extern int xfs_dir2_node_removename(struct xfs_da_args *args);
extern int xfs_dir2_node_replace(struct xfs_da_args *args);
extern int xfs_dir2_node_trim_free(struct xfs_da_args *args, xfs_fileoff_t fo,
	int *rvalp);

#endif /* __XFS_DIR2_NODE_H__ */
fs/xfs/xfs_dir2_priv.h
Normal file
@ -0,0 +1,135 @@
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __XFS_DIR2_PRIV_H__
#define __XFS_DIR2_PRIV_H__

/* xfs_dir2.c */
extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino);
extern int xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, int *r);
extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *r);
extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
	xfs_dir2_db_t *dbp);
extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
	struct xfs_dabuf *bp);
extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
	const unsigned char *name, int len);

/* xfs_dir2_block.c */
extern int xfs_dir2_block_addname(struct xfs_da_args *args);
extern int xfs_dir2_block_getdents(struct xfs_inode *dp, void *dirent,
	xfs_off_t *offset, filldir_t filldir);
extern int xfs_dir2_block_lookup(struct xfs_da_args *args);
extern int xfs_dir2_block_removename(struct xfs_da_args *args);
extern int xfs_dir2_block_replace(struct xfs_da_args *args);
extern int xfs_dir2_leaf_to_block(struct xfs_da_args *args,
	struct xfs_dabuf *lbp, struct xfs_dabuf *dbp);

/* xfs_dir2_data.c */
#ifdef DEBUG
extern void xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_dabuf *bp);
#else
#define	xfs_dir2_data_check(dp,bp)
#endif
extern struct xfs_dir2_data_free *
xfs_dir2_data_freeinsert(struct xfs_dir2_data_hdr *hdr,
	struct xfs_dir2_data_unused *dup, int *loghead);
extern void xfs_dir2_data_freescan(struct xfs_mount *mp,
	struct xfs_dir2_data_hdr *hdr, int *loghead);
extern int xfs_dir2_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno,
	struct xfs_dabuf **bpp);
extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_dabuf *bp,
	struct xfs_dir2_data_entry *dep);
extern void xfs_dir2_data_log_header(struct xfs_trans *tp,
	struct xfs_dabuf *bp);
extern void xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_dabuf *bp,
	struct xfs_dir2_data_unused *dup);
extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_dabuf *bp,
	xfs_dir2_data_aoff_t offset, xfs_dir2_data_aoff_t len,
	int *needlogp, int *needscanp);
extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_dabuf *bp,
	struct xfs_dir2_data_unused *dup, xfs_dir2_data_aoff_t offset,
	xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp);

/* xfs_dir2_leaf.c */
extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args,
	struct xfs_dabuf *dbp);
extern int xfs_dir2_leaf_addname(struct xfs_da_args *args);
extern void xfs_dir2_leaf_compact(struct xfs_da_args *args,
	struct xfs_dabuf *bp);
extern void xfs_dir2_leaf_compact_x1(struct xfs_dabuf *bp, int *indexp,
	int *lowstalep, int *highstalep, int *lowlogp, int *highlogp);
extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, void *dirent,
	size_t bufsize, xfs_off_t *offset, filldir_t filldir);
extern int xfs_dir2_leaf_init(struct xfs_da_args *args, xfs_dir2_db_t bno,
	struct xfs_dabuf **bpp, int magic);
extern void xfs_dir2_leaf_log_ents(struct xfs_trans *tp, struct xfs_dabuf *bp,
	int first, int last);
extern void xfs_dir2_leaf_log_header(struct xfs_trans *tp,
	struct xfs_dabuf *bp);
extern int xfs_dir2_leaf_lookup(struct xfs_da_args *args);
extern int xfs_dir2_leaf_removename(struct xfs_da_args *args);
extern int xfs_dir2_leaf_replace(struct xfs_da_args *args);
extern int xfs_dir2_leaf_search_hash(struct xfs_da_args *args,
	struct xfs_dabuf *lbp);
extern int xfs_dir2_leaf_trim_data(struct xfs_da_args *args,
	struct xfs_dabuf *lbp, xfs_dir2_db_t db);
extern struct xfs_dir2_leaf_entry *
xfs_dir2_leaf_find_entry(struct xfs_dir2_leaf *leaf, int index, int compact,
	int lowstale, int highstale,
	int *lfloglow, int *lfloghigh);
extern int xfs_dir2_node_to_leaf(struct xfs_da_state *state);

/* xfs_dir2_node.c */
extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args,
	struct xfs_dabuf *lbp);
extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_dabuf *bp, int *count);
extern int xfs_dir2_leafn_lookup_int(struct xfs_dabuf *bp,
	struct xfs_da_args *args, int *indexp,
	struct xfs_da_state *state);
extern int xfs_dir2_leafn_order(struct xfs_dabuf *leaf1_bp,
	struct xfs_dabuf *leaf2_bp);
extern int xfs_dir2_leafn_split(struct xfs_da_state *state,
	struct xfs_da_state_blk *oldblk, struct xfs_da_state_blk *newblk);
extern int xfs_dir2_leafn_toosmall(struct xfs_da_state *state, int *action);
extern void xfs_dir2_leafn_unbalance(struct xfs_da_state *state,
	struct xfs_da_state_blk *drop_blk,
	struct xfs_da_state_blk *save_blk);
extern int xfs_dir2_node_addname(struct xfs_da_args *args);
extern int xfs_dir2_node_lookup(struct xfs_da_args *args);
extern int xfs_dir2_node_removename(struct xfs_da_args *args);
extern int xfs_dir2_node_replace(struct xfs_da_args *args);
extern int xfs_dir2_node_trim_free(struct xfs_da_args *args, xfs_fileoff_t fo,
	int *rvalp);

/* xfs_dir2_sf.c */
extern xfs_ino_t xfs_dir2_sf_get_parent_ino(struct xfs_dir2_sf_hdr *sfp);
extern xfs_ino_t xfs_dir2_sfe_get_ino(struct xfs_dir2_sf_hdr *sfp,
	struct xfs_dir2_sf_entry *sfep);
extern int xfs_dir2_block_sfsize(struct xfs_inode *dp,
	struct xfs_dir2_data_hdr *block, struct xfs_dir2_sf_hdr *sfhp);
extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_dabuf *bp,
	int size, xfs_dir2_sf_hdr_t *sfhp);
extern int xfs_dir2_sf_addname(struct xfs_da_args *args);
extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
extern int xfs_dir2_sf_getdents(struct xfs_inode *dp, void *dirent,
	xfs_off_t *offset, filldir_t filldir);
extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
extern int xfs_dir2_sf_replace(struct xfs_da_args *args);

#endif /* __XFS_DIR2_PRIV_H__ */
@ -23,18 +23,16 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_error.h"
#include "xfs_dir2_data.h"
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
#include "xfs_dir2.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2_priv.h"
#include "xfs_trace.h"

/*
@ -59,6 +57,82 @@ static void xfs_dir2_sf_toino4(xfs_da_args_t *args);
static void xfs_dir2_sf_toino8(xfs_da_args_t *args);
#endif /* XFS_BIG_INUMS */

/*
 * Inode numbers in short-form directories can come in two versions,
 * either 4 bytes or 8 bytes wide.  These helpers deal with the
 * two forms transparently by looking at the headers i8count field.
 *
 * For 64-bit inode number the most significant byte must be zero.
 */
static xfs_ino_t
xfs_dir2_sf_get_ino(
	struct xfs_dir2_sf_hdr	*hdr,
	xfs_dir2_inou_t		*from)
{
	if (hdr->i8count)
		return get_unaligned_be64(&from->i8.i) & 0x00ffffffffffffffULL;
	else
		return get_unaligned_be32(&from->i4.i);
}

static void
xfs_dir2_sf_put_ino(
	struct xfs_dir2_sf_hdr	*hdr,
	xfs_dir2_inou_t		*to,
	xfs_ino_t		ino)
{
	ASSERT((ino & 0xff00000000000000ULL) == 0);

	if (hdr->i8count)
		put_unaligned_be64(ino, &to->i8.i);
	else
		put_unaligned_be32(ino, &to->i4.i);
}

xfs_ino_t
xfs_dir2_sf_get_parent_ino(
	struct xfs_dir2_sf_hdr	*hdr)
{
	return xfs_dir2_sf_get_ino(hdr, &hdr->parent);
}

static void
xfs_dir2_sf_put_parent_ino(
	struct xfs_dir2_sf_hdr	*hdr,
	xfs_ino_t		ino)
{
	xfs_dir2_sf_put_ino(hdr, &hdr->parent, ino);
}

/*
 * In short-form directory entries the inode numbers are stored at variable
 * offset behind the entry name. The inode numbers may only be accessed
 * through the helpers below.
 */
static xfs_dir2_inou_t *
xfs_dir2_sfe_inop(
	struct xfs_dir2_sf_entry *sfep)
{
	return (xfs_dir2_inou_t *)&sfep->name[sfep->namelen];
}

xfs_ino_t
xfs_dir2_sfe_get_ino(
	struct xfs_dir2_sf_hdr	*hdr,
	struct xfs_dir2_sf_entry *sfep)
{
	return xfs_dir2_sf_get_ino(hdr, xfs_dir2_sfe_inop(sfep));
}

static void
xfs_dir2_sfe_put_ino(
	struct xfs_dir2_sf_hdr	*hdr,
	struct xfs_dir2_sf_entry *sfep,
	xfs_ino_t		ino)
{
	xfs_dir2_sf_put_ino(hdr, xfs_dir2_sfe_inop(sfep), ino);
}

/*
|
||||
* Given a block directory (dp/block), calculate its size as a shortform (sf)
|
||||
* directory and a header for the sf directory, if it will fit it the
|
||||
@ -68,7 +142,7 @@ static void xfs_dir2_sf_toino8(xfs_da_args_t *args);
|
||||
int /* size for sf form */
|
||||
xfs_dir2_block_sfsize(
|
||||
xfs_inode_t *dp, /* incore inode pointer */
|
||||
xfs_dir2_block_t *block, /* block directory data */
|
||||
xfs_dir2_data_hdr_t *hdr, /* block directory data */
|
||||
xfs_dir2_sf_hdr_t *sfhp) /* output: header for sf form */
|
||||
{
|
||||
xfs_dir2_dataptr_t addr; /* data entry address */
|
||||
@ -88,7 +162,7 @@ xfs_dir2_block_sfsize(
|
||||
mp = dp->i_mount;
|
||||
|
||||
count = i8count = namelen = 0;
|
||||
btp = xfs_dir2_block_tail_p(mp, block);
|
||||
btp = xfs_dir2_block_tail_p(mp, hdr);
|
||||
blp = xfs_dir2_block_leaf_p(btp);
|
||||
|
||||
/*
|
||||
@ -101,7 +175,7 @@ xfs_dir2_block_sfsize(
|
||||
* Calculate the pointer to the entry at hand.
|
||||
*/
|
||||
dep = (xfs_dir2_data_entry_t *)
|
||||
((char *)block + xfs_dir2_dataptr_to_off(mp, addr));
|
||||
((char *)hdr + xfs_dir2_dataptr_to_off(mp, addr));
|
||||
/*
|
||||
* Detect . and .., so we can special-case them.
|
||||
* . is not included in sf directories.
|
||||
@ -138,7 +212,7 @@ xfs_dir2_block_sfsize(
|
||||
*/
|
||||
sfhp->count = count;
|
||||
sfhp->i8count = i8count;
|
||||
xfs_dir2_sf_put_inumber((xfs_dir2_sf_t *)sfhp, &parent, &sfhp->parent);
|
||||
xfs_dir2_sf_put_parent_ino(sfhp, parent);
|
||||
return size;
|
||||
}
|
||||
|
||||
@ -153,7 +227,7 @@ xfs_dir2_block_to_sf(
|
||||
int size, /* shortform directory size */
|
||||
xfs_dir2_sf_hdr_t *sfhp) /* shortform directory hdr */
|
||||
{
|
||||
xfs_dir2_block_t *block; /* block structure */
|
||||
xfs_dir2_data_hdr_t *hdr; /* block header */
|
||||
xfs_dir2_block_tail_t *btp; /* block tail pointer */
|
||||
xfs_dir2_data_entry_t *dep; /* data entry pointer */
|
||||
xfs_inode_t *dp; /* incore directory inode */
|
||||
@ -164,8 +238,7 @@ xfs_dir2_block_to_sf(
|
||||
xfs_mount_t *mp; /* filesystem mount point */
|
||||
char *ptr; /* current data pointer */
|
||||
xfs_dir2_sf_entry_t *sfep; /* shortform entry */
|
||||
xfs_dir2_sf_t *sfp; /* shortform structure */
|
||||
xfs_ino_t temp;
|
||||
xfs_dir2_sf_hdr_t *sfp; /* shortform directory header */
|
||||
|
||||
trace_xfs_dir2_block_to_sf(args);
|
||||
|
||||
@ -176,13 +249,14 @@ xfs_dir2_block_to_sf(
|
||||
* Make a copy of the block data, so we can shrink the inode
|
||||
* and add local data.
|
||||
*/
|
||||
block = kmem_alloc(mp->m_dirblksize, KM_SLEEP);
|
||||
memcpy(block, bp->data, mp->m_dirblksize);
|
||||
hdr = kmem_alloc(mp->m_dirblksize, KM_SLEEP);
|
||||
memcpy(hdr, bp->data, mp->m_dirblksize);
|
||||
logflags = XFS_ILOG_CORE;
|
||||
if ((error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp))) {
|
||||
ASSERT(error != ENOSPC);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* The buffer is now unconditionally gone, whether
|
||||
* xfs_dir2_shrink_inode worked or not.
|
||||
@ -198,14 +272,14 @@ xfs_dir2_block_to_sf(
|
||||
/*
|
||||
* Copy the header into the newly allocate local space.
|
||||
*/
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
memcpy(sfp, sfhp, xfs_dir2_sf_hdr_size(sfhp->i8count));
|
||||
dp->i_d.di_size = size;
|
||||
/*
|
||||
* Set up to loop over the block's entries.
|
||||
*/
|
||||
btp = xfs_dir2_block_tail_p(mp, block);
|
||||
ptr = (char *)block->u;
|
||||
btp = xfs_dir2_block_tail_p(mp, hdr);
|
||||
ptr = (char *)(hdr + 1);
|
||||
endptr = (char *)xfs_dir2_block_leaf_p(btp);
|
||||
sfep = xfs_dir2_sf_firstentry(sfp);
|
||||
/*
|
||||
@ -233,7 +307,7 @@ xfs_dir2_block_to_sf(
|
||||
else if (dep->namelen == 2 &&
|
||||
dep->name[0] == '.' && dep->name[1] == '.')
|
||||
ASSERT(be64_to_cpu(dep->inumber) ==
|
||||
xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent));
|
||||
xfs_dir2_sf_get_parent_ino(sfp));
|
||||
/*
|
||||
* Normal entry, copy it into shortform.
|
||||
*/
|
||||
@ -241,11 +315,11 @@ xfs_dir2_block_to_sf(
|
||||
sfep->namelen = dep->namelen;
|
||||
xfs_dir2_sf_put_offset(sfep,
|
||||
(xfs_dir2_data_aoff_t)
|
||||
((char *)dep - (char *)block));
|
||||
((char *)dep - (char *)hdr));
|
||||
memcpy(sfep->name, dep->name, dep->namelen);
|
||||
temp = be64_to_cpu(dep->inumber);
|
||||
xfs_dir2_sf_put_inumber(sfp, &temp,
|
||||
xfs_dir2_sf_inumberp(sfep));
|
||||
xfs_dir2_sfe_put_ino(sfp, sfep,
|
||||
be64_to_cpu(dep->inumber));
|
||||
|
||||
sfep = xfs_dir2_sf_nextentry(sfp, sfep);
|
||||
}
|
||||
ptr += xfs_dir2_data_entsize(dep->namelen);
|
||||
@ -254,7 +328,7 @@ xfs_dir2_block_to_sf(
|
||||
xfs_dir2_sf_check(args);
|
||||
out:
|
||||
xfs_trans_log_inode(args->trans, dp, logflags);
|
||||
kmem_free(block);
|
||||
kmem_free(hdr);
|
||||
return error;
|
||||
}
|
||||
|
||||
@ -277,7 +351,7 @@ xfs_dir2_sf_addname(
|
||||
xfs_dir2_data_aoff_t offset = 0; /* offset for new entry */
|
||||
int old_isize; /* di_size before adding name */
|
||||
int pick; /* which algorithm to use */
|
||||
xfs_dir2_sf_t *sfp; /* shortform structure */
|
||||
xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
|
||||
xfs_dir2_sf_entry_t *sfep = NULL; /* shortform entry */
|
||||
|
||||
trace_xfs_dir2_sf_addname(args);
|
||||
@ -294,19 +368,19 @@ xfs_dir2_sf_addname(
|
||||
}
|
||||
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
|
||||
ASSERT(dp->i_df.if_u1.if_data != NULL);
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count));
|
||||
/*
|
||||
* Compute entry (and change in) size.
|
||||
*/
|
||||
add_entsize = xfs_dir2_sf_entsize_byname(sfp, args->namelen);
|
||||
add_entsize = xfs_dir2_sf_entsize(sfp, args->namelen);
|
||||
incr_isize = add_entsize;
|
||||
objchange = 0;
|
||||
#if XFS_BIG_INUMS
|
||||
/*
|
||||
* Do we have to change to 8 byte inodes?
|
||||
*/
|
||||
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->hdr.i8count == 0) {
|
||||
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->i8count == 0) {
|
||||
/*
|
||||
* Yes, adjust the entry size and the total size.
|
||||
*/
|
||||
@ -314,7 +388,7 @@ xfs_dir2_sf_addname(
|
||||
(uint)sizeof(xfs_dir2_ino8_t) -
|
||||
(uint)sizeof(xfs_dir2_ino4_t);
|
||||
incr_isize +=
|
||||
(sfp->hdr.count + 2) *
|
||||
(sfp->count + 2) *
|
||||
((uint)sizeof(xfs_dir2_ino8_t) -
|
||||
(uint)sizeof(xfs_dir2_ino4_t));
|
||||
objchange = 1;
|
||||
@ -384,21 +458,21 @@ xfs_dir2_sf_addname_easy(
|
||||
{
|
||||
int byteoff; /* byte offset in sf dir */
|
||||
xfs_inode_t *dp; /* incore directory inode */
|
||||
xfs_dir2_sf_t *sfp; /* shortform structure */
|
||||
xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
|
||||
|
||||
dp = args->dp;
|
||||
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
byteoff = (int)((char *)sfep - (char *)sfp);
|
||||
/*
|
||||
* Grow the in-inode space.
|
||||
*/
|
||||
xfs_idata_realloc(dp, xfs_dir2_sf_entsize_byname(sfp, args->namelen),
|
||||
xfs_idata_realloc(dp, xfs_dir2_sf_entsize(sfp, args->namelen),
|
||||
XFS_DATA_FORK);
|
||||
/*
|
||||
* Need to set up again due to realloc of the inode data.
|
||||
*/
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
sfep = (xfs_dir2_sf_entry_t *)((char *)sfp + byteoff);
|
||||
/*
|
||||
* Fill in the new entry.
|
||||
@ -406,15 +480,14 @@ xfs_dir2_sf_addname_easy(
|
||||
sfep->namelen = args->namelen;
|
||||
xfs_dir2_sf_put_offset(sfep, offset);
|
||||
memcpy(sfep->name, args->name, sfep->namelen);
|
||||
xfs_dir2_sf_put_inumber(sfp, &args->inumber,
|
||||
xfs_dir2_sf_inumberp(sfep));
|
||||
xfs_dir2_sfe_put_ino(sfp, sfep, args->inumber);
|
||||
/*
|
||||
* Update the header and inode.
|
||||
*/
|
||||
sfp->hdr.count++;
|
||||
sfp->count++;
|
||||
#if XFS_BIG_INUMS
|
||||
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM)
|
||||
sfp->hdr.i8count++;
|
||||
sfp->i8count++;
|
||||
#endif
|
||||
dp->i_d.di_size = new_isize;
|
||||
xfs_dir2_sf_check(args);
|
||||
@ -444,19 +517,19 @@ xfs_dir2_sf_addname_hard(
|
||||
xfs_dir2_data_aoff_t offset; /* current offset value */
|
||||
int old_isize; /* previous di_size */
|
||||
xfs_dir2_sf_entry_t *oldsfep; /* entry in original dir */
|
||||
xfs_dir2_sf_t *oldsfp; /* original shortform dir */
|
||||
xfs_dir2_sf_hdr_t *oldsfp; /* original shortform dir */
|
||||
xfs_dir2_sf_entry_t *sfep; /* entry in new dir */
|
||||
xfs_dir2_sf_t *sfp; /* new shortform dir */
|
||||
xfs_dir2_sf_hdr_t *sfp; /* new shortform dir */
|
||||
|
||||
/*
|
||||
* Copy the old directory to the stack buffer.
|
||||
*/
|
||||
dp = args->dp;
|
||||
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
old_isize = (int)dp->i_d.di_size;
|
||||
buf = kmem_alloc(old_isize, KM_SLEEP);
|
||||
oldsfp = (xfs_dir2_sf_t *)buf;
|
||||
oldsfp = (xfs_dir2_sf_hdr_t *)buf;
|
||||
memcpy(oldsfp, sfp, old_isize);
|
||||
/*
|
||||
* Loop over the old directory finding the place we're going
|
||||
@ -485,7 +558,7 @@ xfs_dir2_sf_addname_hard(
|
||||
/*
|
||||
* Reset the pointer since the buffer was reallocated.
|
||||
*/
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
/*
|
||||
* Copy the first part of the directory, including the header.
|
||||
*/
|
||||
@ -498,12 +571,11 @@ xfs_dir2_sf_addname_hard(
|
||||
sfep->namelen = args->namelen;
|
||||
xfs_dir2_sf_put_offset(sfep, offset);
|
||||
memcpy(sfep->name, args->name, sfep->namelen);
|
||||
xfs_dir2_sf_put_inumber(sfp, &args->inumber,
|
||||
xfs_dir2_sf_inumberp(sfep));
|
||||
sfp->hdr.count++;
|
||||
xfs_dir2_sfe_put_ino(sfp, sfep, args->inumber);
|
||||
sfp->count++;
|
||||
#if XFS_BIG_INUMS
|
||||
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange)
|
||||
sfp->hdr.i8count++;
|
||||
sfp->i8count++;
|
||||
#endif
|
||||
/*
|
||||
* If there's more left to copy, do that.
|
||||
@ -537,14 +609,14 @@ xfs_dir2_sf_addname_pick(
|
||||
xfs_mount_t *mp; /* filesystem mount point */
|
||||
xfs_dir2_data_aoff_t offset; /* data block offset */
|
||||
xfs_dir2_sf_entry_t *sfep; /* shortform entry */
|
||||
xfs_dir2_sf_t *sfp; /* shortform structure */
|
||||
xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
|
||||
int size; /* entry's data size */
|
||||
int used; /* data bytes used */
|
||||
|
||||
dp = args->dp;
|
||||
mp = dp->i_mount;
|
||||
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
size = xfs_dir2_data_entsize(args->namelen);
|
||||
offset = XFS_DIR2_DATA_FIRST_OFFSET;
|
||||
sfep = xfs_dir2_sf_firstentry(sfp);
|
||||
@ -554,7 +626,7 @@ xfs_dir2_sf_addname_pick(
|
||||
* Keep track of data offset and whether we've seen a place
|
||||
* to insert the new entry.
|
||||
*/
|
||||
for (i = 0; i < sfp->hdr.count; i++) {
|
||||
for (i = 0; i < sfp->count; i++) {
|
||||
if (!holefit)
|
||||
holefit = offset + size <= xfs_dir2_sf_get_offset(sfep);
|
||||
offset = xfs_dir2_sf_get_offset(sfep) +
|
||||
@ -566,7 +638,7 @@ xfs_dir2_sf_addname_pick(
|
||||
* was a data block (block form directory).
|
||||
*/
|
||||
used = offset +
|
||||
(sfp->hdr.count + 3) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
|
||||
(sfp->count + 3) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
|
||||
(uint)sizeof(xfs_dir2_block_tail_t);
|
||||
/*
|
||||
* If it won't fit in a block form then we can't insert it,
|
||||
@ -612,30 +684,30 @@ xfs_dir2_sf_check(
|
||||
xfs_ino_t ino; /* entry inode number */
|
||||
int offset; /* data offset */
|
||||
xfs_dir2_sf_entry_t *sfep; /* shortform dir entry */
|
||||
xfs_dir2_sf_t *sfp; /* shortform structure */
|
||||
xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
|
||||
|
||||
dp = args->dp;
|
||||
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
offset = XFS_DIR2_DATA_FIRST_OFFSET;
|
||||
ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
|
||||
ino = xfs_dir2_sf_get_parent_ino(sfp);
|
||||
i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
|
||||
|
||||
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
|
||||
i < sfp->hdr.count;
|
||||
i < sfp->count;
|
||||
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
|
||||
ASSERT(xfs_dir2_sf_get_offset(sfep) >= offset);
|
||||
ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
|
||||
ino = xfs_dir2_sfe_get_ino(sfp, sfep);
|
||||
i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
|
||||
offset =
|
||||
xfs_dir2_sf_get_offset(sfep) +
|
||||
xfs_dir2_data_entsize(sfep->namelen);
|
||||
}
|
||||
ASSERT(i8count == sfp->hdr.i8count);
|
||||
ASSERT(i8count == sfp->i8count);
|
||||
ASSERT(XFS_BIG_INUMS || i8count == 0);
|
||||
ASSERT((char *)sfep - (char *)sfp == dp->i_d.di_size);
|
||||
ASSERT(offset +
|
||||
(sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
|
||||
(sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
|
||||
(uint)sizeof(xfs_dir2_block_tail_t) <=
|
||||
dp->i_mount->m_dirblksize);
|
||||
}
|
||||
@ -651,7 +723,7 @@ xfs_dir2_sf_create(
|
||||
{
|
||||
xfs_inode_t *dp; /* incore directory inode */
|
||||
int i8count; /* parent inode is an 8-byte number */
|
||||
xfs_dir2_sf_t *sfp; /* shortform structure */
|
||||
xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
|
||||
int size; /* directory size */
|
||||
|
||||
trace_xfs_dir2_sf_create(args);
|
||||
@ -681,13 +753,13 @@ xfs_dir2_sf_create(
|
||||
/*
|
||||
* Fill in the header,
|
||||
*/
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
sfp->hdr.i8count = i8count;
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
sfp->i8count = i8count;
|
||||
/*
|
||||
* Now can put in the inode number, since i8count is set.
|
||||
*/
|
||||
xfs_dir2_sf_put_inumber(sfp, &pino, &sfp->hdr.parent);
|
||||
sfp->hdr.count = 0;
|
||||
xfs_dir2_sf_put_parent_ino(sfp, pino);
|
||||
sfp->count = 0;
|
||||
dp->i_d.di_size = size;
|
||||
xfs_dir2_sf_check(args);
|
||||
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
|
||||
@ -705,7 +777,7 @@ xfs_dir2_sf_getdents(
|
||||
xfs_mount_t *mp; /* filesystem mount point */
|
||||
xfs_dir2_dataptr_t off; /* current entry's offset */
|
||||
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
|
||||
xfs_dir2_sf_t *sfp; /* shortform structure */
|
||||
xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
|
||||
xfs_dir2_dataptr_t dot_offset;
|
||||
xfs_dir2_dataptr_t dotdot_offset;
|
||||
xfs_ino_t ino;
|
||||
@ -724,9 +796,9 @@ xfs_dir2_sf_getdents(
|
||||
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
|
||||
ASSERT(dp->i_df.if_u1.if_data != NULL);
|
||||
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
|
||||
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
|
||||
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count));
|
||||
|
||||
/*
|
||||
* If the block number in the offset is out of range, we're done.
|
||||
@ -759,7 +831,7 @@ xfs_dir2_sf_getdents(
|
||||
* Put .. entry unless we're starting past it.
|
||||
*/
|
||||
if (*offset <= dotdot_offset) {
|
||||
ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
|
||||
ino = xfs_dir2_sf_get_parent_ino(sfp);
|
||||
if (filldir(dirent, "..", 2, dotdot_offset & 0x7fffffff, ino, DT_DIR)) {
|
||||
*offset = dotdot_offset & 0x7fffffff;
|
||||
return 0;
|
||||
@ -770,7 +842,7 @@ xfs_dir2_sf_getdents(
|
||||
* Loop while there are more entries and put'ing works.
|
||||
*/
|
||||
sfep = xfs_dir2_sf_firstentry(sfp);
|
||||
for (i = 0; i < sfp->hdr.count; i++) {
|
||||
for (i = 0; i < sfp->count; i++) {
|
||||
off = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
|
||||
xfs_dir2_sf_get_offset(sfep));
|
||||
|
||||
@ -779,7 +851,7 @@ xfs_dir2_sf_getdents(
|
||||
continue;
|
||||
}
|
||||
|
||||
ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
|
||||
ino = xfs_dir2_sfe_get_ino(sfp, sfep);
|
||||
if (filldir(dirent, (char *)sfep->name, sfep->namelen,
|
||||
off & 0x7fffffff, ino, DT_UNKNOWN)) {
|
||||
*offset = off & 0x7fffffff;
|
||||
@ -805,7 +877,7 @@ xfs_dir2_sf_lookup(
|
||||
int i; /* entry index */
|
||||
int error;
|
||||
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
|
||||
xfs_dir2_sf_t *sfp; /* shortform structure */
|
||||
xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
|
||||
enum xfs_dacmp cmp; /* comparison result */
|
||||
xfs_dir2_sf_entry_t *ci_sfep; /* case-insens. entry */
|
||||
|
||||
@ -824,8 +896,8 @@ xfs_dir2_sf_lookup(
|
||||
}
|
||||
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
|
||||
ASSERT(dp->i_df.if_u1.if_data != NULL);
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count));
|
||||
/*
|
||||
* Special case for .
|
||||
*/
|
||||
@ -839,7 +911,7 @@ xfs_dir2_sf_lookup(
|
||||
*/
|
||||
if (args->namelen == 2 &&
|
||||
args->name[0] == '.' && args->name[1] == '.') {
|
||||
args->inumber = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
|
||||
args->inumber = xfs_dir2_sf_get_parent_ino(sfp);
|
||||
args->cmpresult = XFS_CMP_EXACT;
|
||||
return XFS_ERROR(EEXIST);
|
||||
}
|
||||
@ -847,7 +919,7 @@ xfs_dir2_sf_lookup(
|
||||
* Loop over all the entries trying to match ours.
|
||||
*/
|
||||
ci_sfep = NULL;
|
||||
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->hdr.count;
|
||||
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count;
|
||||
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
|
||||
/*
|
||||
* Compare name and if it's an exact match, return the inode
|
||||
@ -858,8 +930,7 @@ xfs_dir2_sf_lookup(
|
||||
sfep->namelen);
|
||||
if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
|
||||
args->cmpresult = cmp;
|
||||
args->inumber = xfs_dir2_sf_get_inumber(sfp,
|
||||
xfs_dir2_sf_inumberp(sfep));
|
||||
args->inumber = xfs_dir2_sfe_get_ino(sfp, sfep);
|
||||
if (cmp == XFS_CMP_EXACT)
|
||||
return XFS_ERROR(EEXIST);
|
||||
ci_sfep = sfep;
|
||||
@ -891,7 +962,7 @@ xfs_dir2_sf_removename(
|
||||
int newsize; /* new inode size */
|
||||
int oldsize; /* old inode size */
|
||||
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
|
||||
xfs_dir2_sf_t *sfp; /* shortform structure */
|
||||
xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
|
||||
|
||||
trace_xfs_dir2_sf_removename(args);
|
||||
|
||||
@ -908,32 +979,31 @@ xfs_dir2_sf_removename(
|
||||
}
|
||||
ASSERT(dp->i_df.if_bytes == oldsize);
|
||||
ASSERT(dp->i_df.if_u1.if_data != NULL);
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
ASSERT(oldsize >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
ASSERT(oldsize >= xfs_dir2_sf_hdr_size(sfp->i8count));
|
||||
/*
|
||||
* Loop over the old directory entries.
|
||||
* Find the one we're deleting.
|
||||
*/
|
||||
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->hdr.count;
|
||||
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count;
|
||||
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
|
||||
if (xfs_da_compname(args, sfep->name, sfep->namelen) ==
|
||||
XFS_CMP_EXACT) {
|
||||
ASSERT(xfs_dir2_sf_get_inumber(sfp,
|
||||
xfs_dir2_sf_inumberp(sfep)) ==
|
||||
args->inumber);
|
||||
ASSERT(xfs_dir2_sfe_get_ino(sfp, sfep) ==
|
||||
args->inumber);
|
||||
break;
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Didn't find it.
|
||||
*/
|
||||
if (i == sfp->hdr.count)
|
||||
if (i == sfp->count)
|
||||
return XFS_ERROR(ENOENT);
|
||||
/*
|
||||
* Calculate sizes.
|
||||
*/
|
||||
byteoff = (int)((char *)sfep - (char *)sfp);
|
||||
entsize = xfs_dir2_sf_entsize_byname(sfp, args->namelen);
|
||||
entsize = xfs_dir2_sf_entsize(sfp, args->namelen);
|
||||
newsize = oldsize - entsize;
|
||||
/*
|
||||
* Copy the part if any after the removed entry, sliding it down.
|
||||
@ -944,22 +1014,22 @@ xfs_dir2_sf_removename(
|
||||
/*
|
||||
* Fix up the header and file size.
|
||||
*/
|
||||
sfp->hdr.count--;
|
||||
sfp->count--;
|
||||
dp->i_d.di_size = newsize;
|
||||
/*
|
||||
* Reallocate, making it smaller.
|
||||
*/
|
||||
xfs_idata_realloc(dp, newsize - oldsize, XFS_DATA_FORK);
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
#if XFS_BIG_INUMS
|
||||
/*
|
||||
* Are we changing inode number size?
|
||||
*/
|
||||
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM) {
|
||||
if (sfp->hdr.i8count == 1)
|
||||
if (sfp->i8count == 1)
|
||||
xfs_dir2_sf_toino4(args);
|
||||
else
|
||||
sfp->hdr.i8count--;
|
||||
sfp->i8count--;
|
||||
}
|
||||
#endif
|
||||
xfs_dir2_sf_check(args);
|
||||
@ -983,7 +1053,7 @@ xfs_dir2_sf_replace(
|
||||
int i8elevated; /* sf_toino8 set i8count=1 */
|
||||
#endif
|
||||
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
|
||||
xfs_dir2_sf_t *sfp; /* shortform structure */
|
||||
xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
|
||||
|
||||
trace_xfs_dir2_sf_replace(args);
|
||||
|
||||
@ -999,19 +1069,19 @@ xfs_dir2_sf_replace(
|
||||
}
|
||||
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
|
||||
ASSERT(dp->i_df.if_u1.if_data != NULL);
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count));
|
||||
#if XFS_BIG_INUMS
|
||||
/*
|
||||
* New inode number is large, and need to convert to 8-byte inodes.
|
||||
*/
|
||||
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->hdr.i8count == 0) {
|
||||
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->i8count == 0) {
|
||||
int error; /* error return value */
|
||||
int newsize; /* new inode size */
|
||||
|
||||
newsize =
|
||||
dp->i_df.if_bytes +
|
||||
(sfp->hdr.count + 1) *
|
||||
(sfp->count + 1) *
|
||||
((uint)sizeof(xfs_dir2_ino8_t) -
|
||||
(uint)sizeof(xfs_dir2_ino4_t));
|
||||
/*
|
||||
@ -1029,7 +1099,7 @@ xfs_dir2_sf_replace(
|
||||
*/
|
||||
xfs_dir2_sf_toino8(args);
|
||||
i8elevated = 1;
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
} else
|
||||
i8elevated = 0;
|
||||
#endif
|
||||
@ -1040,34 +1110,32 @@ xfs_dir2_sf_replace(
|
||||
if (args->namelen == 2 &&
|
||||
args->name[0] == '.' && args->name[1] == '.') {
|
||||
#if XFS_BIG_INUMS || defined(DEBUG)
|
||||
ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
|
||||
ino = xfs_dir2_sf_get_parent_ino(sfp);
|
||||
ASSERT(args->inumber != ino);
|
||||
#endif
|
||||
xfs_dir2_sf_put_inumber(sfp, &args->inumber, &sfp->hdr.parent);
|
||||
xfs_dir2_sf_put_parent_ino(sfp, args->inumber);
|
||||
}
|
||||
/*
|
||||
* Normal entry, look for the name.
|
||||
*/
|
||||
else {
|
||||
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
|
||||
i < sfp->hdr.count;
|
||||
i < sfp->count;
|
||||
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
|
||||
if (xfs_da_compname(args, sfep->name, sfep->namelen) ==
|
||||
XFS_CMP_EXACT) {
|
||||
#if XFS_BIG_INUMS || defined(DEBUG)
|
||||
ino = xfs_dir2_sf_get_inumber(sfp,
|
||||
xfs_dir2_sf_inumberp(sfep));
|
||||
ino = xfs_dir2_sfe_get_ino(sfp, sfep);
|
||||
ASSERT(args->inumber != ino);
|
||||
#endif
|
||||
xfs_dir2_sf_put_inumber(sfp, &args->inumber,
|
||||
xfs_dir2_sf_inumberp(sfep));
|
||||
xfs_dir2_sfe_put_ino(sfp, sfep, args->inumber);
|
||||
break;
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Didn't find it.
|
||||
*/
|
||||
if (i == sfp->hdr.count) {
|
||||
if (i == sfp->count) {
|
||||
ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
|
||||
#if XFS_BIG_INUMS
|
||||
if (i8elevated)
|
||||
@ -1085,10 +1153,10 @@ xfs_dir2_sf_replace(
|
||||
/*
|
||||
* And the old count was one, so need to convert to small.
|
||||
*/
|
||||
if (sfp->hdr.i8count == 1)
|
||||
if (sfp->i8count == 1)
|
||||
xfs_dir2_sf_toino4(args);
|
||||
else
|
||||
sfp->hdr.i8count--;
|
||||
sfp->i8count--;
|
||||
}
|
||||
/*
|
||||
* See if the old number was small, the new number is large.
|
||||
@ -1099,9 +1167,9 @@ xfs_dir2_sf_replace(
|
||||
* add to the i8count unless we just converted to 8-byte
|
||||
* inodes (which does an implied i8count = 1)
|
||||
*/
|
||||
ASSERT(sfp->hdr.i8count != 0);
|
||||
ASSERT(sfp->i8count != 0);
|
||||
if (!i8elevated)
|
||||
sfp->hdr.i8count++;
|
||||
sfp->i8count++;
|
||||
}
|
||||
#endif
|
||||
xfs_dir2_sf_check(args);
|
||||
@ -1121,13 +1189,12 @@ xfs_dir2_sf_toino4(
|
||||
char *buf; /* old dir's buffer */
|
||||
xfs_inode_t *dp; /* incore directory inode */
|
||||
int i; /* entry index */
|
||||
xfs_ino_t ino; /* entry inode number */
|
||||
int newsize; /* new inode size */
|
||||
xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */
|
||||
xfs_dir2_sf_t *oldsfp; /* old sf directory */
|
||||
xfs_dir2_sf_hdr_t *oldsfp; /* old sf directory */
|
||||
int oldsize; /* old inode size */
|
||||
xfs_dir2_sf_entry_t *sfep; /* new sf entry */
|
||||
xfs_dir2_sf_t *sfp; /* new sf directory */
|
||||
xfs_dir2_sf_hdr_t *sfp; /* new sf directory */
|
||||
|
||||
trace_xfs_dir2_sf_toino4(args);
|
||||
|
||||
@ -1140,44 +1207,42 @@ xfs_dir2_sf_toino4(
|
||||
*/
|
||||
oldsize = dp->i_df.if_bytes;
|
||||
buf = kmem_alloc(oldsize, KM_SLEEP);
|
||||
oldsfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
ASSERT(oldsfp->hdr.i8count == 1);
|
||||
oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
ASSERT(oldsfp->i8count == 1);
|
||||
memcpy(buf, oldsfp, oldsize);
|
||||
/*
|
||||
* Compute the new inode size.
|
||||
*/
|
||||
newsize =
|
||||
oldsize -
|
||||
(oldsfp->hdr.count + 1) *
|
||||
(oldsfp->count + 1) *
|
||||
((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t));
|
||||
xfs_idata_realloc(dp, -oldsize, XFS_DATA_FORK);
|
||||
xfs_idata_realloc(dp, newsize, XFS_DATA_FORK);
|
||||
/*
|
||||
* Reset our pointers, the data has moved.
|
||||
*/
|
||||
oldsfp = (xfs_dir2_sf_t *)buf;
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
oldsfp = (xfs_dir2_sf_hdr_t *)buf;
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
/*
|
||||
* Fill in the new header.
|
||||
*/
|
||||
sfp->hdr.count = oldsfp->hdr.count;
|
||||
sfp->hdr.i8count = 0;
|
||||
ino = xfs_dir2_sf_get_inumber(oldsfp, &oldsfp->hdr.parent);
|
||||
xfs_dir2_sf_put_inumber(sfp, &ino, &sfp->hdr.parent);
|
||||
sfp->count = oldsfp->count;
|
||||
sfp->i8count = 0;
|
||||
xfs_dir2_sf_put_parent_ino(sfp, xfs_dir2_sf_get_parent_ino(oldsfp));
|
||||
/*
|
||||
* Copy the entries field by field.
|
||||
*/
|
||||
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp),
|
||||
oldsfep = xfs_dir2_sf_firstentry(oldsfp);
|
||||
i < sfp->hdr.count;
|
||||
i < sfp->count;
|
||||
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep),
|
||||
oldsfep = xfs_dir2_sf_nextentry(oldsfp, oldsfep)) {
|
||||
sfep->namelen = oldsfep->namelen;
|
||||
sfep->offset = oldsfep->offset;
|
||||
memcpy(sfep->name, oldsfep->name, sfep->namelen);
|
||||
ino = xfs_dir2_sf_get_inumber(oldsfp,
|
||||
xfs_dir2_sf_inumberp(oldsfep));
|
||||
xfs_dir2_sf_put_inumber(sfp, &ino, xfs_dir2_sf_inumberp(sfep));
|
||||
xfs_dir2_sfe_put_ino(sfp, sfep,
|
||||
xfs_dir2_sfe_get_ino(oldsfp, oldsfep));
|
||||
}
|
||||
/*
|
||||
* Clean up the inode.
|
||||
@ -1199,13 +1264,12 @@ xfs_dir2_sf_toino8(
|
||||
char *buf; /* old dir's buffer */
|
||||
xfs_inode_t *dp; /* incore directory inode */
|
||||
int i; /* entry index */
|
||||
xfs_ino_t ino; /* entry inode number */
|
||||
int newsize; /* new inode size */
|
||||
xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */
|
||||
xfs_dir2_sf_t *oldsfp; /* old sf directory */
|
||||
xfs_dir2_sf_hdr_t *oldsfp; /* old sf directory */
|
||||
int oldsize; /* old inode size */
|
||||
xfs_dir2_sf_entry_t *sfep; /* new sf entry */
|
||||
xfs_dir2_sf_t *sfp; /* new sf directory */
|
||||
xfs_dir2_sf_hdr_t *sfp; /* new sf directory */
|
||||
|
||||
trace_xfs_dir2_sf_toino8(args);
|
||||
|
||||
@ -1218,44 +1282,42 @@ xfs_dir2_sf_toino8(
|
||||
*/
|
||||
oldsize = dp->i_df.if_bytes;
|
||||
buf = kmem_alloc(oldsize, KM_SLEEP);
|
||||
oldsfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
ASSERT(oldsfp->hdr.i8count == 0);
|
||||
oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
ASSERT(oldsfp->i8count == 0);
|
||||
memcpy(buf, oldsfp, oldsize);
|
||||
/*
|
||||
* Compute the new inode size.
|
||||
*/
|
||||
newsize =
|
||||
oldsize +
|
||||
(oldsfp->hdr.count + 1) *
|
||||
(oldsfp->count + 1) *
|
||||
((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t));
|
||||
xfs_idata_realloc(dp, -oldsize, XFS_DATA_FORK);
|
||||
xfs_idata_realloc(dp, newsize, XFS_DATA_FORK);
|
||||
/*
|
||||
* Reset our pointers, the data has moved.
|
||||
*/
|
||||
oldsfp = (xfs_dir2_sf_t *)buf;
|
||||
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
|
||||
oldsfp = (xfs_dir2_sf_hdr_t *)buf;
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
/*
|
||||
* Fill in the new header.
|
||||
*/
|
||||
sfp->hdr.count = oldsfp->hdr.count;
|
||||
sfp->hdr.i8count = 1;
|
||||
ino = xfs_dir2_sf_get_inumber(oldsfp, &oldsfp->hdr.parent);
|
||||
xfs_dir2_sf_put_inumber(sfp, &ino, &sfp->hdr.parent);
|
||||
sfp->count = oldsfp->count;
|
||||
sfp->i8count = 1;
|
||||
xfs_dir2_sf_put_parent_ino(sfp, xfs_dir2_sf_get_parent_ino(oldsfp));
|
||||
/*
|
||||
* Copy the entries field by field.
|
||||
*/
|
||||
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp),
|
||||
oldsfep = xfs_dir2_sf_firstentry(oldsfp);
|
||||
i < sfp->hdr.count;
|
||||
i < sfp->count;
|
||||
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep),
|
||||
oldsfep = xfs_dir2_sf_nextentry(oldsfp, oldsfep)) {
|
||||
sfep->namelen = oldsfep->namelen;
|
||||
sfep->offset = oldsfep->offset;
|
||||
memcpy(sfep->name, oldsfep->name, sfep->namelen);
|
||||
ino = xfs_dir2_sf_get_inumber(oldsfp,
|
||||
xfs_dir2_sf_inumberp(oldsfep));
|
||||
xfs_dir2_sf_put_inumber(sfp, &ino, xfs_dir2_sf_inumberp(sfep));
|
||||
xfs_dir2_sfe_put_ino(sfp, sfep,
|
||||
xfs_dir2_sfe_get_ino(oldsfp, oldsfep));
|
||||
}
|
||||
/*
|
||||
* Clean up the inode.
|
||||
|
@ -1,171 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it would be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
#ifndef __XFS_DIR2_SF_H__
|
||||
#define __XFS_DIR2_SF_H__
|
||||
|
||||
/*
|
||||
* Directory layout when stored internal to an inode.
|
||||
*
|
||||
* Small directories are packed as tightly as possible so as to
|
||||
* fit into the literal area of the inode.
|
||||
*/
|
||||
|
||||
struct uio;
|
||||
struct xfs_dabuf;
|
||||
struct xfs_da_args;
|
||||
struct xfs_dir2_block;
|
||||
struct xfs_inode;
|
||||
struct xfs_mount;
|
||||
struct xfs_trans;
|
||||
|
||||
/*
|
||||
* Inode number stored as 8 8-bit values.
|
||||
*/
|
||||
typedef struct { __uint8_t i[8]; } xfs_dir2_ino8_t;
|
||||
|
||||
/*
|
||||
* Inode number stored as 4 8-bit values.
|
||||
* Works a lot of the time, when all the inode numbers in a directory
|
||||
* fit in 32 bits.
|
||||
*/
|
||||
typedef struct { __uint8_t i[4]; } xfs_dir2_ino4_t;
|
||||
|
||||
typedef union {
|
||||
xfs_dir2_ino8_t i8;
|
||||
xfs_dir2_ino4_t i4;
|
||||
} xfs_dir2_inou_t;
|
||||
#define XFS_DIR2_MAX_SHORT_INUM ((xfs_ino_t)0xffffffffULL)
|
||||
|
||||
/*
|
||||
* Normalized offset (in a data block) of the entry, really xfs_dir2_data_off_t.
|
||||
* Only need 16 bits, this is the byte offset into the single block form.
|
||||
*/
|
||||
typedef struct { __uint8_t i[2]; } __arch_pack xfs_dir2_sf_off_t;
|
||||
|
||||
/*
|
||||
* The parent directory has a dedicated field, and the self-pointer must
|
||||
* be calculated on the fly.
|
||||
*
|
||||
* Entries are packed toward the top as tightly as possible. The header
|
||||
* and the elements must be memcpy'd out into a work area to get correct
|
||||
* alignment for the inode number fields.
|
||||
*/
|
||||
typedef struct xfs_dir2_sf_hdr {
|
||||
__uint8_t count; /* count of entries */
|
||||
__uint8_t i8count; /* count of 8-byte inode #s */
|
||||
xfs_dir2_inou_t parent; /* parent dir inode number */
|
||||
} __arch_pack xfs_dir2_sf_hdr_t;
|
||||
|
||||
typedef struct xfs_dir2_sf_entry {
|
||||
__uint8_t namelen; /* actual name length */
|
||||
xfs_dir2_sf_off_t offset; /* saved offset */
|
||||
__uint8_t name[1]; /* name, variable size */
|
||||
xfs_dir2_inou_t inumber; /* inode number, var. offset */
|
||||
} __arch_pack xfs_dir2_sf_entry_t;
|
||||
|
||||
typedef struct xfs_dir2_sf {
|
||||
xfs_dir2_sf_hdr_t hdr; /* shortform header */
|
||||
xfs_dir2_sf_entry_t list[1]; /* shortform entries */
|
||||
} xfs_dir2_sf_t;
|
||||
|
||||
static inline int xfs_dir2_sf_hdr_size(int i8count)
|
||||
{
|
||||
return ((uint)sizeof(xfs_dir2_sf_hdr_t) - \
|
||||
((i8count) == 0) * \
|
||||
((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)));
|
||||
}
|
||||
|
||||
static inline xfs_dir2_inou_t *xfs_dir2_sf_inumberp(xfs_dir2_sf_entry_t *sfep)
|
||||
{
|
||||
return (xfs_dir2_inou_t *)&(sfep)->name[(sfep)->namelen];
|
||||
}
|
||||
|
||||
static inline xfs_intino_t
|
||||
xfs_dir2_sf_get_inumber(xfs_dir2_sf_t *sfp, xfs_dir2_inou_t *from)
|
||||
{
|
||||
return ((sfp)->hdr.i8count == 0 ? \
|
||||
(xfs_intino_t)XFS_GET_DIR_INO4((from)->i4) : \
|
||||
(xfs_intino_t)XFS_GET_DIR_INO8((from)->i8));
|
||||
}
|
||||
|
||||
static inline void xfs_dir2_sf_put_inumber(xfs_dir2_sf_t *sfp, xfs_ino_t *from,
|
||||
xfs_dir2_inou_t *to)
|
||||
{
|
||||
if ((sfp)->hdr.i8count == 0)
|
||||
XFS_PUT_DIR_INO4(*(from), (to)->i4);
|
||||
else
|
||||
XFS_PUT_DIR_INO8(*(from), (to)->i8);
|
||||
}
|
||||
|
||||
static inline xfs_dir2_data_aoff_t
|
||||
xfs_dir2_sf_get_offset(xfs_dir2_sf_entry_t *sfep)
|
||||
{
|
||||
return INT_GET_UNALIGNED_16_BE(&(sfep)->offset.i);
|
||||
}
|
||||
|
||||
static inline void
|
||||
xfs_dir2_sf_put_offset(xfs_dir2_sf_entry_t *sfep, xfs_dir2_data_aoff_t off)
|
||||
{
|
||||
INT_SET_UNALIGNED_16_BE(&(sfep)->offset.i, off);
|
||||
}
|
||||
|
||||
static inline int xfs_dir2_sf_entsize_byname(xfs_dir2_sf_t *sfp, int len)
|
||||
{
|
||||
return ((uint)sizeof(xfs_dir2_sf_entry_t) - 1 + (len) - \
|
||||
((sfp)->hdr.i8count == 0) * \
|
||||
((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)));
|
||||
}
|
||||
|
||||
static inline int
|
||||
xfs_dir2_sf_entsize_byentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep)
|
||||
{
|
||||
return ((uint)sizeof(xfs_dir2_sf_entry_t) - 1 + (sfep)->namelen - \
|
||||
((sfp)->hdr.i8count == 0) * \
|
||||
((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)));
|
||||
}
|
||||
|
||||
static inline xfs_dir2_sf_entry_t *xfs_dir2_sf_firstentry(xfs_dir2_sf_t *sfp)
|
||||
{
|
||||
return ((xfs_dir2_sf_entry_t *) \
|
||||
((char *)(sfp) + xfs_dir2_sf_hdr_size(sfp->hdr.i8count)));
|
||||
}
|
||||
|
||||
static inline xfs_dir2_sf_entry_t *
|
||||
xfs_dir2_sf_nextentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep)
|
||||
{
|
||||
return ((xfs_dir2_sf_entry_t *) \
|
||||
((char *)(sfep) + xfs_dir2_sf_entsize_byentry(sfp,sfep)));
|
||||
}
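The firstentry/nextentry helpers above walk the packed shortform entries by each entry's own size rather than by indexing an array. A rough standalone sketch of that traversal (plain userspace C; the struct layout and the fixed 4-byte inode width are illustrative stand-ins for the packed on-disk types, not the kernel definitions):

#include <stdint.h>
#include <stdio.h>

/* Packed, variable-length entry: namelen(1) + offset(2) + name + inode number. */
struct sf_entry { uint8_t namelen; uint8_t offset[2]; uint8_t name[]; };

static size_t entsize(const struct sf_entry *e, size_t inosize)
{
	return 1 + 2 + e->namelen + inosize;
}

int main(void)
{
	/* Two packed entries with 4-byte inode numbers: "ab" then "xyz". */
	uint8_t area[] = { 2, 0,16, 'a','b', 0,0,0,7,
			   3, 0,32, 'x','y','z', 0,0,0,9 };
	const uint8_t *p = area, *end = area + sizeof(area);

	while (p < end) {
		const struct sf_entry *e = (const struct sf_entry *)p;
		printf("%.*s\n", e->namelen, (const char *)e->name);
		p += entsize(e, 4);	/* step by this entry's size, like nextentry */
	}
	return 0;
}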
|
||||
|
||||
/*
|
||||
* Functions.
|
||||
*/
|
||||
extern int xfs_dir2_block_sfsize(struct xfs_inode *dp,
|
||||
struct xfs_dir2_block *block,
|
||||
xfs_dir2_sf_hdr_t *sfhp);
|
||||
extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_dabuf *bp,
|
||||
int size, xfs_dir2_sf_hdr_t *sfhp);
|
||||
extern int xfs_dir2_sf_addname(struct xfs_da_args *args);
|
||||
extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
|
||||
extern int xfs_dir2_sf_getdents(struct xfs_inode *dp, void *dirent,
|
||||
xfs_off_t *offset, filldir_t filldir);
|
||||
extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
|
||||
extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
|
||||
extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
|
||||
|
||||
#endif /* __XFS_DIR2_SF_H__ */
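As the removed header describes, a shortform directory only switches to 8-byte inode numbers (i8count != 0) once an entry's inode number no longer fits in 32 bits, and the header grows with it. A small userspace sketch of that size arithmetic (the 1/1/8/4 byte figures mirror the packed fields above; this is an illustration, not the kernel helper):

#include <stdint.h>
#include <stdio.h>

#define XFS_DIR2_MAX_SHORT_INUM 0xffffffffULL	/* from the header above */

/* count(1) + i8count(1) + parent inode number (8 or 4 bytes) */
static unsigned hdr_size(int i8count)
{
	return 1 + 1 + (i8count ? 8 : 4);
}

int main(void)
{
	uint64_t ino = 0x100000000ULL;		/* 2^32: needs the 8-byte format */
	int i8 = ino > XFS_DIR2_MAX_SHORT_INUM;

	printf("i8count=%d header=%u bytes\n", i8, hdr_size(i8));	/* 1, 10 */
	return 0;
}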
@ -249,6 +249,11 @@ typedef struct xfs_fsop_resblks {
#define XFS_MAX_LOG_BYTES \
((2 * 1024 * 1024 * 1024ULL) - XFS_MIN_LOG_BYTES)

/* Used for sanity checks on superblock */
#define XFS_MAX_DBLOCKS(s) ((xfs_drfsbno_t)(s)->sb_agcount * (s)->sb_agblocks)
#define XFS_MIN_DBLOCKS(s) ((xfs_drfsbno_t)((s)->sb_agcount - 1) * \
(s)->sb_agblocks + XFS_MIN_AG_BLOCKS)

/*
* Structures for XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG & XFS_IOC_FSGROWFSRT
*/
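The new XFS_MAX_DBLOCKS()/XFS_MIN_DBLOCKS() macros bound sb_dblocks by the allocation group geometry: all AGs full at most, and all but the last AG full plus a minimal last AG at least. A worked example (standalone C; XFS_MIN_AG_BLOCKS is taken as 64 here for illustration, the real value lives in xfs_ag.h):

#include <stdint.h>
#include <stdio.h>

#define MIN_AG_BLOCKS 64ULL

int main(void)
{
	uint64_t agcount = 4, agblocks = 16384, dblocks = 3 * 16384 + 1000;
	uint64_t max = agcount * agblocks;			/* every AG full */
	uint64_t min = (agcount - 1) * agblocks + MIN_AG_BLOCKS;	/* short last AG */

	printf("%llu <= %llu <= %llu -> %s\n",
	       (unsigned long long)min, (unsigned long long)dblocks,
	       (unsigned long long)max,
	       (dblocks >= min && dblocks <= max) ? "sane" : "corrupt");
	return 0;
}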
@ -683,7 +683,7 @@ xfs_dialloc(
return 0;
}
agi = XFS_BUF_TO_AGI(agbp);
ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
} else {
/*
* Continue where we left off before. In this case, we
@ -691,7 +691,7 @@ xfs_dialloc(
*/
agbp = *IO_agbp;
agi = XFS_BUF_TO_AGI(agbp);
ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
ASSERT(be32_to_cpu(agi->agi_freecount) > 0);
}
mp = tp->t_mountp;
@ -775,7 +775,7 @@ nextag:
if (error)
goto nextag;
agi = XFS_BUF_TO_AGI(agbp);
ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
}
/*
* Here with an allocation group that has a free inode.
@ -944,7 +944,7 @@ nextag:
* See if the most recently allocated block has any free.
*/
newino:
if (be32_to_cpu(agi->agi_newino) != NULLAGINO) {
if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
XFS_LOOKUP_EQ, &i);
if (error)
@ -1085,7 +1085,7 @@ xfs_difree(
return error;
}
agi = XFS_BUF_TO_AGI(agbp);
ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
ASSERT(agbno < be32_to_cpu(agi->agi_length));
/*
* Initialize the cursor.
@ -1438,7 +1438,7 @@ xfs_ialloc_log_agi(
xfs_agi_t *agi; /* allocation group header */

agi = XFS_BUF_TO_AGI(bp);
ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
#endif
/*
* Compute byte offsets for the first and last fields.
@ -1492,7 +1492,7 @@ xfs_read_agi(
/*
* Validate the magic number of the agi block.
*/
agi_ok = be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
agi_ok = agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC) &&
XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)) &&
be32_to_cpu(agi->agi_seqno) == agno;
if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IALLOC_READ_AGI,
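The checks in this file are being rewritten from be32_to_cpu(field) == CONSTANT to field == cpu_to_be32(CONSTANT), so the byte swap lands on the constant side, which the compiler can fold at build time, instead of on the on-disk field at run time. A userspace sketch of the equivalence, using htonl()/ntohl() as stand-ins for cpu_to_be32()/be32_to_cpu():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define XFS_AGI_MAGIC 0x58414749	/* "XAGI" */

int main(void)
{
	uint32_t ondisk = htonl(XFS_AGI_MAGIC);	/* the field as it sits on disk (big-endian) */

	/* Old form: byte swap the variable at run time, then compare. */
	int ok_old = (ntohl(ondisk) == XFS_AGI_MAGIC);
	/* New form: byte swap the constant; the compiler can typically fold this. */
	int ok_new = (ondisk == htonl(XFS_AGI_MAGIC));

	printf("%d %d\n", ok_old, ok_new);	/* both print 1 */
	return 0;
}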
@ -31,7 +31,6 @@
|
||||
#include "xfs_dinode.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_btree.h"
|
||||
#include "xfs_btree_trace.h"
|
||||
#include "xfs_ialloc.h"
|
||||
#include "xfs_alloc.h"
|
||||
#include "xfs_error.h"
|
||||
@ -205,72 +204,6 @@ xfs_inobt_recs_inorder(
|
||||
}
|
||||
#endif /* DEBUG */
|
||||
|
||||
#ifdef XFS_BTREE_TRACE
|
||||
ktrace_t *xfs_inobt_trace_buf;
|
||||
|
||||
STATIC void
|
||||
xfs_inobt_trace_enter(
|
||||
struct xfs_btree_cur *cur,
|
||||
const char *func,
|
||||
char *s,
|
||||
int type,
|
||||
int line,
|
||||
__psunsigned_t a0,
|
||||
__psunsigned_t a1,
|
||||
__psunsigned_t a2,
|
||||
__psunsigned_t a3,
|
||||
__psunsigned_t a4,
|
||||
__psunsigned_t a5,
|
||||
__psunsigned_t a6,
|
||||
__psunsigned_t a7,
|
||||
__psunsigned_t a8,
|
||||
__psunsigned_t a9,
|
||||
__psunsigned_t a10)
|
||||
{
|
||||
ktrace_enter(xfs_inobt_trace_buf, (void *)(__psint_t)type,
|
||||
(void *)func, (void *)s, NULL, (void *)cur,
|
||||
(void *)a0, (void *)a1, (void *)a2, (void *)a3,
|
||||
(void *)a4, (void *)a5, (void *)a6, (void *)a7,
|
||||
(void *)a8, (void *)a9, (void *)a10);
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xfs_inobt_trace_cursor(
|
||||
struct xfs_btree_cur *cur,
|
||||
__uint32_t *s0,
|
||||
__uint64_t *l0,
|
||||
__uint64_t *l1)
|
||||
{
|
||||
*s0 = cur->bc_private.a.agno;
|
||||
*l0 = cur->bc_rec.i.ir_startino;
|
||||
*l1 = cur->bc_rec.i.ir_free;
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xfs_inobt_trace_key(
|
||||
struct xfs_btree_cur *cur,
|
||||
union xfs_btree_key *key,
|
||||
__uint64_t *l0,
|
||||
__uint64_t *l1)
|
||||
{
|
||||
*l0 = be32_to_cpu(key->inobt.ir_startino);
|
||||
*l1 = 0;
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xfs_inobt_trace_record(
|
||||
struct xfs_btree_cur *cur,
|
||||
union xfs_btree_rec *rec,
|
||||
__uint64_t *l0,
|
||||
__uint64_t *l1,
|
||||
__uint64_t *l2)
|
||||
{
|
||||
*l0 = be32_to_cpu(rec->inobt.ir_startino);
|
||||
*l1 = be32_to_cpu(rec->inobt.ir_freecount);
|
||||
*l2 = be64_to_cpu(rec->inobt.ir_free);
|
||||
}
|
||||
#endif /* XFS_BTREE_TRACE */
|
||||
|
||||
static const struct xfs_btree_ops xfs_inobt_ops = {
|
||||
.rec_len = sizeof(xfs_inobt_rec_t),
|
||||
.key_len = sizeof(xfs_inobt_key_t),
|
||||
@ -286,18 +219,10 @@ static const struct xfs_btree_ops xfs_inobt_ops = {
|
||||
.init_rec_from_cur = xfs_inobt_init_rec_from_cur,
|
||||
.init_ptr_from_cur = xfs_inobt_init_ptr_from_cur,
|
||||
.key_diff = xfs_inobt_key_diff,
|
||||
|
||||
#ifdef DEBUG
|
||||
.keys_inorder = xfs_inobt_keys_inorder,
|
||||
.recs_inorder = xfs_inobt_recs_inorder,
|
||||
#endif
|
||||
|
||||
#ifdef XFS_BTREE_TRACE
|
||||
.trace_enter = xfs_inobt_trace_enter,
|
||||
.trace_cursor = xfs_inobt_trace_cursor,
|
||||
.trace_key = xfs_inobt_trace_key,
|
||||
.trace_record = xfs_inobt_trace_record,
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -38,7 +38,6 @@
|
||||
#include "xfs_trans_priv.h"
|
||||
#include "xfs_inode_item.h"
|
||||
#include "xfs_bmap.h"
|
||||
#include "xfs_btree_trace.h"
|
||||
#include "xfs_trace.h"
|
||||
|
||||
|
||||
|
@ -37,7 +37,6 @@
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_inode_item.h"
|
||||
#include "xfs_btree.h"
|
||||
#include "xfs_btree_trace.h"
|
||||
#include "xfs_alloc.h"
|
||||
#include "xfs_ialloc.h"
|
||||
#include "xfs_bmap.h"
|
||||
@ -52,7 +51,7 @@ kmem_zone_t *xfs_ifork_zone;
|
||||
kmem_zone_t *xfs_inode_zone;
|
||||
|
||||
/*
|
||||
* Used in xfs_itruncate(). This is the maximum number of extents
|
||||
* Used in xfs_itruncate_extents(). This is the maximum number of extents
|
||||
* freed from a file in a single transaction.
|
||||
*/
|
||||
#define XFS_ITRUNC_MAX_EXTENTS 2
|
||||
@ -167,7 +166,7 @@ xfs_imap_to_bp(
|
||||
|
||||
dip = (xfs_dinode_t *)xfs_buf_offset(bp,
|
||||
(i << mp->m_sb.sb_inodelog));
|
||||
di_ok = be16_to_cpu(dip->di_magic) == XFS_DINODE_MAGIC &&
|
||||
di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
|
||||
XFS_DINODE_GOOD_VERSION(dip->di_version);
|
||||
if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
|
||||
XFS_ERRTAG_ITOBP_INOTOBP,
|
||||
@ -802,7 +801,7 @@ xfs_iread(
|
||||
* If we got something that isn't an inode it means someone
|
||||
* (nfs or dmi) has a stale handle.
|
||||
*/
|
||||
if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC) {
|
||||
if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC)) {
|
||||
#ifdef DEBUG
|
||||
xfs_alert(mp,
|
||||
"%s: dip->di_magic (0x%x) != XFS_DINODE_MAGIC (0x%x)",
|
||||
@ -1179,15 +1178,15 @@ xfs_ialloc(
|
||||
* at least do it for regular files.
|
||||
*/
|
||||
#ifdef DEBUG
|
||||
void
|
||||
STATIC void
|
||||
xfs_isize_check(
|
||||
xfs_mount_t *mp,
|
||||
xfs_inode_t *ip,
|
||||
xfs_fsize_t isize)
|
||||
struct xfs_inode *ip,
|
||||
xfs_fsize_t isize)
|
||||
{
|
||||
xfs_fileoff_t map_first;
|
||||
int nimaps;
|
||||
xfs_bmbt_irec_t imaps[2];
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
xfs_fileoff_t map_first;
|
||||
int nimaps;
|
||||
xfs_bmbt_irec_t imaps[2];
|
||||
|
||||
if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
|
||||
return;
|
||||
@ -1214,168 +1213,14 @@ xfs_isize_check(
|
||||
ASSERT(nimaps == 1);
|
||||
ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
|
||||
}
|
||||
#else /* DEBUG */
|
||||
#define xfs_isize_check(ip, isize)
|
||||
#endif /* DEBUG */
|
||||
|
||||
/*
|
||||
* Calculate the last possible buffered byte in a file. This must
|
||||
* include data that was buffered beyond the EOF by the write code.
|
||||
* This also needs to deal with overflowing the xfs_fsize_t type
|
||||
* which can happen for sizes near the limit.
|
||||
*
|
||||
* We also need to take into account any blocks beyond the EOF. It
|
||||
* may be the case that they were buffered by a write which failed.
|
||||
* In that case the pages will still be in memory, but the inode size
|
||||
* will never have been updated.
|
||||
*/
|
||||
STATIC xfs_fsize_t
|
||||
xfs_file_last_byte(
|
||||
xfs_inode_t *ip)
|
||||
{
|
||||
xfs_mount_t *mp;
|
||||
xfs_fsize_t last_byte;
|
||||
xfs_fileoff_t last_block;
|
||||
xfs_fileoff_t size_last_block;
|
||||
int error;
|
||||
|
||||
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED));
|
||||
|
||||
mp = ip->i_mount;
|
||||
/*
|
||||
* Only check for blocks beyond the EOF if the extents have
|
||||
* been read in. This eliminates the need for the inode lock,
|
||||
* and it also saves us from looking when it really isn't
|
||||
* necessary.
|
||||
*/
|
||||
if (ip->i_df.if_flags & XFS_IFEXTENTS) {
|
||||
xfs_ilock(ip, XFS_ILOCK_SHARED);
|
||||
error = xfs_bmap_last_offset(NULL, ip, &last_block,
|
||||
XFS_DATA_FORK);
|
||||
xfs_iunlock(ip, XFS_ILOCK_SHARED);
|
||||
if (error) {
|
||||
last_block = 0;
|
||||
}
|
||||
} else {
|
||||
last_block = 0;
|
||||
}
|
||||
size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size);
|
||||
last_block = XFS_FILEOFF_MAX(last_block, size_last_block);
|
||||
|
||||
last_byte = XFS_FSB_TO_B(mp, last_block);
|
||||
if (last_byte < 0) {
|
||||
return XFS_MAXIOFFSET(mp);
|
||||
}
|
||||
last_byte += (1 << mp->m_writeio_log);
|
||||
if (last_byte < 0) {
|
||||
return XFS_MAXIOFFSET(mp);
|
||||
}
|
||||
return last_byte;
|
||||
}
|
||||
|
||||
/*
|
||||
* Start the truncation of the file to new_size. The new size
|
||||
* must be smaller than the current size. This routine will
|
||||
* clear the buffer and page caches of file data in the removed
|
||||
* range, and xfs_itruncate_finish() will remove the underlying
|
||||
* disk blocks.
|
||||
*
|
||||
* The inode must have its I/O lock locked EXCLUSIVELY, and it
|
||||
* must NOT have the inode lock held at all. This is because we're
|
||||
* calling into the buffer/page cache code and we can't hold the
|
||||
* inode lock when we do so.
|
||||
*
|
||||
* We need to wait for any direct I/Os in flight to complete before we
|
||||
* proceed with the truncate. This is needed to prevent the extents
|
||||
* being read or written by the direct I/Os from being removed while the
|
||||
* I/O is in flight as there is no other method of synchronising
|
||||
* direct I/O with the truncate operation. Also, because we hold
|
||||
* the IOLOCK in exclusive mode, we prevent new direct I/Os from being
|
||||
* started until the truncate completes and drops the lock. Essentially,
|
||||
* the xfs_ioend_wait() call forms an I/O barrier that provides strict
|
||||
* ordering between direct I/Os and the truncate operation.
|
||||
*
|
||||
* The flags parameter can have either the value XFS_ITRUNC_DEFINITE
|
||||
* or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used
|
||||
* in the case that the caller is locking things out of order and
|
||||
* may not be able to call xfs_itruncate_finish() with the inode lock
|
||||
* held without dropping the I/O lock. If the caller must drop the
|
||||
* I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
|
||||
* must be called again with all the same restrictions as the initial
|
||||
* call.
|
||||
*/
|
||||
int
|
||||
xfs_itruncate_start(
|
||||
xfs_inode_t *ip,
|
||||
uint flags,
|
||||
xfs_fsize_t new_size)
|
||||
{
|
||||
xfs_fsize_t last_byte;
|
||||
xfs_off_t toss_start;
|
||||
xfs_mount_t *mp;
|
||||
int error = 0;
|
||||
|
||||
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
|
||||
ASSERT((new_size == 0) || (new_size <= ip->i_size));
|
||||
ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
|
||||
(flags == XFS_ITRUNC_MAYBE));
|
||||
|
||||
mp = ip->i_mount;
|
||||
|
||||
/* wait for the completion of any pending DIOs */
|
||||
if (new_size == 0 || new_size < ip->i_size)
|
||||
xfs_ioend_wait(ip);
|
||||
|
||||
/*
|
||||
* Call toss_pages or flushinval_pages to get rid of pages
|
||||
* overlapping the region being removed. We have to use
|
||||
* the less efficient flushinval_pages in the case that the
|
||||
* caller may not be able to finish the truncate without
|
||||
* dropping the inode's I/O lock. Make sure
|
||||
* to catch any pages brought in by buffers overlapping
|
||||
* the EOF by searching out beyond the isize by our
|
||||
* block size. We round new_size up to a block boundary
|
||||
* so that we don't toss things on the same block as
|
||||
* new_size but before it.
|
||||
*
|
||||
* Before calling toss_page or flushinval_pages, make sure to
|
||||
* call remapf() over the same region if the file is mapped.
|
||||
* This frees up mapped file references to the pages in the
|
||||
* given range and for the flushinval_pages case it ensures
|
||||
* that we get the latest mapped changes flushed out.
|
||||
*/
|
||||
toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
|
||||
toss_start = XFS_FSB_TO_B(mp, toss_start);
|
||||
if (toss_start < 0) {
|
||||
/*
|
||||
* The place to start tossing is beyond our maximum
|
||||
* file size, so there is no way that the data extended
|
||||
* out there.
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
last_byte = xfs_file_last_byte(ip);
|
||||
trace_xfs_itruncate_start(ip, new_size, flags, toss_start, last_byte);
|
||||
if (last_byte > toss_start) {
|
||||
if (flags & XFS_ITRUNC_DEFINITE) {
|
||||
xfs_tosspages(ip, toss_start,
|
||||
-1, FI_REMAPF_LOCKED);
|
||||
} else {
|
||||
error = xfs_flushinval_pages(ip, toss_start,
|
||||
-1, FI_REMAPF_LOCKED);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
if (new_size == 0) {
|
||||
ASSERT(VN_CACHED(VFS_I(ip)) == 0);
|
||||
}
|
||||
#endif
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Shrink the file to the given new_size. The new size must be smaller than
|
||||
* the current size. This will free up the underlying blocks in the removed
|
||||
* range after a call to xfs_itruncate_start() or xfs_atruncate_start().
|
||||
* Free up the underlying blocks past new_size. The new size must be smaller
|
||||
* than the current size. This routine can be used both for the attribute and
|
||||
* data fork, and does not modify the inode size, which is left to the caller.
|
||||
*
|
||||
* The transaction passed to this routine must have made a permanent log
|
||||
* reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
|
||||
@ -1387,31 +1232,6 @@ xfs_itruncate_start(
|
||||
* will be "held" within the returned transaction. This routine does NOT
|
||||
* require any disk space to be reserved for it within the transaction.
|
||||
*
|
||||
* The fork parameter must be either xfs_attr_fork or xfs_data_fork, and it
|
||||
* indicates the fork which is to be truncated. For the attribute fork we only
|
||||
* support truncation to size 0.
|
||||
*
|
||||
* We use the sync parameter to indicate whether or not the first transaction
|
||||
* we perform might have to be synchronous. For the attr fork, it needs to be
|
||||
* so if the unlink of the inode is not yet known to be permanent in the log.
|
||||
* This keeps us from freeing and reusing the blocks of the attribute fork
|
||||
* before the unlink of the inode becomes permanent.
|
||||
*
|
||||
* For the data fork, we normally have to run synchronously if we're being
|
||||
* called out of the inactive path or we're being called out of the create path
|
||||
* where we're truncating an existing file. Either way, the truncate needs to
|
||||
* be sync so blocks don't reappear in the file with altered data in case of a
|
||||
* crash. wsync filesystems can run the first case async because anything that
|
||||
* shrinks the inode has to run sync so by the time we're called here from
|
||||
* inactive, the inode size is permanently set to 0.
|
||||
*
|
||||
* Calls from the truncate path always need to be sync unless we're in a wsync
|
||||
* filesystem and the file has already been unlinked.
|
||||
*
|
||||
* The caller is responsible for correctly setting the sync parameter. It gets
|
||||
* too hard for us to guess here which path we're being called out of just
|
||||
* based on inode state.
|
||||
*
|
||||
* If we get an error, we must return with the inode locked and linked into the
|
||||
* current transaction. This keeps things simple for the higher level code,
|
||||
* because it always knows that the inode is locked and held in the transaction
|
||||
@ -1419,124 +1239,30 @@ xfs_itruncate_start(
|
||||
* dirty on error so that transactions can be easily aborted if possible.
|
||||
*/
|
||||
int
|
||||
xfs_itruncate_finish(
|
||||
xfs_trans_t **tp,
|
||||
xfs_inode_t *ip,
|
||||
xfs_fsize_t new_size,
|
||||
int fork,
|
||||
int sync)
|
||||
xfs_itruncate_extents(
|
||||
struct xfs_trans **tpp,
|
||||
struct xfs_inode *ip,
|
||||
int whichfork,
|
||||
xfs_fsize_t new_size)
|
||||
{
|
||||
xfs_fsblock_t first_block;
|
||||
xfs_fileoff_t first_unmap_block;
|
||||
xfs_fileoff_t last_block;
|
||||
xfs_filblks_t unmap_len=0;
|
||||
xfs_mount_t *mp;
|
||||
xfs_trans_t *ntp;
|
||||
int done;
|
||||
int committed;
|
||||
xfs_bmap_free_t free_list;
|
||||
int error;
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
struct xfs_trans *tp = *tpp;
|
||||
struct xfs_trans *ntp;
|
||||
xfs_bmap_free_t free_list;
|
||||
xfs_fsblock_t first_block;
|
||||
xfs_fileoff_t first_unmap_block;
|
||||
xfs_fileoff_t last_block;
|
||||
xfs_filblks_t unmap_len;
|
||||
int committed;
|
||||
int error = 0;
|
||||
int done = 0;
|
||||
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
|
||||
ASSERT((new_size == 0) || (new_size <= ip->i_size));
|
||||
ASSERT(*tp != NULL);
|
||||
ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
|
||||
ASSERT(ip->i_transp == *tp);
|
||||
ASSERT(new_size <= ip->i_size);
|
||||
ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
|
||||
ASSERT(ip->i_itemp != NULL);
|
||||
ASSERT(ip->i_itemp->ili_lock_flags == 0);
|
||||
|
||||
|
||||
ntp = *tp;
|
||||
mp = (ntp)->t_mountp;
|
||||
ASSERT(! XFS_NOT_DQATTACHED(mp, ip));
|
||||
|
||||
/*
|
||||
* We only support truncating the entire attribute fork.
|
||||
*/
|
||||
if (fork == XFS_ATTR_FORK) {
|
||||
new_size = 0LL;
|
||||
}
|
||||
first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
|
||||
trace_xfs_itruncate_finish_start(ip, new_size);
|
||||
|
||||
/*
|
||||
* The first thing we do is set the size to new_size permanently
|
||||
* on disk. This way we don't have to worry about anyone ever
|
||||
* being able to look at the data being freed even in the face
|
||||
* of a crash. What we're getting around here is the case where
|
||||
* we free a block, it is allocated to another file, it is written
|
||||
* to, and then we crash. If the new data gets written to the
|
||||
* file but the log buffers containing the free and reallocation
|
||||
* don't, then we'd end up with garbage in the blocks being freed.
|
||||
* As long as we make the new_size permanent before actually
|
||||
* freeing any blocks it doesn't matter if they get written to.
|
||||
*
|
||||
* The callers must signal into us whether or not the size
|
||||
* setting here must be synchronous. There are a few cases
|
||||
* where it doesn't have to be synchronous. Those cases
|
||||
* occur if the file is unlinked and we know the unlink is
|
||||
* permanent or if the blocks being truncated are guaranteed
|
||||
* to be beyond the inode eof (regardless of the link count)
|
||||
* and the eof value is permanent. Both of these cases occur
|
||||
* only on wsync-mounted filesystems. In those cases, we're
|
||||
* guaranteed that no user will ever see the data in the blocks
|
||||
* that are being truncated so the truncate can run async.
|
||||
* In the free beyond eof case, the file may wind up with
|
||||
* more blocks allocated to it than it needs if we crash
|
||||
* and that won't get fixed until the next time the file
|
||||
* is re-opened and closed but that's ok as that shouldn't
|
||||
* be too many blocks.
|
||||
*
|
||||
* However, we can't just make all wsync xactions run async
|
||||
* because there's one call out of the create path that needs
|
||||
* to run sync where it's truncating an existing file to size
|
||||
* 0 whose size is > 0.
|
||||
*
|
||||
* It's probably possible to come up with a test in this
|
||||
* routine that would correctly distinguish all the above
|
||||
* cases from the values of the function parameters and the
|
||||
* inode state but for sanity's sake, I've decided to let the
|
||||
* layers above just tell us. It's simpler to correctly figure
|
||||
* out in the layer above exactly under what conditions we
|
||||
* can run async and I think it's easier for others read and
|
||||
* follow the logic in case something has to be changed.
|
||||
* cscope is your friend -- rcc.
|
||||
*
|
||||
* The attribute fork is much simpler.
|
||||
*
|
||||
* For the attribute fork we allow the caller to tell us whether
|
||||
* the unlink of the inode that led to this call is yet permanent
|
||||
* in the on disk log. If it is not and we will be freeing extents
|
||||
* in this inode then we make the first transaction synchronous
|
||||
* to make sure that the unlink is permanent by the time we free
|
||||
* the blocks.
|
||||
*/
|
||||
if (fork == XFS_DATA_FORK) {
|
||||
if (ip->i_d.di_nextents > 0) {
|
||||
/*
|
||||
* If we are not changing the file size then do
|
||||
* not update the on-disk file size - we may be
|
||||
* called from xfs_inactive_free_eofblocks(). If we
|
||||
* update the on-disk file size and then the system
|
||||
* crashes before the contents of the file are
|
||||
* flushed to disk then the files may be full of
|
||||
* holes (ie NULL files bug).
|
||||
*/
|
||||
if (ip->i_size != new_size) {
|
||||
ip->i_d.di_size = new_size;
|
||||
ip->i_size = new_size;
|
||||
xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
|
||||
}
|
||||
}
|
||||
} else if (sync) {
|
||||
ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
|
||||
if (ip->i_d.di_anextents > 0)
|
||||
xfs_trans_set_sync(ntp);
|
||||
}
|
||||
ASSERT(fork == XFS_DATA_FORK ||
|
||||
(fork == XFS_ATTR_FORK &&
|
||||
((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
|
||||
(sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));
|
||||
ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
|
||||
|
||||
/*
|
||||
* Since it is possible for space to become allocated beyond
|
||||
@ -1547,128 +1273,142 @@ xfs_itruncate_finish(
|
||||
* beyond the maximum file size (ie it is the same as last_block),
|
||||
* then there is nothing to do.
|
||||
*/
|
||||
first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
|
||||
last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
|
||||
ASSERT(first_unmap_block <= last_block);
|
||||
done = 0;
|
||||
if (last_block == first_unmap_block) {
|
||||
done = 1;
|
||||
} else {
|
||||
unmap_len = last_block - first_unmap_block + 1;
|
||||
}
|
||||
if (first_unmap_block == last_block)
|
||||
return 0;
|
||||
|
||||
ASSERT(first_unmap_block < last_block);
|
||||
unmap_len = last_block - first_unmap_block + 1;
|
||||
while (!done) {
|
||||
/*
|
||||
* Free up up to XFS_ITRUNC_MAX_EXTENTS. xfs_bunmapi()
|
||||
* will tell us whether it freed the entire range or
|
||||
* not. If this is a synchronous mount (wsync),
|
||||
* then we can tell bunmapi to keep all the
|
||||
* transactions asynchronous since the unlink
|
||||
* transaction that made this inode inactive has
|
||||
* already hit the disk. There's no danger of
|
||||
* the freed blocks being reused, there being a
|
||||
* crash, and the reused blocks suddenly reappearing
|
||||
* in this file with garbage in them once recovery
|
||||
* runs.
|
||||
*/
|
||||
xfs_bmap_init(&free_list, &first_block);
|
||||
error = xfs_bunmapi(ntp, ip,
|
||||
error = xfs_bunmapi(tp, ip,
|
||||
first_unmap_block, unmap_len,
|
||||
xfs_bmapi_aflag(fork),
|
||||
xfs_bmapi_aflag(whichfork),
|
||||
XFS_ITRUNC_MAX_EXTENTS,
|
||||
&first_block, &free_list,
|
||||
&done);
|
||||
if (error) {
|
||||
/*
|
||||
* If the bunmapi call encounters an error,
|
||||
* return to the caller where the transaction
|
||||
* can be properly aborted. We just need to
|
||||
* make sure we're not holding any resources
|
||||
* that we were not when we came in.
|
||||
*/
|
||||
xfs_bmap_cancel(&free_list);
|
||||
return error;
|
||||
}
|
||||
if (error)
|
||||
goto out_bmap_cancel;
|
||||
|
||||
/*
|
||||
* Duplicate the transaction that has the permanent
|
||||
* reservation and commit the old transaction.
|
||||
*/
|
||||
error = xfs_bmap_finish(tp, &free_list, &committed);
|
||||
ntp = *tp;
|
||||
error = xfs_bmap_finish(&tp, &free_list, &committed);
|
||||
if (committed)
|
||||
xfs_trans_ijoin(ntp, ip);
|
||||
|
||||
if (error) {
|
||||
/*
|
||||
* If the bmap finish call encounters an error, return
|
||||
* to the caller where the transaction can be properly
|
||||
* aborted. We just need to make sure we're not
|
||||
* holding any resources that we were not when we came
|
||||
* in.
|
||||
*
|
||||
* Aborting from this point might lose some blocks in
|
||||
* the file system, but oh well.
|
||||
*/
|
||||
xfs_bmap_cancel(&free_list);
|
||||
return error;
|
||||
}
|
||||
xfs_trans_ijoin(tp, ip);
|
||||
if (error)
|
||||
goto out_bmap_cancel;
|
||||
|
||||
if (committed) {
|
||||
/*
|
||||
* Mark the inode dirty so it will be logged and
|
||||
* moved forward in the log as part of every commit.
|
||||
*/
|
||||
xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
}
|
||||
|
||||
ntp = xfs_trans_dup(ntp);
|
||||
error = xfs_trans_commit(*tp, 0);
|
||||
*tp = ntp;
|
||||
ntp = xfs_trans_dup(tp);
|
||||
error = xfs_trans_commit(tp, 0);
|
||||
tp = ntp;
|
||||
|
||||
xfs_trans_ijoin(ntp, ip);
|
||||
xfs_trans_ijoin(tp, ip);
|
||||
|
||||
if (error)
|
||||
return error;
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* transaction commit worked ok so we can drop the extra ticket
|
||||
* Transaction commit worked ok so we can drop the extra ticket
|
||||
* reference that we gained in xfs_trans_dup()
|
||||
*/
|
||||
xfs_log_ticket_put(ntp->t_ticket);
|
||||
error = xfs_trans_reserve(ntp, 0,
|
||||
xfs_log_ticket_put(tp->t_ticket);
|
||||
error = xfs_trans_reserve(tp, 0,
|
||||
XFS_ITRUNCATE_LOG_RES(mp), 0,
|
||||
XFS_TRANS_PERM_LOG_RES,
|
||||
XFS_ITRUNCATE_LOG_COUNT);
|
||||
if (error)
|
||||
return error;
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
*tpp = tp;
|
||||
return error;
|
||||
out_bmap_cancel:
|
||||
/*
|
||||
* Only update the size in the case of the data fork, but
|
||||
* always re-log the inode so that our permanent transaction
|
||||
* can keep on rolling it forward in the log.
|
||||
* If the bunmapi call encounters an error, return to the caller where
|
||||
* the transaction can be properly aborted. We just need to make sure
|
||||
* we're not holding any resources that we were not when we came in.
|
||||
*/
|
||||
if (fork == XFS_DATA_FORK) {
|
||||
xfs_isize_check(mp, ip, new_size);
|
||||
xfs_bmap_cancel(&free_list);
|
||||
goto out;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_itruncate_data(
|
||||
struct xfs_trans **tpp,
|
||||
struct xfs_inode *ip,
|
||||
xfs_fsize_t new_size)
|
||||
{
|
||||
int error;
|
||||
|
||||
trace_xfs_itruncate_data_start(ip, new_size);
|
||||
|
||||
/*
|
||||
* The first thing we do is set the size to new_size permanently on
|
||||
* disk. This way we don't have to worry about anyone ever being able
|
||||
* to look at the data being freed even in the face of a crash.
|
||||
* What we're getting around here is the case where we free a block, it
|
||||
* is allocated to another file, it is written to, and then we crash.
|
||||
* If the new data gets written to the file but the log buffers
|
||||
* containing the free and reallocation don't, then we'd end up with
|
||||
* garbage in the blocks being freed. As long as we make the new_size
|
||||
* permanent before actually freeing any blocks it doesn't matter if
|
||||
* they get written to.
|
||||
*/
|
||||
if (ip->i_d.di_nextents > 0) {
|
||||
/*
|
||||
* If we are not changing the file size then do
|
||||
* not update the on-disk file size - we may be
|
||||
* called from xfs_inactive_free_eofblocks(). If we
|
||||
* update the on-disk file size and then the system
|
||||
* crashes before the contents of the file are
|
||||
* flushed to disk then the files may be full of
|
||||
* holes (ie NULL files bug).
|
||||
* If we are not changing the file size then do not update
|
||||
* the on-disk file size - we may be called from
|
||||
* xfs_inactive_free_eofblocks(). If we update the on-disk
|
||||
* file size and then the system crashes before the contents
|
||||
* of the file are flushed to disk then the files may be
|
||||
* full of holes (ie NULL files bug).
|
||||
*/
|
||||
if (ip->i_size != new_size) {
|
||||
ip->i_d.di_size = new_size;
|
||||
ip->i_size = new_size;
|
||||
xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);
|
||||
}
|
||||
}
|
||||
xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
|
||||
ASSERT((new_size != 0) ||
|
||||
(fork == XFS_ATTR_FORK) ||
|
||||
(ip->i_delayed_blks == 0));
|
||||
ASSERT((new_size != 0) ||
|
||||
(fork == XFS_ATTR_FORK) ||
|
||||
(ip->i_d.di_nextents == 0));
|
||||
trace_xfs_itruncate_finish_end(ip, new_size);
|
||||
|
||||
error = xfs_itruncate_extents(tpp, ip, XFS_DATA_FORK, new_size);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
/*
|
||||
* If we are not changing the file size then do not update the on-disk
|
||||
* file size - we may be called from xfs_inactive_free_eofblocks().
|
||||
* If we update the on-disk file size and then the system crashes
|
||||
* before the contents of the file are flushed to disk then the files
|
||||
* may be full of holes (ie NULL files bug).
|
||||
*/
|
||||
xfs_isize_check(ip, new_size);
|
||||
if (ip->i_size != new_size) {
|
||||
ip->i_d.di_size = new_size;
|
||||
ip->i_size = new_size;
|
||||
}
|
||||
|
||||
ASSERT(new_size != 0 || ip->i_delayed_blks == 0);
|
||||
ASSERT(new_size != 0 || ip->i_d.di_nextents == 0);
|
||||
|
||||
/*
|
||||
* Always re-log the inode so that our permanent transaction can keep
|
||||
* on rolling it forward in the log.
|
||||
*/
|
||||
xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);
|
||||
|
||||
trace_xfs_itruncate_data_end(ip, new_size);
|
||||
return 0;
|
||||
}
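xfs_itruncate_extents() above frees at most XFS_ITRUNC_MAX_EXTENTS extents per transaction and then rolls the transaction (dup, commit, re-reserve) until the range is gone. A minimal control-flow sketch of that bounded-batch loop (standalone C; commit_and_renew() is a made-up stand-in for the xfs_trans_dup/commit/reserve sequence, not a kernel call):

#include <stdio.h>

#define ITRUNC_MAX_EXTENTS 2	/* mirrors XFS_ITRUNC_MAX_EXTENTS above */

/* Stand-in for rolling the transaction: commit what we freed so far and
 * start a fresh reservation so the next batch has log space of its own. */
static int commit_and_renew(int batch)
{
	printf("commit batch of %d\n", batch);
	return 0;
}

int main(void)
{
	int extents_left = 7;	/* pretend the file has 7 extents to free */

	while (extents_left > 0) {
		int batch = extents_left < ITRUNC_MAX_EXTENTS ?
			    extents_left : ITRUNC_MAX_EXTENTS;
		extents_left -= batch;		/* the bunmapi step frees up to the cap */
		if (commit_and_renew(batch))	/* the transaction roll */
			return 1;
	}
	printf("done\n");
	return 0;
}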
|
||||
|
||||
@ -1694,7 +1434,6 @@ xfs_iunlink(
|
||||
|
||||
ASSERT(ip->i_d.di_nlink == 0);
|
||||
ASSERT(ip->i_d.di_mode != 0);
|
||||
ASSERT(ip->i_transp == tp);
|
||||
|
||||
mp = tp->t_mountp;
|
||||
|
||||
@ -1717,7 +1456,7 @@ xfs_iunlink(
|
||||
ASSERT(agi->agi_unlinked[bucket_index]);
|
||||
ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
|
||||
|
||||
if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) {
|
||||
if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
|
||||
/*
|
||||
* There is already another inode in the bucket we need
|
||||
* to add ourselves to. Add us at the front of the list.
|
||||
@ -1728,8 +1467,7 @@ xfs_iunlink(
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO);
|
||||
/* both on-disk, don't endian flip twice */
|
||||
ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
|
||||
dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
|
||||
offset = ip->i_imap.im_boffset +
|
||||
offsetof(xfs_dinode_t, di_next_unlinked);
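xfs_iunlink() chains the inode onto one of the AGI's unlinked buckets by pushing it on the front of the list, with the bucket picked by the AG-relative inode number modulo the bucket count. A standalone sketch of that push-front (the bucket count mirrors XFS_AGI_UNLINKED_BUCKETS; struct ino is a made-up stand-in for the real inode/AGI types):

#include <stdio.h>

#define UNLINKED_BUCKETS 64

struct ino { unsigned number; struct ino *next_unlinked; };

static struct ino *bucket[UNLINKED_BUCKETS];	/* one list head per bucket, as in the AGI */

static void unlink_push(struct ino *ip)
{
	unsigned b = ip->number % UNLINKED_BUCKETS;

	ip->next_unlinked = bucket[b];	/* old head becomes our successor */
	bucket[b] = ip;			/* we become the new head */
}

int main(void)
{
	struct ino a = { 129, NULL }, c = { 193, NULL };	/* both hash to bucket 1 */

	unlink_push(&a);
	unlink_push(&c);
	printf("head of bucket 1: %u\n", bucket[129 % UNLINKED_BUCKETS]->number);	/* 193 */
	return 0;
}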
|
||||
@ -1794,7 +1532,7 @@ xfs_iunlink_remove(
|
||||
agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
|
||||
ASSERT(agino != 0);
|
||||
bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
|
||||
ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO);
|
||||
ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
|
||||
ASSERT(agi->agi_unlinked[bucket_index]);
|
||||
|
||||
if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
|
||||
@ -1959,7 +1697,7 @@ xfs_ifree_cluster(
|
||||
* stale first, we will not attempt to lock them in the loop
|
||||
* below as the XFS_ISTALE flag will be set.
|
||||
*/
|
||||
lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
|
||||
lip = bp->b_fspriv;
|
||||
while (lip) {
|
||||
if (lip->li_type == XFS_LI_INODE) {
|
||||
iip = (xfs_inode_log_item_t *)lip;
|
||||
@ -2086,7 +1824,6 @@ xfs_ifree(
|
||||
xfs_buf_t *ibp;
|
||||
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
ASSERT(ip->i_transp == tp);
|
||||
ASSERT(ip->i_d.di_nlink == 0);
|
||||
ASSERT(ip->i_d.di_nextents == 0);
|
||||
ASSERT(ip->i_d.di_anextents == 0);
|
||||
@ -2733,7 +2470,7 @@ cluster_corrupt_out:
|
||||
* mark the buffer as an error and call them. Otherwise
|
||||
* mark it as stale and brelse.
|
||||
*/
|
||||
if (XFS_BUF_IODONE_FUNC(bp)) {
|
||||
if (bp->b_iodone) {
|
||||
XFS_BUF_UNDONE(bp);
|
||||
XFS_BUF_STALE(bp);
|
||||
XFS_BUF_ERROR(bp,EIO);
|
||||
@ -2920,7 +2657,7 @@ xfs_iflush_int(
|
||||
*/
|
||||
xfs_synchronize_times(ip);
|
||||
|
||||
if (XFS_TEST_ERROR(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC,
|
||||
if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
|
||||
mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
|
||||
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
|
||||
"%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
|
||||
@ -3073,8 +2810,8 @@ xfs_iflush_int(
|
||||
*/
|
||||
xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
|
||||
|
||||
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
|
||||
ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
|
||||
ASSERT(bp->b_fspriv != NULL);
|
||||
ASSERT(bp->b_iodone != NULL);
|
||||
} else {
|
||||
/*
|
||||
* We're flushing an inode which is not in the AIL and has
|
||||
|
@ -241,7 +241,6 @@ typedef struct xfs_inode {
|
||||
xfs_ifork_t i_df; /* data fork */
|
||||
|
||||
/* Transaction and locking information. */
|
||||
struct xfs_trans *i_transp; /* ptr to owning transaction*/
|
||||
struct xfs_inode_log_item *i_itemp; /* logging information */
|
||||
mrlock_t i_lock; /* inode lock */
|
||||
mrlock_t i_iolock; /* inode IO lock */
|
||||
@ -457,16 +456,6 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
|
||||
|
||||
extern struct lock_class_key xfs_iolock_reclaimable;
|
||||
|
||||
/*
|
||||
* Flags for xfs_itruncate_start().
|
||||
*/
|
||||
#define XFS_ITRUNC_DEFINITE 0x1
|
||||
#define XFS_ITRUNC_MAYBE 0x2
|
||||
|
||||
#define XFS_ITRUNC_FLAGS \
|
||||
{ XFS_ITRUNC_DEFINITE, "DEFINITE" }, \
|
||||
{ XFS_ITRUNC_MAYBE, "MAYBE" }
|
||||
|
||||
/*
|
||||
* For multiple groups support: if S_ISGID bit is set in the parent
|
||||
* directory, group of new file is set to that of the parent, and
|
||||
@ -501,9 +490,10 @@ uint xfs_ip2xflags(struct xfs_inode *);
|
||||
uint xfs_dic2xflags(struct xfs_dinode *);
|
||||
int xfs_ifree(struct xfs_trans *, xfs_inode_t *,
|
||||
struct xfs_bmap_free *);
|
||||
int xfs_itruncate_start(xfs_inode_t *, uint, xfs_fsize_t);
|
||||
int xfs_itruncate_finish(struct xfs_trans **, xfs_inode_t *,
|
||||
xfs_fsize_t, int, int);
|
||||
int xfs_itruncate_extents(struct xfs_trans **, struct xfs_inode *,
|
||||
int, xfs_fsize_t);
|
||||
int xfs_itruncate_data(struct xfs_trans **, struct xfs_inode *,
|
||||
xfs_fsize_t);
|
||||
int xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
|
||||
|
||||
void xfs_iext_realloc(xfs_inode_t *, int, int);
|
||||
@ -579,13 +569,6 @@ void xfs_iext_irec_update_extoffs(xfs_ifork_t *, int, int);
|
||||
|
||||
#define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount))
|
||||
|
||||
#ifdef DEBUG
|
||||
void xfs_isize_check(struct xfs_mount *, struct xfs_inode *,
|
||||
xfs_fsize_t);
|
||||
#else /* DEBUG */
|
||||
#define xfs_isize_check(mp, ip, isize)
|
||||
#endif /* DEBUG */
|
||||
|
||||
#if defined(DEBUG)
|
||||
void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
|
||||
#else
|
||||
|
@ -632,13 +632,8 @@ xfs_inode_item_unlock(
|
||||
struct xfs_inode *ip = iip->ili_inode;
|
||||
unsigned short lock_flags;
|
||||
|
||||
ASSERT(iip->ili_inode->i_itemp != NULL);
|
||||
ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));
|
||||
|
||||
/*
|
||||
* Clear the transaction pointer in the inode.
|
||||
*/
|
||||
ip->i_transp = NULL;
|
||||
ASSERT(ip->i_itemp != NULL);
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
|
||||
/*
|
||||
* If the inode needed a separate buffer with which to log
|
||||
@ -664,8 +659,8 @@ xfs_inode_item_unlock(
|
||||
lock_flags = iip->ili_lock_flags;
|
||||
iip->ili_lock_flags = 0;
|
||||
if (lock_flags) {
|
||||
xfs_iunlock(iip->ili_inode, lock_flags);
|
||||
IRELE(iip->ili_inode);
|
||||
xfs_iunlock(ip, lock_flags);
|
||||
IRELE(ip);
|
||||
}
|
||||
}
|
||||
|
||||
@ -879,7 +874,7 @@ xfs_iflush_done(
|
||||
* Scan the buffer IO completions for other inodes being completed and
|
||||
* attach them to the current inode log item.
|
||||
*/
|
||||
blip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
|
||||
blip = bp->b_fspriv;
|
||||
prev = NULL;
|
||||
while (blip != NULL) {
|
||||
if (lip->li_cb != xfs_iflush_done) {
|
||||
@ -891,7 +886,7 @@ xfs_iflush_done(
|
||||
/* remove from list */
|
||||
next = blip->li_bio_list;
|
||||
if (!prev) {
|
||||
XFS_BUF_SET_FSPRIVATE(bp, next);
|
||||
bp->b_fspriv = next;
|
||||
} else {
|
||||
prev->li_bio_list = next;
|
||||
}
|
||||
|
@ -28,17 +28,6 @@
|
||||
|
||||
typedef __uint32_t xfs_agino_t; /* within allocation grp inode number */
|
||||
|
||||
/*
|
||||
* Useful inode bits for this kernel.
|
||||
* Used in some places where having 64-bits in the 32-bit kernels
|
||||
* costs too much.
|
||||
*/
|
||||
#if XFS_BIG_INUMS
|
||||
typedef xfs_ino_t xfs_intino_t;
|
||||
#else
|
||||
typedef __uint32_t xfs_intino_t;
|
||||
#endif
|
||||
|
||||
#define NULLFSINO ((xfs_ino_t)-1)
|
||||
#define NULLAGINO ((xfs_agino_t)-1)
|
||||
|
||||
|
@ -871,15 +871,9 @@ xlog_space_left(
|
||||
void
|
||||
xlog_iodone(xfs_buf_t *bp)
|
||||
{
|
||||
xlog_in_core_t *iclog;
|
||||
xlog_t *l;
|
||||
int aborted;
|
||||
|
||||
iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);
|
||||
ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long) 2);
|
||||
XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
|
||||
aborted = 0;
|
||||
l = iclog->ic_log;
|
||||
xlog_in_core_t *iclog = bp->b_fspriv;
|
||||
xlog_t *l = iclog->ic_log;
|
||||
int aborted = 0;
|
||||
|
||||
/*
|
||||
* Race to shutdown the filesystem if we see an error.
|
||||
@ -1056,10 +1050,9 @@ xlog_alloc_log(xfs_mount_t *mp,
|
||||
bp = xfs_buf_get_empty(log->l_iclog_size, mp->m_logdev_targp);
|
||||
if (!bp)
|
||||
goto out_free_log;
|
||||
XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
|
||||
XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
|
||||
bp->b_iodone = xlog_iodone;
|
||||
ASSERT(XFS_BUF_ISBUSY(bp));
|
||||
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
|
||||
ASSERT(xfs_buf_islocked(bp));
|
||||
log->l_xbuf = bp;
|
||||
|
||||
spin_lock_init(&log->l_icloglock);
|
||||
@ -1090,10 +1083,8 @@ xlog_alloc_log(xfs_mount_t *mp,
|
||||
log->l_iclog_size, 0);
|
||||
if (!bp)
|
||||
goto out_free_iclog;
|
||||
if (!XFS_BUF_CPSEMA(bp))
|
||||
ASSERT(0);
|
||||
XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
|
||||
XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
|
||||
|
||||
bp->b_iodone = xlog_iodone;
|
||||
iclog->ic_bp = bp;
|
||||
iclog->ic_data = bp->b_addr;
|
||||
#ifdef DEBUG
|
||||
@ -1118,7 +1109,7 @@ xlog_alloc_log(xfs_mount_t *mp,
|
||||
iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;
|
||||
|
||||
ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp));
|
||||
ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0);
|
||||
ASSERT(xfs_buf_islocked(iclog->ic_bp));
|
||||
init_waitqueue_head(&iclog->ic_force_wait);
|
||||
init_waitqueue_head(&iclog->ic_write_wait);
|
||||
|
||||
@ -1254,9 +1245,8 @@ STATIC int
|
||||
xlog_bdstrat(
|
||||
struct xfs_buf *bp)
|
||||
{
|
||||
struct xlog_in_core *iclog;
|
||||
struct xlog_in_core *iclog = bp->b_fspriv;
|
||||
|
||||
iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);
|
||||
if (iclog->ic_state & XLOG_STATE_IOERROR) {
|
||||
XFS_BUF_ERROR(bp, EIO);
|
||||
XFS_BUF_STALE(bp);
|
||||
@ -1269,7 +1259,6 @@ xlog_bdstrat(
|
||||
return 0;
|
||||
}
|
||||
|
||||
bp->b_flags |= _XBF_RUN_QUEUES;
|
||||
xfs_buf_iorequest(bp);
|
||||
return 0;
|
||||
}
|
||||
@ -1351,8 +1340,6 @@ xlog_sync(xlog_t *log,
|
||||
}
|
||||
|
||||
bp = iclog->ic_bp;
|
||||
ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1);
|
||||
XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
|
||||
XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)));
|
||||
|
||||
XFS_STATS_ADD(xs_log_blocks, BTOBB(count));
|
||||
@ -1366,22 +1353,28 @@ xlog_sync(xlog_t *log,
|
||||
iclog->ic_bwritecnt = 1;
|
||||
}
|
||||
XFS_BUF_SET_COUNT(bp, count);
|
||||
XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */
|
||||
bp->b_fspriv = iclog;
|
||||
XFS_BUF_ZEROFLAGS(bp);
|
||||
XFS_BUF_BUSY(bp);
|
||||
XFS_BUF_ASYNC(bp);
|
||||
bp->b_flags |= XBF_LOG_BUFFER;
|
||||
bp->b_flags |= XBF_SYNCIO;
|
||||
|
||||
if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
|
||||
bp->b_flags |= XBF_FUA;
|
||||
|
||||
/*
|
||||
* If we have an external log device, flush the data device
|
||||
* before flushing the log to make sure all meta data
|
||||
* written back from the AIL actually made it to disk
|
||||
* before writing out the new log tail LSN in the log buffer.
|
||||
* Flush the data device before flushing the log to make
|
||||
* sure all meta data written back from the AIL actually made
|
||||
* it to disk before stamping the new log tail LSN into the
|
||||
* log buffer. For an external log we need to issue the
|
||||
* flush explicitly, and unfortunately synchronously here;
|
||||
* for an internal log we can simply use the block layer
|
||||
* state machine for preflushes.
|
||||
*/
|
||||
if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
|
||||
xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
|
||||
XFS_BUF_ORDERED(bp);
|
||||
else
|
||||
bp->b_flags |= XBF_FLUSH;
|
||||
}
|
||||
|
||||
ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
|
||||
@ -1404,19 +1397,16 @@ xlog_sync(xlog_t *log,
|
||||
}
|
||||
if (split) {
|
||||
bp = iclog->ic_log->l_xbuf;
|
||||
ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) ==
|
||||
(unsigned long)1);
|
||||
XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
|
||||
XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */
|
||||
XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+
|
||||
(__psint_t)count), split);
|
||||
XFS_BUF_SET_FSPRIVATE(bp, iclog);
|
||||
bp->b_fspriv = iclog;
|
||||
XFS_BUF_ZEROFLAGS(bp);
|
||||
XFS_BUF_BUSY(bp);
|
||||
XFS_BUF_ASYNC(bp);
|
||||
bp->b_flags |= XBF_LOG_BUFFER;
|
||||
bp->b_flags |= XBF_SYNCIO;
|
||||
if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
|
||||
XFS_BUF_ORDERED(bp);
|
||||
bp->b_flags |= XBF_FUA;
|
||||
dptr = XFS_BUF_PTR(bp);
|
||||
/*
|
||||
* Bump the cycle numbers at the start of each block
|
||||
@ -3521,13 +3511,13 @@ xlog_verify_iclog(xlog_t *log,
|
||||
spin_unlock(&log->l_icloglock);
|
||||
|
||||
/* check log magic numbers */
|
||||
if (be32_to_cpu(iclog->ic_header.h_magicno) != XLOG_HEADER_MAGIC_NUM)
|
||||
if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
|
||||
xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
|
||||
|
||||
ptr = (xfs_caddr_t) &iclog->ic_header;
|
||||
for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count;
|
||||
ptr += BBSIZE) {
|
||||
if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM)
|
||||
if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
|
||||
xfs_emerg(log->l_mp, "%s: unexpected magic num",
|
||||
__func__);
|
||||
}
|
||||
|
@ -91,6 +91,8 @@ xlog_get_bp(
|
||||
xlog_t *log,
|
||||
int nbblks)
|
||||
{
|
||||
struct xfs_buf *bp;
|
||||
|
||||
if (!xlog_buf_bbcount_valid(log, nbblks)) {
|
||||
xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
|
||||
nbblks);
|
||||
@ -118,8 +120,10 @@ xlog_get_bp(
|
||||
nbblks += log->l_sectBBsize;
|
||||
nbblks = round_up(nbblks, log->l_sectBBsize);
|
||||
|
||||
return xfs_buf_get_uncached(log->l_mp->m_logdev_targp,
|
||||
BBTOB(nbblks), 0);
|
||||
bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, BBTOB(nbblks), 0);
|
||||
if (bp)
|
||||
xfs_buf_unlock(bp);
|
||||
return bp;
|
||||
}
|
||||
|
||||
STATIC void
|
||||
@ -264,7 +268,7 @@ xlog_bwrite(
|
||||
XFS_BUF_ZEROFLAGS(bp);
|
||||
XFS_BUF_BUSY(bp);
|
||||
XFS_BUF_HOLD(bp);
|
||||
XFS_BUF_PSEMA(bp, PRIBIO);
|
||||
xfs_buf_lock(bp);
|
||||
XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
|
||||
XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);
|
||||
|
||||
@ -300,14 +304,14 @@ xlog_header_check_recover(
|
||||
xfs_mount_t *mp,
|
||||
xlog_rec_header_t *head)
|
||||
{
|
||||
ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);
|
||||
ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
|
||||
|
||||
/*
|
||||
* IRIX doesn't write the h_fmt field and leaves it zeroed
|
||||
* (XLOG_FMT_UNKNOWN). This stops us from trying to recover
|
||||
* a dirty log created in IRIX.
|
||||
*/
|
||||
if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) {
|
||||
if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
|
||||
xfs_warn(mp,
|
||||
"dirty log written in incompatible format - can't recover");
|
||||
xlog_header_check_dump(mp, head);
|
||||
@ -333,7 +337,7 @@ xlog_header_check_mount(
|
||||
xfs_mount_t *mp,
|
||||
xlog_rec_header_t *head)
|
||||
{
|
||||
ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);
|
||||
ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
|
||||
|
||||
if (uuid_is_nil(&head->h_fs_uuid)) {
|
||||
/*
|
||||
@ -367,7 +371,7 @@ xlog_recover_iodone(
|
||||
xfs_force_shutdown(bp->b_target->bt_mount,
|
||||
SHUTDOWN_META_IO_ERROR);
|
||||
}
|
||||
XFS_BUF_CLR_IODONE_FUNC(bp);
|
||||
bp->b_iodone = NULL;
|
||||
xfs_buf_ioend(bp, 0);
|
||||
}
|
||||
|
||||
@ -534,7 +538,7 @@ xlog_find_verify_log_record(
|
||||
|
||||
head = (xlog_rec_header_t *)offset;
|
||||
|
||||
if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno))
|
||||
if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
|
||||
break;
|
||||
|
||||
if (!smallmem)
|
||||
@ -916,7 +920,7 @@ xlog_find_tail(
|
||||
if (error)
|
||||
goto done;
|
||||
|
||||
if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) {
|
||||
if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
|
||||
found = 1;
|
||||
break;
|
||||
}
|
||||
@ -933,8 +937,8 @@ xlog_find_tail(
|
||||
if (error)
|
||||
goto done;
|
||||
|
||||
if (XLOG_HEADER_MAGIC_NUM ==
|
||||
be32_to_cpu(*(__be32 *)offset)) {
|
||||
if (*(__be32 *)offset ==
|
||||
cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
|
||||
found = 2;
|
||||
break;
|
||||
}
|
||||
@ -1947,7 +1951,7 @@ xfs_qm_dqcheck(
|
||||
* This is all fine; things are still consistent, and we haven't lost
|
||||
* any quota information. Just don't complain about bad dquot blks.
|
||||
*/
|
||||
if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) {
|
||||
if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
|
||||
if (flags & XFS_QMOPT_DOWARN)
|
||||
xfs_alert(mp,
|
||||
"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
|
||||
@ -2174,7 +2178,7 @@ xlog_recover_buffer_pass2(
|
||||
error = xfs_bwrite(mp, bp);
|
||||
} else {
|
||||
ASSERT(bp->b_target->bt_mount == mp);
|
||||
XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
|
||||
bp->b_iodone = xlog_recover_iodone;
|
||||
xfs_bdwrite(mp, bp);
|
||||
}
|
||||
|
||||
@ -2238,7 +2242,7 @@ xlog_recover_inode_pass2(
|
||||
* Make sure the place we're flushing out to really looks
|
||||
* like an inode!
|
||||
*/
|
||||
if (unlikely(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)) {
|
||||
if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
|
||||
xfs_buf_relse(bp);
|
||||
xfs_alert(mp,
|
||||
"%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
|
||||
@ -2434,7 +2438,7 @@ xlog_recover_inode_pass2(
|
||||
|
||||
write_inode_buffer:
|
||||
ASSERT(bp->b_target->bt_mount == mp);
|
||||
XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
|
||||
bp->b_iodone = xlog_recover_iodone;
|
||||
xfs_bdwrite(mp, bp);
|
||||
error:
|
||||
if (need_free)
|
||||
@ -2556,7 +2560,7 @@ xlog_recover_dquot_pass2(
|
||||
|
||||
ASSERT(dq_f->qlf_size == 2);
|
||||
ASSERT(bp->b_target->bt_mount == mp);
|
||||
XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
|
||||
bp->b_iodone = xlog_recover_iodone;
|
||||
xfs_bdwrite(mp, bp);
|
||||
|
||||
return (0);
|
||||
@ -3295,7 +3299,7 @@ xlog_valid_rec_header(
|
||||
{
|
||||
int hlen;
|
||||
|
||||
if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) {
|
||||
if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
|
||||
XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
|
||||
XFS_ERRLEVEL_LOW, log->l_mp);
|
||||
return XFS_ERROR(EFSCORRUPTED);
|
||||
|
@ -348,7 +348,7 @@ xfs_mount_validate_sb(
|
||||
}
|
||||
|
||||
/*
|
||||
* More sanity checking. These were stolen directly from
|
||||
* More sanity checking. Most of these were stolen directly from
|
||||
* xfs_repair.
|
||||
*/
|
||||
if (unlikely(
|
||||
@ -371,23 +371,13 @@ xfs_mount_validate_sb(
|
||||
(sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) ||
|
||||
(sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) ||
|
||||
(sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) ||
|
||||
(sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */))) {
|
||||
(sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */) ||
|
||||
sbp->sb_dblocks == 0 ||
|
||||
sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp) ||
|
||||
sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp))) {
|
||||
if (loud)
|
||||
xfs_warn(mp, "SB sanity check 1 failed");
|
||||
return XFS_ERROR(EFSCORRUPTED);
|
||||
}
|
||||
|
||||
/*
|
||||
* Sanity check AG count, size fields against data size field
|
||||
*/
|
||||
if (unlikely(
|
||||
sbp->sb_dblocks == 0 ||
|
||||
sbp->sb_dblocks >
|
||||
(xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks ||
|
||||
sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) *
|
||||
sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) {
|
||||
if (loud)
|
||||
xfs_warn(mp, "SB sanity check 2 failed");
|
||||
XFS_CORRUPTION_ERROR("SB sanity check failed",
|
||||
XFS_ERRLEVEL_LOW, mp, sbp);
|
||||
return XFS_ERROR(EFSCORRUPTED);
|
||||
}
|
||||
|
||||
@ -864,7 +854,8 @@ xfs_update_alignment(xfs_mount_t *mp)
|
||||
if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
|
||||
(BBTOB(mp->m_swidth) & mp->m_blockmask)) {
|
||||
if (mp->m_flags & XFS_MOUNT_RETERR) {
|
||||
xfs_warn(mp, "alignment check 1 failed");
|
||||
xfs_warn(mp, "alignment check failed: "
|
||||
"(sunit/swidth vs. blocksize)");
|
||||
return XFS_ERROR(EINVAL);
|
||||
}
|
||||
mp->m_dalign = mp->m_swidth = 0;
|
||||
@ -875,6 +866,8 @@ xfs_update_alignment(xfs_mount_t *mp)
|
||||
mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
|
||||
if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
|
||||
if (mp->m_flags & XFS_MOUNT_RETERR) {
|
||||
xfs_warn(mp, "alignment check failed: "
|
||||
"(sunit/swidth vs. ag size)");
|
||||
return XFS_ERROR(EINVAL);
|
||||
}
|
||||
xfs_warn(mp,
|
||||
@ -889,8 +882,8 @@ xfs_update_alignment(xfs_mount_t *mp)
|
||||
mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
|
||||
} else {
|
||||
if (mp->m_flags & XFS_MOUNT_RETERR) {
|
||||
xfs_warn(mp,
|
||||
"stripe alignment turned off: sunit(%d) less than bsize(%d)",
|
||||
xfs_warn(mp, "alignment check failed: "
|
||||
"sunit(%d) less than bsize(%d)",
|
||||
mp->m_dalign,
|
||||
mp->m_blockmask +1);
|
||||
return XFS_ERROR(EINVAL);
|
||||
@ -1096,10 +1089,6 @@ xfs_mount_reset_sbqflags(
|
||||
if (mp->m_flags & XFS_MOUNT_RDONLY)
|
||||
return 0;
|
||||
|
||||
#ifdef QUOTADEBUG
|
||||
xfs_notice(mp, "Writing superblock quota changes");
|
||||
#endif
|
||||
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
|
||||
error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
|
||||
XFS_DEFAULT_LOG_COUNT);
|
||||
@ -1532,7 +1521,7 @@ xfs_unmountfs(
|
||||
xfs_warn(mp, "Unable to free reserved block pool. "
|
||||
"Freespace may not be correct on next mount.");
|
||||
|
||||
error = xfs_log_sbcount(mp, 1);
|
||||
error = xfs_log_sbcount(mp);
|
||||
if (error)
|
||||
xfs_warn(mp, "Unable to update superblock counters. "
|
||||
"Freespace may not be correct on next mount.");
|
||||
@ -1568,18 +1557,14 @@ xfs_fs_writable(xfs_mount_t *mp)
|
||||
/*
|
||||
* xfs_log_sbcount
|
||||
*
|
||||
* Called either periodically to keep the on disk superblock values
|
||||
* roughly up to date or from unmount to make sure the values are
|
||||
* correct on a clean unmount.
|
||||
* Sync the superblock counters to disk.
|
||||
*
|
||||
* Note this code can be called during the process of freezing, so
|
||||
* we may need to use the transaction allocator which does not not
|
||||
* we may need to use the transaction allocator which does not
|
||||
* block when the transaction subsystem is in its frozen state.
|
||||
*/
|
||||
int
|
||||
xfs_log_sbcount(
|
||||
xfs_mount_t *mp,
|
||||
uint sync)
|
||||
xfs_log_sbcount(xfs_mount_t *mp)
|
||||
{
|
||||
xfs_trans_t *tp;
|
||||
int error;
|
||||
@ -1605,8 +1590,7 @@ xfs_log_sbcount(
|
||||
}
|
||||
|
||||
xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
|
||||
if (sync)
|
||||
xfs_trans_set_sync(tp);
|
||||
xfs_trans_set_sync(tp);
|
||||
error = xfs_trans_commit(tp, 0);
|
||||
return error;
|
||||
}
|
||||
@ -1941,22 +1925,19 @@ unwind:
|
||||
* the superblock buffer if it can be locked without sleeping.
|
||||
* If it can't then we'll return NULL.
|
||||
*/
|
||||
xfs_buf_t *
|
||||
struct xfs_buf *
|
||||
xfs_getsb(
|
||||
xfs_mount_t *mp,
|
||||
int flags)
|
||||
struct xfs_mount *mp,
|
||||
int flags)
|
||||
{
|
||||
xfs_buf_t *bp;
|
||||
struct xfs_buf *bp = mp->m_sb_bp;
|
||||
|
||||
ASSERT(mp->m_sb_bp != NULL);
|
||||
bp = mp->m_sb_bp;
|
||||
if (flags & XBF_TRYLOCK) {
|
||||
if (!XFS_BUF_CPSEMA(bp)) {
|
||||
if (!xfs_buf_trylock(bp)) {
|
||||
if (flags & XBF_TRYLOCK)
|
||||
return NULL;
|
||||
}
|
||||
} else {
|
||||
XFS_BUF_PSEMA(bp, PRIBIO);
|
||||
xfs_buf_lock(bp);
|
||||
}
|
||||
|
||||
XFS_BUF_HOLD(bp);
|
||||
ASSERT(XFS_BUF_ISDONE(bp));
|
||||
return bp;
|
||||
|
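Aside: the rewritten xfs_getsb() boils down to a try-or-block lock followed by taking a hold; a self-contained pthread sketch of that pattern (illustrative names, not the kernel buffer code):

#include <pthread.h>
#include <stddef.h>

#define GETSB_TRYLOCK	0x1		/* illustrative flag */

struct buf {
	pthread_mutex_t	lock;
	int		hold;		/* reference count */
};

/* Return the buffer locked and held, or NULL if TRYLOCK was requested
 * and the lock could not be taken without blocking. */
static struct buf *getsb(struct buf *bp, int flags)
{
	if (flags & GETSB_TRYLOCK) {
		if (pthread_mutex_trylock(&bp->lock) != 0)
			return NULL;
	} else {
		pthread_mutex_lock(&bp->lock);
	}
	bp->hold++;
	return bp;
}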
@ -371,7 +371,7 @@ typedef struct xfs_mod_sb {
|
||||
int64_t msb_delta; /* Change to make to specified field */
|
||||
} xfs_mod_sb_t;
|
||||
|
||||
extern int xfs_log_sbcount(xfs_mount_t *, uint);
|
||||
extern int xfs_log_sbcount(xfs_mount_t *);
|
||||
extern __uint64_t xfs_default_resblks(xfs_mount_t *mp);
|
||||
extern int xfs_mountfs(xfs_mount_t *mp);
|
||||
|
||||
|
@ -1426,6 +1426,7 @@ xfs_trans_committed(
|
||||
static inline void
|
||||
xfs_log_item_batch_insert(
|
||||
struct xfs_ail *ailp,
|
||||
struct xfs_ail_cursor *cur,
|
||||
struct xfs_log_item **log_items,
|
||||
int nr_items,
|
||||
xfs_lsn_t commit_lsn)
|
||||
@ -1434,7 +1435,7 @@ xfs_log_item_batch_insert(
|
||||
|
||||
spin_lock(&ailp->xa_lock);
|
||||
/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
|
||||
xfs_trans_ail_update_bulk(ailp, log_items, nr_items, commit_lsn);
|
||||
xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
|
||||
|
||||
for (i = 0; i < nr_items; i++)
|
||||
IOP_UNPIN(log_items[i], 0);
|
||||
@ -1452,6 +1453,13 @@ xfs_log_item_batch_insert(
|
||||
* as an iclog write error even though we haven't started any IO yet. Hence in
|
||||
* this case all we need to do is IOP_COMMITTED processing, followed by an
|
||||
* IOP_UNPIN(aborted) call.
|
||||
*
|
||||
* The AIL cursor is used to optimise the insert process. If commit_lsn is not
|
||||
* at the end of the AIL, the insert cursor avoids the need to walk
|
||||
* the AIL to find the insertion point on every xfs_log_item_batch_insert()
|
||||
* call. This saves a lot of needless list walking and is a net win, even
|
||||
* though it slightly increases that amount of AIL lock traffic to set it up
|
||||
* and tear it down.
|
||||
*/
|
||||
void
|
||||
xfs_trans_committed_bulk(
|
||||
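Aside: the comment above explains why a cursor pays off for batched inserts at the same commit LSN; a minimal userspace sketch of the idea (singly linked list, illustrative names, not the AIL code):

#include <stdint.h>
#include <stddef.h>

/* Illustrative item ordered by LSN; not the kernel's xfs_log_item. */
struct item {
	uint64_t	lsn;
	struct item	*next;
};

/* Remembers where the last item for this commit LSN was inserted. */
struct cursor {
	struct item	*last;
};

/*
 * Insert @it into the LSN-ordered list.  The first insert walks the list
 * once; later inserts for the same commit LSN go straight after the
 * cursor, avoiding a rescan from the head for every batch.
 */
static void insert_at_cursor(struct item **head, struct cursor *cur,
			     struct item *it)
{
	if (!cur->last) {
		struct item **pp = head;

		while (*pp && (*pp)->lsn <= it->lsn)
			pp = &(*pp)->next;
		it->next = *pp;
		*pp = it;
	} else {
		/* assumes @it carries the same commit LSN as cur->last */
		it->next = cur->last->next;
		cur->last->next = it;
	}
	cur->last = it;
}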
@ -1463,8 +1471,13 @@ xfs_trans_committed_bulk(
|
||||
#define LOG_ITEM_BATCH_SIZE 32
|
||||
struct xfs_log_item *log_items[LOG_ITEM_BATCH_SIZE];
|
||||
struct xfs_log_vec *lv;
|
||||
struct xfs_ail_cursor cur;
|
||||
int i = 0;
|
||||
|
||||
spin_lock(&ailp->xa_lock);
|
||||
xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
|
||||
spin_unlock(&ailp->xa_lock);
|
||||
|
||||
/* unpin all the log items */
|
||||
for (lv = log_vector; lv; lv = lv->lv_next ) {
|
||||
struct xfs_log_item *lip = lv->lv_item;
|
||||
@ -1493,7 +1506,9 @@ xfs_trans_committed_bulk(
|
||||
/*
|
||||
* Not a bulk update option due to unusual item_lsn.
|
||||
* Push into AIL immediately, rechecking the lsn once
|
||||
* we have the ail lock. Then unpin the item.
|
||||
* we have the ail lock. Then unpin the item. This does
|
||||
* not affect the AIL cursor the bulk insert path is
|
||||
* using.
|
||||
*/
|
||||
spin_lock(&ailp->xa_lock);
|
||||
if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
|
||||
@ -1507,7 +1522,7 @@ xfs_trans_committed_bulk(
|
||||
/* Item is a candidate for bulk AIL insert. */
|
||||
log_items[i++] = lv->lv_item;
|
||||
if (i >= LOG_ITEM_BATCH_SIZE) {
|
||||
xfs_log_item_batch_insert(ailp, log_items,
|
||||
xfs_log_item_batch_insert(ailp, &cur, log_items,
|
||||
LOG_ITEM_BATCH_SIZE, commit_lsn);
|
||||
i = 0;
|
||||
}
|
||||
@ -1515,7 +1530,11 @@ xfs_trans_committed_bulk(
|
||||
|
||||
/* make sure we insert the remainder! */
|
||||
if (i)
|
||||
xfs_log_item_batch_insert(ailp, log_items, i, commit_lsn);
|
||||
xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
|
||||
|
||||
spin_lock(&ailp->xa_lock);
|
||||
xfs_trans_ail_cursor_done(ailp, &cur);
|
||||
spin_unlock(&ailp->xa_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -163,17 +163,11 @@ xfs_ail_max_lsn(
|
||||
}
|
||||
|
||||
/*
|
||||
* AIL traversal cursor initialisation.
|
||||
*
|
||||
* The cursor keeps track of where our current traversal is up
|
||||
* to by tracking the next item in the list for us. However, for
|
||||
* this to be safe, removing an object from the AIL needs to invalidate
|
||||
* any cursor that points to it. hence the traversal cursor needs to
|
||||
* be linked to the struct xfs_ail so that deletion can search all the
|
||||
* active cursors for invalidation.
|
||||
*
|
||||
* We don't link the push cursor because it is embedded in the struct
|
||||
* xfs_ail and hence easily findable.
|
||||
* The cursor keeps track of where our current traversal is up to by tracking
|
||||
* the next item in the list for us. However, for this to be safe, removing an
|
||||
* object from the AIL needs to invalidate any cursor that points to it. hence
|
||||
* the traversal cursor needs to be linked to the struct xfs_ail so that
|
||||
* deletion can search all the active cursors for invalidation.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_trans_ail_cursor_init(
|
||||
@ -181,31 +175,12 @@ xfs_trans_ail_cursor_init(
|
||||
struct xfs_ail_cursor *cur)
|
||||
{
|
||||
cur->item = NULL;
|
||||
if (cur == &ailp->xa_cursors)
|
||||
return;
|
||||
|
||||
cur->next = ailp->xa_cursors.next;
|
||||
ailp->xa_cursors.next = cur;
|
||||
list_add_tail(&cur->list, &ailp->xa_cursors);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the cursor to the next item, because when we look
|
||||
* up the cursor the current item may have been freed.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_trans_ail_cursor_set(
|
||||
struct xfs_ail *ailp,
|
||||
struct xfs_ail_cursor *cur,
|
||||
struct xfs_log_item *lip)
|
||||
{
|
||||
if (lip)
|
||||
cur->item = xfs_ail_next(ailp, lip);
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the next item in the traversal and advance the cursor.
|
||||
* If the cursor was invalidated (inidicated by a lip of 1),
|
||||
* restart the traversal.
|
||||
* Get the next item in the traversal and advance the cursor. If the cursor
|
||||
* was invalidated (indicated by a lip of 1), restart the traversal.
|
||||
*/
|
||||
struct xfs_log_item *
|
||||
xfs_trans_ail_cursor_next(
|
||||
@ -216,45 +191,31 @@ xfs_trans_ail_cursor_next(
|
||||
|
||||
if ((__psint_t)lip & 1)
|
||||
lip = xfs_ail_min(ailp);
|
||||
xfs_trans_ail_cursor_set(ailp, cur, lip);
|
||||
if (lip)
|
||||
cur->item = xfs_ail_next(ailp, lip);
|
||||
return lip;
|
||||
}
|
||||
|
||||
/*
|
||||
* Now that the traversal is complete, we need to remove the cursor
|
||||
* from the list of traversing cursors. Avoid removing the embedded
|
||||
* push cursor, but use the fact it is always present to make the
|
||||
* list deletion simple.
|
||||
* When the traversal is complete, we need to remove the cursor from the list
|
||||
* of traversing cursors.
|
||||
*/
|
||||
void
|
||||
xfs_trans_ail_cursor_done(
|
||||
struct xfs_ail *ailp,
|
||||
struct xfs_ail_cursor *done)
|
||||
struct xfs_ail_cursor *cur)
|
||||
{
|
||||
struct xfs_ail_cursor *prev = NULL;
|
||||
struct xfs_ail_cursor *cur;
|
||||
|
||||
done->item = NULL;
|
||||
if (done == &ailp->xa_cursors)
|
||||
return;
|
||||
prev = &ailp->xa_cursors;
|
||||
for (cur = prev->next; cur; prev = cur, cur = prev->next) {
|
||||
if (cur == done) {
|
||||
prev->next = cur->next;
|
||||
break;
|
||||
}
|
||||
}
|
||||
ASSERT(cur);
|
||||
cur->item = NULL;
|
||||
list_del_init(&cur->list);
|
||||
}
|
||||
|
||||
/*
|
||||
* Invalidate any cursor that is pointing to this item. This is
|
||||
* called when an item is removed from the AIL. Any cursor pointing
|
||||
* to this object is now invalid and the traversal needs to be
|
||||
* terminated so it doesn't reference a freed object. We set the
|
||||
* cursor item to a value of 1 so we can distinguish between an
|
||||
* invalidation and the end of the list when getting the next item
|
||||
* from the cursor.
|
||||
* Invalidate any cursor that is pointing to this item. This is called when an
|
||||
* item is removed from the AIL. Any cursor pointing to this object is now
|
||||
* invalid and the traversal needs to be terminated so it doesn't reference a
|
||||
* freed object. We set the low bit of the cursor item pointer so we can
|
||||
* distinguish between an invalidation and the end of the list when getting the
|
||||
* next item from the cursor.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_trans_ail_cursor_clear(
|
||||
@ -263,8 +224,7 @@ xfs_trans_ail_cursor_clear(
|
||||
{
|
||||
struct xfs_ail_cursor *cur;
|
||||
|
||||
/* need to search all cursors */
|
||||
for (cur = &ailp->xa_cursors; cur; cur = cur->next) {
|
||||
list_for_each_entry(cur, &ailp->xa_cursors, list) {
|
||||
if (cur->item == lip)
|
||||
cur->item = (struct xfs_log_item *)
|
||||
((__psint_t)cur->item | 1);
|
||||
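Aside: cursor invalidation relies on tagging the low bit of the item pointer; a standalone sketch of that pointer-tagging trick (assumes the pointed-to objects are at least 2-byte aligned; illustrative helpers, not the kernel's):

#include <stdint.h>
#include <stdbool.h>

/* Mark a pointer as "invalidated" by setting its low bit. */
static void *mark_invalid(void *p)
{
	return (void *)((uintptr_t)p | 1);
}

/* An invalidated cursor can be told apart from a NULL end-of-list. */
static bool is_invalid(const void *p)
{
	return ((uintptr_t)p & 1) != 0;
}

/* Recover the original pointer if it is ever needed again. */
static void *strip_tag(void *p)
{
	return (void *)((uintptr_t)p & ~(uintptr_t)1);
}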
@ -272,9 +232,10 @@ xfs_trans_ail_cursor_clear(
|
||||
}
|
||||
|
||||
/*
|
||||
* Return the item in the AIL with the current lsn.
|
||||
* Return the current tree generation number for use
|
||||
* in calls to xfs_trans_next_ail().
|
||||
* Find the first item in the AIL with the given @lsn by searching in ascending
|
||||
* LSN order and initialise the cursor to point to the next item for a
|
||||
* ascending traversal. Pass a @lsn of zero to initialise the cursor to the
|
||||
* first item in the AIL. Returns NULL if the list is empty.
|
||||
*/
|
||||
xfs_log_item_t *
|
||||
xfs_trans_ail_cursor_first(
|
||||
@ -285,46 +246,112 @@ xfs_trans_ail_cursor_first(
|
||||
xfs_log_item_t *lip;
|
||||
|
||||
xfs_trans_ail_cursor_init(ailp, cur);
|
||||
lip = xfs_ail_min(ailp);
|
||||
if (lsn == 0)
|
||||
|
||||
if (lsn == 0) {
|
||||
lip = xfs_ail_min(ailp);
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
|
||||
if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
|
||||
goto out;
|
||||
}
|
||||
lip = NULL;
|
||||
return NULL;
|
||||
|
||||
out:
|
||||
xfs_trans_ail_cursor_set(ailp, cur, lip);
|
||||
if (lip)
|
||||
cur->item = xfs_ail_next(ailp, lip);
|
||||
return lip;
|
||||
}
|
||||
|
||||
static struct xfs_log_item *
|
||||
__xfs_trans_ail_cursor_last(
|
||||
struct xfs_ail *ailp,
|
||||
xfs_lsn_t lsn)
|
||||
{
|
||||
xfs_log_item_t *lip;
|
||||
|
||||
list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
|
||||
if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
|
||||
return lip;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* splice the log item list into the AIL at the given LSN.
|
||||
* Find the last item in the AIL with the given @lsn by searching in descending
|
||||
* LSN order and initialise the cursor to point to that item. If there is no
|
||||
* item with the value of @lsn, then it sets the cursor to the last item with an
|
||||
* LSN lower than @lsn. Returns NULL if the list is empty.
|
||||
*/
|
||||
struct xfs_log_item *
|
||||
xfs_trans_ail_cursor_last(
|
||||
struct xfs_ail *ailp,
|
||||
struct xfs_ail_cursor *cur,
|
||||
xfs_lsn_t lsn)
|
||||
{
|
||||
xfs_trans_ail_cursor_init(ailp, cur);
|
||||
cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
|
||||
return cur->item;
|
||||
}
|
||||
|
||||
/*
|
||||
* Splice the log item list into the AIL at the given LSN. We splice to the
|
||||
* tail of the given LSN to maintain insert order for push traversals. The
|
||||
* cursor is optional, allowing repeated updates to the same LSN to avoid
|
||||
* repeated traversals.
|
||||
*/
|
||||
static void
|
||||
xfs_ail_splice(
|
||||
struct xfs_ail *ailp,
|
||||
struct list_head *list,
|
||||
xfs_lsn_t lsn)
|
||||
struct xfs_ail *ailp,
|
||||
struct xfs_ail_cursor *cur,
|
||||
struct list_head *list,
|
||||
xfs_lsn_t lsn)
|
||||
{
|
||||
xfs_log_item_t *next_lip;
|
||||
struct xfs_log_item *lip = cur ? cur->item : NULL;
|
||||
struct xfs_log_item *next_lip;
|
||||
|
||||
/* If the list is empty, just insert the item. */
|
||||
if (list_empty(&ailp->xa_ail)) {
|
||||
list_splice(list, &ailp->xa_ail);
|
||||
return;
|
||||
/*
|
||||
* Get a new cursor if we don't have a placeholder or the existing one
|
||||
* has been invalidated.
|
||||
*/
|
||||
if (!lip || (__psint_t)lip & 1) {
|
||||
lip = __xfs_trans_ail_cursor_last(ailp, lsn);
|
||||
|
||||
if (!lip) {
|
||||
/* The list is empty, so just splice and return. */
|
||||
if (cur)
|
||||
cur->item = NULL;
|
||||
list_splice(list, &ailp->xa_ail);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
|
||||
if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
|
||||
break;
|
||||
/*
|
||||
* Our cursor points to the item we want to insert _after_, so we have
|
||||
* to update the cursor to point to the end of the list we are splicing
|
||||
* in so that it points to the correct location for the next splice.
|
||||
* i.e. before the splice
|
||||
*
|
||||
* lsn -> lsn -> lsn + x -> lsn + x ...
|
||||
* ^
|
||||
* | cursor points here
|
||||
*
|
||||
* After the splice we have:
|
||||
*
|
||||
* lsn -> lsn -> lsn -> lsn -> .... -> lsn -> lsn + x -> lsn + x ...
|
||||
* ^ ^
|
||||
* | cursor points here | needs to move here
|
||||
*
|
||||
* So we set the cursor to the last item in the list to be spliced
|
||||
* before we execute the splice, resulting in the cursor pointing to
|
||||
* the correct item after the splice occurs.
|
||||
*/
|
||||
if (cur) {
|
||||
next_lip = list_entry(list->prev, struct xfs_log_item, li_ail);
|
||||
cur->item = next_lip;
|
||||
}
|
||||
|
||||
ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
|
||||
XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0);
|
||||
|
||||
list_splice_init(list, &next_lip->li_ail);
|
||||
list_splice(list, &lip->li_ail);
|
||||
}
|
||||
|
||||
/*
|
||||
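Aside: the diagram above is the whole trick; a self-contained sketch of splicing a same-LSN run after the cursor and then advancing the cursor to the end of the run (illustrative singly linked types, not list_splice(); assumes the cursor already points into a non-empty list):

#include <stdint.h>

struct node {
	uint64_t	lsn;
	struct node	*next;
};

struct ail_cursor {
	struct node	*item;		/* insert-after position */
};

/*
 * Splice the pre-linked chain first..last (all at the cursor's LSN) after
 * the cursor position, then move the cursor to @last so the next splice
 * at this LSN lands after the run we just inserted.
 */
static void splice_after_cursor(struct ail_cursor *cur,
				struct node *first, struct node *last)
{
	struct node *pos = cur->item;

	last->next = pos->next;
	pos->next = first;
	cur->item = last;
}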
@ -351,7 +378,7 @@ xfs_ail_worker(
|
||||
struct xfs_ail *ailp = container_of(to_delayed_work(work),
|
||||
struct xfs_ail, xa_work);
|
||||
xfs_mount_t *mp = ailp->xa_mount;
|
||||
struct xfs_ail_cursor *cur = &ailp->xa_cursors;
|
||||
struct xfs_ail_cursor cur;
|
||||
xfs_log_item_t *lip;
|
||||
xfs_lsn_t lsn;
|
||||
xfs_lsn_t target;
|
||||
@ -363,13 +390,12 @@ xfs_ail_worker(
|
||||
|
||||
spin_lock(&ailp->xa_lock);
|
||||
target = ailp->xa_target;
|
||||
xfs_trans_ail_cursor_init(ailp, cur);
|
||||
lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
|
||||
lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn);
|
||||
if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
|
||||
/*
|
||||
* AIL is empty or our push has reached the end.
|
||||
*/
|
||||
xfs_trans_ail_cursor_done(ailp, cur);
|
||||
xfs_trans_ail_cursor_done(ailp, &cur);
|
||||
spin_unlock(&ailp->xa_lock);
|
||||
goto out_done;
|
||||
}
|
||||
@ -457,12 +483,12 @@ xfs_ail_worker(
|
||||
if (stuck > 100)
|
||||
break;
|
||||
|
||||
lip = xfs_trans_ail_cursor_next(ailp, cur);
|
||||
lip = xfs_trans_ail_cursor_next(ailp, &cur);
|
||||
if (lip == NULL)
|
||||
break;
|
||||
lsn = lip->li_lsn;
|
||||
}
|
||||
xfs_trans_ail_cursor_done(ailp, cur);
|
||||
xfs_trans_ail_cursor_done(ailp, &cur);
|
||||
spin_unlock(&ailp->xa_lock);
|
||||
|
||||
if (flush_log) {
|
||||
@ -645,6 +671,7 @@ xfs_trans_unlocked_item(
|
||||
void
|
||||
xfs_trans_ail_update_bulk(
|
||||
struct xfs_ail *ailp,
|
||||
struct xfs_ail_cursor *cur,
|
||||
struct xfs_log_item **log_items,
|
||||
int nr_items,
|
||||
xfs_lsn_t lsn) __releases(ailp->xa_lock)
|
||||
@ -674,7 +701,7 @@ xfs_trans_ail_update_bulk(
|
||||
list_add(&lip->li_ail, &tmp);
|
||||
}
|
||||
|
||||
xfs_ail_splice(ailp, &tmp, lsn);
|
||||
xfs_ail_splice(ailp, cur, &tmp, lsn);
|
||||
|
||||
if (!mlip_changed) {
|
||||
spin_unlock(&ailp->xa_lock);
|
||||
@ -793,6 +820,7 @@ xfs_trans_ail_init(
|
||||
|
||||
ailp->xa_mount = mp;
|
||||
INIT_LIST_HEAD(&ailp->xa_ail);
|
||||
INIT_LIST_HEAD(&ailp->xa_cursors);
|
||||
spin_lock_init(&ailp->xa_lock);
|
||||
INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
|
||||
mp->m_ail = ailp;
|
||||
|
@ -81,7 +81,7 @@ _xfs_trans_bjoin(
|
||||
struct xfs_buf_log_item *bip;
|
||||
|
||||
ASSERT(XFS_BUF_ISBUSY(bp));
|
||||
ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);
|
||||
ASSERT(bp->b_transp == NULL);
|
||||
|
||||
/*
|
||||
* The xfs_buf_log_item pointer is stored in b_fsprivate. If
|
||||
@ -89,7 +89,7 @@ _xfs_trans_bjoin(
|
||||
* The checks to see if one is there are in xfs_buf_item_init().
|
||||
*/
|
||||
xfs_buf_item_init(bp, tp->t_mountp);
|
||||
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
|
||||
bip = bp->b_fspriv;
|
||||
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
|
||||
ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
|
||||
ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
|
||||
@ -110,7 +110,7 @@ _xfs_trans_bjoin(
|
||||
* Initialize b_fsprivate2 so we can find it with incore_match()
|
||||
* in xfs_trans_get_buf() and friends above.
|
||||
*/
|
||||
XFS_BUF_SET_FSPRIVATE2(bp, tp);
|
||||
bp->b_transp = tp;
|
||||
|
||||
}
|
||||
|
||||
@ -160,7 +160,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
|
||||
*/
|
||||
bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
|
||||
if (bp != NULL) {
|
||||
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
|
||||
ASSERT(xfs_buf_islocked(bp));
|
||||
if (XFS_FORCED_SHUTDOWN(tp->t_mountp))
|
||||
XFS_BUF_SUPER_STALE(bp);
|
||||
|
||||
@ -172,8 +172,8 @@ xfs_trans_get_buf(xfs_trans_t *tp,
|
||||
else if (XFS_BUF_ISSTALE(bp))
|
||||
ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
|
||||
|
||||
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
|
||||
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
|
||||
ASSERT(bp->b_transp == tp);
|
||||
bip = bp->b_fspriv;
|
||||
ASSERT(bip != NULL);
|
||||
ASSERT(atomic_read(&bip->bli_refcount) > 0);
|
||||
bip->bli_recur++;
|
||||
@ -232,8 +232,8 @@ xfs_trans_getsb(xfs_trans_t *tp,
|
||||
* recursion count and return the buffer to the caller.
|
||||
*/
|
||||
bp = mp->m_sb_bp;
|
||||
if (XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp) {
|
||||
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
|
||||
if (bp->b_transp == tp) {
|
||||
bip = bp->b_fspriv;
|
||||
ASSERT(bip != NULL);
|
||||
ASSERT(atomic_read(&bip->bli_refcount) > 0);
|
||||
bip->bli_recur++;
|
||||
@ -327,9 +327,9 @@ xfs_trans_read_buf(
|
||||
*/
|
||||
bp = xfs_trans_buf_item_match(tp, target, blkno, len);
|
||||
if (bp != NULL) {
|
||||
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
|
||||
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
|
||||
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
|
||||
ASSERT(xfs_buf_islocked(bp));
|
||||
ASSERT(bp->b_transp == tp);
|
||||
ASSERT(bp->b_fspriv != NULL);
|
||||
ASSERT((XFS_BUF_ISERROR(bp)) == 0);
|
||||
if (!(XFS_BUF_ISDONE(bp))) {
|
||||
trace_xfs_trans_read_buf_io(bp, _RET_IP_);
|
||||
@ -363,7 +363,7 @@ xfs_trans_read_buf(
|
||||
}
|
||||
|
||||
|
||||
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
|
||||
bip = bp->b_fspriv;
|
||||
bip->bli_recur++;
|
||||
|
||||
ASSERT(atomic_read(&bip->bli_refcount) > 0);
|
||||
@ -460,32 +460,30 @@ xfs_trans_brelse(xfs_trans_t *tp,
|
||||
xfs_buf_t *bp)
|
||||
{
|
||||
xfs_buf_log_item_t *bip;
|
||||
xfs_log_item_t *lip;
|
||||
|
||||
/*
|
||||
* Default to a normal brelse() call if the tp is NULL.
|
||||
*/
|
||||
if (tp == NULL) {
|
||||
ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);
|
||||
struct xfs_log_item *lip = bp->b_fspriv;
|
||||
|
||||
ASSERT(bp->b_transp == NULL);
|
||||
|
||||
/*
|
||||
* If there's a buf log item attached to the buffer,
|
||||
* then let the AIL know that the buffer is being
|
||||
* unlocked.
|
||||
*/
|
||||
if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
|
||||
lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
|
||||
if (lip->li_type == XFS_LI_BUF) {
|
||||
bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*);
|
||||
xfs_trans_unlocked_item(bip->bli_item.li_ailp,
|
||||
lip);
|
||||
}
|
||||
if (lip != NULL && lip->li_type == XFS_LI_BUF) {
|
||||
bip = bp->b_fspriv;
|
||||
xfs_trans_unlocked_item(bip->bli_item.li_ailp, lip);
|
||||
}
|
||||
xfs_buf_relse(bp);
|
||||
return;
|
||||
}
|
||||
|
||||
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
|
||||
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
|
||||
ASSERT(bp->b_transp == tp);
|
||||
bip = bp->b_fspriv;
|
||||
ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
|
||||
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
|
||||
ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
|
||||
@ -556,7 +554,7 @@ xfs_trans_brelse(xfs_trans_t *tp,
|
||||
xfs_buf_item_relse(bp);
|
||||
bip = NULL;
|
||||
}
|
||||
XFS_BUF_SET_FSPRIVATE2(bp, NULL);
|
||||
bp->b_transp = NULL;
|
||||
|
||||
/*
|
||||
* If we've still got a buf log item on the buffer, then
|
||||
@ -581,16 +579,15 @@ void
|
||||
xfs_trans_bhold(xfs_trans_t *tp,
|
||||
xfs_buf_t *bp)
|
||||
{
|
||||
xfs_buf_log_item_t *bip;
|
||||
xfs_buf_log_item_t *bip = bp->b_fspriv;
|
||||
|
||||
ASSERT(XFS_BUF_ISBUSY(bp));
|
||||
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
|
||||
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
|
||||
|
||||
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
|
||||
ASSERT(bp->b_transp == tp);
|
||||
ASSERT(bip != NULL);
|
||||
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
|
||||
ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
|
||||
ASSERT(atomic_read(&bip->bli_refcount) > 0);
|
||||
|
||||
bip->bli_flags |= XFS_BLI_HOLD;
|
||||
trace_xfs_trans_bhold(bip);
|
||||
}
|
||||
@ -603,19 +600,17 @@ void
|
||||
xfs_trans_bhold_release(xfs_trans_t *tp,
|
||||
xfs_buf_t *bp)
|
||||
{
|
||||
xfs_buf_log_item_t *bip;
|
||||
xfs_buf_log_item_t *bip = bp->b_fspriv;
|
||||
|
||||
ASSERT(XFS_BUF_ISBUSY(bp));
|
||||
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
|
||||
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
|
||||
|
||||
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
|
||||
ASSERT(bp->b_transp == tp);
|
||||
ASSERT(bip != NULL);
|
||||
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
|
||||
ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
|
||||
ASSERT(atomic_read(&bip->bli_refcount) > 0);
|
||||
ASSERT(bip->bli_flags & XFS_BLI_HOLD);
|
||||
bip->bli_flags &= ~XFS_BLI_HOLD;
|
||||
|
||||
bip->bli_flags &= ~XFS_BLI_HOLD;
|
||||
trace_xfs_trans_bhold_release(bip);
|
||||
}
|
||||
|
||||
@ -634,14 +629,14 @@ xfs_trans_log_buf(xfs_trans_t *tp,
|
||||
uint first,
|
||||
uint last)
|
||||
{
|
||||
xfs_buf_log_item_t *bip;
|
||||
xfs_buf_log_item_t *bip = bp->b_fspriv;
|
||||
|
||||
ASSERT(XFS_BUF_ISBUSY(bp));
|
||||
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
|
||||
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
|
||||
ASSERT(bp->b_transp == tp);
|
||||
ASSERT(bip != NULL);
|
||||
ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp)));
|
||||
ASSERT((XFS_BUF_IODONE_FUNC(bp) == NULL) ||
|
||||
(XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks));
|
||||
ASSERT(bp->b_iodone == NULL ||
|
||||
bp->b_iodone == xfs_buf_iodone_callbacks);
|
||||
|
||||
/*
|
||||
* Mark the buffer as needing to be written out eventually,
|
||||
@ -656,9 +651,8 @@ xfs_trans_log_buf(xfs_trans_t *tp,
|
||||
XFS_BUF_DELAYWRITE(bp);
|
||||
XFS_BUF_DONE(bp);
|
||||
|
||||
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
|
||||
ASSERT(atomic_read(&bip->bli_refcount) > 0);
|
||||
XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
|
||||
bp->b_iodone = xfs_buf_iodone_callbacks;
|
||||
bip->bli_item.li_cb = xfs_buf_iodone;
|
||||
|
||||
trace_xfs_trans_log_buf(bip);
|
||||
@ -706,13 +700,11 @@ xfs_trans_binval(
|
||||
xfs_trans_t *tp,
|
||||
xfs_buf_t *bp)
|
||||
{
|
||||
xfs_buf_log_item_t *bip;
|
||||
xfs_buf_log_item_t *bip = bp->b_fspriv;
|
||||
|
||||
ASSERT(XFS_BUF_ISBUSY(bp));
|
||||
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
|
||||
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
|
||||
|
||||
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
|
||||
ASSERT(bp->b_transp == tp);
|
||||
ASSERT(bip != NULL);
|
||||
ASSERT(atomic_read(&bip->bli_refcount) > 0);
|
||||
|
||||
trace_xfs_trans_binval(bip);
|
||||
@ -780,13 +772,11 @@ xfs_trans_inode_buf(
|
||||
xfs_trans_t *tp,
|
||||
xfs_buf_t *bp)
|
||||
{
|
||||
xfs_buf_log_item_t *bip;
|
||||
xfs_buf_log_item_t *bip = bp->b_fspriv;
|
||||
|
||||
ASSERT(XFS_BUF_ISBUSY(bp));
|
||||
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
|
||||
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
|
||||
|
||||
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
|
||||
ASSERT(bp->b_transp == tp);
|
||||
ASSERT(bip != NULL);
|
||||
ASSERT(atomic_read(&bip->bli_refcount) > 0);
|
||||
|
||||
bip->bli_flags |= XFS_BLI_INODE_BUF;
|
||||
@ -806,13 +796,11 @@ xfs_trans_stale_inode_buf(
|
||||
xfs_trans_t *tp,
|
||||
xfs_buf_t *bp)
|
||||
{
|
||||
xfs_buf_log_item_t *bip;
|
||||
xfs_buf_log_item_t *bip = bp->b_fspriv;
|
||||
|
||||
ASSERT(XFS_BUF_ISBUSY(bp));
|
||||
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
|
||||
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
|
||||
|
||||
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
|
||||
ASSERT(bp->b_transp == tp);
|
||||
ASSERT(bip != NULL);
|
||||
ASSERT(atomic_read(&bip->bli_refcount) > 0);
|
||||
|
||||
bip->bli_flags |= XFS_BLI_STALE_INODE;
|
||||
@ -833,13 +821,11 @@ xfs_trans_inode_alloc_buf(
|
||||
xfs_trans_t *tp,
|
||||
xfs_buf_t *bp)
|
||||
{
|
||||
xfs_buf_log_item_t *bip;
|
||||
xfs_buf_log_item_t *bip = bp->b_fspriv;
|
||||
|
||||
ASSERT(XFS_BUF_ISBUSY(bp));
|
||||
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
|
||||
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
|
||||
|
||||
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
|
||||
ASSERT(bp->b_transp == tp);
|
||||
ASSERT(bip != NULL);
|
||||
ASSERT(atomic_read(&bip->bli_refcount) > 0);
|
||||
|
||||
bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
|
||||
@ -863,16 +849,14 @@ xfs_trans_dquot_buf(
|
||||
xfs_buf_t *bp,
|
||||
uint type)
|
||||
{
|
||||
xfs_buf_log_item_t *bip;
|
||||
xfs_buf_log_item_t *bip = bp->b_fspriv;
|
||||
|
||||
ASSERT(XFS_BUF_ISBUSY(bp));
|
||||
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
|
||||
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
|
||||
ASSERT(bp->b_transp == tp);
|
||||
ASSERT(bip != NULL);
|
||||
ASSERT(type == XFS_BLF_UDQUOT_BUF ||
|
||||
type == XFS_BLF_PDQUOT_BUF ||
|
||||
type == XFS_BLF_GDQUOT_BUF);
|
||||
|
||||
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
|
||||
ASSERT(atomic_read(&bip->bli_refcount) > 0);
|
||||
|
||||
bip->bli_format.blf_flags |= type;
|
||||
|
@ -55,7 +55,6 @@ xfs_trans_ijoin(
|
||||
{
|
||||
xfs_inode_log_item_t *iip;
|
||||
|
||||
ASSERT(ip->i_transp == NULL);
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
if (ip->i_itemp == NULL)
|
||||
xfs_inode_item_init(ip, ip->i_mount);
|
||||
@ -68,12 +67,6 @@ xfs_trans_ijoin(
|
||||
xfs_trans_add_item(tp, &iip->ili_item);
|
||||
|
||||
xfs_trans_inode_broot_debug(ip);
|
||||
|
||||
/*
|
||||
* Initialize i_transp so we can find it with xfs_inode_incore()
|
||||
* in xfs_trans_iget() above.
|
||||
*/
|
||||
ip->i_transp = tp;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -111,7 +104,6 @@ xfs_trans_ichgtime(
|
||||
|
||||
ASSERT(tp);
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
ASSERT(ip->i_transp == tp);
|
||||
|
||||
tv = current_fs_time(inode->i_sb);
|
||||
|
||||
@ -140,7 +132,6 @@ xfs_trans_log_inode(
|
||||
xfs_inode_t *ip,
|
||||
uint flags)
|
||||
{
|
||||
ASSERT(ip->i_transp == tp);
|
||||
ASSERT(ip->i_itemp != NULL);
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
|
||||
|
@ -53,7 +53,7 @@ void xfs_trans_committed_bulk(struct xfs_ail *ailp, struct xfs_log_vec *lv,
|
||||
* of the list to trigger traversal restarts.
|
||||
*/
|
||||
struct xfs_ail_cursor {
|
||||
struct xfs_ail_cursor *next;
|
||||
struct list_head list;
|
||||
struct xfs_log_item *item;
|
||||
};
|
||||
|
||||
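Aside: the cursor list moves from a hand-rolled singly linked chain to list_head linkage, which is why cursor removal collapses to a single list_del_init(); a tiny standalone version of those primitives (illustrative, mirrors but is not <linux/list.h>):

#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

/* Add @new just before @head, i.e. at the tail of the list. */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* Unlink @entry in O(1) and leave it in a self-pointing, reusable state. */
static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}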
@ -66,7 +66,7 @@ struct xfs_ail {
|
||||
struct xfs_mount *xa_mount;
|
||||
struct list_head xa_ail;
|
||||
xfs_lsn_t xa_target;
|
||||
struct xfs_ail_cursor xa_cursors;
|
||||
struct list_head xa_cursors;
|
||||
spinlock_t xa_lock;
|
||||
struct delayed_work xa_work;
|
||||
xfs_lsn_t xa_last_pushed_lsn;
|
||||
@ -82,6 +82,7 @@ struct xfs_ail {
|
||||
extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
|
||||
|
||||
void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
|
||||
struct xfs_ail_cursor *cur,
|
||||
struct xfs_log_item **log_items, int nr_items,
|
||||
xfs_lsn_t lsn) __releases(ailp->xa_lock);
|
||||
static inline void
|
||||
@ -90,7 +91,7 @@ xfs_trans_ail_update(
|
||||
struct xfs_log_item *lip,
|
||||
xfs_lsn_t lsn) __releases(ailp->xa_lock)
|
||||
{
|
||||
xfs_trans_ail_update_bulk(ailp, &lip, 1, lsn);
|
||||
xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
|
||||
}
|
||||
|
||||
void xfs_trans_ail_delete_bulk(struct xfs_ail *ailp,
|
||||
@ -111,10 +112,13 @@ xfs_lsn_t xfs_ail_min_lsn(struct xfs_ail *ailp);
|
||||
void xfs_trans_unlocked_item(struct xfs_ail *,
|
||||
xfs_log_item_t *);
|
||||
|
||||
struct xfs_log_item *xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
|
||||
struct xfs_log_item * xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
|
||||
struct xfs_ail_cursor *cur,
|
||||
xfs_lsn_t lsn);
|
||||
struct xfs_log_item *xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
|
||||
struct xfs_log_item * xfs_trans_ail_cursor_last(struct xfs_ail *ailp,
|
||||
struct xfs_ail_cursor *cur,
|
||||
xfs_lsn_t lsn);
|
||||
struct xfs_log_item * xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
|
||||
struct xfs_ail_cursor *cur);
|
||||
void xfs_trans_ail_cursor_done(struct xfs_ail *ailp,
|
||||
struct xfs_ail_cursor *cur);
|
||||
|
@ -50,430 +50,6 @@
|
||||
#include "xfs_vnodeops.h"
|
||||
#include "xfs_trace.h"
|
||||
|
||||
int
|
||||
xfs_setattr(
|
||||
struct xfs_inode *ip,
|
||||
struct iattr *iattr,
|
||||
int flags)
|
||||
{
|
||||
xfs_mount_t *mp = ip->i_mount;
|
||||
struct inode *inode = VFS_I(ip);
|
||||
int mask = iattr->ia_valid;
|
||||
xfs_trans_t *tp;
|
||||
int code;
|
||||
uint lock_flags;
|
||||
uint commit_flags=0;
|
||||
uid_t uid=0, iuid=0;
|
||||
gid_t gid=0, igid=0;
|
||||
struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2;
|
||||
int need_iolock = 1;
|
||||
|
||||
trace_xfs_setattr(ip);
|
||||
|
||||
if (mp->m_flags & XFS_MOUNT_RDONLY)
|
||||
return XFS_ERROR(EROFS);
|
||||
|
||||
if (XFS_FORCED_SHUTDOWN(mp))
|
||||
return XFS_ERROR(EIO);
|
||||
|
||||
code = -inode_change_ok(inode, iattr);
|
||||
if (code)
|
||||
return code;
|
||||
|
||||
olddquot1 = olddquot2 = NULL;
|
||||
udqp = gdqp = NULL;
|
||||
|
||||
/*
|
||||
* If disk quotas is on, we make sure that the dquots do exist on disk,
|
||||
* before we start any other transactions. Trying to do this later
|
||||
* is messy. We don't care to take a readlock to look at the ids
|
||||
* in inode here, because we can't hold it across the trans_reserve.
|
||||
* If the IDs do change before we take the ilock, we're covered
|
||||
* because the i_*dquot fields will get updated anyway.
|
||||
*/
|
||||
if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) {
|
||||
uint qflags = 0;
|
||||
|
||||
if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) {
|
||||
uid = iattr->ia_uid;
|
||||
qflags |= XFS_QMOPT_UQUOTA;
|
||||
} else {
|
||||
uid = ip->i_d.di_uid;
|
||||
}
|
||||
if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {
|
||||
gid = iattr->ia_gid;
|
||||
qflags |= XFS_QMOPT_GQUOTA;
|
||||
} else {
|
||||
gid = ip->i_d.di_gid;
|
||||
}
|
||||
|
||||
/*
|
||||
* We take a reference when we initialize udqp and gdqp,
|
||||
* so it is important that we never blindly double trip on
|
||||
* the same variable. See xfs_create() for an example.
|
||||
*/
|
||||
ASSERT(udqp == NULL);
|
||||
ASSERT(gdqp == NULL);
|
||||
code = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip),
|
||||
qflags, &udqp, &gdqp);
|
||||
if (code)
|
||||
return code;
|
||||
}
|
||||
|
||||
/*
|
||||
* For the other attributes, we acquire the inode lock and
|
||||
* first do an error checking pass.
|
||||
*/
|
||||
tp = NULL;
|
||||
lock_flags = XFS_ILOCK_EXCL;
|
||||
if (flags & XFS_ATTR_NOLOCK)
|
||||
need_iolock = 0;
|
||||
if (!(mask & ATTR_SIZE)) {
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
|
||||
commit_flags = 0;
|
||||
code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp),
|
||||
0, 0, 0);
|
||||
if (code) {
|
||||
lock_flags = 0;
|
||||
goto error_return;
|
||||
}
|
||||
} else {
|
||||
if (need_iolock)
|
||||
lock_flags |= XFS_IOLOCK_EXCL;
|
||||
}
|
||||
|
||||
xfs_ilock(ip, lock_flags);
|
||||
|
||||
/*
|
||||
* Change file ownership. Must be the owner or privileged.
|
||||
*/
|
||||
if (mask & (ATTR_UID|ATTR_GID)) {
|
||||
/*
|
||||
* These IDs could have changed since we last looked at them.
|
||||
* But, we're assured that if the ownership did change
|
||||
* while we didn't have the inode locked, inode's dquot(s)
|
||||
* would have changed also.
|
||||
*/
|
||||
iuid = ip->i_d.di_uid;
|
||||
igid = ip->i_d.di_gid;
|
||||
gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;
|
||||
uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;
|
||||
|
||||
/*
|
||||
* Do a quota reservation only if uid/gid is actually
|
||||
* going to change.
|
||||
*/
|
||||
if (XFS_IS_QUOTA_RUNNING(mp) &&
|
||||
((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
|
||||
(XFS_IS_GQUOTA_ON(mp) && igid != gid))) {
|
||||
ASSERT(tp);
|
||||
code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
|
||||
capable(CAP_FOWNER) ?
|
||||
XFS_QMOPT_FORCE_RES : 0);
|
||||
if (code) /* out of quota */
|
||||
goto error_return;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Truncate file. Must have write permission and not be a directory.
|
||||
*/
|
||||
if (mask & ATTR_SIZE) {
|
||||
/* Short circuit the truncate case for zero length files */
|
||||
if (iattr->ia_size == 0 &&
|
||||
ip->i_size == 0 && ip->i_d.di_nextents == 0) {
|
||||
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||||
lock_flags &= ~XFS_ILOCK_EXCL;
|
||||
if (mask & ATTR_CTIME) {
|
||||
inode->i_mtime = inode->i_ctime =
|
||||
current_fs_time(inode->i_sb);
|
||||
xfs_mark_inode_dirty_sync(ip);
|
||||
}
|
||||
code = 0;
|
||||
goto error_return;
|
||||
}
|
||||
|
||||
if (S_ISDIR(ip->i_d.di_mode)) {
|
||||
code = XFS_ERROR(EISDIR);
|
||||
goto error_return;
|
||||
} else if (!S_ISREG(ip->i_d.di_mode)) {
|
||||
code = XFS_ERROR(EINVAL);
|
||||
goto error_return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Make sure that the dquots are attached to the inode.
|
||||
*/
|
||||
code = xfs_qm_dqattach_locked(ip, 0);
|
||||
if (code)
|
||||
goto error_return;
|
||||
|
||||
/*
|
||||
* Now we can make the changes. Before we join the inode
|
||||
* to the transaction, if ATTR_SIZE is set then take care of
|
||||
* the part of the truncation that must be done without the
|
||||
* inode lock. This needs to be done before joining the inode
|
||||
* to the transaction, because the inode cannot be unlocked
|
||||
* once it is a part of the transaction.
|
||||
*/
|
||||
if (iattr->ia_size > ip->i_size) {
|
||||
/*
|
||||
* Do the first part of growing a file: zero any data
|
||||
* in the last block that is beyond the old EOF. We
|
||||
* need to do this before the inode is joined to the
|
||||
* transaction to modify the i_size.
|
||||
*/
|
||||
code = xfs_zero_eof(ip, iattr->ia_size, ip->i_size);
|
||||
if (code)
|
||||
goto error_return;
|
||||
}
|
||||
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||||
lock_flags &= ~XFS_ILOCK_EXCL;
|
||||
|
||||
/*
|
||||
* We are going to log the inode size change in this
|
||||
* transaction so any previous writes that are beyond the on
|
||||
* disk EOF and the new EOF that have not been written out need
|
||||
* to be written here. If we do not write the data out, we
|
||||
* expose ourselves to the null files problem.
|
||||
*
|
||||
* Only flush from the on disk size to the smaller of the in
|
||||
* memory file size or the new size as that's the range we
|
||||
* really care about here and prevents waiting for other data
|
||||
* not within the range we care about here.
|
||||
*/
|
||||
if (ip->i_size != ip->i_d.di_size &&
|
||||
iattr->ia_size > ip->i_d.di_size) {
|
||||
code = xfs_flush_pages(ip,
|
||||
ip->i_d.di_size, iattr->ia_size,
|
||||
XBF_ASYNC, FI_NONE);
|
||||
if (code)
|
||||
goto error_return;
|
||||
}
|
||||
|
||||
/* wait for all I/O to complete */
|
||||
xfs_ioend_wait(ip);
|
||||
|
||||
code = -block_truncate_page(inode->i_mapping, iattr->ia_size,
|
||||
xfs_get_blocks);
|
||||
if (code)
|
||||
goto error_return;
|
||||
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
|
||||
code = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
|
||||
XFS_TRANS_PERM_LOG_RES,
|
||||
XFS_ITRUNCATE_LOG_COUNT);
|
||||
if (code)
|
||||
goto error_return;
|
||||
|
||||
truncate_setsize(inode, iattr->ia_size);
|
||||
|
||||
commit_flags = XFS_TRANS_RELEASE_LOG_RES;
|
||||
lock_flags |= XFS_ILOCK_EXCL;
|
||||
|
||||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
|
||||
xfs_trans_ijoin(tp, ip);
|
||||
|
||||
/*
|
||||
* Only change the c/mtime if we are changing the size
|
||||
* or we are explicitly asked to change it. This handles
|
||||
* the semantic difference between truncate() and ftruncate()
|
||||
* as implemented in the VFS.
|
||||
*
|
||||
* The regular truncate() case without ATTR_CTIME and ATTR_MTIME
|
||||
* is a special case where we need to update the times despite
|
||||
* not having these flags set. For all other operations the
|
||||
* VFS set these flags explicitly if it wants a timestamp
|
||||
* update.
|
||||
*/
|
||||
if (iattr->ia_size != ip->i_size &&
|
||||
(!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
|
||||
iattr->ia_ctime = iattr->ia_mtime =
|
||||
current_fs_time(inode->i_sb);
|
||||
mask |= ATTR_CTIME | ATTR_MTIME;
|
||||
}
|
||||
|
||||
if (iattr->ia_size > ip->i_size) {
|
||||
ip->i_d.di_size = iattr->ia_size;
|
||||
ip->i_size = iattr->ia_size;
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
} else if (iattr->ia_size <= ip->i_size ||
|
||||
(iattr->ia_size == 0 && ip->i_d.di_nextents)) {
|
||||
/*
|
||||
* signal a sync transaction unless
|
||||
* we're truncating an already unlinked
|
||||
* file on a wsync filesystem
|
||||
*/
|
||||
code = xfs_itruncate_finish(&tp, ip, iattr->ia_size,
|
||||
XFS_DATA_FORK,
|
||||
((ip->i_d.di_nlink != 0 ||
|
||||
!(mp->m_flags & XFS_MOUNT_WSYNC))
|
||||
? 1 : 0));
|
||||
if (code)
|
||||
goto abort_return;
|
||||
/*
|
||||
* Truncated "down", so we're removing references
|
||||
* to old data here - if we now delay flushing for
|
||||
* a long time, we expose ourselves unduly to the
|
||||
* notorious NULL files problem. So, we mark this
|
||||
* vnode and flush it when the file is closed, and
|
||||
* do not wait the usual (long) time for writeout.
|
||||
*/
|
||||
xfs_iflags_set(ip, XFS_ITRUNCATED);
|
||||
}
|
||||
} else if (tp) {
|
||||
xfs_trans_ijoin(tp, ip);
|
||||
}
|
||||
|
||||
/*
|
||||
* Change file ownership. Must be the owner or privileged.
|
||||
*/
|
||||
if (mask & (ATTR_UID|ATTR_GID)) {
|
||||
/*
|
||||
* CAP_FSETID overrides the following restrictions:
|
||||
*
|
||||
* The set-user-ID and set-group-ID bits of a file will be
|
||||
* cleared upon successful return from chown()
|
||||
*/
|
||||
if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
|
||||
!capable(CAP_FSETID)) {
|
||||
ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
|
||||
}
|
||||
|
||||
/*
|
||||
* Change the ownerships and register quota modifications
|
||||
* in the transaction.
|
||||
*/
|
||||
if (iuid != uid) {
|
||||
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) {
|
||||
ASSERT(mask & ATTR_UID);
|
||||
ASSERT(udqp);
|
||||
olddquot1 = xfs_qm_vop_chown(tp, ip,
|
||||
&ip->i_udquot, udqp);
|
||||
}
|
||||
ip->i_d.di_uid = uid;
|
||||
inode->i_uid = uid;
|
||||
}
|
||||
if (igid != gid) {
|
||||
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
|
||||
ASSERT(!XFS_IS_PQUOTA_ON(mp));
|
||||
ASSERT(mask & ATTR_GID);
|
||||
ASSERT(gdqp);
|
||||
olddquot2 = xfs_qm_vop_chown(tp, ip,
|
||||
&ip->i_gdquot, gdqp);
|
||||
}
|
||||
ip->i_d.di_gid = gid;
|
||||
inode->i_gid = gid;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Change file access modes.
|
||||
*/
|
||||
if (mask & ATTR_MODE) {
|
||||
umode_t mode = iattr->ia_mode;
|
||||
|
||||
if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
|
||||
mode &= ~S_ISGID;
|
||||
|
||||
ip->i_d.di_mode &= S_IFMT;
|
||||
ip->i_d.di_mode |= mode & ~S_IFMT;
|
||||
|
||||
inode->i_mode &= S_IFMT;
|
||||
inode->i_mode |= mode & ~S_IFMT;
|
||||
}
|
||||
|
||||
/*
|
||||
* Change file access or modified times.
|
||||
*/
|
||||
if (mask & ATTR_ATIME) {
|
||||
inode->i_atime = iattr->ia_atime;
|
||||
ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
|
||||
ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
|
||||
ip->i_update_core = 1;
|
||||
}
|
||||
if (mask & ATTR_CTIME) {
|
||||
inode->i_ctime = iattr->ia_ctime;
|
||||
ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
|
||||
ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
|
||||
ip->i_update_core = 1;
|
||||
}
|
||||
if (mask & ATTR_MTIME) {
|
||||
inode->i_mtime = iattr->ia_mtime;
|
||||
ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
|
||||
ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
|
||||
ip->i_update_core = 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* And finally, log the inode core if any attribute in it
|
||||
* has been changed.
|
||||
*/
|
||||
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE|
|
||||
ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
|
||||
XFS_STATS_INC(xs_ig_attrchg);
|
||||
|
||||
/*
|
||||
* If this is a synchronous mount, make sure that the
|
||||
* transaction goes to disk before returning to the user.
|
||||
* This is slightly sub-optimal in that truncates require
|
||||
* two sync transactions instead of one for wsync filesystems.
|
||||
* One for the truncate and one for the timestamps since we
|
||||
* don't want to change the timestamps unless we're sure the
|
||||
* truncate worked. Truncates are less than 1% of the laddis
|
||||
* mix so this probably isn't worth the trouble to optimize.
|
||||
*/
|
||||
code = 0;
|
||||
if (mp->m_flags & XFS_MOUNT_WSYNC)
|
||||
xfs_trans_set_sync(tp);
|
||||
|
||||
code = xfs_trans_commit(tp, commit_flags);
|
||||
|
||||
xfs_iunlock(ip, lock_flags);
|
||||
|
||||
/*
|
||||
* Release any dquot(s) the inode had kept before chown.
|
||||
*/
|
||||
xfs_qm_dqrele(olddquot1);
|
||||
xfs_qm_dqrele(olddquot2);
|
||||
xfs_qm_dqrele(udqp);
|
||||
xfs_qm_dqrele(gdqp);
|
||||
|
||||
if (code)
|
||||
return code;
|
||||
|
||||
/*
|
||||
* XXX(hch): Updating the ACL entries is not atomic vs the i_mode
|
||||
* update. We could avoid this with linked transactions
|
||||
* and passing down the transaction pointer all the way
|
||||
* to attr_set. No previous user of the generic
|
||||
* Posix ACL code seems to care about this issue either.
|
||||
*/
|
||||
if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) {
|
||||
code = -xfs_acl_chmod(inode);
|
||||
if (code)
|
||||
return XFS_ERROR(code);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
abort_return:
|
||||
commit_flags |= XFS_TRANS_ABORT;
|
||||
error_return:
|
||||
xfs_qm_dqrele(udqp);
|
||||
xfs_qm_dqrele(gdqp);
|
||||
if (tp) {
|
||||
xfs_trans_cancel(tp, commit_flags);
|
||||
}
|
||||
if (lock_flags != 0) {
|
||||
xfs_iunlock(ip, lock_flags);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
/*
|
||||
* The maximum pathlen is 1024 bytes. Since the minimum file system
|
||||
* blocksize is 512 bytes, we can get a max of 2 extents back from
|
||||
@ -621,13 +197,6 @@ xfs_free_eofblocks(
|
||||
*/
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
|
||||
|
||||
/*
|
||||
* Do the xfs_itruncate_start() call before
|
||||
* reserving any log space because
|
||||
* itruncate_start will call into the buffer
|
||||
* cache and we can't
|
||||
* do that within a transaction.
|
||||
*/
|
||||
if (flags & XFS_FREE_EOF_TRYLOCK) {
|
||||
if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
|
||||
xfs_trans_cancel(tp, 0);
|
||||
@ -636,13 +205,6 @@ xfs_free_eofblocks(
|
||||
} else {
|
||||
xfs_ilock(ip, XFS_IOLOCK_EXCL);
|
||||
}
|
||||
error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE,
|
||||
ip->i_size);
|
||||
if (error) {
|
||||
xfs_trans_cancel(tp, 0);
|
||||
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
|
||||
return error;
|
||||
}
|
||||
|
||||
error = xfs_trans_reserve(tp, 0,
|
||||
XFS_ITRUNCATE_LOG_RES(mp),
|
||||
@ -658,15 +220,12 @@ xfs_free_eofblocks(
|
||||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
xfs_trans_ijoin(tp, ip);
|
||||
|
||||
error = xfs_itruncate_finish(&tp, ip,
|
||||
ip->i_size,
|
||||
XFS_DATA_FORK,
|
||||
0);
|
||||
/*
|
||||
* If we get an error at this point we
|
||||
* simply don't bother truncating the file.
|
||||
*/
|
||||
error = xfs_itruncate_data(&tp, ip, ip->i_size);
|
||||
if (error) {
|
||||
/*
|
||||
* If we get an error at this point we simply don't
|
||||
* bother truncating the file.
|
||||
*/
|
||||
xfs_trans_cancel(tp,
|
||||
(XFS_TRANS_RELEASE_LOG_RES |
|
||||
XFS_TRANS_ABORT));
|
||||
@ -1084,20 +643,9 @@ xfs_inactive(
|
||||
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
|
||||
if (truncate) {
|
||||
/*
|
||||
* Do the xfs_itruncate_start() call before
|
||||
* reserving any log space because itruncate_start
|
||||
* will call into the buffer cache and we can't
|
||||
* do that within a transaction.
|
||||
*/
|
||||
xfs_ilock(ip, XFS_IOLOCK_EXCL);
|
||||
|
||||
error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, 0);
|
||||
if (error) {
|
||||
xfs_trans_cancel(tp, 0);
|
||||
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
|
||||
return VN_INACTIVE_CACHE;
|
||||
}
|
||||
xfs_ioend_wait(ip);
|
||||
|
||||
error = xfs_trans_reserve(tp, 0,
|
||||
XFS_ITRUNCATE_LOG_RES(mp),
|
||||
@ -1114,16 +662,7 @@ xfs_inactive(
|
||||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
xfs_trans_ijoin(tp, ip);
|
||||
|
||||
/*
|
||||
* normally, we have to run xfs_itruncate_finish sync.
|
||||
* But if filesystem is wsync and we're in the inactive
|
||||
* path, then we know that nlink == 0, and that the
|
||||
* xaction that made nlink == 0 is permanently committed
|
||||
* since xfs_remove runs as a synchronous transaction.
|
||||
*/
|
||||
error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK,
|
||||
(!(mp->m_flags & XFS_MOUNT_WSYNC) ? 1 : 0));
|
||||
|
||||
error = xfs_itruncate_data(&tp, ip, 0);
|
||||
if (error) {
|
||||
xfs_trans_cancel(tp,
|
||||
XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
|
||||
@ -2430,6 +1969,8 @@ xfs_zero_remaining_bytes(
|
||||
if (!bp)
|
||||
return XFS_ERROR(ENOMEM);
|
||||
|
||||
xfs_buf_unlock(bp);
|
||||
|
||||
for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
|
||||
offset_fsb = XFS_B_TO_FSBT(mp, offset);
|
||||
nimap = 1;
|
||||
@ -2784,7 +2325,7 @@ xfs_change_file_space(
|
||||
iattr.ia_valid = ATTR_SIZE;
|
||||
iattr.ia_size = startoffset;
|
||||
|
||||
error = xfs_setattr(ip, &iattr, attr_flags);
|
||||
error = xfs_setattr_size(ip, &iattr, attr_flags);
|
||||
|
||||
if (error)
|
||||
return error;
|
||||
|
@ -13,7 +13,8 @@ struct xfs_inode;
|
||||
struct xfs_iomap;
|
||||
|
||||
|
||||
int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags);
|
||||
int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap, int flags);
|
||||
int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap, int flags);
|
||||
#define XFS_ATTR_DMI 0x01 /* invocation from a DMI function */
|
||||
#define XFS_ATTR_NONBLOCK 0x02 /* return EAGAIN if operation would block */
|
||||
#define XFS_ATTR_NOLOCK 0x04 /* Don't grab any conflicting locks */
|
||||
|