Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-quota-2.6

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-quota-2.6: (27 commits)
  ext2: Zero our b_size in ext2_quota_read()
  trivial: fix typos/grammar errors in fs/Kconfig
  quota: Coding style fixes
  quota: Remove superfluous inlines
  quota: Remove uppercase aliases for quota functions.
  nfsd: Use lowercase names of quota functions
  jfs: Use lowercase names of quota functions
  udf: Use lowercase names of quota functions
  ufs: Use lowercase names of quota functions
  reiserfs: Use lowercase names of quota functions
  ext4: Use lowercase names of quota functions
  ext3: Use lowercase names of quota functions
  ext2: Use lowercase names of quota functions
  ramfs: Remove quota call
  vfs: Use lowercase names of quota functions
  quota: Remove dqbuf_t and other cleanups
  quota: Remove NODQUOT macro
  quota: Make global quota locks cacheline aligned
  quota: Move quota files into separate directory
  ext4: quota reservation for delayed allocation
  ...
Linus Torvalds 2009-03-27 14:48:34 -07:00
commit 2c9e15a011
58 changed files with 903 additions and 698 deletions
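
The bulk of this series is mechanical: the uppercase DQUOT_* wrapper macros are replaced by lowercase vfs_dq_*() functions with unchanged semantics, and the quota sources move under fs/quota/. The sketch below illustrates only the caller-visible rename; the vfs_dq_*() names are taken from the diffs that follow, while example_alloc_one_block() and example_fs_alloc() are hypothetical stand-ins for a filesystem call site.

/* Illustrative sketch only -- not a hunk from this merge. */
#include <linux/fs.h>
#include <linux/quotaops.h>

static int example_fs_alloc(struct inode *inode)
{
        return 0;       /* stand-in for the filesystem's real block allocator */
}

static int example_alloc_one_block(struct inode *inode)
{
        /* old spelling: if (DQUOT_ALLOC_BLOCK(inode, 1)) ... */
        if (vfs_dq_alloc_block(inode, 1))
                return -EDQUOT;

        if (example_fs_alloc(inode)) {
                /* old spelling: DQUOT_FREE_BLOCK(inode, 1); */
                vfs_dq_free_block(inode, 1);    /* roll back the quota charge */
                return -ENOSPC;
        }
        return 0;
}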


@ -56,61 +56,7 @@ endif # BLOCK
source "fs/notify/Kconfig"
config QUOTA
bool "Quota support"
help
If you say Y here, you will be able to set per user limits for disk
usage (also called disk quotas). Currently, it works for the
ext2, ext3, and reiserfs file system. ext3 also supports journalled
quotas for which you don't need to run quotacheck(8) after an unclean
shutdown.
For further details, read the Quota mini-HOWTO, available from
<http://www.tldp.org/docs.html#howto>, or the documentation provided
with the quota tools. Probably the quota support is only useful for
multi user systems. If unsure, say N.
config QUOTA_NETLINK_INTERFACE
bool "Report quota messages through netlink interface"
depends on QUOTA && NET
help
If you say Y here, quota warnings (about exceeding softlimit, reaching
hardlimit, etc.) will be reported through netlink interface. If unsure,
say Y.
config PRINT_QUOTA_WARNING
bool "Print quota warnings to console (OBSOLETE)"
depends on QUOTA
default y
help
If you say Y here, quota warnings (about exceeding softlimit, reaching
hardlimit, etc.) will be printed to the process' controlling terminal.
Note that this behavior is currently deprecated and may go away in
future. Please use notification via netlink socket instead.
# Generic support for tree structured quota files. Seleted when needed.
config QUOTA_TREE
tristate
config QFMT_V1
tristate "Old quota format support"
depends on QUOTA
help
This quota format was (is) used by kernels earlier than 2.4.22. If
you have quota working and you don't want to convert to new quota
format say Y here.
config QFMT_V2
tristate "Quota format v2 support"
depends on QUOTA
select QUOTA_TREE
help
This quota format allows using quotas with 32-bit UIDs/GIDs. If you
need this functionality say Y here.
config QUOTACTL
bool
depends on XFS_QUOTA || QUOTA
default y
source "fs/quota/Kconfig"
source "fs/autofs/Kconfig"
source "fs/autofs4/Kconfig"


@ -51,11 +51,7 @@ obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o xattr_acl.o
obj-$(CONFIG_NFS_COMMON) += nfs_common/
obj-$(CONFIG_GENERIC_ACL) += generic_acl.o
obj-$(CONFIG_QUOTA) += dquot.o
obj-$(CONFIG_QFMT_V1) += quota_v1.o
obj-$(CONFIG_QFMT_V2) += quota_v2.o
obj-$(CONFIG_QUOTA_TREE) += quota_tree.o
obj-$(CONFIG_QUOTACTL) += quota.o
obj-y += quota/
obj-$(CONFIG_PROC_FS) += proc/
obj-y += partitions/


@ -173,7 +173,8 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
if (!error) {
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
error = vfs_dq_transfer(inode, attr) ?
-EDQUOT : 0;
if (!error)
error = inode_setattr(inode, attr);
}


@ -570,7 +570,7 @@ do_more:
error_return:
brelse(bitmap_bh);
release_blocks(sb, freed);
DQUOT_FREE_BLOCK(inode, freed);
vfs_dq_free_block(inode, freed);
}
/**
@ -1247,7 +1247,7 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
/*
* Check quota for allocation of this block.
*/
if (DQUOT_ALLOC_BLOCK(inode, num)) {
if (vfs_dq_alloc_block(inode, num)) {
*errp = -EDQUOT;
return 0;
}
@ -1409,7 +1409,7 @@ allocated:
*errp = 0;
brelse(bitmap_bh);
DQUOT_FREE_BLOCK(inode, *count-num);
vfs_dq_free_block(inode, *count-num);
*count = num;
return ret_block;
@ -1420,7 +1420,7 @@ out:
* Undo the block allocation
*/
if (!performed_allocation)
DQUOT_FREE_BLOCK(inode, *count);
vfs_dq_free_block(inode, *count);
brelse(bitmap_bh);
return 0;
}


@ -121,8 +121,8 @@ void ext2_free_inode (struct inode * inode)
if (!is_bad_inode(inode)) {
/* Quota is already initialized in iput() */
ext2_xattr_delete_inode(inode);
DQUOT_FREE_INODE(inode);
DQUOT_DROP(inode);
vfs_dq_free_inode(inode);
vfs_dq_drop(inode);
}
es = EXT2_SB(sb)->s_es;
@ -586,7 +586,7 @@ got:
goto fail_drop;
}
if (DQUOT_ALLOC_INODE(inode)) {
if (vfs_dq_alloc_inode(inode)) {
err = -EDQUOT;
goto fail_drop;
}
@ -605,10 +605,10 @@ got:
return inode;
fail_free_drop:
DQUOT_FREE_INODE(inode);
vfs_dq_free_inode(inode);
fail_drop:
DQUOT_DROP(inode);
vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
unlock_new_inode(inode);


@ -1444,7 +1444,7 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
return error;
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
(iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
error = DQUOT_TRANSFER(inode, iattr) ? -EDQUOT : 0;
error = vfs_dq_transfer(inode, iattr) ? -EDQUOT : 0;
if (error)
return error;
}


@ -1331,6 +1331,7 @@ static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
sb->s_blocksize - offset : toread;
tmp_bh.b_state = 0;
tmp_bh.b_size = sb->s_blocksize;
err = ext2_get_block(inode, blk, &tmp_bh, 0);
if (err < 0)
return err;


@ -642,7 +642,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
ea_bdebug(new_bh, "reusing block");
error = -EDQUOT;
if (DQUOT_ALLOC_BLOCK(inode, 1)) {
if (vfs_dq_alloc_block(inode, 1)) {
unlock_buffer(new_bh);
goto cleanup;
}
@ -699,7 +699,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
* as if nothing happened and cleanup the unused block */
if (error && error != -ENOSPC) {
if (new_bh && new_bh != old_bh)
DQUOT_FREE_BLOCK(inode, 1);
vfs_dq_free_block(inode, 1);
goto cleanup;
}
} else
@ -731,7 +731,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
if (ce)
mb_cache_entry_release(ce);
DQUOT_FREE_BLOCK(inode, 1);
vfs_dq_free_block(inode, 1);
mark_buffer_dirty(old_bh);
ea_bdebug(old_bh, "refcount now=%d",
le32_to_cpu(HDR(old_bh)->h_refcount));
@ -794,7 +794,7 @@ ext2_xattr_delete_inode(struct inode *inode)
mark_buffer_dirty(bh);
if (IS_SYNC(inode))
sync_dirty_buffer(bh);
DQUOT_FREE_BLOCK(inode, 1);
vfs_dq_free_block(inode, 1);
}
EXT2_I(inode)->i_file_acl = 0;


@ -676,7 +676,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
}
ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
if (dquot_freed_blocks)
DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
vfs_dq_free_block(inode, dquot_freed_blocks);
return;
}
@ -1502,7 +1502,7 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
/*
* Check quota for allocation of this block.
*/
if (DQUOT_ALLOC_BLOCK(inode, num)) {
if (vfs_dq_alloc_block(inode, num)) {
*errp = -EDQUOT;
return 0;
}
@ -1714,7 +1714,7 @@ allocated:
*errp = 0;
brelse(bitmap_bh);
DQUOT_FREE_BLOCK(inode, *count-num);
vfs_dq_free_block(inode, *count-num);
*count = num;
return ret_block;
@ -1729,7 +1729,7 @@ out:
* Undo the block allocation
*/
if (!performed_allocation)
DQUOT_FREE_BLOCK(inode, *count);
vfs_dq_free_block(inode, *count);
brelse(bitmap_bh);
return 0;
}


@ -123,10 +123,10 @@ void ext3_free_inode (handle_t *handle, struct inode * inode)
* Note: we must free any quota before locking the superblock,
* as writing the quota to disk may need the lock as well.
*/
DQUOT_INIT(inode);
vfs_dq_init(inode);
ext3_xattr_delete_inode(handle, inode);
DQUOT_FREE_INODE(inode);
DQUOT_DROP(inode);
vfs_dq_free_inode(inode);
vfs_dq_drop(inode);
is_directory = S_ISDIR(inode->i_mode);
@ -589,7 +589,7 @@ got:
sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
ret = inode;
if(DQUOT_ALLOC_INODE(inode)) {
if (vfs_dq_alloc_inode(inode)) {
err = -EDQUOT;
goto fail_drop;
}
@ -620,10 +620,10 @@ really_out:
return ret;
fail_free_drop:
DQUOT_FREE_INODE(inode);
vfs_dq_free_inode(inode);
fail_drop:
DQUOT_DROP(inode);
vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
unlock_new_inode(inode);


@ -3063,7 +3063,7 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
error = PTR_ERR(handle);
goto err_out;
}
error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
if (error) {
ext3_journal_stop(handle);
return error;
@ -3154,7 +3154,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode)
ret = 2 * (bpp + indirects) + 2;
#ifdef CONFIG_QUOTA
/* We know that structure was already allocated during DQUOT_INIT so
/* We know that structure was already allocated during vfs_dq_init so
* we will be updating only the data blocks + inodes */
ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif
@ -3245,7 +3245,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
* i_size has been changed by generic_commit_write() and we thus need
* to include the updated inode in the current transaction.
*
* Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
* Also, vfs_dq_alloc_space() will always dirty the inode when blocks
* are allocated to the file.
*
* If the inode is marked synchronous, we don't honour that here - doing


@ -2049,7 +2049,7 @@ static int ext3_rmdir (struct inode * dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go in
* separate transaction */
DQUOT_INIT(dentry->d_inode);
vfs_dq_init(dentry->d_inode);
handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@ -2108,7 +2108,7 @@ static int ext3_unlink(struct inode * dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go
* in separate transaction */
DQUOT_INIT(dentry->d_inode);
vfs_dq_init(dentry->d_inode);
handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@ -2272,7 +2272,7 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
/* Initialize quotas before so that eventual writes go
* in separate transaction */
if (new_dentry->d_inode)
DQUOT_INIT(new_dentry->d_inode);
vfs_dq_init(new_dentry->d_inode);
handle = ext3_journal_start(old_dir, 2 *
EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);


@ -707,8 +707,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
static int ext3_dquot_initialize(struct inode *inode, int type);
static int ext3_dquot_drop(struct inode *inode);
static int ext3_write_dquot(struct dquot *dquot);
static int ext3_acquire_dquot(struct dquot *dquot);
static int ext3_release_dquot(struct dquot *dquot);
@ -723,8 +721,8 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off);
static struct dquot_operations ext3_quota_operations = {
.initialize = ext3_dquot_initialize,
.drop = ext3_dquot_drop,
.initialize = dquot_initialize,
.drop = dquot_drop,
.alloc_space = dquot_alloc_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
@ -1438,7 +1436,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
}
list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
DQUOT_INIT(inode);
vfs_dq_init(inode);
if (inode->i_nlink) {
printk(KERN_DEBUG
"%s: truncating inode %lu to %Ld bytes\n",
@ -2702,7 +2700,7 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
* Process 1 Process 2
* ext3_create() quota_sync()
* journal_start() write_dquot()
* DQUOT_INIT() down(dqio_mutex)
* vfs_dq_init() down(dqio_mutex)
* down(dqio_mutex) journal_start()
*
*/
@ -2714,44 +2712,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot)
return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
}
static int ext3_dquot_initialize(struct inode *inode, int type)
{
handle_t *handle;
int ret, err;
/* We may create quota structure so we need to reserve enough blocks */
handle = ext3_journal_start(inode, 2*EXT3_QUOTA_INIT_BLOCKS(inode->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = dquot_initialize(inode, type);
err = ext3_journal_stop(handle);
if (!ret)
ret = err;
return ret;
}
static int ext3_dquot_drop(struct inode *inode)
{
handle_t *handle;
int ret, err;
/* We may delete quota structure so we need to reserve enough blocks */
handle = ext3_journal_start(inode, 2*EXT3_QUOTA_DEL_BLOCKS(inode->i_sb));
if (IS_ERR(handle)) {
/*
* We call dquot_drop() anyway to at least release references
* to quota structures so that umount does not hang.
*/
dquot_drop(inode);
return PTR_ERR(handle);
}
ret = dquot_drop(inode);
err = ext3_journal_stop(handle);
if (!ret)
ret = err;
return ret;
}
static int ext3_write_dquot(struct dquot *dquot)
{
int ret, err;


@ -498,7 +498,7 @@ ext3_xattr_release_block(handle_t *handle, struct inode *inode,
error = ext3_journal_dirty_metadata(handle, bh);
if (IS_SYNC(inode))
handle->h_sync = 1;
DQUOT_FREE_BLOCK(inode, 1);
vfs_dq_free_block(inode, 1);
ea_bdebug(bh, "refcount now=%d; releasing",
le32_to_cpu(BHDR(bh)->h_refcount));
if (ce)
@ -774,7 +774,7 @@ inserted:
/* The old block is released after updating
the inode. */
error = -EDQUOT;
if (DQUOT_ALLOC_BLOCK(inode, 1))
if (vfs_dq_alloc_block(inode, 1))
goto cleanup;
error = ext3_journal_get_write_access(handle,
new_bh);
@ -848,7 +848,7 @@ cleanup:
return error;
cleanup_dquot:
DQUOT_FREE_BLOCK(inode, 1);
vfs_dq_free_block(inode, 1);
goto cleanup;
bad_block:


@ -536,7 +536,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
ext4_mb_free_blocks(handle, inode, block, count,
metadata, &dquot_freed_blocks);
if (dquot_freed_blocks)
DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
vfs_dq_free_block(inode, dquot_freed_blocks);
return;
}


@ -20,6 +20,7 @@
#include <linux/blkdev.h>
#include <linux/magic.h>
#include <linux/jbd2.h>
#include <linux/quota.h>
#include "ext4_i.h"
/*
@ -1098,6 +1099,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
extern int ext4_block_truncate_page(handle_t *handle,
struct address_space *mapping, loff_t from);
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page);
extern qsize_t ext4_get_reserved_space(struct inode *inode);
/* ioctl.c */
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);


@ -220,10 +220,10 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
* Note: we must free any quota before locking the superblock,
* as writing the quota to disk may need the lock as well.
*/
DQUOT_INIT(inode);
vfs_dq_init(inode);
ext4_xattr_delete_inode(handle, inode);
DQUOT_FREE_INODE(inode);
DQUOT_DROP(inode);
vfs_dq_free_inode(inode);
vfs_dq_drop(inode);
is_directory = S_ISDIR(inode->i_mode);
@ -915,7 +915,7 @@ got:
ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
ret = inode;
if (DQUOT_ALLOC_INODE(inode)) {
if (vfs_dq_alloc_inode(inode)) {
err = -EDQUOT;
goto fail_drop;
}
@ -956,10 +956,10 @@ really_out:
return ret;
fail_free_drop:
DQUOT_FREE_INODE(inode);
vfs_dq_free_inode(inode);
fail_drop:
DQUOT_DROP(inode);
vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
unlock_new_inode(inode);


@ -975,6 +975,17 @@ out:
return err;
}
qsize_t ext4_get_reserved_space(struct inode *inode)
{
unsigned long long total;
spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
total = EXT4_I(inode)->i_reserved_data_blocks +
EXT4_I(inode)->i_reserved_meta_blocks;
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
return total;
}
/*
* Calculate the number of metadata blocks need to reserve
* to allocate @blocks for non extent file based file
@ -1036,8 +1047,14 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
/* update per-inode reservations */
BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
EXT4_I(inode)->i_reserved_data_blocks -= used;
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
/*
* free those over-booking quota for metadata blocks
*/
if (mdb_free)
vfs_dq_release_reservation_block(inode, mdb_free);
}
/*
@ -1553,8 +1570,8 @@ static int ext4_journalled_write_end(struct file *file,
static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
{
int retries = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
unsigned long md_needed, mdblocks, total = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
unsigned long md_needed, mdblocks, total = 0;
/*
* recalculate the amount of metadata blocks to reserve
@ -1570,12 +1587,23 @@ repeat:
md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
total = md_needed + nrblocks;
/*
* Make quota reservation here to prevent quota overflow
* later. Real quota accounting is done at pages writeout
* time.
*/
if (vfs_dq_reserve_block(inode, total)) {
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
return -EDQUOT;
}
if (ext4_claim_free_blocks(sbi, total)) {
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
yield();
goto repeat;
}
vfs_dq_release_reservation_block(inode, total);
return -ENOSPC;
}
EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
@ -1629,6 +1657,8 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
EXT4_I(inode)->i_reserved_meta_blocks = mdb;
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
vfs_dq_release_reservation_block(inode, release);
}
static void ext4_da_page_release_reservation(struct page *page,
@ -4612,7 +4642,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
error = PTR_ERR(handle);
goto err_out;
}
error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
if (error) {
ext4_journal_stop(handle);
return error;
@ -4991,7 +5021,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
* i_size has been changed by generic_commit_write() and we thus need
* to include the updated inode in the current transaction.
*
* Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
* Also, vfs_dq_alloc_block() will always dirty the inode when blocks
* are allocated to the file.
*
* If the inode is marked synchronous, we don't honour that here - doing


@ -3086,9 +3086,12 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
/* release all the reserved blocks if non delalloc */
percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
else
else {
percpu_counter_sub(&sbi->s_dirtyblocks_counter,
ac->ac_b_ex.fe_len);
/* convert reserved quota blocks to real quota blocks */
vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len);
}
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi,
@ -4544,7 +4547,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
struct ext4_sb_info *sbi;
struct super_block *sb;
ext4_fsblk_t block = 0;
unsigned int inquota;
unsigned int inquota = 0;
unsigned int reserv_blks = 0;
sb = ar->inode->i_sb;
@ -4562,9 +4565,17 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
(unsigned long long) ar->pleft,
(unsigned long long) ar->pright);
if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) {
/*
* With delalloc we already reserved the blocks
/*
* For delayed allocation, we could skip the ENOSPC and
* EDQUOT check, as blocks and quotas have been already
* reserved when data being copied into pagecache.
*/
if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
ar->flags |= EXT4_MB_DELALLOC_RESERVED;
else {
/* Without delayed allocation we need to verify
* there is enough free blocks to do block allocation
* and verify allocation doesn't exceed the quota limits.
*/
while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
/* let others to free the space */
@ -4576,19 +4587,16 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
return 0;
}
reserv_blks = ar->len;
while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) {
ar->flags |= EXT4_MB_HINT_NOPREALLOC;
ar->len--;
}
inquota = ar->len;
if (ar->len == 0) {
*errp = -EDQUOT;
goto out3;
}
}
while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
ar->flags |= EXT4_MB_HINT_NOPREALLOC;
ar->len--;
}
if (ar->len == 0) {
*errp = -EDQUOT;
goto out3;
}
inquota = ar->len;
if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
ar->flags |= EXT4_MB_DELALLOC_RESERVED;
ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
if (!ac) {
@ -4654,8 +4662,8 @@ repeat:
out2:
kmem_cache_free(ext4_ac_cachep, ac);
out1:
if (ar->len < inquota)
DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
if (inquota && ar->len < inquota)
vfs_dq_free_block(ar->inode, inquota - ar->len);
out3:
if (!ar->len) {
if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
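
The two ext4 sections above add quota handling for delayed allocation: quota is reserved when data is copied into the page cache, released again if the reservation is dropped, and "claimed" (converted into a real charge) when blocks are finally allocated at writeout. Below is a condensed, illustrative sketch of that lifecycle using the helpers introduced in these hunks; example_delalloc_cycle() is hypothetical, and the real code paths are ext4_da_reserve_space(), ext4_da_release_space() and ext4_mb_mark_diskspace_used() shown above.

/* Condensed sketch of the reserve -> claim/release cycle (illustrative). */
#include <linux/fs.h>
#include <linux/quotaops.h>

static int example_delalloc_cycle(struct inode *inode, int nrblocks, int abort)
{
        /* Write time (cf. ext4_da_reserve_space): reserve quota up front. */
        if (vfs_dq_reserve_block(inode, nrblocks))
                return -EDQUOT;

        if (abort) {
                /* Reservation dropped (cf. ext4_da_release_space). */
                vfs_dq_release_reservation_block(inode, nrblocks);
                return 0;
        }

        /* Writeout (cf. ext4_mb_mark_diskspace_used): convert the
         * reservation into a real quota charge for the allocated blocks. */
        vfs_dq_claim_block(inode, nrblocks);
        return 0;
}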


@ -2092,7 +2092,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go in
* separate transaction */
DQUOT_INIT(dentry->d_inode);
vfs_dq_init(dentry->d_inode);
handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@ -2151,7 +2151,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go
* in separate transaction */
DQUOT_INIT(dentry->d_inode);
vfs_dq_init(dentry->d_inode);
handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@ -2318,7 +2318,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
/* Initialize quotas before so that eventual writes go
* in separate transaction */
if (new_dentry->d_inode)
DQUOT_INIT(new_dentry->d_inode);
vfs_dq_init(new_dentry->d_inode);
handle = ext4_journal_start(old_dir, 2 *
EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);


@ -926,8 +926,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_
#define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group")
#define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
static int ext4_dquot_initialize(struct inode *inode, int type);
static int ext4_dquot_drop(struct inode *inode);
static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
@ -942,9 +940,13 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off);
static struct dquot_operations ext4_quota_operations = {
.initialize = ext4_dquot_initialize,
.drop = ext4_dquot_drop,
.initialize = dquot_initialize,
.drop = dquot_drop,
.alloc_space = dquot_alloc_space,
.reserve_space = dquot_reserve_space,
.claim_space = dquot_claim_space,
.release_rsv = dquot_release_reserved_space,
.get_reserved_space = ext4_get_reserved_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
.free_inode = dquot_free_inode,
@ -1802,7 +1804,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
}
list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
DQUOT_INIT(inode);
vfs_dq_init(inode);
if (inode->i_nlink) {
printk(KERN_DEBUG
"%s: truncating inode %lu to %lld bytes\n",
@ -3367,8 +3369,8 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
* is locked for write. Otherwise the are possible deadlocks:
* Process 1 Process 2
* ext4_create() quota_sync()
* jbd2_journal_start() write_dquot()
* DQUOT_INIT() down(dqio_mutex)
* jbd2_journal_start() write_dquot()
* vfs_dq_init() down(dqio_mutex)
* down(dqio_mutex) jbd2_journal_start()
*
*/
@ -3380,44 +3382,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot)
return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
}
static int ext4_dquot_initialize(struct inode *inode, int type)
{
handle_t *handle;
int ret, err;
/* We may create quota structure so we need to reserve enough blocks */
handle = ext4_journal_start(inode, 2*EXT4_QUOTA_INIT_BLOCKS(inode->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = dquot_initialize(inode, type);
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
return ret;
}
static int ext4_dquot_drop(struct inode *inode)
{
handle_t *handle;
int ret, err;
/* We may delete quota structure so we need to reserve enough blocks */
handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb));
if (IS_ERR(handle)) {
/*
* We call dquot_drop() anyway to at least release references
* to quota structures so that umount does not hang.
*/
dquot_drop(inode);
return PTR_ERR(handle);
}
ret = dquot_drop(inode);
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
return ret;
}
static int ext4_write_dquot(struct dquot *dquot)
{
int ret, err;


@ -490,7 +490,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
error = ext4_handle_dirty_metadata(handle, inode, bh);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
DQUOT_FREE_BLOCK(inode, 1);
vfs_dq_free_block(inode, 1);
ea_bdebug(bh, "refcount now=%d; releasing",
le32_to_cpu(BHDR(bh)->h_refcount));
if (ce)
@ -784,7 +784,7 @@ inserted:
/* The old block is released after updating
the inode. */
error = -EDQUOT;
if (DQUOT_ALLOC_BLOCK(inode, 1))
if (vfs_dq_alloc_block(inode, 1))
goto cleanup;
error = ext4_journal_get_write_access(handle,
new_bh);
@ -860,7 +860,7 @@ cleanup:
return error;
cleanup_dquot:
DQUOT_FREE_BLOCK(inode, 1);
vfs_dq_free_block(inode, 1);
goto cleanup;
bad_block:


@ -294,7 +294,7 @@ void clear_inode(struct inode *inode)
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(inode->i_state & I_CLEAR);
inode_sync_wait(inode);
DQUOT_DROP(inode);
vfs_dq_drop(inode);
if (inode->i_sb->s_op->clear_inode)
inode->i_sb->s_op->clear_inode(inode);
if (S_ISBLK(inode->i_mode) && inode->i_bdev)
@ -1168,7 +1168,7 @@ void generic_delete_inode(struct inode *inode)
if (op->delete_inode) {
void (*delete)(struct inode *) = op->delete_inode;
if (!is_bad_inode(inode))
DQUOT_INIT(inode);
vfs_dq_init(inode);
/* Filesystems implementing their own
* s_op->delete_inode are required to call
* truncate_inode_pages and clear_inode()


@ -233,7 +233,7 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
(iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
if (DQUOT_TRANSFER(inode, iattr))
if (vfs_dq_transfer(inode, iattr))
return -EDQUOT;
}


@ -158,9 +158,9 @@ void jfs_delete_inode(struct inode *inode)
/*
* Free the inode from the quota allocation.
*/
DQUOT_INIT(inode);
DQUOT_FREE_INODE(inode);
DQUOT_DROP(inode);
vfs_dq_init(inode);
vfs_dq_free_inode(inode);
vfs_dq_drop(inode);
}
clear_inode(inode);


@ -381,10 +381,10 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
* It's time to move the inline table to an external
* page and begin to build the xtree
*/
if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage))
if (vfs_dq_alloc_block(ip, sbi->nbperpage))
goto clean_up;
if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
vfs_dq_free_block(ip, sbi->nbperpage);
goto clean_up;
}
@ -408,7 +408,7 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
memcpy(&jfs_ip->i_dirtable, temp_table,
sizeof (temp_table));
dbFree(ip, xaddr, sbi->nbperpage);
DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
vfs_dq_free_block(ip, sbi->nbperpage);
goto clean_up;
}
ip->i_size = PSIZE;
@ -1027,7 +1027,7 @@ static int dtSplitUp(tid_t tid,
n = xlen;
/* Allocate blocks to quota. */
if (DQUOT_ALLOC_BLOCK(ip, n)) {
if (vfs_dq_alloc_block(ip, n)) {
rc = -EDQUOT;
goto extendOut;
}
@ -1308,7 +1308,7 @@ static int dtSplitUp(tid_t tid,
/* Rollback quota allocation */
if (rc && quota_allocation)
DQUOT_FREE_BLOCK(ip, quota_allocation);
vfs_dq_free_block(ip, quota_allocation);
dtSplitUp_Exit:
@ -1369,7 +1369,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
return -EIO;
/* Allocate blocks to quota. */
if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
release_metapage(rmp);
return -EDQUOT;
}
@ -1916,7 +1916,7 @@ static int dtSplitRoot(tid_t tid,
rp = rmp->data;
/* Allocate blocks to quota. */
if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
release_metapage(rmp);
return -EDQUOT;
}
@ -2287,7 +2287,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
xlen = lengthPXD(&fp->header.self);
/* Free quota allocation. */
DQUOT_FREE_BLOCK(ip, xlen);
vfs_dq_free_block(ip, xlen);
/* free/invalidate its buffer page */
discard_metapage(fmp);
@ -2363,7 +2363,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
xlen = lengthPXD(&p->header.self);
/* Free quota allocation */
DQUOT_FREE_BLOCK(ip, xlen);
vfs_dq_free_block(ip, xlen);
/* free/invalidate its buffer page */
discard_metapage(mp);


@ -141,7 +141,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
}
/* Allocate blocks to quota. */
if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
if (vfs_dq_alloc_block(ip, nxlen)) {
dbFree(ip, nxaddr, (s64) nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return -EDQUOT;
@ -164,7 +164,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
*/
if (rc) {
dbFree(ip, nxaddr, nxlen);
DQUOT_FREE_BLOCK(ip, nxlen);
vfs_dq_free_block(ip, nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return (rc);
}
@ -256,7 +256,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
goto exit;
/* Allocat blocks to quota. */
if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
if (vfs_dq_alloc_block(ip, nxlen)) {
dbFree(ip, nxaddr, (s64) nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return -EDQUOT;
@ -297,7 +297,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
/* extend the extent */
if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) {
dbFree(ip, xaddr + xlen, delta);
DQUOT_FREE_BLOCK(ip, nxlen);
vfs_dq_free_block(ip, nxlen);
goto exit;
}
} else {
@ -308,7 +308,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
*/
if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) {
dbFree(ip, nxaddr, nxlen);
DQUOT_FREE_BLOCK(ip, nxlen);
vfs_dq_free_block(ip, nxlen);
goto exit;
}
}


@ -116,7 +116,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
/*
* Allocate inode to quota.
*/
if (DQUOT_ALLOC_INODE(inode)) {
if (vfs_dq_alloc_inode(inode)) {
rc = -EDQUOT;
goto fail_drop;
}
@ -162,7 +162,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
return inode;
fail_drop:
DQUOT_DROP(inode);
vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
fail_unlock:
inode->i_nlink = 0;


@ -846,10 +846,10 @@ int xtInsert(tid_t tid, /* transaction id */
hint = addressXAD(xad) + lengthXAD(xad) - 1;
} else
hint = 0;
if ((rc = DQUOT_ALLOC_BLOCK(ip, xlen)))
if ((rc = vfs_dq_alloc_block(ip, xlen)))
goto out;
if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) {
DQUOT_FREE_BLOCK(ip, xlen);
vfs_dq_free_block(ip, xlen);
goto out;
}
}
@ -878,7 +878,7 @@ int xtInsert(tid_t tid, /* transaction id */
/* undo data extent allocation */
if (*xaddrp == 0) {
dbFree(ip, xaddr, (s64) xlen);
DQUOT_FREE_BLOCK(ip, xlen);
vfs_dq_free_block(ip, xlen);
}
return rc;
}
@ -1246,7 +1246,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
rbn = addressPXD(pxd);
/* Allocate blocks to quota. */
if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
rc = -EDQUOT;
goto clean_up;
}
@ -1456,7 +1456,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
/* Rollback quota allocation. */
if (quota_allocation)
DQUOT_FREE_BLOCK(ip, quota_allocation);
vfs_dq_free_block(ip, quota_allocation);
return (rc);
}
@ -1513,7 +1513,7 @@ xtSplitRoot(tid_t tid,
return -EIO;
/* Allocate blocks to quota. */
if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
release_metapage(rmp);
return -EDQUOT;
}
@ -3941,7 +3941,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
ip->i_size = newsize;
/* update quota allocation to reflect freed blocks */
DQUOT_FREE_BLOCK(ip, nfreed);
vfs_dq_free_block(ip, nfreed);
/*
* free tlock of invalidated pages


@ -356,7 +356,7 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name);
/* Init inode for quota operations. */
DQUOT_INIT(ip);
vfs_dq_init(ip);
/* directory must be empty to be removed */
if (!dtEmpty(ip)) {
@ -483,7 +483,7 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name);
/* Init inode for quota operations. */
DQUOT_INIT(ip);
vfs_dq_init(ip);
if ((rc = get_UCSname(&dname, dentry)))
goto out;
@ -1136,7 +1136,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
} else if (new_ip) {
IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL);
/* Init inode for quota operations. */
DQUOT_INIT(new_ip);
vfs_dq_init(new_ip);
}
/*


@ -260,14 +260,14 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;
/* Allocate new blocks to quota. */
if (DQUOT_ALLOC_BLOCK(ip, nblocks)) {
if (vfs_dq_alloc_block(ip, nblocks)) {
return -EDQUOT;
}
rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
if (rc) {
/*Rollback quota allocation. */
DQUOT_FREE_BLOCK(ip, nblocks);
vfs_dq_free_block(ip, nblocks);
return rc;
}
@ -332,7 +332,7 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
failed:
/* Rollback quota allocation. */
DQUOT_FREE_BLOCK(ip, nblocks);
vfs_dq_free_block(ip, nblocks);
dbFree(ip, blkno, nblocks);
return rc;
@ -538,7 +538,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
if (blocks_needed > current_blocks) {
/* Allocate new blocks to quota. */
if (DQUOT_ALLOC_BLOCK(inode, blocks_needed))
if (vfs_dq_alloc_block(inode, blocks_needed))
return -EDQUOT;
quota_allocation = blocks_needed;
@ -602,7 +602,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
clean_up:
/* Rollback quota allocation */
if (quota_allocation)
DQUOT_FREE_BLOCK(inode, quota_allocation);
vfs_dq_free_block(inode, quota_allocation);
return (rc);
}
@ -677,7 +677,7 @@ static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf,
/* If old blocks exist, they must be removed from quota allocation. */
if (old_blocks)
DQUOT_FREE_BLOCK(inode, old_blocks);
vfs_dq_free_block(inode, old_blocks);
inode->i_ctime = CURRENT_TIME;


@ -1473,7 +1473,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
error = security_inode_create(dir, dentry, mode);
if (error)
return error;
DQUOT_INIT(dir);
vfs_dq_init(dir);
error = dir->i_op->create(dir, dentry, mode, nd);
if (!error)
fsnotify_create(dir, dentry);
@ -1552,7 +1552,7 @@ int may_open(struct path *path, int acc_mode, int flag)
error = security_path_truncate(path, 0,
ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
if (!error) {
DQUOT_INIT(inode);
vfs_dq_init(inode);
error = do_truncate(dentry, 0,
ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
@ -1563,7 +1563,7 @@ int may_open(struct path *path, int acc_mode, int flag)
return error;
} else
if (flag & FMODE_WRITE)
DQUOT_INIT(inode);
vfs_dq_init(inode);
return 0;
}
@ -1946,7 +1946,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
if (error)
return error;
DQUOT_INIT(dir);
vfs_dq_init(dir);
error = dir->i_op->mknod(dir, dentry, mode, dev);
if (!error)
fsnotify_create(dir, dentry);
@ -2045,7 +2045,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
if (error)
return error;
DQUOT_INIT(dir);
vfs_dq_init(dir);
error = dir->i_op->mkdir(dir, dentry, mode);
if (!error)
fsnotify_mkdir(dir, dentry);
@ -2131,7 +2131,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
if (!dir->i_op->rmdir)
return -EPERM;
DQUOT_INIT(dir);
vfs_dq_init(dir);
mutex_lock(&dentry->d_inode->i_mutex);
dentry_unhash(dentry);
@ -2218,7 +2218,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
if (!dir->i_op->unlink)
return -EPERM;
DQUOT_INIT(dir);
vfs_dq_init(dir);
mutex_lock(&dentry->d_inode->i_mutex);
if (d_mountpoint(dentry))
@ -2329,7 +2329,7 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
if (error)
return error;
DQUOT_INIT(dir);
vfs_dq_init(dir);
error = dir->i_op->symlink(dir, dentry, oldname);
if (!error)
fsnotify_create(dir, dentry);
@ -2413,7 +2413,7 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
return error;
mutex_lock(&inode->i_mutex);
DQUOT_INIT(dir);
vfs_dq_init(dir);
error = dir->i_op->link(old_dentry, dir, new_dentry);
mutex_unlock(&inode->i_mutex);
if (!error)
@ -2612,8 +2612,8 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!old_dir->i_op->rename)
return -EPERM;
DQUOT_INIT(old_dir);
DQUOT_INIT(new_dir);
vfs_dq_init(old_dir);
vfs_dq_init(new_dir);
old_name = fsnotify_oldname_init(old_dentry->d_name.name);


@ -356,7 +356,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
put_write_access(inode);
goto out_nfserr;
}
DQUOT_INIT(inode);
vfs_dq_init(inode);
}
/* sanitize the mode change */
@ -723,7 +723,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
else
flags = O_WRONLY|O_LARGEFILE;
DQUOT_INIT(inode);
vfs_dq_init(inode);
}
*filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
flags, cred);


@ -273,7 +273,7 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
if (!error)
error = security_path_truncate(&path, length, 0);
if (!error) {
DQUOT_INIT(inode);
vfs_dq_init(inode);
error = do_truncate(path.dentry, length, 0, NULL);
}

fs/quota/Kconfig (new file, 59 lines)

@ -0,0 +1,59 @@
#
# Quota configuration
#
config QUOTA
bool "Quota support"
help
If you say Y here, you will be able to set per user limits for disk
usage (also called disk quotas). Currently, it works for the
ext2, ext3, and reiserfs file system. ext3 also supports journalled
quotas for which you don't need to run quotacheck(8) after an unclean
shutdown.
For further details, read the Quota mini-HOWTO, available from
<http://www.tldp.org/docs.html#howto>, or the documentation provided
with the quota tools. Probably the quota support is only useful for
multi user systems. If unsure, say N.
config QUOTA_NETLINK_INTERFACE
bool "Report quota messages through netlink interface"
depends on QUOTA && NET
help
If you say Y here, quota warnings (about exceeding softlimit, reaching
hardlimit, etc.) will be reported through netlink interface. If unsure,
say Y.
config PRINT_QUOTA_WARNING
bool "Print quota warnings to console (OBSOLETE)"
depends on QUOTA
default y
help
If you say Y here, quota warnings (about exceeding softlimit, reaching
hardlimit, etc.) will be printed to the process' controlling terminal.
Note that this behavior is currently deprecated and may go away in
future. Please use notification via netlink socket instead.
# Generic support for tree structured quota files. Selected when needed.
config QUOTA_TREE
tristate
config QFMT_V1
tristate "Old quota format support"
depends on QUOTA
help
This quota format was (is) used by kernels earlier than 2.4.22. If
you have quota working and you don't want to convert to new quota
format say Y here.
config QFMT_V2
tristate "Quota format v2 support"
depends on QUOTA
select QUOTA_TREE
help
This quota format allows using quotas with 32-bit UIDs/GIDs. If you
need this functionality say Y here.
config QUOTACTL
bool
depends on XFS_QUOTA || QUOTA
default y

fs/quota/Makefile (new file, 14 lines)

@ -0,0 +1,14 @@
#
# Makefile for the Linux filesystems.
#
# 14 Sep 2000, Christoph Hellwig <hch@infradead.org>
# Rewritten to use lists instead of if-statements.
#
obj-y :=
obj-$(CONFIG_QUOTA) += dquot.o
obj-$(CONFIG_QFMT_V1) += quota_v1.o
obj-$(CONFIG_QFMT_V2) += quota_v2.o
obj-$(CONFIG_QUOTA_TREE) += quota_tree.o
obj-$(CONFIG_QUOTACTL) += quota.o

File diff suppressed because it is too large.


@ -20,7 +20,8 @@
#include <linux/types.h>
/* Check validity of generic quotactl commands */
static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
qid_t id)
{
if (type >= MAXQUOTAS)
return -EINVAL;
@ -72,7 +73,8 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid
case Q_SETINFO:
case Q_SETQUOTA:
case Q_GETQUOTA:
/* This is just informative test so we are satisfied without a lock */
/* This is just an informative test so we are satisfied
* without the lock */
if (!sb_has_quota_active(sb, type))
return -ESRCH;
}
@ -92,7 +94,8 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid
}
/* Check validity of XFS Quota Manager commands */
static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd,
qid_t id)
{
if (type >= XQM_MAXQUOTAS)
return -EINVAL;
@ -142,7 +145,8 @@ static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t i
return 0;
}
static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
static int check_quotactl_valid(struct super_block *sb, int type, int cmd,
qid_t id)
{
int error;
@ -180,7 +184,8 @@ static void quota_sync_sb(struct super_block *sb, int type)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA);
mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
I_MUTEX_QUOTA);
truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
}
@ -200,14 +205,15 @@ void sync_dquots(struct super_block *sb, int type)
spin_lock(&sb_lock);
restart:
list_for_each_entry(sb, &super_blocks, s_list) {
/* This test just improves performance so it needn't be reliable... */
/* This test just improves performance so it needn't be
* reliable... */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && type != cnt)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
if (!info_dirty(&sb_dqopt(sb)->info[cnt]) &&
list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list))
list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list))
continue;
break;
}
@ -227,7 +233,8 @@ restart:
}
/* Copy parameters and call proper function */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr)
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
void __user *addr)
{
int ret;
@ -235,7 +242,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
case Q_QUOTAON: {
char *pathname;
if (IS_ERR(pathname = getname(addr)))
pathname = getname(addr);
if (IS_ERR(pathname))
return PTR_ERR(pathname);
ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0);
putname(pathname);
@ -261,7 +269,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
case Q_GETINFO: {
struct if_dqinfo info;
if ((ret = sb->s_qcop->get_info(sb, type, &info)))
ret = sb->s_qcop->get_info(sb, type, &info);
if (ret)
return ret;
if (copy_to_user(addr, &info, sizeof(info)))
return -EFAULT;
@ -277,7 +286,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
case Q_GETQUOTA: {
struct if_dqblk idq;
if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq)))
ret = sb->s_qcop->get_dqblk(sb, type, id, &idq);
if (ret)
return ret;
if (copy_to_user(addr, &idq, sizeof(idq)))
return -EFAULT;
@ -322,7 +332,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
case Q_XGETQUOTA: {
struct fs_disk_quota fdq;
if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq)))
ret = sb->s_qcop->get_xquota(sb, type, id, &fdq);
if (ret)
return ret;
if (copy_to_user(addr, &fdq, sizeof(fdq)))
return -EFAULT;
@ -341,7 +352,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
* look up a superblock on which quota ops will be performed
* - use the name of a block device to find the superblock thereon
*/
static inline struct super_block *quotactl_block(const char __user *special)
static struct super_block *quotactl_block(const char __user *special)
{
#ifdef CONFIG_BLOCK
struct block_device *bdev;


@ -22,8 +22,6 @@ MODULE_LICENSE("GPL");
#define __QUOTA_QT_PARANOIA
typedef char *dqbuf_t;
static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
{
unsigned int epb = info->dqi_usable_bs >> 2;
@ -35,46 +33,42 @@ static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
}
/* Number of entries in one blocks */
static inline int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
{
return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
/ info->dqi_entry_size;
}
static dqbuf_t getdqbuf(size_t size)
static char *getdqbuf(size_t size)
{
dqbuf_t buf = kmalloc(size, GFP_NOFS);
char *buf = kmalloc(size, GFP_NOFS);
if (!buf)
printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n");
printk(KERN_WARNING
"VFS: Not enough memory for quota buffers.\n");
return buf;
}
static inline void freedqbuf(dqbuf_t buf)
{
kfree(buf);
}
static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf)
static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
struct super_block *sb = info->dqi_sb;
memset(buf, 0, info->dqi_usable_bs);
return sb->s_op->quota_read(sb, info->dqi_type, (char *)buf,
return sb->s_op->quota_read(sb, info->dqi_type, buf,
info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
}
static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf)
static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
struct super_block *sb = info->dqi_sb;
return sb->s_op->quota_write(sb, info->dqi_type, (char *)buf,
return sb->s_op->quota_write(sb, info->dqi_type, buf,
info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
}
/* Remove empty block from list and return it */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
char *buf = getdqbuf(info->dqi_usable_bs);
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
int ret, blk;
@ -98,12 +92,12 @@ static int get_free_dqblk(struct qtree_mem_dqinfo *info)
mark_info_dirty(info->dqi_sb, info->dqi_type);
ret = blk;
out_buf:
freedqbuf(buf);
kfree(buf);
return ret;
}
/* Insert empty block to the list */
static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
{
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
int err;
@ -120,9 +114,10 @@ static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
}
/* Remove given block from the list of blocks with free entries */
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
uint blk)
{
dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs);
char *tmpbuf = getdqbuf(info->dqi_usable_bs);
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
uint nextblk = le32_to_cpu(dh->dqdh_next_free);
uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
@ -153,21 +148,24 @@ static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint
info->dqi_free_entry = nextblk;
mark_info_dirty(info->dqi_sb, info->dqi_type);
}
freedqbuf(tmpbuf);
kfree(tmpbuf);
dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
/* No matter whether write succeeds block is out of list */
if (write_blk(info, blk, buf) < 0)
printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk);
printk(KERN_ERR
"VFS: Can't write block (%u) with free entries.\n",
blk);
return 0;
out_buf:
freedqbuf(tmpbuf);
kfree(tmpbuf);
return err;
}
/* Insert given block to the beginning of list with free entries */
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
uint blk)
{
dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs);
char *tmpbuf = getdqbuf(info->dqi_usable_bs);
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
int err;
@ -188,12 +186,12 @@ static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint
if (err < 0)
goto out_buf;
}
freedqbuf(tmpbuf);
kfree(tmpbuf);
info->dqi_free_entry = blk;
mark_info_dirty(info->dqi_sb, info->dqi_type);
return 0;
out_buf:
freedqbuf(tmpbuf);
kfree(tmpbuf);
return err;
}
@ -215,7 +213,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
{
uint blk, i;
struct qt_disk_dqdbheader *dh;
dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
char *buf = getdqbuf(info->dqi_usable_bs);
char *ddquot;
*err = 0;
@ -233,11 +231,12 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
blk = get_free_dqblk(info);
if ((int)blk < 0) {
*err = blk;
freedqbuf(buf);
kfree(buf);
return 0;
}
memset(buf, 0, info->dqi_usable_bs);
/* This is enough as block is already zeroed and entry list is empty... */
/* This is enough as the block is already zeroed and the entry
* list is empty... */
info->dqi_free_entry = blk;
mark_info_dirty(dquot->dq_sb, dquot->dq_type);
}
@ -253,9 +252,12 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
}
le16_add_cpu(&dh->dqdh_entries, 1);
/* Find free structure in block */
for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader);
i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot);
i++, ddquot += info->dqi_entry_size);
ddquot = buf + sizeof(struct qt_disk_dqdbheader);
for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
if (qtree_entry_unused(info, ddquot))
break;
ddquot += info->dqi_entry_size;
}
#ifdef __QUOTA_QT_PARANOIA
if (i == qtree_dqstr_in_blk(info)) {
printk(KERN_ERR "VFS: find_free_dqentry(): Data block full "
@ -273,10 +275,10 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
dquot->dq_off = (blk << info->dqi_blocksize_bits) +
sizeof(struct qt_disk_dqdbheader) +
i * info->dqi_entry_size;
freedqbuf(buf);
kfree(buf);
return blk;
out_buf:
freedqbuf(buf);
kfree(buf);
return 0;
}
@ -284,7 +286,7 @@ out_buf:
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
uint *treeblk, int depth)
{
dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
char *buf = getdqbuf(info->dqi_usable_bs);
int ret = 0, newson = 0, newact = 0;
__le32 *ref;
uint newblk;
@ -333,7 +335,7 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
put_free_dqblk(info, buf, *treeblk);
}
out_buf:
freedqbuf(buf);
kfree(buf);
return ret;
}
@ -346,14 +348,15 @@ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
}
/*
* We don't have to be afraid of deadlocks as we never have quotas on quota files...
* We don't have to be afraid of deadlocks as we never have quotas on quota
* files...
*/
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
int type = dquot->dq_type;
struct super_block *sb = dquot->dq_sb;
ssize_t ret;
dqbuf_t ddquot = getdqbuf(info->dqi_entry_size);
char *ddquot = getdqbuf(info->dqi_entry_size);
if (!ddquot)
return -ENOMEM;
@ -364,15 +367,15 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
if (ret < 0) {
printk(KERN_ERR "VFS: Error %zd occurred while "
"creating quota.\n", ret);
freedqbuf(ddquot);
kfree(ddquot);
return ret;
}
}
spin_lock(&dq_data_lock);
info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
spin_unlock(&dq_data_lock);
ret = sb->s_op->quota_write(sb, type, (char *)ddquot,
info->dqi_entry_size, dquot->dq_off);
ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
dquot->dq_off);
if (ret != info->dqi_entry_size) {
printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
sb->s_id);
@ -382,7 +385,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
ret = 0;
}
dqstats.writes++;
freedqbuf(ddquot);
kfree(ddquot);
return ret;
}
@ -393,7 +396,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
uint blk)
{
struct qt_disk_dqdbheader *dh;
dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
char *buf = getdqbuf(info->dqi_usable_bs);
int ret = 0;
if (!buf)
@ -444,7 +447,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
}
dquot->dq_off = 0; /* Quota is now unattached */
out_buf:
freedqbuf(buf);
kfree(buf);
return ret;
}
@ -452,7 +455,7 @@ out_buf:
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
uint *blk, int depth)
{
dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
char *buf = getdqbuf(info->dqi_usable_bs);
int ret = 0;
uint newblk;
__le32 *ref = (__le32 *)buf;
@ -475,9 +478,8 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
int i;
ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
/* Block got empty? */
for (i = 0;
i < (info->dqi_usable_bs >> 2) && !ref[i];
i++);
for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
;
/* Don't put the root block into the free block list */
if (i == (info->dqi_usable_bs >> 2)
&& *blk != QT_TREEOFF) {
@ -491,7 +493,7 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
}
}
out_buf:
freedqbuf(buf);
kfree(buf);
return ret;
}
@ -510,7 +512,7 @@ EXPORT_SYMBOL(qtree_delete_dquot);
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
struct dquot *dquot, uint blk)
{
dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
char *buf = getdqbuf(info->dqi_usable_bs);
loff_t ret = 0;
int i;
char *ddquot;
@ -522,9 +524,12 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
goto out_buf;
}
for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader);
i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot);
i++, ddquot += info->dqi_entry_size);
ddquot = buf + sizeof(struct qt_disk_dqdbheader);
for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
if (info->dqi_ops->is_id(ddquot, dquot))
break;
ddquot += info->dqi_entry_size;
}
if (i == qtree_dqstr_in_blk(info)) {
printk(KERN_ERR "VFS: Quota for id %u referenced "
"but not present.\n", dquot->dq_id);
@ -535,7 +540,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
qt_disk_dqdbheader) + i * info->dqi_entry_size;
}
out_buf:
freedqbuf(buf);
kfree(buf);
return ret;
}
@ -543,7 +548,7 @@ out_buf:
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
struct dquot *dquot, uint blk, int depth)
{
dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
char *buf = getdqbuf(info->dqi_usable_bs);
loff_t ret = 0;
__le32 *ref = (__le32 *)buf;
@ -563,7 +568,7 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
else
ret = find_block_dqentry(info, dquot, blk);
out_buf:
freedqbuf(buf);
kfree(buf);
return ret;
}
@ -579,7 +584,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
int type = dquot->dq_type;
struct super_block *sb = dquot->dq_sb;
loff_t offset;
dqbuf_t ddquot;
char *ddquot;
int ret = 0;
#ifdef __QUOTA_QT_PARANOIA
@ -607,8 +612,8 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
ddquot = getdqbuf(info->dqi_entry_size);
if (!ddquot)
return -ENOMEM;
ret = sb->s_op->quota_read(sb, type, (char *)ddquot,
info->dqi_entry_size, dquot->dq_off);
ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
dquot->dq_off);
if (ret != info->dqi_entry_size) {
if (ret >= 0)
ret = -EIO;
@ -616,7 +621,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
"structure for id %u.\n", dquot->dq_id);
set_bit(DQ_FAKE_B, &dquot->dq_flags);
memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
freedqbuf(ddquot);
kfree(ddquot);
goto out;
}
spin_lock(&dq_data_lock);
@ -627,7 +632,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
!dquot->dq_dqb.dqb_isoftlimit)
set_bit(DQ_FAKE_B, &dquot->dq_flags);
spin_unlock(&dq_data_lock);
freedqbuf(ddquot);
kfree(ddquot);
out:
dqstats.reads++;
return ret;
@ -638,7 +643,8 @@ EXPORT_SYMBOL(qtree_read_dquot);
* the only one operating on dquot (thanks to dq_lock) */
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
!(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
return qtree_delete_dquot(info, dquot);
return 0;
}
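
The hunks above drop the dqbuf_t typedef in favour of plain char * and free the buffers with kfree() directly, which only makes sense if getdqbuf() has become a thin allocation wrapper. Its definition is not part of these hunks; a minimal sketch consistent with the kfree() calls (the exact body may differ) would be:

	static char *getdqbuf(size_t size)
	{
		/* GFP_NOFS: quota code runs deep inside filesystem operations */
		char *buf = kmalloc(size, GFP_NOFS);

		if (!buf)
			printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n");
		return buf;
	}

With getdqbuf() reduced to a kmalloc() wrapper, a dedicated freedqbuf() no longer earns its keep, which is why the callers above now call kfree() on the buffers themselves.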

View File

@ -62,11 +62,14 @@ static int v1_read_dqblk(struct dquot *dquot)
/* Set structure to 0s in case read fails/is after end of file */
memset(&dqblk, 0, sizeof(struct v1_disk_dqblk));
dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk,
sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk);
if (dquot->dq_dqb.dqb_bhardlimit == 0 && dquot->dq_dqb.dqb_bsoftlimit == 0 &&
dquot->dq_dqb.dqb_ihardlimit == 0 && dquot->dq_dqb.dqb_isoftlimit == 0)
if (dquot->dq_dqb.dqb_bhardlimit == 0 &&
dquot->dq_dqb.dqb_bsoftlimit == 0 &&
dquot->dq_dqb.dqb_ihardlimit == 0 &&
dquot->dq_dqb.dqb_isoftlimit == 0)
set_bit(DQ_FAKE_B, &dquot->dq_flags);
dqstats.reads++;
@ -81,13 +84,16 @@ static int v1_commit_dqblk(struct dquot *dquot)
v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb);
if (dquot->dq_id == 0) {
dqblk.dqb_btime = sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
dqblk.dqb_itime = sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace;
dqblk.dqb_btime =
sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
dqblk.dqb_itime =
sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace;
}
ret = 0;
if (sb_dqopt(dquot->dq_sb)->files[type])
ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, (char *)&dqblk,
sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type,
(char *)&dqblk, sizeof(struct v1_disk_dqblk),
v1_dqoff(dquot->dq_id));
if (ret != sizeof(struct v1_disk_dqblk)) {
printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
dquot->dq_sb->s_id);
@ -130,15 +136,20 @@ static int v1_check_quota_file(struct super_block *sb, int type)
return 0;
blocks = isize >> BLOCK_SIZE_BITS;
off = isize & (BLOCK_SIZE - 1);
if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk))
if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) %
sizeof(struct v1_disk_dqblk))
return 0;
/* Doublecheck whether we didn't get file with new format - with old quotactl() this could happen */
size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0);
/* Doublecheck whether we didn't get file with new format - with old
* quotactl() this could happen */
size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
sizeof(struct v2_disk_dqheader), 0);
if (size != sizeof(struct v2_disk_dqheader))
return 1; /* Probably not new format */
if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type])
return 1; /* Definitely not new format */
printk(KERN_INFO "VFS: %s: Refusing to turn on old quota format on given file. It probably contains newer quota format.\n", sb->s_id);
printk(KERN_INFO
"VFS: %s: Refusing to turn on old quota format on given file."
" It probably contains newer quota format.\n", sb->s_id);
return 0; /* Seems like a new format file -> refuse it */
}
@ -148,7 +159,9 @@ static int v1_read_file_info(struct super_block *sb, int type)
struct v1_disk_dqblk dqblk;
int ret;
if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) {
ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
sizeof(struct v1_disk_dqblk), v1_dqoff(0));
if (ret != sizeof(struct v1_disk_dqblk)) {
if (ret >= 0)
ret = -EIO;
goto out;
@ -157,8 +170,10 @@ static int v1_read_file_info(struct super_block *sb, int type)
/* limits are stored as unsigned 32-bit data */
dqopt->info[type].dqi_maxblimit = 0xffffffff;
dqopt->info[type].dqi_maxilimit = 0xffffffff;
dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
dqopt->info[type].dqi_igrace =
dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
dqopt->info[type].dqi_bgrace =
dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
out:
return ret;
}
@ -170,8 +185,9 @@ static int v1_write_file_info(struct super_block *sb, int type)
int ret;
dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY;
if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) {
ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
sizeof(struct v1_disk_dqblk), v1_dqoff(0));
if (ret != sizeof(struct v1_disk_dqblk)) {
if (ret >= 0)
ret = -EIO;
goto out;

View File

@ -54,7 +54,8 @@ static int v2_check_quota_file(struct super_block *sb, int type)
static const uint quota_magics[] = V2_INITQMAGICS;
static const uint quota_versions[] = V2_INITQVERSIONS;
size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0);
size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
sizeof(struct v2_disk_dqheader), 0);
if (size != sizeof(struct v2_disk_dqheader)) {
printk("quota_v2: failed read expected=%zd got=%zd\n",
sizeof(struct v2_disk_dqheader), size);

View File

@ -18,7 +18,6 @@
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/ramfs.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/mman.h>
@ -205,11 +204,6 @@ static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
if (ret)
return ret;
/* by providing our own setattr() method, we skip this quotaism */
if ((old_ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
(old_ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid))
ret = DQUOT_TRANSFER(inode, ia) ? -EDQUOT : 0;
/* pick out size-changing events */
if (ia->ia_valid & ATTR_SIZE) {
loff_t size = i_size_read(inode);

View File

@ -430,7 +430,7 @@ static void _reiserfs_free_block(struct reiserfs_transaction_handle *th,
journal_mark_dirty(th, s, sbh);
if (for_unformatted)
DQUOT_FREE_BLOCK_NODIRTY(inode, 1);
vfs_dq_free_block_nodirty(inode, 1);
}
void reiserfs_free_block(struct reiserfs_transaction_handle *th,
@ -1055,7 +1055,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
amount_needed, hint->inode->i_uid);
#endif
quota_ret =
DQUOT_ALLOC_BLOCK_NODIRTY(hint->inode, amount_needed);
vfs_dq_alloc_block_nodirty(hint->inode, amount_needed);
if (quota_ret) /* Quota exceeded? */
return QUOTA_EXCEEDED;
if (hint->preallocate && hint->prealloc_size) {
@ -1064,8 +1064,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
"reiserquota: allocating (prealloc) %d blocks id=%u",
hint->prealloc_size, hint->inode->i_uid);
#endif
quota_ret =
DQUOT_PREALLOC_BLOCK_NODIRTY(hint->inode,
quota_ret = vfs_dq_prealloc_block_nodirty(hint->inode,
hint->prealloc_size);
if (quota_ret)
hint->preallocate = hint->prealloc_size = 0;
@ -1098,7 +1097,10 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
nr_allocated,
hint->inode->i_uid);
#endif
DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed + hint->prealloc_size - nr_allocated); /* Free not allocated blocks */
/* Free not allocated blocks */
vfs_dq_free_block_nodirty(hint->inode,
amount_needed + hint->prealloc_size -
nr_allocated);
}
while (nr_allocated--)
reiserfs_free_block(hint->th, hint->inode,
@ -1129,7 +1131,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
REISERFS_I(hint->inode)->i_prealloc_count,
hint->inode->i_uid);
#endif
DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed +
vfs_dq_free_block_nodirty(hint->inode, amount_needed +
hint->prealloc_size - nr_allocated -
REISERFS_I(hint->inode)->
i_prealloc_count);

View File

@ -53,7 +53,7 @@ void reiserfs_delete_inode(struct inode *inode)
* after delete_object so that quota updates go into the same transaction as
* stat data deletion */
if (!err)
DQUOT_FREE_INODE(inode);
vfs_dq_free_inode(inode);
if (journal_end(&th, inode->i_sb, jbegin_count))
goto out;
@ -1763,7 +1763,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
BUG_ON(!th->t_trans_id);
if (DQUOT_ALLOC_INODE(inode)) {
if (vfs_dq_alloc_inode(inode)) {
err = -EDQUOT;
goto out_end_trans;
}
@ -1947,12 +1947,12 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
INODE_PKEY(inode)->k_objectid = 0;
/* Quota change must be inside a transaction for journaling */
DQUOT_FREE_INODE(inode);
vfs_dq_free_inode(inode);
out_end_trans:
journal_end(th, th->t_super, th->t_blocks_allocated);
/* Drop can be outside and it needs more credits so it's better to have it outside */
DQUOT_DROP(inode);
vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
make_bad_inode(inode);
@ -3119,7 +3119,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
goto out;
error =
DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
if (error) {
journal_end(&th, inode->i_sb,
jbegin_count);

View File

@ -555,7 +555,7 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th,
*/
static int drop_new_inode(struct inode *inode)
{
DQUOT_DROP(inode);
vfs_dq_drop(inode);
make_bad_inode(inode);
inode->i_flags |= S_NOQUOTA;
iput(inode);
@ -563,7 +563,7 @@ static int drop_new_inode(struct inode *inode)
}
/* utility function that does setup for reiserfs_new_inode.
** DQUOT_INIT needs lots of credits so it's better to have it
** vfs_dq_init needs lots of credits so it's better to have it
** outside of a transaction, so we had to pull some bits of
** reiserfs_new_inode out into this func.
*/
@ -586,7 +586,7 @@ static int new_inode_init(struct inode *inode, struct inode *dir, int mode)
} else {
inode->i_gid = current_fsgid();
}
DQUOT_INIT(inode);
vfs_dq_init(inode);
return 0;
}
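
The comment above is the reason this helper exists at all: vfs_dq_init() may need a large number of journal credits, so it has to run before a transaction is opened rather than inside one. A rough sketch of the resulting call order in a create-style caller (the surrounding code is not part of this hunk, and the later reiserfs_new_inode() arguments are elided):

	retval = new_inode_init(inode, dir, mode);	/* ends with vfs_dq_init(); no transaction held yet */
	if (retval)
		return retval;

	retval = journal_begin(&th, dir->i_sb, jbegin_count);	/* the journal transaction starts only now */
	if (retval)
		goto out_failed;

	/* reiserfs_new_inode(&th, dir, mode, ...) then runs with quota already initialized */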

View File

@ -1297,7 +1297,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath
"reiserquota delete_item(): freeing %u, id=%u type=%c",
quota_cut_bytes, p_s_inode->i_uid, head2type(&s_ih));
#endif
DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes);
vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes);
/* Return deleted body length */
return n_ret_value;
@ -1383,7 +1383,7 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
quota_cut_bytes, inode->i_uid,
key2type(key));
#endif
DQUOT_FREE_SPACE_NODIRTY(inode,
vfs_dq_free_space_nodirty(inode,
quota_cut_bytes);
}
break;
@ -1734,7 +1734,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
"reiserquota cut_from_item(): freeing %u id=%u type=%c",
quota_cut_bytes, p_s_inode->i_uid, '?');
#endif
DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes);
vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes);
return n_ret_value;
}
@ -1971,7 +1971,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
key2type(&(p_s_key->on_disk_key)));
#endif
if (DQUOT_ALLOC_SPACE_NODIRTY(inode, n_pasted_size)) {
if (vfs_dq_alloc_space_nodirty(inode, n_pasted_size)) {
pathrelse(p_s_search_path);
return -EDQUOT;
}
@ -2027,7 +2027,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
n_pasted_size, inode->i_uid,
key2type(&(p_s_key->on_disk_key)));
#endif
DQUOT_FREE_SPACE_NODIRTY(inode, n_pasted_size);
vfs_dq_free_space_nodirty(inode, n_pasted_size);
return retval;
}
@ -2060,7 +2060,7 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath
#endif
/* We can't dirty inode here. It would be immediately written but
* appropriate stat item isn't inserted yet... */
if (DQUOT_ALLOC_SPACE_NODIRTY(inode, quota_bytes)) {
if (vfs_dq_alloc_space_nodirty(inode, quota_bytes)) {
pathrelse(p_s_path);
return -EDQUOT;
}
@ -2112,6 +2112,6 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath
quota_bytes, inode->i_uid, head2type(p_s_ih));
#endif
if (inode)
DQUOT_FREE_SPACE_NODIRTY(inode, quota_bytes);
vfs_dq_free_space_nodirty(inode, quota_bytes);
return retval;
}

View File

@ -250,7 +250,7 @@ static int finish_unfinished(struct super_block *s)
retval = remove_save_link_only(s, &save_link_key, 0);
continue;
}
DQUOT_INIT(inode);
vfs_dq_init(inode);
if (truncate && S_ISDIR(inode->i_mode)) {
/* We got a truncate request for a dir which is impossible.
@ -629,8 +629,6 @@ static const struct super_operations reiserfs_sops = {
#ifdef CONFIG_QUOTA
#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
static int reiserfs_dquot_initialize(struct inode *, int);
static int reiserfs_dquot_drop(struct inode *);
static int reiserfs_write_dquot(struct dquot *);
static int reiserfs_acquire_dquot(struct dquot *);
static int reiserfs_release_dquot(struct dquot *);
@ -639,8 +637,8 @@ static int reiserfs_write_info(struct super_block *, int);
static int reiserfs_quota_on(struct super_block *, int, int, char *, int);
static struct dquot_operations reiserfs_quota_operations = {
.initialize = reiserfs_dquot_initialize,
.drop = reiserfs_dquot_drop,
.initialize = dquot_initialize,
.drop = dquot_drop,
.alloc_space = dquot_alloc_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
@ -1896,58 +1894,6 @@ static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf)
}
#ifdef CONFIG_QUOTA
static int reiserfs_dquot_initialize(struct inode *inode, int type)
{
struct reiserfs_transaction_handle th;
int ret, err;
/* We may create quota structure so we need to reserve enough blocks */
reiserfs_write_lock(inode->i_sb);
ret =
journal_begin(&th, inode->i_sb,
2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb));
if (ret)
goto out;
ret = dquot_initialize(inode, type);
err =
journal_end(&th, inode->i_sb,
2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb));
if (!ret && err)
ret = err;
out:
reiserfs_write_unlock(inode->i_sb);
return ret;
}
static int reiserfs_dquot_drop(struct inode *inode)
{
struct reiserfs_transaction_handle th;
int ret, err;
/* We may delete quota structure so we need to reserve enough blocks */
reiserfs_write_lock(inode->i_sb);
ret =
journal_begin(&th, inode->i_sb,
2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
if (ret) {
/*
* We call dquot_drop() anyway to at least release references
* to quota structures so that umount does not hang.
*/
dquot_drop(inode);
goto out;
}
ret = dquot_drop(inode);
err =
journal_end(&th, inode->i_sb,
2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
if (!ret && err)
ret = err;
out:
reiserfs_write_unlock(inode->i_sb);
return ret;
}
static int reiserfs_write_dquot(struct dquot *dquot)
{
struct reiserfs_transaction_handle th;

View File

@ -197,7 +197,7 @@ void deactivate_super(struct super_block *s)
if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
s->s_count -= S_BIAS-1;
spin_unlock(&sb_lock);
DQUOT_OFF(s, 0);
vfs_dq_off(s, 0);
down_write(&s->s_umount);
fs->kill_sb(s);
put_filesystem(fs);
@ -266,7 +266,7 @@ EXPORT_SYMBOL(unlock_super);
void __fsync_super(struct super_block *sb)
{
sync_inodes_sb(sb, 0);
DQUOT_SYNC(sb);
vfs_dq_sync(sb);
lock_super(sb);
if (sb->s_dirt && sb->s_op->write_super)
sb->s_op->write_super(sb);
@ -655,7 +655,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
mark_files_ro(sb);
else if (!fs_may_remount_ro(sb))
return -EBUSY;
retval = DQUOT_OFF(sb, 1);
retval = vfs_dq_off(sb, 1);
if (retval < 0 && retval != -ENOSYS)
return -EBUSY;
}
@ -670,7 +670,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
}
sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
if (remount_rw)
DQUOT_ON_REMOUNT(sb);
vfs_dq_quota_on_remount(sb);
return 0;
}

View File

@ -25,7 +25,7 @@ static void do_sync(unsigned long wait)
{
wakeup_pdflush(0);
sync_inodes(0); /* All mappings, inodes and their blockdevs */
DQUOT_SYNC(NULL);
vfs_dq_sync(NULL);
sync_supers(); /* Write the superblocks */
sync_filesystems(0); /* Start syncing the filesystems */
sync_filesystems(wait); /* Waitingly sync the filesystems */

View File

@ -206,7 +206,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
((char *)bh->b_data)[(bit + i) >> 3]);
} else {
if (inode)
DQUOT_FREE_BLOCK(inode, 1);
vfs_dq_free_block(inode, 1);
udf_add_free_space(sbi, sbi->s_partition, 1);
}
}
@ -261,11 +261,11 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
while (bit < (sb->s_blocksize << 3) && block_count > 0) {
if (!udf_test_bit(bit, bh->b_data))
goto out;
else if (DQUOT_PREALLOC_BLOCK(inode, 1))
else if (vfs_dq_prealloc_block(inode, 1))
goto out;
else if (!udf_clear_bit(bit, bh->b_data)) {
udf_debug("bit already cleared for block %d\n", bit);
DQUOT_FREE_BLOCK(inode, 1);
vfs_dq_free_block(inode, 1);
goto out;
}
block_count--;
@ -393,7 +393,7 @@ got_block:
/*
* Check quota for allocation of this block.
*/
if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
if (inode && vfs_dq_alloc_block(inode, 1)) {
mutex_unlock(&sbi->s_alloc_mutex);
*err = -EDQUOT;
return 0;
@ -452,7 +452,7 @@ static void udf_table_free_blocks(struct super_block *sb,
/* We do this up front - There are some error conditions that
could occur, but... oh well */
if (inode)
DQUOT_FREE_BLOCK(inode, count);
vfs_dq_free_block(inode, count);
if (udf_add_free_space(sbi, sbi->s_partition, count))
mark_buffer_dirty(sbi->s_lvid_bh);
@ -700,7 +700,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
epos.offset -= adsize;
alloc_count = (elen >> sb->s_blocksize_bits);
if (inode && DQUOT_PREALLOC_BLOCK(inode,
if (inode && vfs_dq_prealloc_block(inode,
alloc_count > block_count ? block_count : alloc_count))
alloc_count = 0;
else if (alloc_count > block_count) {
@ -806,7 +806,7 @@ static int udf_table_new_block(struct super_block *sb,
goal_eloc.logicalBlockNum++;
goal_elen -= sb->s_blocksize;
if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
if (inode && vfs_dq_alloc_block(inode, 1)) {
brelse(goal_epos.bh);
mutex_unlock(&sbi->s_alloc_mutex);
*err = -EDQUOT;

View File

@ -36,8 +36,8 @@ void udf_free_inode(struct inode *inode)
* Note: we must free any quota before locking the superblock,
* as writing the quota to disk may need the lock as well.
*/
DQUOT_FREE_INODE(inode);
DQUOT_DROP(inode);
vfs_dq_free_inode(inode);
vfs_dq_drop(inode);
clear_inode(inode);
@ -154,8 +154,8 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
insert_inode_hash(inode);
mark_inode_dirty(inode);
if (DQUOT_ALLOC_INODE(inode)) {
DQUOT_DROP(inode);
if (vfs_dq_alloc_inode(inode)) {
vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
iput(inode);
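
The note in the udf_free_inode() hunk above is about ordering rather than naming: writing the quota file may itself need the superblock lock, so the quota updates have to happen before that lock is taken. A minimal sketch of the intended order (lock_super() stands in for whatever lock the rest of udf_free_inode() takes; that part lies outside the hunk):

	vfs_dq_free_inode(inode);	/* account the freed inode against the owner's quota */
	vfs_dq_drop(inode);		/* drop dquot references; may write the quota file */
	clear_inode(inode);

	lock_super(sb);			/* only now take the superblock-level lock */
	/* ... mark the inode free in the on-disk bitmap ... */
	unlock_super(sb);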

View File

@ -85,7 +85,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
"bit already cleared for fragment %u", i);
}
DQUOT_FREE_BLOCK (inode, count);
vfs_dq_free_block(inode, count);
fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
@ -195,7 +195,7 @@ do_more:
ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
ufs_clusteracct (sb, ucpi, blkno, 1);
DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
vfs_dq_free_block(inode, uspi->s_fpb);
fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
uspi->cs_total.cs_nbfree++;
@ -556,7 +556,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
for (i = oldcount; i < newcount; i++)
ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
if(DQUOT_ALLOC_BLOCK(inode, count)) {
if (vfs_dq_alloc_block(inode, count)) {
*err = -EDQUOT;
return 0;
}
@ -664,7 +664,7 @@ cg_found:
for (i = count; i < uspi->s_fpb; i++)
ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
i = uspi->s_fpb - count;
DQUOT_FREE_BLOCK(inode, i);
vfs_dq_free_block(inode, i);
fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
uspi->cs_total.cs_nffree += i;
@ -676,7 +676,7 @@ cg_found:
result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
if (result == INVBLOCK)
return 0;
if(DQUOT_ALLOC_BLOCK(inode, count)) {
if (vfs_dq_alloc_block(inode, count)) {
*err = -EDQUOT;
return 0;
}
@ -747,7 +747,7 @@ gotit:
ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
ufs_clusteracct (sb, ucpi, blkno, -1);
if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
if (vfs_dq_alloc_block(inode, uspi->s_fpb)) {
*err = -EDQUOT;
return INVBLOCK;
}

View File

@ -95,8 +95,8 @@ void ufs_free_inode (struct inode * inode)
is_directory = S_ISDIR(inode->i_mode);
DQUOT_FREE_INODE(inode);
DQUOT_DROP(inode);
vfs_dq_free_inode(inode);
vfs_dq_drop(inode);
clear_inode (inode);
@ -355,8 +355,8 @@ cg_found:
unlock_super (sb);
if (DQUOT_ALLOC_INODE(inode)) {
DQUOT_DROP(inode);
if (vfs_dq_alloc_inode(inode)) {
vfs_dq_drop(inode);
err = -EDQUOT;
goto fail_without_unlock;
}

View File

@ -198,6 +198,7 @@ struct mem_dqblk {
qsize_t dqb_bhardlimit; /* absolute limit on disk blks alloc */
qsize_t dqb_bsoftlimit; /* preferred limit on disk blks */
qsize_t dqb_curspace; /* current used space */
qsize_t dqb_rsvspace; /* current reserved space for delalloc */
qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */
qsize_t dqb_isoftlimit; /* preferred inode limit */
qsize_t dqb_curinodes; /* current # allocated inodes */
@ -276,8 +277,6 @@ struct dquot {
struct mem_dqblk dq_dqb; /* Diskquota usage */
};
#define NODQUOT (struct dquot *)NULL
#define QUOTA_OK 0
#define NO_QUOTA 1
@ -308,6 +307,14 @@ struct dquot_operations {
int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */
int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */
int (*write_info) (struct super_block *, int); /* Write of quota "superblock" */
/* reserve quota for delayed block allocation */
int (*reserve_space) (struct inode *, qsize_t, int);
/* claim reserved quota for delayed alloc */
int (*claim_space) (struct inode *, qsize_t);
/* release rsved quota for delayed alloc */
void (*release_rsv) (struct inode *, qsize_t);
/* get reserved quota for delayed alloc */
qsize_t (*get_reserved_space) (struct inode *);
};
/* Operations handling requests from userspace */
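
The four callbacks added to struct dquot_operations above are the hook points for delayed-allocation quota: a reservation is made when data is buffered, and later either claimed (turned into real usage) or released. A sketch of how a filesystem might wire them up, using the generic helpers declared in the quotaops.h hunk further down (example_get_reserved_space is a hypothetical filesystem-specific helper, not defined in this patch):

	static struct dquot_operations example_dquot_operations = {
		.initialize	= dquot_initialize,
		.drop		= dquot_drop,
		.alloc_space	= dquot_alloc_space,
		.alloc_inode	= dquot_alloc_inode,
		.free_space	= dquot_free_space,
		/* new delayed-allocation hooks */
		.reserve_space	= dquot_reserve_space,		/* reserve quota at write time */
		.claim_space	= dquot_claim_space,		/* turn a reservation into used space */
		.release_rsv	= dquot_release_reserved_space,	/* drop an unused reservation */
		.get_reserved_space = example_get_reserved_space, /* how much this inode has reserved */
	};

Filesystems that never call the reservation helpers simply keep their existing dquot_operations; only the paths that go through vfs_dq_reserve_*/vfs_dq_claim_* need the new callbacks.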

View File

@ -35,6 +35,11 @@ void dquot_destroy(struct dquot *dquot);
int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc);
int dquot_alloc_inode(const struct inode *inode, qsize_t number);
int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc);
int dquot_claim_space(struct inode *inode, qsize_t number);
void dquot_release_reserved_space(struct inode *inode, qsize_t number);
qsize_t dquot_get_reserved_space(struct inode *inode);
int dquot_free_space(struct inode *inode, qsize_t number);
int dquot_free_inode(const struct inode *inode, qsize_t number);
@ -183,6 +188,16 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
return ret;
}
static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_active(inode->i_sb)) {
/* Used space is updated in alloc_space() */
if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
return 1;
}
return 0;
}
static inline int vfs_dq_alloc_inode(struct inode *inode)
{
if (sb_any_quota_active(inode->i_sb)) {
@ -193,6 +208,31 @@ static inline int vfs_dq_alloc_inode(struct inode *inode)
return 0;
}
/*
* Convert in-memory reserved quotas to real consumed quotas
*/
static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_active(inode->i_sb)) {
if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
return 1;
} else
inode_add_bytes(inode, nr);
mark_inode_dirty(inode);
return 0;
}
/*
* Release reserved (in-memory) quotas
*/
static inline
void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_active(inode->i_sb))
inode->i_sb->dq_op->release_rsv(inode, nr);
}
static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_active(inode->i_sb))
@ -339,6 +379,22 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
return 0;
}
static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
{
return 0;
}
static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
{
return vfs_dq_alloc_space(inode, nr);
}
static inline
int vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
{
return 0;
}
static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
{
inode_sub_bytes(inode, nr);
@ -354,67 +410,48 @@ static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr)
static inline int vfs_dq_prealloc_block_nodirty(struct inode *inode, qsize_t nr)
{
return vfs_dq_prealloc_space_nodirty(inode,
nr << inode->i_sb->s_blocksize_bits);
return vfs_dq_prealloc_space_nodirty(inode, nr << inode->i_blkbits);
}
static inline int vfs_dq_prealloc_block(struct inode *inode, qsize_t nr)
{
return vfs_dq_prealloc_space(inode,
nr << inode->i_sb->s_blocksize_bits);
return vfs_dq_prealloc_space(inode, nr << inode->i_blkbits);
}
static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr)
{
return vfs_dq_alloc_space_nodirty(inode,
nr << inode->i_sb->s_blocksize_bits);
return vfs_dq_alloc_space_nodirty(inode, nr << inode->i_blkbits);
}
static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr)
{
return vfs_dq_alloc_space(inode,
nr << inode->i_sb->s_blocksize_bits);
return vfs_dq_alloc_space(inode, nr << inode->i_blkbits);
}
static inline int vfs_dq_reserve_block(struct inode *inode, qsize_t nr)
{
return vfs_dq_reserve_space(inode, nr << inode->i_blkbits);
}
static inline int vfs_dq_claim_block(struct inode *inode, qsize_t nr)
{
return vfs_dq_claim_space(inode, nr << inode->i_blkbits);
}
static inline
void vfs_dq_release_reservation_block(struct inode *inode, qsize_t nr)
{
vfs_dq_release_reservation_space(inode, nr << inode->i_blkbits);
}
static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr)
{
vfs_dq_free_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits);
vfs_dq_free_space_nodirty(inode, nr << inode->i_blkbits);
}
static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr)
{
vfs_dq_free_space(inode, nr << inode->i_sb->s_blocksize_bits);
vfs_dq_free_space(inode, nr << inode->i_blkbits);
}
/*
* Define uppercase equivalents for compatibility with old function names
* Can go away when we think all users have been converted (15/04/2008)
*/
#define DQUOT_INIT(inode) vfs_dq_init(inode)
#define DQUOT_DROP(inode) vfs_dq_drop(inode)
#define DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr) \
vfs_dq_prealloc_space_nodirty(inode, nr)
#define DQUOT_PREALLOC_SPACE(inode, nr) vfs_dq_prealloc_space(inode, nr)
#define DQUOT_ALLOC_SPACE_NODIRTY(inode, nr) \
vfs_dq_alloc_space_nodirty(inode, nr)
#define DQUOT_ALLOC_SPACE(inode, nr) vfs_dq_alloc_space(inode, nr)
#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) \
vfs_dq_prealloc_block_nodirty(inode, nr)
#define DQUOT_PREALLOC_BLOCK(inode, nr) vfs_dq_prealloc_block(inode, nr)
#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) \
vfs_dq_alloc_block_nodirty(inode, nr)
#define DQUOT_ALLOC_BLOCK(inode, nr) vfs_dq_alloc_block(inode, nr)
#define DQUOT_ALLOC_INODE(inode) vfs_dq_alloc_inode(inode)
#define DQUOT_FREE_SPACE_NODIRTY(inode, nr) \
vfs_dq_free_space_nodirty(inode, nr)
#define DQUOT_FREE_SPACE(inode, nr) vfs_dq_free_space(inode, nr)
#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) \
vfs_dq_free_block_nodirty(inode, nr)
#define DQUOT_FREE_BLOCK(inode, nr) vfs_dq_free_block(inode, nr)
#define DQUOT_FREE_INODE(inode) vfs_dq_free_inode(inode)
#define DQUOT_TRANSFER(inode, iattr) vfs_dq_transfer(inode, iattr)
#define DQUOT_SYNC(sb) vfs_dq_sync(sb)
#define DQUOT_OFF(sb, remount) vfs_dq_off(sb, remount)
#define DQUOT_ON_REMOUNT(sb) vfs_dq_quota_on_remount(sb)
#endif /* _LINUX_QUOTAOPS_ */
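
Taken together with the new dqb_rsvspace field in struct mem_dqblk, these helpers give a delayed-allocation filesystem a three-step quota lifecycle: reserve when the write is buffered, then claim or release once the fate of the data is known. A usage sketch (the function names and the nr_blocks parameter are illustrative, not taken from this patch):

	/* 1. Buffered write: no blocks allocated yet, only quota reserved. */
	static int example_da_reserve(struct inode *inode, qsize_t nr_blocks)
	{
		if (vfs_dq_reserve_block(inode, nr_blocks))
			return -EDQUOT;
		return 0;
	}

	/* 2a. Writeback allocated real blocks: convert the reservation into usage. */
	static int example_da_claim(struct inode *inode, qsize_t nr_blocks)
	{
		if (vfs_dq_claim_block(inode, nr_blocks))
			return -EDQUOT;	/* unexpected - the space was reserved */
		return 0;
	}

	/* 2b. The delayed extent was dropped (truncate, error): give it back. */
	static void example_da_release(struct inode *inode, qsize_t nr_blocks)
	{
		vfs_dq_release_reservation_block(inode, nr_blocks);
	}

Without CONFIG_QUOTA the stubs above make vfs_dq_reserve_block() a no-op and turn vfs_dq_claim_block() into a plain vfs_dq_alloc_space() call, so callers do not need to special-case the non-quota build.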