xfs: simplify xfs_qm_dqusage_adjust

There is no need to have the user and group/project quotas locked at the
same time.  Get rid of xfs_qm_dqget_noattach and just do a xfs_qm_dqget
inside xfs_qm_quotacheck_dqadjust for the quota we are operating on
right now.  The new version of xfs_qm_quotacheck_dqadjust holds the
inode lock over its operations, which is not a problem as it simply
increments counters and there is no concern about log contention
during mount time.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
This commit is contained in:
Christoph Hellwig 2010-09-06 01:44:22 +00:00 committed by Alex Elder
parent 4472235205
commit 52fda11424

View File

@ -1199,87 +1199,6 @@ xfs_qm_list_destroy(
mutex_destroy(&(list->qh_lock)); mutex_destroy(&(list->qh_lock));
} }
/*
* Stripped down version of dqattach. This doesn't attach, or even look at the
* dquots attached to the inode. The rationale is that there won't be any
* attached at the time this is called from quotacheck.
*/
STATIC int
xfs_qm_dqget_noattach(
xfs_inode_t *ip,
xfs_dquot_t **O_udqpp,
xfs_dquot_t **O_gdqpp)
{
int error;
xfs_mount_t *mp;
xfs_dquot_t *udqp, *gdqp;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
mp = ip->i_mount;
udqp = NULL;
gdqp = NULL;
if (XFS_IS_UQUOTA_ON(mp)) {
ASSERT(ip->i_udquot == NULL);
/*
* We want the dquot allocated if it doesn't exist.
*/
if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER,
XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN,
&udqp))) {
/*
* Shouldn't be able to turn off quotas here.
*/
ASSERT(error != ESRCH);
ASSERT(error != ENOENT);
return error;
}
ASSERT(udqp);
}
if (XFS_IS_OQUOTA_ON(mp)) {
ASSERT(ip->i_gdquot == NULL);
if (udqp)
xfs_dqunlock(udqp);
error = XFS_IS_GQUOTA_ON(mp) ?
xfs_qm_dqget(mp, ip,
ip->i_d.di_gid, XFS_DQ_GROUP,
XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
&gdqp) :
xfs_qm_dqget(mp, ip,
ip->i_d.di_projid, XFS_DQ_PROJ,
XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
&gdqp);
if (error) {
if (udqp)
xfs_qm_dqrele(udqp);
ASSERT(error != ESRCH);
ASSERT(error != ENOENT);
return error;
}
ASSERT(gdqp);
/* Reacquire the locks in the right order */
if (udqp) {
if (! xfs_qm_dqlock_nowait(udqp)) {
xfs_dqunlock(gdqp);
xfs_dqlock(udqp);
xfs_dqlock(gdqp);
}
}
}
*O_udqpp = udqp;
*O_gdqpp = gdqp;
#ifdef QUOTADEBUG
if (udqp) ASSERT(XFS_DQ_IS_LOCKED(udqp));
if (gdqp) ASSERT(XFS_DQ_IS_LOCKED(gdqp));
#endif
return 0;
}
/* /*
* Create an inode and return with a reference already taken, but unlocked * Create an inode and return with a reference already taken, but unlocked
* This is how we create quota inodes * This is how we create quota inodes
@ -1546,18 +1465,34 @@ xfs_qm_dqiterate(
/* /*
* Called by dqusage_adjust in doing a quotacheck. * Called by dqusage_adjust in doing a quotacheck.
* Given the inode, and a dquot (either USR or GRP, doesn't matter), *
* this updates its incore copy as well as the buffer copy. This is * Given the inode, and a dquot id, this updates both the incore dquot as well
* so that once the quotacheck is done, we can just log all the buffers, * as the buffer copy. This is so that once the quotacheck is done, we can
* as opposed to logging numerous updates to individual dquots. * just log all the buffers, as opposed to logging numerous updates to
* individual dquots.
*/ */
STATIC void STATIC int
xfs_qm_quotacheck_dqadjust( xfs_qm_quotacheck_dqadjust(
xfs_dquot_t *dqp, struct xfs_inode *ip,
xfs_dqid_t id,
uint type,
xfs_qcnt_t nblks, xfs_qcnt_t nblks,
xfs_qcnt_t rtblks) xfs_qcnt_t rtblks)
{ {
ASSERT(XFS_DQ_IS_LOCKED(dqp)); struct xfs_mount *mp = ip->i_mount;
struct xfs_dquot *dqp;
int error;
error = xfs_qm_dqget(mp, ip, id, type,
XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
if (error) {
/*
* Shouldn't be able to turn off quotas here.
*/
ASSERT(error != ESRCH);
ASSERT(error != ENOENT);
return error;
}
trace_xfs_dqadjust(dqp); trace_xfs_dqadjust(dqp);
@ -1582,11 +1517,13 @@ xfs_qm_quotacheck_dqadjust(
* There are no timers for the default values set in the root dquot. * There are no timers for the default values set in the root dquot.
*/ */
if (dqp->q_core.d_id) { if (dqp->q_core.d_id) {
xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core); xfs_qm_adjust_dqlimits(mp, &dqp->q_core);
xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core); xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
} }
dqp->dq_flags |= XFS_DQ_DIRTY; dqp->dq_flags |= XFS_DQ_DIRTY;
xfs_qm_dqput(dqp);
return 0;
} }
STATIC int STATIC int
@ -1629,8 +1566,7 @@ xfs_qm_dqusage_adjust(
int *res) /* result code value */ int *res) /* result code value */
{ {
xfs_inode_t *ip; xfs_inode_t *ip;
xfs_dquot_t *udqp, *gdqp; xfs_qcnt_t nblks, rtblks = 0;
xfs_qcnt_t nblks, rtblks;
int error; int error;
ASSERT(XFS_IS_QUOTA_RUNNING(mp)); ASSERT(XFS_IS_QUOTA_RUNNING(mp));
@ -1650,51 +1586,24 @@ xfs_qm_dqusage_adjust(
* the case in all other instances. It's OK that we do this because * the case in all other instances. It's OK that we do this because
* quotacheck is done only at mount time. * quotacheck is done only at mount time.
*/ */
if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip))) { error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
if (error) {
*res = BULKSTAT_RV_NOTHING; *res = BULKSTAT_RV_NOTHING;
return error; return error;
} }
/* ASSERT(ip->i_delayed_blks == 0);
* Obtain the locked dquots. In case of an error (eg. allocation
* fails for ENOSPC), we return the negative of the error number
* to bulkstat, so that it can get propagated to quotacheck() and
* making us disable quotas for the file system.
*/
if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
IRELE(ip);
*res = BULKSTAT_RV_GIVEUP;
return error;
}
rtblks = 0; if (XFS_IS_REALTIME_INODE(ip)) {
if (! XFS_IS_REALTIME_INODE(ip)) {
nblks = (xfs_qcnt_t)ip->i_d.di_nblocks;
} else {
/* /*
* Walk thru the extent list and count the realtime blocks. * Walk thru the extent list and count the realtime blocks.
*/ */
if ((error = xfs_qm_get_rtblks(ip, &rtblks))) { error = xfs_qm_get_rtblks(ip, &rtblks);
xfs_iunlock(ip, XFS_ILOCK_EXCL); if (error)
IRELE(ip); goto error0;
if (udqp)
xfs_qm_dqput(udqp);
if (gdqp)
xfs_qm_dqput(gdqp);
*res = BULKSTAT_RV_GIVEUP;
return error;
}
nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
} }
ASSERT(ip->i_delayed_blks == 0);
/* nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
* We can't release the inode while holding its dquot locks.
* The inode can go into inactive and might try to acquire the dquotlocks.
* So, just unlock here and do a vn_rele at the end.
*/
xfs_iunlock(ip, XFS_ILOCK_EXCL);
/* /*
* Add the (disk blocks and inode) resources occupied by this * Add the (disk blocks and inode) resources occupied by this
@ -1709,26 +1618,36 @@ xfs_qm_dqusage_adjust(
* and quotaoffs don't race. (Quotachecks happen at mount time only). * and quotaoffs don't race. (Quotachecks happen at mount time only).
*/ */
if (XFS_IS_UQUOTA_ON(mp)) { if (XFS_IS_UQUOTA_ON(mp)) {
ASSERT(udqp); error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
xfs_qm_quotacheck_dqadjust(udqp, nblks, rtblks); XFS_DQ_USER, nblks, rtblks);
xfs_qm_dqput(udqp); if (error)
goto error0;
} }
if (XFS_IS_OQUOTA_ON(mp)) {
ASSERT(gdqp);
xfs_qm_quotacheck_dqadjust(gdqp, nblks, rtblks);
xfs_qm_dqput(gdqp);
}
/*
* Now release the inode. This will send it to 'inactive', and
* possibly even free blocks.
*/
IRELE(ip);
/* if (XFS_IS_GQUOTA_ON(mp)) {
* Goto next inode. error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
*/ XFS_DQ_GROUP, nblks, rtblks);
if (error)
goto error0;
}
if (XFS_IS_PQUOTA_ON(mp)) {
error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_projid,
XFS_DQ_PROJ, nblks, rtblks);
if (error)
goto error0;
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
IRELE(ip);
*res = BULKSTAT_RV_DIDONE; *res = BULKSTAT_RV_DIDONE;
return 0; return 0;
error0:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
IRELE(ip);
*res = BULKSTAT_RV_GIVEUP;
return error;
} }
/* /*