xfs: remove SYNC_BDFLUSH
SYNC_BDFLUSH is a leftover from IRIX and rather misnamed for today's code. Make xfs_sync_fsdata and xfs_dq_sync use the SYNC_TRYLOCK flag for not blocking on locks, just as the inode sync code already does. For xfs_sync_fsdata it's a trivial 1:1 replacement, but for xfs_qm_sync I use the opportunity to decouple the non-blocking lock case from the different flushing modes, similar to the inode sync code. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Eric Sandeen <sandeen@sandeen.net>
This commit is contained in:
parent
b0710ccc6d
commit
8b5403a6d7
3 changed files with 11 additions and 30 deletions
|
@ -353,7 +353,7 @@ xfs_sync_fsdata(
|
||||||
* If this is xfssyncd() then only sync the superblock if we can
|
* If this is xfssyncd() then only sync the superblock if we can
|
||||||
* lock it without sleeping and it is not pinned.
|
* lock it without sleeping and it is not pinned.
|
||||||
*/
|
*/
|
||||||
if (flags & SYNC_BDFLUSH) {
|
if (flags & SYNC_TRYLOCK) {
|
||||||
ASSERT(!(flags & SYNC_WAIT));
|
ASSERT(!(flags & SYNC_WAIT));
|
||||||
|
|
||||||
bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
|
bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
|
||||||
|
@ -418,7 +418,7 @@ xfs_quiesce_data(
|
||||||
|
|
||||||
/* push non-blocking */
|
/* push non-blocking */
|
||||||
xfs_sync_data(mp, 0);
|
xfs_sync_data(mp, 0);
|
||||||
xfs_qm_sync(mp, SYNC_BDFLUSH);
|
xfs_qm_sync(mp, SYNC_TRYLOCK);
|
||||||
xfs_filestream_flush(mp);
|
xfs_filestream_flush(mp);
|
||||||
|
|
||||||
/* push and block */
|
/* push and block */
|
||||||
|
@ -568,8 +568,8 @@ xfs_sync_worker(
|
||||||
xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
|
xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
|
||||||
xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
|
xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
|
||||||
/* dgc: errors ignored here */
|
/* dgc: errors ignored here */
|
||||||
error = xfs_qm_sync(mp, SYNC_BDFLUSH);
|
error = xfs_qm_sync(mp, SYNC_TRYLOCK);
|
||||||
error = xfs_sync_fsdata(mp, SYNC_BDFLUSH);
|
error = xfs_sync_fsdata(mp, SYNC_TRYLOCK);
|
||||||
if (xfs_log_need_covered(mp))
|
if (xfs_log_need_covered(mp))
|
||||||
error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE);
|
error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE);
|
||||||
}
|
}
|
||||||
|
|
|
@ -29,9 +29,8 @@ typedef struct xfs_sync_work {
|
||||||
struct completion *w_completion;
|
struct completion *w_completion;
|
||||||
} xfs_sync_work_t;
|
} xfs_sync_work_t;
|
||||||
|
|
||||||
#define SYNC_WAIT 0x0004 /* wait for i/o to complete */
|
#define SYNC_WAIT 0x0001 /* wait for i/o to complete */
|
||||||
#define SYNC_BDFLUSH 0x0008 /* BDFLUSH is calling -- don't block */
|
#define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */
|
||||||
#define SYNC_TRYLOCK 0x0020 /* only try to lock inodes */
|
|
||||||
|
|
||||||
int xfs_syncd_init(struct xfs_mount *mp);
|
int xfs_syncd_init(struct xfs_mount *mp);
|
||||||
void xfs_syncd_stop(struct xfs_mount *mp);
|
void xfs_syncd_stop(struct xfs_mount *mp);
|
||||||
|
|
|
@ -905,11 +905,6 @@ xfs_qm_dqdetach(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* This is called to sync quotas. We can be told to use non-blocking
|
|
||||||
* semantics by either the SYNC_BDFLUSH flag or the absence of the
|
|
||||||
* SYNC_WAIT flag.
|
|
||||||
*/
|
|
||||||
int
|
int
|
||||||
xfs_qm_sync(
|
xfs_qm_sync(
|
||||||
xfs_mount_t *mp,
|
xfs_mount_t *mp,
|
||||||
|
@ -918,17 +913,13 @@ xfs_qm_sync(
|
||||||
int recl, restarts;
|
int recl, restarts;
|
||||||
xfs_dquot_t *dqp;
|
xfs_dquot_t *dqp;
|
||||||
uint flush_flags;
|
uint flush_flags;
|
||||||
boolean_t nowait;
|
|
||||||
int error;
|
int error;
|
||||||
|
|
||||||
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
|
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
flush_flags = (flags & SYNC_WAIT) ? XFS_QMOPT_SYNC : XFS_QMOPT_DELWRI;
|
||||||
restarts = 0;
|
restarts = 0;
|
||||||
/*
|
|
||||||
* We won't block unless we are asked to.
|
|
||||||
*/
|
|
||||||
nowait = (boolean_t)(flags & SYNC_BDFLUSH || (flags & SYNC_WAIT) == 0);
|
|
||||||
|
|
||||||
again:
|
again:
|
||||||
xfs_qm_mplist_lock(mp);
|
xfs_qm_mplist_lock(mp);
|
||||||
|
@ -948,18 +939,10 @@ xfs_qm_sync(
|
||||||
* don't 'seem' to be dirty. ie. don't acquire dqlock.
|
* don't 'seem' to be dirty. ie. don't acquire dqlock.
|
||||||
* This is very similar to what xfs_sync does with inodes.
|
* This is very similar to what xfs_sync does with inodes.
|
||||||
*/
|
*/
|
||||||
if (flags & SYNC_BDFLUSH) {
|
if (flags & SYNC_TRYLOCK) {
|
||||||
if (! XFS_DQ_IS_DIRTY(dqp))
|
if (!XFS_DQ_IS_DIRTY(dqp))
|
||||||
continue;
|
continue;
|
||||||
}
|
if (!xfs_qm_dqlock_nowait(dqp))
|
||||||
|
|
||||||
if (nowait) {
|
|
||||||
/*
|
|
||||||
* Try to acquire the dquot lock. We are NOT out of
|
|
||||||
* lock order, but we just don't want to wait for this
|
|
||||||
* lock, unless somebody wanted us to.
|
|
||||||
*/
|
|
||||||
if (! xfs_qm_dqlock_nowait(dqp))
|
|
||||||
continue;
|
continue;
|
||||||
} else {
|
} else {
|
||||||
xfs_dqlock(dqp);
|
xfs_dqlock(dqp);
|
||||||
|
@ -976,7 +959,7 @@ xfs_qm_sync(
|
||||||
/* XXX a sentinel would be better */
|
/* XXX a sentinel would be better */
|
||||||
recl = XFS_QI_MPLRECLAIMS(mp);
|
recl = XFS_QI_MPLRECLAIMS(mp);
|
||||||
if (!xfs_dqflock_nowait(dqp)) {
|
if (!xfs_dqflock_nowait(dqp)) {
|
||||||
if (nowait) {
|
if (flags & SYNC_TRYLOCK) {
|
||||||
xfs_dqunlock(dqp);
|
xfs_dqunlock(dqp);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
@ -994,7 +977,6 @@ xfs_qm_sync(
|
||||||
* Let go of the mplist lock. We don't want to hold it
|
* Let go of the mplist lock. We don't want to hold it
|
||||||
* across a disk write
|
* across a disk write
|
||||||
*/
|
*/
|
||||||
flush_flags = (nowait) ? XFS_QMOPT_DELWRI : XFS_QMOPT_SYNC;
|
|
||||||
xfs_qm_mplist_unlock(mp);
|
xfs_qm_mplist_unlock(mp);
|
||||||
xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
|
xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
|
||||||
error = xfs_qm_dqflush(dqp, flush_flags);
|
error = xfs_qm_dqflush(dqp, flush_flags);
|
||||||
|
|
Loading…
Reference in a new issue