dect / linux-2.6

[XFS] Unwrap AIL_LOCK

SGI-PV: 970382
SGI-Modid: xfs-linux-melb:xfs-kern:29739a

Signed-off-by: Donald Douwsma <donaldd@sgi.com>
Signed-off-by: Eric Sandeen <sandeen@sandeen.net>
Signed-off-by: Tim Shimmin <tes@sgi.com>
Authored by Donald Douwsma on 2007-10-11 17:36:05 +10:00; committed by Lachlan McIlroy
parent 541d7d3c4b
commit 287f3dad14
12 changed files with 81 additions and 114 deletions
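The conversion is mechanical: every AIL_LOCK(mp, s)/AIL_UNLOCK(mp, s) pair, together with the SPLDECL(s) cookie declaration it required, becomes a direct spin_lock()/spin_unlock() on mp->m_ail_lock, and the helpers that consume the lock on the caller's behalf (xfs_trans_update_ail(), xfs_trans_delete_ail()) lose their unsigned long s argument. A minimal before/after sketch of the pattern, assuming the usual XFS headers are in scope; the helper names example_set_flush_lsn_old/new are made up for illustration, while the lock, fields, and macros are the ones shown in the diff below:

/*
 * Old style, as removed by this commit: lock macros carrying an
 * interrupt-state cookie through every caller.
 */
STATIC void
example_set_flush_lsn_old(
	xfs_mount_t		*mp,
	xfs_inode_log_item_t	*iip)
{
	SPLDECL(s);

	AIL_LOCK(mp, s);
	iip->ili_flush_lsn = iip->ili_item.li_lsn;
	AIL_UNLOCK(mp, s);
}

/*
 * New style: m_ail_lock is a plain spinlock_t used directly, and no
 * cookie is needed.
 */
STATIC void
example_set_flush_lsn_new(
	xfs_mount_t		*mp,
	xfs_inode_log_item_t	*iip)
{
	spin_lock(&mp->m_ail_lock);
	iip->ili_flush_lsn = iip->ili_item.li_lsn;
	spin_unlock(&mp->m_ail_lock);
}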

View File

@ -1209,7 +1209,6 @@ xfs_qm_dqflush(
xfs_buf_t *bp;
xfs_disk_dquot_t *ddqp;
int error;
SPLDECL(s);
ASSERT(XFS_DQ_IS_LOCKED(dqp));
ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp));
@ -1270,9 +1269,9 @@ xfs_qm_dqflush(
mp = dqp->q_mount;
/* lsn is 64 bits */
AIL_LOCK(mp, s);
spin_lock(&mp->m_ail_lock);
dqp->q_logitem.qli_flush_lsn = dqp->q_logitem.qli_item.li_lsn;
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
/*
* Attach an iodone routine so that we can remove this dquot from the
@ -1318,7 +1317,6 @@ xfs_qm_dqflush_done(
xfs_dq_logitem_t *qip)
{
xfs_dquot_t *dqp;
SPLDECL(s);
dqp = qip->qli_dquot;
@ -1333,15 +1331,15 @@ xfs_qm_dqflush_done(
if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
qip->qli_item.li_lsn == qip->qli_flush_lsn) {
AIL_LOCK(dqp->q_mount, s);
spin_lock(&dqp->q_mount->m_ail_lock);
/*
* xfs_trans_delete_ail() drops the AIL lock.
*/
if (qip->qli_item.li_lsn == qip->qli_flush_lsn)
xfs_trans_delete_ail(dqp->q_mount,
(xfs_log_item_t*)qip, s);
(xfs_log_item_t*)qip);
else
AIL_UNLOCK(dqp->q_mount, s);
spin_unlock(&dqp->q_mount->m_ail_lock);
}
/*

View File

@ -216,8 +216,8 @@ xfs_qm_dqunpin_wait(
* If so, we want to push it out to help us take this item off the AIL as soon
* as possible.
*
* We must not be holding the AIL_LOCK at this point. Calling incore() to
* search the buffer cache can be a time consuming thing, and AIL_LOCK is a
* We must not be holding the AIL lock at this point. Calling incore() to
* search the buffer cache can be a time consuming thing, and AIL lock is a
* spinlock.
*/
STATIC void
@ -322,7 +322,7 @@ xfs_qm_dquot_logitem_trylock(
* want to do that now since we might sleep in the device
* strategy routine. We also don't want to grab the buffer lock
* here because we'd like not to call into the buffer cache
* while holding the AIL_LOCK.
* while holding the AIL lock.
* Make sure to only return PUSHBUF if we set pushbuf_flag
* ourselves. If someone else is doing it then we don't
* want to go to the push routine and duplicate their efforts.
@ -562,15 +562,14 @@ xfs_qm_qoffend_logitem_committed(
xfs_lsn_t lsn)
{
xfs_qoff_logitem_t *qfs;
SPLDECL(s);
qfs = qfe->qql_start_lip;
AIL_LOCK(qfs->qql_item.li_mountp,s);
spin_lock(&qfs->qql_item.li_mountp->m_ail_lock);
/*
* Delete the qoff-start logitem from the AIL.
* xfs_trans_delete_ail() drops the AIL lock.
*/
xfs_trans_delete_ail(qfs->qql_item.li_mountp, (xfs_log_item_t *)qfs, s);
xfs_trans_delete_ail(qfs->qql_item.li_mountp, (xfs_log_item_t *)qfs);
kmem_free(qfs, sizeof(xfs_qoff_logitem_t));
kmem_free(qfe, sizeof(xfs_qoff_logitem_t));
return (xfs_lsn_t)-1;

View File

@ -378,7 +378,6 @@ xfs_buf_item_unpin(
xfs_mount_t *mp;
xfs_buf_t *bp;
int freed;
SPLDECL(s);
bp = bip->bli_buf;
ASSERT(bp != NULL);
@ -409,8 +408,8 @@ xfs_buf_item_unpin(
XFS_BUF_SET_FSPRIVATE(bp, NULL);
XFS_BUF_CLR_IODONE_FUNC(bp);
} else {
AIL_LOCK(mp,s);
xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip, s);
spin_lock(&mp->m_ail_lock);
xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip);
xfs_buf_item_relse(bp);
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL);
}
@ -1113,7 +1112,6 @@ xfs_buf_iodone(
xfs_buf_log_item_t *bip)
{
struct xfs_mount *mp;
SPLDECL(s);
ASSERT(bip->bli_buf == bp);
@ -1128,11 +1126,11 @@ xfs_buf_iodone(
*
* Either way, AIL is useless if we're forcing a shutdown.
*/
AIL_LOCK(mp,s);
spin_lock(&mp->m_ail_lock);
/*
* xfs_trans_delete_ail() drops the AIL lock.
*/
xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip, s);
xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip);
#ifdef XFS_TRANS_DEBUG
kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp));

View File

@ -110,19 +110,18 @@ STATIC void
xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale)
{
xfs_mount_t *mp;
SPLDECL(s);
mp = efip->efi_item.li_mountp;
AIL_LOCK(mp, s);
spin_lock(&mp->m_ail_lock);
if (efip->efi_flags & XFS_EFI_CANCELED) {
/*
* xfs_trans_delete_ail() drops the AIL lock.
*/
xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s);
xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip);
xfs_efi_item_free(efip);
} else {
efip->efi_flags |= XFS_EFI_COMMITTED;
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
}
}
@ -138,10 +137,9 @@ xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp)
{
xfs_mount_t *mp;
xfs_log_item_desc_t *lidp;
SPLDECL(s);
mp = efip->efi_item.li_mountp;
AIL_LOCK(mp, s);
spin_lock(&mp->m_ail_lock);
if (efip->efi_flags & XFS_EFI_CANCELED) {
/*
* free the xaction descriptor pointing to this item
@ -152,11 +150,11 @@ xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp)
* pull the item off the AIL.
* xfs_trans_delete_ail() drops the AIL lock.
*/
xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s);
xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip);
xfs_efi_item_free(efip);
} else {
efip->efi_flags |= XFS_EFI_COMMITTED;
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
}
}
@ -350,13 +348,12 @@ xfs_efi_release(xfs_efi_log_item_t *efip,
{
xfs_mount_t *mp;
int extents_left;
SPLDECL(s);
mp = efip->efi_item.li_mountp;
ASSERT(efip->efi_next_extent > 0);
ASSERT(efip->efi_flags & XFS_EFI_COMMITTED);
AIL_LOCK(mp, s);
spin_lock(&mp->m_ail_lock);
ASSERT(efip->efi_next_extent >= nextents);
efip->efi_next_extent -= nextents;
extents_left = efip->efi_next_extent;
@ -364,10 +361,10 @@ xfs_efi_release(xfs_efi_log_item_t *efip,
/*
* xfs_trans_delete_ail() drops the AIL lock.
*/
xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s);
xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip);
xfs_efi_item_free(efip);
} else {
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
}
}

View File

@ -2205,7 +2205,6 @@ xfs_ifree_cluster(
xfs_inode_log_item_t *iip;
xfs_log_item_t *lip;
xfs_perag_t *pag = xfs_get_perag(mp, inum);
SPLDECL(s);
if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
blks_per_cluster = 1;
@ -2307,9 +2306,9 @@ xfs_ifree_cluster(
iip = (xfs_inode_log_item_t *)lip;
ASSERT(iip->ili_logged == 1);
lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done;
AIL_LOCK(mp,s);
spin_lock(&mp->m_ail_lock);
iip->ili_flush_lsn = iip->ili_item.li_lsn;
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
pre_flushed++;
}
@ -2330,9 +2329,9 @@ xfs_ifree_cluster(
iip->ili_last_fields = iip->ili_format.ilf_fields;
iip->ili_format.ilf_fields = 0;
iip->ili_logged = 1;
AIL_LOCK(mp,s);
spin_lock(&mp->m_ail_lock);
iip->ili_flush_lsn = iip->ili_item.li_lsn;
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
xfs_buf_attach_iodone(bp,
(void(*)(xfs_buf_t*,xfs_log_item_t*))
@ -2731,7 +2730,6 @@ void
xfs_idestroy(
xfs_inode_t *ip)
{
switch (ip->i_d.di_mode & S_IFMT) {
case S_IFREG:
case S_IFDIR:
@ -2771,16 +2769,15 @@ xfs_idestroy(
*/
xfs_mount_t *mp = ip->i_mount;
xfs_log_item_t *lip = &ip->i_itemp->ili_item;
int s;
ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
XFS_FORCED_SHUTDOWN(ip->i_mount));
if (lip->li_flags & XFS_LI_IN_AIL) {
AIL_LOCK(mp, s);
spin_lock(&mp->m_ail_lock);
if (lip->li_flags & XFS_LI_IN_AIL)
xfs_trans_delete_ail(mp, lip, s);
xfs_trans_delete_ail(mp, lip);
else
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
}
xfs_inode_item_destroy(ip);
}
@ -3334,7 +3331,6 @@ xfs_iflush_int(
#ifdef XFS_TRANS_DEBUG
int first;
#endif
SPLDECL(s);
ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
ASSERT(issemalocked(&(ip->i_flock)));
@ -3529,9 +3525,9 @@ xfs_iflush_int(
iip->ili_logged = 1;
ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */
AIL_LOCK(mp,s);
spin_lock(&mp->m_ail_lock);
iip->ili_flush_lsn = iip->ili_item.li_lsn;
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
/*
* Attach the function xfs_iflush_done to the inode's

View File

@ -615,7 +615,7 @@ xfs_inode_item_trylock(
return XFS_ITEM_PUSHBUF;
} else {
/*
* We hold the AIL_LOCK, so we must specify the
* We hold the AIL lock, so we must specify the
* NONOTIFY flag so that we won't double trip.
*/
xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY);
@ -749,7 +749,7 @@ xfs_inode_item_committed(
* marked delayed write. If that's the case, we'll initiate a bawrite on that
* buffer to expedite the process.
*
* We aren't holding the AIL_LOCK (or the flush lock) when this gets called,
* We aren't holding the AIL lock (or the flush lock) when this gets called,
* so it is inherently race-y.
*/
STATIC void
@ -792,7 +792,7 @@ xfs_inode_item_pushbuf(
if (XFS_BUF_ISDELAYWRITE(bp)) {
/*
* We were racing with iflush because we don't hold
* the AIL_LOCK or the flush lock. However, at this point,
* the AIL lock or the flush lock. However, at this point,
* we have the buffer, and we know that it's dirty.
* So, it's possible that iflush raced with us, and
* this item is already taken off the AIL.
@ -968,7 +968,6 @@ xfs_iflush_done(
xfs_inode_log_item_t *iip)
{
xfs_inode_t *ip;
SPLDECL(s);
ip = iip->ili_inode;
@ -983,15 +982,15 @@ xfs_iflush_done(
*/
if (iip->ili_logged &&
(iip->ili_item.li_lsn == iip->ili_flush_lsn)) {
AIL_LOCK(ip->i_mount, s);
spin_lock(&ip->i_mount->m_ail_lock);
if (iip->ili_item.li_lsn == iip->ili_flush_lsn) {
/*
* xfs_trans_delete_ail() drops the AIL lock.
*/
xfs_trans_delete_ail(ip->i_mount,
(xfs_log_item_t*)iip, s);
(xfs_log_item_t*)iip);
} else {
AIL_UNLOCK(ip->i_mount, s);
spin_unlock(&ip->i_mount->m_ail_lock);
}
}
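Both xfs_qm_dqflush_done() and xfs_iflush_done() follow the same double-check shape: the flush LSN is compared once without the lock, m_ail_lock is then taken, and the comparison is repeated before the item is pulled off the AIL, since a relog can move li_lsn between the two tests. A condensed sketch of that shape; example_flush_done() and its flush_lsn argument are illustrative only, while the lock, flags, and xfs_trans_delete_ail() call follow the diff:

STATIC void
example_flush_done(
	xfs_mount_t	*mp,
	xfs_log_item_t	*lip,
	xfs_lsn_t	flush_lsn)
{
	/* Cheap unlocked test first; most completions fail it. */
	if ((lip->li_flags & XFS_LI_IN_AIL) && lip->li_lsn == flush_lsn) {
		spin_lock(&mp->m_ail_lock);
		/* Re-check under the lock: a relog may have moved li_lsn. */
		if (lip->li_lsn == flush_lsn)
			/* xfs_trans_delete_ail() drops m_ail_lock. */
			xfs_trans_delete_ail(mp, lip);
		else
			spin_unlock(&mp->m_ail_lock);
	}
}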
@ -1025,21 +1024,19 @@ xfs_iflush_abort(
{
xfs_inode_log_item_t *iip;
xfs_mount_t *mp;
SPLDECL(s);
iip = ip->i_itemp;
mp = ip->i_mount;
if (iip) {
if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
AIL_LOCK(mp, s);
spin_lock(&mp->m_ail_lock);
if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
/*
* xfs_trans_delete_ail() drops the AIL lock.
*/
xfs_trans_delete_ail(mp, (xfs_log_item_t *)iip,
s);
xfs_trans_delete_ail(mp, (xfs_log_item_t *)iip);
} else
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
}
iip->ili_logged = 0;
/*

View File

@ -2660,7 +2660,6 @@ xlog_recover_do_efi_trans(
xfs_mount_t *mp;
xfs_efi_log_item_t *efip;
xfs_efi_log_format_t *efi_formatp;
SPLDECL(s);
if (pass == XLOG_RECOVER_PASS1) {
return 0;
@ -2678,11 +2677,11 @@ xlog_recover_do_efi_trans(
efip->efi_next_extent = efi_formatp->efi_nextents;
efip->efi_flags |= XFS_EFI_COMMITTED;
AIL_LOCK(mp,s);
spin_lock(&mp->m_ail_lock);
/*
* xfs_trans_update_ail() drops the AIL lock.
*/
xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn, s);
xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn);
return 0;
}
@ -2707,7 +2706,6 @@ xlog_recover_do_efd_trans(
xfs_log_item_t *lip;
int gen;
__uint64_t efi_id;
SPLDECL(s);
if (pass == XLOG_RECOVER_PASS1) {
return;
@ -2725,7 +2723,7 @@ xlog_recover_do_efd_trans(
* in the AIL.
*/
mp = log->l_mp;
AIL_LOCK(mp,s);
spin_lock(&mp->m_ail_lock);
lip = xfs_trans_first_ail(mp, &gen);
while (lip != NULL) {
if (lip->li_type == XFS_LI_EFI) {
@ -2735,7 +2733,7 @@ xlog_recover_do_efd_trans(
* xfs_trans_delete_ail() drops the
* AIL lock.
*/
xfs_trans_delete_ail(mp, lip, s);
xfs_trans_delete_ail(mp, lip);
break;
}
}
@ -2749,7 +2747,7 @@ xlog_recover_do_efd_trans(
if (lip != NULL) {
xfs_efi_item_free(efip);
} else {
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
}
}
@ -3075,10 +3073,9 @@ xlog_recover_process_efis(
xfs_efi_log_item_t *efip;
int gen;
xfs_mount_t *mp;
SPLDECL(s);
mp = log->l_mp;
AIL_LOCK(mp,s);
spin_lock(&mp->m_ail_lock);
lip = xfs_trans_first_ail(mp, &gen);
while (lip != NULL) {
@ -3099,12 +3096,12 @@ xlog_recover_process_efis(
continue;
}
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
xlog_recover_process_efi(mp, efip);
AIL_LOCK(mp,s);
spin_lock(&mp->m_ail_lock);
lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
}
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
}
/*

View File

@ -136,7 +136,7 @@ xfs_mount_init(void)
mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
}
AIL_LOCKINIT(&mp->m_ail_lock, "xfs_ail");
spinlock_init(&mp->m_ail_lock, "xfs_ail");
spinlock_init(&mp->m_sb_lock, "xfs_sb");
mutex_init(&mp->m_ilock);
mutex_init(&mp->m_growlock);
@ -171,7 +171,7 @@ xfs_mount_free(
sizeof(xfs_perag_t) * mp->m_sb.sb_agcount);
}
AIL_LOCK_DESTROY(&mp->m_ail_lock);
spinlock_destroy(&mp->m_ail_lock);
spinlock_destroy(&mp->m_sb_lock);
mutex_destroy(&mp->m_ilock);
mutex_destroy(&mp->m_growlock);

View File

@ -63,13 +63,6 @@ struct xfs_extdelta;
struct xfs_swapext;
struct xfs_mru_cache;
#define AIL_LOCK_T lock_t
#define AIL_LOCKINIT(x,y) spinlock_init(x,y)
#define AIL_LOCK_DESTROY(x) spinlock_destroy(x)
#define AIL_LOCK(mp,s) s=mutex_spinlock(&(mp)->m_ail_lock)
#define AIL_UNLOCK(mp,s) mutex_spinunlock(&(mp)->m_ail_lock, s)
/*
* Prototypes and functions for the Data Migration subsystem.
*/
@ -230,7 +223,7 @@ extern void xfs_icsb_sync_counters_flags(struct xfs_mount *, int);
typedef struct xfs_mount {
struct super_block *m_super;
xfs_tid_t m_tid; /* next unused tid for fs */
AIL_LOCK_T m_ail_lock; /* fs AIL mutex */
spinlock_t m_ail_lock; /* fs AIL mutex */
xfs_ail_entry_t m_ail; /* fs active log item list */
uint m_ail_gen; /* fs AIL generation count */
xfs_sb_t m_sb; /* copy of fs superblock */

View File

@ -1322,7 +1322,6 @@ xfs_trans_chunk_committed(
xfs_lsn_t item_lsn;
struct xfs_mount *mp;
int i;
SPLDECL(s);
lidp = licp->lic_descs;
for (i = 0; i < licp->lic_unused; i++, lidp++) {
@ -1363,7 +1362,7 @@ xfs_trans_chunk_committed(
* the test below.
*/
mp = lip->li_mountp;
AIL_LOCK(mp,s);
spin_lock(&mp->m_ail_lock);
if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
/*
* This will set the item's lsn to item_lsn
@ -1372,9 +1371,9 @@ xfs_trans_chunk_committed(
*
* xfs_trans_update_ail() drops the AIL lock.
*/
xfs_trans_update_ail(mp, lip, item_lsn, s);
xfs_trans_update_ail(mp, lip, item_lsn);
} else {
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
}
/*

View File

@ -55,16 +55,15 @@ xfs_trans_tail_ail(
{
xfs_lsn_t lsn;
xfs_log_item_t *lip;
SPLDECL(s);
AIL_LOCK(mp,s);
spin_lock(&mp->m_ail_lock);
lip = xfs_ail_min(&(mp->m_ail));
if (lip == NULL) {
lsn = (xfs_lsn_t)0;
} else {
lsn = lip->li_lsn;
}
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
return lsn;
}
@ -89,17 +88,16 @@ xfs_trans_push_ail(
int restarts;
int lock_result;
int flush_log;
SPLDECL(s);
#define XFS_TRANS_PUSH_AIL_RESTARTS 1000
AIL_LOCK(mp,s);
spin_lock(&mp->m_ail_lock);
lip = xfs_trans_first_ail(mp, &gen);
if (lip == NULL || XFS_FORCED_SHUTDOWN(mp)) {
/*
* Just return if the AIL is empty.
*/
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
return (xfs_lsn_t)0;
}
@ -112,7 +110,7 @@ xfs_trans_push_ail(
* beginning of the list. We'd like not to stop until we've at least
* tried to push on everything in the AIL with an LSN less than
* the given threshold. However, we may give up before that if
* we realize that we've been holding the AIL_LOCK for 'too long',
* we realize that we've been holding the AIL lock for 'too long',
* blocking interrupts. Currently, too long is < 500us roughly.
*/
flush_log = 0;
@ -136,14 +134,14 @@ xfs_trans_push_ail(
lock_result = IOP_TRYLOCK(lip);
switch (lock_result) {
case XFS_ITEM_SUCCESS:
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
XFS_STATS_INC(xs_push_ail_success);
IOP_PUSH(lip);
AIL_LOCK(mp,s);
spin_lock(&mp->m_ail_lock);
break;
case XFS_ITEM_PUSHBUF:
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
XFS_STATS_INC(xs_push_ail_pushbuf);
#ifdef XFSRACEDEBUG
delay_for_intr();
@ -152,7 +150,7 @@ xfs_trans_push_ail(
ASSERT(lip->li_ops->iop_pushbuf);
ASSERT(lip);
IOP_PUSHBUF(lip);
AIL_LOCK(mp,s);
spin_lock(&mp->m_ail_lock);
break;
case XFS_ITEM_PINNED:
@ -181,7 +179,7 @@ xfs_trans_push_ail(
/*
* Just return if we shut down during the last try.
*/
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
return (xfs_lsn_t)0;
}
@ -193,10 +191,10 @@ xfs_trans_push_ail(
* push out the log so it will become unpinned and
* move forward in the AIL.
*/
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
XFS_STATS_INC(xs_push_ail_flush);
xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
AIL_LOCK(mp, s);
spin_lock(&mp->m_ail_lock);
}
lip = xfs_ail_min(&(mp->m_ail));
@ -206,7 +204,7 @@ xfs_trans_push_ail(
lsn = lip->li_lsn;
}
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
return lsn;
} /* xfs_trans_push_ail */
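xfs_trans_push_ail() above never holds m_ail_lock across anything slow: the lock is dropped around IOP_PUSH(), IOP_PUSHBUF() and xfs_log_force(), then retaken before the walk continues. A condensed sketch of that drop/reacquire shape; example_push_one() is illustrative only and is assumed to be entered and left with m_ail_lock held, while the item-ops macros and return codes are the ones used in the diff:

STATIC void
example_push_one(
	xfs_mount_t	*mp,
	xfs_log_item_t	*lip)
{
	switch (IOP_TRYLOCK(lip)) {
	case XFS_ITEM_SUCCESS:
		spin_unlock(&mp->m_ail_lock);
		IOP_PUSH(lip);		/* may issue I/O; must not hold the spinlock */
		spin_lock(&mp->m_ail_lock);
		break;
	case XFS_ITEM_PUSHBUF:
		spin_unlock(&mp->m_ail_lock);
		IOP_PUSHBUF(lip);	/* searches the buffer cache */
		spin_lock(&mp->m_ail_lock);
		break;
	default:
		/* Pinned or locked items are skipped with the lock still held. */
		break;
	}
}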
@ -269,15 +267,13 @@ xfs_trans_unlocked_item(
* has changed.
*
* This function must be called with the AIL lock held. The lock
* is dropped before returning, so the caller must pass in the
* cookie returned by AIL_LOCK.
* is dropped before returning.
*/
void
xfs_trans_update_ail(
xfs_mount_t *mp,
xfs_log_item_t *lip,
xfs_lsn_t lsn,
unsigned long s) __releases(mp->m_ail_lock)
xfs_lsn_t lsn) __releases(mp->m_ail_lock)
{
xfs_ail_entry_t *ailp;
xfs_log_item_t *dlip=NULL;
@ -300,10 +296,10 @@ xfs_trans_update_ail(
if (mlip == dlip) {
mlip = xfs_ail_min(&(mp->m_ail));
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
xfs_log_move_tail(mp, mlip->li_lsn);
} else {
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
}
@ -322,14 +318,12 @@ xfs_trans_update_ail(
* has changed.
*
* This function must be called with the AIL lock held. The lock
* is dropped before returning, so the caller must pass in the
* cookie returned by AIL_LOCK.
* is dropped before returning.
*/
void
xfs_trans_delete_ail(
xfs_mount_t *mp,
xfs_log_item_t *lip,
unsigned long s) __releases(mp->m_ail_lock)
xfs_log_item_t *lip) __releases(mp->m_ail_lock)
{
xfs_ail_entry_t *ailp;
xfs_log_item_t *dlip;
@ -348,10 +342,10 @@ xfs_trans_delete_ail(
if (mlip == dlip) {
mlip = xfs_ail_min(&(mp->m_ail));
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
xfs_log_move_tail(mp, (mlip ? mlip->li_lsn : 0));
} else {
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
}
}
else {
@ -360,12 +354,12 @@ xfs_trans_delete_ail(
* serious trouble if we get to this stage.
*/
if (XFS_FORCED_SHUTDOWN(mp))
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
else {
xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
"%s: attempting to delete a log item that is not in the AIL",
__FUNCTION__);
AIL_UNLOCK(mp, s);
spin_unlock(&mp->m_ail_lock);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
}
}
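With the cookie parameter gone, the remaining oddity of xfs_trans_update_ail() and xfs_trans_delete_ail() is that they return with m_ail_lock dropped, which the __releases() annotation makes visible to sparse. A minimal sketch of that annotation pattern, assuming the same XFS types; example_remove_locked() is illustrative only, while __releases() and the unlock call are the real interfaces used in this commit:

/*
 * Entered with mp->m_ail_lock held, returns with it dropped; the
 * __releases() annotation lets sparse check lock balance at call sites.
 */
STATIC void
example_remove_locked(
	xfs_mount_t	*mp,
	xfs_log_item_t	*lip) __releases(mp->m_ail_lock)
{
	/* ... unlink lip from mp->m_ail here ... */
	spin_unlock(&mp->m_ail_lock);
}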

View File

@ -47,11 +47,10 @@ xfs_log_busy_slot_t *xfs_trans_add_busy(xfs_trans_t *tp,
* From xfs_trans_ail.c
*/
void xfs_trans_update_ail(struct xfs_mount *mp,
struct xfs_log_item *lip, xfs_lsn_t lsn,
unsigned long s)
struct xfs_log_item *lip, xfs_lsn_t lsn)
__releases(mp->m_ail_lock);
void xfs_trans_delete_ail(struct xfs_mount *mp,
struct xfs_log_item *lip, unsigned long s)
struct xfs_log_item *lip)
__releases(mp->m_ail_lock);
struct xfs_log_item *xfs_trans_first_ail(struct xfs_mount *, int *);
struct xfs_log_item *xfs_trans_next_ail(struct xfs_mount *,