RDMA/amso1100: Prevent deadlock in destroy QP

It is possible to swap the CQs used for send_cq and recv_cq when
creating two different QPs.  If these two QPs are then destroyed at
the same time, an AB-BA deadlock can occur because the CQ locks are
taken out of order.  Fix this by always taking CQ locks in a fixed
order.

Signed-off-by: Krishna Kumar <krkumar2@in.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Authored by Krishna Kumar on 2006-11-24 16:03:48 +05:30, committed by Roland Dreier
parent 7013696a5f
commit c9edea298e
1 changed file with 28 additions and 8 deletions
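
The locking pattern the patch relies on can be shown in isolation. The sketch below is a minimal userspace analogue, not part of the driver: pthread mutexes stand in for the CQ spinlocks, and the names queue, lock_pair and unlock_pair are invented for illustration. Because both acquisitions are ordered by object address, two threads locking the same pair of CQs agree on which lock to take first, regardless of which CQ each treats as the send or receive CQ, so the AB-BA cycle described above cannot form.

/*
 * Illustrative userspace sketch only (assumed names: queue, lock_pair,
 * unlock_pair).  The real driver code uses spin_lock_irq()/spin_lock_nested()
 * on struct c2_cq; here pthread mutexes stand in for the spinlocks.
 */
#include <pthread.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t lock;
	int id;
};

/* Take both locks in a fixed, address-based order; handle a shared CQ. */
static void lock_pair(struct queue *send, struct queue *recv)
{
	if (send == recv) {
		pthread_mutex_lock(&send->lock);
	} else if (send > recv) {		/* same address comparison as the driver */
		pthread_mutex_lock(&send->lock);
		pthread_mutex_lock(&recv->lock);
	} else {
		pthread_mutex_lock(&recv->lock);
		pthread_mutex_lock(&send->lock);
	}
}

/* Release in the reverse of the acquisition order. */
static void unlock_pair(struct queue *send, struct queue *recv)
{
	if (send == recv) {
		pthread_mutex_unlock(&send->lock);
	} else if (send > recv) {
		pthread_mutex_unlock(&recv->lock);
		pthread_mutex_unlock(&send->lock);
	} else {
		pthread_mutex_unlock(&send->lock);
		pthread_mutex_unlock(&recv->lock);
	}
}

int main(void)
{
	struct queue a = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct queue b = { PTHREAD_MUTEX_INITIALIZER, 2 };

	/* QP1 was created with send_cq = a, recv_cq = b; QP2 with the CQs
	 * swapped.  Both destroy paths still acquire the same lock first. */
	lock_pair(&a, &b);
	printf("destroy QP1: both CQ locks held\n");
	unlock_pair(&a, &b);

	lock_pair(&b, &a);
	printf("destroy QP2: both CQ locks held\n");
	unlock_pair(&b, &a);

	return 0;
}

In the kernel patch itself, the second acquisition uses spin_lock_nested(..., SINGLE_DEPTH_NESTING) rather than a plain spin_lock(); that annotation tells lockdep that holding two locks of the same lock class at once is intentional here, so it does not report a false positive.
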

@@ -564,6 +564,32 @@ int c2_alloc_qp(struct c2_dev *c2dev,
 	return err;
 }
 
+static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+	if (send_cq == recv_cq)
+		spin_lock_irq(&send_cq->lock);
+	else if (send_cq > recv_cq) {
+		spin_lock_irq(&send_cq->lock);
+		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+	} else {
+		spin_lock_irq(&recv_cq->lock);
+		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+	}
+}
+
+static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+	if (send_cq == recv_cq)
+		spin_unlock_irq(&send_cq->lock);
+	else if (send_cq > recv_cq) {
+		spin_unlock(&recv_cq->lock);
+		spin_unlock_irq(&send_cq->lock);
+	} else {
+		spin_unlock(&send_cq->lock);
+		spin_unlock_irq(&recv_cq->lock);
+	}
+}
+
 void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
 {
 	struct c2_cq *send_cq;
@@ -576,15 +602,9 @@ void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
 	 * Lock CQs here, so that CQ polling code can do QP lookup
 	 * without taking a lock.
 	 */
-	spin_lock_irq(&send_cq->lock);
-	if (send_cq != recv_cq)
-		spin_lock(&recv_cq->lock);
+	c2_lock_cqs(send_cq, recv_cq);
 
 	c2_free_qpn(c2dev, qp->qpn);
-	if (send_cq != recv_cq)
-		spin_unlock(&recv_cq->lock);
-	spin_unlock_irq(&send_cq->lock);
+	c2_unlock_cqs(send_cq, recv_cq);
 
 	/*
 	 * Destory qp in the rnic...