
InfiniBand/RDMA fixes for 3.5-rc:

 - Fixes to new ocrdma driver
 - Typo in test in CMA
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1.4.11 (GNU/Linux)
 
 iQIcBAABCAAGBQJP5wISAAoJEENa44ZhAt0hWzAQAIV1qnXTz+HDXzz8MLTX8wR0
 6uUAcGxuP1PXB4K+DmYc7Ht0FnvJN0E9KN7cCDo20XbZU9NzXkqP36WL3fWt5+34
 9YKQYK14Rbi65ex+UwbASnyZ35iuNjPSVnnnEWMAoE3ZmEXWdM9FzIX41xav+FTy
 Lw0qIVO28d6Rx9LiODnNgbDqqyPzgqmam6SQszcfDwm+PqgcI/RaI/578GY+88qA
 +qofgDpL5rnzyWa2GMIGlbB21xKlqlhF3GuSSp5hHmDCaiNtfS0KsyHE+ys6r1UC
 uLKkBxlXngPF//DWFPUVVwXLHSHgtQ7qfHbk2kUqZjhrt/Famdju16q5oRSLiVrW
 MoilOEMzbt0AaRn2tEQ4srXHvhNnHkxQsPlSZ+alMWRDZS1jf4ESMOj+V+4gUUUQ
 +xYhyLge3GBUE8qt+zerbmLqRcyG6bF3HHMGFKJZrh3up++yZgZF/E5UXFTLYOv5
 6j9sG+dELFgtAhbC13ysvj5IyiVrFl3oh7Ynf2q5/Fyo2yPtde/hk+TGzXwRFOQg
 WF9SjnQsM3LsXhSX0D30kcjE76pCAcy6SyhE5FKA+mumyf7HG+UC+1lo2bUfNqO0
 HAXcgsc7wGXoOvBO1YOCmpUTyWrCQT3OikltDibnWdoUYpfaYFAiWJKdtLA0HylK
 3QJBh6/0/QGS5WhKr0KD
 =LFia
 -----END PGP SIGNATURE-----

Merge tag 'rdma-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull InfiniBand/RDMA fixes from Roland Dreier:
 - Fixes to new ocrdma driver
 - Typo in test in CMA

* tag 'rdma-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/cma: QP type check on received REQs should be AND not OR
  RDMA/ocrdma: Fix off by one in ocrdma_query_gid()
  RDMA/ocrdma: Fixed RQ error CQE polling
  RDMA/ocrdma: Correct queue SGE calculation
  RDMA/ocrdma: Correct reported max queue sizes
  RDMA/ocrdma: Fixed GID table for vlan and events
Linus Torvalds 2012-06-24 11:00:07 -07:00
commit 08d49c46cf
6 changed files with 55 additions and 46 deletions

@@ -1184,7 +1184,7 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
 
 static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
 {
-        return (((ib_event->event == IB_CM_REQ_RECEIVED) ||
+        return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
                  (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
                 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
                  (id->qp_type == IB_QPT_UD)) ||
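The one-character fix above matters because, with ||, the whole expression was true for any IB_CM_REQ_RECEIVED event, so the QP type carried in an incoming REQ was never actually compared against the listener's. A minimal standalone restatement of the corrected predicate (simplified enums and names are illustrative, not the kernel's):

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the IB CM event and QP type values. */
enum cm_event { CM_REQ_RECEIVED, CM_SIDR_REQ_RECEIVED };
enum qp_type  { QPT_RC, QPT_UC, QPT_UD };

/* A REQ is acceptable only when its QP type matches the listening ID's;
 * a SIDR REQ is acceptable only for UD listeners. */
static bool req_qp_type_ok(enum cm_event ev, enum qp_type req_qpt,
                           enum qp_type id_qpt)
{
        return (ev == CM_REQ_RECEIVED && req_qpt == id_qpt) ||
               (ev == CM_SIDR_REQ_RECEIVED && id_qpt == QPT_UD);
}

int main(void)
{
        /* With the old "||", a UC REQ against an RC listener passed the check. */
        printf("%d\n", req_qp_type_ok(CM_REQ_RECEIVED, QPT_UC, QPT_RC)); /* 0 */
        printf("%d\n", req_qp_type_ok(CM_REQ_RECEIVED, QPT_RC, QPT_RC)); /* 1 */
        return 0;
}
```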

@@ -61,6 +61,7 @@ struct ocrdma_dev_attr {
         u32 max_inline_data;
         int max_send_sge;
         int max_recv_sge;
+        int max_srq_sge;
         int max_mr;
         u64 max_mr_size;
         u32 max_num_mr_pbl;

@@ -990,8 +990,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
                             struct ocrdma_dev_attr *attr,
                             struct ocrdma_mbx_query_config *rsp)
 {
-        int max_q_mem;
-
         attr->max_pd =
             (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
             OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
@@ -1004,6 +1002,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
         attr->max_recv_sge = (rsp->max_write_send_sge &
                               OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
             OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
+        attr->max_srq_sge = (rsp->max_srq_rqe_sge &
+                              OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
+            OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
         attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
                                 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
             OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
@@ -1037,18 +1038,15 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
         attr->max_inline_data =
             attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
                               sizeof(struct ocrdma_sge));
-        max_q_mem = OCRDMA_Q_PAGE_BASE_SIZE << (OCRDMA_MAX_Q_PAGE_SIZE_CNT - 1);
-        /* hw can queue one less then the configured size,
-         * so publish less by one to stack.
-         */
         if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
-                dev->attr.max_wqe = max_q_mem / dev->attr.wqe_size;
                 attr->ird = 1;
                 attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
                 attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
-        } else
-                dev->attr.max_wqe = (max_q_mem / dev->attr.wqe_size) - 1;
-        dev->attr.max_rqe = (max_q_mem / dev->attr.rqe_size) - 1;
+        }
+        dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
+                OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
+        dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
+                OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
 }
 
 static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
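The rewritten code above stops deriving the queue depths from a fixed page-size calculation and instead unpacks the two 16-bit fields that firmware reports in max_wqes_rqes_per_q: the WQ depth in the upper half-word and the RQ depth in the lower one (the RQ offset and mask appear in the ocrdma_sli.h hunk further down; the WQ offset value of 16 is assumed here from that layout, it is not shown in this capture). A minimal standalone sketch of the unpacking:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors the ocrdma_sli.h definitions in this commit; the WQ offset of 16
 * is an assumption based on the two 16-bit fields sharing one 32-bit word. */
#define MAX_WQES_PER_WQ_OFFSET  16
#define MAX_RQES_PER_RQ_OFFSET  0
#define MAX_RQES_PER_RQ_MASK    (0xFFFFu << MAX_RQES_PER_RQ_OFFSET)

/* Illustrative helper: split the 32-bit mailbox field into the two
 * 16-bit queue depths the firmware reports. */
static void unpack_queue_sizes(uint32_t max_wqes_rqes_per_q,
                               uint32_t *max_wqe, uint32_t *max_rqe)
{
        *max_wqe = max_wqes_rqes_per_q >> MAX_WQES_PER_WQ_OFFSET;
        *max_rqe = max_wqes_rqes_per_q & MAX_RQES_PER_RQ_MASK;
}

int main(void)
{
        uint32_t wqe, rqe;

        unpack_queue_sizes(0x00400080u, &wqe, &rqe); /* 64 WQEs, 128 RQEs */
        printf("max_wqe=%u max_rqe=%u\n", (unsigned)wqe, (unsigned)rqe);
        return 0;
}
```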

@@ -97,13 +97,11 @@ static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,
         sgid->raw[15] = mac_addr[5];
 }
 
-static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
+static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
                             bool is_vlan, u16 vlan_id)
 {
         int i;
-        bool found = false;
         union ib_gid new_sgid;
-        int free_idx = OCRDMA_MAX_SGID;
         unsigned long flags;
 
         memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
@@ -115,23 +113,19 @@ static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
                 if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
                             sizeof(union ib_gid))) {
                         /* found free entry */
-                        if (!found) {
-                                free_idx = i;
-                                found = true;
-                                break;
-                        }
+                        memcpy(&dev->sgid_tbl[i], &new_sgid,
+                               sizeof(union ib_gid));
+                        spin_unlock_irqrestore(&dev->sgid_lock, flags);
+                        return true;
                 } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,
                                    sizeof(union ib_gid))) {
                         /* entry already present, no addition is required. */
                         spin_unlock_irqrestore(&dev->sgid_lock, flags);
-                        return;
+                        return false;
                 }
         }
-        /* if entry doesn't exist and if table has some space, add entry */
-        if (found)
-                memcpy(&dev->sgid_tbl[free_idx], &new_sgid,
-                       sizeof(union ib_gid));
         spin_unlock_irqrestore(&dev->sgid_lock, flags);
+        return false;
 }
 
 static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
@@ -167,7 +161,8 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
         ocrdma_get_guid(dev, &sgid->raw[8]);
 }
 
-static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
 {
         struct net_device *netdev, *tmp;
         u16 vlan_id;
@@ -175,8 +170,6 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
 
         netdev = dev->nic_info.netdev;
 
-        ocrdma_add_default_sgid(dev);
-
         rcu_read_lock();
         for_each_netdev_rcu(&init_net, tmp) {
                 if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
@@ -194,10 +187,23 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
                 }
         }
         rcu_read_unlock();
+}
+#else
+static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
+{
+}
+#endif /* VLAN */
+
+static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+{
+        ocrdma_add_default_sgid(dev);
+        ocrdma_add_vlan_sgids(dev);
+
         return 0;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) || \
+        defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 
 static int ocrdma_inet6addr_event(struct notifier_block *notifier,
                                   unsigned long event, void *ptr)
@@ -208,6 +214,7 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
         struct ib_event gid_event;
         struct ocrdma_dev *dev;
         bool found = false;
+        bool updated = false;
         bool is_vlan = false;
         u16 vid = 0;
 
@@ -233,23 +240,21 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
         mutex_lock(&dev->dev_lock);
         switch (event) {
         case NETDEV_UP:
-                ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
+                updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
                 break;
         case NETDEV_DOWN:
-                found = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
-                if (found) {
-                        /* found the matching entry, notify
-                         * the consumers about it
-                         */
-                        gid_event.device = &dev->ibdev;
-                        gid_event.element.port_num = 1;
-                        gid_event.event = IB_EVENT_GID_CHANGE;
-                        ib_dispatch_event(&gid_event);
-                }
+                updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
                 break;
         default:
                 break;
         }
+        if (updated) {
+                /* GID table updated, notify the consumers about it */
+                gid_event.device = &dev->ibdev;
+                gid_event.element.port_num = 1;
+                gid_event.event = IB_EVENT_GID_CHANGE;
+                ib_dispatch_event(&gid_event);
+        }
         mutex_unlock(&dev->dev_lock);
         return NOTIFY_OK;
 }
@@ -258,7 +263,7 @@ static struct notifier_block ocrdma_inet6addr_notifier = {
         .notifier_call = ocrdma_inet6addr_event
 };
 
-#endif /* IPV6 */
+#endif /* IPV6 and VLAN */
 
 static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
                                               u8 port_num)
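The restructuring above also shows a common kernel pattern: the VLAN-specific scan is compiled only when CONFIG_VLAN_8021Q is enabled, and an empty stub is provided otherwise so ocrdma_build_sgid_tbl() can call it unconditionally. A minimal, hypothetical sketch of that pattern (struct and function names are illustrative, and CONFIG_EXAMPLE_VLAN stands in for the real config symbol):

```c
#include <stdio.h>

struct example_dev { int gid_count; };

/* Optional-feature stub pattern: when the feature is configured out, an
 * empty stub keeps the call site free of #ifdefs. */
#ifdef CONFIG_EXAMPLE_VLAN
static void example_add_vlan_entries(struct example_dev *dev)
{
        dev->gid_count += 2;    /* pretend we added per-VLAN entries */
}
#else
static void example_add_vlan_entries(struct example_dev *dev)
{
        (void)dev;              /* feature compiled out: nothing to do */
}
#endif

static int example_build_table(struct example_dev *dev)
{
        dev->gid_count = 1;                 /* default entry */
        example_add_vlan_entries(dev);      /* unconditional call */
        return 0;
}

int main(void)
{
        struct example_dev dev;

        example_build_table(&dev);
        printf("gid entries: %d\n", dev.gid_count);
        return 0;
}
```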

@@ -418,6 +418,9 @@ enum {
         OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0,
         OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF,
+        OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16,
+        OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK = 0xFFFF <<
+                                OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT,
 
         OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0,
         OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF,
@@ -458,7 +461,7 @@ enum {
                                 OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
         OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET = 0,
         OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK = 0xFFFF <<
-                                OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
+                                OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET,
 
         OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET = 16,
         OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK = 0xFFFF <<

@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
         dev = get_ocrdma_dev(ibdev);
         memset(sgid, 0, sizeof(*sgid));
-        if (index > OCRDMA_MAX_SGID)
+        if (index >= OCRDMA_MAX_SGID)
                 return -EINVAL;
 
         memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
 
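The corrected bound above is the standard off-by-one fix: a table with OCRDMA_MAX_SGID entries has valid indices 0 through OCRDMA_MAX_SGID - 1, so the guard must reject index == OCRDMA_MAX_SGID. A tiny standalone sketch of the same check (TBL_SIZE and lookup() are illustrative names, not the driver's):

```c
#include <stddef.h>
#include <stdio.h>

#define TBL_SIZE 16     /* stand-in for OCRDMA_MAX_SGID */

/* Reject out-of-range indices with ">=": "index > TBL_SIZE" would still
 * let index == TBL_SIZE through and read one element past the end. */
static int lookup(const int *tbl, size_t index, int *out)
{
        if (index >= TBL_SIZE)
                return -1;
        *out = tbl[index];
        return 0;
}

int main(void)
{
        int tbl[TBL_SIZE] = { 0 };
        int val;

        printf("%d\n", lookup(tbl, TBL_SIZE, &val)); /* -1: rejected */
        printf("%d\n", lookup(tbl, 0, &val));        /*  0: ok */
        return 0;
}
```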
@@ -83,8 +83,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
             IB_DEVICE_SHUTDOWN_PORT |
             IB_DEVICE_SYS_IMAGE_GUID |
             IB_DEVICE_LOCAL_DMA_LKEY;
-        attr->max_sge = dev->attr.max_send_sge;
-        attr->max_sge_rd = dev->attr.max_send_sge;
+        attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
+        attr->max_sge_rd = 0;
         attr->max_cq = dev->attr.max_cq;
         attr->max_cqe = dev->attr.max_cqe;
         attr->max_mr = dev->attr.max_mr;
@@ -97,7 +97,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
             min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
         attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
         attr->max_srq = (dev->attr.max_qp - 1);
-        attr->max_srq_sge = attr->max_sge;
+        attr->max_srq_sge = dev->attr.max_srq_sge;
         attr->max_srq_wr = dev->attr.max_rqe;
         attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
         attr->max_fast_reg_page_list_len = 0;
@@ -2301,8 +2301,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
                         *stop = true;
                         expand = false;
                 }
-        } else
+        } else {
+                *polled = true;
                 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
+        }
 
         return expand;
 }