Archived
14
0
Fork 0

RDMA/cma: Avoid assigning an IS_ERR value to cm_id pointer in CMA id object

Avoid assigning an IS_ERR value to the cm_id pointer.  This fixes a
few anomalies in the error flow due to confusion about checking for
NULL vs IS_ERR, and eliminates the need to test for the IS_ERR value
every time we wish to determine if the cma_id object has a cm device
associated with it.

Also, eliminate the now-unnecessary procedure cma_has_cm_dev (we can
check directly for the existence of the device pointer -- for a
non-NULL check, makes no difference if it is the iwarp or the ib
pointer).

Finally, make a few code changes here to improve coding consistency.

Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <roland@purestorage.com>
This commit is contained in:
Jack Morgenstein 2011-07-17 10:46:47 +00:00 committed by Roland Dreier
parent 620917de59
commit 0c9361fcde

View file

@@ -406,11 +406,6 @@ static int cma_disable_callback(struct rdma_id_private *id_priv,
return 0; return 0;
} }
static int cma_has_cm_dev(struct rdma_id_private *id_priv)
{
return (id_priv->id.device && id_priv->cm_id.ib);
}
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
void *context, enum rdma_port_space ps, void *context, enum rdma_port_space ps,
enum ib_qp_type qp_type) enum ib_qp_type qp_type)
@@ -920,11 +915,11 @@ void rdma_destroy_id(struct rdma_cm_id *id)
if (id_priv->cma_dev) { if (id_priv->cma_dev) {
switch (rdma_node_get_transport(id_priv->id.device->node_type)) { switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
case RDMA_TRANSPORT_IB: case RDMA_TRANSPORT_IB:
if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib)) if (id_priv->cm_id.ib)
ib_destroy_cm_id(id_priv->cm_id.ib); ib_destroy_cm_id(id_priv->cm_id.ib);
break; break;
case RDMA_TRANSPORT_IWARP: case RDMA_TRANSPORT_IWARP:
if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw)) if (id_priv->cm_id.iw)
iw_destroy_cm_id(id_priv->cm_id.iw); iw_destroy_cm_id(id_priv->cm_id.iw);
break; break;
default: default:
@@ -1085,12 +1080,12 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
if (cma_get_net_info(ib_event->private_data, listen_id->ps, if (cma_get_net_info(ib_event->private_data, listen_id->ps,
&ip_ver, &port, &src, &dst)) &ip_ver, &port, &src, &dst))
goto err; return NULL;
id = rdma_create_id(listen_id->event_handler, listen_id->context, id = rdma_create_id(listen_id->event_handler, listen_id->context,
listen_id->ps, ib_event->param.req_rcvd.qp_type); listen_id->ps, ib_event->param.req_rcvd.qp_type);
if (IS_ERR(id)) if (IS_ERR(id))
goto err; return NULL;
cma_save_net_info(&id->route.addr, &listen_id->route.addr, cma_save_net_info(&id->route.addr, &listen_id->route.addr,
ip_ver, port, src, dst); ip_ver, port, src, dst);
@@ -1100,7 +1095,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths, rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
GFP_KERNEL); GFP_KERNEL);
if (!rt->path_rec) if (!rt->path_rec)
goto destroy_id; goto err;
rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path; rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
if (rt->num_paths == 2) if (rt->num_paths == 2)
@@ -1114,7 +1109,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr, ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
&rt->addr.dev_addr); &rt->addr.dev_addr);
if (ret) if (ret)
goto destroy_id; goto err;
} }
rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
@@ -1122,9 +1117,8 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
id_priv->state = RDMA_CM_CONNECT; id_priv->state = RDMA_CM_CONNECT;
return id_priv; return id_priv;
destroy_id:
rdma_destroy_id(id);
err: err:
rdma_destroy_id(id);
return NULL; return NULL;
} }
@@ -1468,13 +1462,15 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
{ {
struct ib_cm_compare_data compare_data; struct ib_cm_compare_data compare_data;
struct sockaddr *addr; struct sockaddr *addr;
struct ib_cm_id *id;
__be64 svc_id; __be64 svc_id;
int ret; int ret;
id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler, id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
id_priv); if (IS_ERR(id))
if (IS_ERR(id_priv->cm_id.ib)) return PTR_ERR(id);
return PTR_ERR(id_priv->cm_id.ib);
id_priv->cm_id.ib = id;
addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr; addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
svc_id = cma_get_service_id(id_priv->id.ps, addr); svc_id = cma_get_service_id(id_priv->id.ps, addr);
@@ -1497,12 +1493,15 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{ {
int ret; int ret;
struct sockaddr_in *sin; struct sockaddr_in *sin;
struct iw_cm_id *id;
id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device, id = iw_create_cm_id(id_priv->id.device,
iw_conn_req_handler, iw_conn_req_handler,
id_priv); id_priv);
if (IS_ERR(id_priv->cm_id.iw)) if (IS_ERR(id))
return PTR_ERR(id_priv->cm_id.iw); return PTR_ERR(id);
id_priv->cm_id.iw = id;
sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
id_priv->cm_id.iw->local_addr = *sin; id_priv->cm_id.iw->local_addr = *sin;
@@ -2484,6 +2483,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
{ {
struct ib_cm_sidr_req_param req; struct ib_cm_sidr_req_param req;
struct rdma_route *route; struct rdma_route *route;
struct ib_cm_id *id;
int ret; int ret;
req.private_data_len = sizeof(struct cma_hdr) + req.private_data_len = sizeof(struct cma_hdr) +
@@ -2501,12 +2501,13 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
if (ret) if (ret)
goto out; goto out;
id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
cma_sidr_rep_handler, id_priv); id_priv);
if (IS_ERR(id_priv->cm_id.ib)) { if (IS_ERR(id)) {
ret = PTR_ERR(id_priv->cm_id.ib); ret = PTR_ERR(id);
goto out; goto out;
} }
id_priv->cm_id.ib = id;
req.path = route->path_rec; req.path = route->path_rec;
req.service_id = cma_get_service_id(id_priv->id.ps, req.service_id = cma_get_service_id(id_priv->id.ps,
@@ -2530,6 +2531,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
struct ib_cm_req_param req; struct ib_cm_req_param req;
struct rdma_route *route; struct rdma_route *route;
void *private_data; void *private_data;
struct ib_cm_id *id;
int offset, ret; int offset, ret;
memset(&req, 0, sizeof req); memset(&req, 0, sizeof req);
@@ -2543,12 +2545,12 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
memcpy(private_data + offset, conn_param->private_data, memcpy(private_data + offset, conn_param->private_data,
conn_param->private_data_len); conn_param->private_data_len);
id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
id_priv); if (IS_ERR(id)) {
if (IS_ERR(id_priv->cm_id.ib)) { ret = PTR_ERR(id);
ret = PTR_ERR(id_priv->cm_id.ib);
goto out; goto out;
} }
id_priv->cm_id.ib = id;
route = &id_priv->id.route; route = &id_priv->id.route;
ret = cma_format_hdr(private_data, id_priv->id.ps, route); ret = cma_format_hdr(private_data, id_priv->id.ps, route);
@@ -2577,8 +2579,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
ret = ib_send_cm_req(id_priv->cm_id.ib, &req); ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out: out:
if (ret && !IS_ERR(id_priv->cm_id.ib)) { if (ret && !IS_ERR(id)) {
ib_destroy_cm_id(id_priv->cm_id.ib); ib_destroy_cm_id(id);
id_priv->cm_id.ib = NULL; id_priv->cm_id.ib = NULL;
} }
@@ -2595,10 +2597,8 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
struct iw_cm_conn_param iw_param; struct iw_cm_conn_param iw_param;
cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
if (IS_ERR(cm_id)) { if (IS_ERR(cm_id))
ret = PTR_ERR(cm_id); return PTR_ERR(cm_id);
goto out;
}
id_priv->cm_id.iw = cm_id; id_priv->cm_id.iw = cm_id;
@@ -2622,7 +2622,7 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
iw_param.qpn = conn_param->qp_num; iw_param.qpn = conn_param->qp_num;
ret = iw_cm_connect(cm_id, &iw_param); ret = iw_cm_connect(cm_id, &iw_param);
out: out:
if (ret && !IS_ERR(cm_id)) { if (ret) {
iw_destroy_cm_id(cm_id); iw_destroy_cm_id(cm_id);
id_priv->cm_id.iw = NULL; id_priv->cm_id.iw = NULL;
} }
@@ -2795,7 +2795,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
int ret; int ret;
id_priv = container_of(id, struct rdma_id_private, id); id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_has_cm_dev(id_priv)) if (!id_priv->cm_id.ib)
return -EINVAL; return -EINVAL;
switch (id->device->node_type) { switch (id->device->node_type) {
@@ -2817,7 +2817,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
int ret; int ret;
id_priv = container_of(id, struct rdma_id_private, id); id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_has_cm_dev(id_priv)) if (!id_priv->cm_id.ib)
return -EINVAL; return -EINVAL;
switch (rdma_node_get_transport(id->device->node_type)) { switch (rdma_node_get_transport(id->device->node_type)) {
@@ -2848,7 +2848,7 @@ int rdma_disconnect(struct rdma_cm_id *id)
int ret; int ret;
id_priv = container_of(id, struct rdma_id_private, id); id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_has_cm_dev(id_priv)) if (!id_priv->cm_id.ib)
return -EINVAL; return -EINVAL;
switch (rdma_node_get_transport(id->device->node_type)) { switch (rdma_node_get_transport(id->device->node_type)) {