cma.c revision c1a0b23bf477c2e1068905f4e2b5c3cee139e853
/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This Software is licensed under one of the following licenses:
 *
 * 1) under the terms of the "Common Public License 1.0" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/cpl.php.
 *
 * 2) under the terms of the "The BSD License" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/bsd-license.php.
 *
 * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
 *    copy of which is available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/gpl-license.php.
 *
 * Licensee has the right to choose one of the above licenses.
 *
 * Redistributions of source code must retain the above copyright
 * notice and one of the license notices.
 *
 * Redistributions in binary form must reproduce both the above copyright
 * notice, one of the license notices in the documentation
 * and/or other materials provided with the distribution.
 *
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <net/tcp.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);

struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	__be64			node_guid;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};

enum cma_state {
	CMA_IDLE,
	CMA_ADDR_QUERY,
	CMA_ADDR_RESOLVED,
	CMA_ROUTE_QUERY,
	CMA_ROUTE_RESOLVED,
	CMA_CONNECT,
	CMA_DISCONNECT,
	CMA_ADDR_BOUND,
	CMA_LISTEN,
	CMA_DEVICE_REMOVAL,
	CMA_DESTROYING
};

struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};

/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
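/*
 * Illustrative sketch of the pattern described above (not a function in this
 * file): every event callback brackets its work with dev_remove so that
 * device removal can wait for in-flight callbacks to drain.
 *
 *	atomic_inc(&id_priv->dev_remove);	// disable removal notification
 *	... deliver the event to the user ...
 *	cma_release_remove(id_priv);		// wakes wait_remove at zero
 */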
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list;
	struct list_head	listen_list;
	struct cma_device	*cma_dev;

	enum cma_state		state;
	spinlock_t		lock;
	struct completion	comp;
	atomic_t		refcount;
	wait_queue_head_t	wait_remove;
	atomic_t		dev_remove;

	int			backlog;
	int			timeout_ms;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qp_num;
	enum ib_qp_type		qp_type;
	u8			srq;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum cma_state		old_state;
	enum cma_state		new_state;
	struct rdma_cm_event	event;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__u32 pad[3];
		__u32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__u16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hh {
	u8 bsdh[16];
	u8 sdp_version; /* Major version: 7:4 */
	u8 ip_version;	/* IP version: 7:4 */
	u8 sdp_specific1[10];
	__u16 port;
	__u16 sdp_specific2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hah {
	u8 bsdh[16];
	u8 sdp_version;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2

static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
{
	unsigned long flags;
	enum cma_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}
static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}

static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	enum rdma_node_type dev_type = id_priv->id.route.addr.dev_addr.dev_type;
	struct cma_device *cma_dev;
	union ib_gid gid;
	int ret = -ENODEV;

	switch (rdma_node_get_transport(dev_type)) {
	case RDMA_TRANSPORT_IB:
		ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
		break;
	case RDMA_TRANSPORT_IWARP:
		iw_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
		break;
	default:
		return -ENODEV;
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		if (!ret) {
			cma_attach_to_dev(id_priv, cma_dev);
			break;
		}
	}
	return ret;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static void cma_release_remove(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->dev_remove))
		wake_up(&id_priv->wait_remove);
}

struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	spin_lock_init(&id_priv->lock);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	init_waitqueue_head(&id_priv->wait_remove);
	atomic_set(&id_priv->dev_remove, 0);
	INIT_LIST_HEAD(&id_priv->listen_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);

static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	struct rdma_dev_addr *dev_addr;
	int ret;

	dev_addr = &id_priv->id.route.addr.dev_addr;
	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  ib_addr_get_pkey(dev_addr),
				  &qp_attr.pkey_index);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = id_priv->id.port_num;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
					  IB_QP_PKEY_INDEX | IB_QP_PORT);
}

static int cma_init_iw_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;

	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_init_ib_qp(id_priv, qp);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_init_iw_qp(id_priv, qp);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->qp_type = qp->qp_type;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);
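/*
 * Example (illustrative only, not part of this file): a minimal sketch of
 * creating a QP on a cm_id once a route has been resolved.  The PD, CQ and
 * sizes are hypothetical, caller-created objects.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.cap.max_send_wr  = 16,
 *		.cap.max_recv_wr  = 16,
 *		.cap.max_send_sge = 1,
 *		.cap.max_recv_sge = 1,
 *		.send_cq = my_cq,	// hypothetical CQ
 *		.recv_cq = my_cq,
 *		.qp_type = IB_QPT_RC,
 *	};
 *	int err = rdma_create_qp(cm_id, my_pd, &init_attr);
 *	if (err)
 *		goto fail;	// QP creation or INIT transition failed
 */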
break; 387 } 388 389 if (ret) 390 goto err; 391 392 id->qp = qp; 393 id_priv->qp_num = qp->qp_num; 394 id_priv->qp_type = qp->qp_type; 395 id_priv->srq = (qp->srq != NULL); 396 return 0; 397err: 398 ib_destroy_qp(qp); 399 return ret; 400} 401EXPORT_SYMBOL(rdma_create_qp); 402 403void rdma_destroy_qp(struct rdma_cm_id *id) 404{ 405 ib_destroy_qp(id->qp); 406} 407EXPORT_SYMBOL(rdma_destroy_qp); 408 409static int cma_modify_qp_rtr(struct rdma_cm_id *id) 410{ 411 struct ib_qp_attr qp_attr; 412 int qp_attr_mask, ret; 413 414 if (!id->qp) 415 return 0; 416 417 /* Need to update QP attributes from default values. */ 418 qp_attr.qp_state = IB_QPS_INIT; 419 ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask); 420 if (ret) 421 return ret; 422 423 ret = ib_modify_qp(id->qp, &qp_attr, qp_attr_mask); 424 if (ret) 425 return ret; 426 427 qp_attr.qp_state = IB_QPS_RTR; 428 ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask); 429 if (ret) 430 return ret; 431 432 return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask); 433} 434 435static int cma_modify_qp_rts(struct rdma_cm_id *id) 436{ 437 struct ib_qp_attr qp_attr; 438 int qp_attr_mask, ret; 439 440 if (!id->qp) 441 return 0; 442 443 qp_attr.qp_state = IB_QPS_RTS; 444 ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask); 445 if (ret) 446 return ret; 447 448 return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask); 449} 450 451static int cma_modify_qp_err(struct rdma_cm_id *id) 452{ 453 struct ib_qp_attr qp_attr; 454 455 if (!id->qp) 456 return 0; 457 458 qp_attr.qp_state = IB_QPS_ERR; 459 return ib_modify_qp(id->qp, &qp_attr, IB_QP_STATE); 460} 461 462int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, 463 int *qp_attr_mask) 464{ 465 struct rdma_id_private *id_priv; 466 int ret; 467 468 id_priv = container_of(id, struct rdma_id_private, id); 469 switch (rdma_node_get_transport(id_priv->id.device->node_type)) { 470 case RDMA_TRANSPORT_IB: 471 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, 472 qp_attr_mask); 473 if (qp_attr->qp_state == IB_QPS_RTR) 474 qp_attr->rq_psn = id_priv->seq_num; 475 break; 476 case RDMA_TRANSPORT_IWARP: 477 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, 478 qp_attr_mask); 479 break; 480 default: 481 ret = -ENOSYS; 482 break; 483 } 484 485 return ret; 486} 487EXPORT_SYMBOL(rdma_init_qp_attr); 488 489static inline int cma_zero_addr(struct sockaddr *addr) 490{ 491 struct in6_addr *ip6; 492 493 if (addr->sa_family == AF_INET) 494 return ZERONET(((struct sockaddr_in *) addr)->sin_addr.s_addr); 495 else { 496 ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr; 497 return (ip6->s6_addr32[0] | ip6->s6_addr32[1] | 498 ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0; 499 } 500} 501 502static inline int cma_loopback_addr(struct sockaddr *addr) 503{ 504 return LOOPBACK(((struct sockaddr_in *) addr)->sin_addr.s_addr); 505} 506 507static inline int cma_any_addr(struct sockaddr *addr) 508{ 509 return cma_zero_addr(addr) || cma_loopback_addr(addr); 510} 511 512static inline int cma_any_port(struct sockaddr *addr) 513{ 514 return !((struct sockaddr_in *) addr)->sin_port; 515} 516 517static int cma_get_net_info(void *hdr, enum rdma_port_space ps, 518 u8 *ip_ver, __u16 *port, 519 union cma_ip_addr **src, union cma_ip_addr **dst) 520{ 521 switch (ps) { 522 case RDMA_PS_SDP: 523 if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) != 524 SDP_MAJ_VERSION) 525 return -EINVAL; 526 527 *ip_ver = sdp_get_ip_ver(hdr); 528 *port = ((struct sdp_hh *) hdr)->port; 529 *src = &((struct sdp_hh *) hdr)->src_addr; 530 *dst = &((struct sdp_hh 
static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __u16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver	= sdp_get_ip_ver(hdr);
		*port	= ((struct sdp_hh *) hdr)->port;
		*src	= &((struct sdp_hh *) hdr)->src_addr;
		*dst	= &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver	= cma_get_ip_ver(hdr);
		*port	= ((struct cma_hdr *) hdr)->port;
		*src	= &((struct cma_hdr *) hdr)->src_addr;
		*dst	= &((struct cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}

static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __u16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		break;
	default:
		break;
	}
}

static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}

static int cma_notify_user(struct rdma_id_private *id_priv,
			   enum rdma_cm_event_type type, int status,
			   void *data, u8 data_len)
{
	struct rdma_cm_event event;

	event.event = type;
	event.status = status;
	event.private_data = data;
	event.private_data_len = data_len;

	return id_priv->id.event_handler(&id_priv->id, &event);
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}

static inline int cma_internal_listen(struct rdma_id_private *id_priv)
{
	return (id_priv->state == CMA_LISTEN) && id_priv->cma_dev &&
	       cma_any_addr(&id_priv->id.route.addr.src_addr);
}

static void cma_destroy_listen(struct rdma_id_private *id_priv)
{
	cma_exch(id_priv, CMA_DESTROYING);

	if (id_priv->cma_dev) {
		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_detach_from_dev(id_priv);
	}
	list_del(&id_priv->listen_list);

	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	kfree(id_priv);
}
static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		cma_destroy_listen(dev_id_priv);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum cma_state state)
{
	switch (state) {
	case CMA_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case CMA_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case CMA_LISTEN:
		if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
		    !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum cma_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, CMA_DESTROYING);
	cma_cancel_operation(id_priv, state);

	mutex_lock(&lock);
	if (id_priv->cma_dev) {
		mutex_unlock(&lock);
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
	}
	mutex_unlock(&lock);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(&id_priv->id);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(&id_priv->id);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(&id_priv->id);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
	if (id_priv->id.ps == RDMA_PS_SDP &&
	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)
		return -EINVAL;

	return 0;
}

static int cma_rtu_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rts(&id_priv->id);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(&id_priv->id);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	enum rdma_cm_event_type event;
	u8 private_data_len = 0;
	int ret = 0, status = 0;

	atomic_inc(&id_priv->dev_remove);
	if (!cma_comp(id_priv, CMA_CONNECT))
		goto out;

	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event = RDMA_CM_EVENT_UNREACHABLE;
		status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		status = cma_verify_rep(id_priv, ib_event->private_data);
		if (status)
			event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			status = cma_rep_recv(id_priv);
			event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
					 RDMA_CM_EVENT_ESTABLISHED;
		} else
			event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
		break;
	case IB_CM_RTU_RECEIVED:
		status = cma_rtu_recv(id_priv);
		event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
				 RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
			goto out;
		event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(&id_priv->id);
		status = ib_event->param.rej_rcvd.reason;
		event = RDMA_CM_EVENT_REJECTED;
		private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
		       ib_event->event);
		goto out;
	}

	ret = cma_notify_user(id_priv, event, status, ib_event->private_data,
			      private_data_len);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		cma_release_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	cma_release_remove(id_priv);
	return ret;
}
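/*
 * Example (illustrative only): a minimal user event handler of the kind
 * invoked through cma_notify_user() above.  As the handlers in this file
 * document, returning non-zero asks the CMA to destroy the id.
 *
 *	static int my_cm_handler(struct rdma_cm_id *id,
 *				 struct rdma_cm_event *event)
 *	{
 *		switch (event->event) {
 *		case RDMA_CM_EVENT_ESTABLISHED:
 *			// connection is ready for I/O
 *			return 0;
 *		case RDMA_CM_EVENT_DISCONNECTED:
 *			return 1;	// non-zero: destroy this id
 *		default:
 *			return 0;
 *		}
 *	}
 */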
static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id,
					  struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	union cma_ip_addr *src, *dst;
	__u16 port;
	u8 ip_ver;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		return NULL;

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths, GFP_KERNEL);
	if (!rt->path_rec)
		goto err;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);
	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
	ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
	ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
	rt->addr.dev_addr.dev_type = RDMA_NODE_IB_CA;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	int offset, ret;

	listen_id = cm_id->context;
	atomic_inc(&listen_id->dev_remove);
	if (!cma_comp(listen_id, CMA_LISTEN)) {
		ret = -ECONNABORTED;
		goto out;
	}

	conn_id = cma_new_id(&listen_id->id, ib_event);
	if (!conn_id) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_inc(&conn_id->dev_remove);
	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		ret = -ENODEV;
		cma_release_remove(conn_id);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	offset = cma_user_data_offset(listen_id->id.ps);
	ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
			      ib_event->private_data + offset,
			      IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		conn_id->cm_id.ib = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		cma_release_remove(conn_id);
		rdma_destroy_id(&conn_id->id);
	}
out:
	cma_release_remove(listen_id);
	return ret;
}

static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64)ps << 16) +
	       be16_to_cpu(((struct sockaddr_in *) addr)->sin_port));
}

static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__u32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = ~0;
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = ~0;
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	enum rdma_cm_event_type event = 0;
	struct sockaddr_in *sin;
	int ret = 0;

	atomic_inc(&id_priv->dev_remove);

	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		if (iw_event->status)
			event = RDMA_CM_EVENT_REJECTED;
		else
			event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		BUG_ON(1);
	}

	ret = cma_notify_user(id_priv, event, iw_event->status,
			      iw_event->private_data,
			      iw_event->private_data_len);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		cma_release_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	cma_release_remove(id_priv);
	return ret;
}

static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	int ret;

	listen_id = cm_id->context;
	atomic_inc(&listen_id->dev_remove);
	if (!cma_comp(listen_id, CMA_LISTEN)) {
		ret = -ECONNABORTED;
		goto out;
	}

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP);
	if (!new_cm_id) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	atomic_inc(&conn_id->dev_remove);
	conn_id->state = CMA_CONNECT;

	dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		cma_release_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		cma_release_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		cma_release_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
			      iw_event->private_data,
			      iw_event->private_data_len);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		cma_release_remove(conn_id);
		rdma_destroy_id(&conn_id->id);
	}

out:
	if (dev)
		dev_put(dev);
	cma_release_remove(listen_id);
	return ret;
}

static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	__be64 svc_id;
	int ret;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib))
		return PTR_ERR(id_priv->cm_id.ib);

	addr = &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr))
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}
static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct sockaddr_in *sin;

	id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
					    iw_conn_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.iw))
		return PTR_ERR(id_priv->cm_id.iw);

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = CMA_ADDR_BOUND;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size(&id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		goto err;

	return;
err:
	cma_destroy_listen(dev_id_priv);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
{
	struct sockaddr_in addr_in;

	memset(&addr_in, 0, sizeof addr_in);
	addr_in.sin_family = af;
	return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
}

int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_any(id, AF_INET);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
		return -EINVAL;

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
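/*
 * Example (illustrative only): a minimal passive-side sketch using the
 * exported API.  my_cm_handler, my_ctx and the port number are hypothetical.
 *
 *	struct rdma_cm_id *id;
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(7471),	// any unprivileged port
 *	};
 *
 *	id = rdma_create_id(my_cm_handler, my_ctx, RDMA_PS_TCP);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	if (rdma_bind_addr(id, (struct sockaddr *) &sin) ||
 *	    rdma_listen(id, 8))
 *		rdma_destroy_id(id);
 *	// connection requests now arrive as RDMA_CM_EVENT_CONNECT_REQUEST
 */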
static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = CMA_ROUTE_QUERY;
		work->new_state = CMA_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
	}

	queue_work(cma_wq, &work->work);
}

static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_dev_addr *addr = &id_priv->id.route.addr.dev_addr;
	struct ib_sa_path_rec path_rec;

	memset(&path_rec, 0, sizeof path_rec);
	ib_addr_get_sgid(addr, &path_rec.sgid);
	ib_addr_get_dgid(addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
	path_rec.numb_path = 1;

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
				id_priv->id.port_num, &path_rec,
				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
				IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH,
				timeout_ms, GFP_KERNEL,
				cma_query_handler, work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

static void cma_work_handler(void *data)
{
	struct cma_work *work = data;
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	atomic_inc(&id_priv->dev_remove);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
out:
	cma_release_remove(id_priv);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler, work);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler, work);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);

static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE)
				goto port_found;

	if (!list_empty(&dev_list)) {
		p = 1;
		cma_dev = list_entry(dev_list.next, struct cma_device, list);
	} else {
		ret = -ENODEV;
		goto out;
	}

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
out:
	mutex_unlock(&lock);
	return ret;
}
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	enum rdma_cm_event_type event;

	atomic_inc(&id_priv->dev_remove);

	/*
	 * Grab mutex to block rdma_destroy_id() from removing the device while
	 * we're trying to acquire it.
	 */
	mutex_lock(&lock);
	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
		mutex_unlock(&lock);
		goto out;
	}

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);
	mutex_unlock(&lock);

	if (status) {
		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
			goto out;
		event = RDMA_CM_EVENT_ADDR_ERROR;
	} else {
		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));
		event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (cma_notify_user(id_priv, event, status, NULL, 0)) {
		cma_exch(id_priv, CMA_DESTROYING);
		cma_release_remove(id_priv);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	cma_release_remove(id_priv);
	cma_deref_id(id_priv);
}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	struct sockaddr_in *src_in, *dst_in;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
		dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
		src_in->sin_family = dst_in->sin_family;
		src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
	}

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler, work);
	work->old_state = CMA_ADDR_QUERY;
	work->new_state = CMA_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}

static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (src_addr && src_addr->sa_family)
		return rdma_bind_addr(id, src_addr);
	else
		return cma_bind_any(id, dst_addr->sa_family);
}

int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
	if (cma_any_addr(dst_addr))
		ret = cma_resolve_loopback(id_priv);
	else
		ret = rdma_resolve_ip(&id->route.addr.src_addr, dst_addr,
				      &id->route.addr.dev_addr,
				      timeout_ms, addr_handler, id_priv);
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
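/*
 * Example (illustrative only): the active-side resolution sequence.  The
 * destination address and timeouts are hypothetical; completion is reported
 * asynchronously as RDMA_CM_EVENT_ADDR_RESOLVED and then, after the second
 * call, RDMA_CM_EVENT_ROUTE_RESOLVED.
 *
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(7471),
 *		.sin_addr   = { .s_addr = in_aton("10.0.0.2") },
 *	};
 *
 *	ret = rdma_resolve_addr(id, NULL, (struct sockaddr *) &dst, 2000);
 *	// then, from the ADDR_RESOLVED event handler:
 *	ret = rdma_resolve_route(id, 2000);
 */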
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	sin->sin_port = htons(bind_list->port);
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}

static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int port, start, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	start = snum ? snum : sysctl_local_port_range[0];

	do {
		ret = idr_get_new_above(ps, bind_list, start, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err;

	if ((snum && port != snum) ||
	    (!snum && port > sysctl_local_port_range[1])) {
		idr_remove(ps, port);
		ret = -EADDRNOTAVAIL;
		goto err;
	}

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err:
	kfree(bind_list);
	return ret;
}

static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr_in *sin, *cur_sin;
	struct rdma_bind_list *bind_list;
	struct hlist_node *node;
	unsigned short snum;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	snum = ntohs(sin->sin_port);
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = idr_find(ps, snum);
	if (!bind_list)
		return cma_alloc_port(ps, id_priv, snum);

	/*
	 * We don't support binding to any address if anyone is bound to
	 * a specific address on the same port.
	 */
	if (cma_any_addr(&id_priv->id.route.addr.src_addr))
		return -EADDRNOTAVAIL;

	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
		if (cma_any_addr(&cur_id->id.route.addr.src_addr))
			return -EADDRNOTAVAIL;

		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
			return -EADDRINUSE;
	}

	cma_bind_port(bind_list, id_priv);
	return 0;
}

static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct idr *ps;
	int ret;

	switch (id_priv->id.ps) {
	case RDMA_PS_SDP:
		ps = &sdp_ps;
		break;
	case RDMA_PS_TCP:
		ps = &tcp_ps;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	mutex_lock(&lock);
	if (cma_any_port(&id_priv->id.route.addr.src_addr))
		ret = cma_alloc_port(ps, id_priv, 0);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}

int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
		return -EINVAL;

	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (!ret) {
			mutex_lock(&lock);
			ret = cma_acquire_dev(id_priv);
			mutex_unlock(&lock);
		}
		if (ret)
			goto err;
	}

	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
	ret = cma_get_port(id_priv);
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
			  struct rdma_route *route)
{
	struct sockaddr_in *src4, *dst4;
	struct cma_hdr *cma_hdr;
	struct sdp_hh *sdp_hdr;

	src4 = (struct sockaddr_in *) &route->addr.src_addr;
	dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

	switch (ps) {
	case RDMA_PS_SDP:
		sdp_hdr = hdr;
		if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
			return -EINVAL;
		sdp_set_ip_ver(sdp_hdr, 4);
		sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		sdp_hdr->port = src4->sin_port;
		break;
	default:
		cma_hdr = hdr;
		cma_hdr->cma_version = CMA_VERSION;
		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
		break;
	}
	return 0;
}

static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv->id.ps);
	req.private_data_len = offset + conn_param->private_data_len;
	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
	if (ret)
		goto out;
	req.private_data = private_data;

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = cma_get_service_id(id_priv->id.ps,
					    &route->addr.dst_addr);
	req.qp_num = id_priv->qp_num;
	req.qp_type = id_priv->qp_type;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = conn_param->retry_count;
	req.rnr_retry_count = conn_param->rnr_retry_count;
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	kfree(private_data);
	return ret;
}
static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	struct sockaddr_in *sin;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto out;
	}

	id_priv->cm_id.iw = cm_id;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	cm_id->local_addr = *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
	cm_id->remote_addr = *sin;

	ret = cma_modify_qp_rtr(&id_priv->id);
	if (ret) {
		iw_destroy_cm_id(cm_id);
		return ret;
	}

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	return ret;
}

int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->qp_type = conn_param->qp_type;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
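/*
 * Example (illustrative only): initiating a connection once the route is
 * resolved.  The parameter values are hypothetical defaults.
 *
 *	struct rdma_conn_param param = {
 *		.responder_resources = 1,
 *		.initiator_depth     = 1,
 *		.retry_count         = 7,
 *		.rnr_retry_count     = 7,
 *	};
 *
 *	ret = rdma_connect(id, &param);	// outcome arrives later as
 *					// ESTABLISHED, REJECTED, etc.
 */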
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(&id_priv->id);
	if (ret)
		return ret;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.target_ack_delay = CMA_CM_RESPONSE_TIMEOUT;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = conn_param->rnr_retry_count;
	rep.srq = id_priv->srq ? 1 : 0;

	return ib_send_cm_rep(id_priv->cm_id.ib, &rep);
}

static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(&id_priv->id);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}

int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->qp_type = conn_param->qp_type;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (conn_param)
			ret = cma_accept_ib(id_priv, conn_param);
		else
			ret = cma_rep_recv(id_priv);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);

int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = ib_send_cm_rej(id_priv->cm_id.ib,
				     IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
				     private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);
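/*
 * Example (illustrative only): the passive side answering a connect request,
 * called from the CONNECT_REQUEST branch of a user event handler.  The QP
 * setup step and parameter values are hypothetical.
 *
 *	static int my_handle_connect_request(struct rdma_cm_id *id)
 *	{
 *		struct rdma_conn_param param = {
 *			.responder_resources = 1,
 *			.initiator_depth     = 1,
 *		};
 *
 *		// create PD/CQ/QP for "id" first (see rdma_create_qp above)
 *		if (rdma_accept(id, &param))
 *			return 1;	// destroy the new id on failure
 *		return 0;
 *	}
 */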
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT) &&
	    !cma_comp(id_priv, CMA_DISCONNECT))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);

static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;
	cma_dev->node_guid = device->node_guid;
	if (!cma_dev->node_guid)
		goto err;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
	return;
err:
	kfree(cma_dev);
}

static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	enum cma_state state;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	wait_event(id_priv->wait_remove, !atomic_read(&id_priv->dev_remove));

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		return 0;

	return cma_notify_user(id_priv, RDMA_CM_EVENT_DEVICE_REMOVAL,
			       0, NULL, 0);
}

static void cma_process_remove(struct cma_device *cma_dev)
{
	struct list_head remove_list;
	struct rdma_id_private *id_priv;
	int ret;

	INIT_LIST_HEAD(&remove_list);

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		if (cma_internal_listen(id_priv)) {
			cma_destroy_listen(id_priv);
			continue;
		}

		list_del(&id_priv->list);
		list_add_tail(&id_priv->list, &remove_list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}

static int cma_init(void)
{
	int ret;

	cma_wq = create_singlethread_workqueue("rdma_cm_wq");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}
static void cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);