/* cm.c revision e1444b5a163e81138754cab27c4fa1637b5a2239 */
/*
 * Copyright (c) 2004-2006 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
} cm;

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	__be64 ca_guid;
	struct cm_port port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};

struct cm_work {
	struct work_struct work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};
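
/*
 * Tracks a connection through the timewait state.  Entries are keyed in
 * the cm.remote_id_table and cm.remote_qp_table rb-trees by remote CA
 * GUID plus remote comm ID / QPN, so stale or duplicate REQs and REPs
 * can be detected while a previous connection drains.
 */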
struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;
	struct ib_cm_compare_data *compare_data;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(void *data);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}
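
/*
 * Allocate an address handle and MAD send buffer for an outbound CM
 * message on the cm_id's current port.  Takes a reference on cm_id_priv
 * that is dropped by cm_free_msg() once the MAD is released.
 */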
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
			   grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->device, port->port_num, path,
			     &av->ah_attr);
	av->packet_life_time = path->packet_life_time;
	return 0;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret, id;
	static int next_id;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
					next_id++, &id);
		spin_unlock_irqrestore(&cm.lock, flags);
	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );

	cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
	return ret;
}

static void cm_free_id(__be32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irqrestore(&cm.lock, flags);
}
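
/*
 * Look up a cm_id by local comm ID.  Local IDs are IDR indices XORed
 * with a random operand, making them harder to guess; the remote ID is
 * checked as well so a stale or forged comm ID does not match.  Caller
 * must hold cm.lock; cm_acquire_id() below wraps the lookup with the
 * lock and takes a reference on any match.
 */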
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}

static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
	int i;

	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
					     ((unsigned long *) mask)[i];
}

static int cm_compare_data(struct ib_cm_compare_data *src_data,
			   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];
	u8 dst[IB_CM_COMPARE_SIZE];

	if (!src_data || !dst_data)
		return 0;

	cm_mask_copy(src, src_data->data, dst_data->mask);
	cm_mask_copy(dst, dst_data->data, src_data->mask);
	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}

static int cm_compare_private_data(u8 *private_data,
				   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];

	if (!dst_data)
		return 0;

	cm_mask_copy(src, private_data, dst_data->mask);
	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	int data_cmp;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		data_cmp = cm_compare_data(cm_id_priv->compare_data,
					   cur_cm_id_priv->compare_data);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
		    !data_cmp)
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else if (service_id > cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_right;
		else if (data_cmp < 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}
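
/*
 * Find a listen request matching an incoming REQ.  The listen tree is
 * ordered by (device, service ID, compare data); the service ID check
 * applies the listener's mask, and any private data comparison goes
 * through the listener's compare/mask data.
 */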
static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id,
					     u8 *private_data)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;
	int data_cmp;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		data_cmp = cm_compare_private_data(private_data,
						   cm_id_priv->compare_data);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device) && !data_cmp)
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else if (service_id > cm_id_priv->id.service_id)
			node = node->rb_right;
		else if (data_cmp < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}
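
/*
 * Track remotely initiated SIDR requests by (remote comm ID, port GID)
 * so that a duplicate SIDR REQ resolves to the existing cm_id.
 */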
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_WORK(&timewait_info->work.work, cm_work_handler,
		  &timewait_info->work);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}
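
/*
 * Move a connection into timewait: queue the timewait_info's delayed
 * work for the wait period derived from the path's packet life time,
 * and give up ownership of the timewait_info.
 */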
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->av.port->cm_dev->ca_guid,
			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		} else {
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->compare_data);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
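
/*
 * ib_cm_listen - Begin listening for connection requests on the given
 * service ID.  A zero service_mask is treated as an exact match, and
 * passing IB_CM_ASSIGN_SERVICE_ID asks the CM to pick an unused ID.
 *
 * Illustrative caller sketch (not part of this file; my_handler and
 * my_ctx are placeholders):
 *
 *	struct ib_cm_id *id = ib_create_cm_id(device, my_handler, my_ctx);
 *	if (!IS_ERR(id) && ib_cm_listen(id, cpu_to_be64(0x1000), 0, NULL))
 *		ib_destroy_cm_id(id);
 */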
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
		 struct ib_cm_compare_data *compare_data)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	if (compare_data) {
		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
						   GFP_KERNEL);
		if (!cm_id_priv->compare_data)
			return -ENOMEM;
		cm_mask_copy(cm_id_priv->compare_data->data,
			     compare_data->data, compare_data->mask);
		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
		       IB_CM_COMPARE_SIZE);
	}

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		kfree(cm_id_priv->compare_data);
		cm_id_priv->compare_data = NULL;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid  = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class    = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
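
/*
 * Send a REQ to the passive side: allocate timewait tracking, resolve
 * address vectors from the primary (and optional alternate) path,
 * format the REQ MAD and post it, moving the cm_id from IDLE to
 * REQ_SENT on success.
 */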
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}
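
/*
 * Build the REQ event reported to the passive side's listener.  Wire
 * fields are written from the sender's point of view, so local/remote
 * values (CA GUID, QPN, CM response timeouts) are swapped here, just as
 * the source/destination ends of the paths are swapped above.
 */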
static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}
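
/*
 * Respond to a duplicate REQ: re-send the MRA if one was already sent,
 * or a stale-connection REJ if the previous connection is in timewait.
 * Anything else, including a REQ still being processed, is dropped.
 */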
static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}
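
/*
 * Match an incoming REQ against the timewait tables (to catch duplicate
 * REQs and stale connections) and then against the listen tree.  On
 * success the listener is returned with an extra reference and the new
 * cm_id moves to REQ_RCVD.
 */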
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for duplicate REQ and stale connections. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (!timewait_info)
		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		} else
			cm_issue_rej(work->port, work->mad_recv_wc,
				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
				     NULL, 0);
		listen_cm_id_priv = NULL;
		goto out;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id,
					   req_msg->private_data);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto out;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);
out:
	return listen_cm_id_priv;
}

static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret) {
		ib_get_cached_gid(work->port->cm_dev->device,
				  work->port->port_num, 0, &work->path[0].sgid);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}
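
/*
 * Complete the handshake from the active side: send an RTU in response
 * to a REP and transition to ESTABLISHED.  The private data is saved so
 * that a duplicate REP can be answered with the same RTU.
 */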
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}

static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}

static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}
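
/*
 * Deliver the event queued by cm_establish(); if the cm_id is no longer
 * in the ESTABLISHED state the event is dropped.
 */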
static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	/* See comment in cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	unsigned long flags;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}
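
/*
 * Initiate disconnect by sending a DREQ.  If the message cannot be
 * allocated or posted, the connection is moved straight into timewait
 * rather than failing the disconnect.
 */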
	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);

static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);

static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
	drep_msg->local_comm_id = dreq_msg->remote_comm_id;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv) {
		cm_issue_drep(work->port, work->mad_recv_wc);
		return -EINVAL;
	}

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
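		/*
		 * The DREQ crossed our own outstanding REP or DREQ;
		 * stop retrying that send and handle the disconnect.
		 */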
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	unsigned long flags;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

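		/*
		 * A REP has already gone out, so the remote CM may
		 * believe the connection is established; hold the id in
		 * timewait rather than reusing it immediately.
		 */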
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);

static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}

static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irqsave(&cm.lock, flags);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						   remote_id);
		if (!timewait_info) {
			spin_unlock_irqrestore(&cm.lock, flags);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
				      (timewait_info->work.local_id ^
				       cm.random_id_operand));
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irqrestore(&cm.lock, flags);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}

static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	unsigned long flags;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_ESTABLISHED:
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

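/*
 * Send a Message Receipt Acknowledgement (MRA).  An MRA tells the sender
 * of a REQ, REP, or LAP that the message arrived but needs more time to
 * service, extending the sender's retry timeout by service_timeout so the
 * exchange is not aborted in the meantime.
 */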
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REQ_SENT;
		break;
	case IB_CM_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REP_SENT;
		break;
	case IB_CM_ESTABLISHED:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
		break;
	default:
		ret = -EINVAL;
		goto error1;
	}
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);

static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}

static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	unsigned long flags;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
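		/*
		 * The MRA must acknowledge the message we actually have
		 * outstanding; if it does, stretch the pending MAD's
		 * timeout by the remote's service timeout computed above.
		 */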
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
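	/* The local ACK timeout field is only 5 bits, so clamp it to 31. */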
	cm_lap_set_local_ack_timeout(lap_msg,
		min(31, alternate_path->packet_life_time + 1));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
	if (ret)
		goto out;

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);

static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
				    struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	path->pkey = cm_id_priv->pkey;
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	path->mtu = cm_id_priv->path_mtu;
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}

static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
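	/*
	 * The LAP describes the alternate path from the sender's point
	 * of view; cm_format_path_from_lap() below swaps the local and
	 * remote ends when building our copy of it.
	 */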
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_UNINIT:
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
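
	/*
	 * Posting the APR answers the LAP; on success the lap_state
	 * returns to idle below so another LAP may be serviced.
	 */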
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);

static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	unsigned long flags;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}

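/*
 * Service ID Resolution (SIDR) resolves a service ID to the QPN and
 * Q_Key of a datagram QP, allowing unconnected (UD) communication with
 * a service without a full connection establishment exchange.
 */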
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);

static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}

static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	unsigned long flags;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irqsave(&cm.lock, flags);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out; /* Duplicate message. */
	}
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id,
					sidr_req_msg->private_data);
	if (!cur_cm_id_priv) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		/* todo: reply with no match */
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}

static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	unsigned long flags;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
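	/* A non-zero handler return asks the CM to destroy the id. */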
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_free_msg(msg);
}

static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}

static void cm_work_handler(void *data)
{
	struct cm_work *work = data;
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}

static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item. To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_WORK(&work->work, cm_work_handler, work);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_work(cm.wq, &work->work);
out:
	return ret;
}

static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
		cm_id_priv->av = cm_id_priv->alt_av;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}

int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_notify);

static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_WORK(&work->work, cm_work_handler, work);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_work(cm.wq, &work->work);
}

static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
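		/*
		 * Remote writes are always allowed; remote reads and
		 * atomics are enabled only when responder resources
		 * were negotiated for the connection.
		 */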
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout =
					cm_id_priv->alt_av.packet_life_time + 1;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			if (cm_id_priv->qp_type == IB_QPT_RC) {
				*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
						 IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->timeout =
					cm_id_priv->av.packet_life_time + 1;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic =
					cm_id_priv->initiator_depth;
			}
			if (cm_id_priv->alt_av.ah_attr.dlid) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout =
				cm_id_priv->alt_av.packet_life_time + 1;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);

static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;
	cm_dev->ca_guid = device->node_guid;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error1;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error2;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error2:
	ib_unregister_mad_agent(port->mad_agent);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}

static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}

static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
	INIT_LIST_HEAD(&cm.timewait_list);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	ib_unregister_client(&cm_client);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);