cm.c revision 75df23e229acab85b704f4603bdf5efdc7960e6a
/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};
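/*
 * Global CM state.  listen_service_table is keyed by device, service ID and
 * (optionally masked) private compare data; remote_id_table and
 * remote_qp_table hold timewait entries so duplicate REQs and stale
 * connections can be detected; remote_sidr_table tracks SIDR connections by
 * remote ID and port GID; local_id_table maps a local communication ID back
 * to its cm_id_private.
 */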
static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	struct workqueue_struct *wq;
} cm;

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	__be64 ca_guid;
	struct cm_port port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};

struct cm_work {
	struct work_struct work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;
	struct ib_cm_compare_data *compare_data;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 local_ack_timeout;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(void *data);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmalloc(private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	memcpy(data, private_data, private_data_len);
	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
			   grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->device, port->port_num, path,
			     &av->ah_attr);
	av->packet_life_time = path->packet_life_time;
	return 0;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret;
	static int next_id;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
					next_id++,
					(__force int *) &cm_id_priv->id.local_id);
		spin_unlock_irqrestore(&cm.lock, flags);
	} while ((ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL));
	return ret;
}

static void cm_free_id(__be32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table, (__force int) local_id);
	spin_unlock_irqrestore(&cm.lock, flags);
}
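/*
 * cm_get_id() must be called with cm.lock held.  On a match it takes a
 * reference on the cm_id_private, which the caller drops later with
 * cm_deref_id(); cm_acquire_id() is the locking wrapper around it.
 */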
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}

static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
	int i;

	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
					     ((unsigned long *) mask)[i];
}

static int cm_compare_data(struct ib_cm_compare_data *src_data,
			   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];
	u8 dst[IB_CM_COMPARE_SIZE];

	if (!src_data || !dst_data)
		return 0;

	cm_mask_copy(src, src_data->data, dst_data->mask);
	cm_mask_copy(dst, dst_data->data, src_data->mask);
	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}

static int cm_compare_private_data(u8 *private_data,
				   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];

	if (!dst_data)
		return 0;

	cm_mask_copy(src, private_data, dst_data->mask);
	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	int data_cmp;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		data_cmp = cm_compare_data(cm_id_priv->compare_data,
					   cur_cm_id_priv->compare_data);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
		    !data_cmp)
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else if (service_id > cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_right;
		else if (data_cmp < 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id,
					     u8 *private_data)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;
	int data_cmp;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		data_cmp = cm_compare_private_data(private_data,
						   cm_id_priv->compare_data);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device) && !data_cmp)
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else if (service_id > cm_id_priv->id.service_id)
			node = node->rb_right;
		else if (data_cmp < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}
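/*
 * Worked example for the conversion below (illustrative): an IBA time of 14
 * encodes 4.096us * 2^14 ~= 67ms, and the approximation 2^(14 - 8) gives
 * 64ms.  Values of 8 or less clamp to 1ms.
 */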
static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	unsigned long flags;

	if (!timewait_info->inserted_remote_id &&
	    !timewait_info->inserted_remote_qp)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_WORK(&timewait_info->work.work, cm_work_handler,
		  &timewait_info->work);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->av.port->cm_dev->ca_guid,
			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		} else {
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->compare_data);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
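/*
 * Typical passive-side setup (illustrative sketch only; my_handler,
 * my_context and MY_SERVICE_ID are consumer-defined, not part of this file):
 *
 *	struct ib_cm_id *id;
 *
 *	id = ib_create_cm_id(device, my_handler, my_context);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	ret = ib_cm_listen(id, MY_SERVICE_ID, 0, NULL);
 *
 * A zero service_mask below requests an exact match on service_id, and
 * IB_CM_ASSIGN_SERVICE_ID asks the CM to allocate an unused service ID.
 */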
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
		 struct ib_cm_compare_data *compare_data)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	if (compare_data) {
		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
						   GFP_KERNEL);
		if (!cm_id_priv->compare_data)
			return -ENOMEM;
		cm_mask_copy(cm_id_priv->compare_data->data,
			     compare_data->data, compare_data->mask);
		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
		       IB_CM_COMPARE_SIZE);
	}

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		kfree(cm_id_priv->compare_data);
		cm_id_priv->compare_data = NULL;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid  = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class    = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
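/*
 * The REQ timeout used below is derived from the connection parameters:
 * timeout_ms = 2 * packet_life_time + remote_cm_response_timeout, with both
 * IBA-encoded times first converted through cm_convert_to_ms().
 */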
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}
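/*
 * Note on cm_format_paths_from_req() above: the REQ describes the paths from
 * the active side's perspective, so the sender's "local" LID/GID becomes the
 * receiver's destination (dlid/dgid) and vice versa when the paths are
 * rebuilt here for the passive side.
 */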
static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}

static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for duplicate REQ and stale connections. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (!timewait_info)
		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		} else
			cm_issue_rej(work->port, work->mad_recv_wc,
				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
				     NULL, 0);
		goto error;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id,
					   req_msg->private_data);
	if (!listen_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto error;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);
	return listen_cm_id_priv;

error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
	return NULL;
}

static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto error1;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		goto error2;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret)
		goto error3;
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret)
			goto error3;
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

error3:	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
error2:	kfree(cm_id_priv->timewait_info);
	cm_id_priv->timewait_info = NULL;
error1:	ib_destroy_cm_id(&cm_id_priv->id);
	return ret;
}
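/*
 * A listener typically accepts the connection from its IB_CM_REQ_RECEIVED
 * callback (illustrative sketch; my_req_handler, my_qp and my_psn are
 * consumer code, not part of this file):
 *
 *	static int my_req_handler(struct ib_cm_id *cm_id,
 *				  struct ib_cm_event *event)
 *	{
 *		struct ib_cm_rep_param rep;
 *
 *		memset(&rep, 0, sizeof rep);
 *		rep.qp_num = my_qp->qp_num;
 *		rep.starting_psn = my_psn;
 *		rep.responder_resources =
 *			event->param.req_rcvd.responder_resources;
 *		rep.initiator_depth = event->param.req_rcvd.initiator_depth;
 *		return ib_send_cm_rep(cm_id, &rep);
 *	}
 *
 * Returning non-zero from the handler causes cm_process_work() to destroy
 * the cm_id.
 */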
static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;
	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}

static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}

static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	/* See comment in ib_cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	unsigned long flags;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}
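/*
 * Orderly teardown (illustrative): the disconnecting side calls
 * ib_send_cm_dreq(); the peer's handler sees IB_CM_DREQ_RECEIVED and answers
 * with ib_send_cm_drep(), after which both ends sit in timewait until the
 * IB_CM_TIMEWAIT_EXIT event fires.
 */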
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);

static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);

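/*
 * Handle a received DREQ.  A DREQ that arrives while the cm_id is already
 * in timewait is answered directly with a DREP built from the stored
 * private data, since our earlier reply may have been lost.
 */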
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	unsigned long flags;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);

static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}

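/*
 * Find the cm_id targeted by a received REJ.  A TIMEOUT reject carries
 * the remote CA GUID and comm ID in its ARI, so the lookup goes through
 * the timewait/remote-ID tables; a reject of our REQ cannot yet know our
 * remote comm ID, so only remote_comm_id is matched in that case.
 */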
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irqsave(&cm.lock, flags);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						   remote_id);
		if (!timewait_info) {
			spin_unlock_irqrestore(&cm.lock, flags);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table,
				      (__force int) timewait_info->work.local_id);
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irqrestore(&cm.lock, flags);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}

static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	unsigned long flags;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_ESTABLISHED:
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

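/*
 * Send an MRA to tell the peer that the referenced REQ, REP, or LAP was
 * received but will be answered more slowly than the peer's timeout
 * allows; service_timeout is the additional time the peer should wait.
 */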
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REQ_SENT;
		break;
	case IB_CM_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REP_SENT;
		break;
	case IB_CM_ESTABLISHED:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
		break;
	default:
		ret = -EINVAL;
		goto error1;
	}
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);

static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}

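/*
 * Handle a received MRA: stretch the timeout of our outstanding MAD by
 * the peer's service timeout plus the path's packet lifetime, via
 * ib_modify_mad(), so the retry machinery does not give up early.
 */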
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	unsigned long flags;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		min(31, alternate_path->packet_life_time + 1));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    cm_id->lap_state != IB_CM_LAP_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);

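/*
 * Rebuild the alternate path record from a received LAP.  The LAP
 * describes the path from the sender's point of view, so the local and
 * remote fields (GIDs and LIDs) are swapped here.
 */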
static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	/* pkey is same as in REQ */
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	/* mtu is same as in REQ */
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}

static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}

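/*
 * Send an APR accepting or rejecting a received LAP.  Only valid on an
 * established connection whose lap_state shows a LAP was received
 * (LAP_RCVD or MRA_LAP_SENT).
 */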
int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);

static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	unsigned long flags;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	cm_cleanup_timewait(timewait_info);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

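/*
 * Service ID resolution (SIDR): a SIDR REQ/REP exchange resolves a
 * service ID to a QPN and Q_Key on the remote node without bringing up
 * a connection (typically for datagram services).
 */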
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);

static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}

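/*
 * Handle a received SIDR REQ: create a new cm_id for the request, record
 * it in the remote SIDR table to catch duplicates, and hand the event to
 * the matching listener's handler.
 */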
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	unsigned long flags;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irqsave(&cm.lock, flags);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out; /* Duplicate message. */
	}
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id,
					sidr_req_msg->private_data);
	if (!cur_cm_id_priv) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		/* todo: reply with no match */
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

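/*
 * Send a SIDR REP.  On success the cm_id returns to IDLE and its entry
 * is removed from the remote SIDR table, since the exchange is complete.
 */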
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}

static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

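/*
 * A send completed in error after exhausting its retries.  Map the state
 * the cm_id was in when the MAD was posted to the corresponding *_ERROR
 * event and report it, discarding stale sends that no longer match the
 * cm_id's current state.
 */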
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	unsigned long flags;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_free_msg(msg);
}

static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}

static void cm_work_handler(void *data)
{
	struct cm_work *work = data;
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}

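/*
 * Called by the ULP to mark a connection established when the RTU is not
 * usable for that purpose (e.g. when data arrives before the RTU); this
 * queues an IB_CM_USER_ESTABLISHED work item instead of flipping state
 * inline.
 */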
int ib_cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_WORK(&work->work, cm_work_handler, work);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_work(cm.wq, &work->work);
out:
	return ret;
}
EXPORT_SYMBOL(ib_cm_establish);

static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_WORK(&work->work, cm_work_handler, work);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_work(cm.wq, &work->work);
}

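/*
 * The three helpers below fill in the QP attributes a ULP needs to move
 * its QP through INIT, RTR, and RTS, based on the connection parameters
 * the CM negotiated; ib_cm_init_qp_attr() dispatches on qp_attr->qp_state.
 */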
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
					   IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
					 IB_QP_RNR_RETRY |
					 IB_QP_MAX_QP_RD_ATOMIC;
			qp_attr->timeout = cm_id_priv->local_ack_timeout;
			qp_attr->retry_cnt = cm_id_priv->retry_count;
			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);

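/*
 * Device registration: for each physical port, register a CM MAD agent
 * on the GSI QP and advertise CM support in the port capability mask.
 * On failure, already-initialized ports are unwound in reverse order.
 */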
static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;
	cm_dev->ca_guid = device->node_guid;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error1;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error2;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error2:
	ib_unregister_mad_agent(port->mad_agent);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}

static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}

static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	destroy_workqueue(cm.wq);
	ib_unregister_client(&cm_client);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);