cm.c revision a70d059009f4a207e2a9c794f40fc8c870096d54
/*
 * Copyright (c) 2004-2006 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct workqueue_struct *wq;
} cm;

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	__be64 ca_guid;
	struct cm_port port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};

struct cm_work {
	struct work_struct work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};
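
/*
 * A timewait entry embeds a cm_work so it can be queued as delayed work,
 * and carries the rb-tree nodes that index a connection by remote
 * communication ID and by remote QPN while it remains in timewait.
 */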

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;
	struct ib_cm_compare_data *compare_data;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 local_ack_timeout;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(void *data);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}
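
/*
 * MAD buffer helpers: cm_alloc_msg() builds an outbound send buffer from
 * the cm_id's address vector and takes a reference on the cm_id (dropped
 * in cm_free_msg()); cm_alloc_response_msg() builds a reply directly from
 * the received work completion and GRH without referencing a cm_id.
 */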

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmalloc(private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	memcpy(data, private_data, private_data_len);
	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
			   grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->device, port->port_num, path,
			     &av->ah_attr);
	av->packet_life_time = path->packet_life_time;
	return 0;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret, id;
	static int next_id;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
					next_id++, &id);
		spin_unlock_irqrestore(&cm.lock, flags);
	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );

	cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
	return ret;
}

static void cm_free_id(__be32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irqrestore(&cm.lock, flags);
}
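
/*
 * Local communication IDs are idr indices XORed with a random operand so
 * that the IDs placed on the wire are not simply small sequential values.
 * cm_get_id() must be called with cm.lock held; cm_acquire_id() wraps it
 * and returns the cm_id_private with its reference count incremented.
 */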

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}

static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
	int i;

	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
					     ((unsigned long *) mask)[i];
}

static int cm_compare_data(struct ib_cm_compare_data *src_data,
			   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];
	u8 dst[IB_CM_COMPARE_SIZE];

	if (!src_data || !dst_data)
		return 0;

	cm_mask_copy(src, src_data->data, dst_data->mask);
	cm_mask_copy(dst, dst_data->data, src_data->mask);
	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}

static int cm_compare_private_data(u8 *private_data,
				   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];

	if (!dst_data)
		return 0;

	cm_mask_copy(src, private_data, dst_data->mask);
	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	int data_cmp;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		data_cmp = cm_compare_data(cm_id_priv->compare_data,
					   cur_cm_id_priv->compare_data);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
		    !data_cmp)
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else if (service_id > cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_right;
		else if (data_cmp < 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}
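
/*
 * cm_find_listen() matches an incoming request against the listen service
 * tree: the device must match, the service ID must match under the
 * listener's mask, and any private data must satisfy the listener's
 * compare_data/mask.
 */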

static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id,
					     u8 *private_data)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;
	int data_cmp;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		data_cmp = cm_compare_private_data(private_data,
						   cm_id_priv->compare_data);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device) && !data_cmp)
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else if (service_id > cm_id_priv->id.service_id)
			node = node->rb_right;
		else if (data_cmp < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}
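
/*
 * The remote SIDR table is keyed by remote communication ID and, on a tie,
 * by the requester's port GID, allowing duplicate SIDR REQs to be detected.
 */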

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	unsigned long flags;

	if (!timewait_info->inserted_remote_id &&
	    !timewait_info->inserted_remote_qp)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
}
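
/*
 * Timewait handling: cm_create_timewait_info() allocates the entry when a
 * REQ is sent or received, cm_enter_timewait() schedules it as delayed
 * work based on the local ACK timeout, and cm_cleanup_timewait() (above)
 * unlinks it from the remote ID and remote QPN trees.
 */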

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_WORK(&timewait_info->work.work, cm_work_handler,
		  &timewait_info->work);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;

	cm_cleanup_timewait(cm_id_priv->timewait_info);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->av.port->cm_dev->ca_guid,
			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		} else {
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->compare_data);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
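
/*
 * A zero service_mask requests an exact service ID match.  Passing
 * IB_CM_ASSIGN_SERVICE_ID asks the CM to assign a service ID on the
 * caller's behalf.
 */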

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
		 struct ib_cm_compare_data *compare_data)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	if (compare_data) {
		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
						   GFP_KERNEL);
		if (!cm_id_priv->compare_data)
			return -ENOMEM;
		cm_mask_copy(cm_id_priv->compare_data->data,
			     compare_data->data, compare_data->mask);
		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
		       IB_CM_COMPARE_SIZE);
	}

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		kfree(cm_id_priv->compare_data);
		cm_id_priv->compare_data = NULL;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method = IB_MGMT_METHOD_SEND;
	hdr->attr_id = attr_id;
	hdr->tid = tid;
}
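
/*
 * REQ formatting: fields are taken from the caller-supplied
 * ib_cm_req_param and its primary (and optional alternate) path records.
 * Local ACK timeouts are derived from packet_life_time + 1, capped at 31.
 */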

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
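
/*
 * cm_issue_rej() sends a REJ for a received message that cannot be matched
 * to a local cm_id (e.g. a stale connection or an unknown service ID),
 * copying the TID and communication IDs from the received MAD.
 */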

static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}
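
/*
 * The REQ event reported to the listener reverses the sender's view of the
 * connection: the sender's local QPN, CA GUID, initiator depth and
 * responder resources become the receiver's remote QPN, remote CA GUID,
 * responder resources and initiator depth, respectively.
 */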

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}
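
/*
 * cm_match_req() inserts the new connection into the remote ID and remote
 * QPN trees to catch duplicate REQs and stale connections, then looks for
 * a matching listener.  On failure any needed REJ is issued here, the
 * timewait entry is unlinked, and NULL is returned to the caller.
 */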

static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for duplicate REQ and stale connections. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (!timewait_info)
		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		} else
			cm_issue_rej(work->port, work->mad_recv_wc,
				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
				     NULL, 0);
		goto error;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id,
					   req_msg->private_data);
	if (!listen_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto error;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);
	return listen_cm_id_priv;

error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
	return NULL;
}

static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret) {
		ib_get_cached_gid(work->port->cm_dev->device,
				  work->port->port_num, 0, &work->path[0].sgid);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}

static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}

static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}

static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	/* See comment in ib_cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	unsigned long flags;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}
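
/*
 * ib_send_cm_dreq() may only be called on an established connection.  If
 * the DREQ cannot be allocated or posted, the connection still transitions
 * to timewait so the cm_id is eventually cleaned up.
 */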
1834 1835 cm_id->state = IB_CM_DREQ_SENT; 1836 cm_id_priv->msg = msg; 1837out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1838 return ret; 1839} 1840EXPORT_SYMBOL(ib_send_cm_dreq); 1841 1842static void cm_format_drep(struct cm_drep_msg *drep_msg, 1843 struct cm_id_private *cm_id_priv, 1844 const void *private_data, 1845 u8 private_data_len) 1846{ 1847 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid); 1848 drep_msg->local_comm_id = cm_id_priv->id.local_id; 1849 drep_msg->remote_comm_id = cm_id_priv->id.remote_id; 1850 1851 if (private_data && private_data_len) 1852 memcpy(drep_msg->private_data, private_data, private_data_len); 1853} 1854 1855int ib_send_cm_drep(struct ib_cm_id *cm_id, 1856 const void *private_data, 1857 u8 private_data_len) 1858{ 1859 struct cm_id_private *cm_id_priv; 1860 struct ib_mad_send_buf *msg; 1861 unsigned long flags; 1862 void *data; 1863 int ret; 1864 1865 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE) 1866 return -EINVAL; 1867 1868 data = cm_copy_private_data(private_data, private_data_len); 1869 if (IS_ERR(data)) 1870 return PTR_ERR(data); 1871 1872 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1873 spin_lock_irqsave(&cm_id_priv->lock, flags); 1874 if (cm_id->state != IB_CM_DREQ_RCVD) { 1875 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1876 kfree(data); 1877 return -EINVAL; 1878 } 1879 1880 cm_set_private_data(cm_id_priv, data, private_data_len); 1881 cm_enter_timewait(cm_id_priv); 1882 1883 ret = cm_alloc_msg(cm_id_priv, &msg); 1884 if (ret) 1885 goto out; 1886 1887 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 1888 private_data, private_data_len); 1889 1890 ret = ib_post_send_mad(msg, NULL); 1891 if (ret) { 1892 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1893 cm_free_msg(msg); 1894 return ret; 1895 } 1896 1897out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1898 return ret; 1899} 1900EXPORT_SYMBOL(ib_send_cm_drep); 1901 1902static int cm_dreq_handler(struct cm_work *work) 1903{ 1904 struct cm_id_private *cm_id_priv; 1905 struct cm_dreq_msg *dreq_msg; 1906 struct ib_mad_send_buf *msg = NULL; 1907 unsigned long flags; 1908 int ret; 1909 1910 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad; 1911 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, 1912 dreq_msg->local_comm_id); 1913 if (!cm_id_priv) 1914 return -EINVAL; 1915 1916 work->cm_event.private_data = &dreq_msg->private_data; 1917 1918 spin_lock_irqsave(&cm_id_priv->lock, flags); 1919 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg)) 1920 goto unlock; 1921 1922 switch (cm_id_priv->id.state) { 1923 case IB_CM_REP_SENT: 1924 case IB_CM_DREQ_SENT: 1925 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1926 break; 1927 case IB_CM_ESTABLISHED: 1928 case IB_CM_MRA_REP_RCVD: 1929 break; 1930 case IB_CM_TIMEWAIT: 1931 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) 1932 goto unlock; 1933 1934 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 1935 cm_id_priv->private_data, 1936 cm_id_priv->private_data_len); 1937 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1938 1939 if (ib_post_send_mad(msg, NULL)) 1940 cm_free_msg(msg); 1941 goto deref; 1942 default: 1943 goto unlock; 1944 } 1945 cm_id_priv->id.state = IB_CM_DREQ_RCVD; 1946 cm_id_priv->tid = dreq_msg->hdr.tid; 1947 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1948 if (!ret) 1949 list_add_tail(&work->list, &cm_id_priv->work_list); 1950 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 
1951 1952 if (ret) 1953 cm_process_work(cm_id_priv, work); 1954 else 1955 cm_deref_id(cm_id_priv); 1956 return 0; 1957 1958unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1959deref: cm_deref_id(cm_id_priv); 1960 return -EINVAL; 1961} 1962 1963static int cm_drep_handler(struct cm_work *work) 1964{ 1965 struct cm_id_private *cm_id_priv; 1966 struct cm_drep_msg *drep_msg; 1967 unsigned long flags; 1968 int ret; 1969 1970 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad; 1971 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id, 1972 drep_msg->local_comm_id); 1973 if (!cm_id_priv) 1974 return -EINVAL; 1975 1976 work->cm_event.private_data = &drep_msg->private_data; 1977 1978 spin_lock_irqsave(&cm_id_priv->lock, flags); 1979 if (cm_id_priv->id.state != IB_CM_DREQ_SENT && 1980 cm_id_priv->id.state != IB_CM_DREQ_RCVD) { 1981 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1982 goto out; 1983 } 1984 cm_enter_timewait(cm_id_priv); 1985 1986 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1987 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1988 if (!ret) 1989 list_add_tail(&work->list, &cm_id_priv->work_list); 1990 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1991 1992 if (ret) 1993 cm_process_work(cm_id_priv, work); 1994 else 1995 cm_deref_id(cm_id_priv); 1996 return 0; 1997out: 1998 cm_deref_id(cm_id_priv); 1999 return -EINVAL; 2000} 2001 2002int ib_send_cm_rej(struct ib_cm_id *cm_id, 2003 enum ib_cm_rej_reason reason, 2004 void *ari, 2005 u8 ari_length, 2006 const void *private_data, 2007 u8 private_data_len) 2008{ 2009 struct cm_id_private *cm_id_priv; 2010 struct ib_mad_send_buf *msg; 2011 unsigned long flags; 2012 int ret; 2013 2014 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) || 2015 (ari && ari_length > IB_CM_REJ_ARI_LENGTH)) 2016 return -EINVAL; 2017 2018 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2019 2020 spin_lock_irqsave(&cm_id_priv->lock, flags); 2021 switch (cm_id->state) { 2022 case IB_CM_REQ_SENT: 2023 case IB_CM_MRA_REQ_RCVD: 2024 case IB_CM_REQ_RCVD: 2025 case IB_CM_MRA_REQ_SENT: 2026 case IB_CM_REP_RCVD: 2027 case IB_CM_MRA_REP_SENT: 2028 ret = cm_alloc_msg(cm_id_priv, &msg); 2029 if (!ret) 2030 cm_format_rej((struct cm_rej_msg *) msg->mad, 2031 cm_id_priv, reason, ari, ari_length, 2032 private_data, private_data_len); 2033 2034 cm_reset_to_idle(cm_id_priv); 2035 break; 2036 case IB_CM_REP_SENT: 2037 case IB_CM_MRA_REP_RCVD: 2038 ret = cm_alloc_msg(cm_id_priv, &msg); 2039 if (!ret) 2040 cm_format_rej((struct cm_rej_msg *) msg->mad, 2041 cm_id_priv, reason, ari, ari_length, 2042 private_data, private_data_len); 2043 2044 cm_enter_timewait(cm_id_priv); 2045 break; 2046 default: 2047 ret = -EINVAL; 2048 goto out; 2049 } 2050 2051 if (ret) 2052 goto out; 2053 2054 ret = ib_post_send_mad(msg, NULL); 2055 if (ret) 2056 cm_free_msg(msg); 2057 2058out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2059 return ret; 2060} 2061EXPORT_SYMBOL(ib_send_cm_rej); 2062 2063static void cm_format_rej_event(struct cm_work *work) 2064{ 2065 struct cm_rej_msg *rej_msg; 2066 struct ib_cm_rej_event_param *param; 2067 2068 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; 2069 param = &work->cm_event.param.rej_rcvd; 2070 param->ari = rej_msg->ari; 2071 param->ari_length = cm_rej_get_reject_info_len(rej_msg); 2072 param->reason = __be16_to_cpu(rej_msg->reason); 2073 work->cm_event.private_data = &rej_msg->private_data; 2074} 2075 2076static struct cm_id_private * cm_acquire_rejected_id(struct 
cm_rej_msg *rej_msg) 2077{ 2078 struct cm_timewait_info *timewait_info; 2079 struct cm_id_private *cm_id_priv; 2080 unsigned long flags; 2081 __be32 remote_id; 2082 2083 remote_id = rej_msg->local_comm_id; 2084 2085 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) { 2086 spin_lock_irqsave(&cm.lock, flags); 2087 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari), 2088 remote_id); 2089 if (!timewait_info) { 2090 spin_unlock_irqrestore(&cm.lock, flags); 2091 return NULL; 2092 } 2093 cm_id_priv = idr_find(&cm.local_id_table, (__force int) 2094 (timewait_info->work.local_id ^ 2095 cm.random_id_operand)); 2096 if (cm_id_priv) { 2097 if (cm_id_priv->id.remote_id == remote_id) 2098 atomic_inc(&cm_id_priv->refcount); 2099 else 2100 cm_id_priv = NULL; 2101 } 2102 spin_unlock_irqrestore(&cm.lock, flags); 2103 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ) 2104 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0); 2105 else 2106 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id); 2107 2108 return cm_id_priv; 2109} 2110 2111static int cm_rej_handler(struct cm_work *work) 2112{ 2113 struct cm_id_private *cm_id_priv; 2114 struct cm_rej_msg *rej_msg; 2115 unsigned long flags; 2116 int ret; 2117 2118 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; 2119 cm_id_priv = cm_acquire_rejected_id(rej_msg); 2120 if (!cm_id_priv) 2121 return -EINVAL; 2122 2123 cm_format_rej_event(work); 2124 2125 spin_lock_irqsave(&cm_id_priv->lock, flags); 2126 switch (cm_id_priv->id.state) { 2127 case IB_CM_REQ_SENT: 2128 case IB_CM_MRA_REQ_RCVD: 2129 case IB_CM_REP_SENT: 2130 case IB_CM_MRA_REP_RCVD: 2131 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2132 /* fall through */ 2133 case IB_CM_REQ_RCVD: 2134 case IB_CM_MRA_REQ_SENT: 2135 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN) 2136 cm_enter_timewait(cm_id_priv); 2137 else 2138 cm_reset_to_idle(cm_id_priv); 2139 break; 2140 case IB_CM_DREQ_SENT: 2141 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2142 /* fall through */ 2143 case IB_CM_REP_RCVD: 2144 case IB_CM_MRA_REP_SENT: 2145 case IB_CM_ESTABLISHED: 2146 cm_enter_timewait(cm_id_priv); 2147 break; 2148 default: 2149 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2150 ret = -EINVAL; 2151 goto out; 2152 } 2153 2154 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2155 if (!ret) 2156 list_add_tail(&work->list, &cm_id_priv->work_list); 2157 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2158 2159 if (ret) 2160 cm_process_work(cm_id_priv, work); 2161 else 2162 cm_deref_id(cm_id_priv); 2163 return 0; 2164out: 2165 cm_deref_id(cm_id_priv); 2166 return -EINVAL; 2167} 2168 2169int ib_send_cm_mra(struct ib_cm_id *cm_id, 2170 u8 service_timeout, 2171 const void *private_data, 2172 u8 private_data_len) 2173{ 2174 struct cm_id_private *cm_id_priv; 2175 struct ib_mad_send_buf *msg; 2176 void *data; 2177 unsigned long flags; 2178 int ret; 2179 2180 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE) 2181 return -EINVAL; 2182 2183 data = cm_copy_private_data(private_data, private_data_len); 2184 if (IS_ERR(data)) 2185 return PTR_ERR(data); 2186 2187 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2188 2189 spin_lock_irqsave(&cm_id_priv->lock, flags); 2190 switch(cm_id_priv->id.state) { 2191 case IB_CM_REQ_RCVD: 2192 ret = cm_alloc_msg(cm_id_priv, &msg); 2193 if (ret) 2194 goto error1; 2195 2196 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2197 CM_MSG_RESPONSE_REQ, 
service_timeout, 2198 private_data, private_data_len); 2199 ret = ib_post_send_mad(msg, NULL); 2200 if (ret) 2201 goto error2; 2202 cm_id->state = IB_CM_MRA_REQ_SENT; 2203 break; 2204 case IB_CM_REP_RCVD: 2205 ret = cm_alloc_msg(cm_id_priv, &msg); 2206 if (ret) 2207 goto error1; 2208 2209 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2210 CM_MSG_RESPONSE_REP, service_timeout, 2211 private_data, private_data_len); 2212 ret = ib_post_send_mad(msg, NULL); 2213 if (ret) 2214 goto error2; 2215 cm_id->state = IB_CM_MRA_REP_SENT; 2216 break; 2217 case IB_CM_ESTABLISHED: 2218 ret = cm_alloc_msg(cm_id_priv, &msg); 2219 if (ret) 2220 goto error1; 2221 2222 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2223 CM_MSG_RESPONSE_OTHER, service_timeout, 2224 private_data, private_data_len); 2225 ret = ib_post_send_mad(msg, NULL); 2226 if (ret) 2227 goto error2; 2228 cm_id->lap_state = IB_CM_MRA_LAP_SENT; 2229 break; 2230 default: 2231 ret = -EINVAL; 2232 goto error1; 2233 } 2234 cm_id_priv->service_timeout = service_timeout; 2235 cm_set_private_data(cm_id_priv, data, private_data_len); 2236 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2237 return 0; 2238 2239error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2240 kfree(data); 2241 return ret; 2242 2243error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2244 kfree(data); 2245 cm_free_msg(msg); 2246 return ret; 2247} 2248EXPORT_SYMBOL(ib_send_cm_mra); 2249 2250static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg) 2251{ 2252 switch (cm_mra_get_msg_mraed(mra_msg)) { 2253 case CM_MSG_RESPONSE_REQ: 2254 return cm_acquire_id(mra_msg->remote_comm_id, 0); 2255 case CM_MSG_RESPONSE_REP: 2256 case CM_MSG_RESPONSE_OTHER: 2257 return cm_acquire_id(mra_msg->remote_comm_id, 2258 mra_msg->local_comm_id); 2259 default: 2260 return NULL; 2261 } 2262} 2263 2264static int cm_mra_handler(struct cm_work *work) 2265{ 2266 struct cm_id_private *cm_id_priv; 2267 struct cm_mra_msg *mra_msg; 2268 unsigned long flags; 2269 int timeout, ret; 2270 2271 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad; 2272 cm_id_priv = cm_acquire_mraed_id(mra_msg); 2273 if (!cm_id_priv) 2274 return -EINVAL; 2275 2276 work->cm_event.private_data = &mra_msg->private_data; 2277 work->cm_event.param.mra_rcvd.service_timeout = 2278 cm_mra_get_service_timeout(mra_msg); 2279 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) + 2280 cm_convert_to_ms(cm_id_priv->av.packet_life_time); 2281 2282 spin_lock_irqsave(&cm_id_priv->lock, flags); 2283 switch (cm_id_priv->id.state) { 2284 case IB_CM_REQ_SENT: 2285 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ || 2286 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2287 cm_id_priv->msg, timeout)) 2288 goto out; 2289 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD; 2290 break; 2291 case IB_CM_REP_SENT: 2292 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP || 2293 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2294 cm_id_priv->msg, timeout)) 2295 goto out; 2296 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD; 2297 break; 2298 case IB_CM_ESTABLISHED: 2299 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER || 2300 cm_id_priv->id.lap_state != IB_CM_LAP_SENT || 2301 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2302 cm_id_priv->msg, timeout)) 2303 goto out; 2304 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD; 2305 break; 2306 default: 2307 goto out; 2308 } 2309 2310 cm_id_priv->msg->context[1] = (void *) (unsigned long) 2311 cm_id_priv->id.state; 2312 ret = 
atomic_inc_and_test(&cm_id_priv->work_count); 2313 if (!ret) 2314 list_add_tail(&work->list, &cm_id_priv->work_list); 2315 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2316 2317 if (ret) 2318 cm_process_work(cm_id_priv, work); 2319 else 2320 cm_deref_id(cm_id_priv); 2321 return 0; 2322out: 2323 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2324 cm_deref_id(cm_id_priv); 2325 return -EINVAL; 2326} 2327 2328static void cm_format_lap(struct cm_lap_msg *lap_msg, 2329 struct cm_id_private *cm_id_priv, 2330 struct ib_sa_path_rec *alternate_path, 2331 const void *private_data, 2332 u8 private_data_len) 2333{ 2334 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID, 2335 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP)); 2336 lap_msg->local_comm_id = cm_id_priv->id.local_id; 2337 lap_msg->remote_comm_id = cm_id_priv->id.remote_id; 2338 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn); 2339 /* todo: need remote CM response timeout */ 2340 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F); 2341 lap_msg->alt_local_lid = alternate_path->slid; 2342 lap_msg->alt_remote_lid = alternate_path->dlid; 2343 lap_msg->alt_local_gid = alternate_path->sgid; 2344 lap_msg->alt_remote_gid = alternate_path->dgid; 2345 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label); 2346 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class); 2347 lap_msg->alt_hop_limit = alternate_path->hop_limit; 2348 cm_lap_set_packet_rate(lap_msg, alternate_path->rate); 2349 cm_lap_set_sl(lap_msg, alternate_path->sl); 2350 cm_lap_set_subnet_local(lap_msg, 1); /* local only... */ 2351 cm_lap_set_local_ack_timeout(lap_msg, 2352 min(31, alternate_path->packet_life_time + 1)); 2353 2354 if (private_data && private_data_len) 2355 memcpy(lap_msg->private_data, private_data, private_data_len); 2356} 2357 2358int ib_send_cm_lap(struct ib_cm_id *cm_id, 2359 struct ib_sa_path_rec *alternate_path, 2360 const void *private_data, 2361 u8 private_data_len) 2362{ 2363 struct cm_id_private *cm_id_priv; 2364 struct ib_mad_send_buf *msg; 2365 unsigned long flags; 2366 int ret; 2367 2368 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE) 2369 return -EINVAL; 2370 2371 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2372 spin_lock_irqsave(&cm_id_priv->lock, flags); 2373 if (cm_id->state != IB_CM_ESTABLISHED || 2374 cm_id->lap_state != IB_CM_LAP_IDLE) { 2375 ret = -EINVAL; 2376 goto out; 2377 } 2378 2379 ret = cm_alloc_msg(cm_id_priv, &msg); 2380 if (ret) 2381 goto out; 2382 2383 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv, 2384 alternate_path, private_data, private_data_len); 2385 msg->timeout_ms = cm_id_priv->timeout_ms; 2386 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED; 2387 2388 ret = ib_post_send_mad(msg, NULL); 2389 if (ret) { 2390 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2391 cm_free_msg(msg); 2392 return ret; 2393 } 2394 2395 cm_id->lap_state = IB_CM_LAP_SENT; 2396 cm_id_priv->msg = msg; 2397 2398out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2399 return ret; 2400} 2401EXPORT_SYMBOL(ib_send_cm_lap); 2402 2403static void cm_format_path_from_lap(struct ib_sa_path_rec *path, 2404 struct cm_lap_msg *lap_msg) 2405{ 2406 memset(path, 0, sizeof *path); 2407 path->dgid = lap_msg->alt_local_gid; 2408 path->sgid = lap_msg->alt_remote_gid; 2409 path->dlid = lap_msg->alt_local_lid; 2410 path->slid = lap_msg->alt_remote_lid; 2411 path->flow_label = cm_lap_get_flow_label(lap_msg); 2412 path->hop_limit = lap_msg->alt_hop_limit; 2413 path->traffic_class = 
cm_lap_get_traffic_class(lap_msg); 2414 path->reversible = 1; 2415 /* pkey is same as in REQ */ 2416 path->sl = cm_lap_get_sl(lap_msg); 2417 path->mtu_selector = IB_SA_EQ; 2418 /* mtu is same as in REQ */ 2419 path->rate_selector = IB_SA_EQ; 2420 path->rate = cm_lap_get_packet_rate(lap_msg); 2421 path->packet_life_time_selector = IB_SA_EQ; 2422 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg); 2423 path->packet_life_time -= (path->packet_life_time > 0); 2424} 2425 2426static int cm_lap_handler(struct cm_work *work) 2427{ 2428 struct cm_id_private *cm_id_priv; 2429 struct cm_lap_msg *lap_msg; 2430 struct ib_cm_lap_event_param *param; 2431 struct ib_mad_send_buf *msg = NULL; 2432 unsigned long flags; 2433 int ret; 2434 2435 /* todo: verify LAP request and send reject APR if invalid. */ 2436 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad; 2437 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id, 2438 lap_msg->local_comm_id); 2439 if (!cm_id_priv) 2440 return -EINVAL; 2441 2442 param = &work->cm_event.param.lap_rcvd; 2443 param->alternate_path = &work->path[0]; 2444 cm_format_path_from_lap(param->alternate_path, lap_msg); 2445 work->cm_event.private_data = &lap_msg->private_data; 2446 2447 spin_lock_irqsave(&cm_id_priv->lock, flags); 2448 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) 2449 goto unlock; 2450 2451 switch (cm_id_priv->id.lap_state) { 2452 case IB_CM_LAP_IDLE: 2453 break; 2454 case IB_CM_MRA_LAP_SENT: 2455 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) 2456 goto unlock; 2457 2458 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2459 CM_MSG_RESPONSE_OTHER, 2460 cm_id_priv->service_timeout, 2461 cm_id_priv->private_data, 2462 cm_id_priv->private_data_len); 2463 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2464 2465 if (ib_post_send_mad(msg, NULL)) 2466 cm_free_msg(msg); 2467 goto deref; 2468 default: 2469 goto unlock; 2470 } 2471 2472 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD; 2473 cm_id_priv->tid = lap_msg->hdr.tid; 2474 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2475 if (!ret) 2476 list_add_tail(&work->list, &cm_id_priv->work_list); 2477 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2478 2479 if (ret) 2480 cm_process_work(cm_id_priv, work); 2481 else 2482 cm_deref_id(cm_id_priv); 2483 return 0; 2484 2485unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2486deref: cm_deref_id(cm_id_priv); 2487 return -EINVAL; 2488} 2489 2490static void cm_format_apr(struct cm_apr_msg *apr_msg, 2491 struct cm_id_private *cm_id_priv, 2492 enum ib_cm_apr_status status, 2493 void *info, 2494 u8 info_length, 2495 const void *private_data, 2496 u8 private_data_len) 2497{ 2498 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid); 2499 apr_msg->local_comm_id = cm_id_priv->id.local_id; 2500 apr_msg->remote_comm_id = cm_id_priv->id.remote_id; 2501 apr_msg->ap_status = (u8) status; 2502 2503 if (info && info_length) { 2504 apr_msg->info_length = info_length; 2505 memcpy(apr_msg->info, info, info_length); 2506 } 2507 2508 if (private_data && private_data_len) 2509 memcpy(apr_msg->private_data, private_data, private_data_len); 2510} 2511 2512int ib_send_cm_apr(struct ib_cm_id *cm_id, 2513 enum ib_cm_apr_status status, 2514 void *info, 2515 u8 info_length, 2516 const void *private_data, 2517 u8 private_data_len) 2518{ 2519 struct cm_id_private *cm_id_priv; 2520 struct ib_mad_send_buf *msg; 2521 unsigned long flags; 2522 int ret; 2523 2524 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) || 2525 (info 
&& info_length > IB_CM_APR_INFO_LENGTH)) 2526 return -EINVAL; 2527 2528 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2529 spin_lock_irqsave(&cm_id_priv->lock, flags); 2530 if (cm_id->state != IB_CM_ESTABLISHED || 2531 (cm_id->lap_state != IB_CM_LAP_RCVD && 2532 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) { 2533 ret = -EINVAL; 2534 goto out; 2535 } 2536 2537 ret = cm_alloc_msg(cm_id_priv, &msg); 2538 if (ret) 2539 goto out; 2540 2541 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status, 2542 info, info_length, private_data, private_data_len); 2543 ret = ib_post_send_mad(msg, NULL); 2544 if (ret) { 2545 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2546 cm_free_msg(msg); 2547 return ret; 2548 } 2549 2550 cm_id->lap_state = IB_CM_LAP_IDLE; 2551out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2552 return ret; 2553} 2554EXPORT_SYMBOL(ib_send_cm_apr); 2555 2556static int cm_apr_handler(struct cm_work *work) 2557{ 2558 struct cm_id_private *cm_id_priv; 2559 struct cm_apr_msg *apr_msg; 2560 unsigned long flags; 2561 int ret; 2562 2563 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad; 2564 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id, 2565 apr_msg->local_comm_id); 2566 if (!cm_id_priv) 2567 return -EINVAL; /* Unmatched reply. */ 2568 2569 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status; 2570 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info; 2571 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length; 2572 work->cm_event.private_data = &apr_msg->private_data; 2573 2574 spin_lock_irqsave(&cm_id_priv->lock, flags); 2575 if (cm_id_priv->id.state != IB_CM_ESTABLISHED || 2576 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT && 2577 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) { 2578 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2579 goto out; 2580 } 2581 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE; 2582 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2583 cm_id_priv->msg = NULL; 2584 2585 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2586 if (!ret) 2587 list_add_tail(&work->list, &cm_id_priv->work_list); 2588 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2589 2590 if (ret) 2591 cm_process_work(cm_id_priv, work); 2592 else 2593 cm_deref_id(cm_id_priv); 2594 return 0; 2595out: 2596 cm_deref_id(cm_id_priv); 2597 return -EINVAL; 2598} 2599 2600static int cm_timewait_handler(struct cm_work *work) 2601{ 2602 struct cm_timewait_info *timewait_info; 2603 struct cm_id_private *cm_id_priv; 2604 unsigned long flags; 2605 int ret; 2606 2607 timewait_info = (struct cm_timewait_info *)work; 2608 cm_cleanup_timewait(timewait_info); 2609 2610 cm_id_priv = cm_acquire_id(timewait_info->work.local_id, 2611 timewait_info->work.remote_id); 2612 if (!cm_id_priv) 2613 return -EINVAL; 2614 2615 spin_lock_irqsave(&cm_id_priv->lock, flags); 2616 if (cm_id_priv->id.state != IB_CM_TIMEWAIT || 2617 cm_id_priv->remote_qpn != timewait_info->remote_qpn) { 2618 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2619 goto out; 2620 } 2621 cm_id_priv->id.state = IB_CM_IDLE; 2622 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2623 if (!ret) 2624 list_add_tail(&work->list, &cm_id_priv->work_list); 2625 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2626 2627 if (ret) 2628 cm_process_work(cm_id_priv, work); 2629 else 2630 cm_deref_id(cm_id_priv); 2631 return 0; 2632out: 2633 cm_deref_id(cm_id_priv); 2634 return -EINVAL; 2635} 2636 2637static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg, 2638 struct cm_id_private 
*cm_id_priv, 2639 struct ib_cm_sidr_req_param *param) 2640{ 2641 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID, 2642 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR)); 2643 sidr_req_msg->request_id = cm_id_priv->id.local_id; 2644 sidr_req_msg->pkey = cpu_to_be16(param->path->pkey); 2645 sidr_req_msg->service_id = param->service_id; 2646 2647 if (param->private_data && param->private_data_len) 2648 memcpy(sidr_req_msg->private_data, param->private_data, 2649 param->private_data_len); 2650} 2651 2652int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, 2653 struct ib_cm_sidr_req_param *param) 2654{ 2655 struct cm_id_private *cm_id_priv; 2656 struct ib_mad_send_buf *msg; 2657 unsigned long flags; 2658 int ret; 2659 2660 if (!param->path || (param->private_data && 2661 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE)) 2662 return -EINVAL; 2663 2664 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2665 ret = cm_init_av_by_path(param->path, &cm_id_priv->av); 2666 if (ret) 2667 goto out; 2668 2669 cm_id->service_id = param->service_id; 2670 cm_id->service_mask = __constant_cpu_to_be64(~0ULL); 2671 cm_id_priv->timeout_ms = param->timeout_ms; 2672 cm_id_priv->max_cm_retries = param->max_cm_retries; 2673 ret = cm_alloc_msg(cm_id_priv, &msg); 2674 if (ret) 2675 goto out; 2676 2677 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv, 2678 param); 2679 msg->timeout_ms = cm_id_priv->timeout_ms; 2680 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT; 2681 2682 spin_lock_irqsave(&cm_id_priv->lock, flags); 2683 if (cm_id->state == IB_CM_IDLE) 2684 ret = ib_post_send_mad(msg, NULL); 2685 else 2686 ret = -EINVAL; 2687 2688 if (ret) { 2689 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2690 cm_free_msg(msg); 2691 goto out; 2692 } 2693 cm_id->state = IB_CM_SIDR_REQ_SENT; 2694 cm_id_priv->msg = msg; 2695 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2696out: 2697 return ret; 2698} 2699EXPORT_SYMBOL(ib_send_cm_sidr_req); 2700 2701static void cm_format_sidr_req_event(struct cm_work *work, 2702 struct ib_cm_id *listen_id) 2703{ 2704 struct cm_sidr_req_msg *sidr_req_msg; 2705 struct ib_cm_sidr_req_event_param *param; 2706 2707 sidr_req_msg = (struct cm_sidr_req_msg *) 2708 work->mad_recv_wc->recv_buf.mad; 2709 param = &work->cm_event.param.sidr_req_rcvd; 2710 param->pkey = __be16_to_cpu(sidr_req_msg->pkey); 2711 param->listen_id = listen_id; 2712 param->port = work->port->port_num; 2713 work->cm_event.private_data = &sidr_req_msg->private_data; 2714} 2715 2716static int cm_sidr_req_handler(struct cm_work *work) 2717{ 2718 struct ib_cm_id *cm_id; 2719 struct cm_id_private *cm_id_priv, *cur_cm_id_priv; 2720 struct cm_sidr_req_msg *sidr_req_msg; 2721 struct ib_wc *wc; 2722 unsigned long flags; 2723 2724 cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL); 2725 if (IS_ERR(cm_id)) 2726 return PTR_ERR(cm_id); 2727 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2728 2729 /* Record SGID/SLID and request ID for lookup. 
*/ 2730 sidr_req_msg = (struct cm_sidr_req_msg *) 2731 work->mad_recv_wc->recv_buf.mad; 2732 wc = work->mad_recv_wc->wc; 2733 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid); 2734 cm_id_priv->av.dgid.global.interface_id = 0; 2735 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 2736 work->mad_recv_wc->recv_buf.grh, 2737 &cm_id_priv->av); 2738 cm_id_priv->id.remote_id = sidr_req_msg->request_id; 2739 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD; 2740 cm_id_priv->tid = sidr_req_msg->hdr.tid; 2741 atomic_inc(&cm_id_priv->work_count); 2742 2743 spin_lock_irqsave(&cm.lock, flags); 2744 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); 2745 if (cur_cm_id_priv) { 2746 spin_unlock_irqrestore(&cm.lock, flags); 2747 goto out; /* Duplicate message. */ 2748 } 2749 cur_cm_id_priv = cm_find_listen(cm_id->device, 2750 sidr_req_msg->service_id, 2751 sidr_req_msg->private_data); 2752 if (!cur_cm_id_priv) { 2753 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 2754 spin_unlock_irqrestore(&cm.lock, flags); 2755 /* todo: reply with no match */ 2756 goto out; /* No match. */ 2757 } 2758 atomic_inc(&cur_cm_id_priv->refcount); 2759 spin_unlock_irqrestore(&cm.lock, flags); 2760 2761 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; 2762 cm_id_priv->id.context = cur_cm_id_priv->id.context; 2763 cm_id_priv->id.service_id = sidr_req_msg->service_id; 2764 cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL); 2765 2766 cm_format_sidr_req_event(work, &cur_cm_id_priv->id); 2767 cm_process_work(cm_id_priv, work); 2768 cm_deref_id(cur_cm_id_priv); 2769 return 0; 2770out: 2771 ib_destroy_cm_id(&cm_id_priv->id); 2772 return -EINVAL; 2773} 2774 2775static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg, 2776 struct cm_id_private *cm_id_priv, 2777 struct ib_cm_sidr_rep_param *param) 2778{ 2779 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID, 2780 cm_id_priv->tid); 2781 sidr_rep_msg->request_id = cm_id_priv->id.remote_id; 2782 sidr_rep_msg->status = param->status; 2783 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num)); 2784 sidr_rep_msg->service_id = cm_id_priv->id.service_id; 2785 sidr_rep_msg->qkey = cpu_to_be32(param->qkey); 2786 2787 if (param->info && param->info_length) 2788 memcpy(sidr_rep_msg->info, param->info, param->info_length); 2789 2790 if (param->private_data && param->private_data_len) 2791 memcpy(sidr_rep_msg->private_data, param->private_data, 2792 param->private_data_len); 2793} 2794 2795int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, 2796 struct ib_cm_sidr_rep_param *param) 2797{ 2798 struct cm_id_private *cm_id_priv; 2799 struct ib_mad_send_buf *msg; 2800 unsigned long flags; 2801 int ret; 2802 2803 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) || 2804 (param->private_data && 2805 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE)) 2806 return -EINVAL; 2807 2808 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2809 spin_lock_irqsave(&cm_id_priv->lock, flags); 2810 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) { 2811 ret = -EINVAL; 2812 goto error; 2813 } 2814 2815 ret = cm_alloc_msg(cm_id_priv, &msg); 2816 if (ret) 2817 goto error; 2818 2819 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, 2820 param); 2821 ret = ib_post_send_mad(msg, NULL); 2822 if (ret) { 2823 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2824 cm_free_msg(msg); 2825 return ret; 2826 } 2827 cm_id->state = IB_CM_IDLE; 2828 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2829 2830 
spin_lock_irqsave(&cm.lock, flags); 2831 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 2832 spin_unlock_irqrestore(&cm.lock, flags); 2833 return 0; 2834 2835error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2836 return ret; 2837} 2838EXPORT_SYMBOL(ib_send_cm_sidr_rep); 2839 2840static void cm_format_sidr_rep_event(struct cm_work *work) 2841{ 2842 struct cm_sidr_rep_msg *sidr_rep_msg; 2843 struct ib_cm_sidr_rep_event_param *param; 2844 2845 sidr_rep_msg = (struct cm_sidr_rep_msg *) 2846 work->mad_recv_wc->recv_buf.mad; 2847 param = &work->cm_event.param.sidr_rep_rcvd; 2848 param->status = sidr_rep_msg->status; 2849 param->qkey = be32_to_cpu(sidr_rep_msg->qkey); 2850 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg)); 2851 param->info = &sidr_rep_msg->info; 2852 param->info_len = sidr_rep_msg->info_length; 2853 work->cm_event.private_data = &sidr_rep_msg->private_data; 2854} 2855 2856static int cm_sidr_rep_handler(struct cm_work *work) 2857{ 2858 struct cm_sidr_rep_msg *sidr_rep_msg; 2859 struct cm_id_private *cm_id_priv; 2860 unsigned long flags; 2861 2862 sidr_rep_msg = (struct cm_sidr_rep_msg *) 2863 work->mad_recv_wc->recv_buf.mad; 2864 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0); 2865 if (!cm_id_priv) 2866 return -EINVAL; /* Unmatched reply. */ 2867 2868 spin_lock_irqsave(&cm_id_priv->lock, flags); 2869 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) { 2870 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2871 goto out; 2872 } 2873 cm_id_priv->id.state = IB_CM_IDLE; 2874 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2875 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2876 2877 cm_format_sidr_rep_event(work); 2878 cm_process_work(cm_id_priv, work); 2879 return 0; 2880out: 2881 cm_deref_id(cm_id_priv); 2882 return -EINVAL; 2883} 2884 2885static void cm_process_send_error(struct ib_mad_send_buf *msg, 2886 enum ib_wc_status wc_status) 2887{ 2888 struct cm_id_private *cm_id_priv; 2889 struct ib_cm_event cm_event; 2890 enum ib_cm_state state; 2891 unsigned long flags; 2892 int ret; 2893 2894 memset(&cm_event, 0, sizeof cm_event); 2895 cm_id_priv = msg->context[0]; 2896 2897 /* Discard old sends or ones without a response. */ 2898 spin_lock_irqsave(&cm_id_priv->lock, flags); 2899 state = (enum ib_cm_state) (unsigned long) msg->context[1]; 2900 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state) 2901 goto discard; 2902 2903 switch (state) { 2904 case IB_CM_REQ_SENT: 2905 case IB_CM_MRA_REQ_RCVD: 2906 cm_reset_to_idle(cm_id_priv); 2907 cm_event.event = IB_CM_REQ_ERROR; 2908 break; 2909 case IB_CM_REP_SENT: 2910 case IB_CM_MRA_REP_RCVD: 2911 cm_reset_to_idle(cm_id_priv); 2912 cm_event.event = IB_CM_REP_ERROR; 2913 break; 2914 case IB_CM_DREQ_SENT: 2915 cm_enter_timewait(cm_id_priv); 2916 cm_event.event = IB_CM_DREQ_ERROR; 2917 break; 2918 case IB_CM_SIDR_REQ_SENT: 2919 cm_id_priv->id.state = IB_CM_IDLE; 2920 cm_event.event = IB_CM_SIDR_REQ_ERROR; 2921 break; 2922 default: 2923 goto discard; 2924 } 2925 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2926 cm_event.param.send_status = wc_status; 2927 2928 /* No other events can occur on the cm_id at this point. 
*/ 2929 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event); 2930 cm_free_msg(msg); 2931 if (ret) 2932 ib_destroy_cm_id(&cm_id_priv->id); 2933 return; 2934discard: 2935 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2936 cm_free_msg(msg); 2937} 2938 2939static void cm_send_handler(struct ib_mad_agent *mad_agent, 2940 struct ib_mad_send_wc *mad_send_wc) 2941{ 2942 struct ib_mad_send_buf *msg = mad_send_wc->send_buf; 2943 2944 switch (mad_send_wc->status) { 2945 case IB_WC_SUCCESS: 2946 case IB_WC_WR_FLUSH_ERR: 2947 cm_free_msg(msg); 2948 break; 2949 default: 2950 if (msg->context[0] && msg->context[1]) 2951 cm_process_send_error(msg, mad_send_wc->status); 2952 else 2953 cm_free_msg(msg); 2954 break; 2955 } 2956} 2957 2958static void cm_work_handler(void *data) 2959{ 2960 struct cm_work *work = data; 2961 int ret; 2962 2963 switch (work->cm_event.event) { 2964 case IB_CM_REQ_RECEIVED: 2965 ret = cm_req_handler(work); 2966 break; 2967 case IB_CM_MRA_RECEIVED: 2968 ret = cm_mra_handler(work); 2969 break; 2970 case IB_CM_REJ_RECEIVED: 2971 ret = cm_rej_handler(work); 2972 break; 2973 case IB_CM_REP_RECEIVED: 2974 ret = cm_rep_handler(work); 2975 break; 2976 case IB_CM_RTU_RECEIVED: 2977 ret = cm_rtu_handler(work); 2978 break; 2979 case IB_CM_USER_ESTABLISHED: 2980 ret = cm_establish_handler(work); 2981 break; 2982 case IB_CM_DREQ_RECEIVED: 2983 ret = cm_dreq_handler(work); 2984 break; 2985 case IB_CM_DREP_RECEIVED: 2986 ret = cm_drep_handler(work); 2987 break; 2988 case IB_CM_SIDR_REQ_RECEIVED: 2989 ret = cm_sidr_req_handler(work); 2990 break; 2991 case IB_CM_SIDR_REP_RECEIVED: 2992 ret = cm_sidr_rep_handler(work); 2993 break; 2994 case IB_CM_LAP_RECEIVED: 2995 ret = cm_lap_handler(work); 2996 break; 2997 case IB_CM_APR_RECEIVED: 2998 ret = cm_apr_handler(work); 2999 break; 3000 case IB_CM_TIMEWAIT_EXIT: 3001 ret = cm_timewait_handler(work); 3002 break; 3003 default: 3004 ret = -EINVAL; 3005 break; 3006 } 3007 if (ret) 3008 cm_free_work(work); 3009} 3010 3011int ib_cm_establish(struct ib_cm_id *cm_id) 3012{ 3013 struct cm_id_private *cm_id_priv; 3014 struct cm_work *work; 3015 unsigned long flags; 3016 int ret = 0; 3017 3018 work = kmalloc(sizeof *work, GFP_ATOMIC); 3019 if (!work) 3020 return -ENOMEM; 3021 3022 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3023 spin_lock_irqsave(&cm_id_priv->lock, flags); 3024 switch (cm_id->state) 3025 { 3026 case IB_CM_REP_SENT: 3027 case IB_CM_MRA_REP_RCVD: 3028 cm_id->state = IB_CM_ESTABLISHED; 3029 break; 3030 case IB_CM_ESTABLISHED: 3031 ret = -EISCONN; 3032 break; 3033 default: 3034 ret = -EINVAL; 3035 break; 3036 } 3037 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3038 3039 if (ret) { 3040 kfree(work); 3041 goto out; 3042 } 3043 3044 /* 3045 * The CM worker thread may try to destroy the cm_id before it 3046 * can execute this work item. To prevent potential deadlock, 3047 * we need to find the cm_id once we're in the context of the 3048 * worker thread, rather than holding a reference on it. 
3049 */ 3050 INIT_WORK(&work->work, cm_work_handler, work); 3051 work->local_id = cm_id->local_id; 3052 work->remote_id = cm_id->remote_id; 3053 work->mad_recv_wc = NULL; 3054 work->cm_event.event = IB_CM_USER_ESTABLISHED; 3055 queue_work(cm.wq, &work->work); 3056out: 3057 return ret; 3058} 3059EXPORT_SYMBOL(ib_cm_establish); 3060 3061static void cm_recv_handler(struct ib_mad_agent *mad_agent, 3062 struct ib_mad_recv_wc *mad_recv_wc) 3063{ 3064 struct cm_work *work; 3065 enum ib_cm_event_type event; 3066 int paths = 0; 3067 3068 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { 3069 case CM_REQ_ATTR_ID: 3070 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)-> 3071 alt_local_lid != 0); 3072 event = IB_CM_REQ_RECEIVED; 3073 break; 3074 case CM_MRA_ATTR_ID: 3075 event = IB_CM_MRA_RECEIVED; 3076 break; 3077 case CM_REJ_ATTR_ID: 3078 event = IB_CM_REJ_RECEIVED; 3079 break; 3080 case CM_REP_ATTR_ID: 3081 event = IB_CM_REP_RECEIVED; 3082 break; 3083 case CM_RTU_ATTR_ID: 3084 event = IB_CM_RTU_RECEIVED; 3085 break; 3086 case CM_DREQ_ATTR_ID: 3087 event = IB_CM_DREQ_RECEIVED; 3088 break; 3089 case CM_DREP_ATTR_ID: 3090 event = IB_CM_DREP_RECEIVED; 3091 break; 3092 case CM_SIDR_REQ_ATTR_ID: 3093 event = IB_CM_SIDR_REQ_RECEIVED; 3094 break; 3095 case CM_SIDR_REP_ATTR_ID: 3096 event = IB_CM_SIDR_REP_RECEIVED; 3097 break; 3098 case CM_LAP_ATTR_ID: 3099 paths = 1; 3100 event = IB_CM_LAP_RECEIVED; 3101 break; 3102 case CM_APR_ATTR_ID: 3103 event = IB_CM_APR_RECEIVED; 3104 break; 3105 default: 3106 ib_free_recv_mad(mad_recv_wc); 3107 return; 3108 } 3109 3110 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, 3111 GFP_KERNEL); 3112 if (!work) { 3113 ib_free_recv_mad(mad_recv_wc); 3114 return; 3115 } 3116 3117 INIT_WORK(&work->work, cm_work_handler, work); 3118 work->cm_event.event = event; 3119 work->mad_recv_wc = mad_recv_wc; 3120 work->port = (struct cm_port *)mad_agent->context; 3121 queue_work(cm.wq, &work->work); 3122} 3123 3124static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, 3125 struct ib_qp_attr *qp_attr, 3126 int *qp_attr_mask) 3127{ 3128 unsigned long flags; 3129 int ret; 3130 3131 spin_lock_irqsave(&cm_id_priv->lock, flags); 3132 switch (cm_id_priv->id.state) { 3133 case IB_CM_REQ_SENT: 3134 case IB_CM_MRA_REQ_RCVD: 3135 case IB_CM_REQ_RCVD: 3136 case IB_CM_MRA_REQ_SENT: 3137 case IB_CM_REP_RCVD: 3138 case IB_CM_MRA_REP_SENT: 3139 case IB_CM_REP_SENT: 3140 case IB_CM_MRA_REP_RCVD: 3141 case IB_CM_ESTABLISHED: 3142 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | 3143 IB_QP_PKEY_INDEX | IB_QP_PORT; 3144 qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | 3145 IB_ACCESS_REMOTE_WRITE; 3146 if (cm_id_priv->responder_resources) 3147 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ | 3148 IB_ACCESS_REMOTE_ATOMIC; 3149 qp_attr->pkey_index = cm_id_priv->av.pkey_index; 3150 qp_attr->port_num = cm_id_priv->av.port->port_num; 3151 ret = 0; 3152 break; 3153 default: 3154 ret = -EINVAL; 3155 break; 3156 } 3157 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3158 return ret; 3159} 3160 3161static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv, 3162 struct ib_qp_attr *qp_attr, 3163 int *qp_attr_mask) 3164{ 3165 unsigned long flags; 3166 int ret; 3167 3168 spin_lock_irqsave(&cm_id_priv->lock, flags); 3169 switch (cm_id_priv->id.state) { 3170 case IB_CM_REQ_RCVD: 3171 case IB_CM_MRA_REQ_SENT: 3172 case IB_CM_REP_RCVD: 3173 case IB_CM_MRA_REP_SENT: 3174 case IB_CM_REP_SENT: 3175 case IB_CM_MRA_REP_RCVD: 3176 case IB_CM_ESTABLISHED: 3177 
*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | 3178 IB_QP_DEST_QPN | IB_QP_RQ_PSN; 3179 qp_attr->ah_attr = cm_id_priv->av.ah_attr; 3180 qp_attr->path_mtu = cm_id_priv->path_mtu; 3181 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn); 3182 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn); 3183 if (cm_id_priv->qp_type == IB_QPT_RC) { 3184 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC | 3185 IB_QP_MIN_RNR_TIMER; 3186 qp_attr->max_dest_rd_atomic = 3187 cm_id_priv->responder_resources; 3188 qp_attr->min_rnr_timer = 0; 3189 } 3190 if (cm_id_priv->alt_av.ah_attr.dlid) { 3191 *qp_attr_mask |= IB_QP_ALT_PATH; 3192 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num; 3193 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; 3194 } 3195 ret = 0; 3196 break; 3197 default: 3198 ret = -EINVAL; 3199 break; 3200 } 3201 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3202 return ret; 3203} 3204 3205static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv, 3206 struct ib_qp_attr *qp_attr, 3207 int *qp_attr_mask) 3208{ 3209 unsigned long flags; 3210 int ret; 3211 3212 spin_lock_irqsave(&cm_id_priv->lock, flags); 3213 switch (cm_id_priv->id.state) { 3214 case IB_CM_REP_RCVD: 3215 case IB_CM_MRA_REP_SENT: 3216 case IB_CM_REP_SENT: 3217 case IB_CM_MRA_REP_RCVD: 3218 case IB_CM_ESTABLISHED: 3219 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN; 3220 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn); 3221 if (cm_id_priv->qp_type == IB_QPT_RC) { 3222 *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | 3223 IB_QP_RNR_RETRY | 3224 IB_QP_MAX_QP_RD_ATOMIC; 3225 qp_attr->timeout = cm_id_priv->local_ack_timeout; 3226 qp_attr->retry_cnt = cm_id_priv->retry_count; 3227 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count; 3228 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth; 3229 } 3230 if (cm_id_priv->alt_av.ah_attr.dlid) { 3231 *qp_attr_mask |= IB_QP_PATH_MIG_STATE; 3232 qp_attr->path_mig_state = IB_MIG_REARM; 3233 } 3234 ret = 0; 3235 break; 3236 default: 3237 ret = -EINVAL; 3238 break; 3239 } 3240 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3241 return ret; 3242} 3243 3244int ib_cm_init_qp_attr(struct ib_cm_id *cm_id, 3245 struct ib_qp_attr *qp_attr, 3246 int *qp_attr_mask) 3247{ 3248 struct cm_id_private *cm_id_priv; 3249 int ret; 3250 3251 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3252 switch (qp_attr->qp_state) { 3253 case IB_QPS_INIT: 3254 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask); 3255 break; 3256 case IB_QPS_RTR: 3257 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask); 3258 break; 3259 case IB_QPS_RTS: 3260 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask); 3261 break; 3262 default: 3263 ret = -EINVAL; 3264 break; 3265 } 3266 return ret; 3267} 3268EXPORT_SYMBOL(ib_cm_init_qp_attr); 3269 3270static void cm_add_one(struct ib_device *device) 3271{ 3272 struct cm_device *cm_dev; 3273 struct cm_port *port; 3274 struct ib_mad_reg_req reg_req = { 3275 .mgmt_class = IB_MGMT_CLASS_CM, 3276 .mgmt_class_version = IB_CM_CLASS_VERSION 3277 }; 3278 struct ib_port_modify port_modify = { 3279 .set_port_cap_mask = IB_PORT_CM_SUP 3280 }; 3281 unsigned long flags; 3282 int ret; 3283 u8 i; 3284 3285 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) 3286 return; 3287 3288 cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) * 3289 device->phys_port_cnt, GFP_KERNEL); 3290 if (!cm_dev) 3291 return; 3292 3293 cm_dev->device = device; 3294 cm_dev->ca_guid = device->node_guid; 3295 3296 set_bit(IB_MGMT_METHOD_SEND, 
reg_req.method_mask); 3297 for (i = 1; i <= device->phys_port_cnt; i++) { 3298 port = &cm_dev->port[i-1]; 3299 port->cm_dev = cm_dev; 3300 port->port_num = i; 3301 port->mad_agent = ib_register_mad_agent(device, i, 3302 IB_QPT_GSI, 3303 &reg_req, 3304 0, 3305 cm_send_handler, 3306 cm_recv_handler, 3307 port); 3308 if (IS_ERR(port->mad_agent)) 3309 goto error1; 3310 3311 ret = ib_modify_port(device, i, 0, &port_modify); 3312 if (ret) 3313 goto error2; 3314 } 3315 ib_set_client_data(device, &cm_client, cm_dev); 3316 3317 write_lock_irqsave(&cm.device_lock, flags); 3318 list_add_tail(&cm_dev->list, &cm.device_list); 3319 write_unlock_irqrestore(&cm.device_lock, flags); 3320 return; 3321 3322error2: 3323 ib_unregister_mad_agent(port->mad_agent); 3324error1: 3325 port_modify.set_port_cap_mask = 0; 3326 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; 3327 while (--i) { 3328 port = &cm_dev->port[i-1]; 3329 ib_modify_port(device, port->port_num, 0, &port_modify); 3330 ib_unregister_mad_agent(port->mad_agent); 3331 } 3332 kfree(cm_dev); 3333} 3334 3335static void cm_remove_one(struct ib_device *device) 3336{ 3337 struct cm_device *cm_dev; 3338 struct cm_port *port; 3339 struct ib_port_modify port_modify = { 3340 .clr_port_cap_mask = IB_PORT_CM_SUP 3341 }; 3342 unsigned long flags; 3343 int i; 3344 3345 cm_dev = ib_get_client_data(device, &cm_client); 3346 if (!cm_dev) 3347 return; 3348 3349 write_lock_irqsave(&cm.device_lock, flags); 3350 list_del(&cm_dev->list); 3351 write_unlock_irqrestore(&cm.device_lock, flags); 3352 3353 for (i = 1; i <= device->phys_port_cnt; i++) { 3354 port = &cm_dev->port[i-1]; 3355 ib_modify_port(device, port->port_num, 0, &port_modify); 3356 ib_unregister_mad_agent(port->mad_agent); 3357 } 3358 kfree(cm_dev); 3359} 3360 3361static int __init ib_cm_init(void) 3362{ 3363 int ret; 3364 3365 memset(&cm, 0, sizeof cm); 3366 INIT_LIST_HEAD(&cm.device_list); 3367 rwlock_init(&cm.device_lock); 3368 spin_lock_init(&cm.lock); 3369 cm.listen_service_table = RB_ROOT; 3370 cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); 3371 cm.remote_id_table = RB_ROOT; 3372 cm.remote_qp_table = RB_ROOT; 3373 cm.remote_sidr_table = RB_ROOT; 3374 idr_init(&cm.local_id_table); 3375 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand); 3376 idr_pre_get(&cm.local_id_table, GFP_KERNEL); 3377 3378 cm.wq = create_workqueue("ib_cm"); 3379 if (!cm.wq) 3380 return -ENOMEM; 3381 3382 ret = ib_register_client(&cm_client); 3383 if (ret) 3384 goto error; 3385 3386 return 0; 3387error: 3388 destroy_workqueue(cm.wq); 3389 return ret; 3390} 3391 3392static void __exit ib_cm_cleanup(void) 3393{ 3394 destroy_workqueue(cm.wq); 3395 ib_unregister_client(&cm_client); 3396 idr_destroy(&cm.local_id_table); 3397} 3398 3399module_init(ib_cm_init); 3400module_exit(ib_cm_cleanup); 3401 3402
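/*
 * Illustrative sketch (editor's addition, not part of the cm.c revision
 * listed above): roughly how a CM consumer (ULP) typically drives an RC
 * QP through INIT -> RTR -> RTS by combining ib_cm_init_qp_attr(), which
 * is defined in this file, with ib_modify_qp() from the verbs layer.
 * The helper name below is hypothetical, error handling is abbreviated,
 * and the surrounding ULP context (a connected cm_id plus its QP after
 * the REQ/REP exchange) is assumed rather than shown.  The returned
 * qp_attr_mask already encodes which attributes the CM filled in, so it
 * is passed straight through to ib_modify_qp().
 */
#if 0	/* example only -- not compiled as part of cm.c */
static int example_modify_qp_to_rts(struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask;
	int ret;

	/* INIT: the CM supplies pkey index, port number and access flags. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	/* RTR: address vector, path MTU, destination QPN and RQ PSN. */
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	/* RTS: SQ PSN, plus timeout and retry counts for RC QPs. */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;
	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
#endif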