cm.c revision f06d26537559113207e4b73af6a22eaa5c5e9dc3
1/* 2 * Copyright (c) 2004-2006 Intel Corporation. All rights reserved. 3 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 4 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. 5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 * 35 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $ 36 */ 37 38#include <linux/completion.h> 39#include <linux/dma-mapping.h> 40#include <linux/err.h> 41#include <linux/idr.h> 42#include <linux/interrupt.h> 43#include <linux/pci.h> 44#include <linux/random.h> 45#include <linux/rbtree.h> 46#include <linux/spinlock.h> 47#include <linux/workqueue.h> 48 49#include <rdma/ib_cache.h> 50#include <rdma/ib_cm.h> 51#include "cm_msgs.h" 52 53MODULE_AUTHOR("Sean Hefty"); 54MODULE_DESCRIPTION("InfiniBand CM"); 55MODULE_LICENSE("Dual BSD/GPL"); 56 57static void cm_add_one(struct ib_device *device); 58static void cm_remove_one(struct ib_device *device); 59 60static struct ib_client cm_client = { 61 .name = "cm", 62 .add = cm_add_one, 63 .remove = cm_remove_one 64}; 65 66static struct ib_cm { 67 spinlock_t lock; 68 struct list_head device_list; 69 rwlock_t device_lock; 70 struct rb_root listen_service_table; 71 u64 listen_service_id; 72 /* struct rb_root peer_service_table; todo: fix peer to peer */ 73 struct rb_root remote_qp_table; 74 struct rb_root remote_id_table; 75 struct rb_root remote_sidr_table; 76 struct idr local_id_table; 77 __be32 random_id_operand; 78 struct workqueue_struct *wq; 79} cm; 80 81struct cm_port { 82 struct cm_device *cm_dev; 83 struct ib_mad_agent *mad_agent; 84 u8 port_num; 85}; 86 87struct cm_device { 88 struct list_head list; 89 struct ib_device *device; 90 __be64 ca_guid; 91 struct cm_port port[0]; 92}; 93 94struct cm_av { 95 struct cm_port *port; 96 union ib_gid dgid; 97 struct ib_ah_attr ah_attr; 98 u16 pkey_index; 99 u8 packet_life_time; 100}; 101 102struct cm_work { 103 struct work_struct work; 104 struct list_head list; 105 struct cm_port *port; 106 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ 107 __be32 local_id; /* Established / timewait */ 108 __be32 remote_id; 109 struct ib_cm_event cm_event; 110 struct ib_sa_path_rec path[0]; 111}; 112 113struct 
cm_timewait_info { 114 struct cm_work work; /* Must be first. */ 115 struct rb_node remote_qp_node; 116 struct rb_node remote_id_node; 117 __be64 remote_ca_guid; 118 __be32 remote_qpn; 119 u8 inserted_remote_qp; 120 u8 inserted_remote_id; 121}; 122 123struct cm_id_private { 124 struct ib_cm_id id; 125 126 struct rb_node service_node; 127 struct rb_node sidr_id_node; 128 spinlock_t lock; /* Do not acquire inside cm.lock */ 129 struct completion comp; 130 atomic_t refcount; 131 132 struct ib_mad_send_buf *msg; 133 struct cm_timewait_info *timewait_info; 134 /* todo: use alternate port on send failure */ 135 struct cm_av av; 136 struct cm_av alt_av; 137 struct ib_cm_compare_data *compare_data; 138 139 void *private_data; 140 __be64 tid; 141 __be32 local_qpn; 142 __be32 remote_qpn; 143 enum ib_qp_type qp_type; 144 __be32 sq_psn; 145 __be32 rq_psn; 146 int timeout_ms; 147 enum ib_mtu path_mtu; 148 u8 private_data_len; 149 u8 max_cm_retries; 150 u8 peer_to_peer; 151 u8 responder_resources; 152 u8 initiator_depth; 153 u8 local_ack_timeout; 154 u8 retry_count; 155 u8 rnr_retry_count; 156 u8 service_timeout; 157 158 struct list_head work_list; 159 atomic_t work_count; 160}; 161 162static void cm_work_handler(void *data); 163 164static inline void cm_deref_id(struct cm_id_private *cm_id_priv) 165{ 166 if (atomic_dec_and_test(&cm_id_priv->refcount)) 167 complete(&cm_id_priv->comp); 168} 169 170static int cm_alloc_msg(struct cm_id_private *cm_id_priv, 171 struct ib_mad_send_buf **msg) 172{ 173 struct ib_mad_agent *mad_agent; 174 struct ib_mad_send_buf *m; 175 struct ib_ah *ah; 176 177 mad_agent = cm_id_priv->av.port->mad_agent; 178 ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr); 179 if (IS_ERR(ah)) 180 return PTR_ERR(ah); 181 182 m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, 183 cm_id_priv->av.pkey_index, 184 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, 185 GFP_ATOMIC); 186 if (IS_ERR(m)) { 187 ib_destroy_ah(ah); 188 return PTR_ERR(m); 189 } 190 191 /* Timeout set by caller if response is expected. 
*/ 192 m->ah = ah; 193 m->retries = cm_id_priv->max_cm_retries; 194 195 atomic_inc(&cm_id_priv->refcount); 196 m->context[0] = cm_id_priv; 197 *msg = m; 198 return 0; 199} 200 201static int cm_alloc_response_msg(struct cm_port *port, 202 struct ib_mad_recv_wc *mad_recv_wc, 203 struct ib_mad_send_buf **msg) 204{ 205 struct ib_mad_send_buf *m; 206 struct ib_ah *ah; 207 208 ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc, 209 mad_recv_wc->recv_buf.grh, port->port_num); 210 if (IS_ERR(ah)) 211 return PTR_ERR(ah); 212 213 m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index, 214 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, 215 GFP_ATOMIC); 216 if (IS_ERR(m)) { 217 ib_destroy_ah(ah); 218 return PTR_ERR(m); 219 } 220 m->ah = ah; 221 *msg = m; 222 return 0; 223} 224 225static void cm_free_msg(struct ib_mad_send_buf *msg) 226{ 227 ib_destroy_ah(msg->ah); 228 if (msg->context[0]) 229 cm_deref_id(msg->context[0]); 230 ib_free_send_mad(msg); 231} 232 233static void * cm_copy_private_data(const void *private_data, 234 u8 private_data_len) 235{ 236 void *data; 237 238 if (!private_data || !private_data_len) 239 return NULL; 240 241 data = kmalloc(private_data_len, GFP_KERNEL); 242 if (!data) 243 return ERR_PTR(-ENOMEM); 244 245 memcpy(data, private_data, private_data_len); 246 return data; 247} 248 249static void cm_set_private_data(struct cm_id_private *cm_id_priv, 250 void *private_data, u8 private_data_len) 251{ 252 if (cm_id_priv->private_data && cm_id_priv->private_data_len) 253 kfree(cm_id_priv->private_data); 254 255 cm_id_priv->private_data = private_data; 256 cm_id_priv->private_data_len = private_data_len; 257} 258 259static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc, 260 struct ib_grh *grh, struct cm_av *av) 261{ 262 av->port = port; 263 av->pkey_index = wc->pkey_index; 264 ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc, 265 grh, &av->ah_attr); 266} 267 268static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) 269{ 270 struct cm_device *cm_dev; 271 struct cm_port *port = NULL; 272 unsigned long flags; 273 int ret; 274 u8 p; 275 276 read_lock_irqsave(&cm.device_lock, flags); 277 list_for_each_entry(cm_dev, &cm.device_list, list) { 278 if (!ib_find_cached_gid(cm_dev->device, &path->sgid, 279 &p, NULL)) { 280 port = &cm_dev->port[p-1]; 281 break; 282 } 283 } 284 read_unlock_irqrestore(&cm.device_lock, flags); 285 286 if (!port) 287 return -EINVAL; 288 289 ret = ib_find_cached_pkey(cm_dev->device, port->port_num, 290 be16_to_cpu(path->pkey), &av->pkey_index); 291 if (ret) 292 return ret; 293 294 av->port = port; 295 ib_init_ah_from_path(cm_dev->device, port->port_num, path, 296 &av->ah_attr); 297 av->packet_life_time = path->packet_life_time; 298 return 0; 299} 300 301static int cm_alloc_id(struct cm_id_private *cm_id_priv) 302{ 303 unsigned long flags; 304 int ret, id; 305 static int next_id; 306 307 do { 308 spin_lock_irqsave(&cm.lock, flags); 309 ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 310 next_id++, &id); 311 spin_unlock_irqrestore(&cm.lock, flags); 312 } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) ); 313 314 cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand); 315 return ret; 316} 317 318static void cm_free_id(__be32 local_id) 319{ 320 unsigned long flags; 321 322 spin_lock_irqsave(&cm.lock, flags); 323 idr_remove(&cm.local_id_table, 324 (__force int) (local_id ^ cm.random_id_operand)); 325 spin_unlock_irqrestore(&cm.lock, flags); 326} 327 
328static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id) 329{ 330 struct cm_id_private *cm_id_priv; 331 332 cm_id_priv = idr_find(&cm.local_id_table, 333 (__force int) (local_id ^ cm.random_id_operand)); 334 if (cm_id_priv) { 335 if (cm_id_priv->id.remote_id == remote_id) 336 atomic_inc(&cm_id_priv->refcount); 337 else 338 cm_id_priv = NULL; 339 } 340 341 return cm_id_priv; 342} 343 344static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id) 345{ 346 struct cm_id_private *cm_id_priv; 347 unsigned long flags; 348 349 spin_lock_irqsave(&cm.lock, flags); 350 cm_id_priv = cm_get_id(local_id, remote_id); 351 spin_unlock_irqrestore(&cm.lock, flags); 352 353 return cm_id_priv; 354} 355 356static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask) 357{ 358 int i; 359 360 for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++) 361 ((unsigned long *) dst)[i] = ((unsigned long *) src)[i] & 362 ((unsigned long *) mask)[i]; 363} 364 365static int cm_compare_data(struct ib_cm_compare_data *src_data, 366 struct ib_cm_compare_data *dst_data) 367{ 368 u8 src[IB_CM_COMPARE_SIZE]; 369 u8 dst[IB_CM_COMPARE_SIZE]; 370 371 if (!src_data || !dst_data) 372 return 0; 373 374 cm_mask_copy(src, src_data->data, dst_data->mask); 375 cm_mask_copy(dst, dst_data->data, src_data->mask); 376 return memcmp(src, dst, IB_CM_COMPARE_SIZE); 377} 378 379static int cm_compare_private_data(u8 *private_data, 380 struct ib_cm_compare_data *dst_data) 381{ 382 u8 src[IB_CM_COMPARE_SIZE]; 383 384 if (!dst_data) 385 return 0; 386 387 cm_mask_copy(src, private_data, dst_data->mask); 388 return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE); 389} 390 391static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv) 392{ 393 struct rb_node **link = &cm.listen_service_table.rb_node; 394 struct rb_node *parent = NULL; 395 struct cm_id_private *cur_cm_id_priv; 396 __be64 service_id = cm_id_priv->id.service_id; 397 __be64 service_mask = cm_id_priv->id.service_mask; 398 int data_cmp; 399 400 while (*link) { 401 parent = *link; 402 cur_cm_id_priv = rb_entry(parent, struct cm_id_private, 403 service_node); 404 data_cmp = cm_compare_data(cm_id_priv->compare_data, 405 cur_cm_id_priv->compare_data); 406 if ((cur_cm_id_priv->id.service_mask & service_id) == 407 (service_mask & cur_cm_id_priv->id.service_id) && 408 (cm_id_priv->id.device == cur_cm_id_priv->id.device) && 409 !data_cmp) 410 return cur_cm_id_priv; 411 412 if (cm_id_priv->id.device < cur_cm_id_priv->id.device) 413 link = &(*link)->rb_left; 414 else if (cm_id_priv->id.device > cur_cm_id_priv->id.device) 415 link = &(*link)->rb_right; 416 else if (service_id < cur_cm_id_priv->id.service_id) 417 link = &(*link)->rb_left; 418 else if (service_id > cur_cm_id_priv->id.service_id) 419 link = &(*link)->rb_right; 420 else if (data_cmp < 0) 421 link = &(*link)->rb_left; 422 else 423 link = &(*link)->rb_right; 424 } 425 rb_link_node(&cm_id_priv->service_node, parent, link); 426 rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table); 427 return NULL; 428} 429 430static struct cm_id_private * cm_find_listen(struct ib_device *device, 431 __be64 service_id, 432 u8 *private_data) 433{ 434 struct rb_node *node = cm.listen_service_table.rb_node; 435 struct cm_id_private *cm_id_priv; 436 int data_cmp; 437 438 while (node) { 439 cm_id_priv = rb_entry(node, struct cm_id_private, service_node); 440 data_cmp = cm_compare_private_data(private_data, 441 cm_id_priv->compare_data); 442 if ((cm_id_priv->id.service_mask & service_id) == 
443 cm_id_priv->id.service_id && 444 (cm_id_priv->id.device == device) && !data_cmp) 445 return cm_id_priv; 446 447 if (device < cm_id_priv->id.device) 448 node = node->rb_left; 449 else if (device > cm_id_priv->id.device) 450 node = node->rb_right; 451 else if (service_id < cm_id_priv->id.service_id) 452 node = node->rb_left; 453 else if (service_id > cm_id_priv->id.service_id) 454 node = node->rb_right; 455 else if (data_cmp < 0) 456 node = node->rb_left; 457 else 458 node = node->rb_right; 459 } 460 return NULL; 461} 462 463static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info 464 *timewait_info) 465{ 466 struct rb_node **link = &cm.remote_id_table.rb_node; 467 struct rb_node *parent = NULL; 468 struct cm_timewait_info *cur_timewait_info; 469 __be64 remote_ca_guid = timewait_info->remote_ca_guid; 470 __be32 remote_id = timewait_info->work.remote_id; 471 472 while (*link) { 473 parent = *link; 474 cur_timewait_info = rb_entry(parent, struct cm_timewait_info, 475 remote_id_node); 476 if (remote_id < cur_timewait_info->work.remote_id) 477 link = &(*link)->rb_left; 478 else if (remote_id > cur_timewait_info->work.remote_id) 479 link = &(*link)->rb_right; 480 else if (remote_ca_guid < cur_timewait_info->remote_ca_guid) 481 link = &(*link)->rb_left; 482 else if (remote_ca_guid > cur_timewait_info->remote_ca_guid) 483 link = &(*link)->rb_right; 484 else 485 return cur_timewait_info; 486 } 487 timewait_info->inserted_remote_id = 1; 488 rb_link_node(&timewait_info->remote_id_node, parent, link); 489 rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table); 490 return NULL; 491} 492 493static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid, 494 __be32 remote_id) 495{ 496 struct rb_node *node = cm.remote_id_table.rb_node; 497 struct cm_timewait_info *timewait_info; 498 499 while (node) { 500 timewait_info = rb_entry(node, struct cm_timewait_info, 501 remote_id_node); 502 if (remote_id < timewait_info->work.remote_id) 503 node = node->rb_left; 504 else if (remote_id > timewait_info->work.remote_id) 505 node = node->rb_right; 506 else if (remote_ca_guid < timewait_info->remote_ca_guid) 507 node = node->rb_left; 508 else if (remote_ca_guid > timewait_info->remote_ca_guid) 509 node = node->rb_right; 510 else 511 return timewait_info; 512 } 513 return NULL; 514} 515 516static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info 517 *timewait_info) 518{ 519 struct rb_node **link = &cm.remote_qp_table.rb_node; 520 struct rb_node *parent = NULL; 521 struct cm_timewait_info *cur_timewait_info; 522 __be64 remote_ca_guid = timewait_info->remote_ca_guid; 523 __be32 remote_qpn = timewait_info->remote_qpn; 524 525 while (*link) { 526 parent = *link; 527 cur_timewait_info = rb_entry(parent, struct cm_timewait_info, 528 remote_qp_node); 529 if (remote_qpn < cur_timewait_info->remote_qpn) 530 link = &(*link)->rb_left; 531 else if (remote_qpn > cur_timewait_info->remote_qpn) 532 link = &(*link)->rb_right; 533 else if (remote_ca_guid < cur_timewait_info->remote_ca_guid) 534 link = &(*link)->rb_left; 535 else if (remote_ca_guid > cur_timewait_info->remote_ca_guid) 536 link = &(*link)->rb_right; 537 else 538 return cur_timewait_info; 539 } 540 timewait_info->inserted_remote_qp = 1; 541 rb_link_node(&timewait_info->remote_qp_node, parent, link); 542 rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table); 543 return NULL; 544} 545 546static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private 547 *cm_id_priv) 548{ 549 
struct rb_node **link = &cm.remote_sidr_table.rb_node; 550 struct rb_node *parent = NULL; 551 struct cm_id_private *cur_cm_id_priv; 552 union ib_gid *port_gid = &cm_id_priv->av.dgid; 553 __be32 remote_id = cm_id_priv->id.remote_id; 554 555 while (*link) { 556 parent = *link; 557 cur_cm_id_priv = rb_entry(parent, struct cm_id_private, 558 sidr_id_node); 559 if (remote_id < cur_cm_id_priv->id.remote_id) 560 link = &(*link)->rb_left; 561 else if (remote_id > cur_cm_id_priv->id.remote_id) 562 link = &(*link)->rb_right; 563 else { 564 int cmp; 565 cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid, 566 sizeof *port_gid); 567 if (cmp < 0) 568 link = &(*link)->rb_left; 569 else if (cmp > 0) 570 link = &(*link)->rb_right; 571 else 572 return cur_cm_id_priv; 573 } 574 } 575 rb_link_node(&cm_id_priv->sidr_id_node, parent, link); 576 rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 577 return NULL; 578} 579 580static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv, 581 enum ib_cm_sidr_status status) 582{ 583 struct ib_cm_sidr_rep_param param; 584 585 memset(¶m, 0, sizeof param); 586 param.status = status; 587 ib_send_cm_sidr_rep(&cm_id_priv->id, ¶m); 588} 589 590struct ib_cm_id *ib_create_cm_id(struct ib_device *device, 591 ib_cm_handler cm_handler, 592 void *context) 593{ 594 struct cm_id_private *cm_id_priv; 595 int ret; 596 597 cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL); 598 if (!cm_id_priv) 599 return ERR_PTR(-ENOMEM); 600 601 cm_id_priv->id.state = IB_CM_IDLE; 602 cm_id_priv->id.device = device; 603 cm_id_priv->id.cm_handler = cm_handler; 604 cm_id_priv->id.context = context; 605 cm_id_priv->id.remote_cm_qpn = 1; 606 ret = cm_alloc_id(cm_id_priv); 607 if (ret) 608 goto error; 609 610 spin_lock_init(&cm_id_priv->lock); 611 init_completion(&cm_id_priv->comp); 612 INIT_LIST_HEAD(&cm_id_priv->work_list); 613 atomic_set(&cm_id_priv->work_count, -1); 614 atomic_set(&cm_id_priv->refcount, 1); 615 return &cm_id_priv->id; 616 617error: 618 kfree(cm_id_priv); 619 return ERR_PTR(-ENOMEM); 620} 621EXPORT_SYMBOL(ib_create_cm_id); 622 623static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv) 624{ 625 struct cm_work *work; 626 627 if (list_empty(&cm_id_priv->work_list)) 628 return NULL; 629 630 work = list_entry(cm_id_priv->work_list.next, struct cm_work, list); 631 list_del(&work->list); 632 return work; 633} 634 635static void cm_free_work(struct cm_work *work) 636{ 637 if (work->mad_recv_wc) 638 ib_free_recv_mad(work->mad_recv_wc); 639 kfree(work); 640} 641 642static inline int cm_convert_to_ms(int iba_time) 643{ 644 /* approximate conversion to ms from 4.096us x 2^iba_time */ 645 return 1 << max(iba_time - 8, 0); 646} 647 648static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info) 649{ 650 unsigned long flags; 651 652 if (!timewait_info->inserted_remote_id && 653 !timewait_info->inserted_remote_qp) 654 return; 655 656 spin_lock_irqsave(&cm.lock, flags); 657 if (timewait_info->inserted_remote_id) { 658 rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table); 659 timewait_info->inserted_remote_id = 0; 660 } 661 662 if (timewait_info->inserted_remote_qp) { 663 rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table); 664 timewait_info->inserted_remote_qp = 0; 665 } 666 spin_unlock_irqrestore(&cm.lock, flags); 667} 668 669static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id) 670{ 671 struct cm_timewait_info *timewait_info; 672 673 timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL); 674 if (!timewait_info) 
675 return ERR_PTR(-ENOMEM); 676 677 timewait_info->work.local_id = local_id; 678 INIT_WORK(&timewait_info->work.work, cm_work_handler, 679 &timewait_info->work); 680 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; 681 return timewait_info; 682} 683 684static void cm_enter_timewait(struct cm_id_private *cm_id_priv) 685{ 686 int wait_time; 687 688 /* 689 * The cm_id could be destroyed by the user before we exit timewait. 690 * To protect against this, we search for the cm_id after exiting 691 * timewait before notifying the user that we've exited timewait. 692 */ 693 cm_id_priv->id.state = IB_CM_TIMEWAIT; 694 wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout); 695 queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, 696 msecs_to_jiffies(wait_time)); 697 cm_id_priv->timewait_info = NULL; 698} 699 700static void cm_reset_to_idle(struct cm_id_private *cm_id_priv) 701{ 702 cm_id_priv->id.state = IB_CM_IDLE; 703 if (cm_id_priv->timewait_info) { 704 cm_cleanup_timewait(cm_id_priv->timewait_info); 705 kfree(cm_id_priv->timewait_info); 706 cm_id_priv->timewait_info = NULL; 707 } 708} 709 710static void cm_destroy_id(struct ib_cm_id *cm_id, int err) 711{ 712 struct cm_id_private *cm_id_priv; 713 struct cm_work *work; 714 unsigned long flags; 715 716 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 717retest: 718 spin_lock_irqsave(&cm_id_priv->lock, flags); 719 switch (cm_id->state) { 720 case IB_CM_LISTEN: 721 cm_id->state = IB_CM_IDLE; 722 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 723 spin_lock_irqsave(&cm.lock, flags); 724 rb_erase(&cm_id_priv->service_node, &cm.listen_service_table); 725 spin_unlock_irqrestore(&cm.lock, flags); 726 break; 727 case IB_CM_SIDR_REQ_SENT: 728 cm_id->state = IB_CM_IDLE; 729 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 730 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 731 break; 732 case IB_CM_SIDR_REQ_RCVD: 733 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 734 cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT); 735 break; 736 case IB_CM_REQ_SENT: 737 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 738 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 739 ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT, 740 &cm_id_priv->av.port->cm_dev->ca_guid, 741 sizeof cm_id_priv->av.port->cm_dev->ca_guid, 742 NULL, 0); 743 break; 744 case IB_CM_REQ_RCVD: 745 if (err == -ENOMEM) { 746 /* Do not reject to allow future retries. 
*/ 747 cm_reset_to_idle(cm_id_priv); 748 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 749 } else { 750 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 751 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, 752 NULL, 0, NULL, 0); 753 } 754 break; 755 case IB_CM_MRA_REQ_RCVD: 756 case IB_CM_REP_SENT: 757 case IB_CM_MRA_REP_RCVD: 758 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 759 /* Fall through */ 760 case IB_CM_MRA_REQ_SENT: 761 case IB_CM_REP_RCVD: 762 case IB_CM_MRA_REP_SENT: 763 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 764 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, 765 NULL, 0, NULL, 0); 766 break; 767 case IB_CM_ESTABLISHED: 768 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 769 ib_send_cm_dreq(cm_id, NULL, 0); 770 goto retest; 771 case IB_CM_DREQ_SENT: 772 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 773 cm_enter_timewait(cm_id_priv); 774 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 775 break; 776 case IB_CM_DREQ_RCVD: 777 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 778 ib_send_cm_drep(cm_id, NULL, 0); 779 break; 780 default: 781 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 782 break; 783 } 784 785 cm_free_id(cm_id->local_id); 786 cm_deref_id(cm_id_priv); 787 wait_for_completion(&cm_id_priv->comp); 788 while ((work = cm_dequeue_work(cm_id_priv)) != NULL) 789 cm_free_work(work); 790 kfree(cm_id_priv->compare_data); 791 kfree(cm_id_priv->private_data); 792 kfree(cm_id_priv); 793} 794 795void ib_destroy_cm_id(struct ib_cm_id *cm_id) 796{ 797 cm_destroy_id(cm_id, 0); 798} 799EXPORT_SYMBOL(ib_destroy_cm_id); 800 801int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask, 802 struct ib_cm_compare_data *compare_data) 803{ 804 struct cm_id_private *cm_id_priv, *cur_cm_id_priv; 805 unsigned long flags; 806 int ret = 0; 807 808 service_mask = service_mask ? 
service_mask : 809 __constant_cpu_to_be64(~0ULL); 810 service_id &= service_mask; 811 if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID && 812 (service_id != IB_CM_ASSIGN_SERVICE_ID)) 813 return -EINVAL; 814 815 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 816 if (cm_id->state != IB_CM_IDLE) 817 return -EINVAL; 818 819 if (compare_data) { 820 cm_id_priv->compare_data = kzalloc(sizeof *compare_data, 821 GFP_KERNEL); 822 if (!cm_id_priv->compare_data) 823 return -ENOMEM; 824 cm_mask_copy(cm_id_priv->compare_data->data, 825 compare_data->data, compare_data->mask); 826 memcpy(cm_id_priv->compare_data->mask, compare_data->mask, 827 IB_CM_COMPARE_SIZE); 828 } 829 830 cm_id->state = IB_CM_LISTEN; 831 832 spin_lock_irqsave(&cm.lock, flags); 833 if (service_id == IB_CM_ASSIGN_SERVICE_ID) { 834 cm_id->service_id = cpu_to_be64(cm.listen_service_id++); 835 cm_id->service_mask = __constant_cpu_to_be64(~0ULL); 836 } else { 837 cm_id->service_id = service_id; 838 cm_id->service_mask = service_mask; 839 } 840 cur_cm_id_priv = cm_insert_listen(cm_id_priv); 841 spin_unlock_irqrestore(&cm.lock, flags); 842 843 if (cur_cm_id_priv) { 844 cm_id->state = IB_CM_IDLE; 845 kfree(cm_id_priv->compare_data); 846 cm_id_priv->compare_data = NULL; 847 ret = -EBUSY; 848 } 849 return ret; 850} 851EXPORT_SYMBOL(ib_cm_listen); 852 853static __be64 cm_form_tid(struct cm_id_private *cm_id_priv, 854 enum cm_msg_sequence msg_seq) 855{ 856 u64 hi_tid, low_tid; 857 858 hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32; 859 low_tid = (u64) ((__force u32)cm_id_priv->id.local_id | 860 (msg_seq << 30)); 861 return cpu_to_be64(hi_tid | low_tid); 862} 863 864static void cm_format_mad_hdr(struct ib_mad_hdr *hdr, 865 __be16 attr_id, __be64 tid) 866{ 867 hdr->base_version = IB_MGMT_BASE_VERSION; 868 hdr->mgmt_class = IB_MGMT_CLASS_CM; 869 hdr->class_version = IB_CM_CLASS_VERSION; 870 hdr->method = IB_MGMT_METHOD_SEND; 871 hdr->attr_id = attr_id; 872 hdr->tid = tid; 873} 874 875static void cm_format_req(struct cm_req_msg *req_msg, 876 struct cm_id_private *cm_id_priv, 877 struct ib_cm_req_param *param) 878{ 879 cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID, 880 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ)); 881 882 req_msg->local_comm_id = cm_id_priv->id.local_id; 883 req_msg->service_id = param->service_id; 884 req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid; 885 cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num)); 886 cm_req_set_resp_res(req_msg, param->responder_resources); 887 cm_req_set_init_depth(req_msg, param->initiator_depth); 888 cm_req_set_remote_resp_timeout(req_msg, 889 param->remote_cm_response_timeout); 890 cm_req_set_qp_type(req_msg, param->qp_type); 891 cm_req_set_flow_ctrl(req_msg, param->flow_control); 892 cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn)); 893 cm_req_set_local_resp_timeout(req_msg, 894 param->local_cm_response_timeout); 895 cm_req_set_retry_count(req_msg, param->retry_count); 896 req_msg->pkey = param->primary_path->pkey; 897 cm_req_set_path_mtu(req_msg, param->primary_path->mtu); 898 cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count); 899 cm_req_set_max_cm_retries(req_msg, param->max_cm_retries); 900 cm_req_set_srq(req_msg, param->srq); 901 902 req_msg->primary_local_lid = param->primary_path->slid; 903 req_msg->primary_remote_lid = param->primary_path->dlid; 904 req_msg->primary_local_gid = param->primary_path->sgid; 905 req_msg->primary_remote_gid = param->primary_path->dgid; 906 
cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label); 907 cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate); 908 req_msg->primary_traffic_class = param->primary_path->traffic_class; 909 req_msg->primary_hop_limit = param->primary_path->hop_limit; 910 cm_req_set_primary_sl(req_msg, param->primary_path->sl); 911 cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */ 912 cm_req_set_primary_local_ack_timeout(req_msg, 913 min(31, param->primary_path->packet_life_time + 1)); 914 915 if (param->alternate_path) { 916 req_msg->alt_local_lid = param->alternate_path->slid; 917 req_msg->alt_remote_lid = param->alternate_path->dlid; 918 req_msg->alt_local_gid = param->alternate_path->sgid; 919 req_msg->alt_remote_gid = param->alternate_path->dgid; 920 cm_req_set_alt_flow_label(req_msg, 921 param->alternate_path->flow_label); 922 cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate); 923 req_msg->alt_traffic_class = param->alternate_path->traffic_class; 924 req_msg->alt_hop_limit = param->alternate_path->hop_limit; 925 cm_req_set_alt_sl(req_msg, param->alternate_path->sl); 926 cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */ 927 cm_req_set_alt_local_ack_timeout(req_msg, 928 min(31, param->alternate_path->packet_life_time + 1)); 929 } 930 931 if (param->private_data && param->private_data_len) 932 memcpy(req_msg->private_data, param->private_data, 933 param->private_data_len); 934} 935 936static int cm_validate_req_param(struct ib_cm_req_param *param) 937{ 938 /* peer-to-peer not supported */ 939 if (param->peer_to_peer) 940 return -EINVAL; 941 942 if (!param->primary_path) 943 return -EINVAL; 944 945 if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC) 946 return -EINVAL; 947 948 if (param->private_data && 949 param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE) 950 return -EINVAL; 951 952 if (param->alternate_path && 953 (param->alternate_path->pkey != param->primary_path->pkey || 954 param->alternate_path->mtu != param->primary_path->mtu)) 955 return -EINVAL; 956 957 return 0; 958} 959 960int ib_send_cm_req(struct ib_cm_id *cm_id, 961 struct ib_cm_req_param *param) 962{ 963 struct cm_id_private *cm_id_priv; 964 struct cm_req_msg *req_msg; 965 unsigned long flags; 966 int ret; 967 968 ret = cm_validate_req_param(param); 969 if (ret) 970 return ret; 971 972 /* Verify that we're not in timewait. 
*/ 973 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 974 spin_lock_irqsave(&cm_id_priv->lock, flags); 975 if (cm_id->state != IB_CM_IDLE) { 976 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 977 ret = -EINVAL; 978 goto out; 979 } 980 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 981 982 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> 983 id.local_id); 984 if (IS_ERR(cm_id_priv->timewait_info)) { 985 ret = PTR_ERR(cm_id_priv->timewait_info); 986 goto out; 987 } 988 989 ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); 990 if (ret) 991 goto error1; 992 if (param->alternate_path) { 993 ret = cm_init_av_by_path(param->alternate_path, 994 &cm_id_priv->alt_av); 995 if (ret) 996 goto error1; 997 } 998 cm_id->service_id = param->service_id; 999 cm_id->service_mask = __constant_cpu_to_be64(~0ULL); 1000 cm_id_priv->timeout_ms = cm_convert_to_ms( 1001 param->primary_path->packet_life_time) * 2 + 1002 cm_convert_to_ms( 1003 param->remote_cm_response_timeout); 1004 cm_id_priv->max_cm_retries = param->max_cm_retries; 1005 cm_id_priv->initiator_depth = param->initiator_depth; 1006 cm_id_priv->responder_resources = param->responder_resources; 1007 cm_id_priv->retry_count = param->retry_count; 1008 cm_id_priv->path_mtu = param->primary_path->mtu; 1009 cm_id_priv->qp_type = param->qp_type; 1010 1011 ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg); 1012 if (ret) 1013 goto error1; 1014 1015 req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad; 1016 cm_format_req(req_msg, cm_id_priv, param); 1017 cm_id_priv->tid = req_msg->hdr.tid; 1018 cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms; 1019 cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT; 1020 1021 cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg); 1022 cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg); 1023 cm_id_priv->local_ack_timeout = 1024 cm_req_get_primary_local_ack_timeout(req_msg); 1025 1026 spin_lock_irqsave(&cm_id_priv->lock, flags); 1027 ret = ib_post_send_mad(cm_id_priv->msg, NULL); 1028 if (ret) { 1029 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1030 goto error2; 1031 } 1032 BUG_ON(cm_id->state != IB_CM_IDLE); 1033 cm_id->state = IB_CM_REQ_SENT; 1034 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1035 return 0; 1036 1037error2: cm_free_msg(cm_id_priv->msg); 1038error1: kfree(cm_id_priv->timewait_info); 1039out: return ret; 1040} 1041EXPORT_SYMBOL(ib_send_cm_req); 1042 1043static int cm_issue_rej(struct cm_port *port, 1044 struct ib_mad_recv_wc *mad_recv_wc, 1045 enum ib_cm_rej_reason reason, 1046 enum cm_msg_response msg_rejected, 1047 void *ari, u8 ari_length) 1048{ 1049 struct ib_mad_send_buf *msg = NULL; 1050 struct cm_rej_msg *rej_msg, *rcv_msg; 1051 int ret; 1052 1053 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg); 1054 if (ret) 1055 return ret; 1056 1057 /* We just need common CM header information. Cast to any message. 
*/ 1058 rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad; 1059 rej_msg = (struct cm_rej_msg *) msg->mad; 1060 1061 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid); 1062 rej_msg->remote_comm_id = rcv_msg->local_comm_id; 1063 rej_msg->local_comm_id = rcv_msg->remote_comm_id; 1064 cm_rej_set_msg_rejected(rej_msg, msg_rejected); 1065 rej_msg->reason = cpu_to_be16(reason); 1066 1067 if (ari && ari_length) { 1068 cm_rej_set_reject_info_len(rej_msg, ari_length); 1069 memcpy(rej_msg->ari, ari, ari_length); 1070 } 1071 1072 ret = ib_post_send_mad(msg, NULL); 1073 if (ret) 1074 cm_free_msg(msg); 1075 1076 return ret; 1077} 1078 1079static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid, 1080 __be32 local_qpn, __be32 remote_qpn) 1081{ 1082 return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) || 1083 ((local_ca_guid == remote_ca_guid) && 1084 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn)))); 1085} 1086 1087static void cm_format_paths_from_req(struct cm_req_msg *req_msg, 1088 struct ib_sa_path_rec *primary_path, 1089 struct ib_sa_path_rec *alt_path) 1090{ 1091 memset(primary_path, 0, sizeof *primary_path); 1092 primary_path->dgid = req_msg->primary_local_gid; 1093 primary_path->sgid = req_msg->primary_remote_gid; 1094 primary_path->dlid = req_msg->primary_local_lid; 1095 primary_path->slid = req_msg->primary_remote_lid; 1096 primary_path->flow_label = cm_req_get_primary_flow_label(req_msg); 1097 primary_path->hop_limit = req_msg->primary_hop_limit; 1098 primary_path->traffic_class = req_msg->primary_traffic_class; 1099 primary_path->reversible = 1; 1100 primary_path->pkey = req_msg->pkey; 1101 primary_path->sl = cm_req_get_primary_sl(req_msg); 1102 primary_path->mtu_selector = IB_SA_EQ; 1103 primary_path->mtu = cm_req_get_path_mtu(req_msg); 1104 primary_path->rate_selector = IB_SA_EQ; 1105 primary_path->rate = cm_req_get_primary_packet_rate(req_msg); 1106 primary_path->packet_life_time_selector = IB_SA_EQ; 1107 primary_path->packet_life_time = 1108 cm_req_get_primary_local_ack_timeout(req_msg); 1109 primary_path->packet_life_time -= (primary_path->packet_life_time > 0); 1110 1111 if (req_msg->alt_local_lid) { 1112 memset(alt_path, 0, sizeof *alt_path); 1113 alt_path->dgid = req_msg->alt_local_gid; 1114 alt_path->sgid = req_msg->alt_remote_gid; 1115 alt_path->dlid = req_msg->alt_local_lid; 1116 alt_path->slid = req_msg->alt_remote_lid; 1117 alt_path->flow_label = cm_req_get_alt_flow_label(req_msg); 1118 alt_path->hop_limit = req_msg->alt_hop_limit; 1119 alt_path->traffic_class = req_msg->alt_traffic_class; 1120 alt_path->reversible = 1; 1121 alt_path->pkey = req_msg->pkey; 1122 alt_path->sl = cm_req_get_alt_sl(req_msg); 1123 alt_path->mtu_selector = IB_SA_EQ; 1124 alt_path->mtu = cm_req_get_path_mtu(req_msg); 1125 alt_path->rate_selector = IB_SA_EQ; 1126 alt_path->rate = cm_req_get_alt_packet_rate(req_msg); 1127 alt_path->packet_life_time_selector = IB_SA_EQ; 1128 alt_path->packet_life_time = 1129 cm_req_get_alt_local_ack_timeout(req_msg); 1130 alt_path->packet_life_time -= (alt_path->packet_life_time > 0); 1131 } 1132} 1133 1134static void cm_format_req_event(struct cm_work *work, 1135 struct cm_id_private *cm_id_priv, 1136 struct ib_cm_id *listen_id) 1137{ 1138 struct cm_req_msg *req_msg; 1139 struct ib_cm_req_event_param *param; 1140 1141 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; 1142 param = &work->cm_event.param.req_rcvd; 1143 param->listen_id = listen_id; 1144 param->port = cm_id_priv->av.port->port_num; 
1145 param->primary_path = &work->path[0]; 1146 if (req_msg->alt_local_lid) 1147 param->alternate_path = &work->path[1]; 1148 else 1149 param->alternate_path = NULL; 1150 param->remote_ca_guid = req_msg->local_ca_guid; 1151 param->remote_qkey = be32_to_cpu(req_msg->local_qkey); 1152 param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg)); 1153 param->qp_type = cm_req_get_qp_type(req_msg); 1154 param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg)); 1155 param->responder_resources = cm_req_get_init_depth(req_msg); 1156 param->initiator_depth = cm_req_get_resp_res(req_msg); 1157 param->local_cm_response_timeout = 1158 cm_req_get_remote_resp_timeout(req_msg); 1159 param->flow_control = cm_req_get_flow_ctrl(req_msg); 1160 param->remote_cm_response_timeout = 1161 cm_req_get_local_resp_timeout(req_msg); 1162 param->retry_count = cm_req_get_retry_count(req_msg); 1163 param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg); 1164 param->srq = cm_req_get_srq(req_msg); 1165 work->cm_event.private_data = &req_msg->private_data; 1166} 1167 1168static void cm_process_work(struct cm_id_private *cm_id_priv, 1169 struct cm_work *work) 1170{ 1171 unsigned long flags; 1172 int ret; 1173 1174 /* We will typically only have the current event to report. */ 1175 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); 1176 cm_free_work(work); 1177 1178 while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) { 1179 spin_lock_irqsave(&cm_id_priv->lock, flags); 1180 work = cm_dequeue_work(cm_id_priv); 1181 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1182 BUG_ON(!work); 1183 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, 1184 &work->cm_event); 1185 cm_free_work(work); 1186 } 1187 cm_deref_id(cm_id_priv); 1188 if (ret) 1189 cm_destroy_id(&cm_id_priv->id, ret); 1190} 1191 1192static void cm_format_mra(struct cm_mra_msg *mra_msg, 1193 struct cm_id_private *cm_id_priv, 1194 enum cm_msg_response msg_mraed, u8 service_timeout, 1195 const void *private_data, u8 private_data_len) 1196{ 1197 cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid); 1198 cm_mra_set_msg_mraed(mra_msg, msg_mraed); 1199 mra_msg->local_comm_id = cm_id_priv->id.local_id; 1200 mra_msg->remote_comm_id = cm_id_priv->id.remote_id; 1201 cm_mra_set_service_timeout(mra_msg, service_timeout); 1202 1203 if (private_data && private_data_len) 1204 memcpy(mra_msg->private_data, private_data, private_data_len); 1205} 1206 1207static void cm_format_rej(struct cm_rej_msg *rej_msg, 1208 struct cm_id_private *cm_id_priv, 1209 enum ib_cm_rej_reason reason, 1210 void *ari, 1211 u8 ari_length, 1212 const void *private_data, 1213 u8 private_data_len) 1214{ 1215 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid); 1216 rej_msg->remote_comm_id = cm_id_priv->id.remote_id; 1217 1218 switch(cm_id_priv->id.state) { 1219 case IB_CM_REQ_RCVD: 1220 rej_msg->local_comm_id = 0; 1221 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ); 1222 break; 1223 case IB_CM_MRA_REQ_SENT: 1224 rej_msg->local_comm_id = cm_id_priv->id.local_id; 1225 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ); 1226 break; 1227 case IB_CM_REP_RCVD: 1228 case IB_CM_MRA_REP_SENT: 1229 rej_msg->local_comm_id = cm_id_priv->id.local_id; 1230 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP); 1231 break; 1232 default: 1233 rej_msg->local_comm_id = cm_id_priv->id.local_id; 1234 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER); 1235 break; 1236 } 1237 1238 rej_msg->reason = cpu_to_be16(reason); 1239 if (ari && ari_length) 
{ 1240 cm_rej_set_reject_info_len(rej_msg, ari_length); 1241 memcpy(rej_msg->ari, ari, ari_length); 1242 } 1243 1244 if (private_data && private_data_len) 1245 memcpy(rej_msg->private_data, private_data, private_data_len); 1246} 1247 1248static void cm_dup_req_handler(struct cm_work *work, 1249 struct cm_id_private *cm_id_priv) 1250{ 1251 struct ib_mad_send_buf *msg = NULL; 1252 unsigned long flags; 1253 int ret; 1254 1255 /* Quick state check to discard duplicate REQs. */ 1256 if (cm_id_priv->id.state == IB_CM_REQ_RCVD) 1257 return; 1258 1259 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); 1260 if (ret) 1261 return; 1262 1263 spin_lock_irqsave(&cm_id_priv->lock, flags); 1264 switch (cm_id_priv->id.state) { 1265 case IB_CM_MRA_REQ_SENT: 1266 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 1267 CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout, 1268 cm_id_priv->private_data, 1269 cm_id_priv->private_data_len); 1270 break; 1271 case IB_CM_TIMEWAIT: 1272 cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv, 1273 IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0); 1274 break; 1275 default: 1276 goto unlock; 1277 } 1278 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1279 1280 ret = ib_post_send_mad(msg, NULL); 1281 if (ret) 1282 goto free; 1283 return; 1284 1285unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1286free: cm_free_msg(msg); 1287} 1288 1289static struct cm_id_private * cm_match_req(struct cm_work *work, 1290 struct cm_id_private *cm_id_priv) 1291{ 1292 struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv; 1293 struct cm_timewait_info *timewait_info; 1294 struct cm_req_msg *req_msg; 1295 unsigned long flags; 1296 1297 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; 1298 1299 /* Check for duplicate REQ and stale connections. */ 1300 spin_lock_irqsave(&cm.lock, flags); 1301 timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info); 1302 if (!timewait_info) 1303 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info); 1304 1305 if (timewait_info) { 1306 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id, 1307 timewait_info->work.remote_id); 1308 spin_unlock_irqrestore(&cm.lock, flags); 1309 if (cur_cm_id_priv) { 1310 cm_dup_req_handler(work, cur_cm_id_priv); 1311 cm_deref_id(cur_cm_id_priv); 1312 } else 1313 cm_issue_rej(work->port, work->mad_recv_wc, 1314 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ, 1315 NULL, 0); 1316 goto error; 1317 } 1318 1319 /* Find matching listen request. 
*/ 1320 listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device, 1321 req_msg->service_id, 1322 req_msg->private_data); 1323 if (!listen_cm_id_priv) { 1324 spin_unlock_irqrestore(&cm.lock, flags); 1325 cm_issue_rej(work->port, work->mad_recv_wc, 1326 IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ, 1327 NULL, 0); 1328 goto error; 1329 } 1330 atomic_inc(&listen_cm_id_priv->refcount); 1331 atomic_inc(&cm_id_priv->refcount); 1332 cm_id_priv->id.state = IB_CM_REQ_RCVD; 1333 atomic_inc(&cm_id_priv->work_count); 1334 spin_unlock_irqrestore(&cm.lock, flags); 1335 return listen_cm_id_priv; 1336 1337error: cm_cleanup_timewait(cm_id_priv->timewait_info); 1338 return NULL; 1339} 1340 1341static int cm_req_handler(struct cm_work *work) 1342{ 1343 struct ib_cm_id *cm_id; 1344 struct cm_id_private *cm_id_priv, *listen_cm_id_priv; 1345 struct cm_req_msg *req_msg; 1346 int ret; 1347 1348 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; 1349 1350 cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL); 1351 if (IS_ERR(cm_id)) 1352 return PTR_ERR(cm_id); 1353 1354 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1355 cm_id_priv->id.remote_id = req_msg->local_comm_id; 1356 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 1357 work->mad_recv_wc->recv_buf.grh, 1358 &cm_id_priv->av); 1359 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> 1360 id.local_id); 1361 if (IS_ERR(cm_id_priv->timewait_info)) { 1362 ret = PTR_ERR(cm_id_priv->timewait_info); 1363 goto destroy; 1364 } 1365 cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id; 1366 cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid; 1367 cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg); 1368 1369 listen_cm_id_priv = cm_match_req(work, cm_id_priv); 1370 if (!listen_cm_id_priv) { 1371 ret = -EINVAL; 1372 kfree(cm_id_priv->timewait_info); 1373 goto destroy; 1374 } 1375 1376 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; 1377 cm_id_priv->id.context = listen_cm_id_priv->id.context; 1378 cm_id_priv->id.service_id = req_msg->service_id; 1379 cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL); 1380 1381 cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); 1382 ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); 1383 if (ret) { 1384 ib_get_cached_gid(work->port->cm_dev->device, 1385 work->port->port_num, 0, &work->path[0].sgid); 1386 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID, 1387 &work->path[0].sgid, sizeof work->path[0].sgid, 1388 NULL, 0); 1389 goto rejected; 1390 } 1391 if (req_msg->alt_local_lid) { 1392 ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av); 1393 if (ret) { 1394 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID, 1395 &work->path[0].sgid, 1396 sizeof work->path[0].sgid, NULL, 0); 1397 goto rejected; 1398 } 1399 } 1400 cm_id_priv->tid = req_msg->hdr.tid; 1401 cm_id_priv->timeout_ms = cm_convert_to_ms( 1402 cm_req_get_local_resp_timeout(req_msg)); 1403 cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg); 1404 cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg); 1405 cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg); 1406 cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg); 1407 cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg); 1408 cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg); 1409 cm_id_priv->local_ack_timeout = 1410 cm_req_get_primary_local_ack_timeout(req_msg); 1411 cm_id_priv->retry_count = cm_req_get_retry_count(req_msg); 1412 
cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg); 1413 cm_id_priv->qp_type = cm_req_get_qp_type(req_msg); 1414 1415 cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id); 1416 cm_process_work(cm_id_priv, work); 1417 cm_deref_id(listen_cm_id_priv); 1418 return 0; 1419 1420rejected: 1421 atomic_dec(&cm_id_priv->refcount); 1422 cm_deref_id(listen_cm_id_priv); 1423destroy: 1424 ib_destroy_cm_id(cm_id); 1425 return ret; 1426} 1427 1428static void cm_format_rep(struct cm_rep_msg *rep_msg, 1429 struct cm_id_private *cm_id_priv, 1430 struct ib_cm_rep_param *param) 1431{ 1432 cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid); 1433 rep_msg->local_comm_id = cm_id_priv->id.local_id; 1434 rep_msg->remote_comm_id = cm_id_priv->id.remote_id; 1435 cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num)); 1436 cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn)); 1437 rep_msg->resp_resources = param->responder_resources; 1438 rep_msg->initiator_depth = param->initiator_depth; 1439 cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay); 1440 cm_rep_set_failover(rep_msg, param->failover_accepted); 1441 cm_rep_set_flow_ctrl(rep_msg, param->flow_control); 1442 cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count); 1443 cm_rep_set_srq(rep_msg, param->srq); 1444 rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid; 1445 1446 if (param->private_data && param->private_data_len) 1447 memcpy(rep_msg->private_data, param->private_data, 1448 param->private_data_len); 1449} 1450 1451int ib_send_cm_rep(struct ib_cm_id *cm_id, 1452 struct ib_cm_rep_param *param) 1453{ 1454 struct cm_id_private *cm_id_priv; 1455 struct ib_mad_send_buf *msg; 1456 struct cm_rep_msg *rep_msg; 1457 unsigned long flags; 1458 int ret; 1459 1460 if (param->private_data && 1461 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE) 1462 return -EINVAL; 1463 1464 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1465 spin_lock_irqsave(&cm_id_priv->lock, flags); 1466 if (cm_id->state != IB_CM_REQ_RCVD && 1467 cm_id->state != IB_CM_MRA_REQ_SENT) { 1468 ret = -EINVAL; 1469 goto out; 1470 } 1471 1472 ret = cm_alloc_msg(cm_id_priv, &msg); 1473 if (ret) 1474 goto out; 1475 1476 rep_msg = (struct cm_rep_msg *) msg->mad; 1477 cm_format_rep(rep_msg, cm_id_priv, param); 1478 msg->timeout_ms = cm_id_priv->timeout_ms; 1479 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT; 1480 1481 ret = ib_post_send_mad(msg, NULL); 1482 if (ret) { 1483 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1484 cm_free_msg(msg); 1485 return ret; 1486 } 1487 1488 cm_id->state = IB_CM_REP_SENT; 1489 cm_id_priv->msg = msg; 1490 cm_id_priv->initiator_depth = param->initiator_depth; 1491 cm_id_priv->responder_resources = param->responder_resources; 1492 cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg); 1493 cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg); 1494 1495out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1496 return ret; 1497} 1498EXPORT_SYMBOL(ib_send_cm_rep); 1499 1500static void cm_format_rtu(struct cm_rtu_msg *rtu_msg, 1501 struct cm_id_private *cm_id_priv, 1502 const void *private_data, 1503 u8 private_data_len) 1504{ 1505 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid); 1506 rtu_msg->local_comm_id = cm_id_priv->id.local_id; 1507 rtu_msg->remote_comm_id = cm_id_priv->id.remote_id; 1508 1509 if (private_data && private_data_len) 1510 memcpy(rtu_msg->private_data, private_data, private_data_len); 1511} 1512 1513int ib_send_cm_rtu(struct 
ib_cm_id *cm_id, 1514 const void *private_data, 1515 u8 private_data_len) 1516{ 1517 struct cm_id_private *cm_id_priv; 1518 struct ib_mad_send_buf *msg; 1519 unsigned long flags; 1520 void *data; 1521 int ret; 1522 1523 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE) 1524 return -EINVAL; 1525 1526 data = cm_copy_private_data(private_data, private_data_len); 1527 if (IS_ERR(data)) 1528 return PTR_ERR(data); 1529 1530 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1531 spin_lock_irqsave(&cm_id_priv->lock, flags); 1532 if (cm_id->state != IB_CM_REP_RCVD && 1533 cm_id->state != IB_CM_MRA_REP_SENT) { 1534 ret = -EINVAL; 1535 goto error; 1536 } 1537 1538 ret = cm_alloc_msg(cm_id_priv, &msg); 1539 if (ret) 1540 goto error; 1541 1542 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, 1543 private_data, private_data_len); 1544 1545 ret = ib_post_send_mad(msg, NULL); 1546 if (ret) { 1547 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1548 cm_free_msg(msg); 1549 kfree(data); 1550 return ret; 1551 } 1552 1553 cm_id->state = IB_CM_ESTABLISHED; 1554 cm_set_private_data(cm_id_priv, data, private_data_len); 1555 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1556 return 0; 1557 1558error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1559 kfree(data); 1560 return ret; 1561} 1562EXPORT_SYMBOL(ib_send_cm_rtu); 1563 1564static void cm_format_rep_event(struct cm_work *work) 1565{ 1566 struct cm_rep_msg *rep_msg; 1567 struct ib_cm_rep_event_param *param; 1568 1569 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; 1570 param = &work->cm_event.param.rep_rcvd; 1571 param->remote_ca_guid = rep_msg->local_ca_guid; 1572 param->remote_qkey = be32_to_cpu(rep_msg->local_qkey); 1573 param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg)); 1574 param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg)); 1575 param->responder_resources = rep_msg->initiator_depth; 1576 param->initiator_depth = rep_msg->resp_resources; 1577 param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg); 1578 param->failover_accepted = cm_rep_get_failover(rep_msg); 1579 param->flow_control = cm_rep_get_flow_ctrl(rep_msg); 1580 param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg); 1581 param->srq = cm_rep_get_srq(rep_msg); 1582 work->cm_event.private_data = &rep_msg->private_data; 1583} 1584 1585static void cm_dup_rep_handler(struct cm_work *work) 1586{ 1587 struct cm_id_private *cm_id_priv; 1588 struct cm_rep_msg *rep_msg; 1589 struct ib_mad_send_buf *msg = NULL; 1590 unsigned long flags; 1591 int ret; 1592 1593 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad; 1594 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 1595 rep_msg->local_comm_id); 1596 if (!cm_id_priv) 1597 return; 1598 1599 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); 1600 if (ret) 1601 goto deref; 1602 1603 spin_lock_irqsave(&cm_id_priv->lock, flags); 1604 if (cm_id_priv->id.state == IB_CM_ESTABLISHED) 1605 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, 1606 cm_id_priv->private_data, 1607 cm_id_priv->private_data_len); 1608 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT) 1609 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 1610 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout, 1611 cm_id_priv->private_data, 1612 cm_id_priv->private_data_len); 1613 else 1614 goto unlock; 1615 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1616 1617 ret = ib_post_send_mad(msg, NULL); 1618 if (ret) 1619 goto free; 1620 goto deref; 1621 
1622unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1623free: cm_free_msg(msg); 1624deref: cm_deref_id(cm_id_priv); 1625} 1626 1627static int cm_rep_handler(struct cm_work *work) 1628{ 1629 struct cm_id_private *cm_id_priv; 1630 struct cm_rep_msg *rep_msg; 1631 unsigned long flags; 1632 int ret; 1633 1634 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; 1635 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0); 1636 if (!cm_id_priv) { 1637 cm_dup_rep_handler(work); 1638 return -EINVAL; 1639 } 1640 1641 cm_format_rep_event(work); 1642 1643 spin_lock_irqsave(&cm_id_priv->lock, flags); 1644 switch (cm_id_priv->id.state) { 1645 case IB_CM_REQ_SENT: 1646 case IB_CM_MRA_REQ_RCVD: 1647 break; 1648 default: 1649 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1650 ret = -EINVAL; 1651 goto error; 1652 } 1653 1654 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id; 1655 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid; 1656 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg); 1657 1658 spin_lock(&cm.lock); 1659 /* Check for duplicate REP. */ 1660 if (cm_insert_remote_id(cm_id_priv->timewait_info)) { 1661 spin_unlock(&cm.lock); 1662 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1663 ret = -EINVAL; 1664 goto error; 1665 } 1666 /* Check for a stale connection. */ 1667 if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) { 1668 rb_erase(&cm_id_priv->timewait_info->remote_id_node, 1669 &cm.remote_id_table); 1670 cm_id_priv->timewait_info->inserted_remote_id = 0; 1671 spin_unlock(&cm.lock); 1672 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1673 cm_issue_rej(work->port, work->mad_recv_wc, 1674 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP, 1675 NULL, 0); 1676 ret = -EINVAL; 1677 goto error; 1678 } 1679 spin_unlock(&cm.lock); 1680 1681 cm_id_priv->id.state = IB_CM_REP_RCVD; 1682 cm_id_priv->id.remote_id = rep_msg->local_comm_id; 1683 cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg); 1684 cm_id_priv->initiator_depth = rep_msg->resp_resources; 1685 cm_id_priv->responder_resources = rep_msg->initiator_depth; 1686 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg); 1687 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg); 1688 1689 /* todo: handle peer_to_peer */ 1690 1691 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1692 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1693 if (!ret) 1694 list_add_tail(&work->list, &cm_id_priv->work_list); 1695 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1696 1697 if (ret) 1698 cm_process_work(cm_id_priv, work); 1699 else 1700 cm_deref_id(cm_id_priv); 1701 return 0; 1702 1703error: 1704 cm_deref_id(cm_id_priv); 1705 return ret; 1706} 1707 1708static int cm_establish_handler(struct cm_work *work) 1709{ 1710 struct cm_id_private *cm_id_priv; 1711 unsigned long flags; 1712 int ret; 1713 1714 /* See comment in ib_cm_establish about lookup. 
*/ 1715 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id); 1716 if (!cm_id_priv) 1717 return -EINVAL; 1718 1719 spin_lock_irqsave(&cm_id_priv->lock, flags); 1720 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) { 1721 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1722 goto out; 1723 } 1724 1725 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1726 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1727 if (!ret) 1728 list_add_tail(&work->list, &cm_id_priv->work_list); 1729 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1730 1731 if (ret) 1732 cm_process_work(cm_id_priv, work); 1733 else 1734 cm_deref_id(cm_id_priv); 1735 return 0; 1736out: 1737 cm_deref_id(cm_id_priv); 1738 return -EINVAL; 1739} 1740 1741static int cm_rtu_handler(struct cm_work *work) 1742{ 1743 struct cm_id_private *cm_id_priv; 1744 struct cm_rtu_msg *rtu_msg; 1745 unsigned long flags; 1746 int ret; 1747 1748 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad; 1749 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id, 1750 rtu_msg->local_comm_id); 1751 if (!cm_id_priv) 1752 return -EINVAL; 1753 1754 work->cm_event.private_data = &rtu_msg->private_data; 1755 1756 spin_lock_irqsave(&cm_id_priv->lock, flags); 1757 if (cm_id_priv->id.state != IB_CM_REP_SENT && 1758 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { 1759 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1760 goto out; 1761 } 1762 cm_id_priv->id.state = IB_CM_ESTABLISHED; 1763 1764 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1765 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1766 if (!ret) 1767 list_add_tail(&work->list, &cm_id_priv->work_list); 1768 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1769 1770 if (ret) 1771 cm_process_work(cm_id_priv, work); 1772 else 1773 cm_deref_id(cm_id_priv); 1774 return 0; 1775out: 1776 cm_deref_id(cm_id_priv); 1777 return -EINVAL; 1778} 1779 1780static void cm_format_dreq(struct cm_dreq_msg *dreq_msg, 1781 struct cm_id_private *cm_id_priv, 1782 const void *private_data, 1783 u8 private_data_len) 1784{ 1785 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID, 1786 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ)); 1787 dreq_msg->local_comm_id = cm_id_priv->id.local_id; 1788 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id; 1789 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn); 1790 1791 if (private_data && private_data_len) 1792 memcpy(dreq_msg->private_data, private_data, private_data_len); 1793} 1794 1795int ib_send_cm_dreq(struct ib_cm_id *cm_id, 1796 const void *private_data, 1797 u8 private_data_len) 1798{ 1799 struct cm_id_private *cm_id_priv; 1800 struct ib_mad_send_buf *msg; 1801 unsigned long flags; 1802 int ret; 1803 1804 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE) 1805 return -EINVAL; 1806 1807 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1808 spin_lock_irqsave(&cm_id_priv->lock, flags); 1809 if (cm_id->state != IB_CM_ESTABLISHED) { 1810 ret = -EINVAL; 1811 goto out; 1812 } 1813 1814 ret = cm_alloc_msg(cm_id_priv, &msg); 1815 if (ret) { 1816 cm_enter_timewait(cm_id_priv); 1817 goto out; 1818 } 1819 1820 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, 1821 private_data, private_data_len); 1822 msg->timeout_ms = cm_id_priv->timeout_ms; 1823 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT; 1824 1825 ret = ib_post_send_mad(msg, NULL); 1826 if (ret) { 1827 cm_enter_timewait(cm_id_priv); 1828 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1829 cm_free_msg(msg); 1830 return ret; 1831 } 
1832 1833 cm_id->state = IB_CM_DREQ_SENT; 1834 cm_id_priv->msg = msg; 1835out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1836 return ret; 1837} 1838EXPORT_SYMBOL(ib_send_cm_dreq); 1839 1840static void cm_format_drep(struct cm_drep_msg *drep_msg, 1841 struct cm_id_private *cm_id_priv, 1842 const void *private_data, 1843 u8 private_data_len) 1844{ 1845 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid); 1846 drep_msg->local_comm_id = cm_id_priv->id.local_id; 1847 drep_msg->remote_comm_id = cm_id_priv->id.remote_id; 1848 1849 if (private_data && private_data_len) 1850 memcpy(drep_msg->private_data, private_data, private_data_len); 1851} 1852 1853int ib_send_cm_drep(struct ib_cm_id *cm_id, 1854 const void *private_data, 1855 u8 private_data_len) 1856{ 1857 struct cm_id_private *cm_id_priv; 1858 struct ib_mad_send_buf *msg; 1859 unsigned long flags; 1860 void *data; 1861 int ret; 1862 1863 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE) 1864 return -EINVAL; 1865 1866 data = cm_copy_private_data(private_data, private_data_len); 1867 if (IS_ERR(data)) 1868 return PTR_ERR(data); 1869 1870 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1871 spin_lock_irqsave(&cm_id_priv->lock, flags); 1872 if (cm_id->state != IB_CM_DREQ_RCVD) { 1873 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1874 kfree(data); 1875 return -EINVAL; 1876 } 1877 1878 cm_set_private_data(cm_id_priv, data, private_data_len); 1879 cm_enter_timewait(cm_id_priv); 1880 1881 ret = cm_alloc_msg(cm_id_priv, &msg); 1882 if (ret) 1883 goto out; 1884 1885 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 1886 private_data, private_data_len); 1887 1888 ret = ib_post_send_mad(msg, NULL); 1889 if (ret) { 1890 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1891 cm_free_msg(msg); 1892 return ret; 1893 } 1894 1895out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1896 return ret; 1897} 1898EXPORT_SYMBOL(ib_send_cm_drep); 1899 1900static int cm_dreq_handler(struct cm_work *work) 1901{ 1902 struct cm_id_private *cm_id_priv; 1903 struct cm_dreq_msg *dreq_msg; 1904 struct ib_mad_send_buf *msg = NULL; 1905 unsigned long flags; 1906 int ret; 1907 1908 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad; 1909 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, 1910 dreq_msg->local_comm_id); 1911 if (!cm_id_priv) 1912 return -EINVAL; 1913 1914 work->cm_event.private_data = &dreq_msg->private_data; 1915 1916 spin_lock_irqsave(&cm_id_priv->lock, flags); 1917 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg)) 1918 goto unlock; 1919 1920 switch (cm_id_priv->id.state) { 1921 case IB_CM_REP_SENT: 1922 case IB_CM_DREQ_SENT: 1923 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1924 break; 1925 case IB_CM_ESTABLISHED: 1926 case IB_CM_MRA_REP_RCVD: 1927 break; 1928 case IB_CM_TIMEWAIT: 1929 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) 1930 goto unlock; 1931 1932 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 1933 cm_id_priv->private_data, 1934 cm_id_priv->private_data_len); 1935 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1936 1937 if (ib_post_send_mad(msg, NULL)) 1938 cm_free_msg(msg); 1939 goto deref; 1940 default: 1941 goto unlock; 1942 } 1943 cm_id_priv->id.state = IB_CM_DREQ_RCVD; 1944 cm_id_priv->tid = dreq_msg->hdr.tid; 1945 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1946 if (!ret) 1947 list_add_tail(&work->list, &cm_id_priv->work_list); 1948 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 
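	/* If incrementing work_count brought it to zero, no other event is being processed for this cm_id, so handle the DREQ inline; otherwise it was queued on work_list above for the active handler to drain. */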
1949 1950 if (ret) 1951 cm_process_work(cm_id_priv, work); 1952 else 1953 cm_deref_id(cm_id_priv); 1954 return 0; 1955 1956unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1957deref: cm_deref_id(cm_id_priv); 1958 return -EINVAL; 1959} 1960 1961static int cm_drep_handler(struct cm_work *work) 1962{ 1963 struct cm_id_private *cm_id_priv; 1964 struct cm_drep_msg *drep_msg; 1965 unsigned long flags; 1966 int ret; 1967 1968 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad; 1969 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id, 1970 drep_msg->local_comm_id); 1971 if (!cm_id_priv) 1972 return -EINVAL; 1973 1974 work->cm_event.private_data = &drep_msg->private_data; 1975 1976 spin_lock_irqsave(&cm_id_priv->lock, flags); 1977 if (cm_id_priv->id.state != IB_CM_DREQ_SENT && 1978 cm_id_priv->id.state != IB_CM_DREQ_RCVD) { 1979 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1980 goto out; 1981 } 1982 cm_enter_timewait(cm_id_priv); 1983 1984 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1985 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1986 if (!ret) 1987 list_add_tail(&work->list, &cm_id_priv->work_list); 1988 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1989 1990 if (ret) 1991 cm_process_work(cm_id_priv, work); 1992 else 1993 cm_deref_id(cm_id_priv); 1994 return 0; 1995out: 1996 cm_deref_id(cm_id_priv); 1997 return -EINVAL; 1998} 1999 2000int ib_send_cm_rej(struct ib_cm_id *cm_id, 2001 enum ib_cm_rej_reason reason, 2002 void *ari, 2003 u8 ari_length, 2004 const void *private_data, 2005 u8 private_data_len) 2006{ 2007 struct cm_id_private *cm_id_priv; 2008 struct ib_mad_send_buf *msg; 2009 unsigned long flags; 2010 int ret; 2011 2012 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) || 2013 (ari && ari_length > IB_CM_REJ_ARI_LENGTH)) 2014 return -EINVAL; 2015 2016 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2017 2018 spin_lock_irqsave(&cm_id_priv->lock, flags); 2019 switch (cm_id->state) { 2020 case IB_CM_REQ_SENT: 2021 case IB_CM_MRA_REQ_RCVD: 2022 case IB_CM_REQ_RCVD: 2023 case IB_CM_MRA_REQ_SENT: 2024 case IB_CM_REP_RCVD: 2025 case IB_CM_MRA_REP_SENT: 2026 ret = cm_alloc_msg(cm_id_priv, &msg); 2027 if (!ret) 2028 cm_format_rej((struct cm_rej_msg *) msg->mad, 2029 cm_id_priv, reason, ari, ari_length, 2030 private_data, private_data_len); 2031 2032 cm_reset_to_idle(cm_id_priv); 2033 break; 2034 case IB_CM_REP_SENT: 2035 case IB_CM_MRA_REP_RCVD: 2036 ret = cm_alloc_msg(cm_id_priv, &msg); 2037 if (!ret) 2038 cm_format_rej((struct cm_rej_msg *) msg->mad, 2039 cm_id_priv, reason, ari, ari_length, 2040 private_data, private_data_len); 2041 2042 cm_enter_timewait(cm_id_priv); 2043 break; 2044 default: 2045 ret = -EINVAL; 2046 goto out; 2047 } 2048 2049 if (ret) 2050 goto out; 2051 2052 ret = ib_post_send_mad(msg, NULL); 2053 if (ret) 2054 cm_free_msg(msg); 2055 2056out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2057 return ret; 2058} 2059EXPORT_SYMBOL(ib_send_cm_rej); 2060 2061static void cm_format_rej_event(struct cm_work *work) 2062{ 2063 struct cm_rej_msg *rej_msg; 2064 struct ib_cm_rej_event_param *param; 2065 2066 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; 2067 param = &work->cm_event.param.rej_rcvd; 2068 param->ari = rej_msg->ari; 2069 param->ari_length = cm_rej_get_reject_info_len(rej_msg); 2070 param->reason = __be16_to_cpu(rej_msg->reason); 2071 work->cm_event.private_data = &rej_msg->private_data; 2072} 2073 2074static struct cm_id_private * cm_acquire_rejected_id(struct 
cm_rej_msg *rej_msg) 2075{ 2076 struct cm_timewait_info *timewait_info; 2077 struct cm_id_private *cm_id_priv; 2078 unsigned long flags; 2079 __be32 remote_id; 2080 2081 remote_id = rej_msg->local_comm_id; 2082 2083 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) { 2084 spin_lock_irqsave(&cm.lock, flags); 2085 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari), 2086 remote_id); 2087 if (!timewait_info) { 2088 spin_unlock_irqrestore(&cm.lock, flags); 2089 return NULL; 2090 } 2091 cm_id_priv = idr_find(&cm.local_id_table, (__force int) 2092 (timewait_info->work.local_id ^ 2093 cm.random_id_operand)); 2094 if (cm_id_priv) { 2095 if (cm_id_priv->id.remote_id == remote_id) 2096 atomic_inc(&cm_id_priv->refcount); 2097 else 2098 cm_id_priv = NULL; 2099 } 2100 spin_unlock_irqrestore(&cm.lock, flags); 2101 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ) 2102 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0); 2103 else 2104 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id); 2105 2106 return cm_id_priv; 2107} 2108 2109static int cm_rej_handler(struct cm_work *work) 2110{ 2111 struct cm_id_private *cm_id_priv; 2112 struct cm_rej_msg *rej_msg; 2113 unsigned long flags; 2114 int ret; 2115 2116 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; 2117 cm_id_priv = cm_acquire_rejected_id(rej_msg); 2118 if (!cm_id_priv) 2119 return -EINVAL; 2120 2121 cm_format_rej_event(work); 2122 2123 spin_lock_irqsave(&cm_id_priv->lock, flags); 2124 switch (cm_id_priv->id.state) { 2125 case IB_CM_REQ_SENT: 2126 case IB_CM_MRA_REQ_RCVD: 2127 case IB_CM_REP_SENT: 2128 case IB_CM_MRA_REP_RCVD: 2129 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2130 /* fall through */ 2131 case IB_CM_REQ_RCVD: 2132 case IB_CM_MRA_REQ_SENT: 2133 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN) 2134 cm_enter_timewait(cm_id_priv); 2135 else 2136 cm_reset_to_idle(cm_id_priv); 2137 break; 2138 case IB_CM_DREQ_SENT: 2139 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2140 /* fall through */ 2141 case IB_CM_REP_RCVD: 2142 case IB_CM_MRA_REP_SENT: 2143 case IB_CM_ESTABLISHED: 2144 cm_enter_timewait(cm_id_priv); 2145 break; 2146 default: 2147 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2148 ret = -EINVAL; 2149 goto out; 2150 } 2151 2152 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2153 if (!ret) 2154 list_add_tail(&work->list, &cm_id_priv->work_list); 2155 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2156 2157 if (ret) 2158 cm_process_work(cm_id_priv, work); 2159 else 2160 cm_deref_id(cm_id_priv); 2161 return 0; 2162out: 2163 cm_deref_id(cm_id_priv); 2164 return -EINVAL; 2165} 2166 2167int ib_send_cm_mra(struct ib_cm_id *cm_id, 2168 u8 service_timeout, 2169 const void *private_data, 2170 u8 private_data_len) 2171{ 2172 struct cm_id_private *cm_id_priv; 2173 struct ib_mad_send_buf *msg; 2174 void *data; 2175 unsigned long flags; 2176 int ret; 2177 2178 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE) 2179 return -EINVAL; 2180 2181 data = cm_copy_private_data(private_data, private_data_len); 2182 if (IS_ERR(data)) 2183 return PTR_ERR(data); 2184 2185 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2186 2187 spin_lock_irqsave(&cm_id_priv->lock, flags); 2188 switch(cm_id_priv->id.state) { 2189 case IB_CM_REQ_RCVD: 2190 ret = cm_alloc_msg(cm_id_priv, &msg); 2191 if (ret) 2192 goto error1; 2193 2194 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2195 CM_MSG_RESPONSE_REQ, 
service_timeout, 2196 private_data, private_data_len); 2197 ret = ib_post_send_mad(msg, NULL); 2198 if (ret) 2199 goto error2; 2200 cm_id->state = IB_CM_MRA_REQ_SENT; 2201 break; 2202 case IB_CM_REP_RCVD: 2203 ret = cm_alloc_msg(cm_id_priv, &msg); 2204 if (ret) 2205 goto error1; 2206 2207 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2208 CM_MSG_RESPONSE_REP, service_timeout, 2209 private_data, private_data_len); 2210 ret = ib_post_send_mad(msg, NULL); 2211 if (ret) 2212 goto error2; 2213 cm_id->state = IB_CM_MRA_REP_SENT; 2214 break; 2215 case IB_CM_ESTABLISHED: 2216 ret = cm_alloc_msg(cm_id_priv, &msg); 2217 if (ret) 2218 goto error1; 2219 2220 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2221 CM_MSG_RESPONSE_OTHER, service_timeout, 2222 private_data, private_data_len); 2223 ret = ib_post_send_mad(msg, NULL); 2224 if (ret) 2225 goto error2; 2226 cm_id->lap_state = IB_CM_MRA_LAP_SENT; 2227 break; 2228 default: 2229 ret = -EINVAL; 2230 goto error1; 2231 } 2232 cm_id_priv->service_timeout = service_timeout; 2233 cm_set_private_data(cm_id_priv, data, private_data_len); 2234 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2235 return 0; 2236 2237error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2238 kfree(data); 2239 return ret; 2240 2241error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2242 kfree(data); 2243 cm_free_msg(msg); 2244 return ret; 2245} 2246EXPORT_SYMBOL(ib_send_cm_mra); 2247 2248static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg) 2249{ 2250 switch (cm_mra_get_msg_mraed(mra_msg)) { 2251 case CM_MSG_RESPONSE_REQ: 2252 return cm_acquire_id(mra_msg->remote_comm_id, 0); 2253 case CM_MSG_RESPONSE_REP: 2254 case CM_MSG_RESPONSE_OTHER: 2255 return cm_acquire_id(mra_msg->remote_comm_id, 2256 mra_msg->local_comm_id); 2257 default: 2258 return NULL; 2259 } 2260} 2261 2262static int cm_mra_handler(struct cm_work *work) 2263{ 2264 struct cm_id_private *cm_id_priv; 2265 struct cm_mra_msg *mra_msg; 2266 unsigned long flags; 2267 int timeout, ret; 2268 2269 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad; 2270 cm_id_priv = cm_acquire_mraed_id(mra_msg); 2271 if (!cm_id_priv) 2272 return -EINVAL; 2273 2274 work->cm_event.private_data = &mra_msg->private_data; 2275 work->cm_event.param.mra_rcvd.service_timeout = 2276 cm_mra_get_service_timeout(mra_msg); 2277 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) + 2278 cm_convert_to_ms(cm_id_priv->av.packet_life_time); 2279 2280 spin_lock_irqsave(&cm_id_priv->lock, flags); 2281 switch (cm_id_priv->id.state) { 2282 case IB_CM_REQ_SENT: 2283 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ || 2284 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2285 cm_id_priv->msg, timeout)) 2286 goto out; 2287 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD; 2288 break; 2289 case IB_CM_REP_SENT: 2290 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP || 2291 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2292 cm_id_priv->msg, timeout)) 2293 goto out; 2294 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD; 2295 break; 2296 case IB_CM_ESTABLISHED: 2297 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER || 2298 cm_id_priv->id.lap_state != IB_CM_LAP_SENT || 2299 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2300 cm_id_priv->msg, timeout)) 2301 goto out; 2302 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD; 2303 break; 2304 default: 2305 goto out; 2306 } 2307 2308 cm_id_priv->msg->context[1] = (void *) (unsigned long) 2309 cm_id_priv->id.state; 2310 ret = 
atomic_inc_and_test(&cm_id_priv->work_count); 2311 if (!ret) 2312 list_add_tail(&work->list, &cm_id_priv->work_list); 2313 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2314 2315 if (ret) 2316 cm_process_work(cm_id_priv, work); 2317 else 2318 cm_deref_id(cm_id_priv); 2319 return 0; 2320out: 2321 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2322 cm_deref_id(cm_id_priv); 2323 return -EINVAL; 2324} 2325 2326static void cm_format_lap(struct cm_lap_msg *lap_msg, 2327 struct cm_id_private *cm_id_priv, 2328 struct ib_sa_path_rec *alternate_path, 2329 const void *private_data, 2330 u8 private_data_len) 2331{ 2332 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID, 2333 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP)); 2334 lap_msg->local_comm_id = cm_id_priv->id.local_id; 2335 lap_msg->remote_comm_id = cm_id_priv->id.remote_id; 2336 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn); 2337 /* todo: need remote CM response timeout */ 2338 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F); 2339 lap_msg->alt_local_lid = alternate_path->slid; 2340 lap_msg->alt_remote_lid = alternate_path->dlid; 2341 lap_msg->alt_local_gid = alternate_path->sgid; 2342 lap_msg->alt_remote_gid = alternate_path->dgid; 2343 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label); 2344 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class); 2345 lap_msg->alt_hop_limit = alternate_path->hop_limit; 2346 cm_lap_set_packet_rate(lap_msg, alternate_path->rate); 2347 cm_lap_set_sl(lap_msg, alternate_path->sl); 2348 cm_lap_set_subnet_local(lap_msg, 1); /* local only... */ 2349 cm_lap_set_local_ack_timeout(lap_msg, 2350 min(31, alternate_path->packet_life_time + 1)); 2351 2352 if (private_data && private_data_len) 2353 memcpy(lap_msg->private_data, private_data, private_data_len); 2354} 2355 2356int ib_send_cm_lap(struct ib_cm_id *cm_id, 2357 struct ib_sa_path_rec *alternate_path, 2358 const void *private_data, 2359 u8 private_data_len) 2360{ 2361 struct cm_id_private *cm_id_priv; 2362 struct ib_mad_send_buf *msg; 2363 unsigned long flags; 2364 int ret; 2365 2366 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE) 2367 return -EINVAL; 2368 2369 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2370 spin_lock_irqsave(&cm_id_priv->lock, flags); 2371 if (cm_id->state != IB_CM_ESTABLISHED || 2372 cm_id->lap_state != IB_CM_LAP_IDLE) { 2373 ret = -EINVAL; 2374 goto out; 2375 } 2376 2377 ret = cm_alloc_msg(cm_id_priv, &msg); 2378 if (ret) 2379 goto out; 2380 2381 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv, 2382 alternate_path, private_data, private_data_len); 2383 msg->timeout_ms = cm_id_priv->timeout_ms; 2384 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED; 2385 2386 ret = ib_post_send_mad(msg, NULL); 2387 if (ret) { 2388 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2389 cm_free_msg(msg); 2390 return ret; 2391 } 2392 2393 cm_id->lap_state = IB_CM_LAP_SENT; 2394 cm_id_priv->msg = msg; 2395 2396out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2397 return ret; 2398} 2399EXPORT_SYMBOL(ib_send_cm_lap); 2400 2401static void cm_format_path_from_lap(struct ib_sa_path_rec *path, 2402 struct cm_lap_msg *lap_msg) 2403{ 2404 memset(path, 0, sizeof *path); 2405 path->dgid = lap_msg->alt_local_gid; 2406 path->sgid = lap_msg->alt_remote_gid; 2407 path->dlid = lap_msg->alt_local_lid; 2408 path->slid = lap_msg->alt_remote_lid; 2409 path->flow_label = cm_lap_get_flow_label(lap_msg); 2410 path->hop_limit = lap_msg->alt_hop_limit; 2411 path->traffic_class = 
cm_lap_get_traffic_class(lap_msg); 2412 path->reversible = 1; 2413 /* pkey is same as in REQ */ 2414 path->sl = cm_lap_get_sl(lap_msg); 2415 path->mtu_selector = IB_SA_EQ; 2416 /* mtu is same as in REQ */ 2417 path->rate_selector = IB_SA_EQ; 2418 path->rate = cm_lap_get_packet_rate(lap_msg); 2419 path->packet_life_time_selector = IB_SA_EQ; 2420 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg); 2421 path->packet_life_time -= (path->packet_life_time > 0); 2422} 2423 2424static int cm_lap_handler(struct cm_work *work) 2425{ 2426 struct cm_id_private *cm_id_priv; 2427 struct cm_lap_msg *lap_msg; 2428 struct ib_cm_lap_event_param *param; 2429 struct ib_mad_send_buf *msg = NULL; 2430 unsigned long flags; 2431 int ret; 2432 2433 /* todo: verify LAP request and send reject APR if invalid. */ 2434 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad; 2435 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id, 2436 lap_msg->local_comm_id); 2437 if (!cm_id_priv) 2438 return -EINVAL; 2439 2440 param = &work->cm_event.param.lap_rcvd; 2441 param->alternate_path = &work->path[0]; 2442 cm_format_path_from_lap(param->alternate_path, lap_msg); 2443 work->cm_event.private_data = &lap_msg->private_data; 2444 2445 spin_lock_irqsave(&cm_id_priv->lock, flags); 2446 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) 2447 goto unlock; 2448 2449 switch (cm_id_priv->id.lap_state) { 2450 case IB_CM_LAP_IDLE: 2451 break; 2452 case IB_CM_MRA_LAP_SENT: 2453 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) 2454 goto unlock; 2455 2456 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2457 CM_MSG_RESPONSE_OTHER, 2458 cm_id_priv->service_timeout, 2459 cm_id_priv->private_data, 2460 cm_id_priv->private_data_len); 2461 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2462 2463 if (ib_post_send_mad(msg, NULL)) 2464 cm_free_msg(msg); 2465 goto deref; 2466 default: 2467 goto unlock; 2468 } 2469 2470 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD; 2471 cm_id_priv->tid = lap_msg->hdr.tid; 2472 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2473 if (!ret) 2474 list_add_tail(&work->list, &cm_id_priv->work_list); 2475 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2476 2477 if (ret) 2478 cm_process_work(cm_id_priv, work); 2479 else 2480 cm_deref_id(cm_id_priv); 2481 return 0; 2482 2483unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2484deref: cm_deref_id(cm_id_priv); 2485 return -EINVAL; 2486} 2487 2488static void cm_format_apr(struct cm_apr_msg *apr_msg, 2489 struct cm_id_private *cm_id_priv, 2490 enum ib_cm_apr_status status, 2491 void *info, 2492 u8 info_length, 2493 const void *private_data, 2494 u8 private_data_len) 2495{ 2496 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid); 2497 apr_msg->local_comm_id = cm_id_priv->id.local_id; 2498 apr_msg->remote_comm_id = cm_id_priv->id.remote_id; 2499 apr_msg->ap_status = (u8) status; 2500 2501 if (info && info_length) { 2502 apr_msg->info_length = info_length; 2503 memcpy(apr_msg->info, info, info_length); 2504 } 2505 2506 if (private_data && private_data_len) 2507 memcpy(apr_msg->private_data, private_data, private_data_len); 2508} 2509 2510int ib_send_cm_apr(struct ib_cm_id *cm_id, 2511 enum ib_cm_apr_status status, 2512 void *info, 2513 u8 info_length, 2514 const void *private_data, 2515 u8 private_data_len) 2516{ 2517 struct cm_id_private *cm_id_priv; 2518 struct ib_mad_send_buf *msg; 2519 unsigned long flags; 2520 int ret; 2521 2522 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) || 2523 (info 
&& info_length > IB_CM_APR_INFO_LENGTH)) 2524 return -EINVAL; 2525 2526 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2527 spin_lock_irqsave(&cm_id_priv->lock, flags); 2528 if (cm_id->state != IB_CM_ESTABLISHED || 2529 (cm_id->lap_state != IB_CM_LAP_RCVD && 2530 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) { 2531 ret = -EINVAL; 2532 goto out; 2533 } 2534 2535 ret = cm_alloc_msg(cm_id_priv, &msg); 2536 if (ret) 2537 goto out; 2538 2539 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status, 2540 info, info_length, private_data, private_data_len); 2541 ret = ib_post_send_mad(msg, NULL); 2542 if (ret) { 2543 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2544 cm_free_msg(msg); 2545 return ret; 2546 } 2547 2548 cm_id->lap_state = IB_CM_LAP_IDLE; 2549out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2550 return ret; 2551} 2552EXPORT_SYMBOL(ib_send_cm_apr); 2553 2554static int cm_apr_handler(struct cm_work *work) 2555{ 2556 struct cm_id_private *cm_id_priv; 2557 struct cm_apr_msg *apr_msg; 2558 unsigned long flags; 2559 int ret; 2560 2561 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad; 2562 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id, 2563 apr_msg->local_comm_id); 2564 if (!cm_id_priv) 2565 return -EINVAL; /* Unmatched reply. */ 2566 2567 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status; 2568 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info; 2569 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length; 2570 work->cm_event.private_data = &apr_msg->private_data; 2571 2572 spin_lock_irqsave(&cm_id_priv->lock, flags); 2573 if (cm_id_priv->id.state != IB_CM_ESTABLISHED || 2574 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT && 2575 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) { 2576 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2577 goto out; 2578 } 2579 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE; 2580 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2581 cm_id_priv->msg = NULL; 2582 2583 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2584 if (!ret) 2585 list_add_tail(&work->list, &cm_id_priv->work_list); 2586 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2587 2588 if (ret) 2589 cm_process_work(cm_id_priv, work); 2590 else 2591 cm_deref_id(cm_id_priv); 2592 return 0; 2593out: 2594 cm_deref_id(cm_id_priv); 2595 return -EINVAL; 2596} 2597 2598static int cm_timewait_handler(struct cm_work *work) 2599{ 2600 struct cm_timewait_info *timewait_info; 2601 struct cm_id_private *cm_id_priv; 2602 unsigned long flags; 2603 int ret; 2604 2605 timewait_info = (struct cm_timewait_info *)work; 2606 cm_cleanup_timewait(timewait_info); 2607 2608 cm_id_priv = cm_acquire_id(timewait_info->work.local_id, 2609 timewait_info->work.remote_id); 2610 if (!cm_id_priv) 2611 return -EINVAL; 2612 2613 spin_lock_irqsave(&cm_id_priv->lock, flags); 2614 if (cm_id_priv->id.state != IB_CM_TIMEWAIT || 2615 cm_id_priv->remote_qpn != timewait_info->remote_qpn) { 2616 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2617 goto out; 2618 } 2619 cm_id_priv->id.state = IB_CM_IDLE; 2620 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2621 if (!ret) 2622 list_add_tail(&work->list, &cm_id_priv->work_list); 2623 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2624 2625 if (ret) 2626 cm_process_work(cm_id_priv, work); 2627 else 2628 cm_deref_id(cm_id_priv); 2629 return 0; 2630out: 2631 cm_deref_id(cm_id_priv); 2632 return -EINVAL; 2633} 2634 2635static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg, 2636 struct cm_id_private 
*cm_id_priv, 2637 struct ib_cm_sidr_req_param *param) 2638{ 2639 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID, 2640 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR)); 2641 sidr_req_msg->request_id = cm_id_priv->id.local_id; 2642 sidr_req_msg->pkey = cpu_to_be16(param->path->pkey); 2643 sidr_req_msg->service_id = param->service_id; 2644 2645 if (param->private_data && param->private_data_len) 2646 memcpy(sidr_req_msg->private_data, param->private_data, 2647 param->private_data_len); 2648} 2649 2650int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, 2651 struct ib_cm_sidr_req_param *param) 2652{ 2653 struct cm_id_private *cm_id_priv; 2654 struct ib_mad_send_buf *msg; 2655 unsigned long flags; 2656 int ret; 2657 2658 if (!param->path || (param->private_data && 2659 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE)) 2660 return -EINVAL; 2661 2662 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2663 ret = cm_init_av_by_path(param->path, &cm_id_priv->av); 2664 if (ret) 2665 goto out; 2666 2667 cm_id->service_id = param->service_id; 2668 cm_id->service_mask = __constant_cpu_to_be64(~0ULL); 2669 cm_id_priv->timeout_ms = param->timeout_ms; 2670 cm_id_priv->max_cm_retries = param->max_cm_retries; 2671 ret = cm_alloc_msg(cm_id_priv, &msg); 2672 if (ret) 2673 goto out; 2674 2675 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv, 2676 param); 2677 msg->timeout_ms = cm_id_priv->timeout_ms; 2678 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT; 2679 2680 spin_lock_irqsave(&cm_id_priv->lock, flags); 2681 if (cm_id->state == IB_CM_IDLE) 2682 ret = ib_post_send_mad(msg, NULL); 2683 else 2684 ret = -EINVAL; 2685 2686 if (ret) { 2687 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2688 cm_free_msg(msg); 2689 goto out; 2690 } 2691 cm_id->state = IB_CM_SIDR_REQ_SENT; 2692 cm_id_priv->msg = msg; 2693 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2694out: 2695 return ret; 2696} 2697EXPORT_SYMBOL(ib_send_cm_sidr_req); 2698 2699static void cm_format_sidr_req_event(struct cm_work *work, 2700 struct ib_cm_id *listen_id) 2701{ 2702 struct cm_sidr_req_msg *sidr_req_msg; 2703 struct ib_cm_sidr_req_event_param *param; 2704 2705 sidr_req_msg = (struct cm_sidr_req_msg *) 2706 work->mad_recv_wc->recv_buf.mad; 2707 param = &work->cm_event.param.sidr_req_rcvd; 2708 param->pkey = __be16_to_cpu(sidr_req_msg->pkey); 2709 param->listen_id = listen_id; 2710 param->port = work->port->port_num; 2711 work->cm_event.private_data = &sidr_req_msg->private_data; 2712} 2713 2714static int cm_sidr_req_handler(struct cm_work *work) 2715{ 2716 struct ib_cm_id *cm_id; 2717 struct cm_id_private *cm_id_priv, *cur_cm_id_priv; 2718 struct cm_sidr_req_msg *sidr_req_msg; 2719 struct ib_wc *wc; 2720 unsigned long flags; 2721 2722 cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL); 2723 if (IS_ERR(cm_id)) 2724 return PTR_ERR(cm_id); 2725 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2726 2727 /* Record SGID/SLID and request ID for lookup. 
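 The sender's LID is stored in av.dgid and the request ID in remote_id; together they identify the SIDR REQ for the duplicate check below.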
*/ 2728 sidr_req_msg = (struct cm_sidr_req_msg *) 2729 work->mad_recv_wc->recv_buf.mad; 2730 wc = work->mad_recv_wc->wc; 2731 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid); 2732 cm_id_priv->av.dgid.global.interface_id = 0; 2733 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 2734 work->mad_recv_wc->recv_buf.grh, 2735 &cm_id_priv->av); 2736 cm_id_priv->id.remote_id = sidr_req_msg->request_id; 2737 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD; 2738 cm_id_priv->tid = sidr_req_msg->hdr.tid; 2739 atomic_inc(&cm_id_priv->work_count); 2740 2741 spin_lock_irqsave(&cm.lock, flags); 2742 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); 2743 if (cur_cm_id_priv) { 2744 spin_unlock_irqrestore(&cm.lock, flags); 2745 goto out; /* Duplicate message. */ 2746 } 2747 cur_cm_id_priv = cm_find_listen(cm_id->device, 2748 sidr_req_msg->service_id, 2749 sidr_req_msg->private_data); 2750 if (!cur_cm_id_priv) { 2751 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 2752 spin_unlock_irqrestore(&cm.lock, flags); 2753 /* todo: reply with no match */ 2754 goto out; /* No match. */ 2755 } 2756 atomic_inc(&cur_cm_id_priv->refcount); 2757 spin_unlock_irqrestore(&cm.lock, flags); 2758 2759 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; 2760 cm_id_priv->id.context = cur_cm_id_priv->id.context; 2761 cm_id_priv->id.service_id = sidr_req_msg->service_id; 2762 cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL); 2763 2764 cm_format_sidr_req_event(work, &cur_cm_id_priv->id); 2765 cm_process_work(cm_id_priv, work); 2766 cm_deref_id(cur_cm_id_priv); 2767 return 0; 2768out: 2769 ib_destroy_cm_id(&cm_id_priv->id); 2770 return -EINVAL; 2771} 2772 2773static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg, 2774 struct cm_id_private *cm_id_priv, 2775 struct ib_cm_sidr_rep_param *param) 2776{ 2777 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID, 2778 cm_id_priv->tid); 2779 sidr_rep_msg->request_id = cm_id_priv->id.remote_id; 2780 sidr_rep_msg->status = param->status; 2781 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num)); 2782 sidr_rep_msg->service_id = cm_id_priv->id.service_id; 2783 sidr_rep_msg->qkey = cpu_to_be32(param->qkey); 2784 2785 if (param->info && param->info_length) 2786 memcpy(sidr_rep_msg->info, param->info, param->info_length); 2787 2788 if (param->private_data && param->private_data_len) 2789 memcpy(sidr_rep_msg->private_data, param->private_data, 2790 param->private_data_len); 2791} 2792 2793int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, 2794 struct ib_cm_sidr_rep_param *param) 2795{ 2796 struct cm_id_private *cm_id_priv; 2797 struct ib_mad_send_buf *msg; 2798 unsigned long flags; 2799 int ret; 2800 2801 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) || 2802 (param->private_data && 2803 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE)) 2804 return -EINVAL; 2805 2806 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2807 spin_lock_irqsave(&cm_id_priv->lock, flags); 2808 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) { 2809 ret = -EINVAL; 2810 goto error; 2811 } 2812 2813 ret = cm_alloc_msg(cm_id_priv, &msg); 2814 if (ret) 2815 goto error; 2816 2817 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, 2818 param); 2819 ret = ib_post_send_mad(msg, NULL); 2820 if (ret) { 2821 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2822 cm_free_msg(msg); 2823 return ret; 2824 } 2825 cm_id->state = IB_CM_IDLE; 2826 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2827 2828 
spin_lock_irqsave(&cm.lock, flags); 2829 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 2830 spin_unlock_irqrestore(&cm.lock, flags); 2831 return 0; 2832 2833error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2834 return ret; 2835} 2836EXPORT_SYMBOL(ib_send_cm_sidr_rep); 2837 2838static void cm_format_sidr_rep_event(struct cm_work *work) 2839{ 2840 struct cm_sidr_rep_msg *sidr_rep_msg; 2841 struct ib_cm_sidr_rep_event_param *param; 2842 2843 sidr_rep_msg = (struct cm_sidr_rep_msg *) 2844 work->mad_recv_wc->recv_buf.mad; 2845 param = &work->cm_event.param.sidr_rep_rcvd; 2846 param->status = sidr_rep_msg->status; 2847 param->qkey = be32_to_cpu(sidr_rep_msg->qkey); 2848 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg)); 2849 param->info = &sidr_rep_msg->info; 2850 param->info_len = sidr_rep_msg->info_length; 2851 work->cm_event.private_data = &sidr_rep_msg->private_data; 2852} 2853 2854static int cm_sidr_rep_handler(struct cm_work *work) 2855{ 2856 struct cm_sidr_rep_msg *sidr_rep_msg; 2857 struct cm_id_private *cm_id_priv; 2858 unsigned long flags; 2859 2860 sidr_rep_msg = (struct cm_sidr_rep_msg *) 2861 work->mad_recv_wc->recv_buf.mad; 2862 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0); 2863 if (!cm_id_priv) 2864 return -EINVAL; /* Unmatched reply. */ 2865 2866 spin_lock_irqsave(&cm_id_priv->lock, flags); 2867 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) { 2868 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2869 goto out; 2870 } 2871 cm_id_priv->id.state = IB_CM_IDLE; 2872 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2873 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2874 2875 cm_format_sidr_rep_event(work); 2876 cm_process_work(cm_id_priv, work); 2877 return 0; 2878out: 2879 cm_deref_id(cm_id_priv); 2880 return -EINVAL; 2881} 2882 2883static void cm_process_send_error(struct ib_mad_send_buf *msg, 2884 enum ib_wc_status wc_status) 2885{ 2886 struct cm_id_private *cm_id_priv; 2887 struct ib_cm_event cm_event; 2888 enum ib_cm_state state; 2889 unsigned long flags; 2890 int ret; 2891 2892 memset(&cm_event, 0, sizeof cm_event); 2893 cm_id_priv = msg->context[0]; 2894 2895 /* Discard old sends or ones without a response. */ 2896 spin_lock_irqsave(&cm_id_priv->lock, flags); 2897 state = (enum ib_cm_state) (unsigned long) msg->context[1]; 2898 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state) 2899 goto discard; 2900 2901 switch (state) { 2902 case IB_CM_REQ_SENT: 2903 case IB_CM_MRA_REQ_RCVD: 2904 cm_reset_to_idle(cm_id_priv); 2905 cm_event.event = IB_CM_REQ_ERROR; 2906 break; 2907 case IB_CM_REP_SENT: 2908 case IB_CM_MRA_REP_RCVD: 2909 cm_reset_to_idle(cm_id_priv); 2910 cm_event.event = IB_CM_REP_ERROR; 2911 break; 2912 case IB_CM_DREQ_SENT: 2913 cm_enter_timewait(cm_id_priv); 2914 cm_event.event = IB_CM_DREQ_ERROR; 2915 break; 2916 case IB_CM_SIDR_REQ_SENT: 2917 cm_id_priv->id.state = IB_CM_IDLE; 2918 cm_event.event = IB_CM_SIDR_REQ_ERROR; 2919 break; 2920 default: 2921 goto discard; 2922 } 2923 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2924 cm_event.param.send_status = wc_status; 2925 2926 /* No other events can occur on the cm_id at this point. 
*/ 2927 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event); 2928 cm_free_msg(msg); 2929 if (ret) 2930 ib_destroy_cm_id(&cm_id_priv->id); 2931 return; 2932discard: 2933 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2934 cm_free_msg(msg); 2935} 2936 2937static void cm_send_handler(struct ib_mad_agent *mad_agent, 2938 struct ib_mad_send_wc *mad_send_wc) 2939{ 2940 struct ib_mad_send_buf *msg = mad_send_wc->send_buf; 2941 2942 switch (mad_send_wc->status) { 2943 case IB_WC_SUCCESS: 2944 case IB_WC_WR_FLUSH_ERR: 2945 cm_free_msg(msg); 2946 break; 2947 default: 2948 if (msg->context[0] && msg->context[1]) 2949 cm_process_send_error(msg, mad_send_wc->status); 2950 else 2951 cm_free_msg(msg); 2952 break; 2953 } 2954} 2955 2956static void cm_work_handler(void *data) 2957{ 2958 struct cm_work *work = data; 2959 int ret; 2960 2961 switch (work->cm_event.event) { 2962 case IB_CM_REQ_RECEIVED: 2963 ret = cm_req_handler(work); 2964 break; 2965 case IB_CM_MRA_RECEIVED: 2966 ret = cm_mra_handler(work); 2967 break; 2968 case IB_CM_REJ_RECEIVED: 2969 ret = cm_rej_handler(work); 2970 break; 2971 case IB_CM_REP_RECEIVED: 2972 ret = cm_rep_handler(work); 2973 break; 2974 case IB_CM_RTU_RECEIVED: 2975 ret = cm_rtu_handler(work); 2976 break; 2977 case IB_CM_USER_ESTABLISHED: 2978 ret = cm_establish_handler(work); 2979 break; 2980 case IB_CM_DREQ_RECEIVED: 2981 ret = cm_dreq_handler(work); 2982 break; 2983 case IB_CM_DREP_RECEIVED: 2984 ret = cm_drep_handler(work); 2985 break; 2986 case IB_CM_SIDR_REQ_RECEIVED: 2987 ret = cm_sidr_req_handler(work); 2988 break; 2989 case IB_CM_SIDR_REP_RECEIVED: 2990 ret = cm_sidr_rep_handler(work); 2991 break; 2992 case IB_CM_LAP_RECEIVED: 2993 ret = cm_lap_handler(work); 2994 break; 2995 case IB_CM_APR_RECEIVED: 2996 ret = cm_apr_handler(work); 2997 break; 2998 case IB_CM_TIMEWAIT_EXIT: 2999 ret = cm_timewait_handler(work); 3000 break; 3001 default: 3002 ret = -EINVAL; 3003 break; 3004 } 3005 if (ret) 3006 cm_free_work(work); 3007} 3008 3009int ib_cm_establish(struct ib_cm_id *cm_id) 3010{ 3011 struct cm_id_private *cm_id_priv; 3012 struct cm_work *work; 3013 unsigned long flags; 3014 int ret = 0; 3015 3016 work = kmalloc(sizeof *work, GFP_ATOMIC); 3017 if (!work) 3018 return -ENOMEM; 3019 3020 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3021 spin_lock_irqsave(&cm_id_priv->lock, flags); 3022 switch (cm_id->state) 3023 { 3024 case IB_CM_REP_SENT: 3025 case IB_CM_MRA_REP_RCVD: 3026 cm_id->state = IB_CM_ESTABLISHED; 3027 break; 3028 case IB_CM_ESTABLISHED: 3029 ret = -EISCONN; 3030 break; 3031 default: 3032 ret = -EINVAL; 3033 break; 3034 } 3035 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3036 3037 if (ret) { 3038 kfree(work); 3039 goto out; 3040 } 3041 3042 /* 3043 * The CM worker thread may try to destroy the cm_id before it 3044 * can execute this work item. To prevent potential deadlock, 3045 * we need to find the cm_id once we're in the context of the 3046 * worker thread, rather than holding a reference on it. 
3047 */ 3048 INIT_WORK(&work->work, cm_work_handler, work); 3049 work->local_id = cm_id->local_id; 3050 work->remote_id = cm_id->remote_id; 3051 work->mad_recv_wc = NULL; 3052 work->cm_event.event = IB_CM_USER_ESTABLISHED; 3053 queue_work(cm.wq, &work->work); 3054out: 3055 return ret; 3056} 3057EXPORT_SYMBOL(ib_cm_establish); 3058 3059static void cm_recv_handler(struct ib_mad_agent *mad_agent, 3060 struct ib_mad_recv_wc *mad_recv_wc) 3061{ 3062 struct cm_work *work; 3063 enum ib_cm_event_type event; 3064 int paths = 0; 3065 3066 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { 3067 case CM_REQ_ATTR_ID: 3068 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)-> 3069 alt_local_lid != 0); 3070 event = IB_CM_REQ_RECEIVED; 3071 break; 3072 case CM_MRA_ATTR_ID: 3073 event = IB_CM_MRA_RECEIVED; 3074 break; 3075 case CM_REJ_ATTR_ID: 3076 event = IB_CM_REJ_RECEIVED; 3077 break; 3078 case CM_REP_ATTR_ID: 3079 event = IB_CM_REP_RECEIVED; 3080 break; 3081 case CM_RTU_ATTR_ID: 3082 event = IB_CM_RTU_RECEIVED; 3083 break; 3084 case CM_DREQ_ATTR_ID: 3085 event = IB_CM_DREQ_RECEIVED; 3086 break; 3087 case CM_DREP_ATTR_ID: 3088 event = IB_CM_DREP_RECEIVED; 3089 break; 3090 case CM_SIDR_REQ_ATTR_ID: 3091 event = IB_CM_SIDR_REQ_RECEIVED; 3092 break; 3093 case CM_SIDR_REP_ATTR_ID: 3094 event = IB_CM_SIDR_REP_RECEIVED; 3095 break; 3096 case CM_LAP_ATTR_ID: 3097 paths = 1; 3098 event = IB_CM_LAP_RECEIVED; 3099 break; 3100 case CM_APR_ATTR_ID: 3101 event = IB_CM_APR_RECEIVED; 3102 break; 3103 default: 3104 ib_free_recv_mad(mad_recv_wc); 3105 return; 3106 } 3107 3108 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, 3109 GFP_KERNEL); 3110 if (!work) { 3111 ib_free_recv_mad(mad_recv_wc); 3112 return; 3113 } 3114 3115 INIT_WORK(&work->work, cm_work_handler, work); 3116 work->cm_event.event = event; 3117 work->mad_recv_wc = mad_recv_wc; 3118 work->port = (struct cm_port *)mad_agent->context; 3119 queue_work(cm.wq, &work->work); 3120} 3121 3122static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, 3123 struct ib_qp_attr *qp_attr, 3124 int *qp_attr_mask) 3125{ 3126 unsigned long flags; 3127 int ret; 3128 3129 spin_lock_irqsave(&cm_id_priv->lock, flags); 3130 switch (cm_id_priv->id.state) { 3131 case IB_CM_REQ_SENT: 3132 case IB_CM_MRA_REQ_RCVD: 3133 case IB_CM_REQ_RCVD: 3134 case IB_CM_MRA_REQ_SENT: 3135 case IB_CM_REP_RCVD: 3136 case IB_CM_MRA_REP_SENT: 3137 case IB_CM_REP_SENT: 3138 case IB_CM_MRA_REP_RCVD: 3139 case IB_CM_ESTABLISHED: 3140 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | 3141 IB_QP_PKEY_INDEX | IB_QP_PORT; 3142 qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | 3143 IB_ACCESS_REMOTE_WRITE; 3144 if (cm_id_priv->responder_resources) 3145 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ | 3146 IB_ACCESS_REMOTE_ATOMIC; 3147 qp_attr->pkey_index = cm_id_priv->av.pkey_index; 3148 qp_attr->port_num = cm_id_priv->av.port->port_num; 3149 ret = 0; 3150 break; 3151 default: 3152 ret = -EINVAL; 3153 break; 3154 } 3155 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3156 return ret; 3157} 3158 3159static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv, 3160 struct ib_qp_attr *qp_attr, 3161 int *qp_attr_mask) 3162{ 3163 unsigned long flags; 3164 int ret; 3165 3166 spin_lock_irqsave(&cm_id_priv->lock, flags); 3167 switch (cm_id_priv->id.state) { 3168 case IB_CM_REQ_RCVD: 3169 case IB_CM_MRA_REQ_SENT: 3170 case IB_CM_REP_RCVD: 3171 case IB_CM_MRA_REP_SENT: 3172 case IB_CM_REP_SENT: 3173 case IB_CM_MRA_REP_RCVD: 3174 case IB_CM_ESTABLISHED: 3175 
*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | 3176 IB_QP_DEST_QPN | IB_QP_RQ_PSN; 3177 qp_attr->ah_attr = cm_id_priv->av.ah_attr; 3178 qp_attr->path_mtu = cm_id_priv->path_mtu; 3179 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn); 3180 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn); 3181 if (cm_id_priv->qp_type == IB_QPT_RC) { 3182 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC | 3183 IB_QP_MIN_RNR_TIMER; 3184 qp_attr->max_dest_rd_atomic = 3185 cm_id_priv->responder_resources; 3186 qp_attr->min_rnr_timer = 0; 3187 } 3188 if (cm_id_priv->alt_av.ah_attr.dlid) { 3189 *qp_attr_mask |= IB_QP_ALT_PATH; 3190 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num; 3191 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; 3192 } 3193 ret = 0; 3194 break; 3195 default: 3196 ret = -EINVAL; 3197 break; 3198 } 3199 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3200 return ret; 3201} 3202 3203static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv, 3204 struct ib_qp_attr *qp_attr, 3205 int *qp_attr_mask) 3206{ 3207 unsigned long flags; 3208 int ret; 3209 3210 spin_lock_irqsave(&cm_id_priv->lock, flags); 3211 switch (cm_id_priv->id.state) { 3212 case IB_CM_REP_RCVD: 3213 case IB_CM_MRA_REP_SENT: 3214 case IB_CM_REP_SENT: 3215 case IB_CM_MRA_REP_RCVD: 3216 case IB_CM_ESTABLISHED: 3217 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN; 3218 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn); 3219 if (cm_id_priv->qp_type == IB_QPT_RC) { 3220 *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | 3221 IB_QP_RNR_RETRY | 3222 IB_QP_MAX_QP_RD_ATOMIC; 3223 qp_attr->timeout = cm_id_priv->local_ack_timeout; 3224 qp_attr->retry_cnt = cm_id_priv->retry_count; 3225 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count; 3226 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth; 3227 } 3228 if (cm_id_priv->alt_av.ah_attr.dlid) { 3229 *qp_attr_mask |= IB_QP_PATH_MIG_STATE; 3230 qp_attr->path_mig_state = IB_MIG_REARM; 3231 } 3232 ret = 0; 3233 break; 3234 default: 3235 ret = -EINVAL; 3236 break; 3237 } 3238 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3239 return ret; 3240} 3241 3242int ib_cm_init_qp_attr(struct ib_cm_id *cm_id, 3243 struct ib_qp_attr *qp_attr, 3244 int *qp_attr_mask) 3245{ 3246 struct cm_id_private *cm_id_priv; 3247 int ret; 3248 3249 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3250 switch (qp_attr->qp_state) { 3251 case IB_QPS_INIT: 3252 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask); 3253 break; 3254 case IB_QPS_RTR: 3255 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask); 3256 break; 3257 case IB_QPS_RTS: 3258 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask); 3259 break; 3260 default: 3261 ret = -EINVAL; 3262 break; 3263 } 3264 return ret; 3265} 3266EXPORT_SYMBOL(ib_cm_init_qp_attr); 3267 3268static void cm_add_one(struct ib_device *device) 3269{ 3270 struct cm_device *cm_dev; 3271 struct cm_port *port; 3272 struct ib_mad_reg_req reg_req = { 3273 .mgmt_class = IB_MGMT_CLASS_CM, 3274 .mgmt_class_version = IB_CM_CLASS_VERSION 3275 }; 3276 struct ib_port_modify port_modify = { 3277 .set_port_cap_mask = IB_PORT_CM_SUP 3278 }; 3279 unsigned long flags; 3280 int ret; 3281 u8 i; 3282 3283 cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) * 3284 device->phys_port_cnt, GFP_KERNEL); 3285 if (!cm_dev) 3286 return; 3287 3288 cm_dev->device = device; 3289 cm_dev->ca_guid = device->node_guid; 3290 3291 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); 3292 for (i = 1; i <= device->phys_port_cnt; i++) { 3293 port = &cm_dev->port[i-1]; 3294 
port->cm_dev = cm_dev; 3295 port->port_num = i; 3296 port->mad_agent = ib_register_mad_agent(device, i, 3297 IB_QPT_GSI, 3298 &reg_req, 3299 0, 3300 cm_send_handler, 3301 cm_recv_handler, 3302 port); 3303 if (IS_ERR(port->mad_agent)) 3304 goto error1; 3305 3306 ret = ib_modify_port(device, i, 0, &port_modify); 3307 if (ret) 3308 goto error2; 3309 } 3310 ib_set_client_data(device, &cm_client, cm_dev); 3311 3312 write_lock_irqsave(&cm.device_lock, flags); 3313 list_add_tail(&cm_dev->list, &cm.device_list); 3314 write_unlock_irqrestore(&cm.device_lock, flags); 3315 return; 3316 3317error2: 3318 ib_unregister_mad_agent(port->mad_agent); 3319error1: 3320 port_modify.set_port_cap_mask = 0; 3321 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; 3322 while (--i) { 3323 port = &cm_dev->port[i-1]; 3324 ib_modify_port(device, port->port_num, 0, &port_modify); 3325 ib_unregister_mad_agent(port->mad_agent); 3326 } 3327 kfree(cm_dev); 3328} 3329 3330static void cm_remove_one(struct ib_device *device) 3331{ 3332 struct cm_device *cm_dev; 3333 struct cm_port *port; 3334 struct ib_port_modify port_modify = { 3335 .clr_port_cap_mask = IB_PORT_CM_SUP 3336 }; 3337 unsigned long flags; 3338 int i; 3339 3340 cm_dev = ib_get_client_data(device, &cm_client); 3341 if (!cm_dev) 3342 return; 3343 3344 write_lock_irqsave(&cm.device_lock, flags); 3345 list_del(&cm_dev->list); 3346 write_unlock_irqrestore(&cm.device_lock, flags); 3347 3348 for (i = 1; i <= device->phys_port_cnt; i++) { 3349 port = &cm_dev->port[i-1]; 3350 ib_modify_port(device, port->port_num, 0, &port_modify); 3351 ib_unregister_mad_agent(port->mad_agent); 3352 } 3353 kfree(cm_dev); 3354} 3355 3356static int __init ib_cm_init(void) 3357{ 3358 int ret; 3359 3360 memset(&cm, 0, sizeof cm); 3361 INIT_LIST_HEAD(&cm.device_list); 3362 rwlock_init(&cm.device_lock); 3363 spin_lock_init(&cm.lock); 3364 cm.listen_service_table = RB_ROOT; 3365 cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); 3366 cm.remote_id_table = RB_ROOT; 3367 cm.remote_qp_table = RB_ROOT; 3368 cm.remote_sidr_table = RB_ROOT; 3369 idr_init(&cm.local_id_table); 3370 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand); 3371 idr_pre_get(&cm.local_id_table, GFP_KERNEL); 3372 3373 cm.wq = create_workqueue("ib_cm"); 3374 if (!cm.wq) 3375 return -ENOMEM; 3376 3377 ret = ib_register_client(&cm_client); 3378 if (ret) 3379 goto error; 3380 3381 return 0; 3382error: 3383 destroy_workqueue(cm.wq); 3384 return ret; 3385} 3386 3387static void __exit ib_cm_cleanup(void) 3388{ 3389 destroy_workqueue(cm.wq); 3390 ib_unregister_client(&cm_client); 3391 idr_destroy(&cm.local_id_table); 3392} 3393 3394module_init(ib_cm_init); 3395module_exit(ib_cm_cleanup); 3396 3397
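/*
 * Illustrative usage sketch: how a CM consumer (ULP) would typically drive
 * its RC QP through INIT/RTR/RTS with ib_cm_init_qp_attr() once the
 * REQ/REP exchange has progressed far enough.  The helper name and the
 * caller-supplied qp are placeholders; only ib_cm_init_qp_attr() and
 * ib_modify_qp() are taken from the in-tree APIs.  Which transitions are
 * legal at a given point is governed by the cm_id state checks in
 * cm_init_qp_init_attr(), cm_init_qp_rtr_attr() and cm_init_qp_rts_attr()
 * above.
 */
static int example_move_qp_to_rts(struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask;
	int ret;

	/* RESET -> INIT: pkey index, port number and access flags come from the CM. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	/* INIT -> RTR: primary path AV, path MTU, remote QPN, RQ PSN (plus RD-atomic and alternate path for RC). */
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	/* RTR -> RTS: SQ PSN (plus ack timeout, retry counts and initiator depth for RC). */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;
	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}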