cm.c revision 87fd1a11ae91ab42fac978467667c61fee9f01da
1/* 2 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved. 3 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 4 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. 5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 * 35 * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $ 36 */ 37#include <linux/dma-mapping.h> 38#include <linux/err.h> 39#include <linux/idr.h> 40#include <linux/interrupt.h> 41#include <linux/pci.h> 42#include <linux/rbtree.h> 43#include <linux/spinlock.h> 44#include <linux/workqueue.h> 45 46#include <rdma/ib_cache.h> 47#include <rdma/ib_cm.h> 48#include "cm_msgs.h" 49 50MODULE_AUTHOR("Sean Hefty"); 51MODULE_DESCRIPTION("InfiniBand CM"); 52MODULE_LICENSE("Dual BSD/GPL"); 53 54static void cm_add_one(struct ib_device *device); 55static void cm_remove_one(struct ib_device *device); 56 57static struct ib_client cm_client = { 58 .name = "cm", 59 .add = cm_add_one, 60 .remove = cm_remove_one 61}; 62 63static struct ib_cm { 64 spinlock_t lock; 65 struct list_head device_list; 66 rwlock_t device_lock; 67 struct rb_root listen_service_table; 68 u64 listen_service_id; 69 /* struct rb_root peer_service_table; todo: fix peer to peer */ 70 struct rb_root remote_qp_table; 71 struct rb_root remote_id_table; 72 struct rb_root remote_sidr_table; 73 struct idr local_id_table; 74 struct workqueue_struct *wq; 75} cm; 76 77struct cm_port { 78 struct cm_device *cm_dev; 79 struct ib_mad_agent *mad_agent; 80 u8 port_num; 81}; 82 83struct cm_device { 84 struct list_head list; 85 struct ib_device *device; 86 __be64 ca_guid; 87 struct cm_port port[0]; 88}; 89 90struct cm_av { 91 struct cm_port *port; 92 union ib_gid dgid; 93 struct ib_ah_attr ah_attr; 94 u16 pkey_index; 95 u8 packet_life_time; 96}; 97 98struct cm_work { 99 struct work_struct work; 100 struct list_head list; 101 struct cm_port *port; 102 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ 103 __be32 local_id; /* Established / timewait */ 104 __be32 remote_id; 105 struct ib_cm_event cm_event; 106 struct ib_sa_path_rec path[0]; 107}; 108 109struct cm_timewait_info { 110 struct cm_work work; /* Must be first. 
*/ 111 struct rb_node remote_qp_node; 112 struct rb_node remote_id_node; 113 __be64 remote_ca_guid; 114 __be32 remote_qpn; 115 u8 inserted_remote_qp; 116 u8 inserted_remote_id; 117}; 118 119struct cm_id_private { 120 struct ib_cm_id id; 121 122 struct rb_node service_node; 123 struct rb_node sidr_id_node; 124 spinlock_t lock; /* Do not acquire inside cm.lock */ 125 wait_queue_head_t wait; 126 atomic_t refcount; 127 128 struct ib_mad_send_buf *msg; 129 struct cm_timewait_info *timewait_info; 130 /* todo: use alternate port on send failure */ 131 struct cm_av av; 132 struct cm_av alt_av; 133 134 void *private_data; 135 __be64 tid; 136 __be32 local_qpn; 137 __be32 remote_qpn; 138 enum ib_qp_type qp_type; 139 __be32 sq_psn; 140 __be32 rq_psn; 141 int timeout_ms; 142 enum ib_mtu path_mtu; 143 u8 private_data_len; 144 u8 max_cm_retries; 145 u8 peer_to_peer; 146 u8 responder_resources; 147 u8 initiator_depth; 148 u8 local_ack_timeout; 149 u8 retry_count; 150 u8 rnr_retry_count; 151 u8 service_timeout; 152 153 struct list_head work_list; 154 atomic_t work_count; 155}; 156 157static void cm_work_handler(void *data); 158 159static inline void cm_deref_id(struct cm_id_private *cm_id_priv) 160{ 161 if (atomic_dec_and_test(&cm_id_priv->refcount)) 162 wake_up(&cm_id_priv->wait); 163} 164 165static int cm_alloc_msg(struct cm_id_private *cm_id_priv, 166 struct ib_mad_send_buf **msg) 167{ 168 struct ib_mad_agent *mad_agent; 169 struct ib_mad_send_buf *m; 170 struct ib_ah *ah; 171 172 mad_agent = cm_id_priv->av.port->mad_agent; 173 ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr); 174 if (IS_ERR(ah)) 175 return PTR_ERR(ah); 176 177 m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, 178 cm_id_priv->av.pkey_index, 179 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, 180 GFP_ATOMIC); 181 if (IS_ERR(m)) { 182 ib_destroy_ah(ah); 183 return PTR_ERR(m); 184 } 185 186 /* Timeout set by caller if response is expected. 
*/ 187 m->ah = ah; 188 m->retries = cm_id_priv->max_cm_retries; 189 190 atomic_inc(&cm_id_priv->refcount); 191 m->context[0] = cm_id_priv; 192 *msg = m; 193 return 0; 194} 195 196static int cm_alloc_response_msg(struct cm_port *port, 197 struct ib_mad_recv_wc *mad_recv_wc, 198 struct ib_mad_send_buf **msg) 199{ 200 struct ib_mad_send_buf *m; 201 struct ib_ah *ah; 202 203 ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc, 204 mad_recv_wc->recv_buf.grh, port->port_num); 205 if (IS_ERR(ah)) 206 return PTR_ERR(ah); 207 208 m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index, 209 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, 210 GFP_ATOMIC); 211 if (IS_ERR(m)) { 212 ib_destroy_ah(ah); 213 return PTR_ERR(m); 214 } 215 m->ah = ah; 216 *msg = m; 217 return 0; 218} 219 220static void cm_free_msg(struct ib_mad_send_buf *msg) 221{ 222 ib_destroy_ah(msg->ah); 223 if (msg->context[0]) 224 cm_deref_id(msg->context[0]); 225 ib_free_send_mad(msg); 226} 227 228static void * cm_copy_private_data(const void *private_data, 229 u8 private_data_len) 230{ 231 void *data; 232 233 if (!private_data || !private_data_len) 234 return NULL; 235 236 data = kmalloc(private_data_len, GFP_KERNEL); 237 if (!data) 238 return ERR_PTR(-ENOMEM); 239 240 memcpy(data, private_data, private_data_len); 241 return data; 242} 243 244static void cm_set_private_data(struct cm_id_private *cm_id_priv, 245 void *private_data, u8 private_data_len) 246{ 247 if (cm_id_priv->private_data && cm_id_priv->private_data_len) 248 kfree(cm_id_priv->private_data); 249 250 cm_id_priv->private_data = private_data; 251 cm_id_priv->private_data_len = private_data_len; 252} 253 254static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num, 255 u16 dlid, u8 sl, u16 src_path_bits) 256{ 257 memset(ah_attr, 0, sizeof ah_attr); 258 ah_attr->dlid = dlid; 259 ah_attr->sl = sl; 260 ah_attr->src_path_bits = src_path_bits; 261 ah_attr->port_num = port_num; 262} 263 264static void cm_init_av_for_response(struct cm_port *port, 265 struct ib_wc *wc, struct cm_av *av) 266{ 267 av->port = port; 268 av->pkey_index = wc->pkey_index; 269 cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid, 270 wc->sl, wc->dlid_path_bits); 271} 272 273static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) 274{ 275 struct cm_device *cm_dev; 276 struct cm_port *port = NULL; 277 unsigned long flags; 278 int ret; 279 u8 p; 280 281 read_lock_irqsave(&cm.device_lock, flags); 282 list_for_each_entry(cm_dev, &cm.device_list, list) { 283 if (!ib_find_cached_gid(cm_dev->device, &path->sgid, 284 &p, NULL)) { 285 port = &cm_dev->port[p-1]; 286 break; 287 } 288 } 289 read_unlock_irqrestore(&cm.device_lock, flags); 290 291 if (!port) 292 return -EINVAL; 293 294 ret = ib_find_cached_pkey(cm_dev->device, port->port_num, 295 be16_to_cpu(path->pkey), &av->pkey_index); 296 if (ret) 297 return ret; 298 299 av->port = port; 300 cm_set_ah_attr(&av->ah_attr, av->port->port_num, 301 be16_to_cpu(path->dlid), path->sl, 302 be16_to_cpu(path->slid) & 0x7F); 303 av->packet_life_time = path->packet_life_time; 304 return 0; 305} 306 307static int cm_alloc_id(struct cm_id_private *cm_id_priv) 308{ 309 unsigned long flags; 310 int ret; 311 static int next_id; 312 313 do { 314 spin_lock_irqsave(&cm.lock, flags); 315 ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id++, 316 (__force int *) &cm_id_priv->id.local_id); 317 spin_unlock_irqrestore(&cm.lock, flags); 318 } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) ); 319 return 
ret; 320} 321 322static void cm_free_id(__be32 local_id) 323{ 324 unsigned long flags; 325 326 spin_lock_irqsave(&cm.lock, flags); 327 idr_remove(&cm.local_id_table, (__force int) local_id); 328 spin_unlock_irqrestore(&cm.lock, flags); 329} 330 331static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id) 332{ 333 struct cm_id_private *cm_id_priv; 334 335 cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id); 336 if (cm_id_priv) { 337 if (cm_id_priv->id.remote_id == remote_id) 338 atomic_inc(&cm_id_priv->refcount); 339 else 340 cm_id_priv = NULL; 341 } 342 343 return cm_id_priv; 344} 345 346static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id) 347{ 348 struct cm_id_private *cm_id_priv; 349 unsigned long flags; 350 351 spin_lock_irqsave(&cm.lock, flags); 352 cm_id_priv = cm_get_id(local_id, remote_id); 353 spin_unlock_irqrestore(&cm.lock, flags); 354 355 return cm_id_priv; 356} 357 358static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv) 359{ 360 struct rb_node **link = &cm.listen_service_table.rb_node; 361 struct rb_node *parent = NULL; 362 struct cm_id_private *cur_cm_id_priv; 363 __be64 service_id = cm_id_priv->id.service_id; 364 __be64 service_mask = cm_id_priv->id.service_mask; 365 366 while (*link) { 367 parent = *link; 368 cur_cm_id_priv = rb_entry(parent, struct cm_id_private, 369 service_node); 370 if ((cur_cm_id_priv->id.service_mask & service_id) == 371 (service_mask & cur_cm_id_priv->id.service_id) && 372 (cm_id_priv->id.device == cur_cm_id_priv->id.device)) 373 return cur_cm_id_priv; 374 375 if (cm_id_priv->id.device < cur_cm_id_priv->id.device) 376 link = &(*link)->rb_left; 377 else if (cm_id_priv->id.device > cur_cm_id_priv->id.device) 378 link = &(*link)->rb_right; 379 else if (service_id < cur_cm_id_priv->id.service_id) 380 link = &(*link)->rb_left; 381 else 382 link = &(*link)->rb_right; 383 } 384 rb_link_node(&cm_id_priv->service_node, parent, link); 385 rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table); 386 return NULL; 387} 388 389static struct cm_id_private * cm_find_listen(struct ib_device *device, 390 __be64 service_id) 391{ 392 struct rb_node *node = cm.listen_service_table.rb_node; 393 struct cm_id_private *cm_id_priv; 394 395 while (node) { 396 cm_id_priv = rb_entry(node, struct cm_id_private, service_node); 397 if ((cm_id_priv->id.service_mask & service_id) == 398 cm_id_priv->id.service_id && 399 (cm_id_priv->id.device == device)) 400 return cm_id_priv; 401 402 if (device < cm_id_priv->id.device) 403 node = node->rb_left; 404 else if (device > cm_id_priv->id.device) 405 node = node->rb_right; 406 else if (service_id < cm_id_priv->id.service_id) 407 node = node->rb_left; 408 else 409 node = node->rb_right; 410 } 411 return NULL; 412} 413 414static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info 415 *timewait_info) 416{ 417 struct rb_node **link = &cm.remote_id_table.rb_node; 418 struct rb_node *parent = NULL; 419 struct cm_timewait_info *cur_timewait_info; 420 __be64 remote_ca_guid = timewait_info->remote_ca_guid; 421 __be32 remote_id = timewait_info->work.remote_id; 422 423 while (*link) { 424 parent = *link; 425 cur_timewait_info = rb_entry(parent, struct cm_timewait_info, 426 remote_id_node); 427 if (remote_id < cur_timewait_info->work.remote_id) 428 link = &(*link)->rb_left; 429 else if (remote_id > cur_timewait_info->work.remote_id) 430 link = &(*link)->rb_right; 431 else if (remote_ca_guid < cur_timewait_info->remote_ca_guid) 432 link = 
&(*link)->rb_left; 433 else if (remote_ca_guid > cur_timewait_info->remote_ca_guid) 434 link = &(*link)->rb_right; 435 else 436 return cur_timewait_info; 437 } 438 timewait_info->inserted_remote_id = 1; 439 rb_link_node(&timewait_info->remote_id_node, parent, link); 440 rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table); 441 return NULL; 442} 443 444static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid, 445 __be32 remote_id) 446{ 447 struct rb_node *node = cm.remote_id_table.rb_node; 448 struct cm_timewait_info *timewait_info; 449 450 while (node) { 451 timewait_info = rb_entry(node, struct cm_timewait_info, 452 remote_id_node); 453 if (remote_id < timewait_info->work.remote_id) 454 node = node->rb_left; 455 else if (remote_id > timewait_info->work.remote_id) 456 node = node->rb_right; 457 else if (remote_ca_guid < timewait_info->remote_ca_guid) 458 node = node->rb_left; 459 else if (remote_ca_guid > timewait_info->remote_ca_guid) 460 node = node->rb_right; 461 else 462 return timewait_info; 463 } 464 return NULL; 465} 466 467static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info 468 *timewait_info) 469{ 470 struct rb_node **link = &cm.remote_qp_table.rb_node; 471 struct rb_node *parent = NULL; 472 struct cm_timewait_info *cur_timewait_info; 473 __be64 remote_ca_guid = timewait_info->remote_ca_guid; 474 __be32 remote_qpn = timewait_info->remote_qpn; 475 476 while (*link) { 477 parent = *link; 478 cur_timewait_info = rb_entry(parent, struct cm_timewait_info, 479 remote_qp_node); 480 if (remote_qpn < cur_timewait_info->remote_qpn) 481 link = &(*link)->rb_left; 482 else if (remote_qpn > cur_timewait_info->remote_qpn) 483 link = &(*link)->rb_right; 484 else if (remote_ca_guid < cur_timewait_info->remote_ca_guid) 485 link = &(*link)->rb_left; 486 else if (remote_ca_guid > cur_timewait_info->remote_ca_guid) 487 link = &(*link)->rb_right; 488 else 489 return cur_timewait_info; 490 } 491 timewait_info->inserted_remote_qp = 1; 492 rb_link_node(&timewait_info->remote_qp_node, parent, link); 493 rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table); 494 return NULL; 495} 496 497static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private 498 *cm_id_priv) 499{ 500 struct rb_node **link = &cm.remote_sidr_table.rb_node; 501 struct rb_node *parent = NULL; 502 struct cm_id_private *cur_cm_id_priv; 503 union ib_gid *port_gid = &cm_id_priv->av.dgid; 504 __be32 remote_id = cm_id_priv->id.remote_id; 505 506 while (*link) { 507 parent = *link; 508 cur_cm_id_priv = rb_entry(parent, struct cm_id_private, 509 sidr_id_node); 510 if (remote_id < cur_cm_id_priv->id.remote_id) 511 link = &(*link)->rb_left; 512 else if (remote_id > cur_cm_id_priv->id.remote_id) 513 link = &(*link)->rb_right; 514 else { 515 int cmp; 516 cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid, 517 sizeof *port_gid); 518 if (cmp < 0) 519 link = &(*link)->rb_left; 520 else if (cmp > 0) 521 link = &(*link)->rb_right; 522 else 523 return cur_cm_id_priv; 524 } 525 } 526 rb_link_node(&cm_id_priv->sidr_id_node, parent, link); 527 rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 528 return NULL; 529} 530 531static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv, 532 enum ib_cm_sidr_status status) 533{ 534 struct ib_cm_sidr_rep_param param; 535 536 memset(¶m, 0, sizeof param); 537 param.status = status; 538 ib_send_cm_sidr_rep(&cm_id_priv->id, ¶m); 539} 540 541struct ib_cm_id *ib_create_cm_id(struct ib_device *device, 542 ib_cm_handler 
cm_handler, 543 void *context) 544{ 545 struct cm_id_private *cm_id_priv; 546 int ret; 547 548 cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL); 549 if (!cm_id_priv) 550 return ERR_PTR(-ENOMEM); 551 552 cm_id_priv->id.state = IB_CM_IDLE; 553 cm_id_priv->id.device = device; 554 cm_id_priv->id.cm_handler = cm_handler; 555 cm_id_priv->id.context = context; 556 cm_id_priv->id.remote_cm_qpn = 1; 557 ret = cm_alloc_id(cm_id_priv); 558 if (ret) 559 goto error; 560 561 spin_lock_init(&cm_id_priv->lock); 562 init_waitqueue_head(&cm_id_priv->wait); 563 INIT_LIST_HEAD(&cm_id_priv->work_list); 564 atomic_set(&cm_id_priv->work_count, -1); 565 atomic_set(&cm_id_priv->refcount, 1); 566 return &cm_id_priv->id; 567 568error: 569 kfree(cm_id_priv); 570 return ERR_PTR(-ENOMEM); 571} 572EXPORT_SYMBOL(ib_create_cm_id); 573 574static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv) 575{ 576 struct cm_work *work; 577 578 if (list_empty(&cm_id_priv->work_list)) 579 return NULL; 580 581 work = list_entry(cm_id_priv->work_list.next, struct cm_work, list); 582 list_del(&work->list); 583 return work; 584} 585 586static void cm_free_work(struct cm_work *work) 587{ 588 if (work->mad_recv_wc) 589 ib_free_recv_mad(work->mad_recv_wc); 590 kfree(work); 591} 592 593static inline int cm_convert_to_ms(int iba_time) 594{ 595 /* approximate conversion to ms from 4.096us x 2^iba_time */ 596 return 1 << max(iba_time - 8, 0); 597} 598 599static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info) 600{ 601 unsigned long flags; 602 603 if (!timewait_info->inserted_remote_id && 604 !timewait_info->inserted_remote_qp) 605 return; 606 607 spin_lock_irqsave(&cm.lock, flags); 608 if (timewait_info->inserted_remote_id) { 609 rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table); 610 timewait_info->inserted_remote_id = 0; 611 } 612 613 if (timewait_info->inserted_remote_qp) { 614 rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table); 615 timewait_info->inserted_remote_qp = 0; 616 } 617 spin_unlock_irqrestore(&cm.lock, flags); 618} 619 620static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id) 621{ 622 struct cm_timewait_info *timewait_info; 623 624 timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL); 625 if (!timewait_info) 626 return ERR_PTR(-ENOMEM); 627 628 timewait_info->work.local_id = local_id; 629 INIT_WORK(&timewait_info->work.work, cm_work_handler, 630 &timewait_info->work); 631 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; 632 return timewait_info; 633} 634 635static void cm_enter_timewait(struct cm_id_private *cm_id_priv) 636{ 637 int wait_time; 638 639 /* 640 * The cm_id could be destroyed by the user before we exit timewait. 641 * To protect against this, we search for the cm_id after exiting 642 * timewait before notifying the user that we've exited timewait. 
643 */ 644 cm_id_priv->id.state = IB_CM_TIMEWAIT; 645 wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout); 646 queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, 647 msecs_to_jiffies(wait_time)); 648 cm_id_priv->timewait_info = NULL; 649} 650 651static void cm_reset_to_idle(struct cm_id_private *cm_id_priv) 652{ 653 cm_id_priv->id.state = IB_CM_IDLE; 654 if (cm_id_priv->timewait_info) { 655 cm_cleanup_timewait(cm_id_priv->timewait_info); 656 kfree(cm_id_priv->timewait_info); 657 cm_id_priv->timewait_info = NULL; 658 } 659} 660 661void ib_destroy_cm_id(struct ib_cm_id *cm_id) 662{ 663 struct cm_id_private *cm_id_priv; 664 struct cm_work *work; 665 unsigned long flags; 666 667 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 668retest: 669 spin_lock_irqsave(&cm_id_priv->lock, flags); 670 switch (cm_id->state) { 671 case IB_CM_LISTEN: 672 cm_id->state = IB_CM_IDLE; 673 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 674 spin_lock_irqsave(&cm.lock, flags); 675 rb_erase(&cm_id_priv->service_node, &cm.listen_service_table); 676 spin_unlock_irqrestore(&cm.lock, flags); 677 break; 678 case IB_CM_SIDR_REQ_SENT: 679 cm_id->state = IB_CM_IDLE; 680 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 681 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 682 break; 683 case IB_CM_SIDR_REQ_RCVD: 684 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 685 cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT); 686 break; 687 case IB_CM_REQ_SENT: 688 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 689 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 690 ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT, 691 &cm_id_priv->av.port->cm_dev->ca_guid, 692 sizeof cm_id_priv->av.port->cm_dev->ca_guid, 693 NULL, 0); 694 break; 695 case IB_CM_MRA_REQ_RCVD: 696 case IB_CM_REP_SENT: 697 case IB_CM_MRA_REP_RCVD: 698 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 699 /* Fall through */ 700 case IB_CM_REQ_RCVD: 701 case IB_CM_MRA_REQ_SENT: 702 case IB_CM_REP_RCVD: 703 case IB_CM_MRA_REP_SENT: 704 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 705 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, 706 NULL, 0, NULL, 0); 707 break; 708 case IB_CM_ESTABLISHED: 709 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 710 ib_send_cm_dreq(cm_id, NULL, 0); 711 goto retest; 712 case IB_CM_DREQ_SENT: 713 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 714 cm_enter_timewait(cm_id_priv); 715 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 716 break; 717 case IB_CM_DREQ_RCVD: 718 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 719 ib_send_cm_drep(cm_id, NULL, 0); 720 break; 721 default: 722 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 723 break; 724 } 725 726 cm_free_id(cm_id->local_id); 727 atomic_dec(&cm_id_priv->refcount); 728 wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount)); 729 while ((work = cm_dequeue_work(cm_id_priv)) != NULL) 730 cm_free_work(work); 731 if (cm_id_priv->private_data && cm_id_priv->private_data_len) 732 kfree(cm_id_priv->private_data); 733 kfree(cm_id_priv); 734} 735EXPORT_SYMBOL(ib_destroy_cm_id); 736 737int ib_cm_listen(struct ib_cm_id *cm_id, 738 __be64 service_id, 739 __be64 service_mask) 740{ 741 struct cm_id_private *cm_id_priv, *cur_cm_id_priv; 742 unsigned long flags; 743 int ret = 0; 744 745 service_mask = service_mask ? 
service_mask : 746 __constant_cpu_to_be64(~0ULL); 747 service_id &= service_mask; 748 if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID && 749 (service_id != IB_CM_ASSIGN_SERVICE_ID)) 750 return -EINVAL; 751 752 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 753 BUG_ON(cm_id->state != IB_CM_IDLE); 754 755 cm_id->state = IB_CM_LISTEN; 756 757 spin_lock_irqsave(&cm.lock, flags); 758 if (service_id == IB_CM_ASSIGN_SERVICE_ID) { 759 cm_id->service_id = cpu_to_be64(cm.listen_service_id++); 760 cm_id->service_mask = __constant_cpu_to_be64(~0ULL); 761 } else { 762 cm_id->service_id = service_id; 763 cm_id->service_mask = service_mask; 764 } 765 cur_cm_id_priv = cm_insert_listen(cm_id_priv); 766 spin_unlock_irqrestore(&cm.lock, flags); 767 768 if (cur_cm_id_priv) { 769 cm_id->state = IB_CM_IDLE; 770 ret = -EBUSY; 771 } 772 return ret; 773} 774EXPORT_SYMBOL(ib_cm_listen); 775 776static __be64 cm_form_tid(struct cm_id_private *cm_id_priv, 777 enum cm_msg_sequence msg_seq) 778{ 779 u64 hi_tid, low_tid; 780 781 hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32; 782 low_tid = (u64) ((__force u32)cm_id_priv->id.local_id | 783 (msg_seq << 30)); 784 return cpu_to_be64(hi_tid | low_tid); 785} 786 787static void cm_format_mad_hdr(struct ib_mad_hdr *hdr, 788 __be16 attr_id, __be64 tid) 789{ 790 hdr->base_version = IB_MGMT_BASE_VERSION; 791 hdr->mgmt_class = IB_MGMT_CLASS_CM; 792 hdr->class_version = IB_CM_CLASS_VERSION; 793 hdr->method = IB_MGMT_METHOD_SEND; 794 hdr->attr_id = attr_id; 795 hdr->tid = tid; 796} 797 798static void cm_format_req(struct cm_req_msg *req_msg, 799 struct cm_id_private *cm_id_priv, 800 struct ib_cm_req_param *param) 801{ 802 cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID, 803 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ)); 804 805 req_msg->local_comm_id = cm_id_priv->id.local_id; 806 req_msg->service_id = param->service_id; 807 req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid; 808 cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num)); 809 cm_req_set_resp_res(req_msg, param->responder_resources); 810 cm_req_set_init_depth(req_msg, param->initiator_depth); 811 cm_req_set_remote_resp_timeout(req_msg, 812 param->remote_cm_response_timeout); 813 cm_req_set_qp_type(req_msg, param->qp_type); 814 cm_req_set_flow_ctrl(req_msg, param->flow_control); 815 cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn)); 816 cm_req_set_local_resp_timeout(req_msg, 817 param->local_cm_response_timeout); 818 cm_req_set_retry_count(req_msg, param->retry_count); 819 req_msg->pkey = param->primary_path->pkey; 820 cm_req_set_path_mtu(req_msg, param->primary_path->mtu); 821 cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count); 822 cm_req_set_max_cm_retries(req_msg, param->max_cm_retries); 823 cm_req_set_srq(req_msg, param->srq); 824 825 req_msg->primary_local_lid = param->primary_path->slid; 826 req_msg->primary_remote_lid = param->primary_path->dlid; 827 req_msg->primary_local_gid = param->primary_path->sgid; 828 req_msg->primary_remote_gid = param->primary_path->dgid; 829 cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label); 830 cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate); 831 req_msg->primary_traffic_class = param->primary_path->traffic_class; 832 req_msg->primary_hop_limit = param->primary_path->hop_limit; 833 cm_req_set_primary_sl(req_msg, param->primary_path->sl); 834 cm_req_set_primary_subnet_local(req_msg, 1); /* local only... 
*/ 835 cm_req_set_primary_local_ack_timeout(req_msg, 836 min(31, param->primary_path->packet_life_time + 1)); 837 838 if (param->alternate_path) { 839 req_msg->alt_local_lid = param->alternate_path->slid; 840 req_msg->alt_remote_lid = param->alternate_path->dlid; 841 req_msg->alt_local_gid = param->alternate_path->sgid; 842 req_msg->alt_remote_gid = param->alternate_path->dgid; 843 cm_req_set_alt_flow_label(req_msg, 844 param->alternate_path->flow_label); 845 cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate); 846 req_msg->alt_traffic_class = param->alternate_path->traffic_class; 847 req_msg->alt_hop_limit = param->alternate_path->hop_limit; 848 cm_req_set_alt_sl(req_msg, param->alternate_path->sl); 849 cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */ 850 cm_req_set_alt_local_ack_timeout(req_msg, 851 min(31, param->alternate_path->packet_life_time + 1)); 852 } 853 854 if (param->private_data && param->private_data_len) 855 memcpy(req_msg->private_data, param->private_data, 856 param->private_data_len); 857} 858 859static int cm_validate_req_param(struct ib_cm_req_param *param) 860{ 861 /* peer-to-peer not supported */ 862 if (param->peer_to_peer) 863 return -EINVAL; 864 865 if (!param->primary_path) 866 return -EINVAL; 867 868 if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC) 869 return -EINVAL; 870 871 if (param->private_data && 872 param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE) 873 return -EINVAL; 874 875 if (param->alternate_path && 876 (param->alternate_path->pkey != param->primary_path->pkey || 877 param->alternate_path->mtu != param->primary_path->mtu)) 878 return -EINVAL; 879 880 return 0; 881} 882 883int ib_send_cm_req(struct ib_cm_id *cm_id, 884 struct ib_cm_req_param *param) 885{ 886 struct cm_id_private *cm_id_priv; 887 struct cm_req_msg *req_msg; 888 unsigned long flags; 889 int ret; 890 891 ret = cm_validate_req_param(param); 892 if (ret) 893 return ret; 894 895 /* Verify that we're not in timewait. 
*/ 896 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 897 spin_lock_irqsave(&cm_id_priv->lock, flags); 898 if (cm_id->state != IB_CM_IDLE) { 899 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 900 ret = -EINVAL; 901 goto out; 902 } 903 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 904 905 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> 906 id.local_id); 907 if (IS_ERR(cm_id_priv->timewait_info)) 908 goto out; 909 910 ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); 911 if (ret) 912 goto error1; 913 if (param->alternate_path) { 914 ret = cm_init_av_by_path(param->alternate_path, 915 &cm_id_priv->alt_av); 916 if (ret) 917 goto error1; 918 } 919 cm_id->service_id = param->service_id; 920 cm_id->service_mask = __constant_cpu_to_be64(~0ULL); 921 cm_id_priv->timeout_ms = cm_convert_to_ms( 922 param->primary_path->packet_life_time) * 2 + 923 cm_convert_to_ms( 924 param->remote_cm_response_timeout); 925 cm_id_priv->max_cm_retries = param->max_cm_retries; 926 cm_id_priv->initiator_depth = param->initiator_depth; 927 cm_id_priv->responder_resources = param->responder_resources; 928 cm_id_priv->retry_count = param->retry_count; 929 cm_id_priv->path_mtu = param->primary_path->mtu; 930 cm_id_priv->qp_type = param->qp_type; 931 932 ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg); 933 if (ret) 934 goto error1; 935 936 req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad; 937 cm_format_req(req_msg, cm_id_priv, param); 938 cm_id_priv->tid = req_msg->hdr.tid; 939 cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms; 940 cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT; 941 942 cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg); 943 cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg); 944 cm_id_priv->local_ack_timeout = 945 cm_req_get_primary_local_ack_timeout(req_msg); 946 947 spin_lock_irqsave(&cm_id_priv->lock, flags); 948 ret = ib_post_send_mad(cm_id_priv->msg, NULL); 949 if (ret) { 950 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 951 goto error2; 952 } 953 BUG_ON(cm_id->state != IB_CM_IDLE); 954 cm_id->state = IB_CM_REQ_SENT; 955 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 956 return 0; 957 958error2: cm_free_msg(cm_id_priv->msg); 959error1: kfree(cm_id_priv->timewait_info); 960out: return ret; 961} 962EXPORT_SYMBOL(ib_send_cm_req); 963 964static int cm_issue_rej(struct cm_port *port, 965 struct ib_mad_recv_wc *mad_recv_wc, 966 enum ib_cm_rej_reason reason, 967 enum cm_msg_response msg_rejected, 968 void *ari, u8 ari_length) 969{ 970 struct ib_mad_send_buf *msg = NULL; 971 struct cm_rej_msg *rej_msg, *rcv_msg; 972 int ret; 973 974 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg); 975 if (ret) 976 return ret; 977 978 /* We just need common CM header information. Cast to any message. 
*/ 979 rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad; 980 rej_msg = (struct cm_rej_msg *) msg->mad; 981 982 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid); 983 rej_msg->remote_comm_id = rcv_msg->local_comm_id; 984 rej_msg->local_comm_id = rcv_msg->remote_comm_id; 985 cm_rej_set_msg_rejected(rej_msg, msg_rejected); 986 rej_msg->reason = cpu_to_be16(reason); 987 988 if (ari && ari_length) { 989 cm_rej_set_reject_info_len(rej_msg, ari_length); 990 memcpy(rej_msg->ari, ari, ari_length); 991 } 992 993 ret = ib_post_send_mad(msg, NULL); 994 if (ret) 995 cm_free_msg(msg); 996 997 return ret; 998} 999 1000static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid, 1001 __be32 local_qpn, __be32 remote_qpn) 1002{ 1003 return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) || 1004 ((local_ca_guid == remote_ca_guid) && 1005 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn)))); 1006} 1007 1008static void cm_format_paths_from_req(struct cm_req_msg *req_msg, 1009 struct ib_sa_path_rec *primary_path, 1010 struct ib_sa_path_rec *alt_path) 1011{ 1012 memset(primary_path, 0, sizeof *primary_path); 1013 primary_path->dgid = req_msg->primary_local_gid; 1014 primary_path->sgid = req_msg->primary_remote_gid; 1015 primary_path->dlid = req_msg->primary_local_lid; 1016 primary_path->slid = req_msg->primary_remote_lid; 1017 primary_path->flow_label = cm_req_get_primary_flow_label(req_msg); 1018 primary_path->hop_limit = req_msg->primary_hop_limit; 1019 primary_path->traffic_class = req_msg->primary_traffic_class; 1020 primary_path->reversible = 1; 1021 primary_path->pkey = req_msg->pkey; 1022 primary_path->sl = cm_req_get_primary_sl(req_msg); 1023 primary_path->mtu_selector = IB_SA_EQ; 1024 primary_path->mtu = cm_req_get_path_mtu(req_msg); 1025 primary_path->rate_selector = IB_SA_EQ; 1026 primary_path->rate = cm_req_get_primary_packet_rate(req_msg); 1027 primary_path->packet_life_time_selector = IB_SA_EQ; 1028 primary_path->packet_life_time = 1029 cm_req_get_primary_local_ack_timeout(req_msg); 1030 primary_path->packet_life_time -= (primary_path->packet_life_time > 0); 1031 1032 if (req_msg->alt_local_lid) { 1033 memset(alt_path, 0, sizeof *alt_path); 1034 alt_path->dgid = req_msg->alt_local_gid; 1035 alt_path->sgid = req_msg->alt_remote_gid; 1036 alt_path->dlid = req_msg->alt_local_lid; 1037 alt_path->slid = req_msg->alt_remote_lid; 1038 alt_path->flow_label = cm_req_get_alt_flow_label(req_msg); 1039 alt_path->hop_limit = req_msg->alt_hop_limit; 1040 alt_path->traffic_class = req_msg->alt_traffic_class; 1041 alt_path->reversible = 1; 1042 alt_path->pkey = req_msg->pkey; 1043 alt_path->sl = cm_req_get_alt_sl(req_msg); 1044 alt_path->mtu_selector = IB_SA_EQ; 1045 alt_path->mtu = cm_req_get_path_mtu(req_msg); 1046 alt_path->rate_selector = IB_SA_EQ; 1047 alt_path->rate = cm_req_get_alt_packet_rate(req_msg); 1048 alt_path->packet_life_time_selector = IB_SA_EQ; 1049 alt_path->packet_life_time = 1050 cm_req_get_alt_local_ack_timeout(req_msg); 1051 alt_path->packet_life_time -= (alt_path->packet_life_time > 0); 1052 } 1053} 1054 1055static void cm_format_req_event(struct cm_work *work, 1056 struct cm_id_private *cm_id_priv, 1057 struct ib_cm_id *listen_id) 1058{ 1059 struct cm_req_msg *req_msg; 1060 struct ib_cm_req_event_param *param; 1061 1062 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; 1063 param = &work->cm_event.param.req_rcvd; 1064 param->listen_id = listen_id; 1065 param->port = cm_id_priv->av.port->port_num; 1066 param->primary_path 
= &work->path[0]; 1067 if (req_msg->alt_local_lid) 1068 param->alternate_path = &work->path[1]; 1069 else 1070 param->alternate_path = NULL; 1071 param->remote_ca_guid = req_msg->local_ca_guid; 1072 param->remote_qkey = be32_to_cpu(req_msg->local_qkey); 1073 param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg)); 1074 param->qp_type = cm_req_get_qp_type(req_msg); 1075 param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg)); 1076 param->responder_resources = cm_req_get_init_depth(req_msg); 1077 param->initiator_depth = cm_req_get_resp_res(req_msg); 1078 param->local_cm_response_timeout = 1079 cm_req_get_remote_resp_timeout(req_msg); 1080 param->flow_control = cm_req_get_flow_ctrl(req_msg); 1081 param->remote_cm_response_timeout = 1082 cm_req_get_local_resp_timeout(req_msg); 1083 param->retry_count = cm_req_get_retry_count(req_msg); 1084 param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg); 1085 param->srq = cm_req_get_srq(req_msg); 1086 work->cm_event.private_data = &req_msg->private_data; 1087} 1088 1089static void cm_process_work(struct cm_id_private *cm_id_priv, 1090 struct cm_work *work) 1091{ 1092 unsigned long flags; 1093 int ret; 1094 1095 /* We will typically only have the current event to report. */ 1096 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); 1097 cm_free_work(work); 1098 1099 while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) { 1100 spin_lock_irqsave(&cm_id_priv->lock, flags); 1101 work = cm_dequeue_work(cm_id_priv); 1102 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1103 BUG_ON(!work); 1104 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, 1105 &work->cm_event); 1106 cm_free_work(work); 1107 } 1108 cm_deref_id(cm_id_priv); 1109 if (ret) 1110 ib_destroy_cm_id(&cm_id_priv->id); 1111} 1112 1113static void cm_format_mra(struct cm_mra_msg *mra_msg, 1114 struct cm_id_private *cm_id_priv, 1115 enum cm_msg_response msg_mraed, u8 service_timeout, 1116 const void *private_data, u8 private_data_len) 1117{ 1118 cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid); 1119 cm_mra_set_msg_mraed(mra_msg, msg_mraed); 1120 mra_msg->local_comm_id = cm_id_priv->id.local_id; 1121 mra_msg->remote_comm_id = cm_id_priv->id.remote_id; 1122 cm_mra_set_service_timeout(mra_msg, service_timeout); 1123 1124 if (private_data && private_data_len) 1125 memcpy(mra_msg->private_data, private_data, private_data_len); 1126} 1127 1128static void cm_format_rej(struct cm_rej_msg *rej_msg, 1129 struct cm_id_private *cm_id_priv, 1130 enum ib_cm_rej_reason reason, 1131 void *ari, 1132 u8 ari_length, 1133 const void *private_data, 1134 u8 private_data_len) 1135{ 1136 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid); 1137 rej_msg->remote_comm_id = cm_id_priv->id.remote_id; 1138 1139 switch(cm_id_priv->id.state) { 1140 case IB_CM_REQ_RCVD: 1141 rej_msg->local_comm_id = 0; 1142 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ); 1143 break; 1144 case IB_CM_MRA_REQ_SENT: 1145 rej_msg->local_comm_id = cm_id_priv->id.local_id; 1146 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ); 1147 break; 1148 case IB_CM_REP_RCVD: 1149 case IB_CM_MRA_REP_SENT: 1150 rej_msg->local_comm_id = cm_id_priv->id.local_id; 1151 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP); 1152 break; 1153 default: 1154 rej_msg->local_comm_id = cm_id_priv->id.local_id; 1155 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER); 1156 break; 1157 } 1158 1159 rej_msg->reason = cpu_to_be16(reason); 1160 if (ari && ari_length) { 1161 
cm_rej_set_reject_info_len(rej_msg, ari_length); 1162 memcpy(rej_msg->ari, ari, ari_length); 1163 } 1164 1165 if (private_data && private_data_len) 1166 memcpy(rej_msg->private_data, private_data, private_data_len); 1167} 1168 1169static void cm_dup_req_handler(struct cm_work *work, 1170 struct cm_id_private *cm_id_priv) 1171{ 1172 struct ib_mad_send_buf *msg = NULL; 1173 unsigned long flags; 1174 int ret; 1175 1176 /* Quick state check to discard duplicate REQs. */ 1177 if (cm_id_priv->id.state == IB_CM_REQ_RCVD) 1178 return; 1179 1180 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); 1181 if (ret) 1182 return; 1183 1184 spin_lock_irqsave(&cm_id_priv->lock, flags); 1185 switch (cm_id_priv->id.state) { 1186 case IB_CM_MRA_REQ_SENT: 1187 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 1188 CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout, 1189 cm_id_priv->private_data, 1190 cm_id_priv->private_data_len); 1191 break; 1192 case IB_CM_TIMEWAIT: 1193 cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv, 1194 IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0); 1195 break; 1196 default: 1197 goto unlock; 1198 } 1199 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1200 1201 ret = ib_post_send_mad(msg, NULL); 1202 if (ret) 1203 goto free; 1204 return; 1205 1206unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1207free: cm_free_msg(msg); 1208} 1209 1210static struct cm_id_private * cm_match_req(struct cm_work *work, 1211 struct cm_id_private *cm_id_priv) 1212{ 1213 struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv; 1214 struct cm_timewait_info *timewait_info; 1215 struct cm_req_msg *req_msg; 1216 unsigned long flags; 1217 1218 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; 1219 1220 /* Check for duplicate REQ and stale connections. */ 1221 spin_lock_irqsave(&cm.lock, flags); 1222 timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info); 1223 if (!timewait_info) 1224 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info); 1225 1226 if (timewait_info) { 1227 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id, 1228 timewait_info->work.remote_id); 1229 spin_unlock_irqrestore(&cm.lock, flags); 1230 if (cur_cm_id_priv) { 1231 cm_dup_req_handler(work, cur_cm_id_priv); 1232 cm_deref_id(cur_cm_id_priv); 1233 } else 1234 cm_issue_rej(work->port, work->mad_recv_wc, 1235 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ, 1236 NULL, 0); 1237 goto error; 1238 } 1239 1240 /* Find matching listen request. 
*/ 1241 listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device, 1242 req_msg->service_id); 1243 if (!listen_cm_id_priv) { 1244 spin_unlock_irqrestore(&cm.lock, flags); 1245 cm_issue_rej(work->port, work->mad_recv_wc, 1246 IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ, 1247 NULL, 0); 1248 goto error; 1249 } 1250 atomic_inc(&listen_cm_id_priv->refcount); 1251 atomic_inc(&cm_id_priv->refcount); 1252 cm_id_priv->id.state = IB_CM_REQ_RCVD; 1253 atomic_inc(&cm_id_priv->work_count); 1254 spin_unlock_irqrestore(&cm.lock, flags); 1255 return listen_cm_id_priv; 1256 1257error: cm_cleanup_timewait(cm_id_priv->timewait_info); 1258 return NULL; 1259} 1260 1261static int cm_req_handler(struct cm_work *work) 1262{ 1263 struct ib_cm_id *cm_id; 1264 struct cm_id_private *cm_id_priv, *listen_cm_id_priv; 1265 struct cm_req_msg *req_msg; 1266 int ret; 1267 1268 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; 1269 1270 cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL); 1271 if (IS_ERR(cm_id)) 1272 return PTR_ERR(cm_id); 1273 1274 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1275 cm_id_priv->id.remote_id = req_msg->local_comm_id; 1276 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 1277 &cm_id_priv->av); 1278 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> 1279 id.local_id); 1280 if (IS_ERR(cm_id_priv->timewait_info)) { 1281 ret = PTR_ERR(cm_id_priv->timewait_info); 1282 goto error1; 1283 } 1284 cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id; 1285 cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid; 1286 cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg); 1287 1288 listen_cm_id_priv = cm_match_req(work, cm_id_priv); 1289 if (!listen_cm_id_priv) { 1290 ret = -EINVAL; 1291 goto error2; 1292 } 1293 1294 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; 1295 cm_id_priv->id.context = listen_cm_id_priv->id.context; 1296 cm_id_priv->id.service_id = req_msg->service_id; 1297 cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL); 1298 1299 cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); 1300 ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); 1301 if (ret) 1302 goto error3; 1303 if (req_msg->alt_local_lid) { 1304 ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av); 1305 if (ret) 1306 goto error3; 1307 } 1308 cm_id_priv->tid = req_msg->hdr.tid; 1309 cm_id_priv->timeout_ms = cm_convert_to_ms( 1310 cm_req_get_local_resp_timeout(req_msg)); 1311 cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg); 1312 cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg); 1313 cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg); 1314 cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg); 1315 cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg); 1316 cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg); 1317 cm_id_priv->local_ack_timeout = 1318 cm_req_get_primary_local_ack_timeout(req_msg); 1319 cm_id_priv->retry_count = cm_req_get_retry_count(req_msg); 1320 cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg); 1321 cm_id_priv->qp_type = cm_req_get_qp_type(req_msg); 1322 1323 cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id); 1324 cm_process_work(cm_id_priv, work); 1325 cm_deref_id(listen_cm_id_priv); 1326 return 0; 1327 1328error3: atomic_dec(&cm_id_priv->refcount); 1329 cm_deref_id(listen_cm_id_priv); 1330 cm_cleanup_timewait(cm_id_priv->timewait_info); 1331error2: kfree(cm_id_priv->timewait_info); 
1332 cm_id_priv->timewait_info = NULL; 1333error1: ib_destroy_cm_id(&cm_id_priv->id); 1334 return ret; 1335} 1336 1337static void cm_format_rep(struct cm_rep_msg *rep_msg, 1338 struct cm_id_private *cm_id_priv, 1339 struct ib_cm_rep_param *param) 1340{ 1341 cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid); 1342 rep_msg->local_comm_id = cm_id_priv->id.local_id; 1343 rep_msg->remote_comm_id = cm_id_priv->id.remote_id; 1344 cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num)); 1345 cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn)); 1346 rep_msg->resp_resources = param->responder_resources; 1347 rep_msg->initiator_depth = param->initiator_depth; 1348 cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay); 1349 cm_rep_set_failover(rep_msg, param->failover_accepted); 1350 cm_rep_set_flow_ctrl(rep_msg, param->flow_control); 1351 cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count); 1352 cm_rep_set_srq(rep_msg, param->srq); 1353 rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid; 1354 1355 if (param->private_data && param->private_data_len) 1356 memcpy(rep_msg->private_data, param->private_data, 1357 param->private_data_len); 1358} 1359 1360int ib_send_cm_rep(struct ib_cm_id *cm_id, 1361 struct ib_cm_rep_param *param) 1362{ 1363 struct cm_id_private *cm_id_priv; 1364 struct ib_mad_send_buf *msg; 1365 struct cm_rep_msg *rep_msg; 1366 unsigned long flags; 1367 int ret; 1368 1369 if (param->private_data && 1370 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE) 1371 return -EINVAL; 1372 1373 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1374 spin_lock_irqsave(&cm_id_priv->lock, flags); 1375 if (cm_id->state != IB_CM_REQ_RCVD && 1376 cm_id->state != IB_CM_MRA_REQ_SENT) { 1377 ret = -EINVAL; 1378 goto out; 1379 } 1380 1381 ret = cm_alloc_msg(cm_id_priv, &msg); 1382 if (ret) 1383 goto out; 1384 1385 rep_msg = (struct cm_rep_msg *) msg->mad; 1386 cm_format_rep(rep_msg, cm_id_priv, param); 1387 msg->timeout_ms = cm_id_priv->timeout_ms; 1388 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT; 1389 1390 ret = ib_post_send_mad(msg, NULL); 1391 if (ret) { 1392 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1393 cm_free_msg(msg); 1394 return ret; 1395 } 1396 1397 cm_id->state = IB_CM_REP_SENT; 1398 cm_id_priv->msg = msg; 1399 cm_id_priv->initiator_depth = param->initiator_depth; 1400 cm_id_priv->responder_resources = param->responder_resources; 1401 cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg); 1402 cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg); 1403 1404out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1405 return ret; 1406} 1407EXPORT_SYMBOL(ib_send_cm_rep); 1408 1409static void cm_format_rtu(struct cm_rtu_msg *rtu_msg, 1410 struct cm_id_private *cm_id_priv, 1411 const void *private_data, 1412 u8 private_data_len) 1413{ 1414 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid); 1415 rtu_msg->local_comm_id = cm_id_priv->id.local_id; 1416 rtu_msg->remote_comm_id = cm_id_priv->id.remote_id; 1417 1418 if (private_data && private_data_len) 1419 memcpy(rtu_msg->private_data, private_data, private_data_len); 1420} 1421 1422int ib_send_cm_rtu(struct ib_cm_id *cm_id, 1423 const void *private_data, 1424 u8 private_data_len) 1425{ 1426 struct cm_id_private *cm_id_priv; 1427 struct ib_mad_send_buf *msg; 1428 unsigned long flags; 1429 void *data; 1430 int ret; 1431 1432 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE) 1433 return -EINVAL; 1434 1435 data = 
cm_copy_private_data(private_data, private_data_len); 1436 if (IS_ERR(data)) 1437 return PTR_ERR(data); 1438 1439 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1440 spin_lock_irqsave(&cm_id_priv->lock, flags); 1441 if (cm_id->state != IB_CM_REP_RCVD && 1442 cm_id->state != IB_CM_MRA_REP_SENT) { 1443 ret = -EINVAL; 1444 goto error; 1445 } 1446 1447 ret = cm_alloc_msg(cm_id_priv, &msg); 1448 if (ret) 1449 goto error; 1450 1451 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, 1452 private_data, private_data_len); 1453 1454 ret = ib_post_send_mad(msg, NULL); 1455 if (ret) { 1456 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1457 cm_free_msg(msg); 1458 kfree(data); 1459 return ret; 1460 } 1461 1462 cm_id->state = IB_CM_ESTABLISHED; 1463 cm_set_private_data(cm_id_priv, data, private_data_len); 1464 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1465 return 0; 1466 1467error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1468 kfree(data); 1469 return ret; 1470} 1471EXPORT_SYMBOL(ib_send_cm_rtu); 1472 1473static void cm_format_rep_event(struct cm_work *work) 1474{ 1475 struct cm_rep_msg *rep_msg; 1476 struct ib_cm_rep_event_param *param; 1477 1478 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; 1479 param = &work->cm_event.param.rep_rcvd; 1480 param->remote_ca_guid = rep_msg->local_ca_guid; 1481 param->remote_qkey = be32_to_cpu(rep_msg->local_qkey); 1482 param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg)); 1483 param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg)); 1484 param->responder_resources = rep_msg->initiator_depth; 1485 param->initiator_depth = rep_msg->resp_resources; 1486 param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg); 1487 param->failover_accepted = cm_rep_get_failover(rep_msg); 1488 param->flow_control = cm_rep_get_flow_ctrl(rep_msg); 1489 param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg); 1490 param->srq = cm_rep_get_srq(rep_msg); 1491 work->cm_event.private_data = &rep_msg->private_data; 1492} 1493 1494static void cm_dup_rep_handler(struct cm_work *work) 1495{ 1496 struct cm_id_private *cm_id_priv; 1497 struct cm_rep_msg *rep_msg; 1498 struct ib_mad_send_buf *msg = NULL; 1499 unsigned long flags; 1500 int ret; 1501 1502 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad; 1503 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 1504 rep_msg->local_comm_id); 1505 if (!cm_id_priv) 1506 return; 1507 1508 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); 1509 if (ret) 1510 goto deref; 1511 1512 spin_lock_irqsave(&cm_id_priv->lock, flags); 1513 if (cm_id_priv->id.state == IB_CM_ESTABLISHED) 1514 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, 1515 cm_id_priv->private_data, 1516 cm_id_priv->private_data_len); 1517 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT) 1518 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 1519 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout, 1520 cm_id_priv->private_data, 1521 cm_id_priv->private_data_len); 1522 else 1523 goto unlock; 1524 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1525 1526 ret = ib_post_send_mad(msg, NULL); 1527 if (ret) 1528 goto free; 1529 goto deref; 1530 1531unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1532free: cm_free_msg(msg); 1533deref: cm_deref_id(cm_id_priv); 1534} 1535 1536static int cm_rep_handler(struct cm_work *work) 1537{ 1538 struct cm_id_private *cm_id_priv; 1539 struct cm_rep_msg *rep_msg; 1540 unsigned long flags; 1541 int ret; 1542 1543 rep_msg = 
(struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; 1544 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0); 1545 if (!cm_id_priv) { 1546 cm_dup_rep_handler(work); 1547 return -EINVAL; 1548 } 1549 1550 cm_format_rep_event(work); 1551 1552 spin_lock_irqsave(&cm_id_priv->lock, flags); 1553 switch (cm_id_priv->id.state) { 1554 case IB_CM_REQ_SENT: 1555 case IB_CM_MRA_REQ_RCVD: 1556 break; 1557 default: 1558 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1559 ret = -EINVAL; 1560 goto error; 1561 } 1562 1563 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id; 1564 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid; 1565 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg); 1566 1567 spin_lock(&cm.lock); 1568 /* Check for duplicate REP. */ 1569 if (cm_insert_remote_id(cm_id_priv->timewait_info)) { 1570 spin_unlock(&cm.lock); 1571 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1572 ret = -EINVAL; 1573 goto error; 1574 } 1575 /* Check for a stale connection. */ 1576 if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) { 1577 rb_erase(&cm_id_priv->timewait_info->remote_id_node, 1578 &cm.remote_id_table); 1579 cm_id_priv->timewait_info->inserted_remote_id = 0; 1580 spin_unlock(&cm.lock); 1581 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1582 cm_issue_rej(work->port, work->mad_recv_wc, 1583 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP, 1584 NULL, 0); 1585 ret = -EINVAL; 1586 goto error; 1587 } 1588 spin_unlock(&cm.lock); 1589 1590 cm_id_priv->id.state = IB_CM_REP_RCVD; 1591 cm_id_priv->id.remote_id = rep_msg->local_comm_id; 1592 cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg); 1593 cm_id_priv->initiator_depth = rep_msg->resp_resources; 1594 cm_id_priv->responder_resources = rep_msg->initiator_depth; 1595 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg); 1596 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg); 1597 1598 /* todo: handle peer_to_peer */ 1599 1600 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1601 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1602 if (!ret) 1603 list_add_tail(&work->list, &cm_id_priv->work_list); 1604 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1605 1606 if (ret) 1607 cm_process_work(cm_id_priv, work); 1608 else 1609 cm_deref_id(cm_id_priv); 1610 return 0; 1611 1612error: 1613 cm_deref_id(cm_id_priv); 1614 return ret; 1615} 1616 1617static int cm_establish_handler(struct cm_work *work) 1618{ 1619 struct cm_id_private *cm_id_priv; 1620 unsigned long flags; 1621 int ret; 1622 1623 /* See comment in ib_cm_establish about lookup. 
*/ 1624 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id); 1625 if (!cm_id_priv) 1626 return -EINVAL; 1627 1628 spin_lock_irqsave(&cm_id_priv->lock, flags); 1629 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) { 1630 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1631 goto out; 1632 } 1633 1634 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1635 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1636 if (!ret) 1637 list_add_tail(&work->list, &cm_id_priv->work_list); 1638 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1639 1640 if (ret) 1641 cm_process_work(cm_id_priv, work); 1642 else 1643 cm_deref_id(cm_id_priv); 1644 return 0; 1645out: 1646 cm_deref_id(cm_id_priv); 1647 return -EINVAL; 1648} 1649 1650static int cm_rtu_handler(struct cm_work *work) 1651{ 1652 struct cm_id_private *cm_id_priv; 1653 struct cm_rtu_msg *rtu_msg; 1654 unsigned long flags; 1655 int ret; 1656 1657 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad; 1658 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id, 1659 rtu_msg->local_comm_id); 1660 if (!cm_id_priv) 1661 return -EINVAL; 1662 1663 work->cm_event.private_data = &rtu_msg->private_data; 1664 1665 spin_lock_irqsave(&cm_id_priv->lock, flags); 1666 if (cm_id_priv->id.state != IB_CM_REP_SENT && 1667 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { 1668 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1669 goto out; 1670 } 1671 cm_id_priv->id.state = IB_CM_ESTABLISHED; 1672 1673 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1674 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1675 if (!ret) 1676 list_add_tail(&work->list, &cm_id_priv->work_list); 1677 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1678 1679 if (ret) 1680 cm_process_work(cm_id_priv, work); 1681 else 1682 cm_deref_id(cm_id_priv); 1683 return 0; 1684out: 1685 cm_deref_id(cm_id_priv); 1686 return -EINVAL; 1687} 1688 1689static void cm_format_dreq(struct cm_dreq_msg *dreq_msg, 1690 struct cm_id_private *cm_id_priv, 1691 const void *private_data, 1692 u8 private_data_len) 1693{ 1694 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID, 1695 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ)); 1696 dreq_msg->local_comm_id = cm_id_priv->id.local_id; 1697 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id; 1698 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn); 1699 1700 if (private_data && private_data_len) 1701 memcpy(dreq_msg->private_data, private_data, private_data_len); 1702} 1703 1704int ib_send_cm_dreq(struct ib_cm_id *cm_id, 1705 const void *private_data, 1706 u8 private_data_len) 1707{ 1708 struct cm_id_private *cm_id_priv; 1709 struct ib_mad_send_buf *msg; 1710 unsigned long flags; 1711 int ret; 1712 1713 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE) 1714 return -EINVAL; 1715 1716 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1717 spin_lock_irqsave(&cm_id_priv->lock, flags); 1718 if (cm_id->state != IB_CM_ESTABLISHED) { 1719 ret = -EINVAL; 1720 goto out; 1721 } 1722 1723 ret = cm_alloc_msg(cm_id_priv, &msg); 1724 if (ret) { 1725 cm_enter_timewait(cm_id_priv); 1726 goto out; 1727 } 1728 1729 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, 1730 private_data, private_data_len); 1731 msg->timeout_ms = cm_id_priv->timeout_ms; 1732 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT; 1733 1734 ret = ib_post_send_mad(msg, NULL); 1735 if (ret) { 1736 cm_enter_timewait(cm_id_priv); 1737 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1738 cm_free_msg(msg); 1739 return ret; 1740 } 
1741 1742 cm_id->state = IB_CM_DREQ_SENT; 1743 cm_id_priv->msg = msg; 1744out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1745 return ret; 1746} 1747EXPORT_SYMBOL(ib_send_cm_dreq); 1748 1749static void cm_format_drep(struct cm_drep_msg *drep_msg, 1750 struct cm_id_private *cm_id_priv, 1751 const void *private_data, 1752 u8 private_data_len) 1753{ 1754 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid); 1755 drep_msg->local_comm_id = cm_id_priv->id.local_id; 1756 drep_msg->remote_comm_id = cm_id_priv->id.remote_id; 1757 1758 if (private_data && private_data_len) 1759 memcpy(drep_msg->private_data, private_data, private_data_len); 1760} 1761 1762int ib_send_cm_drep(struct ib_cm_id *cm_id, 1763 const void *private_data, 1764 u8 private_data_len) 1765{ 1766 struct cm_id_private *cm_id_priv; 1767 struct ib_mad_send_buf *msg; 1768 unsigned long flags; 1769 void *data; 1770 int ret; 1771 1772 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE) 1773 return -EINVAL; 1774 1775 data = cm_copy_private_data(private_data, private_data_len); 1776 if (IS_ERR(data)) 1777 return PTR_ERR(data); 1778 1779 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1780 spin_lock_irqsave(&cm_id_priv->lock, flags); 1781 if (cm_id->state != IB_CM_DREQ_RCVD) { 1782 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1783 kfree(data); 1784 return -EINVAL; 1785 } 1786 1787 cm_set_private_data(cm_id_priv, data, private_data_len); 1788 cm_enter_timewait(cm_id_priv); 1789 1790 ret = cm_alloc_msg(cm_id_priv, &msg); 1791 if (ret) 1792 goto out; 1793 1794 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 1795 private_data, private_data_len); 1796 1797 ret = ib_post_send_mad(msg, NULL); 1798 if (ret) { 1799 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1800 cm_free_msg(msg); 1801 return ret; 1802 } 1803 1804out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1805 return ret; 1806} 1807EXPORT_SYMBOL(ib_send_cm_drep); 1808 1809static int cm_dreq_handler(struct cm_work *work) 1810{ 1811 struct cm_id_private *cm_id_priv; 1812 struct cm_dreq_msg *dreq_msg; 1813 struct ib_mad_send_buf *msg = NULL; 1814 unsigned long flags; 1815 int ret; 1816 1817 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad; 1818 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, 1819 dreq_msg->local_comm_id); 1820 if (!cm_id_priv) 1821 return -EINVAL; 1822 1823 work->cm_event.private_data = &dreq_msg->private_data; 1824 1825 spin_lock_irqsave(&cm_id_priv->lock, flags); 1826 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg)) 1827 goto unlock; 1828 1829 switch (cm_id_priv->id.state) { 1830 case IB_CM_REP_SENT: 1831 case IB_CM_DREQ_SENT: 1832 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1833 break; 1834 case IB_CM_ESTABLISHED: 1835 case IB_CM_MRA_REP_RCVD: 1836 break; 1837 case IB_CM_TIMEWAIT: 1838 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) 1839 goto unlock; 1840 1841 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 1842 cm_id_priv->private_data, 1843 cm_id_priv->private_data_len); 1844 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1845 1846 if (ib_post_send_mad(msg, NULL)) 1847 cm_free_msg(msg); 1848 goto deref; 1849 default: 1850 goto unlock; 1851 } 1852 cm_id_priv->id.state = IB_CM_DREQ_RCVD; 1853 cm_id_priv->tid = dreq_msg->hdr.tid; 1854 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1855 if (!ret) 1856 list_add_tail(&work->list, &cm_id_priv->work_list); 1857 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 
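/* If work_count just reached zero, no other event is in flight for this cm_id, so deliver the DREQ event now; otherwise it was queued on work_list above and the active handler will drain it. */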
1858 1859 if (ret) 1860 cm_process_work(cm_id_priv, work); 1861 else 1862 cm_deref_id(cm_id_priv); 1863 return 0; 1864 1865unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1866deref: cm_deref_id(cm_id_priv); 1867 return -EINVAL; 1868} 1869 1870static int cm_drep_handler(struct cm_work *work) 1871{ 1872 struct cm_id_private *cm_id_priv; 1873 struct cm_drep_msg *drep_msg; 1874 unsigned long flags; 1875 int ret; 1876 1877 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad; 1878 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id, 1879 drep_msg->local_comm_id); 1880 if (!cm_id_priv) 1881 return -EINVAL; 1882 1883 work->cm_event.private_data = &drep_msg->private_data; 1884 1885 spin_lock_irqsave(&cm_id_priv->lock, flags); 1886 if (cm_id_priv->id.state != IB_CM_DREQ_SENT && 1887 cm_id_priv->id.state != IB_CM_DREQ_RCVD) { 1888 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1889 goto out; 1890 } 1891 cm_enter_timewait(cm_id_priv); 1892 1893 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1894 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1895 if (!ret) 1896 list_add_tail(&work->list, &cm_id_priv->work_list); 1897 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1898 1899 if (ret) 1900 cm_process_work(cm_id_priv, work); 1901 else 1902 cm_deref_id(cm_id_priv); 1903 return 0; 1904out: 1905 cm_deref_id(cm_id_priv); 1906 return -EINVAL; 1907} 1908 1909int ib_send_cm_rej(struct ib_cm_id *cm_id, 1910 enum ib_cm_rej_reason reason, 1911 void *ari, 1912 u8 ari_length, 1913 const void *private_data, 1914 u8 private_data_len) 1915{ 1916 struct cm_id_private *cm_id_priv; 1917 struct ib_mad_send_buf *msg; 1918 unsigned long flags; 1919 int ret; 1920 1921 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) || 1922 (ari && ari_length > IB_CM_REJ_ARI_LENGTH)) 1923 return -EINVAL; 1924 1925 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1926 1927 spin_lock_irqsave(&cm_id_priv->lock, flags); 1928 switch (cm_id->state) { 1929 case IB_CM_REQ_SENT: 1930 case IB_CM_MRA_REQ_RCVD: 1931 case IB_CM_REQ_RCVD: 1932 case IB_CM_MRA_REQ_SENT: 1933 case IB_CM_REP_RCVD: 1934 case IB_CM_MRA_REP_SENT: 1935 ret = cm_alloc_msg(cm_id_priv, &msg); 1936 if (!ret) 1937 cm_format_rej((struct cm_rej_msg *) msg->mad, 1938 cm_id_priv, reason, ari, ari_length, 1939 private_data, private_data_len); 1940 1941 cm_reset_to_idle(cm_id_priv); 1942 break; 1943 case IB_CM_REP_SENT: 1944 case IB_CM_MRA_REP_RCVD: 1945 ret = cm_alloc_msg(cm_id_priv, &msg); 1946 if (!ret) 1947 cm_format_rej((struct cm_rej_msg *) msg->mad, 1948 cm_id_priv, reason, ari, ari_length, 1949 private_data, private_data_len); 1950 1951 cm_enter_timewait(cm_id_priv); 1952 break; 1953 default: 1954 ret = -EINVAL; 1955 goto out; 1956 } 1957 1958 if (ret) 1959 goto out; 1960 1961 ret = ib_post_send_mad(msg, NULL); 1962 if (ret) 1963 cm_free_msg(msg); 1964 1965out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1966 return ret; 1967} 1968EXPORT_SYMBOL(ib_send_cm_rej); 1969 1970static void cm_format_rej_event(struct cm_work *work) 1971{ 1972 struct cm_rej_msg *rej_msg; 1973 struct ib_cm_rej_event_param *param; 1974 1975 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; 1976 param = &work->cm_event.param.rej_rcvd; 1977 param->ari = rej_msg->ari; 1978 param->ari_length = cm_rej_get_reject_info_len(rej_msg); 1979 param->reason = __be16_to_cpu(rej_msg->reason); 1980 work->cm_event.private_data = &rej_msg->private_data; 1981} 1982 1983static struct cm_id_private * cm_acquire_rejected_id(struct 
cm_rej_msg *rej_msg) 1984{ 1985 struct cm_timewait_info *timewait_info; 1986 struct cm_id_private *cm_id_priv; 1987 unsigned long flags; 1988 __be32 remote_id; 1989 1990 remote_id = rej_msg->local_comm_id; 1991 1992 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) { 1993 spin_lock_irqsave(&cm.lock, flags); 1994 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari), 1995 remote_id); 1996 if (!timewait_info) { 1997 spin_unlock_irqrestore(&cm.lock, flags); 1998 return NULL; 1999 } 2000 cm_id_priv = idr_find(&cm.local_id_table, 2001 (__force int) timewait_info->work.local_id); 2002 if (cm_id_priv) { 2003 if (cm_id_priv->id.remote_id == remote_id) 2004 atomic_inc(&cm_id_priv->refcount); 2005 else 2006 cm_id_priv = NULL; 2007 } 2008 spin_unlock_irqrestore(&cm.lock, flags); 2009 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ) 2010 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0); 2011 else 2012 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id); 2013 2014 return cm_id_priv; 2015} 2016 2017static int cm_rej_handler(struct cm_work *work) 2018{ 2019 struct cm_id_private *cm_id_priv; 2020 struct cm_rej_msg *rej_msg; 2021 unsigned long flags; 2022 int ret; 2023 2024 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; 2025 cm_id_priv = cm_acquire_rejected_id(rej_msg); 2026 if (!cm_id_priv) 2027 return -EINVAL; 2028 2029 cm_format_rej_event(work); 2030 2031 spin_lock_irqsave(&cm_id_priv->lock, flags); 2032 switch (cm_id_priv->id.state) { 2033 case IB_CM_REQ_SENT: 2034 case IB_CM_MRA_REQ_RCVD: 2035 case IB_CM_REP_SENT: 2036 case IB_CM_MRA_REP_RCVD: 2037 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2038 /* fall through */ 2039 case IB_CM_REQ_RCVD: 2040 case IB_CM_MRA_REQ_SENT: 2041 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN) 2042 cm_enter_timewait(cm_id_priv); 2043 else 2044 cm_reset_to_idle(cm_id_priv); 2045 break; 2046 case IB_CM_DREQ_SENT: 2047 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2048 /* fall through */ 2049 case IB_CM_REP_RCVD: 2050 case IB_CM_MRA_REP_SENT: 2051 case IB_CM_ESTABLISHED: 2052 cm_enter_timewait(cm_id_priv); 2053 break; 2054 default: 2055 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2056 ret = -EINVAL; 2057 goto out; 2058 } 2059 2060 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2061 if (!ret) 2062 list_add_tail(&work->list, &cm_id_priv->work_list); 2063 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2064 2065 if (ret) 2066 cm_process_work(cm_id_priv, work); 2067 else 2068 cm_deref_id(cm_id_priv); 2069 return 0; 2070out: 2071 cm_deref_id(cm_id_priv); 2072 return -EINVAL; 2073} 2074 2075int ib_send_cm_mra(struct ib_cm_id *cm_id, 2076 u8 service_timeout, 2077 const void *private_data, 2078 u8 private_data_len) 2079{ 2080 struct cm_id_private *cm_id_priv; 2081 struct ib_mad_send_buf *msg; 2082 void *data; 2083 unsigned long flags; 2084 int ret; 2085 2086 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE) 2087 return -EINVAL; 2088 2089 data = cm_copy_private_data(private_data, private_data_len); 2090 if (IS_ERR(data)) 2091 return PTR_ERR(data); 2092 2093 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2094 2095 spin_lock_irqsave(&cm_id_priv->lock, flags); 2096 switch(cm_id_priv->id.state) { 2097 case IB_CM_REQ_RCVD: 2098 ret = cm_alloc_msg(cm_id_priv, &msg); 2099 if (ret) 2100 goto error1; 2101 2102 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2103 CM_MSG_RESPONSE_REQ, service_timeout, 2104 private_data, 
private_data_len); 2105 ret = ib_post_send_mad(msg, NULL); 2106 if (ret) 2107 goto error2; 2108 cm_id->state = IB_CM_MRA_REQ_SENT; 2109 break; 2110 case IB_CM_REP_RCVD: 2111 ret = cm_alloc_msg(cm_id_priv, &msg); 2112 if (ret) 2113 goto error1; 2114 2115 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2116 CM_MSG_RESPONSE_REP, service_timeout, 2117 private_data, private_data_len); 2118 ret = ib_post_send_mad(msg, NULL); 2119 if (ret) 2120 goto error2; 2121 cm_id->state = IB_CM_MRA_REP_SENT; 2122 break; 2123 case IB_CM_ESTABLISHED: 2124 ret = cm_alloc_msg(cm_id_priv, &msg); 2125 if (ret) 2126 goto error1; 2127 2128 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2129 CM_MSG_RESPONSE_OTHER, service_timeout, 2130 private_data, private_data_len); 2131 ret = ib_post_send_mad(msg, NULL); 2132 if (ret) 2133 goto error2; 2134 cm_id->lap_state = IB_CM_MRA_LAP_SENT; 2135 break; 2136 default: 2137 ret = -EINVAL; 2138 goto error1; 2139 } 2140 cm_id_priv->service_timeout = service_timeout; 2141 cm_set_private_data(cm_id_priv, data, private_data_len); 2142 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2143 return 0; 2144 2145error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2146 kfree(data); 2147 return ret; 2148 2149error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2150 kfree(data); 2151 cm_free_msg(msg); 2152 return ret; 2153} 2154EXPORT_SYMBOL(ib_send_cm_mra); 2155 2156static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg) 2157{ 2158 switch (cm_mra_get_msg_mraed(mra_msg)) { 2159 case CM_MSG_RESPONSE_REQ: 2160 return cm_acquire_id(mra_msg->remote_comm_id, 0); 2161 case CM_MSG_RESPONSE_REP: 2162 case CM_MSG_RESPONSE_OTHER: 2163 return cm_acquire_id(mra_msg->remote_comm_id, 2164 mra_msg->local_comm_id); 2165 default: 2166 return NULL; 2167 } 2168} 2169 2170static int cm_mra_handler(struct cm_work *work) 2171{ 2172 struct cm_id_private *cm_id_priv; 2173 struct cm_mra_msg *mra_msg; 2174 unsigned long flags; 2175 int timeout, ret; 2176 2177 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad; 2178 cm_id_priv = cm_acquire_mraed_id(mra_msg); 2179 if (!cm_id_priv) 2180 return -EINVAL; 2181 2182 work->cm_event.private_data = &mra_msg->private_data; 2183 work->cm_event.param.mra_rcvd.service_timeout = 2184 cm_mra_get_service_timeout(mra_msg); 2185 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) + 2186 cm_convert_to_ms(cm_id_priv->av.packet_life_time); 2187 2188 spin_lock_irqsave(&cm_id_priv->lock, flags); 2189 switch (cm_id_priv->id.state) { 2190 case IB_CM_REQ_SENT: 2191 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ || 2192 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2193 cm_id_priv->msg, timeout)) 2194 goto out; 2195 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD; 2196 break; 2197 case IB_CM_REP_SENT: 2198 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP || 2199 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2200 cm_id_priv->msg, timeout)) 2201 goto out; 2202 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD; 2203 break; 2204 case IB_CM_ESTABLISHED: 2205 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER || 2206 cm_id_priv->id.lap_state != IB_CM_LAP_SENT || 2207 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2208 cm_id_priv->msg, timeout)) 2209 goto out; 2210 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD; 2211 break; 2212 default: 2213 goto out; 2214 } 2215 2216 cm_id_priv->msg->context[1] = (void *) (unsigned long) 2217 cm_id_priv->id.state; 2218 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2219 if 
(!ret) 2220 list_add_tail(&work->list, &cm_id_priv->work_list); 2221 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2222 2223 if (ret) 2224 cm_process_work(cm_id_priv, work); 2225 else 2226 cm_deref_id(cm_id_priv); 2227 return 0; 2228out: 2229 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2230 cm_deref_id(cm_id_priv); 2231 return -EINVAL; 2232} 2233 2234static void cm_format_lap(struct cm_lap_msg *lap_msg, 2235 struct cm_id_private *cm_id_priv, 2236 struct ib_sa_path_rec *alternate_path, 2237 const void *private_data, 2238 u8 private_data_len) 2239{ 2240 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID, 2241 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP)); 2242 lap_msg->local_comm_id = cm_id_priv->id.local_id; 2243 lap_msg->remote_comm_id = cm_id_priv->id.remote_id; 2244 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn); 2245 /* todo: need remote CM response timeout */ 2246 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F); 2247 lap_msg->alt_local_lid = alternate_path->slid; 2248 lap_msg->alt_remote_lid = alternate_path->dlid; 2249 lap_msg->alt_local_gid = alternate_path->sgid; 2250 lap_msg->alt_remote_gid = alternate_path->dgid; 2251 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label); 2252 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class); 2253 lap_msg->alt_hop_limit = alternate_path->hop_limit; 2254 cm_lap_set_packet_rate(lap_msg, alternate_path->rate); 2255 cm_lap_set_sl(lap_msg, alternate_path->sl); 2256 cm_lap_set_subnet_local(lap_msg, 1); /* local only... */ 2257 cm_lap_set_local_ack_timeout(lap_msg, 2258 min(31, alternate_path->packet_life_time + 1)); 2259 2260 if (private_data && private_data_len) 2261 memcpy(lap_msg->private_data, private_data, private_data_len); 2262} 2263 2264int ib_send_cm_lap(struct ib_cm_id *cm_id, 2265 struct ib_sa_path_rec *alternate_path, 2266 const void *private_data, 2267 u8 private_data_len) 2268{ 2269 struct cm_id_private *cm_id_priv; 2270 struct ib_mad_send_buf *msg; 2271 unsigned long flags; 2272 int ret; 2273 2274 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE) 2275 return -EINVAL; 2276 2277 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2278 spin_lock_irqsave(&cm_id_priv->lock, flags); 2279 if (cm_id->state != IB_CM_ESTABLISHED || 2280 cm_id->lap_state != IB_CM_LAP_IDLE) { 2281 ret = -EINVAL; 2282 goto out; 2283 } 2284 2285 ret = cm_alloc_msg(cm_id_priv, &msg); 2286 if (ret) 2287 goto out; 2288 2289 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv, 2290 alternate_path, private_data, private_data_len); 2291 msg->timeout_ms = cm_id_priv->timeout_ms; 2292 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED; 2293 2294 ret = ib_post_send_mad(msg, NULL); 2295 if (ret) { 2296 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2297 cm_free_msg(msg); 2298 return ret; 2299 } 2300 2301 cm_id->lap_state = IB_CM_LAP_SENT; 2302 cm_id_priv->msg = msg; 2303 2304out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2305 return ret; 2306} 2307EXPORT_SYMBOL(ib_send_cm_lap); 2308 2309static void cm_format_path_from_lap(struct ib_sa_path_rec *path, 2310 struct cm_lap_msg *lap_msg) 2311{ 2312 memset(path, 0, sizeof *path); 2313 path->dgid = lap_msg->alt_local_gid; 2314 path->sgid = lap_msg->alt_remote_gid; 2315 path->dlid = lap_msg->alt_local_lid; 2316 path->slid = lap_msg->alt_remote_lid; 2317 path->flow_label = cm_lap_get_flow_label(lap_msg); 2318 path->hop_limit = lap_msg->alt_hop_limit; 2319 path->traffic_class = cm_lap_get_traffic_class(lap_msg); 2320 path->reversible = 1; 2321 /* pkey is 
same as in REQ */ 2322 path->sl = cm_lap_get_sl(lap_msg); 2323 path->mtu_selector = IB_SA_EQ; 2324 /* mtu is same as in REQ */ 2325 path->rate_selector = IB_SA_EQ; 2326 path->rate = cm_lap_get_packet_rate(lap_msg); 2327 path->packet_life_time_selector = IB_SA_EQ; 2328 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg); 2329 path->packet_life_time -= (path->packet_life_time > 0); 2330} 2331 2332static int cm_lap_handler(struct cm_work *work) 2333{ 2334 struct cm_id_private *cm_id_priv; 2335 struct cm_lap_msg *lap_msg; 2336 struct ib_cm_lap_event_param *param; 2337 struct ib_mad_send_buf *msg = NULL; 2338 unsigned long flags; 2339 int ret; 2340 2341 /* todo: verify LAP request and send reject APR if invalid. */ 2342 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad; 2343 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id, 2344 lap_msg->local_comm_id); 2345 if (!cm_id_priv) 2346 return -EINVAL; 2347 2348 param = &work->cm_event.param.lap_rcvd; 2349 param->alternate_path = &work->path[0]; 2350 cm_format_path_from_lap(param->alternate_path, lap_msg); 2351 work->cm_event.private_data = &lap_msg->private_data; 2352 2353 spin_lock_irqsave(&cm_id_priv->lock, flags); 2354 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) 2355 goto unlock; 2356 2357 switch (cm_id_priv->id.lap_state) { 2358 case IB_CM_LAP_IDLE: 2359 break; 2360 case IB_CM_MRA_LAP_SENT: 2361 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) 2362 goto unlock; 2363 2364 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2365 CM_MSG_RESPONSE_OTHER, 2366 cm_id_priv->service_timeout, 2367 cm_id_priv->private_data, 2368 cm_id_priv->private_data_len); 2369 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2370 2371 if (ib_post_send_mad(msg, NULL)) 2372 cm_free_msg(msg); 2373 goto deref; 2374 default: 2375 goto unlock; 2376 } 2377 2378 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD; 2379 cm_id_priv->tid = lap_msg->hdr.tid; 2380 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2381 if (!ret) 2382 list_add_tail(&work->list, &cm_id_priv->work_list); 2383 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2384 2385 if (ret) 2386 cm_process_work(cm_id_priv, work); 2387 else 2388 cm_deref_id(cm_id_priv); 2389 return 0; 2390 2391unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2392deref: cm_deref_id(cm_id_priv); 2393 return -EINVAL; 2394} 2395 2396static void cm_format_apr(struct cm_apr_msg *apr_msg, 2397 struct cm_id_private *cm_id_priv, 2398 enum ib_cm_apr_status status, 2399 void *info, 2400 u8 info_length, 2401 const void *private_data, 2402 u8 private_data_len) 2403{ 2404 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid); 2405 apr_msg->local_comm_id = cm_id_priv->id.local_id; 2406 apr_msg->remote_comm_id = cm_id_priv->id.remote_id; 2407 apr_msg->ap_status = (u8) status; 2408 2409 if (info && info_length) { 2410 apr_msg->info_length = info_length; 2411 memcpy(apr_msg->info, info, info_length); 2412 } 2413 2414 if (private_data && private_data_len) 2415 memcpy(apr_msg->private_data, private_data, private_data_len); 2416} 2417 2418int ib_send_cm_apr(struct ib_cm_id *cm_id, 2419 enum ib_cm_apr_status status, 2420 void *info, 2421 u8 info_length, 2422 const void *private_data, 2423 u8 private_data_len) 2424{ 2425 struct cm_id_private *cm_id_priv; 2426 struct ib_mad_send_buf *msg; 2427 unsigned long flags; 2428 int ret; 2429 2430 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) || 2431 (info && info_length > IB_CM_APR_INFO_LENGTH)) 2432 return -EINVAL; 2433 2434 
cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2435 spin_lock_irqsave(&cm_id_priv->lock, flags); 2436 if (cm_id->state != IB_CM_ESTABLISHED || 2437 (cm_id->lap_state != IB_CM_LAP_RCVD && 2438 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) { 2439 ret = -EINVAL; 2440 goto out; 2441 } 2442 2443 ret = cm_alloc_msg(cm_id_priv, &msg); 2444 if (ret) 2445 goto out; 2446 2447 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status, 2448 info, info_length, private_data, private_data_len); 2449 ret = ib_post_send_mad(msg, NULL); 2450 if (ret) { 2451 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2452 cm_free_msg(msg); 2453 return ret; 2454 } 2455 2456 cm_id->lap_state = IB_CM_LAP_IDLE; 2457out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2458 return ret; 2459} 2460EXPORT_SYMBOL(ib_send_cm_apr); 2461 2462static int cm_apr_handler(struct cm_work *work) 2463{ 2464 struct cm_id_private *cm_id_priv; 2465 struct cm_apr_msg *apr_msg; 2466 unsigned long flags; 2467 int ret; 2468 2469 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad; 2470 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id, 2471 apr_msg->local_comm_id); 2472 if (!cm_id_priv) 2473 return -EINVAL; /* Unmatched reply. */ 2474 2475 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status; 2476 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info; 2477 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length; 2478 work->cm_event.private_data = &apr_msg->private_data; 2479 2480 spin_lock_irqsave(&cm_id_priv->lock, flags); 2481 if (cm_id_priv->id.state != IB_CM_ESTABLISHED || 2482 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT && 2483 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) { 2484 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2485 goto out; 2486 } 2487 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE; 2488 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2489 cm_id_priv->msg = NULL; 2490 2491 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2492 if (!ret) 2493 list_add_tail(&work->list, &cm_id_priv->work_list); 2494 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2495 2496 if (ret) 2497 cm_process_work(cm_id_priv, work); 2498 else 2499 cm_deref_id(cm_id_priv); 2500 return 0; 2501out: 2502 cm_deref_id(cm_id_priv); 2503 return -EINVAL; 2504} 2505 2506static int cm_timewait_handler(struct cm_work *work) 2507{ 2508 struct cm_timewait_info *timewait_info; 2509 struct cm_id_private *cm_id_priv; 2510 unsigned long flags; 2511 int ret; 2512 2513 timewait_info = (struct cm_timewait_info *)work; 2514 cm_cleanup_timewait(timewait_info); 2515 2516 cm_id_priv = cm_acquire_id(timewait_info->work.local_id, 2517 timewait_info->work.remote_id); 2518 if (!cm_id_priv) 2519 return -EINVAL; 2520 2521 spin_lock_irqsave(&cm_id_priv->lock, flags); 2522 if (cm_id_priv->id.state != IB_CM_TIMEWAIT || 2523 cm_id_priv->remote_qpn != timewait_info->remote_qpn) { 2524 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2525 goto out; 2526 } 2527 cm_id_priv->id.state = IB_CM_IDLE; 2528 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2529 if (!ret) 2530 list_add_tail(&work->list, &cm_id_priv->work_list); 2531 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2532 2533 if (ret) 2534 cm_process_work(cm_id_priv, work); 2535 else 2536 cm_deref_id(cm_id_priv); 2537 return 0; 2538out: 2539 cm_deref_id(cm_id_priv); 2540 return -EINVAL; 2541} 2542 2543static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg, 2544 struct cm_id_private *cm_id_priv, 2545 struct ib_cm_sidr_req_param *param) 2546{ 2547 
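/* A SIDR REQ carries only the requester's comm ID, pkey, and target service ID: the remote CM resolves the service to a QP and answers with a SIDR REP instead of setting up a full connection. */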
cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID, 2548 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR)); 2549 sidr_req_msg->request_id = cm_id_priv->id.local_id; 2550 sidr_req_msg->pkey = cpu_to_be16(param->pkey); 2551 sidr_req_msg->service_id = param->service_id; 2552 2553 if (param->private_data && param->private_data_len) 2554 memcpy(sidr_req_msg->private_data, param->private_data, 2555 param->private_data_len); 2556} 2557 2558int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, 2559 struct ib_cm_sidr_req_param *param) 2560{ 2561 struct cm_id_private *cm_id_priv; 2562 struct ib_mad_send_buf *msg; 2563 unsigned long flags; 2564 int ret; 2565 2566 if (!param->path || (param->private_data && 2567 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE)) 2568 return -EINVAL; 2569 2570 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2571 ret = cm_init_av_by_path(param->path, &cm_id_priv->av); 2572 if (ret) 2573 goto out; 2574 2575 cm_id->service_id = param->service_id; 2576 cm_id->service_mask = __constant_cpu_to_be64(~0ULL); 2577 cm_id_priv->timeout_ms = param->timeout_ms; 2578 cm_id_priv->max_cm_retries = param->max_cm_retries; 2579 ret = cm_alloc_msg(cm_id_priv, &msg); 2580 if (ret) 2581 goto out; 2582 2583 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv, 2584 param); 2585 msg->timeout_ms = cm_id_priv->timeout_ms; 2586 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT; 2587 2588 spin_lock_irqsave(&cm_id_priv->lock, flags); 2589 if (cm_id->state == IB_CM_IDLE) 2590 ret = ib_post_send_mad(msg, NULL); 2591 else 2592 ret = -EINVAL; 2593 2594 if (ret) { 2595 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2596 cm_free_msg(msg); 2597 goto out; 2598 } 2599 cm_id->state = IB_CM_SIDR_REQ_SENT; 2600 cm_id_priv->msg = msg; 2601 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2602out: 2603 return ret; 2604} 2605EXPORT_SYMBOL(ib_send_cm_sidr_req); 2606 2607static void cm_format_sidr_req_event(struct cm_work *work, 2608 struct ib_cm_id *listen_id) 2609{ 2610 struct cm_sidr_req_msg *sidr_req_msg; 2611 struct ib_cm_sidr_req_event_param *param; 2612 2613 sidr_req_msg = (struct cm_sidr_req_msg *) 2614 work->mad_recv_wc->recv_buf.mad; 2615 param = &work->cm_event.param.sidr_req_rcvd; 2616 param->pkey = __be16_to_cpu(sidr_req_msg->pkey); 2617 param->listen_id = listen_id; 2618 param->port = work->port->port_num; 2619 work->cm_event.private_data = &sidr_req_msg->private_data; 2620} 2621 2622static int cm_sidr_req_handler(struct cm_work *work) 2623{ 2624 struct ib_cm_id *cm_id; 2625 struct cm_id_private *cm_id_priv, *cur_cm_id_priv; 2626 struct cm_sidr_req_msg *sidr_req_msg; 2627 struct ib_wc *wc; 2628 unsigned long flags; 2629 2630 cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL); 2631 if (IS_ERR(cm_id)) 2632 return PTR_ERR(cm_id); 2633 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2634 2635 /* Record SGID/SLID and request ID for lookup. 
*/ 2636 sidr_req_msg = (struct cm_sidr_req_msg *) 2637 work->mad_recv_wc->recv_buf.mad; 2638 wc = work->mad_recv_wc->wc; 2639 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid); 2640 cm_id_priv->av.dgid.global.interface_id = 0; 2641 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 2642 &cm_id_priv->av); 2643 cm_id_priv->id.remote_id = sidr_req_msg->request_id; 2644 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD; 2645 cm_id_priv->tid = sidr_req_msg->hdr.tid; 2646 atomic_inc(&cm_id_priv->work_count); 2647 2648 spin_lock_irqsave(&cm.lock, flags); 2649 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); 2650 if (cur_cm_id_priv) { 2651 spin_unlock_irqrestore(&cm.lock, flags); 2652 goto out; /* Duplicate message. */ 2653 } 2654 cur_cm_id_priv = cm_find_listen(cm_id->device, 2655 sidr_req_msg->service_id); 2656 if (!cur_cm_id_priv) { 2657 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 2658 spin_unlock_irqrestore(&cm.lock, flags); 2659 /* todo: reply with no match */ 2660 goto out; /* No match. */ 2661 } 2662 atomic_inc(&cur_cm_id_priv->refcount); 2663 spin_unlock_irqrestore(&cm.lock, flags); 2664 2665 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; 2666 cm_id_priv->id.context = cur_cm_id_priv->id.context; 2667 cm_id_priv->id.service_id = sidr_req_msg->service_id; 2668 cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL); 2669 2670 cm_format_sidr_req_event(work, &cur_cm_id_priv->id); 2671 cm_process_work(cm_id_priv, work); 2672 cm_deref_id(cur_cm_id_priv); 2673 return 0; 2674out: 2675 ib_destroy_cm_id(&cm_id_priv->id); 2676 return -EINVAL; 2677} 2678 2679static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg, 2680 struct cm_id_private *cm_id_priv, 2681 struct ib_cm_sidr_rep_param *param) 2682{ 2683 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID, 2684 cm_id_priv->tid); 2685 sidr_rep_msg->request_id = cm_id_priv->id.remote_id; 2686 sidr_rep_msg->status = param->status; 2687 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num)); 2688 sidr_rep_msg->service_id = cm_id_priv->id.service_id; 2689 sidr_rep_msg->qkey = cpu_to_be32(param->qkey); 2690 2691 if (param->info && param->info_length) 2692 memcpy(sidr_rep_msg->info, param->info, param->info_length); 2693 2694 if (param->private_data && param->private_data_len) 2695 memcpy(sidr_rep_msg->private_data, param->private_data, 2696 param->private_data_len); 2697} 2698 2699int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, 2700 struct ib_cm_sidr_rep_param *param) 2701{ 2702 struct cm_id_private *cm_id_priv; 2703 struct ib_mad_send_buf *msg; 2704 unsigned long flags; 2705 int ret; 2706 2707 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) || 2708 (param->private_data && 2709 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE)) 2710 return -EINVAL; 2711 2712 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2713 spin_lock_irqsave(&cm_id_priv->lock, flags); 2714 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) { 2715 ret = -EINVAL; 2716 goto error; 2717 } 2718 2719 ret = cm_alloc_msg(cm_id_priv, &msg); 2720 if (ret) 2721 goto error; 2722 2723 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, 2724 param); 2725 ret = ib_post_send_mad(msg, NULL); 2726 if (ret) { 2727 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2728 cm_free_msg(msg); 2729 return ret; 2730 } 2731 cm_id->state = IB_CM_IDLE; 2732 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2733 2734 spin_lock_irqsave(&cm.lock, flags); 2735 rb_erase(&cm_id_priv->sidr_id_node, 
&cm.remote_sidr_table); 2736 spin_unlock_irqrestore(&cm.lock, flags); 2737 return 0; 2738 2739error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2740 return ret; 2741} 2742EXPORT_SYMBOL(ib_send_cm_sidr_rep); 2743 2744static void cm_format_sidr_rep_event(struct cm_work *work) 2745{ 2746 struct cm_sidr_rep_msg *sidr_rep_msg; 2747 struct ib_cm_sidr_rep_event_param *param; 2748 2749 sidr_rep_msg = (struct cm_sidr_rep_msg *) 2750 work->mad_recv_wc->recv_buf.mad; 2751 param = &work->cm_event.param.sidr_rep_rcvd; 2752 param->status = sidr_rep_msg->status; 2753 param->qkey = be32_to_cpu(sidr_rep_msg->qkey); 2754 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg)); 2755 param->info = &sidr_rep_msg->info; 2756 param->info_len = sidr_rep_msg->info_length; 2757 work->cm_event.private_data = &sidr_rep_msg->private_data; 2758} 2759 2760static int cm_sidr_rep_handler(struct cm_work *work) 2761{ 2762 struct cm_sidr_rep_msg *sidr_rep_msg; 2763 struct cm_id_private *cm_id_priv; 2764 unsigned long flags; 2765 2766 sidr_rep_msg = (struct cm_sidr_rep_msg *) 2767 work->mad_recv_wc->recv_buf.mad; 2768 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0); 2769 if (!cm_id_priv) 2770 return -EINVAL; /* Unmatched reply. */ 2771 2772 spin_lock_irqsave(&cm_id_priv->lock, flags); 2773 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) { 2774 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2775 goto out; 2776 } 2777 cm_id_priv->id.state = IB_CM_IDLE; 2778 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2779 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2780 2781 cm_format_sidr_rep_event(work); 2782 cm_process_work(cm_id_priv, work); 2783 return 0; 2784out: 2785 cm_deref_id(cm_id_priv); 2786 return -EINVAL; 2787} 2788 2789static void cm_process_send_error(struct ib_mad_send_buf *msg, 2790 enum ib_wc_status wc_status) 2791{ 2792 struct cm_id_private *cm_id_priv; 2793 struct ib_cm_event cm_event; 2794 enum ib_cm_state state; 2795 unsigned long flags; 2796 int ret; 2797 2798 memset(&cm_event, 0, sizeof cm_event); 2799 cm_id_priv = msg->context[0]; 2800 2801 /* Discard old sends or ones without a response. */ 2802 spin_lock_irqsave(&cm_id_priv->lock, flags); 2803 state = (enum ib_cm_state) (unsigned long) msg->context[1]; 2804 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state) 2805 goto discard; 2806 2807 switch (state) { 2808 case IB_CM_REQ_SENT: 2809 case IB_CM_MRA_REQ_RCVD: 2810 cm_reset_to_idle(cm_id_priv); 2811 cm_event.event = IB_CM_REQ_ERROR; 2812 break; 2813 case IB_CM_REP_SENT: 2814 case IB_CM_MRA_REP_RCVD: 2815 cm_reset_to_idle(cm_id_priv); 2816 cm_event.event = IB_CM_REP_ERROR; 2817 break; 2818 case IB_CM_DREQ_SENT: 2819 cm_enter_timewait(cm_id_priv); 2820 cm_event.event = IB_CM_DREQ_ERROR; 2821 break; 2822 case IB_CM_SIDR_REQ_SENT: 2823 cm_id_priv->id.state = IB_CM_IDLE; 2824 cm_event.event = IB_CM_SIDR_REQ_ERROR; 2825 break; 2826 default: 2827 goto discard; 2828 } 2829 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2830 cm_event.param.send_status = wc_status; 2831 2832 /* No other events can occur on the cm_id at this point. 
*/ 2833 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event); 2834 cm_free_msg(msg); 2835 if (ret) 2836 ib_destroy_cm_id(&cm_id_priv->id); 2837 return; 2838discard: 2839 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2840 cm_free_msg(msg); 2841} 2842 2843static void cm_send_handler(struct ib_mad_agent *mad_agent, 2844 struct ib_mad_send_wc *mad_send_wc) 2845{ 2846 struct ib_mad_send_buf *msg = mad_send_wc->send_buf; 2847 2848 switch (mad_send_wc->status) { 2849 case IB_WC_SUCCESS: 2850 case IB_WC_WR_FLUSH_ERR: 2851 cm_free_msg(msg); 2852 break; 2853 default: 2854 if (msg->context[0] && msg->context[1]) 2855 cm_process_send_error(msg, mad_send_wc->status); 2856 else 2857 cm_free_msg(msg); 2858 break; 2859 } 2860} 2861 2862static void cm_work_handler(void *data) 2863{ 2864 struct cm_work *work = data; 2865 int ret; 2866 2867 switch (work->cm_event.event) { 2868 case IB_CM_REQ_RECEIVED: 2869 ret = cm_req_handler(work); 2870 break; 2871 case IB_CM_MRA_RECEIVED: 2872 ret = cm_mra_handler(work); 2873 break; 2874 case IB_CM_REJ_RECEIVED: 2875 ret = cm_rej_handler(work); 2876 break; 2877 case IB_CM_REP_RECEIVED: 2878 ret = cm_rep_handler(work); 2879 break; 2880 case IB_CM_RTU_RECEIVED: 2881 ret = cm_rtu_handler(work); 2882 break; 2883 case IB_CM_USER_ESTABLISHED: 2884 ret = cm_establish_handler(work); 2885 break; 2886 case IB_CM_DREQ_RECEIVED: 2887 ret = cm_dreq_handler(work); 2888 break; 2889 case IB_CM_DREP_RECEIVED: 2890 ret = cm_drep_handler(work); 2891 break; 2892 case IB_CM_SIDR_REQ_RECEIVED: 2893 ret = cm_sidr_req_handler(work); 2894 break; 2895 case IB_CM_SIDR_REP_RECEIVED: 2896 ret = cm_sidr_rep_handler(work); 2897 break; 2898 case IB_CM_LAP_RECEIVED: 2899 ret = cm_lap_handler(work); 2900 break; 2901 case IB_CM_APR_RECEIVED: 2902 ret = cm_apr_handler(work); 2903 break; 2904 case IB_CM_TIMEWAIT_EXIT: 2905 ret = cm_timewait_handler(work); 2906 break; 2907 default: 2908 ret = -EINVAL; 2909 break; 2910 } 2911 if (ret) 2912 cm_free_work(work); 2913} 2914 2915int ib_cm_establish(struct ib_cm_id *cm_id) 2916{ 2917 struct cm_id_private *cm_id_priv; 2918 struct cm_work *work; 2919 unsigned long flags; 2920 int ret = 0; 2921 2922 work = kmalloc(sizeof *work, GFP_ATOMIC); 2923 if (!work) 2924 return -ENOMEM; 2925 2926 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2927 spin_lock_irqsave(&cm_id_priv->lock, flags); 2928 switch (cm_id->state) 2929 { 2930 case IB_CM_REP_SENT: 2931 case IB_CM_MRA_REP_RCVD: 2932 cm_id->state = IB_CM_ESTABLISHED; 2933 break; 2934 case IB_CM_ESTABLISHED: 2935 ret = -EISCONN; 2936 break; 2937 default: 2938 ret = -EINVAL; 2939 break; 2940 } 2941 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2942 2943 if (ret) { 2944 kfree(work); 2945 goto out; 2946 } 2947 2948 /* 2949 * The CM worker thread may try to destroy the cm_id before it 2950 * can execute this work item. To prevent potential deadlock, 2951 * we need to find the cm_id once we're in the context of the 2952 * worker thread, rather than holding a reference on it. 
2953 */ 2954 INIT_WORK(&work->work, cm_work_handler, work); 2955 work->local_id = cm_id->local_id; 2956 work->remote_id = cm_id->remote_id; 2957 work->mad_recv_wc = NULL; 2958 work->cm_event.event = IB_CM_USER_ESTABLISHED; 2959 queue_work(cm.wq, &work->work); 2960out: 2961 return ret; 2962} 2963EXPORT_SYMBOL(ib_cm_establish); 2964 2965static void cm_recv_handler(struct ib_mad_agent *mad_agent, 2966 struct ib_mad_recv_wc *mad_recv_wc) 2967{ 2968 struct cm_work *work; 2969 enum ib_cm_event_type event; 2970 int paths = 0; 2971 2972 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { 2973 case CM_REQ_ATTR_ID: 2974 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)-> 2975 alt_local_lid != 0); 2976 event = IB_CM_REQ_RECEIVED; 2977 break; 2978 case CM_MRA_ATTR_ID: 2979 event = IB_CM_MRA_RECEIVED; 2980 break; 2981 case CM_REJ_ATTR_ID: 2982 event = IB_CM_REJ_RECEIVED; 2983 break; 2984 case CM_REP_ATTR_ID: 2985 event = IB_CM_REP_RECEIVED; 2986 break; 2987 case CM_RTU_ATTR_ID: 2988 event = IB_CM_RTU_RECEIVED; 2989 break; 2990 case CM_DREQ_ATTR_ID: 2991 event = IB_CM_DREQ_RECEIVED; 2992 break; 2993 case CM_DREP_ATTR_ID: 2994 event = IB_CM_DREP_RECEIVED; 2995 break; 2996 case CM_SIDR_REQ_ATTR_ID: 2997 event = IB_CM_SIDR_REQ_RECEIVED; 2998 break; 2999 case CM_SIDR_REP_ATTR_ID: 3000 event = IB_CM_SIDR_REP_RECEIVED; 3001 break; 3002 case CM_LAP_ATTR_ID: 3003 paths = 1; 3004 event = IB_CM_LAP_RECEIVED; 3005 break; 3006 case CM_APR_ATTR_ID: 3007 event = IB_CM_APR_RECEIVED; 3008 break; 3009 default: 3010 ib_free_recv_mad(mad_recv_wc); 3011 return; 3012 } 3013 3014 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, 3015 GFP_KERNEL); 3016 if (!work) { 3017 ib_free_recv_mad(mad_recv_wc); 3018 return; 3019 } 3020 3021 INIT_WORK(&work->work, cm_work_handler, work); 3022 work->cm_event.event = event; 3023 work->mad_recv_wc = mad_recv_wc; 3024 work->port = (struct cm_port *)mad_agent->context; 3025 queue_work(cm.wq, &work->work); 3026} 3027 3028static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, 3029 struct ib_qp_attr *qp_attr, 3030 int *qp_attr_mask) 3031{ 3032 unsigned long flags; 3033 int ret; 3034 3035 spin_lock_irqsave(&cm_id_priv->lock, flags); 3036 switch (cm_id_priv->id.state) { 3037 case IB_CM_REQ_SENT: 3038 case IB_CM_MRA_REQ_RCVD: 3039 case IB_CM_REQ_RCVD: 3040 case IB_CM_MRA_REQ_SENT: 3041 case IB_CM_REP_RCVD: 3042 case IB_CM_MRA_REP_SENT: 3043 case IB_CM_REP_SENT: 3044 case IB_CM_MRA_REP_RCVD: 3045 case IB_CM_ESTABLISHED: 3046 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | 3047 IB_QP_PKEY_INDEX | IB_QP_PORT; 3048 qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | 3049 IB_ACCESS_REMOTE_WRITE; 3050 if (cm_id_priv->responder_resources) 3051 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ; 3052 qp_attr->pkey_index = cm_id_priv->av.pkey_index; 3053 qp_attr->port_num = cm_id_priv->av.port->port_num; 3054 ret = 0; 3055 break; 3056 default: 3057 ret = -EINVAL; 3058 break; 3059 } 3060 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3061 return ret; 3062} 3063 3064static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv, 3065 struct ib_qp_attr *qp_attr, 3066 int *qp_attr_mask) 3067{ 3068 unsigned long flags; 3069 int ret; 3070 3071 spin_lock_irqsave(&cm_id_priv->lock, flags); 3072 switch (cm_id_priv->id.state) { 3073 case IB_CM_REQ_RCVD: 3074 case IB_CM_MRA_REQ_SENT: 3075 case IB_CM_REP_RCVD: 3076 case IB_CM_MRA_REP_SENT: 3077 case IB_CM_REP_SENT: 3078 case IB_CM_MRA_REP_RCVD: 3079 case IB_CM_ESTABLISHED: 3080 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | 
IB_QP_PATH_MTU | 3081 IB_QP_DEST_QPN | IB_QP_RQ_PSN; 3082 qp_attr->ah_attr = cm_id_priv->av.ah_attr; 3083 qp_attr->path_mtu = cm_id_priv->path_mtu; 3084 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn); 3085 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn); 3086 if (cm_id_priv->qp_type == IB_QPT_RC) { 3087 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC | 3088 IB_QP_MIN_RNR_TIMER; 3089 qp_attr->max_dest_rd_atomic = 3090 cm_id_priv->responder_resources; 3091 qp_attr->min_rnr_timer = 0; 3092 } 3093 if (cm_id_priv->alt_av.ah_attr.dlid) { 3094 *qp_attr_mask |= IB_QP_ALT_PATH; 3095 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; 3096 } 3097 ret = 0; 3098 break; 3099 default: 3100 ret = -EINVAL; 3101 break; 3102 } 3103 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3104 return ret; 3105} 3106 3107static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv, 3108 struct ib_qp_attr *qp_attr, 3109 int *qp_attr_mask) 3110{ 3111 unsigned long flags; 3112 int ret; 3113 3114 spin_lock_irqsave(&cm_id_priv->lock, flags); 3115 switch (cm_id_priv->id.state) { 3116 case IB_CM_REP_RCVD: 3117 case IB_CM_MRA_REP_SENT: 3118 case IB_CM_REP_SENT: 3119 case IB_CM_MRA_REP_RCVD: 3120 case IB_CM_ESTABLISHED: 3121 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN; 3122 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn); 3123 if (cm_id_priv->qp_type == IB_QPT_RC) { 3124 *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | 3125 IB_QP_RNR_RETRY | 3126 IB_QP_MAX_QP_RD_ATOMIC; 3127 qp_attr->timeout = cm_id_priv->local_ack_timeout; 3128 qp_attr->retry_cnt = cm_id_priv->retry_count; 3129 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count; 3130 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth; 3131 } 3132 if (cm_id_priv->alt_av.ah_attr.dlid) { 3133 *qp_attr_mask |= IB_QP_PATH_MIG_STATE; 3134 qp_attr->path_mig_state = IB_MIG_REARM; 3135 } 3136 ret = 0; 3137 break; 3138 default: 3139 ret = -EINVAL; 3140 break; 3141 } 3142 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3143 return ret; 3144} 3145 3146int ib_cm_init_qp_attr(struct ib_cm_id *cm_id, 3147 struct ib_qp_attr *qp_attr, 3148 int *qp_attr_mask) 3149{ 3150 struct cm_id_private *cm_id_priv; 3151 int ret; 3152 3153 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3154 switch (qp_attr->qp_state) { 3155 case IB_QPS_INIT: 3156 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask); 3157 break; 3158 case IB_QPS_RTR: 3159 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask); 3160 break; 3161 case IB_QPS_RTS: 3162 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask); 3163 break; 3164 default: 3165 ret = -EINVAL; 3166 break; 3167 } 3168 return ret; 3169} 3170EXPORT_SYMBOL(ib_cm_init_qp_attr); 3171 3172static void cm_add_one(struct ib_device *device) 3173{ 3174 struct cm_device *cm_dev; 3175 struct cm_port *port; 3176 struct ib_mad_reg_req reg_req = { 3177 .mgmt_class = IB_MGMT_CLASS_CM, 3178 .mgmt_class_version = IB_CM_CLASS_VERSION 3179 }; 3180 struct ib_port_modify port_modify = { 3181 .set_port_cap_mask = IB_PORT_CM_SUP 3182 }; 3183 unsigned long flags; 3184 int ret; 3185 u8 i; 3186 3187 cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) * 3188 device->phys_port_cnt, GFP_KERNEL); 3189 if (!cm_dev) 3190 return; 3191 3192 cm_dev->device = device; 3193 cm_dev->ca_guid = device->node_guid; 3194 3195 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); 3196 for (i = 1; i <= device->phys_port_cnt; i++) { 3197 port = &cm_dev->port[i-1]; 3198 port->cm_dev = cm_dev; 3199 port->port_num = i; 3200 port->mad_agent = ib_register_mad_agent(device, i, 
3201 IB_QPT_GSI, 3202 &reg_req, 3203 0, 3204 cm_send_handler, 3205 cm_recv_handler, 3206 port); 3207 if (IS_ERR(port->mad_agent)) 3208 goto error1; 3209 3210 ret = ib_modify_port(device, i, 0, &port_modify); 3211 if (ret) 3212 goto error2; 3213 } 3214 ib_set_client_data(device, &cm_client, cm_dev); 3215 3216 write_lock_irqsave(&cm.device_lock, flags); 3217 list_add_tail(&cm_dev->list, &cm.device_list); 3218 write_unlock_irqrestore(&cm.device_lock, flags); 3219 return; 3220 3221error2: 3222 ib_unregister_mad_agent(port->mad_agent); 3223error1: 3224 port_modify.set_port_cap_mask = 0; 3225 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; 3226 while (--i) { 3227 port = &cm_dev->port[i-1]; 3228 ib_modify_port(device, port->port_num, 0, &port_modify); 3229 ib_unregister_mad_agent(port->mad_agent); 3230 } 3231 kfree(cm_dev); 3232} 3233 3234static void cm_remove_one(struct ib_device *device) 3235{ 3236 struct cm_device *cm_dev; 3237 struct cm_port *port; 3238 struct ib_port_modify port_modify = { 3239 .clr_port_cap_mask = IB_PORT_CM_SUP 3240 }; 3241 unsigned long flags; 3242 int i; 3243 3244 cm_dev = ib_get_client_data(device, &cm_client); 3245 if (!cm_dev) 3246 return; 3247 3248 write_lock_irqsave(&cm.device_lock, flags); 3249 list_del(&cm_dev->list); 3250 write_unlock_irqrestore(&cm.device_lock, flags); 3251 3252 for (i = 1; i <= device->phys_port_cnt; i++) { 3253 port = &cm_dev->port[i-1]; 3254 ib_modify_port(device, port->port_num, 0, &port_modify); 3255 ib_unregister_mad_agent(port->mad_agent); 3256 } 3257 kfree(cm_dev); 3258} 3259 3260static int __init ib_cm_init(void) 3261{ 3262 int ret; 3263 3264 memset(&cm, 0, sizeof cm); 3265 INIT_LIST_HEAD(&cm.device_list); 3266 rwlock_init(&cm.device_lock); 3267 spin_lock_init(&cm.lock); 3268 cm.listen_service_table = RB_ROOT; 3269 cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); 3270 cm.remote_id_table = RB_ROOT; 3271 cm.remote_qp_table = RB_ROOT; 3272 cm.remote_sidr_table = RB_ROOT; 3273 idr_init(&cm.local_id_table); 3274 idr_pre_get(&cm.local_id_table, GFP_KERNEL); 3275 3276 cm.wq = create_workqueue("ib_cm"); 3277 if (!cm.wq) 3278 return -ENOMEM; 3279 3280 ret = ib_register_client(&cm_client); 3281 if (ret) 3282 goto error; 3283 3284 return 0; 3285error: 3286 destroy_workqueue(cm.wq); 3287 return ret; 3288} 3289 3290static void __exit ib_cm_cleanup(void) 3291{ 3292 flush_workqueue(cm.wq); 3293 destroy_workqueue(cm.wq); 3294 ib_unregister_client(&cm_client); 3295 idr_destroy(&cm.local_id_table); 3296} 3297 3298module_init(ib_cm_init); 3299module_exit(ib_cm_cleanup); 3300 3301
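/*
 * Example: a minimal consumer-side sketch of driving a connected QP to RTS
 * with ib_cm_init_qp_attr() and ib_modify_qp().  It assumes the caller has
 * already created 'qp' on the same device and holds a 'cm_id' that has
 * progressed far enough for each transition (e.g. past IB_CM_REQ_RCVD for
 * INIT/RTR and past IB_CM_REP_RCVD or IB_CM_REP_SENT for RTS); the function
 * name is illustrative only.
 *
 *	static int example_qp_to_rts(struct ib_cm_id *cm_id, struct ib_qp *qp)
 *	{
 *		enum ib_qp_state states[] = { IB_QPS_INIT, IB_QPS_RTR, IB_QPS_RTS };
 *		struct ib_qp_attr qp_attr;
 *		int qp_attr_mask, ret, i;
 *
 *		memset(&qp_attr, 0, sizeof qp_attr);
 *		for (i = 0; i < 3; i++) {
 *			qp_attr.qp_state = states[i];
 *			ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *			if (!ret)
 *				ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *			if (ret)
 *				return ret;
 *		}
 *		return 0;
 *	}
 */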