cm.c revision 82a9c16a10521a0ceadbd27a549f6e8d5e70e0ab
/*
 * Copyright (c) 2004-2006 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
} cm;

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	__be64 ca_guid;
	struct cm_port port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};

struct cm_work {
	struct work_struct work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};
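/*
 * Tracks a connection through the timewait state.  The embedded cm_work
 * must remain the first member so the delayed work queued by
 * cm_enter_timewait() can be passed directly to cm_work_handler().
 */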
struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;
	struct ib_cm_compare_data *compare_data;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 local_ack_timeout;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(void *data);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}
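/*
 * Response MADs take their address information from the received work
 * completion: the AH is built from the incoming wc/GRH, so no cm_id
 * state (and no reference on one) is needed.
 */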
static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmalloc(private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	memcpy(data, private_data, private_data_len);
	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
			   grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->device, port->port_num, path,
			     &av->ah_attr);
	av->packet_life_time = path->packet_life_time;
	return 0;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret, id;
	static int next_id;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
					next_id++, &id);
		spin_unlock_irqrestore(&cm.lock, flags);
	} while ((ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL));

	cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
	return ret;
}

static void cm_free_id(__be32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irqrestore(&cm.lock, flags);
}
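/*
 * Local IDs handed out by the idr are XOR'ed with random_id_operand
 * (presumably seeded with random bytes at module init, outside this
 * excerpt) to make active comm IDs harder to guess; lookups must apply
 * the same transformation.
 */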
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}

static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
	int i;

	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
					     ((unsigned long *) mask)[i];
}

static int cm_compare_data(struct ib_cm_compare_data *src_data,
			   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];
	u8 dst[IB_CM_COMPARE_SIZE];

	if (!src_data || !dst_data)
		return 0;

	cm_mask_copy(src, src_data->data, dst_data->mask);
	cm_mask_copy(dst, dst_data->data, src_data->mask);
	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}

static int cm_compare_private_data(u8 *private_data,
				   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];

	if (!dst_data)
		return 0;

	cm_mask_copy(src, private_data, dst_data->mask);
	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	int data_cmp;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		data_cmp = cm_compare_data(cm_id_priv->compare_data,
					   cur_cm_id_priv->compare_data);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
		    !data_cmp)
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else if (service_id > cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_right;
		else if (data_cmp < 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id,
					     u8 *private_data)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;
	int data_cmp;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		data_cmp = cm_compare_private_data(private_data,
						   cm_id_priv->compare_data);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device) && !data_cmp)
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else if (service_id > cm_id_priv->id.service_id)
			node = node->rb_right;
		else if (data_cmp < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}
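/*
 * The remote_id and remote_qpn rb-trees are keyed on the remote CA GUID
 * plus the remote comm ID / QPN.  An insert that finds an existing entry
 * returns it rather than inserting, which is how cm_match_req() and
 * cm_rep_handler() detect duplicate REQs and stale connections.
 */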
static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_WORK(&timewait_info->work.work, cm_work_handler,
		  &timewait_info->work);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}
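/*
 * The timewait period is derived from local_ack_timeout, an IBA
 * 4.096us * 2^n encoding, converted to milliseconds by
 * cm_convert_to_ms() before the delayed work is queued.
 */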
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}
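/*
 * Teardown is state dependent: a listen is unlinked from the service
 * table, in-flight MADs are cancelled, and connected states send a REJ,
 * DREQ, or DREP as appropriate before the last reference and any queued
 * work items are released.
 */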
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->av.port->cm_dev->ca_guid,
			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		} else {
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->compare_data);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
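/*
 * A service_mask of zero is treated as a full mask, i.e. an exact match
 * on service_id.  Passing IB_CM_ASSIGN_SERVICE_ID instead makes the CM
 * allocate the next ID from cm.listen_service_id under cm.lock.
 */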
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
		 struct ib_cm_compare_data *compare_data)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	if (compare_data) {
		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
						   GFP_KERNEL);
		if (!cm_id_priv->compare_data)
			return -ENOMEM;
		cm_mask_copy(cm_id_priv->compare_data->data,
			     compare_data->data, compare_data->mask);
		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
		       IB_CM_COMPARE_SIZE);
	}

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		kfree(cm_id_priv->compare_data);
		cm_id_priv->compare_data = NULL;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method = IB_MGMT_METHOD_SEND;
	hdr->attr_id = attr_id;
	hdr->tid = tid;
}
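/*
 * REQ construction: connection parameters are packed into the MAD via
 * the cm_req_set_*() accessors (presumably defined in cm_msgs.h);
 * primary and alternate path fields come straight from the caller's
 * ib_sa_path_rec entries.
 */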
static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
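/*
 * The REQ timeout used below is 2 * packet_life_time (one round trip)
 * plus the remote CM's response timeout, both converted from their IBA
 * exponential encodings to milliseconds by cm_convert_to_ms().
 */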
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}
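/*
 * Events reported to the passive side are phrased from its point of
 * view: the REQ's "local" fields become the remote_* values here, and
 * responder_resources/initiator_depth are swapped relative to the wire
 * format.
 */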
static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}
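/*
 * When rejecting a REQ that has not been accepted yet, the peer does
 * not know our comm ID, so local_comm_id is left zero and the "message
 * rejected" field tells it which message the REJ answers.
 */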
static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}
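/*
 * A new REQ is matched in two steps under cm.lock: first against the
 * timewait trees to catch duplicate REQs and stale connections, then
 * against the listen table to find a service to hand the connection to.
 */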
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for duplicate REQ and stale connections. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (!timewait_info)
		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		} else
			cm_issue_rej(work->port, work->mad_recv_wc,
				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
				     NULL, 0);
		listen_cm_id_priv = NULL;
		goto out;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id,
					   req_msg->private_data);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto out;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);
out:
	return listen_cm_id_priv;
}

static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret) {
		ib_get_cached_gid(work->port->cm_dev->device,
				  work->port->port_num, 0, &work->path[0].sgid);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}
static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}

static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}
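/*
 * The event handlers below share a common pattern: acquire the cm_id by
 * comm IDs, validate the state under the lock, then either process the
 * work inline or, if a callback is already running (work_count), queue
 * it for cm_process_work() to drain.
 */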
static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	/* See comment in ib_cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	unsigned long flags;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
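/*
 * Disconnect is a DREQ/DREP handshake.  The side sending the DREP (and
 * a DREQ sender whose send fails) moves into timewait rather than
 * straight back to idle.
 */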
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);

static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);

static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
	drep_msg->local_comm_id = dreq_msg->remote_comm_id;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

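/*
 * Handle a received DREQ.  A DREQ arriving while this side is already in
 * TIMEWAIT means our DREP was lost, so a fresh DREP is generated and sent
 * directly; in REP_SENT or DREQ_SENT the outstanding MAD is canceled to
 * resolve the cross of a disconnect with an in-flight message.
 */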
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv) {
		cm_issue_drep(work->port, work->mad_recv_wc);
		return -EINVAL;
	}

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	unsigned long flags;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

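/*
 * Send a REJ.  Rejecting while connection establishment is still in
 * progress resets the cm_id to idle; rejecting after our REP has been
 * sent enters timewait instead, since the remote side may already have
 * the local QPN recorded.
 */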
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);

static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}

static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irqsave(&cm.lock, flags);
		timewait_info = cm_find_remote_id(*((__be64 *) rej_msg->ari),
						  remote_id);
		if (!timewait_info) {
			spin_unlock_irqrestore(&cm.lock, flags);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
				      (timewait_info->work.local_id ^
				       cm.random_id_operand));
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irqrestore(&cm.lock, flags);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}

static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	unsigned long flags;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_ESTABLISHED:
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

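/*
 * Illustrative usage, not taken from a real consumer ("id" is a
 * hypothetical cm_id that just reported IB_CM_REQ_RECEIVED): a receiver
 * that cannot reply within the requester's timeout stalls the requester's
 * retries with an MRA before sending the REP.
 *
 *	ret = ib_send_cm_mra(id, service_timeout, NULL, 0);
 *
 * where service_timeout is the 5-bit encoded delay (roughly
 * 4.096 usec * 2^service_timeout) the peer should allow for a reply.
 */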
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REQ_SENT;
		break;
	case IB_CM_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REP_SENT;
		break;
	case IB_CM_ESTABLISHED:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
		break;
	default:
		ret = -EINVAL;
		goto error1;
	}
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);

static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}

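/*
 * Receiving an MRA rearms the timeout of our outstanding send: the
 * in-flight MAD is updated via ib_modify_mad() with the peer's advertised
 * service timeout plus the path's packet life time, both converted to
 * milliseconds.
 */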
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	unsigned long flags;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

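/*
 * Alternate path handling: the active side proposes a new path with a
 * LAP, the passive side answers with an APR, and a consumer then rearms
 * path migration through IB_QP_PATH_MIG_STATE (see cm_init_qp_rts_attr()
 * below).
 */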
static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		min(31, alternate_path->packet_life_time + 1));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    cm_id->lap_state != IB_CM_LAP_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);

static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	/* pkey is same as in REQ */
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	/* mtu is same as in REQ */
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}

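/*
 * Handle a received LAP.  If an MRA for an earlier LAP is still
 * outstanding (MRA_LAP_SENT), the MRA is regenerated and resent, since a
 * duplicate LAP means the peer never saw it; any other non-idle lap_state
 * drops the message.
 */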
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}

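/*
 * Illustrative usage, not taken from a real consumer ("id" is a
 * hypothetical cm_id that just reported IB_CM_LAP_RECEIVED, and the
 * success status value is assumed from the ib_cm_apr_status enum in
 * <rdma/ib_cm.h>): accept the proposed alternate path.
 *
 *	ret = ib_send_cm_apr(id, IB_CM_APR_SUCCESS, NULL, 0, NULL, 0);
 */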
int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);

static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	unsigned long flags;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}

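/*
 * Service ID resolution (SIDR) maps a service ID to a UD QPN and Q_Key
 * without establishing a connection.  Illustrative usage, not taken from
 * a real consumer ("id", "path" and MY_SERVICE_ID are hypothetical
 * caller-owned values):
 *
 *	struct ib_cm_sidr_req_param param = {
 *		.path		= path,
 *		.service_id	= MY_SERVICE_ID,
 *		.timeout_ms	= 1000,
 *		.max_cm_retries	= 3,
 *	};
 *
 *	ret = ib_send_cm_sidr_req(id, &param);
 */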
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);

static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}

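/*
 * Handle a received SIDR REQ: a new cm_id is created for the request, its
 * request ID is inserted into cm.remote_sidr_table to filter
 * retransmissions, and the event is delivered to the listener matching
 * the service ID.
 */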
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	unsigned long flags;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irqsave(&cm.lock, flags);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out; /* Duplicate message. */
	}
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id,
					sidr_req_msg->private_data);
	if (!cur_cm_id_priv) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		/* todo: reply with no match */
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}

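/*
 * Handle a received SIDR REP.  This completes the requester's
 * transaction: the outstanding SIDR REQ MAD is canceled and the cm_id
 * returns to IB_CM_IDLE before the event is delivered.
 */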
static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

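/*
 * Map a failed send that was still awaiting a response to the matching
 * *_ERROR event for the consumer.  Sends that the cm_id has already
 * moved past are quietly discarded.
 */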
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	unsigned long flags;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_free_msg(msg);
}

static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}

static void cm_work_handler(void *data)
{
	struct cm_work *work = data;
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}

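/*
 * Illustrative usage, not taken from a real consumer ("id" is a
 * hypothetical passive-side cm_id in REP_SENT): a consumer that receives
 * data on the QP before the RTU arrives may declare the connection
 * established itself.
 *
 *	ret = ib_cm_establish(id);
 *
 * A return of -EISCONN means an RTU already moved the cm_id to
 * ESTABLISHED.
 */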
int ib_cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_WORK(&work->work, cm_work_handler, work);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_work(cm.wq, &work->work);
out:
	return ret;
}
EXPORT_SYMBOL(ib_cm_establish);

static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_WORK(&work->work, cm_work_handler, work);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_work(cm.wq, &work->work);
}

static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
					   IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

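/*
 * Illustrative consumer sequence, not taken from a real consumer ("id"
 * and "qp" are hypothetical caller-owned values): drive the QP to RTR
 * with attributes filled in by ib_cm_init_qp_attr(); the caller sets
 * qp_state first, then applies the result with ib_modify_qp().
 *
 *	struct ib_qp_attr attr;
 *	int mask;
 *
 *	attr.qp_state = IB_QPS_RTR;
 *	ret = ib_cm_init_qp_attr(id, &attr, &mask);
 *	if (!ret)
 *		ret = ib_modify_qp(qp, &attr, mask);
 *
 * The same pattern is repeated for IB_QPS_INIT and IB_QPS_RTS.
 */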
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
					 IB_QP_RNR_RETRY |
					 IB_QP_MAX_QP_RD_ATOMIC;
			qp_attr->timeout = cm_id_priv->local_ack_timeout;
			qp_attr->retry_cnt = cm_id_priv->retry_count;
			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);

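/*
 * Device registration: a MAD agent is registered on the GSI QP of each
 * physical port to send and receive CM class MADs, and IB_PORT_CM_SUP is
 * set in the port capability mask to advertise that a CM is present.
 */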
static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;
	cm_dev->ca_guid = device->node_guid;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error1;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error2;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error2:
	ib_unregister_mad_agent(port->mad_agent);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}

static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}

static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
	INIT_LIST_HEAD(&cm.timewait_list);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	ib_unregister_client(&cm_client);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);