ipoib_cm.c revision 2dfbfc37121d307e1f1d24c2979382cb17b19347
/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id$
 */

#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif

#include "ipoib.h"

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

struct ipoib_cm_id {
	struct ib_cm_id *id;
	int flags;
	u32 remote_qpn;
	u32 remote_mtu;
};

static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0x7fffffff

static struct ib_recv_wr ipoib_cm_rx_drain_wr = {
	.wr_id = IPOIB_CM_RX_DRAIN_WRID
};

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
				  u64 mapping[IPOIB_CM_RX_SG])
{
	int i;

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < frags; ++i)
		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}

static int ipoib_cm_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	priv->cm.rx_wr.wr_id = id | IPOIB_CM_OP_SRQ;

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
				      priv->cm.srq_ring[id].mapping);
		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
		priv->cm.srq_ring[id].skb = NULL;
	}

	return ret;
}

static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int frags,
					     u64 mapping[IPOIB_CM_RX_SG])
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 12);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(GFP_ATOMIC);

		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
			goto partial_error;
	}

	priv->cm.srq_ring[id].skb = skb;
	return skb;

partial_error:

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (; i >= 0; --i)
		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	return NULL;
}

static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv* priv)
{
	struct ib_recv_wr *bad_wr;

	/* rx_drain_qp send queue depth is 1, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))
		return;

	if (ib_post_recv(priv->cm.rx_drain_qp, &ipoib_cm_rx_drain_wr, &bad_wr))
		ipoib_warn(priv, "failed to post rx_drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}

static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
	struct ipoib_cm_rx *p = ctx;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	unsigned long flags;

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&p->list, &priv->cm.rx_flush_list);
	p->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
					   struct ipoib_cm_rx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->cq, /* does not matter, we never send anything */
		.recv_cq = priv->cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* FIXME: 0 Seems not to work */
		.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};
	return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_modify_rx_qp(struct net_device *dev,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}
	return 0;
}

static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.target_ack_delay = 20; /* FIXME */
	rep.srq = 1;
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;
	return ib_send_cm_rep(cm_id, &rep);
}

static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct net_device *dev = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->dev = dev;
	p->id = cm_id;
	p->qp = ipoib_cm_create_rx_qp(dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = random32() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		goto err_rep;
	}

	cm_id->context = p;
	p->jiffies = jiffies;
	p->state = IPOIB_CM_RX_LIVE;
	spin_lock_irq(&priv->lock);
	if (list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	list_add(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);
	return 0;

err_rep:
err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		p = cm_id->context;
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = netdev_priv(p->dev);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
		/* Fall through */
	default:
		return 0;
	}
}
/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length, struct sk_buff *toskb)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			frag->size = size;
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}

void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_CM_OP_SRQ;
	struct sk_buff *skb, *newskb;
	struct ipoib_cm_rx *p;
	unsigned long flags;
	u64 mapping[IPOIB_CM_RX_SG];
	int frags;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~IPOIB_CM_OP_SRQ)) {
			spin_lock_irqsave(&priv->lock, flags);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else
			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
				   wr_id, ipoib_recvq_size);
		return;
	}

	skb = priv->cm.srq_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);
		++priv->stats.rx_dropped;
		goto repost;
	}

	if (!likely(wr_id & IPOIB_CM_RX_UPDATE_MASK)) {
		p = wc->qp->qp_context;
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			spin_lock_irqsave(&priv->lock, flags);
			p->jiffies = jiffies;
			/* Move this entry to list head, but do not re-add it
			 * if it has been moved out of list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

	newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, mapping);
	if (unlikely(!newskb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++priv->stats.rx_dropped;
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, frags, priv->cm.srq_ring[wr_id].mapping);
	memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	dev->last_rx = jiffies;
	++priv->stats.rx_packets;
	priv->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

repost:
	if (unlikely(ipoib_cm_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_cm_post_receive failed "
			   "for buf %d\n", wr_id);
}

static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    unsigned int wr_id,
			    u64 addr, int len)
{
	struct ib_send_wr *bad_wr;

	priv->tx_sge.addr = addr;
	priv->tx_sge.length = len;

	priv->tx_wr.wr_id = wr_id;

	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}

void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	u64 addr;

	if (unlikely(skb->len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, tx->mtu);
		++priv->stats.tx_dropped;
		++priv->stats.tx_errors;
		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
		return;
	}

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, skb->len, tx->qp->qp_num);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
		++priv->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	tx_req->mapping = addr;

	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
			       addr, skb->len))) {
		ipoib_warn(priv, "post_send failed\n");
		++priv->stats.tx_errors;
		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;
		++tx->tx_head;

		if (tx->tx_head - tx->tx_tail == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			netif_stop_queue(dev);
			set_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags);
		}
	}
}

static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx,
				  struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;
	unsigned long flags;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++priv->stats.tx_packets;
	priv->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	spin_lock_irqsave(&priv->tx_lock, flags);
	++tx->tx_tail;
	if (unlikely(test_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags)) &&
	    tx->tx_head - tx->tx_tail <= ipoib_sendq_size >> 1) {
		clear_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags);
		netif_wake_queue(dev);
	}

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_neigh *neigh;

		ipoib_dbg(priv, "failed cm send event "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);

		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		/* queue would be re-started anyway when TX is destroyed,
		 * but it makes sense to do it ASAP here. */
		if (test_and_clear_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags))
			netif_wake_queue(dev);

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

		spin_unlock(&priv->lock);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);
}

static void ipoib_cm_tx_completion(struct ib_cq *cq, void *tx_ptr)
{
	struct ipoib_cm_tx *tx = tx_ptr;
	int n, i;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	do {
		n = ib_poll_cq(cq, IPOIB_NUM_WC, tx->ibwc);
		for (i = 0; i < n; ++i)
			ipoib_cm_handle_tx_wc(tx->dev, tx, tx->ibwc + i);
	} while (n == IPOIB_NUM_WC);
}

int ipoib_cm_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr qp_init_attr = {
		.send_cq = priv->cq, /* does not matter, we never send anything */
		.recv_cq = priv->cq,
		.cap.max_send_wr = 1, /* FIXME: 0 Seems not to work */
		.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
		.cap.max_recv_wr = 1,
		.cap.max_recv_sge = 1, /* FIXME: 0 Seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_UC,
	};
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
		return 0;

	priv->cm.rx_drain_qp = ib_create_qp(priv->pd, &qp_init_attr);
	if (IS_ERR(priv->cm.rx_drain_qp)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.rx_drain_qp);
		return ret;
	}

	/*
	 * We put the QP in error state directly.  This way, a "flush
	 * error" WC will be immediately generated for each WR we post.
	 */
	ret = ib_modify_qp(priv->cm.rx_drain_qp, &ipoib_cm_err_attr, IB_QP_STATE);
	if (ret) {
		ipoib_warn(priv, "failed to modify drain QP to error: %d\n", ret);
		goto err_qp;
	}

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		goto err_cm;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
			   0, NULL);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		goto err_listen;
	}

	return 0;

err_listen:
	ib_destroy_cm_id(priv->cm.id);
err_cm:
	priv->cm.id = NULL;
err_qp:
	ib_destroy_qp(priv->cm.rx_drain_qp);
	return ret;
}

void ipoib_cm_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p, *n;
	unsigned long begin;
	LIST_HEAD(list);
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	/* Wait for all RX to be drained */
	begin = jiffies;

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list, &list);
			list_splice_init(&priv->cm.rx_error_list, &list);
			list_splice_init(&priv->cm.rx_drain_list, &list);
			break;
		}
		spin_unlock_irq(&priv->lock);
		msleep(1);
		ipoib_drain_cq(dev);
		spin_lock_irq(&priv->lock);
	}

	list_splice_init(&priv->cm.rx_reap_list, &list);

	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(p, n, &list, list) {
		ib_destroy_cm_id(p->id);
		ib_destroy_qp(p->qp);
		kfree(p);
	}

	ib_destroy_qp(priv->cm.rx_drain_qp);
	cancel_delayed_work(&priv->cm.stale_task);
}

static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_data *data = event->private_data;
	struct sk_buff_head skqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct sk_buff *skb;

	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu < priv->dev->mtu + IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d < device mtu %d + 4\n",
			   p->mtu, priv->dev->mtu);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	skb_queue_head_init(&skqueue);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->neigh)
		while ((skb = __skb_dequeue(&p->neigh->queue)))
			__skb_queue_tail(&skqueue, skb);
	spin_unlock_irq(&priv->lock);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = p->dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}

static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ib_cq *cq)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {};
	attr.recv_cq = priv->cq;
	attr.srq = priv->cm.srq;
	attr.cap.max_send_wr = ipoib_sendq_size;
	attr.cap.max_send_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	attr.qp_type = IB_QPT_RC;
	attr.send_cq = cq;
	return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_send_req(struct net_device *dev,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	req.primary_path = pathrec;
	req.alternate_path = NULL;
	req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num = qp->qp_num;
	req.qp_type = qp->qp_type;
	req.private_data = &data;
	req.private_data_len = sizeof data;
	req.flow_control = 0;

	req.starting_psn = 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources = 4;
	req.remote_cm_response_timeout = 20;
	req.local_cm_response_timeout = 20;
	req.retry_count = 0; /* RFC draft warns against retries */
	req.rnr_retry_count = 0; /* RFC draft warns against retries */
	req.max_cm_retries = 15;
	req.srq = 1;
	return ib_send_cm_req(id, &req);
}

static int ipoib_cm_modify_tx_init(struct net_device *dev,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	ret = ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not in cache: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}

static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	int ret;

	p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring,
			     GFP_KERNEL);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}

	p->cq = ib_create_cq(priv->ca, ipoib_cm_tx_completion, NULL, p,
			     ipoib_sendq_size + 1, 0);
	if (IS_ERR(p->cq)) {
		ret = PTR_ERR(p->cq);
		ipoib_warn(priv, "failed to allocate tx cq: %d\n", ret);
		goto err_cq;
	}

	ret = ib_req_notify_cq(p->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		ipoib_warn(priv, "failed to request completion notification: %d\n", ret);
		goto err_req_notify;
	}

	p->qp = ipoib_cm_create_tx_qp(p->dev, p->cq);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
		goto err_modify;
	}

	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_send_cm;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid " IPOIB_GID_FMT " qpn 0x%x\n",
		  p->qp->qp_num, IPOIB_GID_ARG(pathrec->dgid), qpn);

	return 0;

err_send_cm:
err_modify:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_req_notify:
err_qp:
	p->qp = NULL;
	ib_destroy_cq(p->cq);
err_cq:
	p->cq = NULL;
err_tx:
	return ret;
}

static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->qp)
		ib_destroy_qp(p->qp);

	if (p->cq)
		ib_destroy_cq(p->cq);

	if (test_bit(IPOIB_FLAG_NETIF_STOPPED, &p->flags))
		netif_wake_queue(p->dev);

	if (p->tx_ring) {
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
			ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
					    DMA_TO_DEVICE);
			dev_kfree_skb_any(tx_req->skb);
			++p->tx_tail;
		}

		kfree(p->tx_ring);
	}

	kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		break;
	default:
		break;
	}

	return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
				       struct ipoib_neigh *neigh)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
	if (!tx)
		return NULL;

	neigh->cm = tx;
	tx->neigh = neigh;
	tx->path = path;
	tx->dev = dev;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(ipoib_workqueue, &priv->cm.start_task);
	return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		list_move(&tx->list, &priv->cm.reap_list);
		queue_work(ipoib_workqueue, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid " IPOIB_GID_FMT "\n",
			  IPOIB_GID_ARG(tx->neigh->dgid));
		tx->neigh = NULL;
	}
}

static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	spin_lock_irqsave(&priv->tx_lock, flags);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		neigh = p->neigh;
		qpn = IPOIB_QPN(neigh->neighbour->ha);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
		spin_unlock(&priv->lock);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		ret = ipoib_cm_tx_init(p, qpn, &pathrec);
		spin_lock_irqsave(&priv->tx_lock, flags);
		spin_lock(&priv->lock);
		if (ret) {
			neigh = p->neigh;
			if (neigh) {
				neigh->cm = NULL;
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
			}
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock(&priv->lock);
	spin_unlock_irqrestore(&priv->tx_lock, flags);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct ipoib_cm_tx *p;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del(&p->list);
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		ipoib_cm_tx_destroy(p);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}

static void ipoib_cm_skb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.skb_task);
	struct net_device *dev = priv->dev;
	struct sk_buff *skb;

	unsigned mtu = priv->mcast_mtu;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		if (skb->protocol == htons(ETH_P_IP))
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
#endif
		dev_kfree_skb_any(skb);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}

void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
			   unsigned int mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int e = skb_queue_empty(&priv->cm.skb_queue);

	if (skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);

	skb_queue_tail(&priv->cm.skb_queue, skb);
	if (e)
		queue_work(ipoib_workqueue, &priv->cm.skb_task);
}

static void ipoib_cm_rx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.rx_reap_task);
	struct ipoib_cm_rx *p, *n;
	LIST_HEAD(list);

	spin_lock_irq(&priv->lock);
	list_splice_init(&priv->cm.rx_reap_list, &list);
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(p, n, &list, list) {
		ib_destroy_cm_id(p->id);
		ib_destroy_qp(p->qp);
		kfree(p);
	}
}

static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;
	int ret;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}


static ssize_t show_mode(struct device *d, struct device_attribute *attr,
			 char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		return sprintf(buf, "connected\n");
	else
		return sprintf(buf, "datagram\n");
}

static ssize_t set_mode(struct device *d, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct net_device *dev = to_net_dev(d);
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
		ipoib_flush_paths(dev);
		return count;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		dev->mtu = min(priv->mcast_mtu, dev->mtu);
		ipoib_flush_paths(dev);
		return count;
	}

	return -EINVAL;
}

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);

int ipoib_cm_add_mode_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_mode);
}

int ipoib_cm_dev_init(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_srq_init_attr srq_init_attr = {
		.attr = {
			.max_wr = ipoib_recvq_size,
			.max_sge = IPOIB_CM_RX_SG
		}
	};
	int ret, i;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	skb_queue_head_init(&priv->cm.skb_queue);

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		ret = PTR_ERR(priv->cm.srq);
		priv->cm.srq = NULL;
		return ret;
	}

	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
				    GFP_KERNEL);
	if (!priv->cm.srq_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		ipoib_cm_dev_cleanup(dev);
		return -ENOMEM;
	}

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		priv->cm.rx_sge[i].lkey = priv->mr->lkey;

	priv->cm.rx_sge[0].length = IPOIB_CM_HEAD_SIZE;
	for (i = 1; i < IPOIB_CM_RX_SG; ++i)
		priv->cm.rx_sge[i].length = PAGE_SIZE;
	priv->cm.rx_wr.next = NULL;
	priv->cm.rx_wr.sg_list = priv->cm.rx_sge;
	priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_skb(dev, i, IPOIB_CM_RX_SG - 1,
					   priv->cm.srq_ring[i].mapping)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ipoib_cm_dev_cleanup(dev);
			return -ENOMEM;
		}
		if (ipoib_cm_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			ipoib_cm_dev_cleanup(dev);
			return -EIO;
		}
	}

	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
	return 0;
}

void ipoib_cm_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;
	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->cm.srq_ring[i].skb) {
			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
					      priv->cm.srq_ring[i].mapping);
			dev_kfree_skb_any(priv->cm.srq_ring[i].skb);
			priv->cm.srq_ring[i].skb = NULL;
		}
	kfree(priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;
}