ipoib_ib.c revision 31c02e215700c2b704d9441f629ae87bb9aeb561
/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_ib.c 1386 2004-12-27 16:23:17Z roland $
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>

#include <rdma/ib_cache.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
                 "Enable data path debug tracing if > 0");
#endif

#define IPOIB_OP_RECV (1ul << 31)

static DEFINE_MUTEX(pkey_mutex);

struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
                                 struct ib_pd *pd, struct ib_ah_attr *attr)
{
        struct ipoib_ah *ah;

        ah = kmalloc(sizeof *ah, GFP_KERNEL);
        if (!ah)
                return NULL;

        ah->dev       = dev;
        ah->last_send = 0;
        kref_init(&ah->ref);

        ah->ah = ib_create_ah(pd, attr);
        if (IS_ERR(ah->ah)) {
                kfree(ah);
                ah = NULL;
        } else
                ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);

        return ah;
}

void ipoib_free_ah(struct kref *kref)
{
        struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
        struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        list_add_tail(&ah->list, &priv->dead_ahs);
        spin_unlock_irqrestore(&priv->lock, flags);
}

static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_sge list;
        struct ib_recv_wr param;
        struct ib_recv_wr *bad_wr;
        int ret;

        list.addr     = priv->rx_ring[id].mapping;
        list.length   = IPOIB_BUF_SIZE;
        list.lkey     = priv->mr->lkey;

        param.next    = NULL;
        param.wr_id   = id | IPOIB_OP_RECV;
        param.sg_list = &list;
        param.num_sge = 1;

        ret = ib_post_recv(priv->qp, &param, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
                dma_unmap_single(priv->ca->dma_device,
                                 priv->rx_ring[id].mapping,
                                 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
                dev_kfree_skb_any(priv->rx_ring[id].skb);
                priv->rx_ring[id].skb = NULL;
        }

        return ret;
}
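
/*
 * Allocate and DMA-map a fresh receive skb for ring slot @id.  The 4
 * bytes reserved below, plus the 40-byte GRH the HCA deposits and the
 * 4-byte IPoIB encapsulation header, leave the IP header aligned on a
 * 16-byte boundary.
 */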
static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        dma_addr_t addr;

        skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
        if (!skb)
                return -ENOMEM;

        /*
         * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
         * header.  So we need 4 more bytes to get to 48 and align the
         * IP header to a multiple of 16.
         */
        skb_reserve(skb, 4);

        addr = dma_map_single(priv->ca->dma_device,
                              skb->data, IPOIB_BUF_SIZE,
                              DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(addr))) {
                dev_kfree_skb_any(skb);
                return -EIO;
        }

        priv->rx_ring[id].skb     = skb;
        priv->rx_ring[id].mapping = addr;

        return 0;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (ipoib_alloc_rx_skb(dev, i)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        return -ENOMEM;
                }
                if (ipoib_ib_post_receive(dev, i)) {
                        ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
                        return -EIO;
                }
        }

        return 0;
}

static void ipoib_ib_handle_wc(struct net_device *dev,
                               struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id;

        ipoib_dbg_data(priv, "called: id %d, op %d, status: %d\n",
                       wr_id, wc->opcode, wc->status);

        if (wr_id & IPOIB_OP_RECV) {
                wr_id &= ~IPOIB_OP_RECV;

                if (wr_id < ipoib_recvq_size) {
                        struct sk_buff *skb  = priv->rx_ring[wr_id].skb;
                        dma_addr_t      addr = priv->rx_ring[wr_id].mapping;

                        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                                if (wc->status != IB_WC_WR_FLUSH_ERR)
                                        ipoib_warn(priv, "failed recv event "
                                                   "(status=%d, wrid=%d vend_err %x)\n",
                                                   wc->status, wr_id, wc->vendor_err);
                                dma_unmap_single(priv->ca->dma_device, addr,
                                                 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
                                dev_kfree_skb_any(skb);
                                priv->rx_ring[wr_id].skb = NULL;
                                return;
                        }

                        /*
                         * If we can't allocate a new RX buffer, dump
                         * this packet and reuse the old buffer.
                         */
                        if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
                                ++priv->stats.rx_dropped;
                                goto repost;
                        }

                        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                                       wc->byte_len, wc->slid);

                        dma_unmap_single(priv->ca->dma_device, addr,
                                         IPOIB_BUF_SIZE, DMA_FROM_DEVICE);

                        skb_put(skb, wc->byte_len);
                        skb_pull(skb, IB_GRH_BYTES);

                        if (wc->slid != priv->local_lid ||
                            wc->src_qp != priv->qp->qp_num) {
                                skb->protocol = ((struct ipoib_header *) skb->data)->proto;
                                skb->mac.raw = skb->data;
                                skb_pull(skb, IPOIB_ENCAP_LEN);

                                dev->last_rx = jiffies;
                                ++priv->stats.rx_packets;
                                priv->stats.rx_bytes += skb->len;

                                skb->dev = dev;
                                /* XXX get correct PACKET_ type here */
                                skb->pkt_type = PACKET_HOST;
                                netif_rx_ni(skb);
                        } else {
                                ipoib_dbg_data(priv, "dropping loopback packet\n");
                                dev_kfree_skb_any(skb);
                        }

                repost:
                        if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
                                ipoib_warn(priv, "ipoib_ib_post_receive failed "
                                           "for buf %d\n", wr_id);
                } else
                        ipoib_warn(priv, "completion event with wrid %d\n",
                                   wr_id);

        } else {
                struct ipoib_tx_buf *tx_req;
                unsigned long flags;

                if (wr_id >= ipoib_sendq_size) {
                        ipoib_warn(priv, "completion event with wrid %d (> %d)\n",
                                   wr_id, ipoib_sendq_size);
                        return;
                }

                ipoib_dbg_data(priv, "send complete, wrid %d\n", wr_id);

                tx_req = &priv->tx_ring[wr_id];

                dma_unmap_single(priv->ca->dma_device,
                                 pci_unmap_addr(tx_req, mapping),
                                 tx_req->skb->len,
                                 DMA_TO_DEVICE);

                ++priv->stats.tx_packets;
                priv->stats.tx_bytes += tx_req->skb->len;

                dev_kfree_skb_any(tx_req->skb);

                spin_lock_irqsave(&priv->tx_lock, flags);
                ++priv->tx_tail;
                if (netif_queue_stopped(dev) &&
                    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
                    priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
                        netif_wake_queue(dev);
                spin_unlock_irqrestore(&priv->tx_lock, flags);

                if (wc->status != IB_WC_SUCCESS &&
                    wc->status != IB_WC_WR_FLUSH_ERR)
                        ipoib_warn(priv, "failed send event "
                                   "(status=%d, wrid=%d vend_err %x)\n",
                                   wc->status, wr_id, wc->vendor_err);
        }
}
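
/*
 * CQ event handler: rearm the CQ for the next completion event, then
 * drain it in batches of IPOIB_NUM_WC work completions.  The CQ is
 * rearmed before polling so that a completion arriving while we drain
 * still generates a fresh event; the loop exits once a partial batch
 * shows the queue is empty.
 */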
void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
        struct net_device *dev = (struct net_device *) dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int n, i;

        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
        do {
                n = ib_poll_cq(cq, IPOIB_NUM_WC, priv->ibwc);
                for (i = 0; i < n; ++i)
                        ipoib_ib_handle_wc(dev, priv->ibwc + i);
        } while (n == IPOIB_NUM_WC);
}

static inline int post_send(struct ipoib_dev_priv *priv,
                            unsigned int wr_id,
                            struct ib_ah *address, u32 qpn,
                            dma_addr_t addr, int len)
{
        struct ib_send_wr *bad_wr;

        priv->tx_sge.addr            = addr;
        priv->tx_sge.length          = len;

        priv->tx_wr.wr_id            = wr_id;
        priv->tx_wr.wr.ud.remote_qpn = qpn;
        priv->tx_wr.wr.ud.ah         = address;

        return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}

void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                struct ipoib_ah *address, u32 qpn)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_tx_buf *tx_req;
        dma_addr_t addr;

        if (skb->len > dev->mtu + INFINIBAND_ALEN) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                           skb->len, dev->mtu + INFINIBAND_ALEN);
                ++priv->stats.tx_dropped;
                ++priv->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }

        ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
                       skb->len, address, qpn);

        /*
         * We put the skb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send().  That
         * means we have to make sure everything is properly recorded and
         * our state is consistent before we call post_send().
         */
        tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
        tx_req->skb = skb;
        addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len,
                              DMA_TO_DEVICE);
        pci_unmap_addr_set(tx_req, mapping, addr);

        if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                               address->ah, qpn, addr, skb->len))) {
                ipoib_warn(priv, "post_send failed\n");
                ++priv->stats.tx_errors;
                dma_unmap_single(priv->ca->dma_device, addr, skb->len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        } else {
                dev->trans_start = jiffies;

                address->last_send = priv->tx_head;
                ++priv->tx_head;

                if (priv->tx_head - priv->tx_tail == ipoib_sendq_size) {
                        ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
                        netif_stop_queue(dev);
                }
        }
}
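
/*
 * Address handles cannot be destroyed while a send referencing them
 * may still be in flight, so ipoib_free_ah() only queues them on
 * dead_ahs.  Each AH records in last_send the tx_head value of the
 * last send posted with it; once tx_tail passes that point, all such
 * sends have completed and the AH can safely be destroyed.
 */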
static void __ipoib_reap_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah, *tah;
        LIST_HEAD(remove_list);

        spin_lock_irq(&priv->tx_lock);
        spin_lock(&priv->lock);
        list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
                if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
                        list_del(&ah->list);
                        ib_destroy_ah(ah->ah);
                        kfree(ah);
                }
        spin_unlock(&priv->lock);
        spin_unlock_irq(&priv->tx_lock);
}

void ipoib_reap_ah(void *dev_ptr)
{
        struct net_device *dev = dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        __ipoib_reap_ah(dev);

        if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
                queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);
}

int ipoib_ib_dev_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;

        ret = ipoib_init_qp(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
                return -1;
        }

        ret = ipoib_ib_post_receives(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
                ipoib_ib_dev_stop(dev);
                return -1;
        }

        clear_bit(IPOIB_STOP_REAPER, &priv->flags);
        queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);

        set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

        return 0;
}

static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        u16 pkey_index = 0;

        if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
        else
                set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

int ipoib_ib_dev_up(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_pkey_dev_check_presence(dev);

        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                ipoib_dbg(priv, "PKEY is not assigned.\n");
                return 0;
        }

        set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

        return ipoib_mcast_start_thread(dev);
}

int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "downing ib_dev\n");

        clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
        netif_carrier_off(dev);

        /* Shutdown the P_Key thread if still active */
        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                mutex_lock(&pkey_mutex);
                set_bit(IPOIB_PKEY_STOP, &priv->flags);
                cancel_delayed_work(&priv->pkey_task);
                mutex_unlock(&pkey_mutex);
                if (flush)
                        flush_workqueue(ipoib_workqueue);
        }

        ipoib_mcast_stop_thread(dev, flush);
        ipoib_mcast_dev_flush(dev);

        ipoib_flush_paths(dev);

        return 0;
}
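
/*
 * A receive ring slot still holding an skb is a posted receive that
 * has not yet completed or been flushed; ipoib_ib_dev_stop() uses
 * this count to decide when the QP has fully drained.
 */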
static int recvs_pending(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int pending = 0;
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i)
                if (priv->rx_ring[i].skb)
                        ++pending;

        return pending;
}

int ipoib_ib_dev_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
        unsigned long begin;
        struct ipoib_tx_buf *tx_req;
        int i;

        clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

        /*
         * Move our QP to the error state and then reinitialize when
         * all work requests have completed or have been flushed.
         */
        qp_attr.qp_state = IB_QPS_ERR;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

        /* Wait for all sends and receives to complete */
        begin = jiffies;

        while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
                                   priv->tx_head - priv->tx_tail, recvs_pending(dev));

                        /*
                         * Assume the HW is wedged and just free up
                         * all our pending work requests.
                         */
                        while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
                                tx_req = &priv->tx_ring[priv->tx_tail &
                                                        (ipoib_sendq_size - 1)];
                                dma_unmap_single(priv->ca->dma_device,
                                                 pci_unmap_addr(tx_req, mapping),
                                                 tx_req->skb->len,
                                                 DMA_TO_DEVICE);
                                dev_kfree_skb_any(tx_req->skb);
                                ++priv->tx_tail;
                        }

                        for (i = 0; i < ipoib_recvq_size; ++i)
                                if (priv->rx_ring[i].skb) {
                                        dma_unmap_single(priv->ca->dma_device,
                                                         pci_unmap_addr(&priv->rx_ring[i],
                                                                        mapping),
                                                         IPOIB_BUF_SIZE,
                                                         DMA_FROM_DEVICE);
                                        dev_kfree_skb_any(priv->rx_ring[i].skb);
                                        priv->rx_ring[i].skb = NULL;
                                }

                        goto timeout;
                }

                msleep(1);
        }

        ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
        qp_attr.qp_state = IB_QPS_RESET;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to RESET state\n");

        /* Wait for all AHs to be reaped */
        set_bit(IPOIB_STOP_REAPER, &priv->flags);
        cancel_delayed_work(&priv->ah_reap_task);
        flush_workqueue(ipoib_workqueue);

        begin = jiffies;

        while (!list_empty(&priv->dead_ahs)) {
                __ipoib_reap_ah(dev);

                if (time_after(jiffies, begin + HZ)) {
                        ipoib_warn(priv, "timing out; will leak address handles\n");
                        break;
                }

                msleep(1);
        }

        return 0;
}
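
/*
 * Per-port initialization: record the CA and port for this interface
 * and create the IB transport resources.  If the net device is
 * already administratively up, bring the IB side up here as well.
 */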
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        priv->ca = ca;
        priv->port = port;
        priv->qp = NULL;

        if (ipoib_transport_dev_init(dev, ca)) {
                printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
                return -ENODEV;
        }

        if (dev->flags & IFF_UP) {
                if (ipoib_ib_dev_open(dev)) {
                        ipoib_transport_dev_cleanup(dev);
                        return -ENODEV;
                }
        }

        return 0;
}

void ipoib_ib_dev_flush(void *_dev)
{
        struct net_device *dev = (struct net_device *)_dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv;

        if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
                return;
        }

        if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
                return;
        }

        ipoib_dbg(priv, "flushing\n");

        ipoib_ib_dev_down(dev, 0);

        /*
         * The device could have been brought down between the start and when
         * we get here, don't bring it back up if it's not configured up
         */
        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                ipoib_ib_dev_up(dev);

        mutex_lock(&priv->vlan_mutex);

        /* Flush any child interfaces too */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
                ipoib_ib_dev_flush(cpriv->dev);

        mutex_unlock(&priv->vlan_mutex);
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "cleaning up ib_dev\n");

        ipoib_mcast_stop_thread(dev, 1);
        ipoib_mcast_dev_flush(dev);

        ipoib_transport_dev_cleanup(dev);
}

/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of the delayed P_Key
 * assignment mechanism.  It uses the same approach implemented for the
 * multicast group join.  The single goal of this implementation is to
 * quickly address Bug #2507.  This implementation will probably be
 * removed when the P_Key change async notification is available.
 */
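
/*
 * Delayed-work handler: re-check whether our P_Key has appeared in
 * the local port's P_Key table.  If it has, finish bringing the
 * interface up; otherwise re-arm the poll for another second, unless
 * IPOIB_PKEY_STOP has been set in the meantime.
 */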
void ipoib_pkey_poll(void *dev_ptr)
{
        struct net_device *dev = dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_pkey_dev_check_presence(dev);

        if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
                ipoib_open(dev);
        else {
                mutex_lock(&pkey_mutex);
                if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
                        queue_delayed_work(ipoib_workqueue,
                                           &priv->pkey_task,
                                           HZ);
                mutex_unlock(&pkey_mutex);
        }
}

int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /*
         * Look for the interface pkey value in the IB Port P_Key table
         * and set the interface pkey assignment flag accordingly.
         */
        ipoib_pkey_dev_check_presence(dev);

        /* P_Key value not assigned yet - start polling */
        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                mutex_lock(&pkey_mutex);
                clear_bit(IPOIB_PKEY_STOP, &priv->flags);
                queue_delayed_work(ipoib_workqueue,
                                   &priv->pkey_task,
                                   HZ);
                mutex_unlock(&pkey_mutex);
                return 1;
        }

        return 0;
}