ipath_qp.c revision 6022943eb4cb3cb9e43f27f1faeaba38e162d966
/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"

#define BITS_PER_PAGE           (PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK      (BITS_PER_PAGE-1)
#define mk_qpn(qpt, map, off)   (((map) - (qpt)->map) * BITS_PER_PAGE + \
                                 (off))
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
                                                      BITS_PER_PAGE, off)

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
        0,                      /* 0 */
        1,                      /* 1 */
        2,                      /* 2 */
        3,                      /* 3 */
        4,                      /* 4 */
        6,                      /* 5 */
        8,                      /* 6 */
        12,                     /* 7 */
        16,                     /* 8 */
        24,                     /* 9 */
        32,                     /* A */
        48,                     /* B */
        64,                     /* C */
        96,                     /* D */
        128,                    /* E */
        192,                    /* F */
        256,                    /* 10 */
        384,                    /* 11 */
        512,                    /* 12 */
        768,                    /* 13 */
        1024,                   /* 14 */
        1536,                   /* 15 */
        2048,                   /* 16 */
        3072,                   /* 17 */
        4096,                   /* 18 */
        6144,                   /* 19 */
        8192,                   /* 1A */
        12288,                  /* 1B */
        16384,                  /* 1C */
        24576,                  /* 1D */
        32768                   /* 1E */
};

static u32 alloc_qpn(struct ipath_qp_table *qpt)
{
        u32 i, offset, max_scan, qpn;
        struct qpn_map *map;
        u32 ret;

        qpn = qpt->last + 1;
        if (qpn >= QPN_MAX)
                qpn = 2;
        offset = qpn & BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
                        unsigned long page = get_zeroed_page(GFP_KERNEL);
                        unsigned long flags;

                        /*
                         * Free the page if someone raced with us
                         * installing it:
                         */
                        spin_lock_irqsave(&qpt->lock, flags);
                        if (map->page)
                                free_page(page);
                        else
                                map->page = (void *)page;
                        spin_unlock_irqrestore(&qpt->lock, flags);
                        if (unlikely(!map->page))
                                break;
                }
                if (likely(atomic_read(&map->n_free))) {
                        do {
                                if (!test_and_set_bit(offset, map->page)) {
                                        atomic_dec(&map->n_free);
                                        qpt->last = qpn;
                                        ret = qpn;
                                        goto bail;
                                }
                                offset = find_next_offset(map, offset);
                                qpn = mk_qpn(qpt, map, offset);
                                /*
                                 * This test differs from alloc_pidmap().
                                 * If find_next_offset() does find a zero
                                 * bit, we don't need to check for QPN
                                 * wrapping around past our starting QPN.
                                 * We just need to be sure we don't loop
                                 * forever.
                                 */
                        } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
                }
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all the existing pages before increasing
                 * the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
                        offset = 0;
                } else if (map < &qpt->map[qpt->nmaps]) {
                        ++map;
                        offset = 0;
                } else {
                        map = &qpt->map[0];
                        offset = 2;
                }
                qpn = mk_qpn(qpt, map, offset);
        }

        ret = 0;

bail:
        return ret;
}
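/*
 * Editor's sketch (not part of the original file): how a QPN relates to
 * the (map, offset) pair scanned by alloc_qpn() above.  Each qpn_map
 * page covers BITS_PER_PAGE QPNs, so a QPN splits into a page index and
 * a bit offset, and mk_qpn() recombines them.  The helper name is
 * hypothetical and exists only to illustrate the arithmetic.
 */
static inline int qpn_maps_back_to_itself(struct ipath_qp_table *qpt, u32 qpn)
{
        struct qpn_map *map = &qpt->map[qpn / BITS_PER_PAGE]; /* page index */
        u32 off = qpn & BITS_PER_PAGE_MASK;                   /* bit offset */

        /* Holds for any qpn < QPN_MAX since BITS_PER_PAGE is a power of 2. */
        return mk_qpn(qpt, map, off) == qpn;
}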
static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
        struct qpn_map *map;

        map = qpt->map + qpn / BITS_PER_PAGE;
        if (map->page)
                clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
        atomic_inc(&map->n_free);
}

/**
 * ipath_alloc_qpn - allocate a QP number
 * @qpt: the QP table
 * @qp: the QP
 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
 *
 * Allocate the next available QPN and put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
                           enum ib_qp_type type)
{
        unsigned long flags;
        u32 qpn;
        int ret;

        if (type == IB_QPT_SMI)
                qpn = 0;
        else if (type == IB_QPT_GSI)
                qpn = 1;
        else {
                /* Allocate the next available QPN */
                qpn = alloc_qpn(qpt);
                if (qpn == 0) {
                        ret = -ENOMEM;
                        goto bail;
                }
        }
        qp->ibqp.qp_num = qpn;

        /* Add the QP to the hash table. */
        spin_lock_irqsave(&qpt->lock, flags);

        qpn %= qpt->max;
        qp->next = qpt->table[qpn];
        qpt->table[qpn] = qp;
        atomic_inc(&qp->refcount);

        spin_unlock_irqrestore(&qpt->lock, flags);
        ret = 0;

bail:
        return ret;
}

/**
 * ipath_free_qp - remove a QP from the QP table
 * @qpt: the QP table
 * @qp: the QP to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
        struct ipath_qp *q, **qpp;
        unsigned long flags;
        int fnd = 0;

        spin_lock_irqsave(&qpt->lock, flags);

        /* Remove QP from the hash table. */
        qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
        for (; (q = *qpp) != NULL; qpp = &q->next) {
                if (q == qp) {
                        *qpp = qp->next;
                        qp->next = NULL;
                        atomic_dec(&qp->refcount);
                        fnd = 1;
                        break;
                }
        }

        spin_unlock_irqrestore(&qpt->lock, flags);

        if (!fnd)
                return;

        /* If QPN is not reserved, mark QPN free in the bitmap. */
        if (qp->ibqp.qp_num > 1)
                free_qpn(qpt, qp->ibqp.qp_num);

        wait_event(qp->wait, !atomic_read(&qp->refcount));
}
/**
 * ipath_free_all_qps - remove all QPs from the table
 * @qpt: the QP table to empty
 */
void ipath_free_all_qps(struct ipath_qp_table *qpt)
{
        unsigned long flags;
        struct ipath_qp *qp, *nqp;
        u32 n;

        for (n = 0; n < qpt->max; n++) {
                spin_lock_irqsave(&qpt->lock, flags);
                qp = qpt->table[n];
                qpt->table[n] = NULL;
                spin_unlock_irqrestore(&qpt->lock, flags);

                while (qp) {
                        nqp = qp->next;
                        if (qp->ibqp.qp_num > 1)
                                free_qpn(qpt, qp->ibqp.qp_num);
                        if (!atomic_dec_and_test(&qp->refcount) ||
                            !ipath_destroy_qp(&qp->ibqp))
                                ipath_dbg(KERN_INFO "QP memory leak!\n");
                        qp = nqp;
                }
        }

        for (n = 0; n < ARRAY_SIZE(qpt->map); n++) {
                if (qpt->map[n].page)
                        free_page((unsigned long)qpt->map[n].page);
        }
}

/**
 * ipath_lookup_qpn - return the QP with the given QPN
 * @qpt: the QP table
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
        unsigned long flags;
        struct ipath_qp *qp;

        spin_lock_irqsave(&qpt->lock, flags);

        for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
                if (qp->ibqp.qp_num == qpn) {
                        atomic_inc(&qp->refcount);
                        break;
                }
        }

        spin_unlock_irqrestore(&qpt->lock, flags);
        return qp;
}
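/*
 * Usage sketch (editor's illustration, not driver code): receive-path
 * callers pair ipath_lookup_qpn() with a reference drop, and the final
 * drop must wake qp->wait so ipath_free_qp() can finish its
 * wait_event().  The function and the elided packet handler are
 * hypothetical.
 */
static void lookup_qpn_usage_sketch(struct ipath_qp_table *qpt, u32 qpn)
{
        struct ipath_qp *qp = ipath_lookup_qpn(qpt, qpn); /* takes a ref */

        if (!qp)
                return;                 /* unknown QPN; drop the packet */

        /* ... process the packet for this QP ... */

        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);     /* last ref: unblock ipath_free_qp() */
}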
/**
 * ipath_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 */
static void ipath_reset_qp(struct ipath_qp *qp)
{
        qp->remote_qpn = 0;
        qp->qkey = 0;
        qp->qp_access_flags = 0;
        clear_bit(IPATH_S_BUSY, &qp->s_flags);
        qp->s_hdrwords = 0;
        qp->s_psn = 0;
        qp->r_psn = 0;
        qp->r_msn = 0;
        if (qp->ibqp.qp_type == IB_QPT_RC) {
                qp->s_state = IB_OPCODE_RC_SEND_LAST;
                qp->r_state = IB_OPCODE_RC_SEND_LAST;
        } else {
                qp->s_state = IB_OPCODE_UC_SEND_LAST;
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        }
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->r_nak_state = 0;
        qp->s_rnr_timeout = 0;
        qp->s_head = 0;
        qp->s_tail = 0;
        qp->s_cur = 0;
        qp->s_last = 0;
        qp->s_ssn = 1;
        qp->s_lsn = 0;
        qp->s_wait_credit = 0;
        if (qp->r_rq.wq) {
                qp->r_rq.wq->head = 0;
                qp->r_rq.wq->tail = 0;
        }
        qp->r_reuse_sge = 0;
}

/**
 * ipath_error_qp - put a QP into an error state
 * @qp: the QP to put into an error state
 *
 * Flushes both send and receive work queues.
 * QP s_lock should be held and interrupts disabled.
 */
void ipath_error_qp(struct ipath_qp *qp)
{
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ib_wc wc;

        ipath_dbg(KERN_INFO "QP%d/%d in error state\n",
                  qp->ibqp.qp_num, qp->remote_qpn);

        spin_lock(&dev->pending_lock);
        /* XXX What if it's already been removed by the timeout code? */
        if (!list_empty(&qp->timerwait))
                list_del_init(&qp->timerwait);
        if (!list_empty(&qp->piowait))
                list_del_init(&qp->piowait);
        spin_unlock(&dev->pending_lock);

        wc.status = IB_WC_WR_FLUSH_ERR;
        wc.vendor_err = 0;
        wc.byte_len = 0;
        wc.imm_data = 0;
        wc.qp_num = qp->ibqp.qp_num;
        wc.src_qp = 0;
        wc.wc_flags = 0;
        wc.pkey_index = 0;
        wc.slid = 0;
        wc.sl = 0;
        wc.dlid_path_bits = 0;
        wc.port_num = 0;

        while (qp->s_last != qp->s_head) {
                struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

                wc.wr_id = wqe->wr.wr_id;
                wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
                if (++qp->s_last >= qp->s_size)
                        qp->s_last = 0;
                ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
        }
        qp->s_cur = qp->s_tail = qp->s_head;
        qp->s_hdrwords = 0;
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;

        if (qp->r_rq.wq) {
                struct ipath_rwq *wq;
                u32 head;
                u32 tail;

                spin_lock(&qp->r_rq.lock);

                /* sanity check pointers before trusting them */
                wq = qp->r_rq.wq;
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                wc.opcode = IB_WC_RECV;
                while (tail != head) {
                        wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
                        if (++tail >= qp->r_rq.size)
                                tail = 0;
                        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
                }
                wq->tail = tail;

                spin_unlock(&qp->r_rq.lock);
        }
}
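/*
 * Consumer-side sketch (editor's illustration): once ipath_error_qp()
 * has run, every outstanding WR completes with IB_WC_WR_FLUSH_ERR, so a
 * completion handler can reap and discard the flushed entries.  Only
 * the core verb ib_poll_cq() is used; the function itself is
 * hypothetical.
 */
static void reap_flushed_completions(struct ib_cq *cq)
{
        struct ib_wc wc;

        while (ib_poll_cq(cq, 1, &wc) > 0) {
                if (wc.status == IB_WC_WR_FLUSH_ERR)
                        continue;       /* flushed WR; nothing to do */
                /* ... handle genuine completions here ... */
        }
}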
/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                    int attr_mask, struct ib_udata *udata)
{
        struct ipath_ibdev *dev = to_idev(ibqp->device);
        struct ipath_qp *qp = to_iqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&qp->s_lock, flags);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask))
                goto inval;

        if (attr_mask & IB_QP_AV) {
                if (attr->ah_attr.dlid == 0 ||
                    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
                        goto inval;

                if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
                    (attr->ah_attr.grh.sgid_index > 1))
                        goto inval;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
                        goto inval;

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                if (attr->min_rnr_timer > 31)
                        goto inval;

        if (attr_mask & IB_QP_PORT)
                if (attr->port_num == 0 ||
                    attr->port_num > ibqp->device->phys_port_cnt)
                        goto inval;

        if (attr_mask & IB_QP_PATH_MTU)
                if (attr->path_mtu > IB_MTU_4096)
                        goto inval;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                if (attr->max_dest_rd_atomic > 1)
                        goto inval;

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
                if (attr->max_rd_atomic > 1)
                        goto inval;

        if (attr_mask & IB_QP_PATH_MIG_STATE)
                if (attr->path_mig_state != IB_MIG_MIGRATED &&
                    attr->path_mig_state != IB_MIG_REARM)
                        goto inval;

        switch (new_state) {
        case IB_QPS_RESET:
                ipath_reset_qp(qp);
                break;

        case IB_QPS_ERR:
                ipath_error_qp(qp);
                break;

        default:
                break;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                qp->s_pkey_index = attr->pkey_index;

        if (attr_mask & IB_QP_DEST_QPN)
                qp->remote_qpn = attr->dest_qp_num;

        if (attr_mask & IB_QP_SQ_PSN) {
                qp->s_psn = qp->s_next_psn = attr->sq_psn;
                qp->s_last_psn = qp->s_next_psn - 1;
        }

        if (attr_mask & IB_QP_RQ_PSN)
                qp->r_psn = attr->rq_psn;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->qp_access_flags = attr->qp_access_flags;

        if (attr_mask & IB_QP_AV)
                qp->remote_ah_attr = attr->ah_attr;

        if (attr_mask & IB_QP_PATH_MTU)
                qp->path_mtu = attr->path_mtu;

        if (attr_mask & IB_QP_RETRY_CNT)
                qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp->s_rnr_retry = attr->rnr_retry;
                if (qp->s_rnr_retry > 7)
                        qp->s_rnr_retry = 7;
                qp->s_rnr_retry_cnt = qp->s_rnr_retry;
        }

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->r_min_rnr_timer = attr->min_rnr_timer;

        if (attr_mask & IB_QP_TIMEOUT)
                qp->timeout = attr->timeout;

        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        qp->state = new_state;
        spin_unlock_irqrestore(&qp->s_lock, flags);

        ret = 0;
        goto bail;

inval:
        spin_unlock_irqrestore(&qp->s_lock, flags);
        ret = -EINVAL;

bail:
        return ret;
}
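/*
 * Caller sketch (editor's illustration): ipath_modify_qp() is reached
 * through the core ib_modify_qp() verb.  For example, forcing a QP into
 * the error state, which flushes both work queues via ipath_error_qp()
 * above; the wrapper function is hypothetical.
 */
static int force_qp_error_sketch(struct ib_qp *qp)
{
        struct ib_qp_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.qp_state = IB_QPS_ERR;
        return ib_modify_qp(qp, &attr, IB_QP_STATE);
}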
int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                   int attr_mask, struct ib_qp_init_attr *init_attr)
{
        struct ipath_qp *qp = to_iqp(ibqp);

        attr->qp_state = qp->state;
        attr->cur_qp_state = attr->qp_state;
        attr->path_mtu = qp->path_mtu;
        attr->path_mig_state = 0;
        attr->qkey = qp->qkey;
        attr->rq_psn = qp->r_psn;
        attr->sq_psn = qp->s_next_psn;
        attr->dest_qp_num = qp->remote_qpn;
        attr->qp_access_flags = qp->qp_access_flags;
        attr->cap.max_send_wr = qp->s_size - 1;
        attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
        attr->cap.max_send_sge = qp->s_max_sge;
        attr->cap.max_recv_sge = qp->r_rq.max_sge;
        attr->cap.max_inline_data = 0;
        attr->ah_attr = qp->remote_ah_attr;
        memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
        attr->pkey_index = qp->s_pkey_index;
        attr->alt_pkey_index = 0;
        attr->en_sqd_async_notify = 0;
        attr->sq_draining = 0;
        attr->max_rd_atomic = 1;
        attr->max_dest_rd_atomic = 1;
        attr->min_rnr_timer = qp->r_min_rnr_timer;
        attr->port_num = 1;
        attr->timeout = qp->timeout;
        attr->retry_cnt = qp->s_retry_cnt;
        attr->rnr_retry = qp->s_rnr_retry;
        attr->alt_port_num = 0;
        attr->alt_timeout = 0;

        init_attr->event_handler = qp->ibqp.event_handler;
        init_attr->qp_context = qp->ibqp.qp_context;
        init_attr->send_cq = qp->ibqp.send_cq;
        init_attr->recv_cq = qp->ibqp.recv_cq;
        init_attr->srq = qp->ibqp.srq;
        init_attr->cap = attr->cap;
        if (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR))
                init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
        else
                init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr->qp_type = qp->ibqp.qp_type;
        init_attr->port_num = 1;
        return 0;
}

/**
 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
        u32 aeth = qp->r_msn & IPATH_MSN_MASK;

        if (qp->ibqp.srq) {
                /*
                 * Shared receive queues don't generate credits.
                 * Set the credit field to the invalid value.
                 */
                aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
        } else {
                u32 min, max, x;
                u32 credits;
                struct ipath_rwq *wq = qp->r_rq.wq;
                u32 head;
                u32 tail;

                /* sanity check pointers before trusting them */
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                /*
                 * Compute the number of credits available (RWQEs).
                 * XXX Not holding the r_rq.lock here so there is a small
                 * chance that the pair of reads are not atomic.
                 */
                credits = head - tail;
                if ((int)credits < 0)
                        credits += qp->r_rq.size;
                /*
                 * Binary search the credit table to find the code to
                 * use.
                 */
                min = 0;
                max = 31;
                for (;;) {
                        x = (min + max) / 2;
                        if (credit_table[x] == credits)
                                break;
                        if (credit_table[x] > credits)
                                max = x;
                        else if (min == x)
                                break;
                        else
                                min = x;
                }
                aeth |= x << IPATH_AETH_CREDIT_SHIFT;
        }
        return cpu_to_be32(aeth);
}
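/*
 * Editor's note: the binary search above selects the largest code whose
 * table entry does not exceed the available credit count, i.e. the
 * advertised credit is rounded down.  For example, 100 available RWQEs
 * encode as code 0xD (96 credits) and 5 encode as code 4 (4 credits).
 * The helper below is a hypothetical linear restatement of the same
 * rule, not part of the original file.
 */
static u32 aeth_credit_code_sketch(u32 credits)
{
        u32 x = 0;

        while (x + 1 < ARRAY_SIZE(credit_table) &&
               credit_table[x + 1] <= credits)
                x++;
        return x;       /* largest x with credit_table[x] <= credits */
}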
/**
 * ipath_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: unused by InfiniPath
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
                              struct ib_qp_init_attr *init_attr,
                              struct ib_udata *udata)
{
        struct ipath_qp *qp;
        int err;
        struct ipath_swqe *swq = NULL;
        struct ipath_ibdev *dev;
        size_t sz;
        struct ib_qp *ret;

        if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
            init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
            init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
            init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        if (init_attr->cap.max_send_sge +
            init_attr->cap.max_recv_sge +
            init_attr->cap.max_send_wr +
            init_attr->cap.max_recv_wr == 0) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
        }

        switch (init_attr->qp_type) {
        case IB_QPT_UC:
        case IB_QPT_RC:
                sz = sizeof(struct ipath_sge) *
                        init_attr->cap.max_send_sge +
                        sizeof(struct ipath_swqe);
                swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
                if (swq == NULL) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail;
                }
                /* FALLTHROUGH */
        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                sz = sizeof(*qp);
                if (init_attr->srq) {
                        struct ipath_srq *srq = to_isrq(init_attr->srq);

                        sz += sizeof(*qp->r_sg_list) *
                                srq->rq.max_sge;
                } else
                        sz += sizeof(*qp->r_sg_list) *
                                init_attr->cap.max_recv_sge;
                qp = kmalloc(sz, GFP_KERNEL);
                if (!qp) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_swq;
                }
                if (init_attr->srq) {
                        sz = 0;
                        qp->r_rq.size = 0;
                        qp->r_rq.max_sge = 0;
                        qp->r_rq.wq = NULL;
                        init_attr->cap.max_recv_wr = 0;
                        init_attr->cap.max_recv_sge = 0;
                } else {
                        qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
                        qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                        sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
                                sizeof(struct ipath_rwqe);
                        qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
                                                   qp->r_rq.size * sz);
                        if (!qp->r_rq.wq) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_qp;
                        }
                }

                /*
                 * ib_create_qp() will initialize qp->ibqp
                 * except for qp->ibqp.qp_num.
                 */
                spin_lock_init(&qp->s_lock);
                spin_lock_init(&qp->r_rq.lock);
                atomic_set(&qp->refcount, 0);
                init_waitqueue_head(&qp->wait);
                tasklet_init(&qp->s_task, ipath_do_ruc_send,
                             (unsigned long)qp);
                INIT_LIST_HEAD(&qp->piowait);
                INIT_LIST_HEAD(&qp->timerwait);
                qp->state = IB_QPS_RESET;
                qp->s_wq = swq;
                qp->s_size = init_attr->cap.max_send_wr + 1;
                qp->s_max_sge = init_attr->cap.max_send_sge;
                if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
                        qp->s_flags = 1 << IPATH_S_SIGNAL_REQ_WR;
                else
                        qp->s_flags = 0;
                dev = to_idev(ibpd->device);
                err = ipath_alloc_qpn(&dev->qp_table, qp,
                                      init_attr->qp_type);
                if (err) {
                        ret = ERR_PTR(err);
                        goto bail_rwq;
                }
                qp->ip = NULL;
                ipath_reset_qp(qp);
                break;

        default:
                /* Don't support raw QPs */
                ret = ERR_PTR(-ENOSYS);
                goto bail;
        }

        init_attr->cap.max_inline_data = 0;

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See ipath_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                struct ipath_mmap_info *ip;
                __u64 offset = (__u64) qp->r_rq.wq;
                int err;

                err = ib_copy_to_udata(udata, &offset, sizeof(offset));
                if (err) {
                        ret = ERR_PTR(err);
                        goto bail_rwq;
                }

                if (qp->r_rq.wq) {
                        /* Allocate info for ipath_mmap(). */
                        ip = kmalloc(sizeof(*ip), GFP_KERNEL);
                        if (!ip) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_rwq;
                        }
                        qp->ip = ip;
                        ip->context = ibpd->uobject->context;
                        ip->obj = qp->r_rq.wq;
                        kref_init(&ip->ref);
                        ip->mmap_cnt = 0;
                        ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
                                              qp->r_rq.size * sz);
                        spin_lock_irq(&dev->pending_lock);
                        ip->next = dev->pending_mmaps;
                        dev->pending_mmaps = ip;
                        spin_unlock_irq(&dev->pending_lock);
                }
        }

        spin_lock(&dev->n_qps_lock);
        if (dev->n_qps_allocated == ib_ipath_max_qps) {
                spin_unlock(&dev->n_qps_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }

        dev->n_qps_allocated++;
        spin_unlock(&dev->n_qps_lock);

        ret = &qp->ibqp;
        goto bail;

bail_ip:
        kfree(qp->ip);
bail_rwq:
        vfree(qp->r_rq.wq);
bail_qp:
        kfree(qp);
bail_swq:
        vfree(swq);
bail:
        return ret;
}
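/*
 * Caller sketch (editor's illustration): ipath_create_qp() sits behind
 * the core ib_create_qp() verb.  A kernel consumer would do roughly the
 * following; the function name and queue sizes are made up, and pd and
 * the CQs are assumed to exist already.
 */
static struct ib_qp *create_rc_qp_sketch(struct ib_pd *pd,
                                         struct ib_cq *send_cq,
                                         struct ib_cq *recv_cq)
{
        struct ib_qp_init_attr init_attr;

        memset(&init_attr, 0, sizeof(init_attr));
        init_attr.send_cq = send_cq;
        init_attr.recv_cq = recv_cq;
        init_attr.cap.max_send_wr = 64;         /* arbitrary example sizes */
        init_attr.cap.max_recv_wr = 64;
        init_attr.cap.max_send_sge = 1;
        init_attr.cap.max_recv_sge = 1;
        init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr.qp_type = IB_QPT_RC;

        return ib_create_qp(pd, &init_attr);    /* ERR_PTR() on failure */
}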
/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        struct ipath_ibdev *dev = to_idev(ibqp->device);
        unsigned long flags;

        spin_lock_irqsave(&qp->s_lock, flags);
        qp->state = IB_QPS_ERR;
        spin_unlock_irqrestore(&qp->s_lock, flags);
        spin_lock(&dev->n_qps_lock);
        dev->n_qps_allocated--;
        spin_unlock(&dev->n_qps_lock);

        /* Stop the sending tasklet. */
        tasklet_kill(&qp->s_task);

        /* Make sure the QP isn't on the timeout list. */
        spin_lock_irqsave(&dev->pending_lock, flags);
        if (!list_empty(&qp->timerwait))
                list_del_init(&qp->timerwait);
        if (!list_empty(&qp->piowait))
                list_del_init(&qp->piowait);
        spin_unlock_irqrestore(&dev->pending_lock, flags);

        /*
         * Make sure that the QP is not in the QPN table so receive
         * interrupts will discard packets for this QP.  XXX Also remove QP
         * from multicast table.
         */
        if (atomic_read(&qp->refcount) != 0)
                ipath_free_qp(&dev->qp_table, qp);

        if (qp->ip)
                kref_put(&qp->ip->ref, ipath_release_mmap_info);
        else
                vfree(qp->r_rq.wq);
        vfree(qp->s_wq);
        kfree(qp);
        return 0;
}

/**
 * ipath_init_qp_table - initialize the QP table for a device
 * @idev: the device whose QP table we're initializing
 * @size: the size of the QP table
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
{
        int i;
        int ret;

        idev->qp_table.last = 1;        /* QPN 0 and 1 are special. */
        idev->qp_table.max = size;
        idev->qp_table.nmaps = 1;
        idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
                                       GFP_KERNEL);
        if (idev->qp_table.table == NULL) {
                ret = -ENOMEM;
                goto bail;
        }

        for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
                atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
                idev->qp_table.map[i].page = NULL;
        }

        ret = 0;

bail:
        return ret;
}
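/*
 * Usage note (editor's): this runs once per device at registration
 * time, before any QP can be created, along the lines of
 *
 *      err = ipath_init_qp_table(idev, table_size);
 *
 * where table_size is assumed to come from a module parameter; the
 * actual call site lives elsewhere in the driver (ipath_verbs.c).
 */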
/**
 * ipath_sqerror_qp - put a QP's send queue into an error state
 * @qp: QP whose send queue will be put into an error state
 * @wc: the WC responsible for putting the QP in this state
 *
 * Flushes the send work queue.
 * The QP s_lock should be held.
 */
void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
{
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ipath_swqe *wqe;

        ipath_dbg(KERN_INFO "Send queue error on QP%d/%d: err: %d\n",
                  qp->ibqp.qp_num, qp->remote_qpn, wc->status);

        spin_lock(&dev->pending_lock);
        /* XXX What if it's already been removed by the timeout code? */
        if (!list_empty(&qp->timerwait))
                list_del_init(&qp->timerwait);
        if (!list_empty(&qp->piowait))
                list_del_init(&qp->piowait);
        spin_unlock(&dev->pending_lock);

        /* The caller's WC describes the WQE that failed. */
        ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
        if (++qp->s_last >= qp->s_size)
                qp->s_last = 0;

        wc->status = IB_WC_WR_FLUSH_ERR;

        while (qp->s_last != qp->s_head) {
                /* Fetch the WQE being flushed before reporting its fields. */
                wqe = get_swqe_ptr(qp, qp->s_last);
                wc->wr_id = wqe->wr.wr_id;
                wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
                ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
                if (++qp->s_last >= qp->s_size)
                        qp->s_last = 0;
        }
        qp->s_cur = qp->s_tail = qp->s_head;
        qp->state = IB_QPS_SQE;
}

/**
 * ipath_get_credit - update the send credit limit from an AETH
 * @qp: the QP whose credits to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
        u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;

        /*
         * If the credit is invalid, we can send
         * as many packets as we like.  Otherwise, we have to
         * honor the credit field.
         */
        if (credit == IPATH_AETH_CREDIT_INVAL)
                qp->s_lsn = (u32) -1;
        else if (qp->s_lsn != (u32) -1) {
                /* Compute new LSN (i.e., MSN + credit) */
                credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
                if (ipath_cmp24(credit, qp->s_lsn) > 0)
                        qp->s_lsn = credit;
        }

        /* Restart sending if it was blocked due to lack of credits. */
        if (qp->s_cur != qp->s_head &&
            (qp->s_lsn == (u32) -1 ||
             ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
                         qp->s_lsn + 1) <= 0))
                tasklet_hi_schedule(&qp->s_task);
}
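/*
 * Worked example (editor's note), assuming the usual field layout from
 * ipath_verbs.h (IPATH_AETH_CREDIT_SHIFT = 24, IPATH_AETH_CREDIT_MASK =
 * 0x1F, IPATH_MSN_MASK = 0xFFFFFF); verify against that header before
 * relying on the numbers.  An AETH of 0x0D000064 carries credit code
 * 0xD and MSN 0x64, so credit_table[0xD] = 96 and the new s_lsn becomes
 * (0x64 + 96) & IPATH_MSN_MASK if that advances it.  The helper below
 * is hypothetical:
 */
static u32 aeth_to_credits_sketch(u32 aeth)
{
        u32 code = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;

        /* IPATH_AETH_CREDIT_INVAL means "unlimited"; see above. */
        return code == IPATH_AETH_CREDIT_INVAL ? ~0U : credit_table[code];
}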