ipath_qp.c revision a2f76cd69f4913079cad10670f8520ffe07f4067
/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"

#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
#define mk_qpn(qpt, map, off)	(((map) - (qpt)->map) * BITS_PER_PAGE + \
				 (off))
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
						       BITS_PER_PAGE, off)

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};


static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);
	unsigned long flags;

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock_irqsave(&qpt->lock, flags);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock_irqrestore(&qpt->lock, flags);
}
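/*
 * QP numbers are handed out from a simple bitmap: qpt->map[] is an
 * array of pages, each page covering BITS_PER_PAGE QPNs, and mk_qpn()
 * recovers the QPN from a (map, bit offset) pair.  For example
 * (assuming 4 KB pages, so BITS_PER_PAGE == 32768), bit 5 of
 * qpt->map[1] is QPN 1 * 32768 + 5 = 32773.  QPN 0 and QPN 1 are
 * reserved for the special SMI and GSI QPs, so the general-purpose
 * scan below starts at QPN 2.
 */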
static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret = -1;

	if (type == IB_QPT_SMI)
		ret = 0;
	else if (type == IB_QPT_GSI)
		ret = 1;

	if (ret != -1) {
		map = &qpt->map[0];
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page)) {
				ret = -ENOMEM;
				goto bail;
			}
		}
		if (!test_and_set_bit(ret, map->page))
			atomic_dec(&map->n_free);
		else
			ret = -EBUSY;
		goto bail;
	}

	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->n_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->n_free);
					qpt->last = qpn;
					ret = qpn;
					goto bail;
				}
				offset = find_next_offset(map, offset);
				qpn = mk_qpn(qpt, map, offset);
				/*
				 * This test differs from alloc_pidmap().
				 * If find_next_offset() does find a zero
				 * bit, we don't need to check for QPN
				 * wrapping around past our starting QPN.
				 * We just need to be sure we don't loop
				 * forever.
				 */
			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		}
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
	atomic_inc(&map->n_free);
}

/**
 * ipath_alloc_qpn - allocate a QP number
 * @qpt: the QP table
 * @qp: the QP
 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
 *
 * Allocate the next available QPN and put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
			   enum ib_qp_type type)
{
	unsigned long flags;
	int ret;

	ret = alloc_qpn(qpt, type);
	if (ret < 0)
		goto bail;
	qp->ibqp.qp_num = ret;

	/* Add the QP to the hash table. */
	spin_lock_irqsave(&qpt->lock, flags);

	ret %= qpt->max;
	qp->next = qpt->table[ret];
	qpt->table[ret] = qp;
	atomic_inc(&qp->refcount);

	spin_unlock_irqrestore(&qpt->lock, flags);
	ret = 0;

bail:
	return ret;
}
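/*
 * The QPN-to-QP lookup structure is a simple chained hash: bucket
 * qp_num % qpt->max, with collisions linked through qp->next.  The
 * table's reference (taken in ipath_alloc_qpn() above) pins the QP;
 * ipath_free_qp() below drops it and then sleeps until every
 * transient reference (e.g. one taken by ipath_lookup_qpn()) has been
 * released before the caller may free the QP.
 */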
/**
 * ipath_free_qp - remove a QP from the QP table
 * @qpt: the QP table
 * @qp: the QP to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
	struct ipath_qp *q, **qpp;
	unsigned long flags;
	int fnd = 0;

	spin_lock_irqsave(&qpt->lock, flags);

	/* Remove QP from the hash table. */
	qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
	for (; (q = *qpp) != NULL; qpp = &q->next) {
		if (q == qp) {
			*qpp = qp->next;
			qp->next = NULL;
			atomic_dec(&qp->refcount);
			fnd = 1;
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);

	if (!fnd)
		return;

	free_qpn(qpt, qp->ibqp.qp_num);

	wait_event(qp->wait, !atomic_read(&qp->refcount));
}

/**
 * ipath_free_all_qps - remove all QPs from the table
 * @qpt: the QP table to empty
 */
void ipath_free_all_qps(struct ipath_qp_table *qpt)
{
	unsigned long flags;
	struct ipath_qp *qp, *nqp;
	u32 n;

	for (n = 0; n < qpt->max; n++) {
		spin_lock_irqsave(&qpt->lock, flags);
		qp = qpt->table[n];
		qpt->table[n] = NULL;
		spin_unlock_irqrestore(&qpt->lock, flags);

		while (qp) {
			nqp = qp->next;
			free_qpn(qpt, qp->ibqp.qp_num);
			if (!atomic_dec_and_test(&qp->refcount) ||
			    !ipath_destroy_qp(&qp->ibqp))
				ipath_dbg("QP memory leak!\n");
			qp = nqp;
		}
	}

	for (n = 0; n < ARRAY_SIZE(qpt->map); n++) {
		if (qpt->map[n].page)
			free_page((unsigned long) qpt->map[n].page);
	}
}

/**
 * ipath_lookup_qpn - return the QP with the given QPN
 * @qpt: the QP table
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	unsigned long flags;
	struct ipath_qp *qp;

	spin_lock_irqsave(&qpt->lock, flags);

	for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
		if (qp->ibqp.qp_num == qpn) {
			atomic_inc(&qp->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);
	return qp;
}

/**
 * ipath_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 */
static void ipath_reset_qp(struct ipath_qp *qp)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_busy = 0;
	qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_wrid_valid = 0;
	qp->s_rnr_timeout = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_wait_credit = 0;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_reuse_sge = 0;
}
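/*
 * Note on the reset values above: s_state/r_state are set to the
 * protocol's SEND_LAST opcode so that the send and receive state
 * machines treat the previous packet as having completed a message;
 * the next packet processed must therefore start a new request.
 */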
/**
 * ipath_error_qp - put a QP into an error state
 * @qp: the QP to put into an error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if the last WQE event should be generated.
 * The QP s_lock should be held and interrupts disabled.
 */

int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	ipath_dbg("QP%d/%d in error state (%d)\n",
		  qp->ibqp.qp_num, qp->remote_qpn, err);

	spin_lock(&dev->pending_lock);
	/* XXX What if it's already removed by the timeout code? */
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = 0;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = 0;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	if (qp->r_wrid_valid) {
		qp->r_wrid_valid = 0;
		wc.wr_id = qp->r_wr_id;
		wc.opcode = IB_WC_RECV;
		wc.status = err;
		/* A receive completion goes to the receive CQ. */
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	while (qp->s_last != qp->s_head) {
		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

		wc.wr_id = wqe->wr.wr_id;
		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->s_hdrwords = 0;
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;

	if (qp->r_rq.wq) {
		struct ipath_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		wc.opcode = IB_WC_RECV;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

	return ret;
}
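/*
 * The flushing above follows the usual IB error-state semantics: the
 * receive WQE actually being processed (if any) completes with the
 * real error code passed in by the caller, while every other
 * outstanding send and receive WQE completes with IB_WC_WR_FLUSH_ERR.
 * If the QP uses an SRQ there is no local receive queue to flush, so
 * the caller is told to generate the "last WQE reached" event instead.
 */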
/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	unsigned long flags;
	int lastwqe = 0;
	int ret;

	spin_lock_irqsave(&qp->s_lock, flags);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid == 0 ||
		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
			goto inval;

		if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
		    (attr->ah_attr.grh.sgid_index > 1))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	/*
	 * Note: the chips support a maximum MTU of 4096, but the driver
	 * hasn't implemented this feature yet, so don't allow Path MTU
	 * values greater than 2048.
	 */
	if (attr_mask & IB_QP_PATH_MTU)
		if (attr->path_mtu > IB_MTU_2048)
			goto inval;

	if (attr_mask & IB_QP_PATH_MIG_STATE)
		if (attr->path_mig_state != IB_MIG_MIGRATED &&
		    attr->path_mig_state != IB_MIG_REARM)
			goto inval;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		ipath_reset_qp(qp);
		break;

	case IB_QPS_ERR:
		lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		break;

	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_psn = qp->s_next_psn = attr->sq_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV)
		qp->remote_ah_attr = attr->ah_attr;

	if (attr_mask & IB_QP_PATH_MTU)
		qp->path_mtu = attr->path_mtu;

	if (attr_mask & IB_QP_RETRY_CNT)
		qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry = attr->rnr_retry;
		if (qp->s_rnr_retry > 7)
			qp->s_rnr_retry = 7;
		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT)
		qp->timeout = attr->timeout;

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	qp->state = new_state;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	ret = -EINVAL;

bail:
	return ret;
}
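/*
 * For reference, a typical (hypothetical) consumer brings an RC QP up
 * with a sequence of ib_modify_qp() calls that funnel into the
 * function above, e.g.:
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_RTR,
 *				   .path_mtu = IB_MTU_2048, ... };
 *	ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PATH_MTU |
 *		     IB_QP_AV | IB_QP_DEST_QPN | IB_QP_RQ_PSN | ...);
 *
 * ib_modify_qp_is_ok() is what enforces that the attr_mask bits
 * required for each RESET -> INIT -> RTR -> RTS transition are
 * present before any attribute is applied.
 */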
int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct ipath_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = 0;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn;
	attr->sq_psn = qp->s_next_psn;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = 0;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = 0;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = 1;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry;
	attr->alt_port_num = 0;
	attr->alt_timeout = 0;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = 1;
	return 0;
}

/**
 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
	u32 aeth = qp->r_msn & IPATH_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct ipath_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int) credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << IPATH_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
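/*
 * Worked example of the binary search above: with 100 RWQEs
 * available, the loop narrows to x = 0xD (credit_table[0xD] == 96),
 * i.e. the largest credit code whose decoded value does not exceed
 * the actual number of free RWQEs, so the advertised credit never
 * overstates what the receive queue can hold.
 */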
/**
 * ipath_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: unused by InfiniPath
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ipath_qp *qp;
	int err;
	struct ipath_swqe *swq = NULL;
	struct ipath_ibdev *dev;
	size_t sz;
	struct ib_qp *ret;

	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
	    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	if (init_attr->cap.max_send_sge +
	    init_attr->cap.max_recv_sge +
	    init_attr->cap.max_send_wr +
	    init_attr->cap.max_recv_wr == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	switch (init_attr->qp_type) {
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		sz = sizeof(struct ipath_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct ipath_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		if (init_attr->srq) {
			struct ipath_srq *srq = to_isrq(init_attr->srq);

			sz += sizeof(*qp->r_sg_list) *
				srq->rq.max_sge;
		} else
			sz += sizeof(*qp->r_sg_list) *
				init_attr->cap.max_recv_sge;
		qp = kmalloc(sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		if (init_attr->srq) {
			sz = 0;
			qp->r_rq.size = 0;
			qp->r_rq.max_sge = 0;
			qp->r_rq.wq = NULL;
			init_attr->cap.max_recv_wr = 0;
			init_attr->cap.max_recv_sge = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct ipath_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
		INIT_LIST_HEAD(&qp->piowait);
		INIT_LIST_HEAD(&qp->timerwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
		else
			qp->s_flags = 0;
		dev = to_idev(ibpd->device);
		err = ipath_alloc_qpn(&dev->qp_table, qp,
				      init_attr->qp_type);
		if (err) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ip = NULL;
		ipath_reset_qp(qp);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;
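	/*
	 * Sizing note: the send and receive rings are allocated with
	 * max_*_wr + 1 slots because head == tail must mean "empty";
	 * one slot is always left unused to distinguish a full ring
	 * from an empty one, which is why ipath_query_qp() reports
	 * the usable capacity as s_size - 1 and r_rq.size - 1.
	 */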
	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct ipath_rwq) +
				qp->r_rq.size * sz;

			qp->ip =
			    ipath_create_mmap_info(dev, s,
						   ibpd->uobject->context,
						   qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_ipath_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	ipath_free_qp(&dev->qp_table, qp);
bail_qp:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}

/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	qp->state = IB_QPS_ERR;
	spin_unlock_irqrestore(&qp->s_lock, flags);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	/* Stop the sending tasklet. */
	tasklet_kill(&qp->s_task);

	/* Make sure the QP isn't on the timeout list. */
	spin_lock_irqsave(&dev->pending_lock, flags);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	/*
	 * Make sure that the QP is not in the QPN table so receive
	 * interrupts will discard packets for this QP.  XXX Also remove QP
	 * from multicast table.
	 */
	if (atomic_read(&qp->refcount) != 0)
		ipath_free_qp(&dev->qp_table, qp);

	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp);
	return 0;
}
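/*
 * Teardown ordering in ipath_destroy_qp() matters: the QP is moved to
 * the error state and the send tasklet killed before it is unlinked
 * from the timeout/PIO-wait lists and the QPN table, and
 * ipath_free_qp() blocks until the reference count drops to zero, so
 * no interrupt-context user can still hold a pointer to the QP when
 * its memory is finally freed.
 */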
/**
 * ipath_init_qp_table - initialize the QP table for a device
 * @idev: the device whose QP table we're initializing
 * @size: the size of the QP table
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
{
	int i;
	int ret;

	idev->qp_table.last = 1;	/* QPN 0 and 1 are special. */
	idev->qp_table.max = size;
	idev->qp_table.nmaps = 1;
	idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
				       GFP_KERNEL);
	if (idev->qp_table.table == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
		atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
		idev->qp_table.map[i].page = NULL;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * ipath_sqerror_qp - put a QP's send queue into an error state
 * @qp: the QP whose send queue will be put into an error state
 * @wc: the WC responsible for putting the QP in this state
 *
 * Flushes the send work queue.
 * The QP s_lock should be held and interrupts disabled.
 */

void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

	ipath_dbg("Send queue error on QP%d/%d: err: %d\n",
		  qp->ibqp.qp_num, qp->remote_qpn, wc->status);

	spin_lock(&dev->pending_lock);
	/* XXX What if it's already removed by the timeout code? */
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
	if (++qp->s_last >= qp->s_size)
		qp->s_last = 0;

	wc->status = IB_WC_WR_FLUSH_ERR;

	while (qp->s_last != qp->s_head) {
		wqe = get_swqe_ptr(qp, qp->s_last);
		wc->wr_id = wqe->wr.wr_id;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->state = IB_QPS_SQE;
}

/**
 * ipath_get_credit - update the send credit state from an AETH
 * @qp: the qp whose send credits to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == IPATH_AETH_CREDIT_INVAL)
		qp->s_lsn = (u32) -1;
	else if (qp->s_lsn != (u32) -1) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
		if (ipath_cmp24(credit, qp->s_lsn) > 0)
			qp->s_lsn = credit;
	}

	/* Restart sending if it was blocked due to lack of credits. */
	if (qp->s_cur != qp->s_head &&
	    (qp->s_lsn == (u32) -1 ||
	     ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
			 qp->s_lsn + 1) <= 0))
		tasklet_hi_schedule(&qp->s_task);
}
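/*
 * Credit decoding example for the function above: an AETH whose
 * credit field is 0x14 grants credit_table[0x14] == 1024 RWQEs, so
 * the new limit sequence number becomes (MSN + 1024) & IPATH_MSN_MASK.
 * The special code IPATH_AETH_CREDIT_INVAL (sent by
 * ipath_compute_aeth() for SRQs) instead removes the credit limit
 * entirely by setting s_lsn to (u32) -1.
 */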