ipath_qp.c revision 39c0d0b919ae5080163bd2d41c0271cda250d382
/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"

#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
#define mk_qpn(qpt, map, off)	(((map) - (qpt)->map) * BITS_PER_PAGE + \
				 (off))
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
						       BITS_PER_PAGE, off)

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};

static u32 alloc_qpn(struct ipath_qp_table *qpt)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret;

	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);
			unsigned long flags;

			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irqsave(&qpt->lock, flags);
			if (map->page)
				free_page(page);
			else
				map->page = (void *)page;
			spin_unlock_irqrestore(&qpt->lock, flags);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->n_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->n_free);
					qpt->last = qpn;
					ret = qpn;
					goto bail;
				}
				offset = find_next_offset(map, offset);
				qpn = mk_qpn(qpt, map, offset);
				/*
				 * This test differs from alloc_pidmap().
				 * If find_next_offset() does find a zero
				 * bit, we don't need to check for QPN
				 * wrapping around past our starting QPN.
				 * We just need to be sure we don't loop
				 * forever.
				 */
			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		}
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = 0;

bail:
	return ret;
}
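
/*
 * Illustrative sketch only (not compiled into the driver): how a QPN
 * maps onto the bitmap pages above, assuming PAGE_SIZE is 4096 so that
 * BITS_PER_PAGE is 32768.  QPN 40000 lands in map[1] at bit offset
 * 40000 - 32768 = 7232, and mk_qpn() reverses the mapping.
 */
#if 0
	u32 qpn = 40000;
	struct qpn_map *map = &qpt->map[qpn / BITS_PER_PAGE];	/* map[1] */
	u32 offset = qpn & BITS_PER_PAGE_MASK;			/* 7232 */

	BUG_ON(mk_qpn(qpt, map, offset) != qpn);
#endif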

static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
	atomic_inc(&map->n_free);
}

/**
 * ipath_alloc_qpn - allocate a QP number
 * @qpt: the QP table
 * @qp: the QP
 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
 *
 * Allocate the next available QPN and put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
			   enum ib_qp_type type)
{
	unsigned long flags;
	u32 qpn;
	int ret;

	if (type == IB_QPT_SMI)
		qpn = 0;
	else if (type == IB_QPT_GSI)
		qpn = 1;
	else {
		/* Allocate the next available QPN */
		qpn = alloc_qpn(qpt);
		if (qpn == 0) {
			ret = -ENOMEM;
			goto bail;
		}
	}
	qp->ibqp.qp_num = qpn;

	/* Add the QP to the hash table. */
	spin_lock_irqsave(&qpt->lock, flags);

	qpn %= qpt->max;
	qp->next = qpt->table[qpn];
	qpt->table[qpn] = qp;
	atomic_inc(&qp->refcount);

	spin_unlock_irqrestore(&qpt->lock, flags);
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_free_qp - remove a QP from the QP table
 * @qpt: the QP table
 * @qp: the QP to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
	struct ipath_qp *q, **qpp;
	unsigned long flags;
	int fnd = 0;

	spin_lock_irqsave(&qpt->lock, flags);

	/* Remove QP from the hash table. */
	qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
	for (; (q = *qpp) != NULL; qpp = &q->next) {
		if (q == qp) {
			*qpp = qp->next;
			qp->next = NULL;
			atomic_dec(&qp->refcount);
			fnd = 1;
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);

	if (!fnd)
		return;

	/* If QPN is not reserved, mark QPN free in the bitmap. */
	if (qp->ibqp.qp_num > 1)
		free_qpn(qpt, qp->ibqp.qp_num);

	wait_event(qp->wait, !atomic_read(&qp->refcount));
}
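
/*
 * Illustrative sketch only (not compiled): the table above is an
 * open-chained hash indexed by qp_num modulo qpt->max, so distinct
 * QPNs can share a bucket and lookups must compare the full QPN while
 * walking qp->next.  For example, assuming qpt->max == 256:
 */
#if 0
	u32 bucket_a = 3 % 256;		/* QPN 3   -> bucket 3 */
	u32 bucket_b = 259 % 256;	/* QPN 259 -> bucket 3 as well */
#endif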

/**
 * ipath_free_all_qps - remove all QPs from the table
 * @qpt: the QP table to empty
 */
void ipath_free_all_qps(struct ipath_qp_table *qpt)
{
	unsigned long flags;
	struct ipath_qp *qp, *nqp;
	u32 n;

	for (n = 0; n < qpt->max; n++) {
		spin_lock_irqsave(&qpt->lock, flags);
		qp = qpt->table[n];
		qpt->table[n] = NULL;
		spin_unlock_irqrestore(&qpt->lock, flags);

		while (qp) {
			nqp = qp->next;
			if (qp->ibqp.qp_num > 1)
				free_qpn(qpt, qp->ibqp.qp_num);
			if (!atomic_dec_and_test(&qp->refcount) ||
			    !ipath_destroy_qp(&qp->ibqp))
				ipath_dbg("QP memory leak!\n");
			qp = nqp;
		}
	}

	for (n = 0; n < ARRAY_SIZE(qpt->map); n++) {
		if (qpt->map[n].page)
			free_page((unsigned long) qpt->map[n].page);
	}
}

/**
 * ipath_lookup_qpn - return the QP with the given QPN
 * @qpt: the QP table
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	unsigned long flags;
	struct ipath_qp *qp;

	spin_lock_irqsave(&qpt->lock, flags);

	for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
		if (qp->ibqp.qp_num == qpn) {
			atomic_inc(&qp->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);
	return qp;
}
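
/*
 * Illustrative sketch only (not compiled): the expected caller pattern
 * for ipath_lookup_qpn().  ipath_free_qp() waits for qp->refcount to
 * reach zero, so a caller that took a reference is expected to drop it
 * and wake the waiter when done.  dev and qpn are placeholders here.
 */
#if 0
	struct ipath_qp *qp = ipath_lookup_qpn(&dev->qp_table, qpn);

	if (qp) {
		/* ... deliver the packet to qp ... */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
#endif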

/**
 * ipath_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 */
static void ipath_reset_qp(struct ipath_qp *qp)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_busy = 0;
	qp->s_flags &= ~IPATH_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_wrid_valid = 0;
	qp->s_rnr_timeout = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_wait_credit = 0;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_reuse_sge = 0;
}

/**
 * ipath_error_qp - put a QP into an error state
 * @qp: the QP to put into an error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * The QP s_lock should be held and interrupts disabled.
 */

void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;

	ipath_dbg("QP%d/%d in error state\n",
		  qp->ibqp.qp_num, qp->remote_qpn);

	spin_lock(&dev->pending_lock);
	/* XXX What if it's already removed by the timeout code? */
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = 0;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = 0;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	if (qp->r_wrid_valid) {
		qp->r_wrid_valid = 0;
		wc.status = err;
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	while (qp->s_last != qp->s_head) {
		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

		wc.wr_id = wqe->wr.wr_id;
		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->s_hdrwords = 0;
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;

	if (qp->r_rq.wq) {
		struct ipath_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		wc.opcode = IB_WC_RECV;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	}
}
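
/*
 * Illustrative sketch only (not compiled): after ipath_error_qp() runs,
 * a consumer polling the completion queue sees one IB_WC_WR_FLUSH_ERR
 * completion per flushed work request.  cq is a placeholder here.
 */
#if 0
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status == IB_WC_WR_FLUSH_ERR)
			;	/* wc.wr_id identifies the flushed WQE */
	}
#endif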

/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&qp->s_lock, flags);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid == 0 ||
		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
			goto inval;

		if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
		    (attr->ah_attr.grh.sgid_index > 1))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_PATH_MTU)
		if (attr->path_mtu > IB_MTU_4096)
			goto inval;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > 1)
			goto inval;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		if (attr->max_rd_atomic > 1)
			goto inval;

	if (attr_mask & IB_QP_PATH_MIG_STATE)
		if (attr->path_mig_state != IB_MIG_MIGRATED &&
		    attr->path_mig_state != IB_MIG_REARM)
			goto inval;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		ipath_reset_qp(qp);
		break;

	case IB_QPS_ERR:
		ipath_error_qp(qp, IB_WC_GENERAL_ERR);
		break;

	default:
		break;

	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_psn = qp->s_next_psn = attr->sq_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV)
		qp->remote_ah_attr = attr->ah_attr;

	if (attr_mask & IB_QP_PATH_MTU)
		qp->path_mtu = attr->path_mtu;

	if (attr_mask & IB_QP_RETRY_CNT)
		qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry = attr->rnr_retry;
		if (qp->s_rnr_retry > 7)
			qp->s_rnr_retry = 7;
		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT)
		qp->timeout = attr->timeout;

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	qp->state = new_state;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	ret = 0;
	goto bail;

inval:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	ret = -EINVAL;

bail:
	return ret;
}

int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct ipath_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = 0;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn;
	attr->sq_psn = qp->s_next_psn;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = 0;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = 0;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = 1;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry;
	attr->alt_port_num = 0;
	attr->alt_timeout = 0;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = 1;
	return 0;
}
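
/*
 * Illustrative sketch only (not compiled): a kernel consumer reaches
 * ipath_modify_qp() through the core ib_modify_qp() verb, e.g. to flush
 * a QP by forcing it into the error state.  qp and ret are placeholders.
 */
#if 0
	struct ib_qp_attr attr = {
		.qp_state = IB_QPS_ERR,
	};

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
#endif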

/**
 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
	u32 aeth = qp->r_msn & IPATH_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct ipath_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int) credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << IPATH_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
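
/*
 * Illustrative sketch only (not compiled): the binary search above
 * rounds the available credit count down to the nearest credit_table[]
 * entry.  With 100 RWQEs free, the search settles on code 0xD (96
 * credits); the receiver of the AETH recovers the count by indexing
 * the same table.
 */
#if 0
	u32 code = 0xd;				/* encoded for credits == 100 */
	u32 decoded = credit_table[code];	/* 96, rounded down from 100 */
#endif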

/**
 * ipath_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for ipathverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ipath_qp *qp;
	int err;
	struct ipath_swqe *swq = NULL;
	struct ipath_ibdev *dev;
	size_t sz;
	struct ib_qp *ret;

	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
	    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	if (init_attr->cap.max_send_sge +
	    init_attr->cap.max_recv_sge +
	    init_attr->cap.max_send_wr +
	    init_attr->cap.max_recv_wr == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	switch (init_attr->qp_type) {
	case IB_QPT_UC:
	case IB_QPT_RC:
		sz = sizeof(struct ipath_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct ipath_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		/* FALLTHROUGH */
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		sz = sizeof(*qp);
		if (init_attr->srq) {
			struct ipath_srq *srq = to_isrq(init_attr->srq);

			sz += sizeof(*qp->r_sg_list) *
				srq->rq.max_sge;
		} else
			sz += sizeof(*qp->r_sg_list) *
				init_attr->cap.max_recv_sge;
		qp = kmalloc(sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		if (init_attr->srq) {
			sz = 0;
			qp->r_rq.size = 0;
			qp->r_rq.max_sge = 0;
			qp->r_rq.wq = NULL;
			init_attr->cap.max_recv_wr = 0;
			init_attr->cap.max_recv_sge = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct ipath_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		tasklet_init(&qp->s_task, ipath_do_ruc_send,
			     (unsigned long) qp);
		INIT_LIST_HEAD(&qp->piowait);
		INIT_LIST_HEAD(&qp->timerwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
		else
			qp->s_flags = 0;
		dev = to_idev(ibpd->device);
		err = ipath_alloc_qpn(&dev->qp_table, qp,
				      init_attr->qp_type);
		if (err) {
			ret = ERR_PTR(err);
			goto bail_rwq;
		}
		qp->ip = NULL;
		ipath_reset_qp(qp);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		struct ipath_mmap_info *ip;
		__u64 offset = (__u64) qp->r_rq.wq;
		int err;

		err = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_rwq;
		}

		if (qp->r_rq.wq) {
			/* Allocate info for ipath_mmap(). */
			ip = kmalloc(sizeof(*ip), GFP_KERNEL);
			if (!ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_rwq;
			}
			qp->ip = ip;
			ip->context = ibpd->uobject->context;
			ip->obj = qp->r_rq.wq;
			kref_init(&ip->ref);
			ip->mmap_cnt = 0;
			ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
					      qp->r_rq.size * sz);
			spin_lock_irq(&dev->pending_lock);
			ip->next = dev->pending_mmaps;
			dev->pending_mmaps = ip;
			spin_unlock_irq(&dev->pending_lock);
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_ipath_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	kfree(qp->ip);
bail_rwq:
	vfree(qp->r_rq.wq);
bail_qp:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}
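
/*
 * Illustrative sketch only (not compiled): a kernel consumer reaches
 * ipath_create_qp() through ib_create_qp().  The capacities below are
 * placeholder values, not driver limits; pd and cq are placeholders too.
 */
#if 0
	struct ib_qp_init_attr init_attr = {
		.send_cq	= cq,
		.recv_cq	= cq,
		.cap		= { .max_send_wr = 16, .max_recv_wr = 16,
				    .max_send_sge = 1, .max_recv_sge = 1 },
		.sq_sig_type	= IB_SIGNAL_ALL_WR,
		.qp_type	= IB_QPT_RC,
	};
	struct ib_qp *qp = ib_create_qp(pd, &init_attr);

	if (IS_ERR(qp))
		return PTR_ERR(qp);
#endif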

/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	qp->state = IB_QPS_ERR;
	spin_unlock_irqrestore(&qp->s_lock, flags);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	/* Stop the sending tasklet. */
	tasklet_kill(&qp->s_task);

	/* Make sure the QP isn't on the timeout list. */
	spin_lock_irqsave(&dev->pending_lock, flags);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	/*
	 * Make sure that the QP is not in the QPN table so receive
	 * interrupts will discard packets for this QP.  XXX Also remove QP
	 * from multicast table.
	 */
	if (atomic_read(&qp->refcount) != 0)
		ipath_free_qp(&dev->qp_table, qp);

	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp);
	return 0;
}

/**
 * ipath_init_qp_table - initialize the QP table for a device
 * @idev: the device whose QP table we're initializing
 * @size: the size of the QP table
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
{
	int i;
	int ret;

	idev->qp_table.last = 1;	/* QPN 0 and 1 are special. */
	idev->qp_table.max = size;
	idev->qp_table.nmaps = 1;
	idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
				       GFP_KERNEL);
	if (idev->qp_table.table == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
		atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
		idev->qp_table.map[i].page = NULL;
	}

	ret = 0;

bail:
	return ret;
}
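
/*
 * Illustrative sketch only (not compiled): the device init path is
 * expected to size the QP table once at registration time.  The caller
 * context and the qp_table_size variable are assumptions, not a
 * reference to this file.
 */
#if 0
	ret = ipath_init_qp_table(idev, qp_table_size);	/* hypothetical caller */
	if (ret)
		goto err_qp_table;
#endif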

/**
 * ipath_sqerror_qp - put a QP's send queue into an error state
 * @qp: QP whose send queue will be put into an error state
 * @wc: the WC responsible for putting the QP in this state
 *
 * Flushes the send work queue.
 * The QP s_lock should be held.
 */

void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

	ipath_dbg("Send queue error on QP%d/%d: err: %d\n",
		  qp->ibqp.qp_num, qp->remote_qpn, wc->status);

	spin_lock(&dev->pending_lock);
	/* XXX What if it's already removed by the timeout code? */
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
	if (++qp->s_last >= qp->s_size)
		qp->s_last = 0;

	wc->status = IB_WC_WR_FLUSH_ERR;

	while (qp->s_last != qp->s_head) {
		/* Re-fetch the WQE for each flushed entry; the pointer
		 * read before the first completion is stale once s_last
		 * has advanced. */
		wqe = get_swqe_ptr(qp, qp->s_last);
		wc->wr_id = wqe->wr.wr_id;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->state = IB_QPS_SQE;
}

/**
 * ipath_get_credit - handle the send credit field in an AETH
 * @qp: the QP whose send credit limit to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * Updates the QP's send credit limit and restarts sending if it was
 * blocked waiting for credits.
 * The QP s_lock should be held.
 */
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == IPATH_AETH_CREDIT_INVAL)
		qp->s_lsn = (u32) -1;
	else if (qp->s_lsn != (u32) -1) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
		if (ipath_cmp24(credit, qp->s_lsn) > 0)
			qp->s_lsn = credit;
	}

	/* Restart sending if it was blocked due to lack of credits. */
	if (qp->s_cur != qp->s_head &&
	    (qp->s_lsn == (u32) -1 ||
	     ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
			 qp->s_lsn + 1) <= 0))
		tasklet_hi_schedule(&qp->s_task);
}
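
/*
 * Illustrative sketch only (not compiled): the LSN arithmetic above is
 * modulo 2^24.  With an MSN of 0xfffff0 and a decoded credit of 96
 * (code 0xD), the new limit wraps around to 0x000050, and ipath_cmp24()
 * accounts for the wraparound when comparing it against SSNs.
 */
#if 0
	u32 lsn = (0xfffff0 + 96) & IPATH_MSN_MASK;	/* == 0x000050 */
#endif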