ipath_qp.c revision b228b43c491c53d1838e06f47a7470db9f84d899
/*
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
#include "ips_common.h"

#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
#define mk_qpn(qpt, map, off)	(((map) - (qpt)->map) * BITS_PER_PAGE + \
				 (off))
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
						       BITS_PER_PAGE, off)
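
/*
 * Worked example of the QPN bitmap arithmetic above (illustrative
 * only, assuming a 4 KB PAGE_SIZE): BITS_PER_PAGE is 4096 * 8 ==
 * 32768, so each qpn_map page tracks 32768 QPNs.  QPN 40000 then
 * lives in qpt->map[1] (40000 / 32768) at bit offset 7232 (40000 &
 * BITS_PER_PAGE_MASK), and mk_qpn(qpt, &qpt->map[1], 7232) maps back
 * to 1 * 32768 + 7232 == 40000.
 */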

#define TRANS_INVALID	0
#define TRANS_ANY2RST	1
#define TRANS_RST2INIT	2
#define TRANS_INIT2INIT	3
#define TRANS_INIT2RTR	4
#define TRANS_RTR2RTS	5
#define TRANS_RTS2RTS	6
#define TRANS_SQERR2RTS	7
#define TRANS_ANY2ERR	8
#define TRANS_RTS2SQD	9  /* XXX Wait for expected ACKs & signal event */
#define TRANS_SQD2SQD	10 /* error if not drained & parameter change */
#define TRANS_SQD2RTS	11 /* error if not drained */

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,		/* 0 */
	1,		/* 1 */
	2,		/* 2 */
	3,		/* 3 */
	4,		/* 4 */
	6,		/* 5 */
	8,		/* 6 */
	12,		/* 7 */
	16,		/* 8 */
	24,		/* 9 */
	32,		/* A */
	48,		/* B */
	64,		/* C */
	96,		/* D */
	128,		/* E */
	192,		/* F */
	256,		/* 10 */
	384,		/* 11 */
	512,		/* 12 */
	768,		/* 13 */
	1024,		/* 14 */
	1536,		/* 15 */
	2048,		/* 16 */
	3072,		/* 17 */
	4096,		/* 18 */
	6144,		/* 19 */
	8192,		/* 1A */
	12288,		/* 1B */
	16384,		/* 1C */
	24576,		/* 1D */
	32768		/* 1E */
};
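
/*
 * Illustration of the encoding (a reading aid, not from the original
 * source): the 5-bit AETH credit code indexes this table, so code
 * 0x7 advertises 12 RWQEs and code 0x1E advertises 32768.  Past the
 * first few linear entries the values double every two codes, which
 * lets five bits span 0..32768; the one remaining code,
 * IPS_AETH_CREDIT_INVAL, means "no credit information" (see
 * ipath_compute_aeth() and ipath_get_credit() below).
 */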

static u32 alloc_qpn(struct ipath_qp_table *qpt)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret;

	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);
			unsigned long flags;

			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irqsave(&qpt->lock, flags);
			if (map->page)
				free_page(page);
			else
				map->page = (void *)page;
			spin_unlock_irqrestore(&qpt->lock, flags);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->n_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->n_free);
					qpt->last = qpn;
					ret = qpn;
					goto bail;
				}
				offset = find_next_offset(map, offset);
				qpn = mk_qpn(qpt, map, offset);
				/*
				 * This test differs from alloc_pidmap().
				 * If find_next_offset() does find a zero
				 * bit, we don't need to check for QPN
				 * wrapping around past our starting QPN.
				 * We just need to be sure we don't loop
				 * forever.
				 */
			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		}
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = 0;

bail:
	return ret;
}

static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
	atomic_inc(&map->n_free);
}
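
/*
 * Scan order of alloc_qpn() above, summarized in the code's own
 * terms (a reading aid only): the search resumes at qpt->last + 1,
 * first probing free bits within the current page, then moving page
 * by page; on wrap-around it restarts at map[0], offset 2, so QPNs
 * 0 and 1 stay reserved for the SMI and GSI QPs, and a fresh bitmap
 * page is only brought into use once all existing pages have been
 * scanned.
 */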

/**
 * ipath_alloc_qpn - allocate a QP number
 * @qpt: the QP table
 * @qp: the QP
 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
 *
 * Allocate the next available QPN and put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
			   enum ib_qp_type type)
{
	unsigned long flags;
	u32 qpn;
	int ret;

	if (type == IB_QPT_SMI)
		qpn = 0;
	else if (type == IB_QPT_GSI)
		qpn = 1;
	else {
		/* Allocate the next available QPN */
		qpn = alloc_qpn(qpt);
		if (qpn == 0) {
			ret = -ENOMEM;
			goto bail;
		}
	}
	qp->ibqp.qp_num = qpn;

	/* Add the QP to the hash table. */
	spin_lock_irqsave(&qpt->lock, flags);

	qpn %= qpt->max;
	qp->next = qpt->table[qpn];
	qpt->table[qpn] = qp;
	atomic_inc(&qp->refcount);

	spin_unlock_irqrestore(&qpt->lock, flags);
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_free_qp - remove a QP from the QP table
 * @qpt: the QP table
 * @qp: the QP to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
	struct ipath_qp *q, **qpp;
	unsigned long flags;
	int fnd = 0;

	spin_lock_irqsave(&qpt->lock, flags);

	/* Remove QP from the hash table. */
	qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
	for (; (q = *qpp) != NULL; qpp = &q->next) {
		if (q == qp) {
			*qpp = qp->next;
			qp->next = NULL;
			atomic_dec(&qp->refcount);
			fnd = 1;
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);

	if (!fnd)
		return;

	/* If QPN is not reserved, mark QPN free in the bitmap. */
	if (qp->ibqp.qp_num > 1)
		free_qpn(qpt, qp->ibqp.qp_num);

	wait_event(qp->wait, !atomic_read(&qp->refcount));
}

/**
 * ipath_free_all_qps - remove all QPs from the table
 * @qpt: the QP table to empty
 */
void ipath_free_all_qps(struct ipath_qp_table *qpt)
{
	unsigned long flags;
	struct ipath_qp *qp, *nqp;
	u32 n;

	for (n = 0; n < qpt->max; n++) {
		spin_lock_irqsave(&qpt->lock, flags);
		qp = qpt->table[n];
		qpt->table[n] = NULL;
		spin_unlock_irqrestore(&qpt->lock, flags);

		while (qp) {
			nqp = qp->next;
			if (qp->ibqp.qp_num > 1)
				free_qpn(qpt, qp->ibqp.qp_num);
			if (!atomic_dec_and_test(&qp->refcount) ||
			    !ipath_destroy_qp(&qp->ibqp))
				_VERBS_INFO("QP memory leak!\n");
			qp = nqp;
		}
	}

	for (n = 0; n < ARRAY_SIZE(qpt->map); n++) {
		if (qpt->map[n].page)
			free_page((unsigned long)qpt->map[n].page);
	}
}
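
/*
 * Summary of the reference-counting protocol used above (a reading
 * aid, not a new mechanism): the hash table's pointer to a QP counts
 * as one reference, taken in ipath_alloc_qpn() and dropped in
 * ipath_free_qp(), while ipath_lookup_qpn() below takes a transient
 * reference for each asynchronous user.  ipath_free_qp() therefore
 * sleeps on qp->wait until the last such user drops its reference,
 * after which it is safe to free the QP's memory.
 */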

/**
 * ipath_lookup_qpn - return the QP with the given QPN
 * @qpt: the QP table
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	unsigned long flags;
	struct ipath_qp *qp;

	spin_lock_irqsave(&qpt->lock, flags);

	for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
		if (qp->ibqp.qp_num == qpn) {
			atomic_inc(&qp->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);
	return qp;
}

/**
 * ipath_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 */
static void ipath_reset_qp(struct ipath_qp *qp)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_hdrwords = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	atomic_set(&qp->msn, 0);
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->s_nak_state = 0;
	qp->s_rnr_timeout = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->r_rq.head = 0;
	qp->r_rq.tail = 0;
	qp->r_reuse_sge = 0;
}

/**
 * ipath_error_qp - put a QP into an error state
 * @qp: the QP to put into an error state
 *
 * Flushes both send and receive work queues.
 * QP r_rq.lock and s_lock should be held.
 */
static void ipath_error_qp(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;

	_VERBS_INFO("QP%d/%d in error state\n",
		    qp->ibqp.qp_num, qp->remote_qpn);

	spin_lock(&dev->pending_lock);
	/* XXX What if it's already removed by the timeout code? */
	if (qp->timerwait.next != LIST_POISON1)
		list_del(&qp->timerwait);
	if (qp->piowait.next != LIST_POISON1)
		list_del(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	wc.status = IB_WC_WR_FLUSH_ERR;
	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp_num = qp->ibqp.qp_num;
	wc.src_qp = 0;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = 0;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;

	while (qp->s_last != qp->s_head) {
		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

		wc.wr_id = wqe->wr.wr_id;
		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->s_hdrwords = 0;
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;

	wc.opcode = IB_WC_RECV;
	while (qp->r_rq.tail != qp->r_rq.head) {
		wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
		if (++qp->r_rq.tail >= qp->r_rq.size)
			qp->r_rq.tail = 0;
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
}
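
/*
 * Worked example of the circular flush above (illustrative values):
 * with s_size == 4, s_last == 2 and s_head == 1, the loop retires
 * entries 2, 3 and 0 with IB_WC_WR_FLUSH_ERR completions, wrapping
 * s_last back to 0 when it reaches s_size, and stops once s_last
 * catches up with s_head; the receive queue is drained the same way
 * via r_rq.tail.
 */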

/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask)
{
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&qp->r_rq.lock, flags);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV)
		if (attr->ah_attr.dlid == 0 ||
		    attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
			goto inval;

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		ipath_reset_qp(qp);
		break;

	case IB_QPS_ERR:
		ipath_error_qp(qp);
		break;

	default:
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV)
		qp->remote_ah_attr = attr->ah_attr;

	if (attr_mask & IB_QP_PATH_MTU)
		qp->path_mtu = attr->path_mtu;

	if (attr_mask & IB_QP_RETRY_CNT)
		qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry = attr->rnr_retry;
		if (qp->s_rnr_retry > 7)
			qp->s_rnr_retry = 7;
		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->s_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	qp->state = new_state;
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_rq.lock, flags);

	/*
	 * If QP1 changed to the RTS state, try to move the link to INIT
	 * even if it was ACTIVE so the SM will reinitialize the SMA's
	 * state.
	 */
	if (qp->ibqp.qp_num == 1 && new_state == IB_QPS_RTS)
		ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);

	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	ret = -EINVAL;

bail:
	return ret;
}
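
/*
 * Usage sketch (illustrative, not part of this file): a kernel ULP
 * drives these transitions through the core ib_modify_qp() verb,
 * which validates the attribute mask before this function runs, e.g.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = 0,
 *	};
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 *
 * followed by an RTR transition (IB_QP_AV, IB_QP_PATH_MTU,
 * IB_QP_DEST_QPN, IB_QP_RQ_PSN, ...) and an RTS transition
 * (IB_QP_SQ_PSN, ...).
 */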

int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct ipath_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = 0;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn;
	attr->sq_psn = qp->s_next_psn;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = 0;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = 0;
	attr->max_rd_atomic = 1;
	attr->max_dest_rd_atomic = 1;
	attr->min_rnr_timer = qp->s_min_rnr_timer;
	attr->port_num = 1;
	attr->timeout = 0;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry;
	attr->alt_port_num = 0;
	attr->alt_timeout = 0;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	init_attr->sq_sig_type =
		(qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR))
		? IB_SIGNAL_REQ_WR : 0;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = 1;
	return 0;
}

/**
 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 *
 * The QP s_lock should be held.
 */
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
	u32 aeth = atomic_read(&qp->msn) & IPS_MSN_MASK;

	if (qp->s_nak_state) {
		aeth |= qp->s_nak_state << IPS_AETH_CREDIT_SHIFT;
	} else if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= IPS_AETH_CREDIT_INVAL << IPS_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;

		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = qp->r_rq.head - qp->r_rq.tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << IPS_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
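
/*
 * Worked example of the search above (illustrative numbers): with
 * credits == 100, the loop converges on x == 0xD, since
 * credit_table[0xD] == 96 is the largest entry not exceeding 100.
 * The receiver thus advertises 96 credits, rounding down so it never
 * promises more RWQEs than it has actually posted.
 */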

/**
 * ipath_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: unused by InfiniPath
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ipath_qp *qp;
	int err;
	struct ipath_swqe *swq = NULL;
	struct ipath_ibdev *dev;
	size_t sz;
	struct ib_qp *ret;

	if (init_attr->cap.max_send_sge > 255 ||
	    init_attr->cap.max_recv_sge > 255) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	switch (init_attr->qp_type) {
	case IB_QPT_UC:
	case IB_QPT_RC:
		sz = sizeof(struct ipath_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct ipath_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		/* FALLTHROUGH */
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		qp = kmalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
		sz = sizeof(struct ipath_sge) *
			init_attr->cap.max_recv_sge +
			sizeof(struct ipath_rwqe);
		qp->r_rq.wq = vmalloc(qp->r_rq.size * sz);
		if (!qp->r_rq.wq) {
			kfree(qp);
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		tasklet_init(&qp->s_task,
			     init_attr->qp_type == IB_QPT_RC ?
			     ipath_do_rc_send : ipath_do_uc_send,
			     (unsigned long)qp);
		qp->piowait.next = LIST_POISON1;
		qp->piowait.prev = LIST_POISON2;
		qp->timerwait.next = LIST_POISON1;
		qp->timerwait.prev = LIST_POISON2;
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
		qp->s_flags = init_attr->sq_sig_type == IB_SIGNAL_REQ_WR ?
			1 << IPATH_S_SIGNAL_REQ_WR : 0;
		dev = to_idev(ibpd->device);
		err = ipath_alloc_qpn(&dev->qp_table, qp,
				      init_attr->qp_type);
		if (err) {
			vfree(swq);
			vfree(qp->r_rq.wq);
			kfree(qp);
			ret = ERR_PTR(err);
			goto bail;
		}
		ipath_reset_qp(qp);

		/* Tell the core driver that the kernel SMA is present. */
		if (qp->ibqp.qp_type == IB_QPT_SMI)
			ipath_layer_set_verbs_flags(dev->dd,
						    IPATH_VERBS_KERNEL_SMA);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	ret = &qp->ibqp;

bail:
	return ret;
}
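
/*
 * Layout note with example numbers (illustrative only): each send
 * work queue slot is sz bytes, a struct ipath_swqe header followed
 * by room for max_send_sge struct ipath_sge entries.  A QP created
 * with max_send_wr == 127 and max_send_sge == 4 thus vmalloc()s 128
 * slots of sizeof(struct ipath_swqe) + 4 * sizeof(struct ipath_sge)
 * bytes each; the extra slot lets head != tail distinguish a full
 * ring from an empty one.  The receive queue at r_rq.wq is sized the
 * same way.
 */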

/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	unsigned long flags;

	/* Tell the core driver that the kernel SMA is gone. */
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		ipath_layer_set_verbs_flags(dev->dd, 0);

	spin_lock_irqsave(&qp->r_rq.lock, flags);
	spin_lock(&qp->s_lock);
	qp->state = IB_QPS_ERR;
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_rq.lock, flags);

	/* Stop the sending tasklet. */
	tasklet_kill(&qp->s_task);

	/* Make sure the QP isn't on the timeout list. */
	spin_lock_irqsave(&dev->pending_lock, flags);
	if (qp->timerwait.next != LIST_POISON1)
		list_del(&qp->timerwait);
	if (qp->piowait.next != LIST_POISON1)
		list_del(&qp->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	/*
	 * Make sure that the QP is not in the QPN table so receive
	 * interrupts will discard packets for this QP.  XXX Also remove QP
	 * from multicast table.
	 */
	if (atomic_read(&qp->refcount) != 0)
		ipath_free_qp(&dev->qp_table, qp);

	vfree(qp->s_wq);
	vfree(qp->r_rq.wq);
	kfree(qp);
	return 0;
}

/**
 * ipath_init_qp_table - initialize the QP table for a device
 * @idev: the device whose QP table we're initializing
 * @size: the size of the QP table
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
{
	int i;
	int ret;

	idev->qp_table.last = 1;	/* QPN 0 and 1 are special. */
	idev->qp_table.max = size;
	idev->qp_table.nmaps = 1;
	idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
				       GFP_KERNEL);
	if (idev->qp_table.table == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
		atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
		idev->qp_table.map[i].page = NULL;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * ipath_sqerror_qp - put a QP's send queue into an error state
 * @qp: QP whose send queue will be put into an error state
 * @wc: the WC responsible for putting the QP in this state
 *
 * Flushes the send work queue.
 * The QP s_lock should be held.
 */
void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_swqe *wqe;

	_VERBS_INFO("Send queue error on QP%d/%d: err: %d\n",
		    qp->ibqp.qp_num, qp->remote_qpn, wc->status);

	spin_lock(&dev->pending_lock);
	/* XXX What if it's already removed by the timeout code? */
	if (qp->timerwait.next != LIST_POISON1)
		list_del(&qp->timerwait);
	if (qp->piowait.next != LIST_POISON1)
		list_del(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	/* Complete the WQE that caused the error, then flush the rest. */
	ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
	if (++qp->s_last >= qp->s_size)
		qp->s_last = 0;

	wc->status = IB_WC_WR_FLUSH_ERR;

	while (qp->s_last != qp->s_head) {
		wqe = get_swqe_ptr(qp, qp->s_last);
		wc->wr_id = wqe->wr.wr_id;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->state = IB_QPS_SQE;
}

/**
 * ipath_get_credit - handle a credit update from an incoming AETH
 * @qp: the QP the AETH was received on
 * @aeth: the Acknowledge Extended Transport Header
 *
 * Updates the QP's send credit limit and restarts the send tasklet
 * if sending was blocked waiting for credits.
 * The QP s_lock should be held.
 */
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> IPS_AETH_CREDIT_SHIFT) & IPS_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == IPS_AETH_CREDIT_INVAL) {
		qp->s_lsn = (u32) -1;
	} else if (qp->s_lsn != (u32) -1) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & IPS_MSN_MASK;
		if (ipath_cmp24(credit, qp->s_lsn) > 0)
			qp->s_lsn = credit;
	}

	/* Restart sending if it was blocked due to lack of credits. */
	if (qp->s_cur != qp->s_head &&
	    (qp->s_lsn == (u32) -1 ||
	     ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
			 qp->s_lsn + 1) <= 0))
		tasklet_hi_schedule(&qp->s_task);
}
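
/*
 * Worked example for ipath_get_credit() (illustrative numbers): an
 * AETH carrying MSN 0x000100 with credit code 0x7 advertises
 * credit_table[0x7] == 12 additional RWQEs, so s_lsn advances to
 * (0x100 + 12) & IPS_MSN_MASK == 0x10C, and the send tasklet is
 * rescheduled if the next WQE's SSN now falls within that limit.
 * A credit code of IPS_AETH_CREDIT_INVAL instead sets s_lsn to
 * (u32) -1, removing the flow-control limit entirely.
 */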