mthca_srq.c revision 6bdd61d876e6eacea5c59230b6b2d988b22793e6
/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
 */

#include <linux/slab.h>
#include <linux/string.h>

#include <asm/io.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
        MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};

struct mthca_tavor_srq_context {
        __be64 wqe_base_ds;     /* low 6 bits is descriptor size */
        __be32 state_pd;
        __be32 lkey;
        __be32 uar;
        __be16 limit_watermark;
        __be16 wqe_cnt;
        u32    reserved[2];
};

struct mthca_arbel_srq_context {
        __be32 state_logsize_srqn;
        __be32 lkey;
        __be32 db_index;
        __be32 logstride_usrpage;
        __be64 wqe_base;
        __be32 eq_pd;
        __be16 limit_watermark;
        __be16 wqe_cnt;
        u16    reserved1;
        __be16 wqe_counter;
        u32    reserved2[3];
};
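/*
 * Get a pointer to SRQ WQE number n.  The queue is either one
 * contiguous allocation ("direct") or a list of pages; in the
 * indirect case the byte offset n << wqe_shift is split into a page
 * index and an offset within that page.
 */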
static void *get_wqe(struct mthca_srq *srq, int n)
{
        if (srq->is_direct)
                return srq->queue.direct.buf + (n << srq->wqe_shift);
        else
                return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
                        ((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
        return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}
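/*
 * Fill in the hardware SRQ context that is passed to the SW2HW_SRQ
 * firmware command.  Tavor and Arbel (mem-free) firmware use
 * different context layouts, so each gets its own initializer.
 */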
static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
                                         struct mthca_pd *pd,
                                         struct mthca_srq *srq,
                                         struct mthca_tavor_srq_context *context)
{
        memset(context, 0, sizeof *context);

        context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
        context->state_pd    = cpu_to_be32(pd->pd_num);
        context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

        if (pd->ibpd.uobject)
                context->uar =
                        cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
        else
                context->uar = cpu_to_be32(dev->driver_uar.index);
}

static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
                                         struct mthca_pd *pd,
                                         struct mthca_srq *srq,
                                         struct mthca_arbel_srq_context *context)
{
        int logsize, max;

        memset(context, 0, sizeof *context);

        /*
         * Put max in a temporary variable to work around gcc bug
         * triggered by ilog2() on sparc64.
         */
        max = srq->max;
        logsize = ilog2(max);
        context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
        context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
        context->db_index = cpu_to_be32(srq->db_index);
        context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
        if (pd->ibpd.uobject)
                context->logstride_usrpage |=
                        cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
        else
                context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
        context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
        mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
                       srq->is_direct, &srq->mr);
        kfree(srq->wrid);
}

static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
                               struct mthca_srq *srq)
{
        struct mthca_data_seg *scatter;
        void *wqe;
        int err;
        int i;

        if (pd->ibpd.uobject)
                return 0;

        srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
        if (!srq->wrid)
                return -ENOMEM;

        err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
                              MTHCA_MAX_DIRECT_SRQ_SIZE,
                              &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
        if (err) {
                kfree(srq->wrid);
                return err;
        }

        /*
         * Now initialize the SRQ buffer so that all of the WQEs are
         * linked into the list of free WQEs.  In addition, set the
         * scatter list L_Keys to the sentry value of 0x100.
         */
        for (i = 0; i < srq->max; ++i) {
                wqe = get_wqe(srq, i);

                *wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;

                for (scatter = wqe + sizeof (struct mthca_next_seg);
                     (void *) scatter < wqe + (1 << srq->wqe_shift);
                     ++scatter)
                        scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
        }

        srq->last = get_wqe(srq, srq->max - 1);

        return 0;
}

int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
                    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
        struct mthca_mailbox *mailbox;
        u8 status;
        int ds;
        int err;

        /* Sanity check SRQ size before proceeding */
        if (attr->max_wr  > dev->limits.max_srq_wqes ||
            attr->max_sge > dev->limits.max_srq_sge)
                return -EINVAL;

        srq->max     = attr->max_wr;
        srq->max_gs  = attr->max_sge;
        srq->counter = 0;

        if (mthca_is_memfree(dev))
                srq->max = roundup_pow_of_two(srq->max + 1);
        else
                srq->max = srq->max + 1;

        ds = max(64UL,
                 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
                                    srq->max_gs * sizeof (struct mthca_data_seg)));

        if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
                return -EINVAL;

        srq->wqe_shift = ilog2(ds);

        srq->srqn = mthca_alloc(&dev->srq_table.alloc);
        if (srq->srqn == -1)
                return -ENOMEM;

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
                if (err)
                        goto err_out;

                if (!pd->ibpd.uobject) {
                        srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
                                                       srq->srqn, &srq->db);
                        if (srq->db_index < 0) {
                                err = -ENOMEM;
                                goto err_out_icm;
                        }
                }
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_out_db;
        }

        err = mthca_alloc_srq_buf(dev, pd, srq);
        if (err)
                goto err_out_mailbox;

        spin_lock_init(&srq->lock);
        srq->refcount = 1;
        init_waitqueue_head(&srq->wait);
        mutex_init(&srq->mutex);

        if (mthca_is_memfree(dev))
                mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
        else
                mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

        err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);

        if (err) {
                mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
                goto err_out_free_buf;
        }
        if (status) {
                mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
                           status);
                err = -EINVAL;
                goto err_out_free_buf;
        }

        spin_lock_irq(&dev->srq_table.lock);
        if (mthca_array_set(&dev->srq_table.srq,
                            srq->srqn & (dev->limits.num_srqs - 1),
                            srq)) {
                spin_unlock_irq(&dev->srq_table.lock);
                goto err_out_free_srq;
        }
        spin_unlock_irq(&dev->srq_table.lock);

        mthca_free_mailbox(dev, mailbox);

        srq->first_free = 0;
        srq->last_free  = srq->max - 1;

        attr->max_wr    = srq->max - 1;
        attr->max_sge   = srq->max_gs;

        return 0;

err_out_free_srq:
        err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
        if (err)
                mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
        else if (status)
                mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

err_out_free_buf:
        if (!pd->ibpd.uobject)
                mthca_free_srq_buf(dev, srq);

err_out_mailbox:
        mthca_free_mailbox(dev, mailbox);

err_out_db:
        if (!pd->ibpd.uobject && mthca_is_memfree(dev))
                mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
        mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
        mthca_free(&dev->srq_table.alloc, srq->srqn);

        return err;
}
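/*
 * Read the SRQ refcount under the table lock.  Used by
 * mthca_free_srq() below to wait until all event handlers have
 * dropped their references.
 */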
static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
        int c;

        spin_lock_irq(&dev->srq_table.lock);
        c = srq->refcount;
        spin_unlock_irq(&dev->srq_table.lock);

        return c;
}

void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
        struct mthca_mailbox *mailbox;
        int err;
        u8 status;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
                return;
        }

        err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
        if (err)
                mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
        else if (status)
                mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

        spin_lock_irq(&dev->srq_table.lock);
        mthca_array_clear(&dev->srq_table.srq,
                          srq->srqn & (dev->limits.num_srqs - 1));
        --srq->refcount;
        spin_unlock_irq(&dev->srq_table.lock);

        wait_event(srq->wait, !get_srq_refcount(dev, srq));

        if (!srq->ibsrq.uobject) {
                mthca_free_srq_buf(dev, srq);
                if (mthca_is_memfree(dev))
                        mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
        }

        mthca_table_put(dev, dev->srq_table.table, srq->srqn);
        mthca_free(&dev->srq_table.alloc, srq->srqn);
        mthca_free_mailbox(dev, mailbox);
}

int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        int ret;
        u8 status;

        /* We don't support resizing SRQs (yet?) */
        if (attr_mask & IB_SRQ_MAX_WR)
                return -EINVAL;

        if (attr_mask & IB_SRQ_LIMIT) {
                u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
                if (attr->srq_limit > max_wr)
                        return -EINVAL;

                mutex_lock(&srq->mutex);
                ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
                mutex_unlock(&srq->mutex);

                if (ret)
                        return ret;
                if (status)
                        return -EINVAL;
        }

        return 0;
}

int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        struct mthca_mailbox *mailbox;
        struct mthca_arbel_srq_context *arbel_ctx;
        struct mthca_tavor_srq_context *tavor_ctx;
        u8 status;
        int err;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
        if (err)
                goto out;

        if (mthca_is_memfree(dev)) {
                arbel_ctx = mailbox->buf;
                srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
        } else {
                tavor_ctx = mailbox->buf;
                srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
        }

        srq_attr->max_wr  = srq->max - 1;
        srq_attr->max_sge = srq->max_gs;

out:
        mthca_free_mailbox(dev, mailbox);

        return err;
}
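/*
 * Dispatch an asynchronous event (e.g. SRQ limit reached or SRQ
 * error) to the consumer's event handler.  The refcount is bumped
 * under the table lock so the SRQ can't be freed while the handler
 * is running.
 */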
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
                     enum ib_event_type event_type)
{
        struct mthca_srq *srq;
        struct ib_event event;

        spin_lock(&dev->srq_table.lock);
        srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
        if (srq)
                ++srq->refcount;
        spin_unlock(&dev->srq_table.lock);

        if (!srq) {
                mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
                return;
        }

        if (!srq->ibsrq.event_handler)
                goto out;

        event.device      = &dev->ib_dev;
        event.event       = event_type;
        event.element.srq = &srq->ibsrq;
        srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
        spin_lock(&dev->srq_table.lock);
        if (!--srq->refcount)
                wake_up(&srq->wait);
        spin_unlock(&dev->srq_table.lock);
}

/*
 * This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
        int ind;

        ind = wqe_addr >> srq->wqe_shift;

        spin_lock(&srq->lock);

        if (likely(srq->first_free >= 0))
                *wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
        else
                srq->first_free = ind;

        *wqe_to_link(get_wqe(srq, ind)) = -1;
        srq->last_free = ind;

        spin_unlock(&srq->lock);
}
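/*
 * Post a chain of receive work requests to a Tavor SRQ.  Each new
 * WQE is linked into the previous one's next segment, and a doorbell
 * is rung every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests: the WQE
 * count shares the second doorbell word with the SRQ number
 * ((srqn << 8) | nreq), so each ring can only cover a limited batch.
 */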
597 */ 598 wmb(); 599 600 mthca_write64(doorbell, 601 dev->kar + MTHCA_RECEIVE_DOORBELL, 602 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 603 } 604 605 /* 606 * Make sure doorbells don't leak out of SRQ spinlock and 607 * reach the HCA out of order: 608 */ 609 mmiowb(); 610 611 spin_unlock_irqrestore(&srq->lock, flags); 612 return err; 613} 614 615int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, 616 struct ib_recv_wr **bad_wr) 617{ 618 struct mthca_dev *dev = to_mdev(ibsrq->device); 619 struct mthca_srq *srq = to_msrq(ibsrq); 620 unsigned long flags; 621 int err = 0; 622 int ind; 623 int next_ind; 624 int nreq; 625 int i; 626 void *wqe; 627 628 spin_lock_irqsave(&srq->lock, flags); 629 630 for (nreq = 0; wr; ++nreq, wr = wr->next) { 631 ind = srq->first_free; 632 633 if (ind < 0) { 634 mthca_err(dev, "SRQ %06x full\n", srq->srqn); 635 err = -ENOMEM; 636 *bad_wr = wr; 637 break; 638 } 639 640 wqe = get_wqe(srq, ind); 641 next_ind = *wqe_to_link(wqe); 642 643 if (next_ind < 0) { 644 mthca_err(dev, "SRQ %06x full\n", srq->srqn); 645 err = -ENOMEM; 646 *bad_wr = wr; 647 break; 648 } 649 650 ((struct mthca_next_seg *) wqe)->nda_op = 651 cpu_to_be32((next_ind << srq->wqe_shift) | 1); 652 ((struct mthca_next_seg *) wqe)->ee_nds = 0; 653 /* flags field will always remain 0 */ 654 655 wqe += sizeof (struct mthca_next_seg); 656 657 if (unlikely(wr->num_sge > srq->max_gs)) { 658 err = -EINVAL; 659 *bad_wr = wr; 660 break; 661 } 662 663 for (i = 0; i < wr->num_sge; ++i) { 664 ((struct mthca_data_seg *) wqe)->byte_count = 665 cpu_to_be32(wr->sg_list[i].length); 666 ((struct mthca_data_seg *) wqe)->lkey = 667 cpu_to_be32(wr->sg_list[i].lkey); 668 ((struct mthca_data_seg *) wqe)->addr = 669 cpu_to_be64(wr->sg_list[i].addr); 670 wqe += sizeof (struct mthca_data_seg); 671 } 672 673 if (i < srq->max_gs) { 674 ((struct mthca_data_seg *) wqe)->byte_count = 0; 675 ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); 676 ((struct mthca_data_seg *) wqe)->addr = 0; 677 } 678 679 srq->wrid[ind] = wr->wr_id; 680 srq->first_free = next_ind; 681 } 682 683 if (likely(nreq)) { 684 srq->counter += nreq; 685 686 /* 687 * Make sure that descriptors are written before 688 * we write doorbell record. 689 */ 690 wmb(); 691 *srq->db = cpu_to_be32(srq->counter); 692 } 693 694 spin_unlock_irqrestore(&srq->lock, flags); 695 return err; 696} 697 698int mthca_max_srq_sge(struct mthca_dev *dev) 699{ 700 if (mthca_is_memfree(dev)) 701 return dev->limits.max_sg; 702 703 /* 704 * SRQ allocations are based on powers of 2 for Tavor, 705 * (although they only need to be multiples of 16 bytes). 706 * 707 * Therefore, we need to base the max number of sg entries on 708 * the largest power of 2 descriptor size that is <= to the 709 * actual max WQE descriptor size, rather than return the 710 * max_sg value given by the firmware (which is based on WQE 711 * sizes as multiples of 16, not powers of 2). 712 * 713 * If SRQ implementation is changed for Tavor to be based on 714 * multiples of 16, the calculation below can be deleted and 715 * the FW max_sg value returned. 
716 */ 717 return min_t(int, dev->limits.max_sg, 718 ((1 << (fls(dev->limits.max_desc_sz) - 1)) - 719 sizeof (struct mthca_next_seg)) / 720 sizeof (struct mthca_data_seg)); 721} 722 723int mthca_init_srq_table(struct mthca_dev *dev) 724{ 725 int err; 726 727 if (!(dev->mthca_flags & MTHCA_FLAG_SRQ)) 728 return 0; 729 730 spin_lock_init(&dev->srq_table.lock); 731 732 err = mthca_alloc_init(&dev->srq_table.alloc, 733 dev->limits.num_srqs, 734 dev->limits.num_srqs - 1, 735 dev->limits.reserved_srqs); 736 if (err) 737 return err; 738 739 err = mthca_array_init(&dev->srq_table.srq, 740 dev->limits.num_srqs); 741 if (err) 742 mthca_alloc_cleanup(&dev->srq_table.alloc); 743 744 return err; 745} 746 747void mthca_cleanup_srq_table(struct mthca_dev *dev) 748{ 749 if (!(dev->mthca_flags & MTHCA_FLAG_SRQ)) 750 return; 751 752 mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs); 753 mthca_alloc_cleanup(&dev->srq_table.alloc); 754} 755