mthca_srq.c revision f0d1b0b30d250a07627ad8b9fbbb5c7cc08422e8
/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
 */

#include <linux/slab.h>
#include <linux/string.h>

#include <asm/io.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};

struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits is descriptor size */
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u32    reserved[2];
};

struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};

static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}
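/*
 * WQE n lives at byte offset n << wqe_shift into the queue buffer.
 * As an illustrative example (assuming 4K pages): with 64-byte WQEs
 * (wqe_shift == 6), WQE 70 is at offset 70 << 6 == 4480, i.e. either
 * direct.buf + 4480 when the buffer is one contiguous allocation, or
 * offset 4480 & 4095 == 384 into page_list[4480 >> 12] == page_list[1]
 * when it is chunked.
 */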
/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}

static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context)
{
	memset(context, 0, sizeof *context);

	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd    = cpu_to_be32(pd->pd_num);
	context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

	if (pd->ibpd.uobject)
		context->uar =
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}

static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context)
{
	int logsize;

	memset(context, 0, sizeof *context);

	logsize = ilog2(srq->max);
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (pd->ibpd.uobject)
		context->logstride_usrpage |=
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}

static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;

	if (pd->ibpd.uobject)
		return 0;

	srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentry value of 0x100.
	 */
	for (i = 0; i < srq->max; ++i) {
		wqe = get_wqe(srq, i);

		*wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	srq->last = get_wqe(srq, srq->max - 1);

	return 0;
}
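/*
 * Each WQE is a next segment followed by max_gs scatter segments,
 * rounded up to a power of two of at least 64 bytes.  A worked
 * example (assuming the usual 16-byte next and scatter segments):
 * max_sge == 4 gives 16 + 4 * 16 == 80 bytes, which rounds up to a
 * 128-byte descriptor, so wqe_shift becomes 7.
 */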
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	u8 status;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr  > dev->limits.max_srq_wqes ||
	    attr->max_sge > dev->limits.max_srq_sge)
		return -EINVAL;

	srq->max     = attr->max_wr;
	srq->max_gs  = attr->max_sge;
	srq->counter = 0;

	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);
	else
		srq->max = srq->max + 1;

	ds = max(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));

	if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
		return -EINVAL;

	srq->wqe_shift = ilog2(ds);

	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!pd->ibpd.uobject) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}

	err = mthca_alloc_srq_buf(dev, pd, srq);
	if (err)
		goto err_out_mailbox;

	spin_lock_init(&srq->lock);
	srq->refcount = 1;
	init_waitqueue_head(&srq->wait);
	mutex_init(&srq->mutex);

	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);

	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_buf;
	}

	spin_lock_irq(&dev->srq_table.lock);
	if (mthca_array_set(&dev->srq_table.srq,
			    srq->srqn & (dev->limits.num_srqs - 1),
			    srq)) {
		spin_unlock_irq(&dev->srq_table.lock);
		goto err_out_free_srq;
	}
	spin_unlock_irq(&dev->srq_table.lock);

	mthca_free_mailbox(dev, mailbox);

	srq->first_free = 0;
	srq->last_free  = srq->max - 1;

	attr->max_wr    = srq->max - 1;
	attr->max_sge   = srq->max_gs;

	return 0;

err_out_free_srq:
	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
	/*
	 * We only get here when mthca_array_set() fails to allocate,
	 * so report that failure rather than the cleanup command's
	 * result (which may be zero and would make us return success).
	 */
	err = -ENOMEM;

err_out_free_buf:
	if (!pd->ibpd.uobject)
		mthca_free_srq_buf(dev, srq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_db:
	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);

	return err;
}
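/*
 * srq->refcount starts at 1 when the SRQ is created.  The event path
 * takes a reference for as long as a handler may be running, and
 * mthca_free_srq() drops the initial reference and then sleeps until
 * the count reaches zero.  Reads go through srq_table.lock so they
 * cannot race with the decrement-and-wake in the event path.
 */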
static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
	int c;

	spin_lock_irq(&dev->srq_table.lock);
	c = srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	return c;
}

void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	--srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	wait_event(srq->wait, !get_srq_refcount(dev, srq));

	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}

int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret;
	u8 status;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
		if (attr->srq_limit > max_wr)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
		if (status)
			return -EINVAL;
	}

	return 0;
}

int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	struct mthca_mailbox *mailbox;
	struct mthca_arbel_srq_context *arbel_ctx;
	struct mthca_tavor_srq_context *tavor_ctx;
	u8 status;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
	if (err)
		goto out;

	if (mthca_is_memfree(dev)) {
		arbel_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
	} else {
		tavor_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
	}

	srq_attr->max_wr  = srq->max - 1;
	srq_attr->max_sge = srq->max_gs;

out:
	mthca_free_mailbox(dev, mailbox);

	return err;
}
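/*
 * Dispatch an asynchronous SRQ event (e.g. limit reached) from the EQ
 * code to the consumer's event handler.  The reference taken under
 * srq_table.lock keeps the SRQ from being freed out from under a
 * running handler.
 */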
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		++srq->refcount;
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	spin_lock(&dev->srq_table.lock);
	if (!--srq->refcount)
		wake_up(&srq->wait);
	spin_unlock(&dev->srq_table.lock);
}

/*
 * This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

	if (likely(srq->first_free >= 0))
		*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
	else
		srq->first_free = ind;

	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}
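/*
 * On Tavor the hardware follows each descriptor's next segment to
 * find the following WQE, so posting links the new WQE into the
 * previous one: nda_op is written first, then ee_nds (MTHCA_NEXT_DBD)
 * only after a wmb(), so the HCA can never chain to a half-written
 * descriptor.  Doorbells are batched, with at most
 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB WQEs per MMIO doorbell write.
 */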
int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	__be32 doorbell[2];
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; wr = wr->next) {
		ind = srq->first_free;

		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe      = get_wqe(srq, ind);
		next_ind = *wqe_to_link(wqe);

		if (next_ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << srq->wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;

		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
			doorbell[1] = cpu_to_be32(srq->srqn << 8);

			/*
			 * Make sure that descriptors are written
			 * before doorbell is rung.
			 */
			wmb();

			mthca_write64(doorbell,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			first_ind = srq->first_free;
		}
	}

	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
		doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);

		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SRQ spinlock and
	 * reach the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind = srq->first_free;

		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe      = get_wqe(srq, ind);
		next_ind = *wqe_to_link(wqe);

		if (next_ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		((struct mthca_next_seg *) wqe)->nda_op =
			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}
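/*
 * Worked example for the Tavor branch below (the value is
 * illustrative, not taken from any particular HCA): if the firmware
 * reports max_desc_sz == 1008, the largest power-of-two descriptor is
 * 512 bytes, leaving (512 - 16) / 16 == 31 scatter/gather entries
 * once the 16-byte next segment is subtracted.
 */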
int mthca_max_srq_sge(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev))
		return dev->limits.max_sg;

	/*
	 * SRQ allocations are based on powers of 2 for Tavor,
	 * (although they only need to be multiples of 16 bytes).
	 *
	 * Therefore, we need to base the max number of sg entries on
	 * the largest power of 2 descriptor size that is <= to the
	 * actual max WQE descriptor size, rather than return the
	 * max_sg value given by the firmware (which is based on WQE
	 * sizes as multiples of 16, not powers of 2).
	 *
	 * If SRQ implementation is changed for Tavor to be based on
	 * multiples of 16, the calculation below can be deleted and
	 * the FW max_sg value returned.
	 */
	return min_t(int, dev->limits.max_sg,
		     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
		      sizeof (struct mthca_next_seg)) /
		     sizeof (struct mthca_data_seg));
}

int mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}

void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}