/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"

#define MLX4_MPT_FLAG_SW_OWNS	    (0xfUL << 28)
#define MLX4_MPT_FLAG_FREE	    (0x3UL << 28)
#define MLX4_MPT_FLAG_MIO	    (1 << 17)
#define MLX4_MPT_FLAG_BIND_ENABLE   (1 << 15)
#define MLX4_MPT_FLAG_PHYSICAL	    (1 <<  9)
#define MLX4_MPT_FLAG_REGION	    (1 <<  8)

#define MLX4_MPT_PD_FLAG_FAST_REG   (1 << 27)
#define MLX4_MPT_PD_FLAG_RAE	    (1 << 28)
#define MLX4_MPT_PD_FLAG_EN_INV	    (3 << 24)

#define MLX4_MPT_STATUS_SW	0xF0
#define MLX4_MPT_STATUS_HW	0x00

/*
 * MTT segments are managed by a classic buddy allocator: bits[o]
 * marks which blocks of 2^o segments are free, and num_free[o]
 * counts them.
 */
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	/* Find the smallest free block of at least the requested order */
	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	/* Split the block down to the requested order, freeing each buddy half */
	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}

static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	/* Coalesce with the buddy block as long as it is also free */
	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
		if (!buddy->bits[i])
			goto err_out_free;
		bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
	}

	/* Initially the whole range is one free block of maximal order */
	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}

static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;
	int seg_order;
	u32 offset;

	/* The buddy works in units of segments of 2^log_mtts_per_seg MTTs */
	seg_order = max_t(int, order - log_mtts_per_seg, 0);

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	if (seg == -1)
		return -1;

	offset = seg * (1 << log_mtts_per_seg);

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
		return -1;
	}

	return offset;
}

static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param;
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_alloc_mtt_range(dev, order);
}

int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
{
	int i;

	if (!npages) {
		/* order < 0 marks the MTT as unused (physical MR) */
		mtt->order      = -1;
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		return 0;
	} else
		mtt->page_shift = page_shift;

	/* order = log2 of the smallest power of two >= npages */
	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
		++mtt->order;

	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->offset == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);

void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u32 first_seg;
	int seg_order;
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);
	first_seg = offset / (1 << log_mtts_per_seg);

	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
			     offset + (1 << order) - 1);
}

static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u64 in_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, offset);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
				  offset, order);
		return;
	}
	__mlx4_free_mtt_range(dev, offset, order);
}
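
/*
 * Note on naming, as used throughout this file: the double-underscore
 * variants (__mlx4_alloc_mtt_range(), __mlx4_mr_reserve(), ...) act
 * directly on the local resource tables, while the unprefixed wrappers
 * check mlx4_is_mfunc() and, on a multi-function (SR-IOV) device,
 * forward the request to the master through the wrapped command
 * interface instead.
 */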

void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	if (mtt->order < 0)
		return;

	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);

u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);

/* MPT hardware indices and memory keys differ by a byte rotation */
static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma, mpt_index,
			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT,
			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
{
	mr->iova    = iova;
	mr->size    = size;
	mr->pd	    = pd;
	mr->access  = access;
	mr->enabled = MLX4_MR_DISABLED;
	mr->key	    = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}

static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
{
	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

int __mlx4_mr_reserve(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}

static int mlx4_mr_reserve(struct mlx4_dev *dev)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				 MLX4_CMD_ALLOC_RES,
				 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_mr_reserve(dev);
}

void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
}

static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mr_release(dev, index);
}

int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}

static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	u64 param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES,
				    MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_mr_alloc_icm(dev, index);
}

void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_table_put(dev, &mr_table->dmpt_table, index);
}

static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
		return;
	}
	return __mlx4_mr_free_icm(dev, index);
}

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	u32 index;
	int err;

	index = mlx4_mr_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mr_release(dev, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);

static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled == MLX4_MR_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err)
			mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);

		mr->enabled = MLX4_MR_EN_SW;
	}
	mlx4_mtt_cleanup(dev, &mr->mtt);
}

void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	mlx4_mr_free_reserved(dev, mr);
	if (mr->enabled)
		mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mr_release(dev, key_to_hw_index(mr->key));
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);

int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;

	memset(mpt_entry, 0, sizeof *mpt_entry);

	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO    |
				       MLX4_MPT_FLAG_REGION |
				       mr->access);

	mpt_entry->key	       = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start       = cpu_to_be64(mr->iova);
	mpt_entry->length      = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
								&mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
	} else {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mr->enabled = MLX4_MR_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);
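
/*
 * Typical MR registration flow built from the routines above (a
 * sketch; pdn, npages and page_list are caller-supplied, error
 * handling omitted):
 *
 *	struct mlx4_mr mr;
 *
 *	mlx4_mr_alloc(dev, pdn, iova, size, access, npages, PAGE_SHIFT, &mr);
 *	mlx4_write_mtt(dev, &mr.mtt, 0, npages, page_list);
 *	mlx4_mr_enable(dev, &mr);
 *	...
 *	mlx4_mr_free(dev, &mr);
 */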

static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);

	if (!mtts)
		return -ENOMEM;

	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
				npages * sizeof (u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
				   npages * sizeof (u64), DMA_TO_DEVICE);

	return 0;
}

int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
{
	int err = 0;
	int chunk;
	int mtts_per_page;
	int max_mtts_first_page;

	/* compute how many MTTs fit in the first page */
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
			      % mtts_per_page;

	chunk = min_t(int, max_mtts_first_page, npages);

	while (npages > 0) {
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;
		npages	    -= chunk;
		start_index += chunk;
		page_list   += chunk;

		chunk = min_t(int, mtts_per_page, npages);
	}
	return err;
}

int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	int chunk;
	int err = 0;
	int i;

	if (mtt->order < 0)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		while (npages > 0) {
			/* the first two quadwords of the mailbox carry the MTT offset */
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
				      npages);
			inbox[0] = cpu_to_be64(mtt->offset + start_index);
			inbox[1] = 0;
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
							   MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			if (err) {
				mlx4_free_cmd_mailbox(dev, mailbox);
				return err;
			}

			npages	    -= chunk;
			start_index += chunk;
			page_list   += chunk;
		}
		mlx4_free_cmd_mailbox(dev, mailbox);
		return err;
	}

	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);

int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf)
{
	u64 *page_list;
	int err;
	int i;

	page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i)
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;

	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);

int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	int err;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	/* Nothing to do for slaves - all MR handling is forwarded
	 * to the master */
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2(dev->caps.num_mtts /
				    (1 << log_mtts_per_seg)));
	if (err)
		goto err_buddy;

	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %d is too small.\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}

void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;

	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}

static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				 int npages, u64 iova)
{
	int i, page_mask;

	if (npages > fmr->max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < npages; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}

int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
	u32 key;
	int i, err;

	err = mlx4_check_fmr(fmr, page_list, npages, iova);
	if (err)
		return err;

	++fmr->maps;

	/* Advance the key so this mapping gets a fresh memory key */
	key = key_to_hw_index(fmr->mr.key);
	key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before writing MTT entries */
	wmb();

	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	fmr->mpt->key	 = cpu_to_be32(key);
	fmr->mpt->lkey	 = cpu_to_be32(key);
	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
	fmr->mpt->start	 = cpu_to_be64(iova);

	/* Make sure MTT entries are visible before setting MPT status */
	wmb();

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible before consumer can use FMR */
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);

int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = -ENOMEM;

	if (max_maps > dev->caps.max_fmr_maps)
		return -EINVAL;

	/* page_shift must be within the device's supported page sizes */
	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;
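
	/*
	 * mlx4_map_phys_fmr() above rewrites the MTT entries in place
	 * through the single DMA handle obtained below, hence the
	 * following restriction.
	 */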
	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages	= max_pages;
	fmr->max_maps	= max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	if (err)
		return err;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.offset,
				    &fmr->dma_handle);

	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	mlx4_mr_free(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);

int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_mr_enable(dev, &fmr->mr);
	if (err)
		return err;

	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
				   key_to_hw_index(fmr->mr.key), NULL);
	if (!fmr->mpt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);

void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	if (!fmr->maps)
		return;

	fmr->maps = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n",
		       err);
		return;
	}

	err = mlx4_HW2SW_MPT(dev, NULL,
			     key_to_hw_index(fmr->mr.key) &
			     (dev->caps.num_mpts - 1));
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err) {
		printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
		       err);
		return;
	}
	fmr->mr.enabled = MLX4_MR_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);

int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	if (fmr->maps)
		return -EBUSY;

	mlx4_mr_free(dev, &fmr->mr);
	fmr->mr.enabled = MLX4_MR_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);

int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
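
/*
 * FMR lifecycle sketch using the functions above (pdn, page_list, iova
 * and npages are caller-supplied; error handling omitted):
 *
 *	struct mlx4_fmr fmr;
 *	u32 lkey, rkey;
 *
 *	mlx4_fmr_alloc(dev, pdn, access, max_pages, max_maps,
 *		       PAGE_SHIFT, &fmr);
 *	mlx4_fmr_enable(dev, &fmr);
 *	mlx4_map_phys_fmr(dev, &fmr, page_list, npages, iova, &lkey, &rkey);
 *	...			(post work requests using lkey/rkey)
 *	mlx4_fmr_unmap(dev, &fmr, &lkey, &rkey);
 *	mlx4_fmr_free(dev, &fmr);
 */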