mthca_qp.c revision 9e583b85c2a0215dc7f4427361b4f75fcc0316af
1/* 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 * 35 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $ 36 */ 37 38#include <linux/init.h> 39#include <linux/string.h> 40#include <linux/slab.h> 41 42#include <rdma/ib_verbs.h> 43#include <rdma/ib_cache.h> 44#include <rdma/ib_pack.h> 45 46#include "mthca_dev.h" 47#include "mthca_cmd.h" 48#include "mthca_memfree.h" 49#include "mthca_wqe.h" 50 51enum { 52 MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE, 53 MTHCA_ACK_REQ_FREQ = 10, 54 MTHCA_FLIGHT_LIMIT = 9, 55 MTHCA_UD_HEADER_SIZE = 72, /* largest UD header possible */ 56 MTHCA_INLINE_HEADER_SIZE = 4, /* data segment overhead for inline */ 57 MTHCA_INLINE_CHUNK_SIZE = 16 /* inline data segment chunk */ 58}; 59 60enum { 61 MTHCA_QP_STATE_RST = 0, 62 MTHCA_QP_STATE_INIT = 1, 63 MTHCA_QP_STATE_RTR = 2, 64 MTHCA_QP_STATE_RTS = 3, 65 MTHCA_QP_STATE_SQE = 4, 66 MTHCA_QP_STATE_SQD = 5, 67 MTHCA_QP_STATE_ERR = 6, 68 MTHCA_QP_STATE_DRAINING = 7 69}; 70 71enum { 72 MTHCA_QP_ST_RC = 0x0, 73 MTHCA_QP_ST_UC = 0x1, 74 MTHCA_QP_ST_RD = 0x2, 75 MTHCA_QP_ST_UD = 0x3, 76 MTHCA_QP_ST_MLX = 0x7 77}; 78 79enum { 80 MTHCA_QP_PM_MIGRATED = 0x3, 81 MTHCA_QP_PM_ARMED = 0x0, 82 MTHCA_QP_PM_REARM = 0x1 83}; 84 85enum { 86 /* qp_context flags */ 87 MTHCA_QP_BIT_DE = 1 << 8, 88 /* params1 */ 89 MTHCA_QP_BIT_SRE = 1 << 15, 90 MTHCA_QP_BIT_SWE = 1 << 14, 91 MTHCA_QP_BIT_SAE = 1 << 13, 92 MTHCA_QP_BIT_SIC = 1 << 4, 93 MTHCA_QP_BIT_SSC = 1 << 3, 94 /* params2 */ 95 MTHCA_QP_BIT_RRE = 1 << 15, 96 MTHCA_QP_BIT_RWE = 1 << 14, 97 MTHCA_QP_BIT_RAE = 1 << 13, 98 MTHCA_QP_BIT_RIC = 1 << 4, 99 MTHCA_QP_BIT_RSC = 1 << 3 100}; 101 102enum { 103 MTHCA_SEND_DOORBELL_FENCE = 1 << 5 104}; 105 106struct mthca_qp_path { 107 __be32 port_pkey; 108 u8 rnr_retry; 109 u8 g_mylmc; 110 __be16 rlid; 111 u8 ackto; 112 u8 mgid_index; 113 u8 static_rate; 114 u8 hop_limit; 115 __be32 sl_tclass_flowlabel; 116 u8 rgid[16]; 117} __attribute__((packed)); 118 119struct mthca_qp_context { 120 __be32 flags; 121 __be32 
tavor_sched_queue; /* Reserved on Arbel */ 122 u8 mtu_msgmax; 123 u8 rq_size_stride; /* Reserved on Tavor */ 124 u8 sq_size_stride; /* Reserved on Tavor */ 125 u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */ 126 __be32 usr_page; 127 __be32 local_qpn; 128 __be32 remote_qpn; 129 u32 reserved1[2]; 130 struct mthca_qp_path pri_path; 131 struct mthca_qp_path alt_path; 132 __be32 rdd; 133 __be32 pd; 134 __be32 wqe_base; 135 __be32 wqe_lkey; 136 __be32 params1; 137 __be32 reserved2; 138 __be32 next_send_psn; 139 __be32 cqn_snd; 140 __be32 snd_wqe_base_l; /* Next send WQE on Tavor */ 141 __be32 snd_db_index; /* (debugging only entries) */ 142 __be32 last_acked_psn; 143 __be32 ssn; 144 __be32 params2; 145 __be32 rnr_nextrecvpsn; 146 __be32 ra_buff_indx; 147 __be32 cqn_rcv; 148 __be32 rcv_wqe_base_l; /* Next recv WQE on Tavor */ 149 __be32 rcv_db_index; /* (debugging only entries) */ 150 __be32 qkey; 151 __be32 srqn; 152 __be32 rmsn; 153 __be16 rq_wqe_counter; /* reserved on Tavor */ 154 __be16 sq_wqe_counter; /* reserved on Tavor */ 155 u32 reserved3[18]; 156} __attribute__((packed)); 157 158struct mthca_qp_param { 159 __be32 opt_param_mask; 160 u32 reserved1; 161 struct mthca_qp_context context; 162 u32 reserved2[62]; 163} __attribute__((packed)); 164 165enum { 166 MTHCA_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, 167 MTHCA_QP_OPTPAR_RRE = 1 << 1, 168 MTHCA_QP_OPTPAR_RAE = 1 << 2, 169 MTHCA_QP_OPTPAR_RWE = 1 << 3, 170 MTHCA_QP_OPTPAR_PKEY_INDEX = 1 << 4, 171 MTHCA_QP_OPTPAR_Q_KEY = 1 << 5, 172 MTHCA_QP_OPTPAR_RNR_TIMEOUT = 1 << 6, 173 MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7, 174 MTHCA_QP_OPTPAR_SRA_MAX = 1 << 8, 175 MTHCA_QP_OPTPAR_RRA_MAX = 1 << 9, 176 MTHCA_QP_OPTPAR_PM_STATE = 1 << 10, 177 MTHCA_QP_OPTPAR_PORT_NUM = 1 << 11, 178 MTHCA_QP_OPTPAR_RETRY_COUNT = 1 << 12, 179 MTHCA_QP_OPTPAR_ALT_RNR_RETRY = 1 << 13, 180 MTHCA_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, 181 MTHCA_QP_OPTPAR_RNR_RETRY = 1 << 15, 182 MTHCA_QP_OPTPAR_SCHED_QUEUE = 1 << 16 183}; 184 185static const u8 mthca_opcode[] = { 186 [IB_WR_SEND] = MTHCA_OPCODE_SEND, 187 [IB_WR_SEND_WITH_IMM] = MTHCA_OPCODE_SEND_IMM, 188 [IB_WR_RDMA_WRITE] = MTHCA_OPCODE_RDMA_WRITE, 189 [IB_WR_RDMA_WRITE_WITH_IMM] = MTHCA_OPCODE_RDMA_WRITE_IMM, 190 [IB_WR_RDMA_READ] = MTHCA_OPCODE_RDMA_READ, 191 [IB_WR_ATOMIC_CMP_AND_SWP] = MTHCA_OPCODE_ATOMIC_CS, 192 [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA, 193}; 194 195static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) 196{ 197 return qp->qpn >= dev->qp_table.sqp_start && 198 qp->qpn <= dev->qp_table.sqp_start + 3; 199} 200 201static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) 202{ 203 return qp->qpn >= dev->qp_table.sqp_start && 204 qp->qpn <= dev->qp_table.sqp_start + 1; 205} 206 207static void *get_recv_wqe(struct mthca_qp *qp, int n) 208{ 209 if (qp->is_direct) 210 return qp->queue.direct.buf + (n << qp->rq.wqe_shift); 211 else 212 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + 213 ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1)); 214} 215 216static void *get_send_wqe(struct mthca_qp *qp, int n) 217{ 218 if (qp->is_direct) 219 return qp->queue.direct.buf + qp->send_wqe_offset + 220 (n << qp->sq.wqe_shift); 221 else 222 return qp->queue.page_list[(qp->send_wqe_offset + 223 (n << qp->sq.wqe_shift)) >> 224 PAGE_SHIFT].buf + 225 ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) & 226 (PAGE_SIZE - 1)); 227} 228 229static void mthca_wq_reset(struct mthca_wq *wq) 230{ 231 wq->next_ind = 0; 232 wq->last_comp = wq->max - 1; 233 wq->head = 0; 234 wq->tail = 0; 235} 236 237void 
mthca_qp_event(struct mthca_dev *dev, u32 qpn, 238 enum ib_event_type event_type) 239{ 240 struct mthca_qp *qp; 241 struct ib_event event; 242 243 spin_lock(&dev->qp_table.lock); 244 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); 245 if (qp) 246 ++qp->refcount; 247 spin_unlock(&dev->qp_table.lock); 248 249 if (!qp) { 250 mthca_warn(dev, "Async event for bogus QP %08x\n", qpn); 251 return; 252 } 253 254 if (event_type == IB_EVENT_PATH_MIG) 255 qp->port = qp->alt_port; 256 257 event.device = &dev->ib_dev; 258 event.event = event_type; 259 event.element.qp = &qp->ibqp; 260 if (qp->ibqp.event_handler) 261 qp->ibqp.event_handler(&event, qp->ibqp.qp_context); 262 263 spin_lock(&dev->qp_table.lock); 264 if (!--qp->refcount) 265 wake_up(&qp->wait); 266 spin_unlock(&dev->qp_table.lock); 267} 268 269static int to_mthca_state(enum ib_qp_state ib_state) 270{ 271 switch (ib_state) { 272 case IB_QPS_RESET: return MTHCA_QP_STATE_RST; 273 case IB_QPS_INIT: return MTHCA_QP_STATE_INIT; 274 case IB_QPS_RTR: return MTHCA_QP_STATE_RTR; 275 case IB_QPS_RTS: return MTHCA_QP_STATE_RTS; 276 case IB_QPS_SQD: return MTHCA_QP_STATE_SQD; 277 case IB_QPS_SQE: return MTHCA_QP_STATE_SQE; 278 case IB_QPS_ERR: return MTHCA_QP_STATE_ERR; 279 default: return -1; 280 } 281} 282 283enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS }; 284 285static int to_mthca_st(int transport) 286{ 287 switch (transport) { 288 case RC: return MTHCA_QP_ST_RC; 289 case UC: return MTHCA_QP_ST_UC; 290 case UD: return MTHCA_QP_ST_UD; 291 case RD: return MTHCA_QP_ST_RD; 292 case MLX: return MTHCA_QP_ST_MLX; 293 default: return -1; 294 } 295} 296 297static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr, 298 int attr_mask) 299{ 300 if (attr_mask & IB_QP_PKEY_INDEX) 301 sqp->pkey_index = attr->pkey_index; 302 if (attr_mask & IB_QP_QKEY) 303 sqp->qkey = attr->qkey; 304 if (attr_mask & IB_QP_SQ_PSN) 305 sqp->send_psn = attr->sq_psn; 306} 307 308static void init_port(struct mthca_dev *dev, int port) 309{ 310 int err; 311 u8 status; 312 struct mthca_init_ib_param param; 313 314 memset(¶m, 0, sizeof param); 315 316 param.port_width = dev->limits.port_width_cap; 317 param.vl_cap = dev->limits.vl_cap; 318 param.mtu_cap = dev->limits.mtu_cap; 319 param.gid_cap = dev->limits.gid_table_len; 320 param.pkey_cap = dev->limits.pkey_table_len; 321 322 err = mthca_INIT_IB(dev, ¶m, port, &status); 323 if (err) 324 mthca_warn(dev, "INIT_IB failed, return code %d.\n", err); 325 if (status) 326 mthca_warn(dev, "INIT_IB returned status %02x.\n", status); 327} 328 329static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr, 330 int attr_mask) 331{ 332 u8 dest_rd_atomic; 333 u32 access_flags; 334 u32 hw_access_flags = 0; 335 336 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 337 dest_rd_atomic = attr->max_dest_rd_atomic; 338 else 339 dest_rd_atomic = qp->resp_depth; 340 341 if (attr_mask & IB_QP_ACCESS_FLAGS) 342 access_flags = attr->qp_access_flags; 343 else 344 access_flags = qp->atomic_rd_en; 345 346 if (!dest_rd_atomic) 347 access_flags &= IB_ACCESS_REMOTE_WRITE; 348 349 if (access_flags & IB_ACCESS_REMOTE_READ) 350 hw_access_flags |= MTHCA_QP_BIT_RRE; 351 if (access_flags & IB_ACCESS_REMOTE_ATOMIC) 352 hw_access_flags |= MTHCA_QP_BIT_RAE; 353 if (access_flags & IB_ACCESS_REMOTE_WRITE) 354 hw_access_flags |= MTHCA_QP_BIT_RWE; 355 356 return cpu_to_be32(hw_access_flags); 357} 358 359static inline enum ib_qp_state to_ib_qp_state(int mthca_state) 360{ 361 switch (mthca_state) { 362 case MTHCA_QP_STATE_RST: return 
IB_QPS_RESET; 363 case MTHCA_QP_STATE_INIT: return IB_QPS_INIT; 364 case MTHCA_QP_STATE_RTR: return IB_QPS_RTR; 365 case MTHCA_QP_STATE_RTS: return IB_QPS_RTS; 366 case MTHCA_QP_STATE_DRAINING: 367 case MTHCA_QP_STATE_SQD: return IB_QPS_SQD; 368 case MTHCA_QP_STATE_SQE: return IB_QPS_SQE; 369 case MTHCA_QP_STATE_ERR: return IB_QPS_ERR; 370 default: return -1; 371 } 372} 373 374static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state) 375{ 376 switch (mthca_mig_state) { 377 case 0: return IB_MIG_ARMED; 378 case 1: return IB_MIG_REARM; 379 case 3: return IB_MIG_MIGRATED; 380 default: return -1; 381 } 382} 383 384static int to_ib_qp_access_flags(int mthca_flags) 385{ 386 int ib_flags = 0; 387 388 if (mthca_flags & MTHCA_QP_BIT_RRE) 389 ib_flags |= IB_ACCESS_REMOTE_READ; 390 if (mthca_flags & MTHCA_QP_BIT_RWE) 391 ib_flags |= IB_ACCESS_REMOTE_WRITE; 392 if (mthca_flags & MTHCA_QP_BIT_RAE) 393 ib_flags |= IB_ACCESS_REMOTE_ATOMIC; 394 395 return ib_flags; 396} 397 398static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr, 399 struct mthca_qp_path *path) 400{ 401 memset(ib_ah_attr, 0, sizeof *path); 402 ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3; 403 404 if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports) 405 return; 406 407 ib_ah_attr->dlid = be16_to_cpu(path->rlid); 408 ib_ah_attr->sl = be32_to_cpu(path->sl_tclass_flowlabel) >> 28; 409 ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f; 410 ib_ah_attr->static_rate = mthca_rate_to_ib(dev, 411 path->static_rate & 0xf, 412 ib_ah_attr->port_num); 413 ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0; 414 if (ib_ah_attr->ah_flags) { 415 ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1); 416 ib_ah_attr->grh.hop_limit = path->hop_limit; 417 ib_ah_attr->grh.traffic_class = 418 (be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff; 419 ib_ah_attr->grh.flow_label = 420 be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff; 421 memcpy(ib_ah_attr->grh.dgid.raw, 422 path->rgid, sizeof ib_ah_attr->grh.dgid.raw); 423 } 424} 425 426int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, 427 struct ib_qp_init_attr *qp_init_attr) 428{ 429 struct mthca_dev *dev = to_mdev(ibqp->device); 430 struct mthca_qp *qp = to_mqp(ibqp); 431 int err; 432 struct mthca_mailbox *mailbox; 433 struct mthca_qp_param *qp_param; 434 struct mthca_qp_context *context; 435 int mthca_state; 436 u8 status; 437 438 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 439 if (IS_ERR(mailbox)) 440 return PTR_ERR(mailbox); 441 442 err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status); 443 if (err) 444 goto out; 445 if (status) { 446 mthca_warn(dev, "QUERY_QP returned status %02x\n", status); 447 err = -EINVAL; 448 goto out; 449 } 450 451 qp_param = mailbox->buf; 452 context = &qp_param->context; 453 mthca_state = be32_to_cpu(context->flags) >> 28; 454 455 qp_attr->qp_state = to_ib_qp_state(mthca_state); 456 qp_attr->cur_qp_state = qp_attr->qp_state; 457 qp_attr->path_mtu = context->mtu_msgmax >> 5; 458 qp_attr->path_mig_state = 459 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); 460 qp_attr->qkey = be32_to_cpu(context->qkey); 461 qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff; 462 qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff; 463 qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff; 464 qp_attr->qp_access_flags = 465 to_ib_qp_access_flags(be32_to_cpu(context->params2)); 466 
qp_attr->cap.max_send_wr = qp->sq.max; 467 qp_attr->cap.max_recv_wr = qp->rq.max; 468 qp_attr->cap.max_send_sge = qp->sq.max_gs; 469 qp_attr->cap.max_recv_sge = qp->rq.max_gs; 470 qp_attr->cap.max_inline_data = qp->max_inline_data; 471 472 if (qp->transport == RC || qp->transport == UC) { 473 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); 474 to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); 475 qp_attr->alt_pkey_index = 476 be32_to_cpu(context->alt_path.port_pkey) & 0x7f; 477 qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; 478 } 479 480 qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f; 481 qp_attr->port_num = 482 (be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3; 483 484 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ 485 qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING; 486 487 qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7); 488 489 qp_attr->max_dest_rd_atomic = 490 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7); 491 qp_attr->min_rnr_timer = 492 (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f; 493 qp_attr->timeout = context->pri_path.ackto >> 3; 494 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; 495 qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5; 496 qp_attr->alt_timeout = context->alt_path.ackto >> 3; 497 qp_init_attr->cap = qp_attr->cap; 498 499out: 500 mthca_free_mailbox(dev, mailbox); 501 return err; 502} 503 504static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah, 505 struct mthca_qp_path *path, u8 port) 506{ 507 path->g_mylmc = ah->src_path_bits & 0x7f; 508 path->rlid = cpu_to_be16(ah->dlid); 509 path->static_rate = mthca_get_rate(dev, ah->static_rate, port); 510 511 if (ah->ah_flags & IB_AH_GRH) { 512 if (ah->grh.sgid_index >= dev->limits.gid_table_len) { 513 mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n", 514 ah->grh.sgid_index, dev->limits.gid_table_len-1); 515 return -1; 516 } 517 518 path->g_mylmc |= 1 << 7; 519 path->mgid_index = ah->grh.sgid_index; 520 path->hop_limit = ah->grh.hop_limit; 521 path->sl_tclass_flowlabel = 522 cpu_to_be32((ah->sl << 28) | 523 (ah->grh.traffic_class << 20) | 524 (ah->grh.flow_label)); 525 memcpy(path->rgid, ah->grh.dgid.raw, 16); 526 } else 527 path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28); 528 529 return 0; 530} 531 532int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, 533 struct ib_udata *udata) 534{ 535 struct mthca_dev *dev = to_mdev(ibqp->device); 536 struct mthca_qp *qp = to_mqp(ibqp); 537 enum ib_qp_state cur_state, new_state; 538 struct mthca_mailbox *mailbox; 539 struct mthca_qp_param *qp_param; 540 struct mthca_qp_context *qp_context; 541 u32 sqd_event = 0; 542 u8 status; 543 int err = -EINVAL; 544 545 mutex_lock(&qp->mutex); 546 547 if (attr_mask & IB_QP_CUR_STATE) { 548 cur_state = attr->cur_qp_state; 549 } else { 550 spin_lock_irq(&qp->sq.lock); 551 spin_lock(&qp->rq.lock); 552 cur_state = qp->state; 553 spin_unlock(&qp->rq.lock); 554 spin_unlock_irq(&qp->sq.lock); 555 } 556 557 new_state = attr_mask & IB_QP_STATE ? 
attr->qp_state : cur_state; 558 559 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) { 560 mthca_dbg(dev, "Bad QP transition (transport %d) " 561 "%d->%d with attr 0x%08x\n", 562 qp->transport, cur_state, new_state, 563 attr_mask); 564 goto out; 565 } 566 567 if ((attr_mask & IB_QP_PKEY_INDEX) && 568 attr->pkey_index >= dev->limits.pkey_table_len) { 569 mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n", 570 attr->pkey_index, dev->limits.pkey_table_len-1); 571 goto out; 572 } 573 574 if ((attr_mask & IB_QP_PORT) && 575 (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) { 576 mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num); 577 goto out; 578 } 579 580 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && 581 attr->max_rd_atomic > dev->limits.max_qp_init_rdma) { 582 mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n", 583 attr->max_rd_atomic, dev->limits.max_qp_init_rdma); 584 goto out; 585 } 586 587 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && 588 attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) { 589 mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n", 590 attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift); 591 goto out; 592 } 593 594 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 595 if (IS_ERR(mailbox)) { 596 err = PTR_ERR(mailbox); 597 goto out; 598 } 599 qp_param = mailbox->buf; 600 qp_context = &qp_param->context; 601 memset(qp_param, 0, sizeof *qp_param); 602 603 qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) | 604 (to_mthca_st(qp->transport) << 16)); 605 qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE); 606 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) 607 qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11); 608 else { 609 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE); 610 switch (attr->path_mig_state) { 611 case IB_MIG_MIGRATED: 612 qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11); 613 break; 614 case IB_MIG_REARM: 615 qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11); 616 break; 617 case IB_MIG_ARMED: 618 qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11); 619 break; 620 } 621 } 622 623 /* leave tavor_sched_queue as 0 */ 624 625 if (qp->transport == MLX || qp->transport == UD) 626 qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11; 627 else if (attr_mask & IB_QP_PATH_MTU) { 628 if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) { 629 mthca_dbg(dev, "path MTU (%u) is invalid\n", 630 attr->path_mtu); 631 goto out_mailbox; 632 } 633 qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31; 634 } 635 636 if (mthca_is_memfree(dev)) { 637 if (qp->rq.max) 638 qp_context->rq_size_stride = long_log2(qp->rq.max) << 3; 639 qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; 640 641 if (qp->sq.max) 642 qp_context->sq_size_stride = long_log2(qp->sq.max) << 3; 643 qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; 644 } 645 646 /* leave arbel_sched_queue as 0 */ 647 648 if (qp->ibqp.uobject) 649 qp_context->usr_page = 650 cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index); 651 else 652 qp_context->usr_page = cpu_to_be32(dev->driver_uar.index); 653 qp_context->local_qpn = cpu_to_be32(qp->qpn); 654 if (attr_mask & IB_QP_DEST_QPN) { 655 qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num); 656 } 657 658 if (qp->transport == MLX) 659 qp_context->pri_path.port_pkey |= 660 cpu_to_be32(qp->port << 24); 661 else { 662 if (attr_mask & IB_QP_PORT) { 663 qp_context->pri_path.port_pkey |= 664 
cpu_to_be32(attr->port_num << 24); 665 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM); 666 } 667 } 668 669 if (attr_mask & IB_QP_PKEY_INDEX) { 670 qp_context->pri_path.port_pkey |= 671 cpu_to_be32(attr->pkey_index); 672 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX); 673 } 674 675 if (attr_mask & IB_QP_RNR_RETRY) { 676 qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry = 677 attr->rnr_retry << 5; 678 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY | 679 MTHCA_QP_OPTPAR_ALT_RNR_RETRY); 680 } 681 682 if (attr_mask & IB_QP_AV) { 683 if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path, 684 attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) 685 goto out_mailbox; 686 687 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH); 688 } 689 690 if (attr_mask & IB_QP_TIMEOUT) { 691 qp_context->pri_path.ackto = attr->timeout << 3; 692 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT); 693 } 694 695 if (attr_mask & IB_QP_ALT_PATH) { 696 if (attr->alt_pkey_index >= dev->limits.pkey_table_len) { 697 mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n", 698 attr->alt_pkey_index, dev->limits.pkey_table_len-1); 699 goto out_mailbox; 700 } 701 702 if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) { 703 mthca_dbg(dev, "Alternate port number (%u) is invalid\n", 704 attr->alt_port_num); 705 goto out_mailbox; 706 } 707 708 if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path, 709 attr->alt_ah_attr.port_num)) 710 goto out_mailbox; 711 712 qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index | 713 attr->alt_port_num << 24); 714 qp_context->alt_path.ackto = attr->alt_timeout << 3; 715 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH); 716 } 717 718 /* leave rdd as 0 */ 719 qp_context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pd_num); 720 /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */ 721 qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey); 722 qp_context->params1 = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) | 723 (MTHCA_FLIGHT_LIMIT << 24) | 724 MTHCA_QP_BIT_SWE); 725 if (qp->sq_policy == IB_SIGNAL_ALL_WR) 726 qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC); 727 if (attr_mask & IB_QP_RETRY_CNT) { 728 qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16); 729 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT); 730 } 731 732 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { 733 if (attr->max_rd_atomic) { 734 qp_context->params1 |= 735 cpu_to_be32(MTHCA_QP_BIT_SRE | 736 MTHCA_QP_BIT_SAE); 737 qp_context->params1 |= 738 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21); 739 } 740 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX); 741 } 742 743 if (attr_mask & IB_QP_SQ_PSN) 744 qp_context->next_send_psn = cpu_to_be32(attr->sq_psn); 745 qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn); 746 747 if (mthca_is_memfree(dev)) { 748 qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset); 749 qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index); 750 } 751 752 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { 753 if (attr->max_dest_rd_atomic) 754 qp_context->params2 |= 755 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21); 756 757 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX); 758 } 759 760 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) { 761 qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask); 762 
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE | 763 MTHCA_QP_OPTPAR_RRE | 764 MTHCA_QP_OPTPAR_RAE); 765 } 766 767 qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC); 768 769 if (ibqp->srq) 770 qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC); 771 772 if (attr_mask & IB_QP_MIN_RNR_TIMER) { 773 qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); 774 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT); 775 } 776 if (attr_mask & IB_QP_RQ_PSN) 777 qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); 778 779 qp_context->ra_buff_indx = 780 cpu_to_be32(dev->qp_table.rdb_base + 781 ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE << 782 dev->qp_table.rdb_shift)); 783 784 qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn); 785 786 if (mthca_is_memfree(dev)) 787 qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index); 788 789 if (attr_mask & IB_QP_QKEY) { 790 qp_context->qkey = cpu_to_be32(attr->qkey); 791 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY); 792 } 793 794 if (ibqp->srq) 795 qp_context->srqn = cpu_to_be32(1 << 24 | 796 to_msrq(ibqp->srq)->srqn); 797 798 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && 799 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && 800 attr->en_sqd_async_notify) 801 sqd_event = 1 << 31; 802 803 err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0, 804 mailbox, sqd_event, &status); 805 if (err) 806 goto out_mailbox; 807 if (status) { 808 mthca_warn(dev, "modify QP %d->%d returned status %02x.\n", 809 cur_state, new_state, status); 810 err = -EINVAL; 811 goto out_mailbox; 812 } 813 814 qp->state = new_state; 815 if (attr_mask & IB_QP_ACCESS_FLAGS) 816 qp->atomic_rd_en = attr->qp_access_flags; 817 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 818 qp->resp_depth = attr->max_dest_rd_atomic; 819 if (attr_mask & IB_QP_PORT) 820 qp->port = attr->port_num; 821 if (attr_mask & IB_QP_ALT_PATH) 822 qp->alt_port = attr->alt_port_num; 823 824 if (is_sqp(dev, qp)) 825 store_attrs(to_msqp(qp), attr, attr_mask); 826 827 /* 828 * If we moved QP0 to RTR, bring the IB link up; if we moved 829 * QP0 to RESET or ERROR, bring the link back down. 830 */ 831 if (is_qp0(dev, qp)) { 832 if (cur_state != IB_QPS_RTR && 833 new_state == IB_QPS_RTR) 834 init_port(dev, qp->port); 835 836 if (cur_state != IB_QPS_RESET && 837 cur_state != IB_QPS_ERR && 838 (new_state == IB_QPS_RESET || 839 new_state == IB_QPS_ERR)) 840 mthca_CLOSE_IB(dev, qp->port, &status); 841 } 842 843 /* 844 * If we moved a kernel QP to RESET, clean up all old CQ 845 * entries and reinitialize the QP. 846 */ 847 if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { 848 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, 849 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 850 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) 851 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, 852 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 853 854 mthca_wq_reset(&qp->sq); 855 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); 856 857 mthca_wq_reset(&qp->rq); 858 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); 859 860 if (mthca_is_memfree(dev)) { 861 *qp->sq.db = 0; 862 *qp->rq.db = 0; 863 } 864 } 865 866out_mailbox: 867 mthca_free_mailbox(dev, mailbox); 868 869out: 870 mutex_unlock(&qp->mutex); 871 return err; 872} 873 874static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz) 875{ 876 /* 877 * Calculate the maximum size of WQE s/g segments, excluding 878 * the next segment and other non-data segments. 
879 */ 880 int max_data_size = desc_sz - sizeof (struct mthca_next_seg); 881 882 switch (qp->transport) { 883 case MLX: 884 max_data_size -= 2 * sizeof (struct mthca_data_seg); 885 break; 886 887 case UD: 888 if (mthca_is_memfree(dev)) 889 max_data_size -= sizeof (struct mthca_arbel_ud_seg); 890 else 891 max_data_size -= sizeof (struct mthca_tavor_ud_seg); 892 break; 893 894 default: 895 max_data_size -= sizeof (struct mthca_raddr_seg); 896 break; 897 } 898 899 return max_data_size; 900} 901 902static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size) 903{ 904 /* We don't support inline data for kernel QPs (yet). */ 905 return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0; 906} 907 908static void mthca_adjust_qp_caps(struct mthca_dev *dev, 909 struct mthca_pd *pd, 910 struct mthca_qp *qp) 911{ 912 int max_data_size = mthca_max_data_size(dev, qp, 913 min(dev->limits.max_desc_sz, 914 1 << qp->sq.wqe_shift)); 915 916 qp->max_inline_data = mthca_max_inline_data(pd, max_data_size); 917 918 qp->sq.max_gs = min_t(int, dev->limits.max_sg, 919 max_data_size / sizeof (struct mthca_data_seg)); 920 qp->rq.max_gs = min_t(int, dev->limits.max_sg, 921 (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) - 922 sizeof (struct mthca_next_seg)) / 923 sizeof (struct mthca_data_seg)); 924} 925 926/* 927 * Allocate and register buffer for WQEs. qp->rq.max, sq.max, 928 * rq.max_gs and sq.max_gs must all be assigned. 929 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and 930 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and 931 * queue) 932 */ 933static int mthca_alloc_wqe_buf(struct mthca_dev *dev, 934 struct mthca_pd *pd, 935 struct mthca_qp *qp) 936{ 937 int size; 938 int err = -ENOMEM; 939 940 size = sizeof (struct mthca_next_seg) + 941 qp->rq.max_gs * sizeof (struct mthca_data_seg); 942 943 if (size > dev->limits.max_desc_sz) 944 return -EINVAL; 945 946 for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size; 947 qp->rq.wqe_shift++) 948 ; /* nothing */ 949 950 size = qp->sq.max_gs * sizeof (struct mthca_data_seg); 951 switch (qp->transport) { 952 case MLX: 953 size += 2 * sizeof (struct mthca_data_seg); 954 break; 955 956 case UD: 957 size += mthca_is_memfree(dev) ? 958 sizeof (struct mthca_arbel_ud_seg) : 959 sizeof (struct mthca_tavor_ud_seg); 960 break; 961 962 case UC: 963 size += sizeof (struct mthca_raddr_seg); 964 break; 965 966 case RC: 967 size += sizeof (struct mthca_raddr_seg); 968 /* 969 * An atomic op will require an atomic segment, a 970 * remote address segment and one scatter entry. 971 */ 972 size = max_t(int, size, 973 sizeof (struct mthca_atomic_seg) + 974 sizeof (struct mthca_raddr_seg) + 975 sizeof (struct mthca_data_seg)); 976 break; 977 978 default: 979 break; 980 } 981 982 /* Make sure that we have enough space for a bind request */ 983 size = max_t(int, size, sizeof (struct mthca_bind_seg)); 984 985 size += sizeof (struct mthca_next_seg); 986 987 if (size > dev->limits.max_desc_sz) 988 return -EINVAL; 989 990 for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size; 991 qp->sq.wqe_shift++) 992 ; /* nothing */ 993 994 qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift, 995 1 << qp->sq.wqe_shift); 996 997 /* 998 * If this is a userspace QP, we don't actually have to 999 * allocate anything. All we need is to calculate the WQE 1000 * sizes and the send_wqe_offset, so we're done now. 
1001 */ 1002 if (pd->ibpd.uobject) 1003 return 0; 1004 1005 size = PAGE_ALIGN(qp->send_wqe_offset + 1006 (qp->sq.max << qp->sq.wqe_shift)); 1007 1008 qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64), 1009 GFP_KERNEL); 1010 if (!qp->wrid) 1011 goto err_out; 1012 1013 err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE, 1014 &qp->queue, &qp->is_direct, pd, 0, &qp->mr); 1015 if (err) 1016 goto err_out; 1017 1018 return 0; 1019 1020err_out: 1021 kfree(qp->wrid); 1022 return err; 1023} 1024 1025static void mthca_free_wqe_buf(struct mthca_dev *dev, 1026 struct mthca_qp *qp) 1027{ 1028 mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset + 1029 (qp->sq.max << qp->sq.wqe_shift)), 1030 &qp->queue, qp->is_direct, &qp->mr); 1031 kfree(qp->wrid); 1032} 1033 1034static int mthca_map_memfree(struct mthca_dev *dev, 1035 struct mthca_qp *qp) 1036{ 1037 int ret; 1038 1039 if (mthca_is_memfree(dev)) { 1040 ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn); 1041 if (ret) 1042 return ret; 1043 1044 ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn); 1045 if (ret) 1046 goto err_qpc; 1047 1048 ret = mthca_table_get(dev, dev->qp_table.rdb_table, 1049 qp->qpn << dev->qp_table.rdb_shift); 1050 if (ret) 1051 goto err_eqpc; 1052 1053 } 1054 1055 return 0; 1056 1057err_eqpc: 1058 mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); 1059 1060err_qpc: 1061 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); 1062 1063 return ret; 1064} 1065 1066static void mthca_unmap_memfree(struct mthca_dev *dev, 1067 struct mthca_qp *qp) 1068{ 1069 mthca_table_put(dev, dev->qp_table.rdb_table, 1070 qp->qpn << dev->qp_table.rdb_shift); 1071 mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); 1072 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); 1073} 1074 1075static int mthca_alloc_memfree(struct mthca_dev *dev, 1076 struct mthca_qp *qp) 1077{ 1078 int ret = 0; 1079 1080 if (mthca_is_memfree(dev)) { 1081 qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ, 1082 qp->qpn, &qp->rq.db); 1083 if (qp->rq.db_index < 0) 1084 return ret; 1085 1086 qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ, 1087 qp->qpn, &qp->sq.db); 1088 if (qp->sq.db_index < 0) 1089 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); 1090 } 1091 1092 return ret; 1093} 1094 1095static void mthca_free_memfree(struct mthca_dev *dev, 1096 struct mthca_qp *qp) 1097{ 1098 if (mthca_is_memfree(dev)) { 1099 mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index); 1100 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); 1101 } 1102} 1103 1104static int mthca_alloc_qp_common(struct mthca_dev *dev, 1105 struct mthca_pd *pd, 1106 struct mthca_cq *send_cq, 1107 struct mthca_cq *recv_cq, 1108 enum ib_sig_type send_policy, 1109 struct mthca_qp *qp) 1110{ 1111 int ret; 1112 int i; 1113 1114 qp->refcount = 1; 1115 init_waitqueue_head(&qp->wait); 1116 mutex_init(&qp->mutex); 1117 qp->state = IB_QPS_RESET; 1118 qp->atomic_rd_en = 0; 1119 qp->resp_depth = 0; 1120 qp->sq_policy = send_policy; 1121 mthca_wq_reset(&qp->sq); 1122 mthca_wq_reset(&qp->rq); 1123 1124 spin_lock_init(&qp->sq.lock); 1125 spin_lock_init(&qp->rq.lock); 1126 1127 ret = mthca_map_memfree(dev, qp); 1128 if (ret) 1129 return ret; 1130 1131 ret = mthca_alloc_wqe_buf(dev, pd, qp); 1132 if (ret) { 1133 mthca_unmap_memfree(dev, qp); 1134 return ret; 1135 } 1136 1137 mthca_adjust_qp_caps(dev, pd, qp); 1138 1139 /* 1140 * If this is a userspace QP, we're done now. The doorbells 1141 * will be allocated and buffers will be initialized in 1142 * userspace. 
1143 */ 1144 if (pd->ibpd.uobject) 1145 return 0; 1146 1147 ret = mthca_alloc_memfree(dev, qp); 1148 if (ret) { 1149 mthca_free_wqe_buf(dev, qp); 1150 mthca_unmap_memfree(dev, qp); 1151 return ret; 1152 } 1153 1154 if (mthca_is_memfree(dev)) { 1155 struct mthca_next_seg *next; 1156 struct mthca_data_seg *scatter; 1157 int size = (sizeof (struct mthca_next_seg) + 1158 qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16; 1159 1160 for (i = 0; i < qp->rq.max; ++i) { 1161 next = get_recv_wqe(qp, i); 1162 next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) << 1163 qp->rq.wqe_shift); 1164 next->ee_nds = cpu_to_be32(size); 1165 1166 for (scatter = (void *) (next + 1); 1167 (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift); 1168 ++scatter) 1169 scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); 1170 } 1171 1172 for (i = 0; i < qp->sq.max; ++i) { 1173 next = get_send_wqe(qp, i); 1174 next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) << 1175 qp->sq.wqe_shift) + 1176 qp->send_wqe_offset); 1177 } 1178 } 1179 1180 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); 1181 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); 1182 1183 return 0; 1184} 1185 1186static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap, 1187 struct mthca_pd *pd, struct mthca_qp *qp) 1188{ 1189 int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz); 1190 1191 /* Sanity check QP size before proceeding */ 1192 if (cap->max_send_wr > dev->limits.max_wqes || 1193 cap->max_recv_wr > dev->limits.max_wqes || 1194 cap->max_send_sge > dev->limits.max_sg || 1195 cap->max_recv_sge > dev->limits.max_sg || 1196 cap->max_inline_data > mthca_max_inline_data(pd, max_data_size)) 1197 return -EINVAL; 1198 1199 /* 1200 * For MLX transport we need 2 extra S/G entries: 1201 * one for the header and one for the checksum at the end 1202 */ 1203 if (qp->transport == MLX && cap->max_recv_sge + 2 > dev->limits.max_sg) 1204 return -EINVAL; 1205 1206 if (mthca_is_memfree(dev)) { 1207 qp->rq.max = cap->max_recv_wr ? 1208 roundup_pow_of_two(cap->max_recv_wr) : 0; 1209 qp->sq.max = cap->max_send_wr ? 1210 roundup_pow_of_two(cap->max_send_wr) : 0; 1211 } else { 1212 qp->rq.max = cap->max_recv_wr; 1213 qp->sq.max = cap->max_send_wr; 1214 } 1215 1216 qp->rq.max_gs = cap->max_recv_sge; 1217 qp->sq.max_gs = max_t(int, cap->max_send_sge, 1218 ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE, 1219 MTHCA_INLINE_CHUNK_SIZE) / 1220 sizeof (struct mthca_data_seg)); 1221 1222 return 0; 1223} 1224 1225int mthca_alloc_qp(struct mthca_dev *dev, 1226 struct mthca_pd *pd, 1227 struct mthca_cq *send_cq, 1228 struct mthca_cq *recv_cq, 1229 enum ib_qp_type type, 1230 enum ib_sig_type send_policy, 1231 struct ib_qp_cap *cap, 1232 struct mthca_qp *qp) 1233{ 1234 int err; 1235 1236 switch (type) { 1237 case IB_QPT_RC: qp->transport = RC; break; 1238 case IB_QPT_UC: qp->transport = UC; break; 1239 case IB_QPT_UD: qp->transport = UD; break; 1240 default: return -EINVAL; 1241 } 1242 1243 err = mthca_set_qp_size(dev, cap, pd, qp); 1244 if (err) 1245 return err; 1246 1247 qp->qpn = mthca_alloc(&dev->qp_table.alloc); 1248 if (qp->qpn == -1) 1249 return -ENOMEM; 1250 1251 /* initialize port to zero for error-catching. 
*/ 1252 qp->port = 0; 1253 1254 err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, 1255 send_policy, qp); 1256 if (err) { 1257 mthca_free(&dev->qp_table.alloc, qp->qpn); 1258 return err; 1259 } 1260 1261 spin_lock_irq(&dev->qp_table.lock); 1262 mthca_array_set(&dev->qp_table.qp, 1263 qp->qpn & (dev->limits.num_qps - 1), qp); 1264 spin_unlock_irq(&dev->qp_table.lock); 1265 1266 return 0; 1267} 1268 1269static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) 1270{ 1271 if (send_cq == recv_cq) 1272 spin_lock_irq(&send_cq->lock); 1273 else if (send_cq->cqn < recv_cq->cqn) { 1274 spin_lock_irq(&send_cq->lock); 1275 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); 1276 } else { 1277 spin_lock_irq(&recv_cq->lock); 1278 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); 1279 } 1280} 1281 1282static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) 1283{ 1284 if (send_cq == recv_cq) 1285 spin_unlock_irq(&send_cq->lock); 1286 else if (send_cq->cqn < recv_cq->cqn) { 1287 spin_unlock(&recv_cq->lock); 1288 spin_unlock_irq(&send_cq->lock); 1289 } else { 1290 spin_unlock(&send_cq->lock); 1291 spin_unlock_irq(&recv_cq->lock); 1292 } 1293} 1294 1295int mthca_alloc_sqp(struct mthca_dev *dev, 1296 struct mthca_pd *pd, 1297 struct mthca_cq *send_cq, 1298 struct mthca_cq *recv_cq, 1299 enum ib_sig_type send_policy, 1300 struct ib_qp_cap *cap, 1301 int qpn, 1302 int port, 1303 struct mthca_sqp *sqp) 1304{ 1305 u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1; 1306 int err; 1307 1308 sqp->qp.transport = MLX; 1309 err = mthca_set_qp_size(dev, cap, pd, &sqp->qp); 1310 if (err) 1311 return err; 1312 1313 sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE; 1314 sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size, 1315 &sqp->header_dma, GFP_KERNEL); 1316 if (!sqp->header_buf) 1317 return -ENOMEM; 1318 1319 spin_lock_irq(&dev->qp_table.lock); 1320 if (mthca_array_get(&dev->qp_table.qp, mqpn)) 1321 err = -EBUSY; 1322 else 1323 mthca_array_set(&dev->qp_table.qp, mqpn, sqp); 1324 spin_unlock_irq(&dev->qp_table.lock); 1325 1326 if (err) 1327 goto err_out; 1328 1329 sqp->qp.port = port; 1330 sqp->qp.qpn = mqpn; 1331 sqp->qp.transport = MLX; 1332 1333 err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, 1334 send_policy, &sqp->qp); 1335 if (err) 1336 goto err_out_free; 1337 1338 atomic_inc(&pd->sqp_count); 1339 1340 return 0; 1341 1342 err_out_free: 1343 /* 1344 * Lock CQs here, so that CQ polling code can do QP lookup 1345 * without taking a lock. 1346 */ 1347 mthca_lock_cqs(send_cq, recv_cq); 1348 1349 spin_lock(&dev->qp_table.lock); 1350 mthca_array_clear(&dev->qp_table.qp, mqpn); 1351 spin_unlock(&dev->qp_table.lock); 1352 1353 mthca_unlock_cqs(send_cq, recv_cq); 1354 1355 err_out: 1356 dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size, 1357 sqp->header_buf, sqp->header_dma); 1358 1359 return err; 1360} 1361 1362static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp) 1363{ 1364 int c; 1365 1366 spin_lock_irq(&dev->qp_table.lock); 1367 c = qp->refcount; 1368 spin_unlock_irq(&dev->qp_table.lock); 1369 1370 return c; 1371} 1372 1373void mthca_free_qp(struct mthca_dev *dev, 1374 struct mthca_qp *qp) 1375{ 1376 u8 status; 1377 struct mthca_cq *send_cq; 1378 struct mthca_cq *recv_cq; 1379 1380 send_cq = to_mcq(qp->ibqp.send_cq); 1381 recv_cq = to_mcq(qp->ibqp.recv_cq); 1382 1383 /* 1384 * Lock CQs here, so that CQ polling code can do QP lookup 1385 * without taking a lock. 
1386 */ 1387 mthca_lock_cqs(send_cq, recv_cq); 1388 1389 spin_lock(&dev->qp_table.lock); 1390 mthca_array_clear(&dev->qp_table.qp, 1391 qp->qpn & (dev->limits.num_qps - 1)); 1392 --qp->refcount; 1393 spin_unlock(&dev->qp_table.lock); 1394 1395 mthca_unlock_cqs(send_cq, recv_cq); 1396 1397 wait_event(qp->wait, !get_qp_refcount(dev, qp)); 1398 1399 if (qp->state != IB_QPS_RESET) 1400 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, 1401 NULL, 0, &status); 1402 1403 /* 1404 * If this is a userspace QP, the buffers, MR, CQs and so on 1405 * will be cleaned up in userspace, so all we have to do is 1406 * unref the mem-free tables and free the QPN in our table. 1407 */ 1408 if (!qp->ibqp.uobject) { 1409 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, 1410 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 1411 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) 1412 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, 1413 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 1414 1415 mthca_free_memfree(dev, qp); 1416 mthca_free_wqe_buf(dev, qp); 1417 } 1418 1419 mthca_unmap_memfree(dev, qp); 1420 1421 if (is_sqp(dev, qp)) { 1422 atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count)); 1423 dma_free_coherent(&dev->pdev->dev, 1424 to_msqp(qp)->header_buf_size, 1425 to_msqp(qp)->header_buf, 1426 to_msqp(qp)->header_dma); 1427 } else 1428 mthca_free(&dev->qp_table.alloc, qp->qpn); 1429} 1430 1431/* Create UD header for an MLX send and build a data segment for it */ 1432static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, 1433 int ind, struct ib_send_wr *wr, 1434 struct mthca_mlx_seg *mlx, 1435 struct mthca_data_seg *data) 1436{ 1437 int header_size; 1438 int err; 1439 u16 pkey; 1440 1441 ib_ud_header_init(256, /* assume a MAD */ 1442 mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 1443 &sqp->ud_header); 1444 1445 err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header); 1446 if (err) 1447 return err; 1448 mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); 1449 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | 1450 (sqp->ud_header.lrh.destination_lid == 1451 IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) | 1452 (sqp->ud_header.lrh.service_level << 8)); 1453 mlx->rlid = sqp->ud_header.lrh.destination_lid; 1454 mlx->vcrc = 0; 1455 1456 switch (wr->opcode) { 1457 case IB_WR_SEND: 1458 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; 1459 sqp->ud_header.immediate_present = 0; 1460 break; 1461 case IB_WR_SEND_WITH_IMM: 1462 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; 1463 sqp->ud_header.immediate_present = 1; 1464 sqp->ud_header.immediate_data = wr->imm_data; 1465 break; 1466 default: 1467 return -EINVAL; 1468 } 1469 1470 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; 1471 if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) 1472 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; 1473 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); 1474 if (!sqp->qp.ibqp.qp_num) 1475 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, 1476 sqp->pkey_index, &pkey); 1477 else 1478 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, 1479 wr->wr.ud.pkey_index, &pkey); 1480 sqp->ud_header.bth.pkey = cpu_to_be16(pkey); 1481 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); 1482 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); 1483 sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? 
1484 sqp->qkey : wr->wr.ud.remote_qkey); 1485 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); 1486 1487 header_size = ib_ud_header_pack(&sqp->ud_header, 1488 sqp->header_buf + 1489 ind * MTHCA_UD_HEADER_SIZE); 1490 1491 data->byte_count = cpu_to_be32(header_size); 1492 data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey); 1493 data->addr = cpu_to_be64(sqp->header_dma + 1494 ind * MTHCA_UD_HEADER_SIZE); 1495 1496 return 0; 1497} 1498 1499static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, 1500 struct ib_cq *ib_cq) 1501{ 1502 unsigned cur; 1503 struct mthca_cq *cq; 1504 1505 cur = wq->head - wq->tail; 1506 if (likely(cur + nreq < wq->max)) 1507 return 0; 1508 1509 cq = to_mcq(ib_cq); 1510 spin_lock(&cq->lock); 1511 cur = wq->head - wq->tail; 1512 spin_unlock(&cq->lock); 1513 1514 return cur + nreq >= wq->max; 1515} 1516 1517int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 1518 struct ib_send_wr **bad_wr) 1519{ 1520 struct mthca_dev *dev = to_mdev(ibqp->device); 1521 struct mthca_qp *qp = to_mqp(ibqp); 1522 void *wqe; 1523 void *prev_wqe; 1524 unsigned long flags; 1525 int err = 0; 1526 int nreq; 1527 int i; 1528 int size; 1529 int size0 = 0; 1530 u32 f0; 1531 int ind; 1532 u8 op0 = 0; 1533 1534 spin_lock_irqsave(&qp->sq.lock, flags); 1535 1536 /* XXX check that state is OK to post send */ 1537 1538 ind = qp->sq.next_ind; 1539 1540 for (nreq = 0; wr; ++nreq, wr = wr->next) { 1541 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { 1542 mthca_err(dev, "SQ %06x full (%u head, %u tail," 1543 " %d max, %d nreq)\n", qp->qpn, 1544 qp->sq.head, qp->sq.tail, 1545 qp->sq.max, nreq); 1546 err = -ENOMEM; 1547 *bad_wr = wr; 1548 goto out; 1549 } 1550 1551 wqe = get_send_wqe(qp, ind); 1552 prev_wqe = qp->sq.last; 1553 qp->sq.last = wqe; 1554 1555 ((struct mthca_next_seg *) wqe)->nda_op = 0; 1556 ((struct mthca_next_seg *) wqe)->ee_nds = 0; 1557 ((struct mthca_next_seg *) wqe)->flags = 1558 ((wr->send_flags & IB_SEND_SIGNALED) ? 1559 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | 1560 ((wr->send_flags & IB_SEND_SOLICITED) ? 
1561 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | 1562 cpu_to_be32(1); 1563 if (wr->opcode == IB_WR_SEND_WITH_IMM || 1564 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) 1565 ((struct mthca_next_seg *) wqe)->imm = wr->imm_data; 1566 1567 wqe += sizeof (struct mthca_next_seg); 1568 size = sizeof (struct mthca_next_seg) / 16; 1569 1570 switch (qp->transport) { 1571 case RC: 1572 switch (wr->opcode) { 1573 case IB_WR_ATOMIC_CMP_AND_SWP: 1574 case IB_WR_ATOMIC_FETCH_AND_ADD: 1575 ((struct mthca_raddr_seg *) wqe)->raddr = 1576 cpu_to_be64(wr->wr.atomic.remote_addr); 1577 ((struct mthca_raddr_seg *) wqe)->rkey = 1578 cpu_to_be32(wr->wr.atomic.rkey); 1579 ((struct mthca_raddr_seg *) wqe)->reserved = 0; 1580 1581 wqe += sizeof (struct mthca_raddr_seg); 1582 1583 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { 1584 ((struct mthca_atomic_seg *) wqe)->swap_add = 1585 cpu_to_be64(wr->wr.atomic.swap); 1586 ((struct mthca_atomic_seg *) wqe)->compare = 1587 cpu_to_be64(wr->wr.atomic.compare_add); 1588 } else { 1589 ((struct mthca_atomic_seg *) wqe)->swap_add = 1590 cpu_to_be64(wr->wr.atomic.compare_add); 1591 ((struct mthca_atomic_seg *) wqe)->compare = 0; 1592 } 1593 1594 wqe += sizeof (struct mthca_atomic_seg); 1595 size += (sizeof (struct mthca_raddr_seg) + 1596 sizeof (struct mthca_atomic_seg)) / 16; 1597 break; 1598 1599 case IB_WR_RDMA_WRITE: 1600 case IB_WR_RDMA_WRITE_WITH_IMM: 1601 case IB_WR_RDMA_READ: 1602 ((struct mthca_raddr_seg *) wqe)->raddr = 1603 cpu_to_be64(wr->wr.rdma.remote_addr); 1604 ((struct mthca_raddr_seg *) wqe)->rkey = 1605 cpu_to_be32(wr->wr.rdma.rkey); 1606 ((struct mthca_raddr_seg *) wqe)->reserved = 0; 1607 wqe += sizeof (struct mthca_raddr_seg); 1608 size += sizeof (struct mthca_raddr_seg) / 16; 1609 break; 1610 1611 default: 1612 /* No extra segments required for sends */ 1613 break; 1614 } 1615 1616 break; 1617 1618 case UC: 1619 switch (wr->opcode) { 1620 case IB_WR_RDMA_WRITE: 1621 case IB_WR_RDMA_WRITE_WITH_IMM: 1622 ((struct mthca_raddr_seg *) wqe)->raddr = 1623 cpu_to_be64(wr->wr.rdma.remote_addr); 1624 ((struct mthca_raddr_seg *) wqe)->rkey = 1625 cpu_to_be32(wr->wr.rdma.rkey); 1626 ((struct mthca_raddr_seg *) wqe)->reserved = 0; 1627 wqe += sizeof (struct mthca_raddr_seg); 1628 size += sizeof (struct mthca_raddr_seg) / 16; 1629 break; 1630 1631 default: 1632 /* No extra segments required for sends */ 1633 break; 1634 } 1635 1636 break; 1637 1638 case UD: 1639 ((struct mthca_tavor_ud_seg *) wqe)->lkey = 1640 cpu_to_be32(to_mah(wr->wr.ud.ah)->key); 1641 ((struct mthca_tavor_ud_seg *) wqe)->av_addr = 1642 cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma); 1643 ((struct mthca_tavor_ud_seg *) wqe)->dqpn = 1644 cpu_to_be32(wr->wr.ud.remote_qpn); 1645 ((struct mthca_tavor_ud_seg *) wqe)->qkey = 1646 cpu_to_be32(wr->wr.ud.remote_qkey); 1647 1648 wqe += sizeof (struct mthca_tavor_ud_seg); 1649 size += sizeof (struct mthca_tavor_ud_seg) / 16; 1650 break; 1651 1652 case MLX: 1653 err = build_mlx_header(dev, to_msqp(qp), ind, wr, 1654 wqe - sizeof (struct mthca_next_seg), 1655 wqe); 1656 if (err) { 1657 *bad_wr = wr; 1658 goto out; 1659 } 1660 wqe += sizeof (struct mthca_data_seg); 1661 size += sizeof (struct mthca_data_seg) / 16; 1662 break; 1663 } 1664 1665 if (wr->num_sge > qp->sq.max_gs) { 1666 mthca_err(dev, "too many gathers\n"); 1667 err = -EINVAL; 1668 *bad_wr = wr; 1669 goto out; 1670 } 1671 1672 for (i = 0; i < wr->num_sge; ++i) { 1673 ((struct mthca_data_seg *) wqe)->byte_count = 1674 cpu_to_be32(wr->sg_list[i].length); 1675 ((struct mthca_data_seg *) wqe)->lkey = 1676 
cpu_to_be32(wr->sg_list[i].lkey); 1677 ((struct mthca_data_seg *) wqe)->addr = 1678 cpu_to_be64(wr->sg_list[i].addr); 1679 wqe += sizeof (struct mthca_data_seg); 1680 size += sizeof (struct mthca_data_seg) / 16; 1681 } 1682 1683 /* Add one more inline data segment for ICRC */ 1684 if (qp->transport == MLX) { 1685 ((struct mthca_data_seg *) wqe)->byte_count = 1686 cpu_to_be32((1 << 31) | 4); 1687 ((u32 *) wqe)[1] = 0; 1688 wqe += sizeof (struct mthca_data_seg); 1689 size += sizeof (struct mthca_data_seg) / 16; 1690 } 1691 1692 qp->wrid[ind + qp->rq.max] = wr->wr_id; 1693 1694 if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { 1695 mthca_err(dev, "opcode invalid\n"); 1696 err = -EINVAL; 1697 *bad_wr = wr; 1698 goto out; 1699 } 1700 1701 ((struct mthca_next_seg *) prev_wqe)->nda_op = 1702 cpu_to_be32(((ind << qp->sq.wqe_shift) + 1703 qp->send_wqe_offset) | 1704 mthca_opcode[wr->opcode]); 1705 wmb(); 1706 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1707 cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size | 1708 ((wr->send_flags & IB_SEND_FENCE) ? 1709 MTHCA_NEXT_FENCE : 0)); 1710 1711 if (!size0) { 1712 size0 = size; 1713 op0 = mthca_opcode[wr->opcode]; 1714 f0 = wr->send_flags & IB_SEND_FENCE ? 1715 MTHCA_SEND_DOORBELL_FENCE : 0; 1716 } 1717 1718 ++ind; 1719 if (unlikely(ind >= qp->sq.max)) 1720 ind -= qp->sq.max; 1721 } 1722 1723out: 1724 if (likely(nreq)) { 1725 __be32 doorbell[2]; 1726 1727 doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) + 1728 qp->send_wqe_offset) | f0 | op0); 1729 doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0); 1730 1731 wmb(); 1732 1733 mthca_write64(doorbell, 1734 dev->kar + MTHCA_SEND_DOORBELL, 1735 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 1736 } 1737 1738 qp->sq.next_ind = ind; 1739 qp->sq.head += nreq; 1740 1741 spin_unlock_irqrestore(&qp->sq.lock, flags); 1742 return err; 1743} 1744 1745int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, 1746 struct ib_recv_wr **bad_wr) 1747{ 1748 struct mthca_dev *dev = to_mdev(ibqp->device); 1749 struct mthca_qp *qp = to_mqp(ibqp); 1750 __be32 doorbell[2]; 1751 unsigned long flags; 1752 int err = 0; 1753 int nreq; 1754 int i; 1755 int size; 1756 int size0 = 0; 1757 int ind; 1758 void *wqe; 1759 void *prev_wqe; 1760 1761 spin_lock_irqsave(&qp->rq.lock, flags); 1762 1763 /* XXX check that state is OK to post receive */ 1764 1765 ind = qp->rq.next_ind; 1766 1767 for (nreq = 0; wr; wr = wr->next) { 1768 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { 1769 mthca_err(dev, "RQ %06x full (%u head, %u tail," 1770 " %d max, %d nreq)\n", qp->qpn, 1771 qp->rq.head, qp->rq.tail, 1772 qp->rq.max, nreq); 1773 err = -ENOMEM; 1774 *bad_wr = wr; 1775 goto out; 1776 } 1777 1778 wqe = get_recv_wqe(qp, ind); 1779 prev_wqe = qp->rq.last; 1780 qp->rq.last = wqe; 1781 1782 ((struct mthca_next_seg *) wqe)->nda_op = 0; 1783 ((struct mthca_next_seg *) wqe)->ee_nds = 1784 cpu_to_be32(MTHCA_NEXT_DBD); 1785 ((struct mthca_next_seg *) wqe)->flags = 0; 1786 1787 wqe += sizeof (struct mthca_next_seg); 1788 size = sizeof (struct mthca_next_seg) / 16; 1789 1790 if (unlikely(wr->num_sge > qp->rq.max_gs)) { 1791 err = -EINVAL; 1792 *bad_wr = wr; 1793 goto out; 1794 } 1795 1796 for (i = 0; i < wr->num_sge; ++i) { 1797 ((struct mthca_data_seg *) wqe)->byte_count = 1798 cpu_to_be32(wr->sg_list[i].length); 1799 ((struct mthca_data_seg *) wqe)->lkey = 1800 cpu_to_be32(wr->sg_list[i].lkey); 1801 ((struct mthca_data_seg *) wqe)->addr = 1802 cpu_to_be64(wr->sg_list[i].addr); 1803 wqe += sizeof (struct mthca_data_seg); 1804 
size += sizeof (struct mthca_data_seg) / 16; 1805 } 1806 1807 qp->wrid[ind] = wr->wr_id; 1808 1809 ((struct mthca_next_seg *) prev_wqe)->nda_op = 1810 cpu_to_be32((ind << qp->rq.wqe_shift) | 1); 1811 wmb(); 1812 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1813 cpu_to_be32(MTHCA_NEXT_DBD | size); 1814 1815 if (!size0) 1816 size0 = size; 1817 1818 ++ind; 1819 if (unlikely(ind >= qp->rq.max)) 1820 ind -= qp->rq.max; 1821 1822 ++nreq; 1823 if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { 1824 nreq = 0; 1825 1826 doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); 1827 doorbell[1] = cpu_to_be32(qp->qpn << 8); 1828 1829 wmb(); 1830 1831 mthca_write64(doorbell, 1832 dev->kar + MTHCA_RECEIVE_DOORBELL, 1833 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 1834 1835 qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; 1836 size0 = 0; 1837 } 1838 } 1839 1840out: 1841 if (likely(nreq)) { 1842 doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); 1843 doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq); 1844 1845 wmb(); 1846 1847 mthca_write64(doorbell, 1848 dev->kar + MTHCA_RECEIVE_DOORBELL, 1849 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 1850 } 1851 1852 qp->rq.next_ind = ind; 1853 qp->rq.head += nreq; 1854 1855 spin_unlock_irqrestore(&qp->rq.lock, flags); 1856 return err; 1857} 1858 1859int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 1860 struct ib_send_wr **bad_wr) 1861{ 1862 struct mthca_dev *dev = to_mdev(ibqp->device); 1863 struct mthca_qp *qp = to_mqp(ibqp); 1864 __be32 doorbell[2]; 1865 void *wqe; 1866 void *prev_wqe; 1867 unsigned long flags; 1868 int err = 0; 1869 int nreq; 1870 int i; 1871 int size; 1872 int size0 = 0; 1873 u32 f0; 1874 int ind; 1875 u8 op0 = 0; 1876 1877 spin_lock_irqsave(&qp->sq.lock, flags); 1878 1879 /* XXX check that state is OK to post send */ 1880 1881 ind = qp->sq.head & (qp->sq.max - 1); 1882 1883 for (nreq = 0; wr; ++nreq, wr = wr->next) { 1884 if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) { 1885 nreq = 0; 1886 1887 doorbell[0] = cpu_to_be32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) | 1888 ((qp->sq.head & 0xffff) << 8) | 1889 f0 | op0); 1890 doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0); 1891 1892 qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; 1893 size0 = 0; 1894 1895 /* 1896 * Make sure that descriptors are written before 1897 * doorbell record. 1898 */ 1899 wmb(); 1900 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); 1901 1902 /* 1903 * Make sure doorbell record is written before we 1904 * write MMIO send doorbell. 1905 */ 1906 wmb(); 1907 mthca_write64(doorbell, 1908 dev->kar + MTHCA_SEND_DOORBELL, 1909 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 1910 } 1911 1912 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { 1913 mthca_err(dev, "SQ %06x full (%u head, %u tail," 1914 " %d max, %d nreq)\n", qp->qpn, 1915 qp->sq.head, qp->sq.tail, 1916 qp->sq.max, nreq); 1917 err = -ENOMEM; 1918 *bad_wr = wr; 1919 goto out; 1920 } 1921 1922 wqe = get_send_wqe(qp, ind); 1923 prev_wqe = qp->sq.last; 1924 qp->sq.last = wqe; 1925 1926 ((struct mthca_next_seg *) wqe)->flags = 1927 ((wr->send_flags & IB_SEND_SIGNALED) ? 1928 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | 1929 ((wr->send_flags & IB_SEND_SOLICITED) ? 
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				/*
				 * For fetch & add, the amount to add is
				 * carried in the swap_add field.
				 */
				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
			       to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
			((struct mthca_arbel_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_arbel_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

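		/* One hardware data segment per gather entry */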
		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!size0) {
			size0 = size;
			op0 = mthca_opcode[wr->opcode];
			f0 = wr->send_flags & IB_SEND_FENCE ?
				MTHCA_SEND_DOORBELL_FENCE : 0;
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32((nreq << 24) |
					  ((qp->sq.head & 0xffff) << 8) |
					  f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();
		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}

int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->rq.head, qp->rq.tail,
				  qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

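		/*
		 * Terminate a short scatter list with a zero-length
		 * segment carrying the reserved invalid lkey, which the
		 * HCA takes as end-of-list.
		 */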
		if (i < qp->rq.max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}
out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}

void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
			int index, int *dbd, __be32 *new_wqe)
{
	struct mthca_next_seg *next;

	/*
	 * For SRQs, all WQEs generate a CQE, so we're always at the
	 * end of the doorbell chain.
	 */
	if (qp->ibqp.srq) {
		*new_wqe = 0;
		return;
	}

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;
}

int __devinit mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2,
					    &status);
		if (err)
			goto err_out;
		if (status) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "status %02x, aborting.\n",
				   status);
			err = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}

void mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;
	u8 status;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);
}