1/* 2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. 3 * All rights reserved. 4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35#include <rdma/ib_smi.h> 36 37#include "qib.h" 38#include "qib_mad.h" 39 40static int reply(struct ib_smp *smp) 41{ 42 /* 43 * The verbs framework will handle the directed/LID route 44 * packet changes. 45 */ 46 smp->method = IB_MGMT_METHOD_GET_RESP; 47 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 48 smp->status |= IB_SMP_DIRECTION; 49 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; 50} 51 52static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) 53{ 54 struct ib_mad_send_buf *send_buf; 55 struct ib_mad_agent *agent; 56 struct ib_smp *smp; 57 int ret; 58 unsigned long flags; 59 unsigned long timeout; 60 61 agent = ibp->send_agent; 62 if (!agent) 63 return; 64 65 /* o14-3.2.1 */ 66 if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE)) 67 return; 68 69 /* o14-2 */ 70 if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout)) 71 return; 72 73 send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR, 74 IB_MGMT_MAD_DATA, GFP_ATOMIC); 75 if (IS_ERR(send_buf)) 76 return; 77 78 smp = send_buf->mad; 79 smp->base_version = IB_MGMT_BASE_VERSION; 80 smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; 81 smp->class_version = 1; 82 smp->method = IB_MGMT_METHOD_TRAP; 83 ibp->tid++; 84 smp->tid = cpu_to_be64(ibp->tid); 85 smp->attr_id = IB_SMP_ATTR_NOTICE; 86 /* o14-1: smp->mkey = 0; */ 87 memcpy(smp->data, data, len); 88 89 spin_lock_irqsave(&ibp->lock, flags); 90 if (!ibp->sm_ah) { 91 if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) { 92 struct ib_ah *ah; 93 struct ib_ah_attr attr; 94 95 memset(&attr, 0, sizeof attr); 96 attr.dlid = ibp->sm_lid; 97 attr.port_num = ppd_from_ibp(ibp)->port; 98 ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr); 99 if (IS_ERR(ah)) 100 ret = -EINVAL; 101 else { 102 send_buf->ah = ah; 103 ibp->sm_ah = to_iah(ah); 104 ret = 0; 105 } 106 } else 107 ret = -EINVAL; 108 } else { 109 send_buf->ah = &ibp->sm_ah->ibah; 110 ret = 0; 111 } 112 spin_unlock_irqrestore(&ibp->lock, flags); 113 114 if (!ret) 115 ret = ib_post_send_mad(send_buf, NULL); 
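	/*
	 * If the post succeeds, the o14-2 hold-off below silences further
	 * traps for 4.096 usec * 2^subnet_timeout.  For example,
	 * subnet_timeout == 18 gives (4096 * (1UL << 18)) / 1000 ==
	 * 1073741 usec, roughly 1.07 seconds between traps.
	 */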
116 if (!ret) { 117 /* 4.096 usec. */ 118 timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000; 119 ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout); 120 } else { 121 ib_free_send_mad(send_buf); 122 ibp->trap_timeout = 0; 123 } 124} 125 126/* 127 * Send a bad [PQ]_Key trap (ch. 14.3.8). 128 */ 129void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl, 130 u32 qp1, u32 qp2, __be16 lid1, __be16 lid2) 131{ 132 struct ib_mad_notice_attr data; 133 134 if (trap_num == IB_NOTICE_TRAP_BAD_PKEY) 135 ibp->pkey_violations++; 136 else 137 ibp->qkey_violations++; 138 ibp->n_pkt_drops++; 139 140 /* Send violation trap */ 141 data.generic_type = IB_NOTICE_TYPE_SECURITY; 142 data.prod_type_msb = 0; 143 data.prod_type_lsb = IB_NOTICE_PROD_CA; 144 data.trap_num = trap_num; 145 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 146 data.toggle_count = 0; 147 memset(&data.details, 0, sizeof data.details); 148 data.details.ntc_257_258.lid1 = lid1; 149 data.details.ntc_257_258.lid2 = lid2; 150 data.details.ntc_257_258.key = cpu_to_be32(key); 151 data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1); 152 data.details.ntc_257_258.qp2 = cpu_to_be32(qp2); 153 154 qib_send_trap(ibp, &data, sizeof data); 155} 156 157/* 158 * Send a bad M_Key trap (ch. 14.3.9). 159 */ 160static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp) 161{ 162 struct ib_mad_notice_attr data; 163 164 /* Send violation trap */ 165 data.generic_type = IB_NOTICE_TYPE_SECURITY; 166 data.prod_type_msb = 0; 167 data.prod_type_lsb = IB_NOTICE_PROD_CA; 168 data.trap_num = IB_NOTICE_TRAP_BAD_MKEY; 169 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 170 data.toggle_count = 0; 171 memset(&data.details, 0, sizeof data.details); 172 data.details.ntc_256.lid = data.issuer_lid; 173 data.details.ntc_256.method = smp->method; 174 data.details.ntc_256.attr_id = smp->attr_id; 175 data.details.ntc_256.attr_mod = smp->attr_mod; 176 data.details.ntc_256.mkey = smp->mkey; 177 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 178 u8 hop_cnt; 179 180 data.details.ntc_256.dr_slid = smp->dr_slid; 181 data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE; 182 hop_cnt = smp->hop_cnt; 183 if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) { 184 data.details.ntc_256.dr_trunc_hop |= 185 IB_NOTICE_TRAP_DR_TRUNC; 186 hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path); 187 } 188 data.details.ntc_256.dr_trunc_hop |= hop_cnt; 189 memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path, 190 hop_cnt); 191 } 192 193 qib_send_trap(ibp, &data, sizeof data); 194} 195 196/* 197 * Send a Port Capability Mask Changed trap (ch. 14.3.11). 198 */ 199void qib_cap_mask_chg(struct qib_ibport *ibp) 200{ 201 struct ib_mad_notice_attr data; 202 203 data.generic_type = IB_NOTICE_TYPE_INFO; 204 data.prod_type_msb = 0; 205 data.prod_type_lsb = IB_NOTICE_PROD_CA; 206 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; 207 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 208 data.toggle_count = 0; 209 memset(&data.details, 0, sizeof data.details); 210 data.details.ntc_144.lid = data.issuer_lid; 211 data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags); 212 213 qib_send_trap(ibp, &data, sizeof data); 214} 215 216/* 217 * Send a System Image GUID Changed trap (ch. 14.3.12). 
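 * The notice is sent as trap number 145 (IB_NOTICE_TRAP_SYS_GUID_CHG);
 * its ntc_145 DataDetails carry the issuer LID and the new
 * ib_qib_sys_image_guid, which is already in network byte order.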
218 */ 219void qib_sys_guid_chg(struct qib_ibport *ibp) 220{ 221 struct ib_mad_notice_attr data; 222 223 data.generic_type = IB_NOTICE_TYPE_INFO; 224 data.prod_type_msb = 0; 225 data.prod_type_lsb = IB_NOTICE_PROD_CA; 226 data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG; 227 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 228 data.toggle_count = 0; 229 memset(&data.details, 0, sizeof data.details); 230 data.details.ntc_145.lid = data.issuer_lid; 231 data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid; 232 233 qib_send_trap(ibp, &data, sizeof data); 234} 235 236/* 237 * Send a Node Description Changed trap (ch. 14.3.13). 238 */ 239void qib_node_desc_chg(struct qib_ibport *ibp) 240{ 241 struct ib_mad_notice_attr data; 242 243 data.generic_type = IB_NOTICE_TYPE_INFO; 244 data.prod_type_msb = 0; 245 data.prod_type_lsb = IB_NOTICE_PROD_CA; 246 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; 247 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 248 data.toggle_count = 0; 249 memset(&data.details, 0, sizeof data.details); 250 data.details.ntc_144.lid = data.issuer_lid; 251 data.details.ntc_144.local_changes = 1; 252 data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG; 253 254 qib_send_trap(ibp, &data, sizeof data); 255} 256 257static int subn_get_nodedescription(struct ib_smp *smp, 258 struct ib_device *ibdev) 259{ 260 if (smp->attr_mod) 261 smp->status |= IB_SMP_INVALID_FIELD; 262 263 memcpy(smp->data, ibdev->node_desc, sizeof(smp->data)); 264 265 return reply(smp); 266} 267 268static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev, 269 u8 port) 270{ 271 struct ib_node_info *nip = (struct ib_node_info *)&smp->data; 272 struct qib_devdata *dd = dd_from_ibdev(ibdev); 273 u32 vendor, majrev, minrev; 274 unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ 275 276 /* GUID 0 is illegal */ 277 if (smp->attr_mod || pidx >= dd->num_pports || 278 dd->pport[pidx].guid == 0) 279 smp->status |= IB_SMP_INVALID_FIELD; 280 else 281 nip->port_guid = dd->pport[pidx].guid; 282 283 nip->base_version = 1; 284 nip->class_version = 1; 285 nip->node_type = 1; /* channel adapter */ 286 nip->num_ports = ibdev->phys_port_cnt; 287 /* This is already in network order */ 288 nip->sys_guid = ib_qib_sys_image_guid; 289 nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */ 290 nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd)); 291 nip->device_id = cpu_to_be16(dd->deviceid); 292 majrev = dd->majrev; 293 minrev = dd->minrev; 294 nip->revision = cpu_to_be32((majrev << 16) | minrev); 295 nip->local_port_num = port; 296 vendor = dd->vendorid; 297 nip->vendor_id[0] = QIB_SRC_OUI_1; 298 nip->vendor_id[1] = QIB_SRC_OUI_2; 299 nip->vendor_id[2] = QIB_SRC_OUI_3; 300 301 return reply(smp); 302} 303 304static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, 305 u8 port) 306{ 307 struct qib_devdata *dd = dd_from_ibdev(ibdev); 308 u32 startgx = 8 * be32_to_cpu(smp->attr_mod); 309 __be64 *p = (__be64 *) smp->data; 310 unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ 311 312 /* 32 blocks of 8 64-bit GUIDs per block */ 313 314 memset(smp->data, 0, sizeof(smp->data)); 315 316 if (startgx == 0 && pidx < dd->num_pports) { 317 struct qib_pportdata *ppd = dd->pport + pidx; 318 struct qib_ibport *ibp = &ppd->ibport_data; 319 __be64 g = ppd->guid; 320 unsigned i; 321 322 /* GUID 0 is illegal */ 323 if (g == 0) 324 smp->status |= IB_SMP_INVALID_FIELD; 325 else { 326 /* The first is a copy of the read-only HW GUID. 
*/ 327 p[0] = g; 328 for (i = 1; i < QIB_GUIDS_PER_PORT; i++) 329 p[i] = ibp->guids[i - 1]; 330 } 331 } else 332 smp->status |= IB_SMP_INVALID_FIELD; 333 334 return reply(smp); 335} 336 337static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w) 338{ 339 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w); 340} 341 342static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s) 343{ 344 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s); 345} 346 347static int get_overrunthreshold(struct qib_pportdata *ppd) 348{ 349 return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH); 350} 351 352/** 353 * set_overrunthreshold - set the overrun threshold 354 * @ppd: the physical port data 355 * @n: the new threshold 356 * 357 * Note that this will only take effect when the link state changes. 358 */ 359static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n) 360{ 361 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH, 362 (u32)n); 363 return 0; 364} 365 366static int get_phyerrthreshold(struct qib_pportdata *ppd) 367{ 368 return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH); 369} 370 371/** 372 * set_phyerrthreshold - set the physical error threshold 373 * @ppd: the physical port data 374 * @n: the new threshold 375 * 376 * Note that this will only take effect when the link state changes. 377 */ 378static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n) 379{ 380 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH, 381 (u32)n); 382 return 0; 383} 384 385/** 386 * get_linkdowndefaultstate - get the default linkdown state 387 * @ppd: the physical port data 388 * 389 * Returns zero if the default is POLL, 1 if the default is SLEEP. 390 */ 391static int get_linkdowndefaultstate(struct qib_pportdata *ppd) 392{ 393 return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) == 394 IB_LINKINITCMD_SLEEP; 395} 396 397static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags) 398{ 399 int ret = 0; 400 401 /* Is the mkey in the process of expiring? */ 402 if (ibp->mkey_lease_timeout && 403 time_after_eq(jiffies, ibp->mkey_lease_timeout)) { 404 /* Clear timeout and mkey protection field. */ 405 ibp->mkey_lease_timeout = 0; 406 ibp->mkeyprot = 0; 407 } 408 409 /* M_Key checking depends on Portinfo:M_Key_protect_bits */ 410 if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 && 411 ibp->mkey != smp->mkey && 412 (smp->method == IB_MGMT_METHOD_SET || 413 smp->method == IB_MGMT_METHOD_TRAP_REPRESS || 414 (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) { 415 if (ibp->mkey_violations != 0xFFFF) 416 ++ibp->mkey_violations; 417 if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period) 418 ibp->mkey_lease_timeout = jiffies + 419 ibp->mkey_lease_period * HZ; 420 /* Generate a trap notice. 
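 The notice is trap 256 (Bad M_Key) and the MAD is consumed rather than answered; the lease timer armed above runs for mkey_lease_period seconds, after which the check at the top of this function clears mkeyprot.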
*/ 421 qib_bad_mkey(ibp, smp); 422 ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; 423 } else if (ibp->mkey_lease_timeout) 424 ibp->mkey_lease_timeout = 0; 425 426 return ret; 427} 428 429static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, 430 u8 port) 431{ 432 struct qib_devdata *dd; 433 struct qib_pportdata *ppd; 434 struct qib_ibport *ibp; 435 struct ib_port_info *pip = (struct ib_port_info *)smp->data; 436 u8 mtu; 437 int ret; 438 u32 state; 439 u32 port_num = be32_to_cpu(smp->attr_mod); 440 441 if (port_num == 0) 442 port_num = port; 443 else { 444 if (port_num > ibdev->phys_port_cnt) { 445 smp->status |= IB_SMP_INVALID_FIELD; 446 ret = reply(smp); 447 goto bail; 448 } 449 if (port_num != port) { 450 ibp = to_iport(ibdev, port_num); 451 ret = check_mkey(ibp, smp, 0); 452 if (ret) 453 goto bail; 454 } 455 } 456 457 dd = dd_from_ibdev(ibdev); 458 /* IB numbers ports from 1, hdw from 0 */ 459 ppd = dd->pport + (port_num - 1); 460 ibp = &ppd->ibport_data; 461 462 /* Clear all fields. Only set the non-zero fields. */ 463 memset(smp->data, 0, sizeof(smp->data)); 464 465 /* Only return the mkey if the protection field allows it. */ 466 if (!(smp->method == IB_MGMT_METHOD_GET && 467 ibp->mkey != smp->mkey && 468 ibp->mkeyprot == 1)) 469 pip->mkey = ibp->mkey; 470 pip->gid_prefix = ibp->gid_prefix; 471 pip->lid = cpu_to_be16(ppd->lid); 472 pip->sm_lid = cpu_to_be16(ibp->sm_lid); 473 pip->cap_mask = cpu_to_be32(ibp->port_cap_flags); 474 /* pip->diag_code; */ 475 pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period); 476 pip->local_port_num = port; 477 pip->link_width_enabled = ppd->link_width_enabled; 478 pip->link_width_supported = ppd->link_width_supported; 479 pip->link_width_active = ppd->link_width_active; 480 state = dd->f_iblink_state(ppd->lastibcstat); 481 pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state; 482 483 pip->portphysstate_linkdown = 484 (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) | 485 (get_linkdowndefaultstate(ppd) ? 1 : 2); 486 pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc; 487 pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) | 488 ppd->link_speed_enabled; 489 switch (ppd->ibmtu) { 490 default: /* something is wrong; fall through */ 491 case 4096: 492 mtu = IB_MTU_4096; 493 break; 494 case 2048: 495 mtu = IB_MTU_2048; 496 break; 497 case 1024: 498 mtu = IB_MTU_1024; 499 break; 500 case 512: 501 mtu = IB_MTU_512; 502 break; 503 case 256: 504 mtu = IB_MTU_256; 505 break; 506 } 507 pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl; 508 pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */ 509 pip->vl_high_limit = ibp->vl_high_limit; 510 pip->vl_arb_high_cap = 511 dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP); 512 pip->vl_arb_low_cap = 513 dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP); 514 /* InitTypeReply = 0 */ 515 pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096; 516 /* HCAs ignore VLStallCount and HOQLife */ 517 /* pip->vlstallcnt_hoqlife; */ 518 pip->operationalvl_pei_peo_fpi_fpo = 519 dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4; 520 pip->mkey_violations = cpu_to_be16(ibp->mkey_violations); 521 /* P_KeyViolations are counted by hardware. */ 522 pip->pkey_violations = cpu_to_be16(ibp->pkey_violations); 523 pip->qkey_violations = cpu_to_be16(ibp->qkey_violations); 524 /* Only the hardware GUID is supported for now */ 525 pip->guid_cap = QIB_GUIDS_PER_PORT; 526 pip->clientrereg_resv_subnetto = ibp->subnet_timeout; 527 /* 32.768 usec. 
response time (guessing) */ 528 pip->resv_resptimevalue = 3; 529 pip->localphyerrors_overrunerrors = 530 (get_phyerrthreshold(ppd) << 4) | 531 get_overrunthreshold(ppd); 532 /* pip->max_credit_hint; */ 533 if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) { 534 u32 v; 535 536 v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY); 537 pip->link_roundtrip_latency[0] = v >> 16; 538 pip->link_roundtrip_latency[1] = v >> 8; 539 pip->link_roundtrip_latency[2] = v; 540 } 541 542 ret = reply(smp); 543 544bail: 545 return ret; 546} 547 548/** 549 * get_pkeys - return the PKEY table 550 * @dd: the qlogic_ib device 551 * @port: the IB port number 552 * @pkeys: the pkey table is placed here 553 */ 554static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys) 555{ 556 struct qib_pportdata *ppd = dd->pport + port - 1; 557 /* 558 * always a kernel context, no locking needed. 559 * If we get here with ppd setup, no need to check 560 * that pd is valid. 561 */ 562 struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx]; 563 564 memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys)); 565 566 return 0; 567} 568 569static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, 570 u8 port) 571{ 572 u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff); 573 u16 *p = (u16 *) smp->data; 574 __be16 *q = (__be16 *) smp->data; 575 576 /* 64 blocks of 32 16-bit P_Key entries */ 577 578 memset(smp->data, 0, sizeof(smp->data)); 579 if (startpx == 0) { 580 struct qib_devdata *dd = dd_from_ibdev(ibdev); 581 unsigned i, n = qib_get_npkeys(dd); 582 583 get_pkeys(dd, port, p); 584 585 for (i = 0; i < n; i++) 586 q[i] = cpu_to_be16(p[i]); 587 } else 588 smp->status |= IB_SMP_INVALID_FIELD; 589 590 return reply(smp); 591} 592 593static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, 594 u8 port) 595{ 596 struct qib_devdata *dd = dd_from_ibdev(ibdev); 597 u32 startgx = 8 * be32_to_cpu(smp->attr_mod); 598 __be64 *p = (__be64 *) smp->data; 599 unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ 600 601 /* 32 blocks of 8 64-bit GUIDs per block */ 602 603 if (startgx == 0 && pidx < dd->num_pports) { 604 struct qib_pportdata *ppd = dd->pport + pidx; 605 struct qib_ibport *ibp = &ppd->ibport_data; 606 unsigned i; 607 608 /* The first entry is read-only. */ 609 for (i = 1; i < QIB_GUIDS_PER_PORT; i++) 610 ibp->guids[i - 1] = p[i]; 611 } else 612 smp->status |= IB_SMP_INVALID_FIELD; 613 614 /* The only GUID we support is the first read-only entry. */ 615 return subn_get_guidinfo(smp, ibdev, port); 616} 617 618/** 619 * subn_set_portinfo - set port information 620 * @smp: the incoming SM packet 621 * @ibdev: the infiniband device 622 * @port: the port on the device 623 * 624 * Set Portinfo (see ch. 14.2.5.6). 
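 * On success the routine falls through to subn_get_portinfo(), so the
 * GetResp carries the port state that actually took effect; invalid
 * fields are flagged with IB_SMP_INVALID_FIELD in smp->status.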
625 */ 626static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, 627 u8 port) 628{ 629 struct ib_port_info *pip = (struct ib_port_info *)smp->data; 630 struct ib_event event; 631 struct qib_devdata *dd; 632 struct qib_pportdata *ppd; 633 struct qib_ibport *ibp; 634 char clientrereg = 0; 635 unsigned long flags; 636 u16 lid, smlid; 637 u8 lwe; 638 u8 lse; 639 u8 state; 640 u8 vls; 641 u8 msl; 642 u16 lstate; 643 int ret, ore, mtu; 644 u32 port_num = be32_to_cpu(smp->attr_mod); 645 646 if (port_num == 0) 647 port_num = port; 648 else { 649 if (port_num > ibdev->phys_port_cnt) 650 goto err; 651 /* Port attributes can only be set on the receiving port */ 652 if (port_num != port) 653 goto get_only; 654 } 655 656 dd = dd_from_ibdev(ibdev); 657 /* IB numbers ports from 1, hdw from 0 */ 658 ppd = dd->pport + (port_num - 1); 659 ibp = &ppd->ibport_data; 660 event.device = ibdev; 661 event.element.port_num = port; 662 663 ibp->mkey = pip->mkey; 664 ibp->gid_prefix = pip->gid_prefix; 665 ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period); 666 667 lid = be16_to_cpu(pip->lid); 668 /* Must be a valid unicast LID address. */ 669 if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE) 670 smp->status |= IB_SMP_INVALID_FIELD; 671 else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) { 672 if (ppd->lid != lid) 673 qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT); 674 if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) 675 qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT); 676 qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7); 677 event.event = IB_EVENT_LID_CHANGE; 678 ib_dispatch_event(&event); 679 } 680 681 smlid = be16_to_cpu(pip->sm_lid); 682 msl = pip->neighbormtu_mastersmsl & 0xF; 683 /* Must be a valid unicast LID address. */ 684 if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE) 685 smp->status |= IB_SMP_INVALID_FIELD; 686 else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { 687 spin_lock_irqsave(&ibp->lock, flags); 688 if (ibp->sm_ah) { 689 if (smlid != ibp->sm_lid) 690 ibp->sm_ah->attr.dlid = smlid; 691 if (msl != ibp->sm_sl) 692 ibp->sm_ah->attr.sl = msl; 693 } 694 spin_unlock_irqrestore(&ibp->lock, flags); 695 if (smlid != ibp->sm_lid) 696 ibp->sm_lid = smlid; 697 if (msl != ibp->sm_sl) 698 ibp->sm_sl = msl; 699 event.event = IB_EVENT_SM_CHANGE; 700 ib_dispatch_event(&event); 701 } 702 703 /* Allow 1x or 4x to be set (see 14.2.6.6). */ 704 lwe = pip->link_width_enabled; 705 if (lwe) { 706 if (lwe == 0xFF) 707 set_link_width_enabled(ppd, ppd->link_width_supported); 708 else if (lwe >= 16 || (lwe & ~ppd->link_width_supported)) 709 smp->status |= IB_SMP_INVALID_FIELD; 710 else if (lwe != ppd->link_width_enabled) 711 set_link_width_enabled(ppd, lwe); 712 } 713 714 lse = pip->linkspeedactive_enabled & 0xF; 715 if (lse) { 716 /* 717 * The IB 1.2 spec. only allows link speed values 718 * 1, 3, 5, 7, 15. 1.2.1 extended to allow specific 719 * speeds. 720 */ 721 if (lse == 15) 722 set_link_speed_enabled(ppd, 723 ppd->link_speed_supported); 724 else if (lse >= 8 || (lse & ~ppd->link_speed_supported)) 725 smp->status |= IB_SMP_INVALID_FIELD; 726 else if (lse != ppd->link_speed_enabled) 727 set_link_speed_enabled(ppd, lse); 728 } 729 730 /* Set link down default state. 
*/ 731 switch (pip->portphysstate_linkdown & 0xF) { 732 case 0: /* NOP */ 733 break; 734 case 1: /* SLEEP */ 735 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT, 736 IB_LINKINITCMD_SLEEP); 737 break; 738 case 2: /* POLL */ 739 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT, 740 IB_LINKINITCMD_POLL); 741 break; 742 default: 743 smp->status |= IB_SMP_INVALID_FIELD; 744 } 745 746 ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6; 747 ibp->vl_high_limit = pip->vl_high_limit; 748 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT, 749 ibp->vl_high_limit); 750 751 mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF); 752 if (mtu == -1) 753 smp->status |= IB_SMP_INVALID_FIELD; 754 else 755 qib_set_mtu(ppd, mtu); 756 757 /* Set operational VLs */ 758 vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF; 759 if (vls) { 760 if (vls > ppd->vls_supported) 761 smp->status |= IB_SMP_INVALID_FIELD; 762 else 763 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls); 764 } 765 766 if (pip->mkey_violations == 0) 767 ibp->mkey_violations = 0; 768 769 if (pip->pkey_violations == 0) 770 ibp->pkey_violations = 0; 771 772 if (pip->qkey_violations == 0) 773 ibp->qkey_violations = 0; 774 775 ore = pip->localphyerrors_overrunerrors; 776 if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF)) 777 smp->status |= IB_SMP_INVALID_FIELD; 778 779 if (set_overrunthreshold(ppd, (ore & 0xF))) 780 smp->status |= IB_SMP_INVALID_FIELD; 781 782 ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; 783 784 if (pip->clientrereg_resv_subnetto & 0x80) { 785 clientrereg = 1; 786 event.event = IB_EVENT_CLIENT_REREGISTER; 787 ib_dispatch_event(&event); 788 } 789 790 /* 791 * Do the port state change now that the other link parameters 792 * have been set. 793 * Changing the port physical state only makes sense if the link 794 * is down or is being set to down. 795 */ 796 state = pip->linkspeed_portstate & 0xF; 797 lstate = (pip->portphysstate_linkdown >> 4) & 0xF; 798 if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) 799 smp->status |= IB_SMP_INVALID_FIELD; 800 801 /* 802 * Only state changes of DOWN, ARM, and ACTIVE are valid 803 * and must be in the correct state to take effect (see 7.2.6). 804 */ 805 switch (state) { 806 case IB_PORT_NOP: 807 if (lstate == 0) 808 break; 809 /* FALLTHROUGH */ 810 case IB_PORT_DOWN: 811 if (lstate == 0) 812 lstate = QIB_IB_LINKDOWN_ONLY; 813 else if (lstate == 1) 814 lstate = QIB_IB_LINKDOWN_SLEEP; 815 else if (lstate == 2) 816 lstate = QIB_IB_LINKDOWN; 817 else if (lstate == 3) 818 lstate = QIB_IB_LINKDOWN_DISABLE; 819 else { 820 smp->status |= IB_SMP_INVALID_FIELD; 821 break; 822 } 823 spin_lock_irqsave(&ppd->lflags_lock, flags); 824 ppd->lflags &= ~QIBL_LINKV; 825 spin_unlock_irqrestore(&ppd->lflags_lock, flags); 826 qib_set_linkstate(ppd, lstate); 827 /* 828 * Don't send a reply if the response would be sent 829 * through the disabled port. 
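 * A nonzero hop_cnt means the Set arrived as a directed-route SMP, so
 * the response would have to leave through the port just disabled;
 * consume the MAD instead.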
*/ 831 if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) { 832 ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; 833 goto done; 834 } 835 qib_wait_linkstate(ppd, QIBL_LINKV, 10); 836 break; 837 case IB_PORT_ARMED: 838 qib_set_linkstate(ppd, QIB_IB_LINKARM); 839 break; 840 case IB_PORT_ACTIVE: 841 qib_set_linkstate(ppd, QIB_IB_LINKACTIVE); 842 break; 843 default: 844 smp->status |= IB_SMP_INVALID_FIELD; 845 } 846 847 ret = subn_get_portinfo(smp, ibdev, port); 848 849 if (clientrereg) 850 pip->clientrereg_resv_subnetto |= 0x80; 851 852 goto get_only; 853 854err: 855 smp->status |= IB_SMP_INVALID_FIELD; 856get_only: 857 ret = subn_get_portinfo(smp, ibdev, port); 858done: 859 return ret; 860} 861 862/** 863 * rm_pkey - decrement the reference count for the given PKEY 864 * @ppd: the physical port data 865 * @key: the PKEY 866 * 867 * Return true if this was the last reference and the hardware table entry 868 * needs to be changed. 869 */ 870static int rm_pkey(struct qib_pportdata *ppd, u16 key) 871{ 872 int i; 873 int ret; 874 875 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { 876 if (ppd->pkeys[i] != key) 877 continue; 878 if (atomic_dec_and_test(&ppd->pkeyrefs[i])) { 879 ppd->pkeys[i] = 0; 880 ret = 1; 881 goto bail; 882 } 883 break; 884 } 885 886 ret = 0; 887 888bail: 889 return ret; 890} 891 892/** 893 * add_pkey - add the given PKEY to the hardware table 894 * @ppd: the physical port data 895 * @key: the PKEY 896 * 897 * Return an error code if unable to add the entry, zero if no change, 898 * or 1 if the hardware PKEY register needs to be updated. 899 */ 900static int add_pkey(struct qib_pportdata *ppd, u16 key) 901{ 902 int i; 903 u16 lkey = key & 0x7FFF; 904 int any = 0; 905 int ret; 906 907 if (lkey == 0x7FFF) { 908 ret = 0; 909 goto bail; 910 } 911 912 /* Look for an empty slot or a matching PKEY. */ 913 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { 914 if (!ppd->pkeys[i]) { 915 any++; 916 continue; 917 } 918 /* If it matches exactly, try to increment the ref count */ 919 if (ppd->pkeys[i] == key) { 920 if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) { 921 ret = 0; 922 goto bail; 923 } 924 /* Lost the race. Look for an empty slot below. */ 925 atomic_dec(&ppd->pkeyrefs[i]); 926 any++; 927 } 928 /* 929 * It makes no sense to have both the limited and unlimited 930 * PKEY set at the same time since the unlimited one will 931 * disable the limited one. 932 */ 933 if ((ppd->pkeys[i] & 0x7FFF) == lkey) { 934 ret = -EEXIST; 935 goto bail; 936 } 937 } 938 if (!any) { 939 ret = -EBUSY; 940 goto bail; 941 } 942 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { 943 if (!ppd->pkeys[i] && 944 atomic_inc_return(&ppd->pkeyrefs[i]) == 1) { 945 /* for qibstats, etc. */ 946 ppd->pkeys[i] = key; 947 ret = 1; 948 goto bail; 949 } 950 } 951 ret = -EBUSY; 952 953bail: 954 return ret; 955} 956 957/** 958 * set_pkeys - set the PKEY table for ctxt 0 959 * @dd: the qlogic_ib device 960 * @port: the IB port number 961 * @pkeys: the PKEY table 962 */ 963static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys) 964{ 965 struct qib_pportdata *ppd; 966 struct qib_ctxtdata *rcd; 967 int i; 968 int changed = 0; 969 970 /* 971 * IB port one/two always maps to context zero/one, 972 * always a kernel context, no locking needed 973 * If we get here with ppd setup, no need to check 974 * that rcd is valid.
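 * P_Keys are compared on their low 15 bits by rm_pkey()/add_pkey()
 * above; bit 15 only distinguishes full from limited membership, so
 * both flavors of the same key never occupy separate table slots.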
975 */ 976 ppd = dd->pport + (port - 1); 977 rcd = dd->rcd[ppd->hw_pidx]; 978 979 for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) { 980 u16 key = pkeys[i]; 981 u16 okey = rcd->pkeys[i]; 982 983 if (key == okey) 984 continue; 985 /* 986 * The value of this PKEY table entry is changing. 987 * Remove the old entry in the hardware's array of PKEYs. 988 */ 989 if (okey & 0x7FFF) 990 changed |= rm_pkey(ppd, okey); 991 if (key & 0x7FFF) { 992 int ret = add_pkey(ppd, key); 993 994 if (ret < 0) 995 key = 0; 996 else 997 changed |= ret; 998 } 999 rcd->pkeys[i] = key; 1000 } 1001 if (changed) { 1002 struct ib_event event; 1003 1004 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0); 1005 1006 event.event = IB_EVENT_PKEY_CHANGE; 1007 event.device = &dd->verbs_dev.ibdev; 1008 event.element.port_num = 1; 1009 ib_dispatch_event(&event); 1010 } 1011 return 0; 1012} 1013 1014static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, 1015 u8 port) 1016{ 1017 u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff); 1018 __be16 *p = (__be16 *) smp->data; 1019 u16 *q = (u16 *) smp->data; 1020 struct qib_devdata *dd = dd_from_ibdev(ibdev); 1021 unsigned i, n = qib_get_npkeys(dd); 1022 1023 for (i = 0; i < n; i++) 1024 q[i] = be16_to_cpu(p[i]); 1025 1026 if (startpx != 0 || set_pkeys(dd, port, q) != 0) 1027 smp->status |= IB_SMP_INVALID_FIELD; 1028 1029 return subn_get_pkeytable(smp, ibdev, port); 1030} 1031 1032static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, 1033 u8 port) 1034{ 1035 struct qib_ibport *ibp = to_iport(ibdev, port); 1036 u8 *p = (u8 *) smp->data; 1037 unsigned i; 1038 1039 memset(smp->data, 0, sizeof(smp->data)); 1040 1041 if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) 1042 smp->status |= IB_SMP_UNSUP_METHOD; 1043 else 1044 for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2) 1045 *p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1]; 1046 1047 return reply(smp); 1048} 1049 1050static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, 1051 u8 port) 1052{ 1053 struct qib_ibport *ibp = to_iport(ibdev, port); 1054 u8 *p = (u8 *) smp->data; 1055 unsigned i; 1056 1057 if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) { 1058 smp->status |= IB_SMP_UNSUP_METHOD; 1059 return reply(smp); 1060 } 1061 1062 for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) { 1063 ibp->sl_to_vl[i] = *p >> 4; 1064 ibp->sl_to_vl[i + 1] = *p & 0xF; 1065 } 1066 qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)), 1067 _QIB_EVENT_SL2VL_CHANGE_BIT); 1068 1069 return subn_get_sl_to_vl(smp, ibdev, port); 1070} 1071 1072static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev, 1073 u8 port) 1074{ 1075 unsigned which = be32_to_cpu(smp->attr_mod) >> 16; 1076 struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port)); 1077 1078 memset(smp->data, 0, sizeof(smp->data)); 1079 1080 if (ppd->vls_supported == IB_VL_VL0) 1081 smp->status |= IB_SMP_UNSUP_METHOD; 1082 else if (which == IB_VLARB_LOWPRI_0_31) 1083 (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB, 1084 smp->data); 1085 else if (which == IB_VLARB_HIGHPRI_0_31) 1086 (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB, 1087 smp->data); 1088 else 1089 smp->status |= IB_SMP_INVALID_FIELD; 1090 1091 return reply(smp); 1092} 1093 1094static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev, 1095 u8 port) 1096{ 1097 unsigned which = be32_to_cpu(smp->attr_mod) >> 16; 1098 struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port)); 1099 1100 if (ppd->vls_supported == IB_VL_VL0) 
1101 smp->status |= IB_SMP_UNSUP_METHOD; 1102 else if (which == IB_VLARB_LOWPRI_0_31) 1103 (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB, 1104 smp->data); 1105 else if (which == IB_VLARB_HIGHPRI_0_31) 1106 (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB, 1107 smp->data); 1108 else 1109 smp->status |= IB_SMP_INVALID_FIELD; 1110 1111 return subn_get_vl_arb(smp, ibdev, port); 1112} 1113 1114static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev, 1115 u8 port) 1116{ 1117 /* 1118 * For now, we only send the trap once so no need to process this. 1119 * o13-6, o13-7, 1120 * o14-3.a4 The SMA shall not send any message in response to a valid 1121 * SubnTrapRepress() message. 1122 */ 1123 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; 1124} 1125 1126static int pma_get_classportinfo(struct ib_pma_mad *pmp, 1127 struct ib_device *ibdev) 1128{ 1129 struct ib_class_port_info *p = 1130 (struct ib_class_port_info *)pmp->data; 1131 struct qib_devdata *dd = dd_from_ibdev(ibdev); 1132 1133 memset(pmp->data, 0, sizeof(pmp->data)); 1134 1135 if (pmp->mad_hdr.attr_mod != 0) 1136 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 1137 1138 /* Note that AllPortSelect is not valid */ 1139 p->base_version = 1; 1140 p->class_version = 1; 1141 p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH; 1142 /* 1143 * Set the most significant bit of CM2 to indicate support for 1144 * congestion statistics 1145 */ 1146 p->reserved[0] = dd->psxmitwait_supported << 7; 1147 /* 1148 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec. 1149 */ 1150 p->resp_time_value = 18; 1151 1152 return reply((struct ib_smp *) pmp); 1153} 1154 1155static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp, 1156 struct ib_device *ibdev, u8 port) 1157{ 1158 struct ib_pma_portsamplescontrol *p = 1159 (struct ib_pma_portsamplescontrol *)pmp->data; 1160 struct qib_ibdev *dev = to_idev(ibdev); 1161 struct qib_devdata *dd = dd_from_dev(dev); 1162 struct qib_ibport *ibp = to_iport(ibdev, port); 1163 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1164 unsigned long flags; 1165 u8 port_select = p->port_select; 1166 1167 memset(pmp->data, 0, sizeof(pmp->data)); 1168 1169 p->port_select = port_select; 1170 if (pmp->mad_hdr.attr_mod != 0 || port_select != port) { 1171 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 1172 goto bail; 1173 } 1174 spin_lock_irqsave(&ibp->lock, flags); 1175 p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS); 1176 p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); 1177 p->counter_width = 4; /* 32 bit counters */ 1178 p->counter_mask0_9 = COUNTER_MASK0_9; 1179 p->sample_start = cpu_to_be32(ibp->pma_sample_start); 1180 p->sample_interval = cpu_to_be32(ibp->pma_sample_interval); 1181 p->tag = cpu_to_be16(ibp->pma_tag); 1182 p->counter_select[0] = ibp->pma_counter_select[0]; 1183 p->counter_select[1] = ibp->pma_counter_select[1]; 1184 p->counter_select[2] = ibp->pma_counter_select[2]; 1185 p->counter_select[3] = ibp->pma_counter_select[3]; 1186 p->counter_select[4] = ibp->pma_counter_select[4]; 1187 spin_unlock_irqrestore(&ibp->lock, flags); 1188 1189bail: 1190 return reply((struct ib_smp *) pmp); 1191} 1192 1193static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp, 1194 struct ib_device *ibdev, u8 port) 1195{ 1196 struct ib_pma_portsamplescontrol *p = 1197 (struct ib_pma_portsamplescontrol *)pmp->data; 1198 struct qib_ibdev *dev = to_idev(ibdev); 1199 struct qib_devdata *dd = dd_from_dev(dev); 1200 struct qib_ibport *ibp = to_iport(ibdev, port); 1201 struct 
qib_pportdata *ppd = ppd_from_ibp(ibp); 1202 unsigned long flags; 1203 u8 status, xmit_flags; 1204 int ret; 1205 1206 if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) { 1207 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 1208 ret = reply((struct ib_smp *) pmp); 1209 goto bail; 1210 } 1211 1212 spin_lock_irqsave(&ibp->lock, flags); 1213 1214 /* Port Sampling code owns the PS* HW counters */ 1215 xmit_flags = ppd->cong_stats.flags; 1216 ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE; 1217 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); 1218 if (status == IB_PMA_SAMPLE_STATUS_DONE || 1219 (status == IB_PMA_SAMPLE_STATUS_RUNNING && 1220 xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) { 1221 ibp->pma_sample_start = be32_to_cpu(p->sample_start); 1222 ibp->pma_sample_interval = be32_to_cpu(p->sample_interval); 1223 ibp->pma_tag = be16_to_cpu(p->tag); 1224 ibp->pma_counter_select[0] = p->counter_select[0]; 1225 ibp->pma_counter_select[1] = p->counter_select[1]; 1226 ibp->pma_counter_select[2] = p->counter_select[2]; 1227 ibp->pma_counter_select[3] = p->counter_select[3]; 1228 ibp->pma_counter_select[4] = p->counter_select[4]; 1229 dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval, 1230 ibp->pma_sample_start); 1231 } 1232 spin_unlock_irqrestore(&ibp->lock, flags); 1233 1234 ret = pma_get_portsamplescontrol(pmp, ibdev, port); 1235 1236bail: 1237 return ret; 1238} 1239 1240static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd, 1241 __be16 sel) 1242{ 1243 u64 ret; 1244 1245 switch (sel) { 1246 case IB_PMA_PORT_XMIT_DATA: 1247 ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA); 1248 break; 1249 case IB_PMA_PORT_RCV_DATA: 1250 ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA); 1251 break; 1252 case IB_PMA_PORT_XMIT_PKTS: 1253 ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS); 1254 break; 1255 case IB_PMA_PORT_RCV_PKTS: 1256 ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS); 1257 break; 1258 case IB_PMA_PORT_XMIT_WAIT: 1259 ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT); 1260 break; 1261 default: 1262 ret = 0; 1263 } 1264 1265 return ret; 1266} 1267 1268/* This function assumes that the xmit_wait lock is already held */ 1269static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd) 1270{ 1271 u32 delta; 1272 1273 delta = get_counter(&ppd->ibport_data, ppd, 1274 IB_PMA_PORT_XMIT_WAIT); 1275 return ppd->cong_stats.counter + delta; 1276} 1277 1278static void cache_hw_sample_counters(struct qib_pportdata *ppd) 1279{ 1280 struct qib_ibport *ibp = &ppd->ibport_data; 1281 1282 ppd->cong_stats.counter_cache.psxmitdata = 1283 get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA); 1284 ppd->cong_stats.counter_cache.psrcvdata = 1285 get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA); 1286 ppd->cong_stats.counter_cache.psxmitpkts = 1287 get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS); 1288 ppd->cong_stats.counter_cache.psrcvpkts = 1289 get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS); 1290 ppd->cong_stats.counter_cache.psxmitwait = 1291 get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT); 1292} 1293 1294static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd, 1295 __be16 sel) 1296{ 1297 u64 ret; 1298 1299 switch (sel) { 1300 case IB_PMA_PORT_XMIT_DATA: 1301 ret = ppd->cong_stats.counter_cache.psxmitdata; 1302 break; 1303 case IB_PMA_PORT_RCV_DATA: 1304 ret = ppd->cong_stats.counter_cache.psrcvdata; 1305 break; 1306 case IB_PMA_PORT_XMIT_PKTS: 1307 ret = ppd->cong_stats.counter_cache.psxmitpkts; 1308 break; 1309 case IB_PMA_PORT_RCV_PKTS: 1310 ret = 
ppd->cong_stats.counter_cache.psrcvpkts; 1311 break; 1312 case IB_PMA_PORT_XMIT_WAIT: 1313 ret = ppd->cong_stats.counter_cache.psxmitwait; 1314 break; 1315 default: 1316 ret = 0; 1317 } 1318 1319 return ret; 1320} 1321 1322static int pma_get_portsamplesresult(struct ib_pma_mad *pmp, 1323 struct ib_device *ibdev, u8 port) 1324{ 1325 struct ib_pma_portsamplesresult *p = 1326 (struct ib_pma_portsamplesresult *)pmp->data; 1327 struct qib_ibdev *dev = to_idev(ibdev); 1328 struct qib_devdata *dd = dd_from_dev(dev); 1329 struct qib_ibport *ibp = to_iport(ibdev, port); 1330 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1331 unsigned long flags; 1332 u8 status; 1333 int i; 1334 1335 memset(pmp->data, 0, sizeof(pmp->data)); 1336 spin_lock_irqsave(&ibp->lock, flags); 1337 p->tag = cpu_to_be16(ibp->pma_tag); 1338 if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER) 1339 p->sample_status = IB_PMA_SAMPLE_STATUS_DONE; 1340 else { 1341 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); 1342 p->sample_status = cpu_to_be16(status); 1343 if (status == IB_PMA_SAMPLE_STATUS_DONE) { 1344 cache_hw_sample_counters(ppd); 1345 ppd->cong_stats.counter = 1346 xmit_wait_get_value_delta(ppd); 1347 dd->f_set_cntr_sample(ppd, 1348 QIB_CONG_TIMER_PSINTERVAL, 0); 1349 ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; 1350 } 1351 } 1352 for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++) 1353 p->counter[i] = cpu_to_be32( 1354 get_cache_hw_sample_counters( 1355 ppd, ibp->pma_counter_select[i])); 1356 spin_unlock_irqrestore(&ibp->lock, flags); 1357 1358 return reply((struct ib_smp *) pmp); 1359} 1360 1361static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp, 1362 struct ib_device *ibdev, u8 port) 1363{ 1364 struct ib_pma_portsamplesresult_ext *p = 1365 (struct ib_pma_portsamplesresult_ext *)pmp->data; 1366 struct qib_ibdev *dev = to_idev(ibdev); 1367 struct qib_devdata *dd = dd_from_dev(dev); 1368 struct qib_ibport *ibp = to_iport(ibdev, port); 1369 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1370 unsigned long flags; 1371 u8 status; 1372 int i; 1373 1374 /* Port Sampling code owns the PS* HW counters */ 1375 memset(pmp->data, 0, sizeof(pmp->data)); 1376 spin_lock_irqsave(&ibp->lock, flags); 1377 p->tag = cpu_to_be16(ibp->pma_tag); 1378 if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER) 1379 p->sample_status = IB_PMA_SAMPLE_STATUS_DONE; 1380 else { 1381 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); 1382 p->sample_status = cpu_to_be16(status); 1383 /* 64 bits */ 1384 p->extended_width = cpu_to_be32(0x80000000); 1385 if (status == IB_PMA_SAMPLE_STATUS_DONE) { 1386 cache_hw_sample_counters(ppd); 1387 ppd->cong_stats.counter = 1388 xmit_wait_get_value_delta(ppd); 1389 dd->f_set_cntr_sample(ppd, 1390 QIB_CONG_TIMER_PSINTERVAL, 0); 1391 ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; 1392 } 1393 } 1394 for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++) 1395 p->counter[i] = cpu_to_be64( 1396 get_cache_hw_sample_counters( 1397 ppd, ibp->pma_counter_select[i])); 1398 spin_unlock_irqrestore(&ibp->lock, flags); 1399 1400 return reply((struct ib_smp *) pmp); 1401} 1402 1403static int pma_get_portcounters(struct ib_pma_mad *pmp, 1404 struct ib_device *ibdev, u8 port) 1405{ 1406 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) 1407 pmp->data; 1408 struct qib_ibport *ibp = to_iport(ibdev, port); 1409 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1410 struct qib_verbs_counters cntrs; 1411 u8 port_select = p->port_select; 1412 1413 qib_get_counters(ppd, &cntrs); 1414 1415 /* 
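The chip cannot clear these counters, so a PerfMgt Set(PortCounters) only records z_* baseline snapshots (see pma_set_portcounters) and those baselines are subtracted below.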
Adjust counters for any resets done. */ 1416 cntrs.symbol_error_counter -= ibp->z_symbol_error_counter; 1417 cntrs.link_error_recovery_counter -= 1418 ibp->z_link_error_recovery_counter; 1419 cntrs.link_downed_counter -= ibp->z_link_downed_counter; 1420 cntrs.port_rcv_errors -= ibp->z_port_rcv_errors; 1421 cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors; 1422 cntrs.port_xmit_discards -= ibp->z_port_xmit_discards; 1423 cntrs.port_xmit_data -= ibp->z_port_xmit_data; 1424 cntrs.port_rcv_data -= ibp->z_port_rcv_data; 1425 cntrs.port_xmit_packets -= ibp->z_port_xmit_packets; 1426 cntrs.port_rcv_packets -= ibp->z_port_rcv_packets; 1427 cntrs.local_link_integrity_errors -= 1428 ibp->z_local_link_integrity_errors; 1429 cntrs.excessive_buffer_overrun_errors -= 1430 ibp->z_excessive_buffer_overrun_errors; 1431 cntrs.vl15_dropped -= ibp->z_vl15_dropped; 1432 cntrs.vl15_dropped += ibp->n_vl15_dropped; 1433 1434 memset(pmp->data, 0, sizeof(pmp->data)); 1435 1436 p->port_select = port_select; 1437 if (pmp->mad_hdr.attr_mod != 0 || port_select != port) 1438 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 1439 1440 if (cntrs.symbol_error_counter > 0xFFFFUL) 1441 p->symbol_error_counter = cpu_to_be16(0xFFFF); 1442 else 1443 p->symbol_error_counter = 1444 cpu_to_be16((u16)cntrs.symbol_error_counter); 1445 if (cntrs.link_error_recovery_counter > 0xFFUL) 1446 p->link_error_recovery_counter = 0xFF; 1447 else 1448 p->link_error_recovery_counter = 1449 (u8)cntrs.link_error_recovery_counter; 1450 if (cntrs.link_downed_counter > 0xFFUL) 1451 p->link_downed_counter = 0xFF; 1452 else 1453 p->link_downed_counter = (u8)cntrs.link_downed_counter; 1454 if (cntrs.port_rcv_errors > 0xFFFFUL) 1455 p->port_rcv_errors = cpu_to_be16(0xFFFF); 1456 else 1457 p->port_rcv_errors = 1458 cpu_to_be16((u16) cntrs.port_rcv_errors); 1459 if (cntrs.port_rcv_remphys_errors > 0xFFFFUL) 1460 p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF); 1461 else 1462 p->port_rcv_remphys_errors = 1463 cpu_to_be16((u16)cntrs.port_rcv_remphys_errors); 1464 if (cntrs.port_xmit_discards > 0xFFFFUL) 1465 p->port_xmit_discards = cpu_to_be16(0xFFFF); 1466 else 1467 p->port_xmit_discards = 1468 cpu_to_be16((u16)cntrs.port_xmit_discards); 1469 if (cntrs.local_link_integrity_errors > 0xFUL) 1470 cntrs.local_link_integrity_errors = 0xFUL; 1471 if (cntrs.excessive_buffer_overrun_errors > 0xFUL) 1472 cntrs.excessive_buffer_overrun_errors = 0xFUL; 1473 p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) | 1474 cntrs.excessive_buffer_overrun_errors; 1475 if (cntrs.vl15_dropped > 0xFFFFUL) 1476 p->vl15_dropped = cpu_to_be16(0xFFFF); 1477 else 1478 p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped); 1479 if (cntrs.port_xmit_data > 0xFFFFFFFFUL) 1480 p->port_xmit_data = cpu_to_be32(0xFFFFFFFF); 1481 else 1482 p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data); 1483 if (cntrs.port_rcv_data > 0xFFFFFFFFUL) 1484 p->port_rcv_data = cpu_to_be32(0xFFFFFFFF); 1485 else 1486 p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data); 1487 if (cntrs.port_xmit_packets > 0xFFFFFFFFUL) 1488 p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF); 1489 else 1490 p->port_xmit_packets = 1491 cpu_to_be32((u32)cntrs.port_xmit_packets); 1492 if (cntrs.port_rcv_packets > 0xFFFFFFFFUL) 1493 p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF); 1494 else 1495 p->port_rcv_packets = 1496 cpu_to_be32((u32) cntrs.port_rcv_packets); 1497 1498 return reply((struct ib_smp *) pmp); 1499} 1500 1501static int pma_get_portcounters_cong(struct ib_pma_mad *pmp, 1502 struct ib_device 
*ibdev, u8 port) 1503{ 1504 /* Congestion PMA packets start at offset 24 not 64 */ 1505 struct ib_pma_portcounters_cong *p = 1506 (struct ib_pma_portcounters_cong *)pmp->reserved; 1507 struct qib_verbs_counters cntrs; 1508 struct qib_ibport *ibp = to_iport(ibdev, port); 1509 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1510 struct qib_devdata *dd = dd_from_ppd(ppd); 1511 u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF; 1512 u64 xmit_wait_counter; 1513 unsigned long flags; 1514 1515 /* 1516 * This check is performed only in the GET method because the 1517 * SET method ends up calling this anyway. 1518 */ 1519 if (!dd->psxmitwait_supported) 1520 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; 1521 if (port_select != port) 1522 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 1523 1524 qib_get_counters(ppd, &cntrs); 1525 spin_lock_irqsave(&ppd->ibport_data.lock, flags); 1526 xmit_wait_counter = xmit_wait_get_value_delta(ppd); 1527 spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); 1528 1529 /* Adjust counters for any resets done. */ 1530 cntrs.symbol_error_counter -= ibp->z_symbol_error_counter; 1531 cntrs.link_error_recovery_counter -= 1532 ibp->z_link_error_recovery_counter; 1533 cntrs.link_downed_counter -= ibp->z_link_downed_counter; 1534 cntrs.port_rcv_errors -= ibp->z_port_rcv_errors; 1535 cntrs.port_rcv_remphys_errors -= 1536 ibp->z_port_rcv_remphys_errors; 1537 cntrs.port_xmit_discards -= ibp->z_port_xmit_discards; 1538 cntrs.local_link_integrity_errors -= 1539 ibp->z_local_link_integrity_errors; 1540 cntrs.excessive_buffer_overrun_errors -= 1541 ibp->z_excessive_buffer_overrun_errors; 1542 cntrs.vl15_dropped -= ibp->z_vl15_dropped; 1543 cntrs.vl15_dropped += ibp->n_vl15_dropped; 1544 cntrs.port_xmit_data -= ibp->z_port_xmit_data; 1545 cntrs.port_rcv_data -= ibp->z_port_rcv_data; 1546 cntrs.port_xmit_packets -= ibp->z_port_xmit_packets; 1547 cntrs.port_rcv_packets -= ibp->z_port_rcv_packets; 1548 1549 memset(pmp->reserved, 0, sizeof(pmp->reserved) + 1550 sizeof(pmp->data)); 1551 1552 /* 1553 * Set top 3 bits to indicate interval in picoseconds in 1554 * remaining bits. 
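 * For example, QIB_XMIT_RATE_PICO occupies bits 15..13 of
 * port_check_rate and the low 13 bits of dd->psxmitwait_check_rate
 * supply the interval value itself.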
1555 */ 1556 p->port_check_rate = 1557 cpu_to_be16((QIB_XMIT_RATE_PICO << 13) | 1558 (dd->psxmitwait_check_rate & 1559 ~(QIB_XMIT_RATE_PICO << 13))); 1560 p->port_adr_events = cpu_to_be64(0); 1561 p->port_xmit_wait = cpu_to_be64(xmit_wait_counter); 1562 p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data); 1563 p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data); 1564 p->port_xmit_packets = 1565 cpu_to_be64(cntrs.port_xmit_packets); 1566 p->port_rcv_packets = 1567 cpu_to_be64(cntrs.port_rcv_packets); 1568 if (cntrs.symbol_error_counter > 0xFFFFUL) 1569 p->symbol_error_counter = cpu_to_be16(0xFFFF); 1570 else 1571 p->symbol_error_counter = 1572 cpu_to_be16( 1573 (u16)cntrs.symbol_error_counter); 1574 if (cntrs.link_error_recovery_counter > 0xFFUL) 1575 p->link_error_recovery_counter = 0xFF; 1576 else 1577 p->link_error_recovery_counter = 1578 (u8)cntrs.link_error_recovery_counter; 1579 if (cntrs.link_downed_counter > 0xFFUL) 1580 p->link_downed_counter = 0xFF; 1581 else 1582 p->link_downed_counter = 1583 (u8)cntrs.link_downed_counter; 1584 if (cntrs.port_rcv_errors > 0xFFFFUL) 1585 p->port_rcv_errors = cpu_to_be16(0xFFFF); 1586 else 1587 p->port_rcv_errors = 1588 cpu_to_be16((u16) cntrs.port_rcv_errors); 1589 if (cntrs.port_rcv_remphys_errors > 0xFFFFUL) 1590 p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF); 1591 else 1592 p->port_rcv_remphys_errors = 1593 cpu_to_be16( 1594 (u16)cntrs.port_rcv_remphys_errors); 1595 if (cntrs.port_xmit_discards > 0xFFFFUL) 1596 p->port_xmit_discards = cpu_to_be16(0xFFFF); 1597 else 1598 p->port_xmit_discards = 1599 cpu_to_be16((u16)cntrs.port_xmit_discards); 1600 if (cntrs.local_link_integrity_errors > 0xFUL) 1601 cntrs.local_link_integrity_errors = 0xFUL; 1602 if (cntrs.excessive_buffer_overrun_errors > 0xFUL) 1603 cntrs.excessive_buffer_overrun_errors = 0xFUL; 1604 p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) | 1605 cntrs.excessive_buffer_overrun_errors; 1606 if (cntrs.vl15_dropped > 0xFFFFUL) 1607 p->vl15_dropped = cpu_to_be16(0xFFFF); 1608 else 1609 p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped); 1610 1611 return reply((struct ib_smp *)pmp); 1612} 1613 1614static int pma_get_portcounters_ext(struct ib_pma_mad *pmp, 1615 struct ib_device *ibdev, u8 port) 1616{ 1617 struct ib_pma_portcounters_ext *p = 1618 (struct ib_pma_portcounters_ext *)pmp->data; 1619 struct qib_ibport *ibp = to_iport(ibdev, port); 1620 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1621 u64 swords, rwords, spkts, rpkts, xwait; 1622 u8 port_select = p->port_select; 1623 1624 memset(pmp->data, 0, sizeof(pmp->data)); 1625 1626 p->port_select = port_select; 1627 if (pmp->mad_hdr.attr_mod != 0 || port_select != port) { 1628 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 1629 goto bail; 1630 } 1631 1632 qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait); 1633 1634 /* Adjust counters for any resets done. 
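 The z_* values are the baselines recorded the last time these counters were cleared by a PerfMgt Set.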
*/ 1635 swords -= ibp->z_port_xmit_data; 1636 rwords -= ibp->z_port_rcv_data; 1637 spkts -= ibp->z_port_xmit_packets; 1638 rpkts -= ibp->z_port_rcv_packets; 1639 1640 p->port_xmit_data = cpu_to_be64(swords); 1641 p->port_rcv_data = cpu_to_be64(rwords); 1642 p->port_xmit_packets = cpu_to_be64(spkts); 1643 p->port_rcv_packets = cpu_to_be64(rpkts); 1644 p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit); 1645 p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv); 1646 p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit); 1647 p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv); 1648 1649bail: 1650 return reply((struct ib_smp *) pmp); 1651} 1652 1653static int pma_set_portcounters(struct ib_pma_mad *pmp, 1654 struct ib_device *ibdev, u8 port) 1655{ 1656 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) 1657 pmp->data; 1658 struct qib_ibport *ibp = to_iport(ibdev, port); 1659 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1660 struct qib_verbs_counters cntrs; 1661 1662 /* 1663 * Since the HW doesn't support clearing counters, we save the 1664 * current count and subtract it from future responses. 1665 */ 1666 qib_get_counters(ppd, &cntrs); 1667 1668 if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR) 1669 ibp->z_symbol_error_counter = cntrs.symbol_error_counter; 1670 1671 if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY) 1672 ibp->z_link_error_recovery_counter = 1673 cntrs.link_error_recovery_counter; 1674 1675 if (p->counter_select & IB_PMA_SEL_LINK_DOWNED) 1676 ibp->z_link_downed_counter = cntrs.link_downed_counter; 1677 1678 if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS) 1679 ibp->z_port_rcv_errors = cntrs.port_rcv_errors; 1680 1681 if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS) 1682 ibp->z_port_rcv_remphys_errors = 1683 cntrs.port_rcv_remphys_errors; 1684 1685 if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS) 1686 ibp->z_port_xmit_discards = cntrs.port_xmit_discards; 1687 1688 if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS) 1689 ibp->z_local_link_integrity_errors = 1690 cntrs.local_link_integrity_errors; 1691 1692 if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS) 1693 ibp->z_excessive_buffer_overrun_errors = 1694 cntrs.excessive_buffer_overrun_errors; 1695 1696 if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) { 1697 ibp->n_vl15_dropped = 0; 1698 ibp->z_vl15_dropped = cntrs.vl15_dropped; 1699 } 1700 1701 if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA) 1702 ibp->z_port_xmit_data = cntrs.port_xmit_data; 1703 1704 if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA) 1705 ibp->z_port_rcv_data = cntrs.port_rcv_data; 1706 1707 if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS) 1708 ibp->z_port_xmit_packets = cntrs.port_xmit_packets; 1709 1710 if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS) 1711 ibp->z_port_rcv_packets = cntrs.port_rcv_packets; 1712 1713 return pma_get_portcounters(pmp, ibdev, port); 1714} 1715 1716static int pma_set_portcounters_cong(struct ib_pma_mad *pmp, 1717 struct ib_device *ibdev, u8 port) 1718{ 1719 struct qib_ibport *ibp = to_iport(ibdev, port); 1720 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1721 struct qib_devdata *dd = dd_from_ppd(ppd); 1722 struct qib_verbs_counters cntrs; 1723 u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF; 1724 int ret = 0; 1725 unsigned long flags; 1726 1727 qib_get_counters(ppd, &cntrs); 1728 /* Get counter values before we save them */ 1729 ret = pma_get_portcounters_cong(pmp, ibdev, port); 
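	/*
	 * counter_select came from bits 31:24 of attr_mod above; e.g. an
	 * attr_mod of 0xFF000000 selects every group below.  Since the
	 * hardware cannot clear counters, "clearing" resets the xmit-wait
	 * shadow and re-records the z_* baseline snapshots.
	 */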
1730 1731 if (counter_select & IB_PMA_SEL_CONG_XMIT) { 1732 spin_lock_irqsave(&ppd->ibport_data.lock, flags); 1733 ppd->cong_stats.counter = 0; 1734 dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 1735 0x0); 1736 spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); 1737 } 1738 if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) { 1739 ibp->z_port_xmit_data = cntrs.port_xmit_data; 1740 ibp->z_port_rcv_data = cntrs.port_rcv_data; 1741 ibp->z_port_xmit_packets = cntrs.port_xmit_packets; 1742 ibp->z_port_rcv_packets = cntrs.port_rcv_packets; 1743 } 1744 if (counter_select & IB_PMA_SEL_CONG_ALL) { 1745 ibp->z_symbol_error_counter = 1746 cntrs.symbol_error_counter; 1747 ibp->z_link_error_recovery_counter = 1748 cntrs.link_error_recovery_counter; 1749 ibp->z_link_downed_counter = 1750 cntrs.link_downed_counter; 1751 ibp->z_port_rcv_errors = cntrs.port_rcv_errors; 1752 ibp->z_port_rcv_remphys_errors = 1753 cntrs.port_rcv_remphys_errors; 1754 ibp->z_port_xmit_discards = 1755 cntrs.port_xmit_discards; 1756 ibp->z_local_link_integrity_errors = 1757 cntrs.local_link_integrity_errors; 1758 ibp->z_excessive_buffer_overrun_errors = 1759 cntrs.excessive_buffer_overrun_errors; 1760 ibp->n_vl15_dropped = 0; 1761 ibp->z_vl15_dropped = cntrs.vl15_dropped; 1762 } 1763 1764 return ret; 1765} 1766 1767static int pma_set_portcounters_ext(struct ib_pma_mad *pmp, 1768 struct ib_device *ibdev, u8 port) 1769{ 1770 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) 1771 pmp->data; 1772 struct qib_ibport *ibp = to_iport(ibdev, port); 1773 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1774 u64 swords, rwords, spkts, rpkts, xwait; 1775 1776 qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait); 1777 1778 if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA) 1779 ibp->z_port_xmit_data = swords; 1780 1781 if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA) 1782 ibp->z_port_rcv_data = rwords; 1783 1784 if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS) 1785 ibp->z_port_xmit_packets = spkts; 1786 1787 if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS) 1788 ibp->z_port_rcv_packets = rpkts; 1789 1790 if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS) 1791 ibp->n_unicast_xmit = 0; 1792 1793 if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS) 1794 ibp->n_unicast_rcv = 0; 1795 1796 if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS) 1797 ibp->n_multicast_xmit = 0; 1798 1799 if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS) 1800 ibp->n_multicast_rcv = 0; 1801 1802 return pma_get_portcounters_ext(pmp, ibdev, port); 1803} 1804 1805static int process_subn(struct ib_device *ibdev, int mad_flags, 1806 u8 port, struct ib_mad *in_mad, 1807 struct ib_mad *out_mad) 1808{ 1809 struct ib_smp *smp = (struct ib_smp *)out_mad; 1810 struct qib_ibport *ibp = to_iport(ibdev, port); 1811 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1812 int ret; 1813 1814 *out_mad = *in_mad; 1815 if (smp->class_version != 1) { 1816 smp->status |= IB_SMP_UNSUP_VERSION; 1817 ret = reply(smp); 1818 goto bail; 1819 } 1820 1821 ret = check_mkey(ibp, smp, mad_flags); 1822 if (ret) { 1823 u32 port_num = be32_to_cpu(smp->attr_mod); 1824 1825 /* 1826 * If this is a get/set portinfo, we already check the 1827 * M_Key if the MAD is for another port and the M_Key 1828 * is OK on the receiving port. This check is needed 1829 * to increment the error counters when the M_Key 1830 * fails to match on *both* ports. 
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
		    (smp->method == IB_MGMT_METHOD_GET ||
		     smp->method == IB_MGMT_METHOD_SET) &&
		    port_num && port_num <= ibdev->phys_port_cnt &&
		    port != port_num)
			(void) check_mkey(to_iport(ibdev, port_num), smp, 0);
		goto bail;
	}

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_NODE_DESC:
			ret = subn_get_nodedescription(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_NODE_INFO:
			ret = subn_get_nodeinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_GUID_INFO:
			ret = subn_get_guidinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = subn_get_portinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = subn_get_pkeytable(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			ret = subn_get_sl_to_vl(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_VL_ARB_TABLE:
			ret = subn_get_vl_arb(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (ibp->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_GUID_INFO:
			ret = subn_set_guidinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = subn_set_portinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = subn_set_pkeytable(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			ret = subn_set_sl_to_vl(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_VL_ARB_TABLE:
			ret = subn_set_vl_arb(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (ibp->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (smp->attr_id == IB_SMP_ATTR_NOTICE)
			ret = subn_trap_repress(smp, ibdev, port);
		else {
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
		}
		goto bail;

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_REPORT_RESP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	case IB_MGMT_METHOD_SEND:
		if (ib_get_smp_direction(smp) &&
		    smp->attr_id == QIB_VENDOR_IPG) {
			ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
					      smp->data[0]);
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		} else
			ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	default:
		smp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply(smp);
	}

bail:
	return ret;
}

static int process_perf(struct ib_device *ibdev, u8 port,
			struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
	int ret;

	*out_mad = *in_mad;
	if (pmp->mad_hdr.class_version != 1) {
		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	switch (pmp->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = pma_get_classportinfo(pmp, ibdev);
			goto bail;
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_get_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT:
			ret = pma_get_portsamplesresult(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
			ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_get_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_get_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_get_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_set_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_set_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_set_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_set_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	default:
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) pmp);
	}

bail:
	return ret;
}

/**
 * qib_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
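 * Otherwise the return value carries additional IB_MAD_RESULT_* flags
 * (for example IB_MAD_RESULT_REPLY when a response has been built in
 * @out_mad, or IB_MAD_RESULT_CONSUMED when the MAD should go no further).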
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
		    struct ib_wc *in_wc, struct ib_grh *in_grh,
		    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	int ret;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
		goto bail;

	case IB_MGMT_CLASS_PERF_MGMT:
		ret = process_perf(ibdev, port, in_mad, out_mad);
		goto bail;

	default:
		ret = IB_MAD_RESULT_SUCCESS;
	}

bail:
	return ret;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}

static void xmit_wait_timer_func(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
	struct qib_devdata *dd = dd_from_ppd(ppd);
	unsigned long flags;
	u8 status;

	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			/* save counter cache */
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		} else
			goto done;
	}
	ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
	dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
done:
	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
	mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
}

int qib_create_agents(struct qib_ibdev *dev)
{
	struct qib_devdata *dd = dd_from_dev(dev);
	struct ib_mad_agent *agent;
	struct qib_ibport *ibp;
	int p;
	int ret;

	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
					      NULL, 0, send_handler,
					      NULL, NULL);
		if (IS_ERR(agent)) {
			ret = PTR_ERR(agent);
			goto err;
		}

		/* Initialize xmit_wait structure */
		dd->pport[p].cong_stats.counter = 0;
		init_timer(&dd->pport[p].cong_stats.timer);
		dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
		dd->pport[p].cong_stats.timer.data =
			(unsigned long)(&dd->pport[p]);
		dd->pport[p].cong_stats.timer.expires = 0;
		add_timer(&dd->pport[p].cong_stats.timer);

		ibp->send_agent = agent;
	}

	return 0;

err:
	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		if (ibp->send_agent) {
			agent = ibp->send_agent;
			ibp->send_agent = NULL;
			ib_unregister_mad_agent(agent);
		}
	}

	return ret;
}

void qib_free_agents(struct qib_ibdev *dev)
{
	struct qib_devdata *dd = dd_from_dev(dev);
	struct ib_mad_agent *agent;
	struct qib_ibport *ibp;
	int p;

	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		if (ibp->send_agent) {
			agent = ibp->send_agent;
			ibp->send_agent = NULL;
			ib_unregister_mad_agent(agent);
		}
		if (ibp->sm_ah) {
			ib_destroy_ah(&ibp->sm_ah->ibah);
			ibp->sm_ah = NULL;
		}
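		/*
		 * The congestion-stats timer is only running if
		 * qib_create_agents() set it up (which also fills in
		 * timer.data), so use that as the guard before
		 * del_timer_sync().
		 */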
		if (dd->pport[p].cong_stats.timer.data)
			del_timer_sync(&dd->pport[p].cong_stats.timer);
	}
}