/* ipath_intr.c, revision fba75200ad92892bf32d8d6f1443c6f1e4f48676 */

/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>

#include "ipath_kernel.h"
#include "ips_common.h"
#include "ipath_layer.h"

/* These are all rcv-related errors which we want to count for stats */
#define E_SUM_PKTERRS \
	(INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \
	 INFINIPATH_E_RBADVERSION | INFINIPATH_E_RHDR | \
	 INFINIPATH_E_RLONGPKTLEN | INFINIPATH_E_RSHORTPKTLEN | \
	 INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RMINPKTLEN | \
	 INFINIPATH_E_RFORMATERR | INFINIPATH_E_RUNSUPVL | \
	 INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_REBP)

/* These are all send-related errors which we want to count for stats */
#define E_SUM_ERRS \
	(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | \
	 INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
	 INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNSUPVL | \
	 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
	 INFINIPATH_E_INVALIDADDR)
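
/*
 * Both summary masks above are used with a plain bitwise AND in
 * handle_errors() below: E_SUM_PKTERRS bumps ipath_stats.sps_pkterrs
 * and forces a pass through the receive path, while E_SUM_ERRS bumps
 * ipath_stats.sps_errs and triggers handle_e_sum_errs().
 */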

/*
 * These are errors that can occur when the link changes state while
 * a packet is being sent or received.  This doesn't cover things
 * like EBP or VCRC that can be the result of a send in progress when
 * the link changes state, so that we receive a "known bad" packet.
 */
#define E_SUM_LINK_PKTERRS \
	(INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
	 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
	 INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RMINPKTLEN | \
	 INFINIPATH_E_RUNEXPCHAR)

static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
{
	unsigned long sbuf[4];
	u64 ignore_this_time = 0;
	u32 piobcnt;

	/* it's possible that sendbuffererror is valid here */
	piobcnt = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
	/* read these before writing errorclear */
	sbuf[0] = ipath_read_kreg64(
		dd, dd->ipath_kregs->kr_sendbuffererror);
	sbuf[1] = ipath_read_kreg64(
		dd, dd->ipath_kregs->kr_sendbuffererror + 1);
	if (piobcnt > 128) {
		/*
		 * the error bitmap spans four 64-bit registers when
		 * there are more than 128 PIO buffers
		 */
		sbuf[2] = ipath_read_kreg64(
			dd, dd->ipath_kregs->kr_sendbuffererror + 2);
		sbuf[3] = ipath_read_kreg64(
			dd, dd->ipath_kregs->kr_sendbuffererror + 3);
	}

	if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
		int i;

		ipath_cdbg(PKT, "SendbufErrs %lx %lx ", sbuf[0], sbuf[1]);
		if (ipath_debug & __IPATH_PKTDBG && piobcnt > 128)
			printk("%lx %lx ", sbuf[2], sbuf[3]);
		for (i = 0; i < piobcnt; i++) {
			if (test_bit(i, sbuf)) {
				u32 __iomem *piobuf;
				if (i < dd->ipath_piobcnt2k)
					piobuf = (u32 __iomem *)
						(dd->ipath_pio2kbase +
						 i * dd->ipath_palign);
				else
					piobuf = (u32 __iomem *)
						(dd->ipath_pio4kbase +
						 (i - dd->ipath_piobcnt2k) *
						 dd->ipath_4kalign);

				ipath_cdbg(PKT,
					   "PIObuf[%u] @%p pbc is %x; ",
					   i, piobuf, readl(piobuf));

				ipath_disarm_piobufs(dd, i, 1);
			}
		}
		if (ipath_debug & __IPATH_PKTDBG)
			printk("\n");
	}
	if ((errs & E_SUM_LINK_PKTERRS) &&
	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
		/*
		 * This can happen when the SMA is trying to bring the link
		 * up, but the IB link changes state at the "wrong" time.
		 * The IB logic then complains that the packet isn't
		 * valid.  We don't want to confuse people, so we just
		 * don't print them, except at debug level.
		 */
		ipath_dbg("Ignoring packet errors %llx, because link not "
			  "ACTIVE\n", (unsigned long long) errs);
		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
	}

	return ignore_this_time;
}

/* return the strings for the most common link states */
static char *ib_linkstate(u32 linkstate)
{
	char *ret;

	switch (linkstate) {
	case IPATH_IBSTATE_INIT:
		ret = "Init";
		break;
	case IPATH_IBSTATE_ARM:
		ret = "Arm";
		break;
	case IPATH_IBSTATE_ACTIVE:
		ret = "Active";
		break;
	default:
		ret = "Down";
	}

	return ret;
}
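
/*
 * Handle a change in the IB link state: read the new state from
 * kr_ibcstatus, log it as appropriate, update dd->ipath_flags and the
 * shared status word to match, and save the raw value in
 * dd->ipath_lastibcstat so the next change can be compared against it.
 */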
static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
				     ipath_err_t errs, int noprint)
{
	u64 val;
	u32 ltstate, lstate;

	/*
	 * Even if diags are enabled, we want to notice LINKINIT, etc.
	 * We just don't want to change the LED state, or
	 * dd->ipath_kregs->kr_ibcctrl.
	 */
	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
	lstate = val & IPATH_IBSTATE_MASK;

	/*
	 * This is confusing enough when it happens that we want to
	 * always put it on the console and in the logs.  If it was a
	 * requested state change, we'll have already cleared the flags,
	 * so we won't print this warning.
	 */
	if ((lstate != IPATH_IBSTATE_ARM && lstate != IPATH_IBSTATE_ACTIVE)
	    && (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
		dev_info(&dd->pcidev->dev, "Link state changed from %s "
			 "to %s\n",
			 (dd->ipath_flags & IPATH_LINKARMED) ? "ARM" : "ACTIVE",
			 ib_linkstate(lstate));
		/*
		 * Flush all queued sends when link went to DOWN or INIT,
		 * to be sure that they don't block SMA and other MAD packets
		 */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 INFINIPATH_S_ABORT);
		ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
				     (unsigned)(dd->ipath_piobcnt2k +
						dd->ipath_piobcnt4k) -
				     dd->ipath_lastport_piobuf);
	} else if (lstate == IPATH_IBSTATE_INIT ||
		   lstate == IPATH_IBSTATE_ARM ||
		   lstate == IPATH_IBSTATE_ACTIVE) {
		/*
		 * Print at SMA debug level only if the state actually
		 * changed; otherwise plain debug (sometimes we want to
		 * know there was no change, usually not).
		 */
		if (lstate == ((unsigned) dd->ipath_lastibcstat
			       & IPATH_IBSTATE_MASK)) {
			ipath_dbg("Status change intr but no change (%s)\n",
				  ib_linkstate(lstate));
		} else
			ipath_cdbg(SMA, "Unit %u link state %s, last "
				   "was %s\n", dd->ipath_unit,
				   ib_linkstate(lstate),
				   ib_linkstate((unsigned)
						dd->ipath_lastibcstat
						& IPATH_IBSTATE_MASK));
	} else {
		lstate = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
		if (lstate == IPATH_IBSTATE_INIT ||
		    lstate == IPATH_IBSTATE_ARM ||
		    lstate == IPATH_IBSTATE_ACTIVE)
			ipath_cdbg(SMA, "Unit %u link state down"
				   " (state 0x%x), from %s\n",
				   dd->ipath_unit,
				   (u32) val & IPATH_IBSTATE_MASK,
				   ib_linkstate(lstate));
		else
			ipath_cdbg(VERBOSE, "Unit %u link state changed "
				   "to 0x%x from down (%x)\n",
				   dd->ipath_unit, (u32) val, lstate);
	}
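
	/*
	 * From here on, ltstate and lstate hold the raw link training
	 * state and link state fields of ibcstatus, rather than the
	 * IPATH_IBSTATE_* summary values used above.
	 */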
	ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
		INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
	lstate = (val >> INFINIPATH_IBCS_LINKSTATE_SHIFT) &
		INFINIPATH_IBCS_LINKSTATE_MASK;

	if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
	    ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
		u32 last_ltstate;

		/*
		 * Ignore cycling back and forth from Polling.Active
		 * to Polling.Quiet while waiting for the other end of
		 * the link to come up.  We will cycle back and forth
		 * between them if no cable is plugged in,
		 * the other device is powered off or disabled, etc.
		 */
		last_ltstate = (dd->ipath_lastibcstat >>
				INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT)
			& INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
		if (last_ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE
		    || last_ltstate ==
		    INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
			if (dd->ipath_ibpollcnt > 40) {
				dd->ipath_flags |= IPATH_NOCABLE;
				*dd->ipath_statusp |=
					IPATH_STATUS_IB_NOCABLE;
			} else
				dd->ipath_ibpollcnt++;
			goto skip_ibchange;
		}
	}
	dd->ipath_ibpollcnt = 0; /* not in Polling.Active or Polling.Quiet */
	ipath_stats.sps_iblink++;
	if (ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
		dd->ipath_flags |= IPATH_LINKDOWN;
		dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
				     | IPATH_LINKACTIVE |
				     IPATH_LINKARMED);
		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
		dd->ipath_lli_counter = 0;
		if (!noprint) {
			if (((dd->ipath_lastibcstat >>
			      INFINIPATH_IBCS_LINKSTATE_SHIFT) &
			     INFINIPATH_IBCS_LINKSTATE_MASK)
			    == INFINIPATH_IBCS_L_STATE_ACTIVE)
				/* if from up to down, be more vocal */
				ipath_cdbg(SMA,
					   "Unit %u link now down (%s)\n",
					   dd->ipath_unit,
					   ipath_ibcstatus_str[ltstate]);
			else
				ipath_cdbg(VERBOSE, "Unit %u link is "
					   "down (%s)\n", dd->ipath_unit,
					   ipath_ibcstatus_str[ltstate]);
		}

		dd->ipath_f_setextled(dd, lstate, ltstate);
	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ACTIVE) {
		dd->ipath_flags |= IPATH_LINKACTIVE;
		dd->ipath_flags &=
			~(IPATH_LINKUNK | IPATH_LINKINIT | IPATH_LINKDOWN |
			  IPATH_LINKARMED | IPATH_NOCABLE);
		*dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
		*dd->ipath_statusp |=
			IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
		dd->ipath_f_setextled(dd, lstate, ltstate);

		__ipath_layer_intr(dd, IPATH_LAYER_INT_IF_UP);
	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) {
		/*
		 * Set INIT and DOWN.  Down is checked by most of the other
		 * code, but INIT is useful to know in a few places.
		 */
		dd->ipath_flags |= IPATH_LINKINIT | IPATH_LINKDOWN;
		dd->ipath_flags &=
			~(IPATH_LINKUNK | IPATH_LINKACTIVE | IPATH_LINKARMED
			  | IPATH_NOCABLE);
		*dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
					| IPATH_STATUS_IB_READY);
		dd->ipath_f_setextled(dd, lstate, ltstate);
	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ARM) {
		dd->ipath_flags |= IPATH_LINKARMED;
		dd->ipath_flags &=
			~(IPATH_LINKUNK | IPATH_LINKDOWN | IPATH_LINKINIT |
			  IPATH_LINKACTIVE | IPATH_NOCABLE);
		*dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
					| IPATH_STATUS_IB_READY);
		dd->ipath_f_setextled(dd, lstate, ltstate);
	} else {
		if (!noprint)
			ipath_dbg("IBstatuschange unit %u: %s (%x)\n",
				  dd->ipath_unit,
				  ipath_ibcstatus_str[ltstate], ltstate);
	}
skip_ibchange:
	dd->ipath_lastibcstat = val;
}
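
/*
 * Once the rate limiting in handle_frequent_errors() lets messages
 * through again, report how many were suppressed and what the last
 * error bits were, unless only the IBSTATUSCHANGED bit was set.
 */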
static void handle_supp_msgs(struct ipath_devdata *dd,
			     unsigned supp_msgs, char msg[512])
{
	/*
	 * Print the message unless it's ibc status change only, which
	 * happens so often we never want to count it.
	 */
	if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
		/*
		 * msg decays to a plain pointer in this function, so
		 * sizeof msg would be the pointer size; pass the real
		 * buffer size instead.
		 */
		ipath_decode_err(msg, 512, dd->ipath_lasterror &
				 ~INFINIPATH_E_IBSTATUSCHANGED);
		if (dd->ipath_lasterror &
		    ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL))
			ipath_dev_err(dd, "Suppressed %u messages for "
				      "fast-repeating errors (%s) (%llx)\n",
				      supp_msgs, msg,
				      (unsigned long long)
				      dd->ipath_lasterror);
		else {
			/*
			 * rcvegrfull and rcvhdrqfull are "normal", for some
			 * types of processes (mostly benchmarks) that send
			 * huge numbers of messages, while not processing
			 * them.  So only complain about these at debug
			 * level.
			 */
			ipath_dbg("Suppressed %u messages for %s\n",
				  supp_msgs, msg);
		}
	}
}

static unsigned handle_frequent_errors(struct ipath_devdata *dd,
				       ipath_err_t errs, char msg[512],
				       int *noprint)
{
	unsigned long nc;
	static unsigned long nextmsg_time;
	static unsigned nmsgs, supp_msgs;

	/*
	 * Throttle back "fast" messages to no more than 10 per 5 seconds.
	 * This isn't perfect, but it's a reasonable heuristic.  If we get
	 * more than 10, give a 6x longer delay.
	 */
	nc = jiffies;
	if (nmsgs > 10) {
		if (time_before(nc, nextmsg_time)) {
			*noprint = 1;
			if (!supp_msgs++)
				nextmsg_time = nc + HZ * 3;
		} else if (supp_msgs) {
			handle_supp_msgs(dd, supp_msgs, msg);
			supp_msgs = 0;
			nmsgs = 0;
		}
	} else if (!nmsgs++ || time_after(nc, nextmsg_time))
		nextmsg_time = nc + HZ / 2;

	return supp_msgs;
}
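
/*
 * In effect: roughly the first 10 errors in a burst each print and
 * (re)arm a half-second window; past 10, an error landing inside the
 * window is suppressed and the window stretches to 3 seconds (the
 * "6x" above), until the burst subsides and handle_supp_msgs()
 * reports how many were dropped.
 */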
static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
{
	char msg[512];
	u64 ignore_this_time = 0;
	int i;
	int chkerrpkts = 0, noprint = 0;
	unsigned supp_msgs;

	supp_msgs = handle_frequent_errors(dd, errs, msg, &noprint);

	/*
	 * don't report errors that are masked (includes those always
	 * ignored)
	 */
	errs &= ~dd->ipath_maskederrs;

	/* do these first, they are most important */
	if (errs & INFINIPATH_E_HARDWARE) {
		/* reuse same msg buf */
		dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg);
	}

	if (!noprint && (errs & ~infinipath_e_bitsextant))
		ipath_dev_err(dd, "error interrupt with unknown errors "
			      "%llx set\n", (unsigned long long)
			      (errs & ~infinipath_e_bitsextant));

	if (errs & E_SUM_ERRS)
		ignore_this_time = handle_e_sum_errs(dd, errs);
	else if ((errs & E_SUM_LINK_PKTERRS) &&
		 !(dd->ipath_flags & IPATH_LINKACTIVE)) {
		/*
		 * This can happen when the SMA is trying to bring the link
		 * up, but the IB link changes state at the "wrong" time.
		 * The IB logic then complains that the packet isn't
		 * valid.  We don't want to confuse people, so we just
		 * don't print them, except at debug level.
		 */
		ipath_dbg("Ignoring packet errors %llx, because link not "
			  "ACTIVE\n", (unsigned long long) errs);
		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
	}

	if (supp_msgs == 250000) {
		/*
		 * It's not entirely reasonable to assume that the errors
		 * set in the last clear period are all responsible for
		 * the problem, but the alternative is to assume it's only
		 * the ones on this particular interrupt, which also isn't
		 * great.
		 */
		dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
				 ~dd->ipath_maskederrs);
		ipath_decode_err(msg, sizeof msg,
				 (dd->ipath_maskederrs & ~dd->
				  ipath_ignorederrs));

		if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &
		    ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL))
			ipath_dev_err(dd, "Disabling error(s) %llx because "
				      "occurring too frequently (%s)\n",
				      (unsigned long long)
				      (dd->ipath_maskederrs &
				       ~dd->ipath_ignorederrs), msg);
		else {
			/*
			 * rcvegrfull and rcvhdrqfull are "normal",
			 * for some types of processes (mostly benchmarks)
			 * that send huge numbers of messages, while not
			 * processing them.  So only complain about
			 * these at debug level.
			 */
			ipath_dbg("Disabling frequent queue full errors "
				  "(%s)\n", msg);
		}

		/*
		 * Re-enable the masked errors after around 3 minutes, in
		 * ipath_get_faststats().  If we have a series of fast
		 * repeating but different errors, the interval will keep
		 * stretching out, but that's OK, since that situation is
		 * pretty catastrophic anyway.
		 */
		dd->ipath_unmasktime = jiffies + HZ * 180;
	}

	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, errs);
	if (ignore_this_time)
		errs &= ~ignore_this_time;
	if (errs & ~dd->ipath_lasterror) {
		errs &= ~dd->ipath_lasterror;
		/* never suppress duplicate hwerrors or ibstatuschange */
		dd->ipath_lasterror |= errs &
			~(INFINIPATH_E_HARDWARE |
			  INFINIPATH_E_IBSTATUSCHANGED);
	}
	if (!errs)
		return 0;

	if (!noprint)
		/*
		 * the ones we mask off are handled specially below
		 * or above
		 */
		ipath_decode_err(msg, sizeof msg,
				 errs & ~(INFINIPATH_E_IBSTATUSCHANGED |
					  INFINIPATH_E_RRCVEGRFULL |
					  INFINIPATH_E_RRCVHDRFULL |
					  INFINIPATH_E_HARDWARE));
	else
		/* so we don't need if (!noprint) at strlcat's below */
		*msg = 0;

	if (errs & E_SUM_PKTERRS) {
		ipath_stats.sps_pkterrs++;
		chkerrpkts = 1;
	}
	if (errs & E_SUM_ERRS)
		ipath_stats.sps_errs++;

	if (errs & (INFINIPATH_E_RICRC | INFINIPATH_E_RVCRC)) {
		ipath_stats.sps_crcerrs++;
		chkerrpkts = 1;
	}

	/*
	 * We don't want to print these two as they happen, or we can make
	 * the situation even worse, because it takes so long to print
	 * messages to serial consoles.  Kernel ports get printed from
	 * fast_stats, no more than every 5 seconds; user ports get printed
	 * on close.
	 */
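	/*
	 * A receive header queue is full when the head has advanced to
	 * just behind the tail: either hd == tl + 1, or the head has
	 * wrapped to 0 while the tail sits on the last entry
	 * (dd->ipath_hdrqlast).  That is what the comparison below checks.
	 */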
	if (errs & INFINIPATH_E_RRCVHDRFULL) {
		int any;
		u32 hd, tl;
		ipath_stats.sps_hdrqfull++;
		for (any = i = 0; i < dd->ipath_cfgports; i++) {
			struct ipath_portdata *pd = dd->ipath_pd[i];
			if (i == 0) {
				hd = dd->ipath_port0head;
				tl = (u32) le64_to_cpu(
					*dd->ipath_hdrqtailptr);
			} else if (pd && pd->port_cnt &&
				   pd->port_rcvhdrtail_kvaddr) {
				/*
				 * don't report same point multiple times,
				 * except kernel
				 */
				tl = (u32) *pd->port_rcvhdrtail_kvaddr;
				if (tl == dd->ipath_lastrcvhdrqtails[i])
					continue;
				hd = ipath_read_ureg32(dd, ur_rcvhdrhead,
						       i);
			} else
				continue;
			if (hd == (tl + 1) ||
			    (!hd && tl == dd->ipath_hdrqlast)) {
				if (i == 0)
					chkerrpkts = 1;
				dd->ipath_lastrcvhdrqtails[i] = tl;
				pd->port_hdrqfull++;
			}
		}
	}
	if (errs & INFINIPATH_E_RRCVEGRFULL) {
		/*
		 * since this is of less importance and not likely to
		 * happen without also getting hdrfull, only count
		 * occurrences; don't check each port (or even the kernel
		 * vs user)
		 */
		ipath_stats.sps_etidfull++;
		if (dd->ipath_port0head !=
		    (u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
			chkerrpkts = 1;
	}

	/*
	 * Do this before IBSTATUSCHANGED, in case both bits are set in a
	 * single interrupt; we want the STATUSCHANGE to "win", so that
	 * our internal copy of the state machine stays correct.
	 */
	if (errs & INFINIPATH_E_RIBLOSTLINK) {
		/*
		 * force through block below
		 */
		errs |= INFINIPATH_E_IBSTATUSCHANGED;
		ipath_stats.sps_iblink++;
		dd->ipath_flags |= IPATH_LINKDOWN;
		dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
				     | IPATH_LINKARMED | IPATH_LINKACTIVE);
		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
		if (!noprint) {
			u64 st = ipath_read_kreg64(
				dd, dd->ipath_kregs->kr_ibcstatus);

			ipath_dbg("Lost link, link now down (%s)\n",
				  ipath_ibcstatus_str[st & 0xf]);
		}
	}
	if (errs & INFINIPATH_E_IBSTATUSCHANGED)
		handle_e_ibstatuschanged(dd, errs, noprint);

	if (errs & INFINIPATH_E_RESET) {
		if (!noprint)
			ipath_dev_err(dd, "Got reset, requires re-init "
				      "(unload and reload driver)\n");
		dd->ipath_flags &= ~IPATH_INITTED; /* needs re-init */
		/* mark as having had error */
		*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
		*dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF;
	}

	if (!noprint && *msg)
		ipath_dev_err(dd, "%s error\n", msg);
	if (dd->ipath_sma_state_wanted & dd->ipath_flags) {
		ipath_cdbg(VERBOSE, "sma wanted state %x, iflags now %x, "
			   "waking\n", dd->ipath_sma_state_wanted,
			   dd->ipath_flags);
		wake_up_interruptible(&ipath_sma_state_wait);
	}

	return chkerrpkts;
}
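
/*
 * The chkerrpkts value returned above tells ipath_intr() to run
 * ipath_kreceive() even when no normal receive interrupt is pending,
 * so that packets flagged in error are drained from the port 0 queue.
 */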

/* this is separate to allow for better optimization of ipath_intr() */

static void ipath_bad_intr(struct ipath_devdata *dd, u32 *unexpectp)
{
	/*
	 * These sometimes happen during driver init and unload; we don't
	 * want to process any interrupts at that point.
	 */

	/* this is just a bandaid, not a fix, if something goes badly
	 * wrong */
	if (++*unexpectp > 100) {
		if (++*unexpectp > 105) {
			/*
			 * ok, we must be taking somebody else's interrupts,
			 * due to a messed up mptable and/or PIRQ table, so
			 * unregister the interrupt.  We've seen this during
			 * linuxbios development work, and it may happen in
			 * the future again.
			 */
			if (dd->pcidev && dd->pcidev->irq) {
				ipath_dev_err(dd, "Now %u unexpected "
					      "interrupts, unregistering "
					      "interrupt handler\n",
					      *unexpectp);
				ipath_dbg("free_irq of irq %x\n",
					  dd->pcidev->irq);
				free_irq(dd->pcidev->irq, dd);
			}
		}
		if (ipath_read_kreg32(dd, dd->ipath_kregs->kr_intmask)) {
			ipath_dev_err(dd, "%u unexpected interrupts, "
				      "disabling interrupts completely\n",
				      *unexpectp);
			/*
			 * disable all interrupts, something is very wrong
			 */
			ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
					 0ULL);
		}
	} else if (*unexpectp > 1)
		ipath_dbg("Interrupt when not ready, should not happen, "
			  "ignoring\n");
}

static void ipath_bad_regread(struct ipath_devdata *dd)
{
	static int allbits;

	/* separate routine, for better optimization of ipath_intr() */

	/*
	 * We print the message and disable interrupts, in hope of
	 * having a better chance of debugging the problem.
	 */
	ipath_dev_err(dd,
		      "Read of interrupt status failed (all bits set)\n");
	if (allbits++) {
		/* disable all interrupts, something is very wrong */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
		if (allbits == 2) {
			ipath_dev_err(dd, "Still bad interrupt status, "
				      "unregistering interrupt\n");
			free_irq(dd->pcidev->irq, dd);
		} else if (allbits > 2) {
			if ((allbits % 10000) == 0)
				printk(".");
		} else
			ipath_dev_err(dd, "Disabling interrupts, "
				      "multiple errors\n");
	}
}

static void handle_port_pioavail(struct ipath_devdata *dd)
{
	u32 i;
	/*
	 * start from port 1, since for now port 0 is never using
	 * wait_event for PIO
	 */
	for (i = 1; dd->ipath_portpiowait && i < dd->ipath_cfgports; i++) {
		struct ipath_portdata *pd = dd->ipath_pd[i];

		if (pd && pd->port_cnt &&
		    dd->ipath_portpiowait & (1U << i)) {
			clear_bit(i, &dd->ipath_portpiowait);
			if (test_bit(IPATH_PORT_WAITING_PIO,
				     &pd->port_flag)) {
				clear_bit(IPATH_PORT_WAITING_PIO,
					  &pd->port_flag);
				wake_up_interruptible(&pd->port_wait);
			}
		}
	}
}

static void handle_layer_pioavail(struct ipath_devdata *dd)
{
	int ret;

	ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
	if (ret > 0)
		goto set;

	ret = __ipath_verbs_piobufavail(dd);
	if (ret > 0)
		goto set;

	return;
set:
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);
}
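
/*
 * PIO-buffer-available interrupts are fanned out in two steps in
 * ipath_intr(): handle_port_pioavail() wakes user ports sleeping on
 * port_wait, and handle_layer_pioavail() above lets the layered
 * (SMA/verbs) senders continue, re-arming IPATH_S_PIOINTBUFAVAIL if
 * either of those consumers still has data to send.
 */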

/*
 * Handle receive interrupts for user ports; this means a user
 * process was waiting for a packet to arrive, and didn't want
 * to poll
 */
static void handle_urcv(struct ipath_devdata *dd, u32 istat)
{
	u64 portr;
	int i;
	int rcvdint = 0;

	portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) &
		 infinipath_i_rcvavail_mask)
		| ((istat >> INFINIPATH_I_RCVURG_SHIFT) &
		   infinipath_i_rcvurg_mask);
	for (i = 1; i < dd->ipath_cfgports; i++) {
		struct ipath_portdata *pd = dd->ipath_pd[i];
		if (portr & (1 << i) && pd && pd->port_cnt &&
		    test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
			int rcbit;
			clear_bit(IPATH_PORT_WAITING_RCV,
				  &pd->port_flag);
			rcbit = i + INFINIPATH_R_INTRAVAIL_SHIFT;
			/* clear_bit() takes a bit number, not a mask */
			clear_bit(rcbit, &dd->ipath_rcvctrl);
			wake_up_interruptible(&pd->port_wait);
			rcvdint = 1;
		}
	}
	if (rcvdint) {
		/*
		 * only want to take one interrupt, so turn off the rcv
		 * interrupt for all the ports that we did the wakeup on
		 * (but never for kernel port)
		 */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
				 dd->ipath_rcvctrl);
	}
}

irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
{
	struct ipath_devdata *dd = data;
	u32 istat, chk0rcv = 0;
	ipath_err_t estat = 0;
	irqreturn_t ret;
	u32 oldhead, curtail;
	static unsigned unexpected = 0;
	static const u32 port0rbits = (1U << INFINIPATH_I_RCVAVAIL_SHIFT) |
		(1U << INFINIPATH_I_RCVURG_SHIFT);

	ipath_stats.sps_ints++;

	if (!(dd->ipath_flags & IPATH_PRESENT)) {
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;
	}

	/*
	 * This needs to be flags & initted, not statusp, so we keep
	 * taking interrupts even after link goes down, etc.
	 * Also, we *must* clear the interrupt at some point, or we won't
	 * take it again, which can be real bad for errors, etc...
	 */

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		ipath_bad_intr(dd, &unexpected);
		ret = IRQ_NONE;
		goto bail;
	}

	/*
	 * We try to avoid reading the interrupt status register, since
	 * that's a PIO read, and stalls the processor for up to about
	 * ~0.25 usec.  The idea is that if we processed a port0 packet,
	 * we blindly clear the port 0 receive interrupt bits, and nothing
	 * else, then return.  If other interrupts are pending, the chip
	 * will re-interrupt us as soon as we write the intclear register.
	 * We then won't process any more kernel packets (if not the 2nd
	 * time, then the 3rd or 4th) and we'll then handle the other
	 * interrupts.  We clear the interrupts first so that we don't
	 * lose the interrupt for later packets that arrive while we are
	 * processing.
	 */
	oldhead = dd->ipath_port0head;
	curtail = (u32) le64_to_cpu(*dd->ipath_hdrqtailptr);
	if (oldhead != curtail) {
		if (dd->ipath_flags & IPATH_GPIO_INTR) {
			ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
					 (u64) (1 << 2));
			istat = port0rbits | INFINIPATH_I_GPIO;
		} else
			istat = port0rbits;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
		ipath_kreceive(dd);
		if (oldhead != dd->ipath_port0head) {
			ipath_stats.sps_fastrcvint++;
			goto done;
		}
	}
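
	/*
	 * The fast path above either wasn't taken or found nothing new,
	 * so pay for the PIO read of the interrupt status register and
	 * fall into the full dispatch below.
	 */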
	istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);

	if (unlikely(!istat)) {
		ipath_stats.sps_nullintr++;
		ret = IRQ_NONE; /* not our interrupt, or already handled */
		goto bail;
	}
	if (unlikely(istat == -1)) {
		ipath_bad_regread(dd);
		/* don't know if it was our interrupt or not */
		ret = IRQ_NONE;
		goto bail;
	}

	if (unexpected)
		unexpected = 0;

	if (unlikely(istat & ~infinipath_i_bitsextant))
		ipath_dev_err(dd,
			      "interrupt with unknown interrupts %x set\n",
			      istat & (u32) ~infinipath_i_bitsextant);
	else
		ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);

	if (unlikely(istat & INFINIPATH_I_ERROR)) {
		ipath_stats.sps_errints++;
		estat = ipath_read_kreg64(dd,
					  dd->ipath_kregs->kr_errorstatus);
		if (!estat)
			dev_info(&dd->pcidev->dev, "error interrupt (%x), "
				 "but no error bits set!\n", istat);
		else if (estat == -1LL)
			/*
			 * should we try clearing all, or hope next read
			 * works?
			 */
			ipath_dev_err(dd, "Read of error status failed "
				      "(all bits set); ignoring\n");
		else
			if (handle_errors(dd, estat))
				/* force calling ipath_kreceive() */
				chk0rcv = 1;
	}

	if (istat & INFINIPATH_I_GPIO) {
		/*
		 * Packets are available in the port 0 rcv queue.
		 * Eventually this needs to be generalized to check
		 * IPATH_GPIO_INTR, and the specific GPIO bit, if
		 * GPIO interrupts are used for anything else.
		 */
		if (unlikely(!(dd->ipath_flags & IPATH_GPIO_INTR))) {
			u32 gpiostatus;
			gpiostatus = ipath_read_kreg32(
				dd, dd->ipath_kregs->kr_gpio_status);
			ipath_dbg("Unexpected GPIO interrupt bits %x\n",
				  gpiostatus);
			ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
					 gpiostatus);
		} else {
			/* Clear GPIO status bit 2 */
			ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
					 (u64) (1 << 2));
			chk0rcv = 1;
		}
	}
	chk0rcv |= istat & port0rbits;

	/*
	 * Clear the interrupt bits we found set, unless they are receive
	 * related, in which case we already cleared them above, and don't
	 * want to clear them again, because we might lose an interrupt.
	 * Clear it early, so we know the chip will have seen this by
	 * the time we process the queue, and will re-interrupt if
	 * necessary.  The processor itself won't take the interrupt
	 * again until we return.
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);

	/*
	 * Handle port0 receive before checking for pio buffers available,
	 * since receives can overflow; piobuf waiters can afford a few
	 * extra cycles, since they were waiting anyway, and users waiting
	 * for receive are at the bottom.
	 */
	if (chk0rcv) {
		ipath_kreceive(dd);
		istat &= ~port0rbits;
	}

	if (istat & ((infinipath_i_rcvavail_mask <<
		      INFINIPATH_I_RCVAVAIL_SHIFT)
		     | (infinipath_i_rcvurg_mask <<
			INFINIPATH_I_RCVURG_SHIFT)))
		handle_urcv(dd, istat);

	if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
		clear_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 dd->ipath_sendctrl);

		if (dd->ipath_portpiowait)
			handle_port_pioavail(dd);

		handle_layer_pioavail(dd);
	}

done:
	ret = IRQ_HANDLED;

bail:
	return ret;
}