Lines matching refs: dd

47 void ipath_disarm_senderrbufs(struct ipath_devdata *dd)
55 piobcnt = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
58 dd, dd->ipath_kregs->kr_sendbuffererror);
60 dd, dd->ipath_kregs->kr_sendbuffererror + 1);
63 dd, dd->ipath_kregs->kr_sendbuffererror + 2);
66 dd, dd->ipath_kregs->kr_sendbuffererror + 3);
73 time_after(dd->ipath_lastcancel, jiffies)) {
84 ipath_disarm_piobufs(dd, i, 1);
86 dd->ipath_lastcancel = jiffies+3;
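
The ipath_lastcancel test above (lines 73-86) is the kernel's standard wraparound-safe rate limit: a disarm within a few jiffies of the last one is skipped. A minimal userspace sketch of the same comparison trick, with time_after() reduced to its signed-subtraction core (names here are illustrative, not from the driver):

#include <stdio.h>

typedef unsigned long jiffies_t;

/* Wraparound-safe "a is after b": cast the difference to signed.
 * This is the same idiom the kernel's time_after() macro uses. */
static int after(jiffies_t a, jiffies_t b)
{
    return (long)(b - a) < 0;
}

int main(void)
{
    jiffies_t now = (jiffies_t)-2; /* two ticks before the counter wraps */
    jiffies_t limit = now + 3;     /* wraps past zero */

    /* Despite the numeric wrap, "limit" still compares as later. */
    printf("rate-limited: %s\n", after(limit, now) ? "yes" : "no");
    return 0;
}
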
131 static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
135 ipath_disarm_senderrbufs(dd);
137 !(dd->ipath_flags & IPATH_LINKACTIVE)) {
229 static char *ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
234 state = ipath_ib_state(dd, ibcs);
235 if (state == dd->ib_init)
237 else if (state == dd->ib_arm)
239 else if (state == dd->ib_active)
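
ib_linkstate matches the decoded state against per-device values (dd->ib_init, dd->ib_arm, dd->ib_active) rather than fixed constants, presumably because the encodings differ between chip revisions. When the encoding is fixed, the usual C idiom is a designated-initializer lookup table; an illustrative sketch with invented names:

#include <stdio.h>

enum link_state { LS_DOWN, LS_INIT, LS_ARM, LS_ACTIVE, LS_MAX };

static const char *const state_name[LS_MAX] = {
    [LS_DOWN]   = "DOWN",
    [LS_INIT]   = "INIT",
    [LS_ARM]    = "ARM",
    [LS_ACTIVE] = "ACTIVE",
};

/* Range-checked lookup; unknown states fall through to a placeholder. */
static const char *name(unsigned s)
{
    return (s < LS_MAX && state_name[s]) ? state_name[s] : "unknown";
}

int main(void)
{
    printf("%s\n", name(LS_ARM)); /* prints ARM */
    return 0;
}
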
246 void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev)
250 event.device = &dd->verbs_dev->ibdev;
256 static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
260 u32 init = dd->ib_init;
261 u32 arm = dd->ib_arm;
262 u32 active = dd->ib_active;
263 const u64 ibcs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
265 lstate = ipath_ib_linkstate(dd, ibcs); /* linkstate */
266 ibstate = ipath_ib_state(dd, ibcs);
268 lastlstate = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
269 ltstate = ipath_ib_linktrstate(dd, ibcs); /* linktrainingstate */

292 if (dd->ipath_f_ib_updown(dd, 1, ibcs)) {
294 dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
299 (dd->ipath_flags & IPATH_IB_FORCE_NOTIFY)) &&
303 handled = dd->ipath_f_ib_updown(dd, 0, ibcs);
304 dd->ipath_flags &= ~IPATH_IB_FORCE_NOTIFY;
317 (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
318 dev_info(&dd->pcidev->dev, "Link state changed from %s "
319 "to %s\n", (dd->ipath_flags & IPATH_LINKARMED) ?
320 "ARM" : "ACTIVE", ib_linkstate(dd, ibcs));
326 lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
337 if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
338 (++dd->ipath_ibpollcnt == 40)) {
339 dd->ipath_flags |= IPATH_NOCABLE;
340 *dd->ipath_statusp |=
350 dd->ipath_ibpollcnt = 0; /* no longer in a poll* state */
353 if (ibstate != init && dd->ipath_lastlinkrecov && ipath_linkrecovery) {
355 linkrecov = ipath_snap_cntr(dd,
356 dd->ipath_cregs->cr_iblinkerrrecovcnt);
357 if (linkrecov != dd->ipath_lastlinkrecov) {
360 ib_linkstate(dd, ibcs),
364 dd->ipath_lastlinkrecov = 0;
365 ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
371 *dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
373 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
374 if (dd->ipath_flags & IPATH_LINKACTIVE)
375 signal_ib_event(dd, IB_EVENT_PORT_ERR);
378 dd->ipath_flags |= IPATH_LINKARMED;
379 dd->ipath_flags &= ~(IPATH_LINKUNK |
382 ipath_hol_down(dd);
389 dd->ipath_flags |= IPATH_LINKINIT |
391 dd->ipath_flags &= ~(IPATH_LINKUNK |
394 ipath_hol_down(dd);
396 dd->ipath_lastlinkrecov = ipath_snap_cntr(dd,
397 dd->ipath_cregs->cr_iblinkerrrecovcnt);
398 *dd->ipath_statusp |=
400 dd->ipath_flags |= IPATH_LINKACTIVE;
401 dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
404 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
405 ipath_restart_sdma(dd);
406 signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
408 dd->ipath_f_setextled(dd, lstate, ltstate);
409 ipath_hol_up(dd);
418 "(%x)\n", ib_linkstate(dd, ibcs), ibstate);
421 dd->ipath_unit, ib_linkstate(dd, ibcs),
424 if (dd->ipath_flags & IPATH_LINKACTIVE)
425 signal_ib_event(dd, IB_EVENT_PORT_ERR);
426 dd->ipath_flags |= IPATH_LINKDOWN;
427 dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
430 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
431 dd->ipath_lli_counter = 0;
436 dd->ipath_unit, lstate,
437 ib_linkstate(dd, dd->ipath_lastibcstat));
441 dd->ipath_unit,
447 dd->ipath_lastibcstat = ibcs;
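
handle_e_ibstatuschanged finishes by caching the raw IBC status (line 447) so the next interrupt can tell a genuine transition from a repeat of the same state (the lastlstate/lastlts comparisons at lines 268 and 326). A minimal sketch of that change-detection pattern, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

static uint64_t last_status;

/* Act only on the bits that differ from the previously seen status,
 * then cache the new value, mirroring the ipath_lastibcstat idiom. */
static void on_status(uint64_t status)
{
    uint64_t changed = status ^ last_status;

    if (changed)
        printf("changed bits: 0x%llx\n", (unsigned long long)changed);
    last_status = status;
}

int main(void)
{
    on_status(0x3); /* reports changed bits 0x3 */
    on_status(0x3); /* no change, stays silent */
    on_status(0x1); /* reports changed bits 0x2 */
    return 0;
}
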
452 static void handle_supp_msgs(struct ipath_devdata *dd,
459 if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
462 iserr = ipath_decode_err(dd, msg, msgsz,
463 dd->ipath_lasterror &
473 if (dd->ipath_lasterror & ~mask)
474 ipath_dev_err(dd, "Suppressed %u messages for "
478 dd->ipath_lasterror);
498 static unsigned handle_frequent_errors(struct ipath_devdata *dd,
519 handle_supp_msgs(dd, supp_msgs, msg, msgsz);
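
handle_frequent_errors tallies identical error interrupts and hands the count to handle_supp_msgs, which prints a single "Suppressed %u messages" summary (line 474) instead of one line per interrupt. A compact userspace sketch of that throttle; the threshold and names are made up, and the driver keys its burst window off time rather than an explicit flush call:

#include <stdio.h>

#define BURST_LIMIT 10

static unsigned seen, suppressed;

static void report_error(const char *msg)
{
    if (++seen <= BURST_LIMIT)
        fprintf(stderr, "error: %s\n", msg);
    else
        suppressed++;
}

/* Called when the burst window ends. */
static void flush_suppressed(void)
{
    if (suppressed)
        fprintf(stderr, "Suppressed %u messages\n", suppressed);
    seen = suppressed = 0;
}

int main(void)
{
    for (int i = 0; i < 25; i++)
        report_error("link flapped");
    flush_suppressed(); /* prints: Suppressed 15 messages */
    return 0;
}
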
530 static void handle_sdma_errors(struct ipath_devdata *dd, ipath_err_t errs)
537 ipath_decode_err(dd, msg, sizeof msg, errs &
543 tl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
544 hd = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
545 status = ipath_read_kreg64(dd,
546 dd->ipath_kregs->kr_senddmastatus);
547 lengen = ipath_read_kreg64(dd,
548 dd->ipath_kregs->kr_senddmalengen);
553 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
554 __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
555 expected = test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
556 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
558 ipath_cancel_sends(dd, 1);
561 static void handle_sdma_intr(struct ipath_devdata *dd, u64 istat)
567 !test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
568 ipath_sdma_intr(dd);
572 &dd->ipath_sdma_status);
575 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
576 __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
577 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
579 ipath_cancel_sends(dd, 1);
580 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
581 tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
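
Both SDMA paths update ipath_sdma_status under ipath_sdma_lock; the non-atomic __set_bit is safe there because the spinlock serializes all writers. A userspace analogue with a mutex standing in for the spinlock (status bit names invented for the sketch):

#include <pthread.h>
#include <stdio.h>

#define SDMA_DISABLED 0
#define SDMA_ABORTING 1

static unsigned long sdma_status;
static pthread_mutex_t sdma_lock = PTHREAD_MUTEX_INITIALIZER;

static void sdma_disable(void)
{
    int expected;

    pthread_mutex_lock(&sdma_lock);
    /* Plain read-modify-write is fine: the lock excludes other writers. */
    sdma_status |= 1UL << SDMA_DISABLED;
    expected = !!(sdma_status & (1UL << SDMA_ABORTING));
    pthread_mutex_unlock(&sdma_lock);

    if (!expected)
        fprintf(stderr, "unexpected SDMA disable\n");
}

int main(void)
{
    sdma_disable(); /* warns: no abort was in progress */
    return 0;
}
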
585 static int handle_hdrq_full(struct ipath_devdata *dd)
592 for (i = 0; i < dd->ipath_cfgports; i++) {
593 struct ipath_portdata *pd = dd->ipath_pd[i];
611 if (dd->ipath_flags & IPATH_NODMA_RTAIL)
612 tl = ipath_read_ureg32(dd, ur_rcvhdrtail, i);
618 hd = ipath_read_ureg32(dd, ur_rcvhdrhead, i);
619 if (hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast)) {
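
The hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast) test at line 619 is a ring-buffer full check written out longhand because the head index wraps to 0 after the last entry. An illustrative standalone version under an assumed queue size:

#include <stdbool.h>
#include <stdio.h>

#define QSIZE 8 /* entries; the last valid index plays the hdrqlast role */

/* Full under the driver's convention: head one entry ahead of tail,
 * including the wrap from the last index back to 0. */
static bool hdrq_full(unsigned hd, unsigned tl)
{
    return hd == tl + 1 || (hd == 0 && tl == QSIZE - 1);
}

int main(void)
{
    printf("%d\n", hdrq_full(4, 3));         /* 1: full */
    printf("%d\n", hdrq_full(0, QSIZE - 1)); /* 1: full across the wrap */
    printf("%d\n", hdrq_full(4, 4));         /* 0: not full */
    return 0;
}
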
631 static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
645 errs &= dd->ipath_errormask & ~dd->ipath_maskederrs;
647 supp_msgs = handle_frequent_errors(dd, errs, msg, (u32)sizeof msg,
653 dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg);
657 mask = dd->ipath_eep_st_masks[log_idx].errs_to_log;
659 ipath_inc_eeprom_err(dd, log_idx, 1);
664 handle_sdma_errors(dd, errs);
666 if (!noprint && (errs & ~dd->ipath_e_bitsextant))
667 ipath_dev_err(dd, "error interrupt with unknown errors "
669 (errs & ~dd->ipath_e_bitsextant));
672 ignore_this_time = handle_e_sum_errs(dd, errs);
674 !(dd->ipath_flags & IPATH_LINKACTIVE)) {
695 dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
697 dd->ipath_errormask &= ~dd->ipath_maskederrs;
698 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
699 dd->ipath_errormask);
700 s_iserr = ipath_decode_err(dd, msg, sizeof msg,
701 dd->ipath_maskederrs);
703 if (dd->ipath_maskederrs &
706 ipath_dev_err(dd, "Temporarily disabling "
708 (unsigned long long) dd->ipath_maskederrs,
736 dd->ipath_unmasktime = jiffies + HZ * 180;
739 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, errs);
742 if (errs & ~dd->ipath_lasterror) {
743 errs &= ~dd->ipath_lasterror;
745 dd->ipath_lasterror |= errs &
751 dd->ipath_spectriggerhit++;
753 dd->ipath_spectriggerhit);
758 time_after(dd->ipath_lastcancel, jiffies)) {
785 ipath_decode_err(dd, msg, sizeof msg, errs & ~mask);
812 chkerrpkts |= handle_hdrq_full(dd);
814 struct ipath_portdata *pd = dd->ipath_pd[0];
838 dd->ipath_flags |= IPATH_LINKDOWN;
839 dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
841 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
844 ipath_ibcstatus_str[ipath_read_kreg64(dd,
845 dd->ipath_kregs->kr_ibcstatus) & 0xf]);
848 handle_e_ibstatuschanged(dd, errs);
852 ipath_dev_err(dd, "Got reset, requires re-init "
854 dd->ipath_flags &= ~IPATH_INITTED; /* needs re-init */
856 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
857 *dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF;
862 ipath_dev_err(dd, "%s error\n", msg);
864 if (dd->ipath_state_wanted & dd->ipath_flags) {
866 "waking\n", dd->ipath_state_wanted,
867 dd->ipath_flags);
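
Lines 695-736 implement handle_errors' noise gate: recurring error bits are folded into ipath_maskederrs, removed from kr_errormask so they stop raising interrupts, and ipath_unmasktime is set three minutes out (jiffies + HZ * 180) so another path can restore them. A sketch of that mask-with-deadline scheme, simplified to wall-clock seconds and invented names:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t error_mask = ~0ULL; /* bits we want interrupts for */
static uint64_t masked_errs;        /* bits silenced for being noisy */
static time_t unmask_deadline;

static void silence(uint64_t noisy, time_t now)
{
    masked_errs |= noisy;
    error_mask &= ~masked_errs;  /* stop taking these interrupts */
    unmask_deadline = now + 180; /* driver uses jiffies + HZ * 180 */
}

static void maybe_unmask(time_t now)
{
    if (masked_errs && now >= unmask_deadline) {
        error_mask |= masked_errs;
        masked_errs = 0;
    }
}

int main(void)
{
    time_t t = time(NULL);

    silence(0x8ULL, t);
    maybe_unmask(t + 181);
    printf("mask restored: %s\n", masked_errs == 0 ? "yes" : "no");
    return 0;
}
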
887 void ipath_clear_freeze(struct ipath_devdata *dd)
890 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
893 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
895 ipath_cancel_sends(dd, 1);
898 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
899 dd->ipath_control);
900 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
903 ipath_force_pio_avail_update(dd);
911 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
912 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
914 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
915 dd->ipath_errormask);
916 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, -1LL);
917 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
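
ipath_clear_freeze follows a strict unfreeze order: mask errors and interrupts, cancel sends, rewrite control, then read kr_scratch (line 900) to flush the posted PCI writes before clearing and re-arming everything. The read-back flush is the key step; an illustrative helper with a hypothetical register layout:

#include <stdint.h>

/* Hypothetical register block; a real driver maps this via ioremap(). */
struct regs {
    volatile uint64_t control;
    volatile uint64_t scratch;
};

static void write_flush(struct regs *r, uint64_t val)
{
    r->control = val;
    /* PCI writes are posted: reading any register on the device forces
     * them to complete, which is what the kr_scratch reads above do. */
    (void)r->scratch;
}

int main(void)
{
    struct regs fake = { 0, 0 };

    write_flush(&fake, 1);
    return 0;
}
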
923 static noinline void ipath_bad_intr(struct ipath_devdata *dd, u32 *unexpectp)
941 if (dd->pcidev && dd->ipath_irq) {
942 ipath_dev_err(dd, "Now %u unexpected "
947 dd->ipath_irq);
948 dd->ipath_f_free_irq(dd);
951 if (ipath_read_ireg(dd, dd->ipath_kregs->kr_intmask)) {
952 ipath_dev_err(dd, "%u unexpected interrupts, "
958 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
966 static noinline void ipath_bad_regread(struct ipath_devdata *dd)
976 ipath_dev_err(dd,
980 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
982 ipath_dev_err(dd, "Still bad interrupt status, "
984 dd->ipath_f_free_irq(dd);
989 ipath_dev_err(dd, "Disabling interrupts, "
994 static void handle_layer_pioavail(struct ipath_devdata *dd)
999 ret = ipath_ib_piobufavail(dd->verbs_dev);
1005 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1006 dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
1007 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1008 dd->ipath_sendctrl);
1009 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1010 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
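
This function is the re-arm half of an interrupt-mitigation pair: the main handler below (lines 1258-1263) clears INFINIPATH_S_PIOINTBUFAVAIL as soon as the interrupt fires, and this path sets it again only while ipath_ib_piobufavail reports waiters. A toy model of the toggle, with all names invented:

#include <stdbool.h>
#include <stdio.h>

static bool pio_intr_enabled = true;

/* Returns nonzero while consumers still wait for send buffers. */
static int consumers_waiting(void)
{
    static int waiters = 1; /* one waiter, then the queue drains */

    if (waiters > 0) {
        waiters--;
        return 1;
    }
    return 0;
}

static void pio_avail_irq(void)
{
    pio_intr_enabled = false; /* mitigate: off until someone waits */
    if (consumers_waiting())
        pio_intr_enabled = true;
    printf("pio intr %s\n", pio_intr_enabled ? "on" : "off");
}

int main(void)
{
    pio_avail_irq(); /* on: a waiter remained */
    pio_avail_irq(); /* off: queue drained */
    return 0;
}
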
1018 static void handle_urcv(struct ipath_devdata *dd, u64 istat)
1034 portr = ((istat >> dd->ipath_i_rcvavail_shift) &
1035 dd->ipath_i_rcvavail_mask) |
1036 ((istat >> dd->ipath_i_rcvurg_shift) &
1037 dd->ipath_i_rcvurg_mask);
1038 for (i = 1; i < dd->ipath_cfgports; i++) {
1039 struct ipath_portdata *pd = dd->ipath_pd[i];
1044 clear_bit(i + dd->ipath_r_intravail_shift,
1045 &dd->ipath_rcvctrl);
1060 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1061 dd->ipath_rcvctrl);
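
handle_urcv first folds the rcvavail and rcvurg fields of istat into one per-port bitmap (lines 1034-1037) before walking the ports. A standalone sketch of that shift-and-mask demux; the field positions and width are invented:

#include <stdint.h>
#include <stdio.h>

#define RCVAVAIL_SHIFT 0
#define RCVURG_SHIFT   16
#define PORT_MASK      0xffffULL /* up to 16 ports per field */

int main(void)
{
    uint64_t istat = (0x5ULL << RCVAVAIL_SHIFT) | (0x4ULL << RCVURG_SHIFT);

    /* One bit per port, set if either avail or urgent fired. */
    uint64_t portr = ((istat >> RCVAVAIL_SHIFT) & PORT_MASK) |
                     ((istat >> RCVURG_SHIFT) & PORT_MASK);

    for (unsigned i = 0; i < 16; i++)
        if (portr & (1ULL << i))
            printf("service port %u\n", i);
    return 0;
}
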
1067 struct ipath_devdata *dd = data;
1076 if (dd->ipath_int_counter != (u32) -1)
1077 dd->ipath_int_counter++;
1079 if (!(dd->ipath_flags & IPATH_PRESENT)) {
1096 if (!(dd->ipath_flags & IPATH_INITTED)) {
1097 ipath_bad_intr(dd, &unexpected);
1102 istat = ipath_read_ireg(dd, dd->ipath_kregs->kr_intstatus);
1110 ipath_bad_regread(dd);
1119 if (unlikely(istat & ~dd->ipath_i_bitsextant))
1120 ipath_dev_err(dd,
1123 istat & ~dd->ipath_i_bitsextant);
1130 estat = ipath_read_kreg64(dd,
1131 dd->ipath_kregs->kr_errorstatus);
1133 dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
1141 ipath_dev_err(dd, "Read of error status failed "
1144 chk0rcv |= handle_errors(dd, estat);
1161 dd, dd->ipath_kregs->kr_gpio_status);
1164 (dd->ipath_flags & IPATH_GPIO_ERRINTRS)) {
1174 dd->ipath_rxfc_unsupvl_errs++;
1178 dd->ipath_overrun_thresh_errs++;
1182 dd->ipath_lli_errs++;
1188 (dd->ipath_flags & IPATH_GPIO_INTR)) {
1207 const u32 mask = (u32) dd->ipath_gpio_mask;
1213 dd->ipath_gpio_mask &= ~(gpiostatus & mask);
1214 ipath_write_kreg(dd,
1215 dd->ipath_kregs->kr_gpio_mask,
1216 dd->ipath_gpio_mask);
1220 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
1233 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
1241 kportrbits = (1ULL << dd->ipath_i_rcvavail_shift) |
1242 (1ULL << dd->ipath_i_rcvurg_shift);
1245 ipath_kreceive(dd->ipath_pd[0]);
1248 if (istat & ((dd->ipath_i_rcvavail_mask << dd->ipath_i_rcvavail_shift) |
1249 (dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
1250 handle_urcv(dd, istat);
1253 handle_sdma_intr(dd, istat);
1258 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1259 dd->ipath_sendctrl &= ~INFINIPATH_S_PIOINTBUFAVAIL;
1260 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1261 dd->ipath_sendctrl);
1262 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1263 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1266 handle_layer_pioavail(dd);
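
Note the ordering at line 1233: the handler writes istat back to kr_intclear before servicing receive and PIO work, so an event arriving mid-service re-asserts the interrupt rather than being lost. A sketch of that ack-first discipline against a hypothetical device:

#include <stdint.h>
#include <stdio.h>

static uint64_t int_status; /* stands in for the chip's kr_intstatus */

static void handle_work(uint64_t istat)
{
    printf("servicing 0x%llx\n", (unsigned long long)istat);
}

static void isr(void)
{
    uint64_t istat = int_status;

    /* Ack first, mirroring the kr_intclear write at line 1233: a new
     * event arriving while we service this one sets fresh status bits
     * and re-raises the interrupt instead of vanishing. */
    int_status &= ~istat;
    handle_work(istat);
}

int main(void)
{
    int_status = 0x3;
    isr();
    return 0;
}
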