Lines matching references to the identifier dd — the struct ipath_devdata * per-device pointer used throughout the ipath driver. The number opening each line is that line's number in the source file.

157 static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
164 ipath_dev_err(dd, "failed to read bar0 before enable: "
169 ipath_dev_err(dd, "failed to read bar1 before enable: "
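
The read_bars() hits above snapshot both BARs through config space before the
device is enabled, so that a chip whose BARs were cleared by a reset can be
detected and repaired later (see the BAR-rewrite hits at 432-456). A minimal
sketch using only the standard config-space accessors, error reporting trimmed:

    #include <linux/pci.h>

    static void snapshot_bars(struct pci_dev *pdev, u32 *bar0, u32 *bar1)
    {
            /* config reads work even before pci_enable_device() */
            if (pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, bar0))
                    *bar0 = 0;
            if (pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, bar1))
                    *bar1 = 0;
    }
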
176 struct ipath_devdata *dd)
182 if (dd->ipath_unit != -1) {
184 idr_remove(&unit_table, dd->ipath_unit);
185 list_del(&dd->ipath_list);
188 vfree(dd);
194 struct ipath_devdata *dd;
197 dd = vzalloc(sizeof(*dd));
198 if (!dd) {
199 dd = ERR_PTR(-ENOMEM);
202 dd->ipath_unit = -1;
207 ret = idr_alloc(&unit_table, dd, 0, 0, GFP_NOWAIT);
211 ipath_free_devdata(pdev, dd);
212 dd = ERR_PTR(ret);
215 dd->ipath_unit = ret;
217 dd->pcidev = pdev;
218 pci_set_drvdata(pdev, dd);
220 list_add(&dd->ipath_list, &ipath_dev_list);
226 return dd;
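
ipath_alloc_devdata() pairs vzalloc() of the device structure with an IDR
allocation for the unit number; idr_alloc() runs with GFP_NOWAIT because it is
called under a lock, which is the standard idr_preload() pattern. A reduced
sketch, with the lock name illustrative:

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static DEFINE_IDR(unit_table);
    static DEFINE_SPINLOCK(unit_lock);      /* stand-in for the driver's lock */

    static int assign_unit(void *devdata)
    {
            int ret;

            idr_preload(GFP_KERNEL);        /* preallocate outside the lock */
            spin_lock(&unit_lock);
            /* start = 0, end = 0: any non-negative id */
            ret = idr_alloc(&unit_table, devdata, 0, 0, GFP_NOWAIT);
            spin_unlock(&unit_lock);
            idr_preload_end();
            return ret;                     /* unit number, or a negative errno */
    }
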
236 struct ipath_devdata *dd;
240 dd = __ipath_lookup(unit);
243 return dd;
249 struct ipath_devdata *dd;
257 list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
259 if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
261 if (dd->ipath_lid &&
262 !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
265 if (dd->ipath_cfgports > maxports)
266 maxports = dd->ipath_cfgports;
287 int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
292 void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
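
ipath_enable_wc() and ipath_disable_wc() are declared weak on purpose: on x86 a
strong definition in the driver's write-combining file overrides them at link
time, and on every other architecture these no-op fallbacks are used. The
pattern in isolation:

    struct ipath_devdata;   /* opaque here; defined in the driver headers */

    /* generic fallback; an arch-specific strong definition wins at link time */
    int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
    {
            return 0;       /* no write combining available: not an error */
    }

    void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
    {
    }
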
309 static void ipath_verify_pioperf(struct ipath_devdata *dd)
316 piobuf = ipath_getpiobuf(dd, 0, &pbnum);
318 dev_info(&dd->pcidev->dev,
331 dev_info(&dd->pcidev->dev,
346 ipath_disable_armlaunch(dd);
352 if ((dd->ipath_flags & IPATH_HAS_PBC_CNT))
371 ipath_dev_err(dd,
385 ipath_disarm_piobufs(dd, pbnum, 1);
386 ipath_enable_armlaunch(dd);
389 static void cleanup_device(struct ipath_devdata *dd);
394 struct ipath_devdata *dd;
398 dd = ipath_alloc_devdata(pdev);
399 if (IS_ERR(dd)) {
400 ret = PTR_ERR(dd);
406 ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);
422 ipath_dev_err(dd, "enable unit %d failed: error %d\n",
423 dd->ipath_unit, -ret);
432 read_bars(dd, pdev, &bar0, &bar1);
441 ipath_dev_err(dd, "rewrite of BAR0 "
448 ipath_dev_err(dd, "rewrite of BAR1 "
453 ipath_dev_err(dd, "BAR is 0 (probable RESET), "
463 "err %d\n", dd->ipath_unit, -ret);
478 dd->ipath_unit, ret);
488 dd->ipath_unit, ret);
498 dd->ipath_unit, ret);
507 dd->ipath_pcibar0 = addr;
508 dd->ipath_pcibar1 = addr >> 32;
509 dd->ipath_deviceid = ent->device; /* save for later use */
510 dd->ipath_vendorid = ent->vendor;
515 ipath_init_iba6110_funcs(dd);
519 ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
533 ipath_dev_err(dd, "No valid address in BAR 0!\n");
538 dd->ipath_pcirev = pdev->revision;
542 dd->ipath_kregbase = __ioremap(addr, len,
545 dd->ipath_kregbase = ioremap_nocache(addr, len);
548 if (!dd->ipath_kregbase) {
554 dd->ipath_kregend = (u64 __iomem *)
555 ((void __iomem *)dd->ipath_kregbase + len);
556 dd->ipath_physaddr = addr; /* used for io_remap, etc. */
559 addr, dd->ipath_kregbase);
561 if (dd->ipath_f_bus(dd, pdev))
562 ipath_dev_err(dd, "Failed to setup config space; "
571 if (!dd->ipath_irq)
572 ipath_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
575 ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
576 IPATH_DRV_NAME, dd);
578 ipath_dev_err(dd, "Couldn't setup irq handler, "
579 "irq=%d: %d\n", dd->ipath_irq, ret);
584 ret = ipath_init_chip(dd, 0); /* do the chip-specific init */
588 ret = ipath_enable_wc(dd);
591 ipath_dev_err(dd, "Write combining not enabled "
597 ipath_verify_pioperf(dd);
599 ipath_device_create_group(&pdev->dev, dd);
600 ipathfs_add_device(dd);
601 ipath_user_add(dd);
602 ipath_diag_add(dd);
603 ipath_register_ib_device(dd);
608 cleanup_device(dd);
610 if (dd->ipath_irq)
611 dd->ipath_f_free_irq(dd);
613 if (dd->ipath_f_cleanup)
614 dd->ipath_f_cleanup(dd);
617 iounmap((volatile void __iomem *) dd->ipath_kregbase);
626 ipath_free_devdata(pdev, dd);
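
The probe hits from 394 to 626 trace the standard PCI ladder: enable the
device, repair the BARs if needed, ioremap BAR0, hook the shared IRQ, run the
chip-specific init, then register the user/diag/verbs interfaces; the labels at
608-626 unwind everything in reverse order. A compressed skeleton of that
ladder — the names here are placeholders, not the driver's own helpers:

    #include <linux/pci.h>
    #include <linux/interrupt.h>
    #include <linux/io.h>

    static irqreturn_t example_intr(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int example_probe(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
    {
            void __iomem *regs;
            int ret;

            ret = pci_enable_device(pdev);
            if (ret)
                    return ret;
            ret = pci_request_regions(pdev, "example");
            if (ret)
                    goto err_disable;
            regs = ioremap_nocache(pci_resource_start(pdev, 0),
                                   pci_resource_len(pdev, 0));
            if (!regs) {
                    ret = -ENOMEM;
                    goto err_release;
            }
            ret = request_irq(pdev->irq, example_intr, IRQF_SHARED,
                              "example", pdev);
            if (ret)
                    goto err_unmap;
            return 0;

    err_unmap:
            iounmap(regs);
    err_release:
            pci_release_regions(pdev);
    err_disable:
            pci_disable_device(pdev);
            return ret;
    }
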
632 static void cleanup_device(struct ipath_devdata *dd)
638 if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
640 *dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
641 if (dd->ipath_kregbase) {
647 dd->ipath_kregbase = NULL;
648 dd->ipath_uregbase = 0;
649 dd->ipath_sregbase = 0;
650 dd->ipath_cregbase = 0;
651 dd->ipath_kregsize = 0;
653 ipath_disable_wc(dd);
656 if (dd->ipath_spectriggerhit)
657 dev_info(&dd->pcidev->dev, "%lu special trigger hits\n",
658 dd->ipath_spectriggerhit);
660 if (dd->ipath_pioavailregs_dma) {
661 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
662 (void *) dd->ipath_pioavailregs_dma,
663 dd->ipath_pioavailregs_phys);
664 dd->ipath_pioavailregs_dma = NULL;
666 if (dd->ipath_dummy_hdrq) {
667 dma_free_coherent(&dd->pcidev->dev,
668 dd->ipath_pd[0]->port_rcvhdrq_size,
669 dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
670 dd->ipath_dummy_hdrq = NULL;
673 if (dd->ipath_pageshadow) {
674 struct page **tmpp = dd->ipath_pageshadow;
675 dma_addr_t *tmpd = dd->ipath_physshadow;
680 for (port = 0; port < dd->ipath_cfgports; port++) {
681 int port_tidbase = port * dd->ipath_rcvtidcnt;
682 int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
686 pci_unmap_page(dd->pcidev, tmpd[i],
708 dd->ipath_pageshadow);
709 tmpp = dd->ipath_pageshadow;
710 dd->ipath_pageshadow = NULL;
713 dd->ipath_egrtidbase = NULL;
723 spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
724 tmp = dd->ipath_pd;
725 dd->ipath_pd = NULL;
726 spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
727 for (port = 0; port < dd->ipath_portcnt; port++) {
730 ipath_free_pddata(dd, pd);
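
cleanup_device() releases everything init set up: each dma_alloc_coherent()
buffer is returned with dma_free_coherent() using the same device, size and
address pair, and the expected-TID page shadow is unmapped and unpinned port by
port. A sketch of that per-port loop; the array names and the put_page() call
are illustrative stand-ins for the driver's own helpers:

    #include <linux/pci.h>
    #include <linux/mm.h>

    static void unmap_tid_pages(struct pci_dev *pcidev, struct page **pages,
                                dma_addr_t *dma_addrs, int tidbase, int maxtid)
    {
            int i;

            for (i = tidbase; i < maxtid; i++) {
                    if (!pages[i])
                            continue;
                    pci_unmap_page(pcidev, dma_addrs[i], PAGE_SIZE,
                                   PCI_DMA_FROMDEVICE);
                    put_page(pages[i]);     /* drop the pin taken at map time */
                    pages[i] = NULL;
            }
    }
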
737 struct ipath_devdata *dd = pci_get_drvdata(pdev);
739 ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);
745 ipath_shutdown_device(dd);
749 if (dd->verbs_dev)
750 ipath_unregister_ib_device(dd->verbs_dev);
752 ipath_diag_remove(dd);
753 ipath_user_remove(dd);
754 ipathfs_remove_device(dd);
755 ipath_device_remove_group(&pdev->dev, dd);
757 ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
758 "unit %u\n", dd, (u32) dd->ipath_unit);
760 cleanup_device(dd);
768 if (dd->ipath_irq) {
770 dd->ipath_unit, dd->ipath_irq);
771 dd->ipath_f_free_irq(dd);
774 "for unit %u\n", dd->ipath_unit);
781 if (dd->ipath_f_cleanup)
783 dd->ipath_f_cleanup(dd);
785 ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", dd->ipath_kregbase);
786 iounmap((volatile void __iomem *) dd->ipath_kregbase);
791 ipath_free_devdata(pdev, dd);
801 * @dd: the infinipath device
810 void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
818 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
824 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
825 dd->ipath_sendctrl | INFINIPATH_S_DISARM |
828 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
829 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
832 ipath_force_pio_avail_update(dd);
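
The sequence at 818-829 is the driver's standard MMIO discipline: modify
kr_sendctrl only under ipath_sendctrl_lock, then read back kr_scratch so the
posted write actually reaches the chip before the lock is dropped. Generalized,
assuming a 64-bit build where writeq()/readq() exist:

    #include <linux/io.h>
    #include <linux/spinlock.h>

    static void write_ctrl_flushed(void __iomem *base, spinlock_t *lock,
                                   unsigned ctrl_off, unsigned scratch_off,
                                   u64 val)
    {
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            writeq(val, base + ctrl_off);
            (void) readq(base + scratch_off);  /* flush the posted write */
            spin_unlock_irqrestore(lock, flags);
    }
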
837 * @dd: the infinipath device
847 int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
849 dd->ipath_state_wanted = state;
851 (dd->ipath_flags & state),
853 dd->ipath_state_wanted = 0;
855 if (!(dd->ipath_flags & state)) {
864 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
867 dd, dd->ipath_kregs->kr_ibcctrl),
869 ipath_ibcstatus_str[val & dd->ibcs_lts_mask]);
871 return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
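
ipath_wait_linkstate() publishes the wanted flag bits in ipath_state_wanted so
the interrupt path knows when to wake it, sleeps with a timeout, and on expiry
logs the ibcstatus register before returning -ETIMEDOUT. The core wait reduced
to its shape; the wait queue and flag word here are stand-ins:

    #include <linux/wait.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    static DECLARE_WAIT_QUEUE_HEAD(state_wait); /* woken by the IRQ handler */
    static unsigned long dev_flags;             /* updated by the IRQ handler */

    static int wait_for_state(unsigned long wanted, int msecs)
    {
            wait_event_interruptible_timeout(state_wait,
                                             (dev_flags & wanted),
                                             msecs_to_jiffies(msecs));
            return (dev_flags & wanted) ? 0 : -ETIMEDOUT;
    }
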
874 static void decode_sdma_errs(struct ipath_devdata *dd, ipath_err_t err,
900 test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
913 int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
997 decode_sdma_errs(dd, err, buf, blen);
1045 * @dd: the infinipath device
1050 static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum)
1052 return dd->ipath_port0_skbinfo ?
1053 (void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
1058 * @dd: the infinipath device
1061 struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
1079 len = dd->ipath_ibmaxlen + 4;
1081 if (dd->ipath_flags & IPATH_4BYTE_TID) {
1091 ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
1098 if (dd->ipath_flags & IPATH_4BYTE_TID) {
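
On chips flagged IPATH_4BYTE_TID, the buffer handed to the chip must sit on a
2 KB boundary, and the only way to guarantee that for an skb is to over-allocate
and then skb_reserve() up to the boundary, which is what the hits at 1079-1098
do. Isolated:

    #include <linux/skbuff.h>

    static struct sk_buff *alloc_2k_aligned_skb(unsigned int len, gfp_t gfp)
    {
            struct sk_buff *skb = __dev_alloc_skb(len + 2047, gfp);
            u32 una;

            if (!skb)
                    return NULL;
            /* advance data to the next 2 KB boundary if it isn't on one */
            una = (unsigned long) skb->data & 2047;
            if (una)
                    skb_reserve(skb, 2048 - una);
            return skb;
    }
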
1108 static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
1128 u8 n = (dd->ipath_ibcctrl >>
1132 if (++dd->ipath_lli_counter > n) {
1133 dd->ipath_lli_counter = 0;
1134 dd->ipath_lli_errors++;
1147 struct ipath_devdata *dd = pd->port_dd;
1150 const u32 rsize = dd->ipath_rcvhdrentsize; /* words */
1151 const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
1159 rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset;
1160 if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
1175 hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
1181 if ((dd->ipath_flags & IPATH_NODMA_RTAIL) ?
1195 ebuf = ipath_get_egrbuf(dd, etail);
1211 ipath_rcv_hdrerr(dd, eflags, l, etail, rhf_addr, hdr);
1213 ipath_ib_rcv(dd->verbs_dev, (u32 *)hdr, ebuf, tlen);
1214 if (dd->ipath_lli_counter)
1215 dd->ipath_lli_counter--;
1255 l + dd->ipath_rhf_offset;
1256 if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
1275 lval |= dd->ipath_rhdrhead_intr_off;
1276 ipath_write_ureg(dd, ur_rcvhdrhead, lval,
1279 ipath_write_ureg(dd, ur_rcvegrindexhead,
1286 if (!dd->ipath_rhdrhead_intr_off && !reloop &&
1287 !(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
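
The receive loop at 1147-1287 walks the header queue in strides of rsize 32-bit
words and wraps at maxcnt; when IPATH_NODMA_RTAIL is set there is no DMA'd tail
pointer, so validity comes from the receive-header-flags word at rhf_offset
inside each entry. The walk, with per-packet handling stubbed out:

    #include <linux/types.h>

    static void walk_hdrq(__le32 *rcvhdrq, u32 head, u32 tail,
                          u32 rsize, u32 maxcnt, u32 rhf_offset)
    {
            u32 l = head;

            while (l != tail) {
                    __le32 *rhf_addr = rcvhdrq + l + rhf_offset;

                    /* ... decode rhf_addr, hand the packet to the verbs
                     * layer via ipath_ib_rcv() ... */
                    (void) rhf_addr;
                    l += rsize;             /* next entry */
                    if (l >= maxcnt)
                            l = 0;          /* circular queue wrap */
            }
    }
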
1319 * @dd: the infinipath device
1326 static void ipath_update_pio_bufs(struct ipath_devdata *dd)
1330 const unsigned piobregs = (unsigned)dd->ipath_pioavregs;
1349 if (!dd->ipath_pioavailregs_dma) {
1355 volatile __le64 *dma = dd->ipath_pioavailregs_dma;
1356 unsigned long *shadow = dd->ipath_pioavailshadow;
1389 if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
1390 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
1392 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
1393 pchg = dd->ipath_pioavailkernel[i] &
1394 ~(dd->ipath_pioavailshadow[i] ^ piov);
1396 if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
1397 pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
1399 dd->ipath_pioavailshadow[i] = pnew;
1411 static void ipath_reset_availshadow(struct ipath_devdata *dd)
1417 for (i = 0; i < dd->ipath_pioavregs; i++) {
1420 im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
1422 val = le64_to_cpu(dd->ipath_pioavailregs_dma[im]);
1427 oldval = dd->ipath_pioavailshadow[i];
1428 dd->ipath_pioavailshadow[i] = val |
1429 ((~dd->ipath_pioavailkernel[i] <<
1432 if (oldval != dd->ipath_pioavailshadow[i])
1435 dd->ipath_pioavailshadow[i]);
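
Both ipath_update_pio_bufs() and ipath_reset_availshadow() apply the same index
swizzle before reading a DMA'd pioavail word: on chips flagged
IPATH_SWAP_PIOBUFS, qwords past index 3 arrive pairwise swapped, so the index
is XORed with 1. Worked out:

    #include <linux/types.h>

    static u64 read_avail_word(const __le64 *dma, unsigned int i, int swapped)
    {
            /* i = 4 reads dma[5], 5 reads dma[4], 6 reads dma[7], ... */
            unsigned int im = (i > 3 && swapped) ? (i ^ 1) : i;

            return le64_to_cpu(dma[im]);
    }
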
1442 * @dd: the infinipath device
1447 int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
1451 if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
1452 if (dd->ipath_rcvhdrsize != rhdrsize) {
1453 dev_info(&dd->pcidev->dev,
1456 rhdrsize, dd->ipath_rcvhdrsize);
1460 "size %u\n", dd->ipath_rcvhdrsize);
1461 } else if (rhdrsize > (dd->ipath_rcvhdrentsize -
1465 dd->ipath_rcvhdrentsize -
1469 dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
1470 dd->ipath_rcvhdrsize = rhdrsize;
1471 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
1472 dd->ipath_rcvhdrsize);
1474 dd->ipath_rcvhdrsize);
1482 static noinline void no_pio_bufs(struct ipath_devdata *dd)
1484 unsigned long *shadow = dd->ipath_pioavailshadow;
1485 __le64 *dma = (__le64 *)dd->ipath_pioavailregs_dma;
1487 dd->ipath_upd_pio_shadow = 1;
1493 if (!(++dd->ipath_consec_nopiobuf % 100000)) {
1494 ipath_force_pio_avail_update(dd); /* at start */
1498 dd->ipath_consec_nopiobuf,
1509 if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
1521 ipath_reset_availshadow(dd);
1532 static u32 __iomem *ipath_getpiobuf_range(struct ipath_devdata *dd,
1538 unsigned long *shadow = dd->ipath_pioavailshadow;
1542 if (dd->ipath_upd_pio_shadow) {
1548 ipath_update_pio_bufs(dd);
1577 ipath_update_pio_bufs(dd);
1582 ((dd->ipath_sendctrl
1591 ipath_force_pio_avail_update(dd);
1592 ipath_update_pio_bufs(dd);
1598 no_pio_bufs(dd);
1601 if (i < dd->ipath_piobcnt2k)
1602 buf = (u32 __iomem *) (dd->ipath_pio2kbase +
1603 i * dd->ipath_palign);
1606 (dd->ipath_pio4kbase +
1607 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
1617 * @dd: the infinipath device
1621 u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 plen, u32 *pbufnum)
1628 first = dd->ipath_piobcnt2k;
1629 lasti = dd->ipath_lastpioindexl;
1632 lasti = dd->ipath_lastpioindex;
1634 nbufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
1635 buf = ipath_getpiobuf_range(dd, &pnum, first, nbufs, lasti);
1643 dd->ipath_lastpioindexl = pnum + 1;
1645 dd->ipath_lastpioindex = pnum + 1;
1646 if (dd->ipath_upd_pio_shadow)
1647 dd->ipath_upd_pio_shadow = 0;
1648 if (dd->ipath_consec_nopiobuf)
1649 dd->ipath_consec_nopiobuf = 0;
1651 pnum, (pnum < dd->ipath_piobcnt2k) ? 2 : 4, buf);
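
ipath_getpiobuf() keeps separate last-allocated indices for the 2 KB and 4 KB
buffer pools and starts each search just past the previous hit, so sends are
spread across all of the chip's PIO buffers; the returned address is then
base + index * stride. A round-robin sketch over a plain busy bitmap (the
driver's real shadow encodes two bits per buffer):

    #include <linux/bitops.h>
    #include <linux/errno.h>

    static int pick_piobuf(unsigned long *busy, unsigned int nbufs,
                           unsigned int *lastp)
    {
            unsigned int i = *lastp % nbufs, tries;

            for (tries = 0; tries < nbufs; tries++, i = (i + 1) % nbufs) {
                    if (!test_and_set_bit(i, busy)) {
                            *lastp = i + 1; /* resume after this one next time */
                            return i;
                    }
            }
            return -EBUSY;  /* everything busy; cf. no_pio_bufs() above */
    }
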
1661 * @dd: the infinipath device
1666 void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
1697 im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
1700 + start, dd->ipath_pioavailshadow);
1702 dd->ipath_pioavailregs_dma[im]);
1706 + start, dd->ipath_pioavailshadow);
1709 + start, dd->ipath_pioavailshadow);
1710 __set_bit(start, dd->ipath_pioavailkernel);
1713 dd->ipath_pioavailshadow);
1714 __clear_bit(start, dd->ipath_pioavailkernel);
1719 if (dd->ipath_pioupd_thresh) {
1720 end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
1721 cnt = bitmap_weight(dd->ipath_pioavailkernel, end);
1738 if (cnt < dd->ipath_pioupd_thresh) {
1739 dd->ipath_pioupd_thresh = cnt;
1741 dd->ipath_pioupd_thresh);
1742 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1743 dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
1745 dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
1747 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1748 dd->ipath_sendctrl);
1749 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1755 * @dd: the infinipath device
1762 int ipath_create_rcvhdrq(struct ipath_devdata *dd,
1770 int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
1774 &dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
1778 ipath_dev_err(dd, "attempt to allocate %d bytes "
1785 if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
1787 &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
1790 ipath_dev_err(dd, "attempt to allocate 1 page "
1794 dma_free_coherent(&dd->pcidev->dev, amt,
1832 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
1834 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
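
ipath_create_rcvhdrq() allocates the queue itself and, when the chip DMAs a
tail pointer (no IPATH_NODMA_RTAIL), a separate page for that tail, unwinding
the first allocation if the second fails; both physical addresses are then
programmed through kr_rcvhdraddr and kr_rcvhdrtailaddr. The allocate/unwind
shape:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int alloc_hdrq(struct device *dev, size_t amt,
                          void **q, dma_addr_t *q_phys,
                          void **tail, dma_addr_t *tail_phys)
    {
            *q = dma_alloc_coherent(dev, amt, q_phys, GFP_KERNEL);
            if (!*q)
                    return -ENOMEM;
            *tail = dma_alloc_coherent(dev, PAGE_SIZE, tail_phys, GFP_KERNEL);
            if (!*tail) {
                    dma_free_coherent(dev, amt, *q, *q_phys);  /* unwind */
                    *q = NULL;
                    return -ENOMEM;
            }
            return 0;
    }
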
1851 void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
1855 if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
1865 if (dd->ipath_flags & IPATH_HAS_SEND_DMA) {
1867 unsigned long *statp = &dd->ipath_sdma_status;
1869 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
1873 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
1881 dd->ipath_lastcancel = jiffies + HZ / 2;
1891 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1892 dd->ipath_sendctrl &= ~(INFINIPATH_S_PIOBUFAVAILUPD
1894 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1895 dd->ipath_sendctrl | INFINIPATH_S_ABORT);
1896 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1897 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1900 ipath_disarm_piobufs(dd, 0,
1901 dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
1903 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
1904 set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
1908 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1909 dd->ipath_sendctrl |= INFINIPATH_S_PIOBUFAVAILUPD |
1911 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1912 dd->ipath_sendctrl);
1914 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1915 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1918 if ((dd->ipath_flags & IPATH_HAS_SEND_DMA) &&
1919 !test_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status) &&
1920 test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)) {
1921 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
1923 dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
1924 dd->ipath_sdma_reset_wait = 200;
1925 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
1926 tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
1927 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
1940 void ipath_force_pio_avail_update(struct ipath_devdata *dd)
1944 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1945 if (dd->ipath_sendctrl & INFINIPATH_S_PIOBUFAVAILUPD) {
1946 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1947 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
1948 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1949 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1950 dd->ipath_sendctrl);
1951 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1953 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1956 static void ipath_set_ib_lstate(struct ipath_devdata *dd, int linkcmd,
1973 dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
1982 dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
1986 mod_wd = (linkcmd << dd->ibcc_lc_shift) |
1990 dd->ipath_unit, what[linkcmd], linitcmd,
1991 ipath_ibcstatus_str[ipath_ib_linktrstate(dd,
1992 ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus))]);
1994 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1995 dd->ipath_ibcctrl | mod_wd);
1997 (void) ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
2000 int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
2007 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN, 0);
2013 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
2020 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
2027 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
2034 if (dd->ipath_flags & IPATH_LINKARMED) {
2038 if (!(dd->ipath_flags &
2043 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED, 0);
2053 if (dd->ipath_flags & IPATH_LINKACTIVE) {
2057 if (!(dd->ipath_flags & IPATH_LINKARMED)) {
2061 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE, 0);
2066 dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
2067 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
2068 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
2069 dd->ipath_ibcctrl);
2072 dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2079 dev_info(&dd->pcidev->dev,
2081 dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2083 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
2084 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
2085 dd->ipath_ibcctrl);
2097 ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2102 ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2111 ret = ipath_wait_linkstate(dd, lstate, 2000);
2119 * @dd: the infinipath device
2129 int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
2147 if (dd->ipath_ibmtu == arg) {
2152 piosize = dd->ipath_ibmaxlen;
2153 dd->ipath_ibmtu = arg;
2157 if (piosize != dd->ipath_init_ibmaxlen) {
2158 if (arg > piosize && arg <= dd->ipath_init_ibmaxlen)
2159 piosize = dd->ipath_init_ibmaxlen;
2160 dd->ipath_ibmaxlen = piosize;
2163 } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
2166 "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
2168 dd->ipath_ibmaxlen = piosize;
2173 u64 ibc = dd->ipath_ibcctrl, ibdw;
2179 dd->ipath_ibmaxlen = piosize - 2 * sizeof(u32);
2180 ibdw = (dd->ipath_ibmaxlen >> 2) + 1;
2182 dd->ibcc_mpl_shift);
2183 ibc |= ibdw << dd->ibcc_mpl_shift;
2184 dd->ipath_ibcctrl = ibc;
2185 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
2186 dd->ipath_ibcctrl);
2187 dd->ipath_f_tidtemplate(dd);
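
The block at 2173-2187 recomputes the IBC maximum-packet-length field whenever
the usable PIO size changes: ipath_ibmaxlen drops two dwords for the PBC
header, and the field programmed into kr_ibcctrl is the length in dwords plus
one. Worked through with a 2 KB buffer:

    #include <linux/types.h>

    static u64 ibc_set_maxpktlen(u64 ibc, u32 piosize,
                                 u64 mpl_mask, unsigned int mpl_shift)
    {
            u32 ibmaxlen = piosize - 2 * sizeof(u32); /* 2048 - 8 = 2040  */
            u64 ibdw = (ibmaxlen >> 2) + 1;           /* 510 + 1 = 511    */

            ibc &= ~(mpl_mask << mpl_shift);          /* clear old length */
            ibc |= ibdw << mpl_shift;
            return ibc;
    }
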
2196 int ipath_set_lid(struct ipath_devdata *dd, u32 lid, u8 lmc)
2198 dd->ipath_lid = lid;
2199 dd->ipath_lmc = lmc;
2201 dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LIDLMC, lid |
2204 dev_info(&dd->pcidev->dev, "We got a lid: 0x%x\n", lid);
2212 * @dd: the infinipath device
2220 void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
2225 if (port < dd->ipath_portcnt &&
2226 (regno == dd->ipath_kregs->kr_rcvhdraddr ||
2227 regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
2232 ipath_write_kreg(dd, where, value);
2249 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
2254 if (!(dd->ipath_flags & IPATH_INITTED))
2257 pidx = dd->ipath_led_override_phase++ & 1;
2258 dd->ipath_led_override = dd->ipath_led_override_vals[pidx];
2259 timeoff = dd->ipath_led_override_timeoff;
2266 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
2267 ltstate = ipath_ib_linktrstate(dd, val);
2268 lstate = ipath_ib_linkstate(dd, val);
2270 dd->ipath_f_setextled(dd, lstate, ltstate);
2271 mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
2274 void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
2278 if (!(dd->ipath_flags & IPATH_INITTED))
2287 dd->ipath_led_override_vals[0] = val & 0xF;
2288 dd->ipath_led_override_vals[1] = (val >> 4) & 0xF;
2292 dd->ipath_led_override_vals[0] = val & 0xF;
2293 dd->ipath_led_override_vals[1] = val & 0xF;
2295 dd->ipath_led_override_timeoff = timeoff;
2301 if (atomic_inc_return(&dd->ipath_led_override_timer_active) == 1) {
2303 init_timer(&dd->ipath_led_override_timer);
2304 dd->ipath_led_override_timer.function =
2306 dd->ipath_led_override_timer.data = (unsigned long) dd;
2307 dd->ipath_led_override_timer.expires = jiffies + 1;
2308 add_timer(&dd->ipath_led_override_timer);
2310 atomic_dec(&dd->ipath_led_override_timer_active);
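
The blink timer at 2301-2308 is armed with the pre-4.15 timer API (init_timer()
plus hand-assigned function/data/expires fields), and atomic_inc_return() == 1
ensures only the first caller arms it. The same setup isolated below; on
current kernels this whole dance is timer_setup() plus mod_timer():

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static struct timer_list blink_timer;

    static void blink_fn(unsigned long data)
    {
            /* ... drive the LEDs, then re-arm for the next half-cycle ... */
            mod_timer(&blink_timer, jiffies + msecs_to_jiffies(250));
    }

    static void blink_start(void)
    {
            init_timer(&blink_timer);
            blink_timer.function = blink_fn;
            blink_timer.data = 0;
            blink_timer.expires = jiffies + 1;
            add_timer(&blink_timer);
    }
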
2315 * @dd: the infinipath device
2320 * Everything it does has to be setup again by ipath_init_chip(dd,1)
2322 void ipath_shutdown_device(struct ipath_devdata *dd)
2328 ipath_hol_up(dd); /* make sure user processes aren't suspended */
2330 dd->ipath_flags |= IPATH_LINKUNK;
2331 dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
2334 *dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
2338 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
2340 dd->ipath_rcvctrl = 0;
2341 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
2342 dd->ipath_rcvctrl);
2344 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
2345 teardown_sdma(dd);
2351 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
2352 dd->ipath_sendctrl = 0;
2353 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
2355 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
2356 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
2364 dd->ipath_f_setextled(dd, 0, 0); /* make sure LEDs are off */
2366 ipath_set_ib_lstate(dd, 0, INFINIPATH_IBCC_LINKINITCMD_DISABLE);
2367 ipath_cancel_sends(dd, 0);
2374 signal_ib_event(dd, IB_EVENT_PORT_ERR);
2377 dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
2378 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
2379 dd->ipath_control | INFINIPATH_C_FREEZEMODE);
2386 dd->ipath_f_quiet_serdes(dd);
2389 del_timer_sync(&dd->ipath_hol_timer);
2390 if (dd->ipath_stats_timer_active) {
2391 del_timer_sync(&dd->ipath_stats_timer);
2392 dd->ipath_stats_timer_active = 0;
2394 if (dd->ipath_intrchk_timer.data) {
2395 del_timer_sync(&dd->ipath_intrchk_timer);
2396 dd->ipath_intrchk_timer.data = 0;
2398 if (atomic_read(&dd->ipath_led_override_timer_active)) {
2399 del_timer_sync(&dd->ipath_led_override_timer);
2400 atomic_set(&dd->ipath_led_override_timer_active, 0);
2408 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
2410 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
2411 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
2414 ipath_update_eeprom_log(dd);
2419 * @dd: the infinipath device
2429 void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
2438 dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
2442 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
2459 dma_free_coherent(&dd->pcidev->dev, size,
2467 } else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
2469 struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;
2471 dd->ipath_port0_skbinfo = NULL;
2475 for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
2477 pci_unmap_single(dd->pcidev, skbinfo[e].phys,
2478 dd->ipath_ibmaxlen,
2552 struct ipath_devdata *dd = ipath_lookup(unit);
2555 if (!dd) {
2560 if (atomic_read(&dd->ipath_led_override_timer_active)) {
2562 del_timer_sync(&dd->ipath_led_override_timer);
2563 atomic_set(&dd->ipath_led_override_timer_active, 0);
2567 dd->ipath_led_override = LED_OVER_BOTH_OFF;
2568 dd->ipath_f_setextled(dd, 0, 0);
2570 dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);
2572 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
2573 dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
2579 spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
2580 if (dd->ipath_pd)
2581 for (i = 1; i < dd->ipath_cfgports; i++) {
2582 if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
2584 spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
2588 pid_nr(dd->ipath_pd[i]->port_pid),
2589 dd->ipath_pd[i]->port_comm);
2593 spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
2595 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
2596 teardown_sdma(dd);
2598 dd->ipath_flags &= ~IPATH_INITTED;
2599 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
2600 ret = dd->ipath_f_reset(dd);
2604 ret = ipath_init_chip(dd, 1);
2608 ipath_dev_err(dd, "Reinitialize unit %u after "
2611 dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
2623 static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
2629 if (!dd->ipath_pd)
2632 spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
2633 for (i = 1; i < dd->ipath_cfgports; i++) {
2634 if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
2636 pid = dd->ipath_pd[i]->port_pid;
2640 dev_info(&dd->pcidev->dev, "context %d in use "
2646 pid = dd->ipath_pd[i]->port_subpid[sub];
2649 dev_info(&dd->pcidev->dev, "sub-context "
2656 spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
2660 static void ipath_hol_signal_down(struct ipath_devdata *dd)
2662 if (ipath_signal_procs(dd, SIGSTOP))
2664 ipath_cancel_sends(dd, 1);
2668 static void ipath_hol_signal_up(struct ipath_devdata *dd)
2670 if (ipath_signal_procs(dd, SIGCONT))
2681 void ipath_hol_down(struct ipath_devdata *dd)
2683 dd->ipath_hol_state = IPATH_HOL_DOWN;
2684 ipath_hol_signal_down(dd);
2685 dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
2686 dd->ipath_hol_timer.expires = jiffies +
2688 mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
2696 void ipath_hol_up(struct ipath_devdata *dd)
2698 ipath_hol_signal_up(dd);
2699 dd->ipath_hol_state = IPATH_HOL_UP;
2710 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
2712 if (dd->ipath_hol_next == IPATH_HOL_DOWNSTOP
2713 && dd->ipath_hol_state != IPATH_HOL_UP) {
2714 dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
2716 ipath_hol_signal_down(dd);
2718 dd->ipath_hol_next = IPATH_HOL_DOWNSTOP;
2720 ipath_hol_signal_up(dd);
2722 if (dd->ipath_hol_state == IPATH_HOL_UP)
2725 dd->ipath_hol_timer.expires = jiffies +
2727 mod_timer(&dd->ipath_hol_timer,
2728 dd->ipath_hol_timer.expires);
2732 int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
2738 if (dd->ipath_rx_pol_inv != new_pol_inv) {
2739 dd->ipath_rx_pol_inv = new_pol_inv;
2740 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
2743 val |= ((u64)dd->ipath_rx_pol_inv) <<
2745 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
2759 void ipath_enable_armlaunch(struct ipath_devdata *dd)
2761 dd->ipath_lasterror &= ~INFINIPATH_E_SPIOARMLAUNCH;
2762 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
2764 dd->ipath_errormask |= INFINIPATH_E_SPIOARMLAUNCH;
2765 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
2766 dd->ipath_errormask);
2769 void ipath_disable_armlaunch(struct ipath_devdata *dd)
2772 dd->ipath_maskederrs &= ~INFINIPATH_E_SPIOARMLAUNCH;
2773 dd->ipath_errormask &= ~INFINIPATH_E_SPIOARMLAUNCH;
2774 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
2775 dd->ipath_errormask);
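
ipath_enable_armlaunch() clears any latched SPIOARMLAUNCH error through
kr_errorclear before setting the bit in kr_errormask, so re-enabling can never
fire immediately on a stale error; the disable path removes the bit from both
the live mask and the masked-errors shadow. The clear-then-unmask order,
generalized with hypothetical register offsets:

    #include <linux/io.h>
    #include <linux/types.h>

    #define KR_ERRORCLEAR   0x98    /* hypothetical; the driver indirects  */
    #define KR_ERRORMASK    0xa0    /* through dd->ipath_kregs             */

    static void unmask_error(void __iomem *base, u64 *maskp, u64 bit)
    {
            writeq(bit, base + KR_ERRORCLEAR);      /* drop any latched copy */
            *maskp |= bit;
            writeq(*maskp, base + KR_ERRORMASK);    /* then let it fire      */
    }
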