Lines matching refs:dd (references to the per-device struct qib_devdata pointer dd; the leading numbers are source line numbers in drivers/infiniband/hw/qib/qib_init.c)

111 void qib_set_ctxtcnt(struct qib_devdata *dd)
114 dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
115 if (dd->cfgctxts > dd->ctxtcnt)
116 dd->cfgctxts = dd->ctxtcnt;
117 } else if (qib_cfgctxts < dd->num_pports)
118 dd->cfgctxts = dd->ctxtcnt;
119 else if (qib_cfgctxts <= dd->ctxtcnt)
120 dd->cfgctxts = qib_cfgctxts;
122 dd->cfgctxts = dd->ctxtcnt;
123 dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
124 dd->cfgctxts - dd->first_user_ctxt;
130 int qib_create_ctxts(struct qib_devdata *dd)
133 int local_node_id = pcibus_to_node(dd->pcidev->bus);
137 dd->assigned_node_id = local_node_id;
143 dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
144 if (!dd->rcd) {
145 qib_dev_err(dd,
151 for (i = 0; i < dd->first_user_ctxt; ++i) {
155 if (dd->skip_kctxt_mask & (1 << i))
158 ppd = dd->pport + (i % dd->num_pports);
160 rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id);
162 qib_dev_err(dd,
164 kfree(dd->rcd);
165 dd->rcd = NULL;
180 struct qib_devdata *dd = ppd->dd;
188 rcd->dd = dd;
191 dd->rcd[ctxt] = rcd;
193 if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
198 qib_dev_err(dd,
204 dd->f_init_ctxt(rcd);
219 rcd->rcvegrbuf_size / dd->rcvegrbufsize;
233 int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
237 ppd->dd = dd;
269 qib_dev_err(dd,
278 qib_dev_err(dd,
287 qib_dev_err(dd,
296 qib_dev_err(dd,
320 qib_dev_err(dd,
325 qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n",
330 static int init_pioavailregs(struct qib_devdata *dd)
335 dd->pioavailregs_dma = dma_alloc_coherent(
336 &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
338 if (!dd->pioavailregs_dma) {
339 qib_dev_err(dd,
350 ((char *) dd->pioavailregs_dma +
352 dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
354 dd->devstatusp = status_page;
356 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
357 dd->pport[pidx].statusp = status_page;
365 dd->freezemsg = (char *) status_page;
366 *dd->freezemsg = 0;
368 ret = (char *) status_page - (char *) dd->pioavailregs_dma;
369 dd->freezelen = PAGE_SIZE - ret;
379 * @dd: the qlogic_ib device
388 static void init_shadow_tids(struct qib_devdata *dd)
393 pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
395 qib_dev_err(dd,
400 addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
402 qib_dev_err(dd,
407 dd->pageshadow = pages;
408 dd->physshadow = addrs;
414 dd->pageshadow = NULL;
421 static int loadtime_init(struct qib_devdata *dd)
425 if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
427 qib_dev_err(dd,
430 (int)(dd->revision >>
433 (unsigned long long) dd->revision);
438 if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
439 qib_devinfo(dd->pcidev, "%s", dd->boardversion);
441 spin_lock_init(&dd->pioavail_lock);
442 spin_lock_init(&dd->sendctrl_lock);
443 spin_lock_init(&dd->uctxt_lock);
444 spin_lock_init(&dd->qib_diag_trans_lock);
445 spin_lock_init(&dd->eep_st_lock);
446 mutex_init(&dd->eep_lock);
451 ret = init_pioavailregs(dd);
452 init_shadow_tids(dd);
454 qib_get_eeprom_info(dd);
457 init_timer(&dd->intrchk_timer);
458 dd->intrchk_timer.function = verify_interrupt;
459 dd->intrchk_timer.data = (unsigned long) dd;
461 ret = qib_cq_init(dd);
468 * @dd: the qlogic_ib device
474 static int init_after_reset(struct qib_devdata *dd)
483 for (i = 0; i < dd->num_pports; ++i) {
488 dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
492 dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
499 static void enable_chip(struct qib_devdata *dd)
507 for (i = 0; i < dd->num_pports; ++i)
508 dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
515 rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
517 for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
518 struct qib_ctxtdata *rcd = dd->rcd[i];
521 dd->f_rcvctrl(rcd->ppd, rcvmask, i);
527 struct qib_devdata *dd = (struct qib_devdata *) opaque;
530 if (!dd)
537 int_counter = qib_int_counter(dd) - dd->z_int_counter;
539 if (!dd->f_intr_fallback(dd))
540 dev_err(&dd->pcidev->dev,
543 mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
547 static void init_piobuf_state(struct qib_devdata *dd)
560 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
561 for (pidx = 0; pidx < dd->num_pports; ++pidx)
562 dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);
570 uctxts = dd->cfgctxts - dd->first_user_ctxt;
571 dd->ctxts_extrabuf = dd->pbufsctxt ?
572 dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;
583 for (i = 0; i < dd->pioavregs; i++) {
586 tmp = dd->pioavailregs_dma[i];
592 dd->pioavailshadow[i] = le64_to_cpu(tmp);
594 while (i < ARRAY_SIZE(dd->pioavailshadow))
595 dd->pioavailshadow[i++] = 0; /* for debugging sanity */
598 qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
600 dd->f_initvl15_bufs(dd);
605 * @dd: the qlogic_ib device
607 static int qib_create_workqueues(struct qib_devdata *dd)
612 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
613 ppd = dd->pport + pidx;
617 dd->unit, pidx);
628 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
629 ppd = dd->pport + pidx;
646 * @dd: the qlogic_ib device
659 int qib_init(struct qib_devdata *dd, int reinit)
669 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
670 ppd = dd->pport + pidx;
679 ret = init_after_reset(dd);
681 ret = loadtime_init(dd);
689 ret = dd->f_late_initreg(dd);
693 /* dd->rcd can be NULL if early init failed */
694 for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
701 rcd = dd->rcd[i];
705 lastfail = qib_create_rcvhdrq(dd, rcd);
709 qib_dev_err(dd,
715 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
719 ppd = dd->pport + pidx;
727 dd->piosize4k : dd->piosize2k,
728 dd->rcvegrbufsize +
729 (dd->rcvhdrentsize << 2));
741 lastfail = dd->f_bringup_serdes(ppd);
743 qib_devinfo(dd->pcidev,
761 enable_chip(dd);
763 init_piobuf_state(dd);
768 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
769 ppd = dd->pport + pidx;
778 if (dd->flags & QIB_HAS_SEND_DMA)
787 dd->f_set_intr_state(dd, 1);
793 mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
795 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
808 int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
813 void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
824 struct qib_devdata *dd;
828 dd = __qib_lookup(unit);
831 return dd;
838 static void qib_stop_timers(struct qib_devdata *dd)
843 if (dd->stats_timer.data) {
844 del_timer_sync(&dd->stats_timer);
845 dd->stats_timer.data = 0;
847 if (dd->intrchk_timer.data) {
848 del_timer_sync(&dd->intrchk_timer);
849 dd->intrchk_timer.data = 0;
851 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
852 ppd = dd->pport + pidx;
866 * @dd: the qlogic_ib device
871 * Everything it does has to be setup again by qib_init(dd, 1)
873 static void qib_shutdown_device(struct qib_devdata *dd)
878 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
879 ppd = dd->pport + pidx;
888 dd->flags &= ~QIB_INITTED;
891 dd->f_set_intr_state(dd, 0);
893 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
894 ppd = dd->pport + pidx;
895 dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
903 dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
912 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
913 ppd = dd->pport + pidx;
914 dd->f_setextled(ppd, 0); /* make sure LEDs are off */
916 if (dd->flags & QIB_HAS_SEND_DMA)
919 dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
925 dd->f_quiet_serdes(ppd);
934 qib_update_eeprom_log(dd);
939 * @dd: the qlogic_ib device
948 void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
954 dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
958 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
971 dma_free_coherent(&dd->pcidev->dev, size,
1006 static void qib_verify_pioperf(struct qib_devdata *dd)
1013 piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
1015 qib_devinfo(dd->pcidev,
1028 qib_devinfo(dd->pcidev,
1043 dd->f_set_armlaunch(dd, 0);
1064 qib_dev_err(dd,
1074 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
1075 qib_sendbuf_done(dd, pbnum);
1076 dd->f_set_armlaunch(dd, 1);
1079 void qib_free_devdata(struct qib_devdata *dd)
1084 idr_remove(&qib_unit_table, dd->unit);
1085 list_del(&dd->list);
1089 qib_dbg_ibdev_exit(&dd->verbs_dev);
1091 free_percpu(dd->int_counter);
1092 ib_dealloc_device(&dd->verbs_dev.ibdev);
1095 u64 qib_int_counter(struct qib_devdata *dd)
1101 int_counter += *per_cpu_ptr(dd->int_counter, cpu);
1108 struct qib_devdata *dd;
1112 list_for_each_entry(dd, &qib_dev_list, list) {
1113 sps_ints += qib_int_counter(dd);
1130 struct qib_devdata *dd;
1133 dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
1134 if (!dd)
1137 INIT_LIST_HEAD(&dd->list);
1142 ret = idr_alloc(&qib_unit_table, dd, 0, 0, GFP_NOWAIT);
1144 dd->unit = ret;
1145 list_add(&dd->list, &qib_dev_list);
1156 dd->int_counter = alloc_percpu(u64);
1157 if (!dd->int_counter) {
1175 qib_dbg_ibdev_init(&dd->verbs_dev);
1177 return dd;
1179 if (!list_empty(&dd->list))
1180 list_del_init(&dd->list);
1181 ib_dealloc_device(&dd->verbs_dev.ibdev);
1190 void qib_disable_after_error(struct qib_devdata *dd)
1192 if (dd->flags & QIB_INITTED) {
1195 dd->flags &= ~QIB_INITTED;
1196 if (dd->pport)
1197 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1200 ppd = dd->pport + pidx;
1201 if (dd->flags & QIB_PRESENT) {
1204 dd->f_setextled(ppd, 0);
1215 if (dd->devstatusp)
1216 *dd->devstatusp |= QIB_STATUS_HWERROR;
1253 struct qib_devdata *dd = dev_get_drvdata(device);
1256 return dd->f_notify_dca(dd, event);
1352 static void cleanup_device_data(struct qib_devdata *dd)
1360 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1361 if (dd->pport[pidx].statusp)
1362 *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;
1364 spin_lock(&dd->pport[pidx].cc_shadow_lock);
1366 kfree(dd->pport[pidx].congestion_entries);
1367 dd->pport[pidx].congestion_entries = NULL;
1368 kfree(dd->pport[pidx].ccti_entries);
1369 dd->pport[pidx].ccti_entries = NULL;
1370 kfree(dd->pport[pidx].ccti_entries_shadow);
1371 dd->pport[pidx].ccti_entries_shadow = NULL;
1372 kfree(dd->pport[pidx].congestion_entries_shadow);
1373 dd->pport[pidx].congestion_entries_shadow = NULL;
1375 spin_unlock(&dd->pport[pidx].cc_shadow_lock);
1379 qib_disable_wc(dd);
1381 if (dd->pioavailregs_dma) {
1382 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
1383 (void *) dd->pioavailregs_dma,
1384 dd->pioavailregs_phys);
1385 dd->pioavailregs_dma = NULL;
1388 if (dd->pageshadow) {
1389 struct page **tmpp = dd->pageshadow;
1390 dma_addr_t *tmpd = dd->physshadow;
1393 for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
1394 int ctxt_tidbase = ctxt * dd->rcvtidcnt;
1395 int maxtid = ctxt_tidbase + dd->rcvtidcnt;
1400 pci_unmap_page(dd->pcidev, tmpd[i],
1407 dd->pageshadow = NULL;
1409 dd->physshadow = NULL;
1420 spin_lock_irqsave(&dd->uctxt_lock, flags);
1421 tmp = dd->rcd;
1422 dd->rcd = NULL;
1423 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
1424 for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
1428 qib_free_ctxtdata(dd, rcd);
1431 kfree(dd->boardname);
1432 qib_cq_exit(dd);
1439 static void qib_postinit_cleanup(struct qib_devdata *dd)
1448 if (dd->f_cleanup)
1449 dd->f_cleanup(dd);
1451 qib_pcie_ddcleanup(dd);
1453 cleanup_device_data(dd);
1455 qib_free_devdata(dd);
1461 struct qib_devdata *dd = NULL;
1468 * Do device-specific initialization, function table setup, dd
1474 dd = qib_init_iba6120_funcs(pdev, ent);
1479 dd = ERR_PTR(-ENODEV);
1484 dd = qib_init_iba7220_funcs(pdev, ent);
1488 dd = qib_init_iba7322_funcs(pdev, ent);
1498 if (IS_ERR(dd))
1499 ret = PTR_ERR(dd);
1503 ret = qib_create_workqueues(dd);
1508 initfail = qib_init(dd, 0);
1510 ret = qib_register_ib_device(dd);
1519 dd->flags |= QIB_INITTED;
1521 j = qib_device_create(dd);
1523 qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
1524 j = qibfs_add(dd);
1526 qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
1530 qib_stop_timers(dd);
1532 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1533 dd->f_quiet_serdes(dd->pport + pidx);
1537 (void) qibfs_remove(dd);
1538 qib_device_remove(dd);
1541 qib_unregister_ib_device(dd);
1542 qib_postinit_cleanup(dd);
1549 ret = qib_enable_wc(dd);
1551 qib_dev_err(dd,
1558 qib_verify_pioperf(dd);
1565 struct qib_devdata *dd = pci_get_drvdata(pdev);
1569 qib_unregister_ib_device(dd);
1576 qib_shutdown_device(dd);
1578 qib_stop_timers(dd);
1583 ret = qibfs_remove(dd);
1585 qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
1588 qib_device_remove(dd);
1590 qib_postinit_cleanup(dd);
1595 * @dd: the qlogic_ib device
1602 int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
1611 amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
1613 gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
1616 old_node_id = dev_to_node(&dd->pcidev->dev);
1617 set_dev_node(&dd->pcidev->dev, rcd->node_id);
1619 &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
1621 set_dev_node(&dd->pcidev->dev, old_node_id);
1624 qib_dev_err(dd,
1630 if (rcd->ctxt >= dd->first_user_ctxt) {
1636 if (!(dd->flags & QIB_NODMA_RTAIL)) {
1637 set_dev_node(&dd->pcidev->dev, rcd->node_id);
1639 &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
1641 set_dev_node(&dd->pcidev->dev, old_node_id);
1657 qib_dev_err(dd,
1663 dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
1681 struct qib_devdata *dd = rcd->dd;
1697 egrsize = dd->rcvegrbufsize;
1720 old_node_id = dev_to_node(&dd->pcidev->dev);
1721 set_dev_node(&dd->pcidev->dev, rcd->node_id);
1723 dma_alloc_coherent(&dd->pcidev->dev, size,
1726 set_dev_node(&dd->pcidev->dev, old_node_id);
1741 dd->f_put_tid(dd, e + egroff +
1744 dd->kregbase +
1745 dd->rcvegrbase),
1756 dma_free_coherent(&dd->pcidev->dev, size,
1773 int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
1779 u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
1780 u64 qib_pio4koffset = dd->piobufbase >> 32;
1781 u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
1782 u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
1783 u64 qib_physaddr = dd->physaddr;
1792 iounmap(dd->kregbase);
1793 dd->kregbase = NULL;
1804 if (dd->piobcnt4k == 0) {
1816 if (dd->uregbase > qib_kreglen)
1817 qib_userlen = dd->ureg_align * dd->cfgctxts;
1829 qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
1835 dd->kregbase = qib_kregbase;
1836 dd->kregend = (u64 __iomem *)
1838 dd->piobase = qib_piobase;
1839 dd->pio2kbase = (void __iomem *)
1840 (((char __iomem *) dd->piobase) +
1842 if (dd->piobcnt4k)
1843 dd->pio4kbase = (void __iomem *)
1844 (((char __iomem *) dd->piobase) +
1847 /* ureg will now be accessed relative to dd->userbase */
1848 dd->userbase = qib_userbase;
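
The opening entries of the listing (source lines 111-124) capture essentially all of qib_set_ctxtcnt(): the configured context count is clamped against what the chip supports (dd->ctxtcnt) and the qib_cfgctxts module parameter, and the free-context count is then derived without going negative. As a readability aid, here is a minimal standalone C sketch of that clamping logic. The harness, the name clamp_cfgctxts, and the sample values are hypothetical; the control flow around the listed lines (the opening "if (!qib_cfgctxts) {" and the final "else") is reconstructed from the braces and indentation visible above, so treat it as a sketch rather than the driver's exact code.

#include <stdio.h>

/*
 * Hypothetical userspace stand-ins for the driver state referenced in
 * the listing: dd->ctxtcnt (contexts the chip supports),
 * dd->first_user_ctxt, dd->num_pports, and the qib_cfgctxts module
 * parameter.
 */
static int clamp_cfgctxts(int qib_cfgctxts, int ctxtcnt,
                          int first_user_ctxt, int num_pports,
                          int online_cpus)
{
	int cfgctxts;

	if (!qib_cfgctxts) {
		/* default: one user context per online CPU, capped at
		 * the chip's context count (lines 114-116) */
		cfgctxts = first_user_ctxt + online_cpus;
		if (cfgctxts > ctxtcnt)
			cfgctxts = ctxtcnt;
	} else if (qib_cfgctxts < num_pports)
		cfgctxts = ctxtcnt;	/* fewer than ports requested: use all (117-118) */
	else if (qib_cfgctxts <= ctxtcnt)
		cfgctxts = qib_cfgctxts;	/* request fits: honor it (119-120) */
	else
		cfgctxts = ctxtcnt;	/* request too large: cap at chip (122) */

	return cfgctxts;
}

int main(void)
{
	int first_user_ctxt = 2;
	int cfg = clamp_cfgctxts(0, 18, first_user_ctxt, 2, 8);
	/* lines 123-124: freectxts is clamped so it never goes negative */
	int freectxts = (first_user_ctxt > cfg) ? 0 : cfg - first_user_ctxt;

	printf("cfgctxts=%d freectxts=%d\n", cfg, freectxts);
	return 0;
}

With qib_cfgctxts left at 0, the sketch prints cfgctxts=10 freectxts=8: the two kernel contexts plus one user context per online CPU, comfortably under the 18 contexts the chip supports in this hypothetical configuration.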