Lines Matching refs:dd

42 static void vl15_watchdog_enq(struct ipath_devdata *dd)
45 if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
47 dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
48 add_timer(&dd->ipath_sdma_vl15_timer);
52 static void vl15_watchdog_deq(struct ipath_devdata *dd)
55 if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
57 mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
59 del_timer(&dd->ipath_sdma_vl15_timer);
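
The two helpers above (lines 42-59) implement a reference-counted watchdog: the first queued VL15 packet arms the timer, a dequeue that still leaves packets outstanding pushes the deadline out, and the dequeue that empties the queue deletes it. A minimal user-space sketch of the same counting pattern, assuming illustrative stand-ins for the timer calls:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's timer operations. */
static void timer_arm(void)    { puts("arm watchdog"); }
static void timer_extend(void) { puts("extend watchdog"); }
static void timer_cancel(void) { puts("cancel watchdog"); }

static atomic_int vl15_count;

/* First outstanding packet arms the timer (count goes 0 -> 1). */
static void watchdog_enq(void)
{
        if (atomic_fetch_add(&vl15_count, 1) + 1 == 1)
                timer_arm();
}

/* A completion either pushes the deadline out or, when the count
 * reaches zero, cancels the timer entirely. */
static void watchdog_deq(void)
{
        if (atomic_fetch_sub(&vl15_count, 1) - 1 != 0)
                timer_extend();
        else
                timer_cancel();
}

int main(void)
{
        watchdog_enq();   /* arm    */
        watchdog_enq();   /* no-op  */
        watchdog_deq();   /* extend */
        watchdog_deq();   /* cancel */
        return 0;
}
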
65 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
67 if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) {
69 ipath_cancel_sends(dd, 1);
70 ipath_hol_down(dd);
77 static void unmap_desc(struct ipath_devdata *dd, unsigned head)
79 __le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
89 dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
95 int ipath_sdma_make_progress(struct ipath_devdata *dd)
103 if (!list_empty(&dd->ipath_sdma_activelist)) {
104 lp = dd->ipath_sdma_activelist.next;
115 dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
117 if (dmahead >= dd->ipath_sdma_descq_cnt)
120 while (dd->ipath_sdma_descq_head != dmahead) {
122 dd->ipath_sdma_descq_head == start_idx) {
123 unmap_desc(dd, dd->ipath_sdma_descq_head);
125 if (start_idx == dd->ipath_sdma_descq_cnt)
130 dd->ipath_sdma_descq_removed++;
131 if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
132 dd->ipath_sdma_descq_head = 0;
134 if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
137 vl15_watchdog_deq(dd);
138 list_move_tail(lp, &dd->ipath_sdma_notifylist);
139 if (!list_empty(&dd->ipath_sdma_activelist)) {
140 lp = dd->ipath_sdma_activelist.next;
153 tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
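
ipath_sdma_make_progress() (lines 95-153) walks the software head toward the index the hardware reported in kr_senddmahead, wrapping at ipath_sdma_descq_cnt and retiring each consumed descriptor along the way. A small stand-alone sketch of that ring walk, assuming an illustrative fixed ring size and stubbing out the per-descriptor completion work:

#include <stdint.h>
#include <stdio.h>

#define DESCQ_CNT 16u           /* illustrative ring size */

static unsigned descq_head;     /* software copy of the consumer index */
static uint64_t descq_removed;  /* running count of retired descriptors */

/* Advance the software head until it catches up with the index the
 * hardware reported; wrap explicitly at the end of the ring. */
static void advance_head(unsigned hw_head)
{
        if (hw_head >= DESCQ_CNT)
                return;         /* bogus value from hardware: ignore it */

        while (descq_head != hw_head) {
                /* a real driver would unmap/complete the descriptor here */
                descq_removed++;
                if (++descq_head == DESCQ_CNT)
                        descq_head = 0;
        }
}

int main(void)
{
        advance_head(5);
        advance_head(2);        /* wraps past the end of the ring */
        printf("head=%u removed=%llu\n",
               descq_head, (unsigned long long)descq_removed);
        return 0;
}
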
159 static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
172 static void sdma_notify_taskbody(struct ipath_devdata *dd)
179 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
181 list_splice_init(&dd->ipath_sdma_notifylist, &list);
183 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
185 ipath_sdma_notify(dd, &list);
194 ipath_ib_piobufavail(dd->verbs_dev);
199 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
201 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
202 sdma_notify_taskbody(dd);
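
sdma_notify_taskbody() (lines 172-194) uses the splice-and-drain idiom: the whole notify list is moved onto a local list head while ipath_sdma_lock is held, the lock is dropped, and the entries are then processed from the private copy. A hedged kernel-style sketch of that idiom, with illustrative structure and lock names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct work_item {
        struct list_head list;
        void (*complete)(struct work_item *);
};

static LIST_HEAD(pending);              /* protected by pending_lock */
static DEFINE_SPINLOCK(pending_lock);

/* Splice the shared list onto a stack-local head under the lock, then
 * run the callbacks with the lock dropped, as the notify task does. */
static void drain_pending(void)
{
        struct work_item *item, *next;
        struct list_head local;
        unsigned long flags;

        INIT_LIST_HEAD(&local);

        spin_lock_irqsave(&pending_lock, flags);
        list_splice_init(&pending, &local);
        spin_unlock_irqrestore(&pending_lock, flags);

        list_for_each_entry_safe(item, next, &local, list) {
                list_del(&item->list);
                item->complete(item);
        }
}
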
205 static void dump_sdma_state(struct ipath_devdata *dd)
209 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus);
212 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl);
215 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0);
218 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1);
221 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2);
224 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
227 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
233 struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
237 if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
240 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
242 status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK;
250 if (time_before(jiffies, dd->ipath_sdma_abort_intr_timeout))
254 __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
264 hwstatus = ipath_read_kreg64(dd,
265 dd->ipath_kregs->kr_senddmastatus);
271 if (dd->ipath_sdma_reset_wait > 0) {
273 --dd->ipath_sdma_reset_wait;
278 dump_sdma_state(dd);
283 &dd->ipath_sdma_activelist, list) {
286 vl15_watchdog_deq(dd);
287 list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
291 tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
294 dd->ipath_sdma_descq_tail = 0;
295 dd->ipath_sdma_descq_head = 0;
296 dd->ipath_sdma_head_dma[0] = 0;
297 dd->ipath_sdma_generation = 0;
298 dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;
301 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
302 (u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));
305 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
318 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
319 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
320 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
321 dd->ipath_sendctrl);
322 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
323 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
326 dd->ipath_sdma_abort_jiffies = 0;
332 if (dd->ipath_flags & IPATH_LINKACTIVE)
333 ipath_restart_sdma(dd);
344 if (time_after(jiffies, dd->ipath_sdma_abort_jiffies)) {
346 dd->ipath_sdma_status);
347 dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
350 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
351 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
352 tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
356 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
364 void ipath_sdma_intr(struct ipath_devdata *dd)
368 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
370 (void) ipath_sdma_make_progress(dd);
372 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
375 static int alloc_sdma(struct ipath_devdata *dd)
380 dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
381 SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);
383 if (!dd->ipath_sdma_descq) {
384 ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
390 dd->ipath_sdma_descq_cnt =
394 dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
395 PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
396 if (!dd->ipath_sdma_head_dma) {
397 ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
401 dd->ipath_sdma_head_dma[0] = 0;
403 init_timer(&dd->ipath_sdma_vl15_timer);
404 dd->ipath_sdma_vl15_timer.function = vl15_watchdog_timeout;
405 dd->ipath_sdma_vl15_timer.data = (unsigned long)dd;
406 atomic_set(&dd->ipath_sdma_vl15_count, 0);
411 dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
412 (void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
413 dd->ipath_sdma_descq = NULL;
414 dd->ipath_sdma_descq_phys = 0;
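
alloc_sdma() (lines 375-414) makes two coherent allocations, the descriptor queue and a one-page head buffer, and frees the first if the second fails. A hedged kernel-style sketch of that allocate/unwind order; the function and parameter names are illustrative, only the dma_alloc_coherent()/dma_free_coherent() pairing mirrors the listing:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate a descriptor ring and a head page, undoing the first
 * allocation if the second one fails (same order as the driver). */
static int alloc_ring_and_head(struct device *dev, size_t ring_sz,
                               void **ring, dma_addr_t *ring_phys,
                               void **head, dma_addr_t *head_phys)
{
        *ring = dma_alloc_coherent(dev, ring_sz, ring_phys, GFP_KERNEL);
        if (!*ring)
                return -ENOMEM;

        *head = dma_alloc_coherent(dev, PAGE_SIZE, head_phys, GFP_KERNEL);
        if (!*head) {
                dma_free_coherent(dev, ring_sz, *ring, *ring_phys);
                *ring = NULL;
                return -ENOMEM;
        }
        return 0;
}
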
419 int setup_sdma(struct ipath_devdata *dd)
427 ret = alloc_sdma(dd);
431 if (!dd->ipath_sdma_descq) {
432 ipath_dev_err(dd, "SendDMA memory not allocated\n");
441 dd->ipath_sdma_status = IPATH_SDMA_ABORT_ABORTED;
442 dd->ipath_sdma_abort_jiffies = 0;
443 dd->ipath_sdma_generation = 0;
444 dd->ipath_sdma_descq_tail = 0;
445 dd->ipath_sdma_descq_head = 0;
446 dd->ipath_sdma_descq_removed = 0;
447 dd->ipath_sdma_descq_added = 0;
450 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
451 dd->ipath_sdma_descq_phys);
453 tmp64 = dd->ipath_sdma_descq_cnt;
455 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);
457 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
458 dd->ipath_sdma_descq_tail);
460 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
461 dd->ipath_sdma_head_phys);
467 n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
468 i = dd->ipath_lastport_piobuf + dd->ipath_pioreserved;
469 ipath_chg_pioavailkernel(dd, i, n - i, 0);
476 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
478 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
480 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2,
483 INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
484 INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);
486 tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
487 (unsigned long) dd);
488 tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
489 (unsigned long) dd);
497 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
498 dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
499 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
500 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
501 __set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
502 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
508 void teardown_sdma(struct ipath_devdata *dd)
517 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
518 __clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
519 __set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
520 __set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status);
521 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
523 tasklet_kill(&dd->ipath_sdma_abort_task);
524 tasklet_kill(&dd->ipath_sdma_notify_task);
527 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
528 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
529 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
530 dd->ipath_sendctrl);
531 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
532 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
534 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
536 list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
540 vl15_watchdog_deq(dd);
541 list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
543 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
545 sdma_notify_taskbody(dd);
547 del_timer_sync(&dd->ipath_sdma_vl15_timer);
549 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
551 dd->ipath_sdma_abort_jiffies = 0;
553 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0);
554 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0);
555 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0);
556 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0);
557 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0);
558 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0);
559 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0);
561 if (dd->ipath_sdma_head_dma) {
562 sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
563 sdma_head_phys = dd->ipath_sdma_head_phys;
564 dd->ipath_sdma_head_dma = NULL;
565 dd->ipath_sdma_head_phys = 0;
568 if (dd->ipath_sdma_descq) {
569 sdma_descq = dd->ipath_sdma_descq;
570 sdma_descq_phys = dd->ipath_sdma_descq_phys;
571 dd->ipath_sdma_descq = NULL;
572 dd->ipath_sdma_descq_phys = 0;
575 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
578 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
582 dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
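
In teardown_sdma() (lines 561-582) the coherent buffers are detached from the device structure while ipath_sdma_lock is held and only freed after the lock has been dropped. A hedged sketch of that detach-then-free pattern, using an illustrative structure in place of ipath_devdata:

#include <linux/dma-mapping.h>
#include <linux/spinlock.h>

struct ring_state {
        spinlock_t lock;
        void *descq;
        dma_addr_t descq_phys;
        size_t descq_sz;
};

/* Take the buffer pointers out of the shared state under the lock,
 * then do the actual free once the lock is no longer held. */
static void ring_teardown(struct device *dev, struct ring_state *rs)
{
        void *descq = NULL;
        dma_addr_t descq_phys = 0;
        unsigned long flags;

        spin_lock_irqsave(&rs->lock, flags);
        if (rs->descq) {
                descq = rs->descq;
                descq_phys = rs->descq_phys;
                rs->descq = NULL;
                rs->descq_phys = 0;
        }
        spin_unlock_irqrestore(&rs->lock, flags);

        if (descq)
                dma_free_coherent(dev, rs->descq_sz, descq, descq_phys);
}
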
591 void ipath_restart_sdma(struct ipath_devdata *dd)
596 if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
604 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
605 if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)
606 || test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
609 __clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
610 __clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
611 __clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
613 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
616 dd->ipath_sdma_status);
619 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
624 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
625 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
626 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
627 dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
628 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
629 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
630 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
633 ipath_ib_piobufavail(dd->verbs_dev);
639 static inline void make_sdma_desc(struct ipath_devdata *dd,
648 sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;
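
make_sdma_desc() (line 648) folds a 2-bit generation count into bits 31:30 of the first descriptor qword. A stand-alone sketch of that kind of field packing; apart from the generation position, the layout below is illustrative rather than the hardware's:

#include <stdint.h>
#include <stdio.h>

/* Pack a descriptor qword: a 16-bit length in the low bits, a 2-bit
 * generation count at bits 31:30 (as on line 648), and an illustrative
 * 32-bit address field in the upper half. */
static uint64_t pack_desc_qw0(uint32_t addr, uint16_t len, unsigned gen)
{
        uint64_t qw = len;

        qw |= (uint64_t)(gen & 3u) << 30;
        qw |= (uint64_t)addr << 32;
        return qw;
}

int main(void)
{
        printf("qw0 = %#llx\n",
               (unsigned long long)pack_desc_qw0(0xdeadbeef, 256, 2));
        return 0;
}
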
664 int ipath_sdma_verbs_send(struct ipath_devdata *dd,
678 if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) {
680 tx->map_len + (dwords<<2), dd->ipath_ibmaxlen);
685 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
688 if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) {
693 if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {
694 if (ipath_sdma_make_progress(dd))
700 addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
702 if (dma_mapping_error(&dd->pcidev->dev, addr))
706 make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);
714 tail = dd->ipath_sdma_descq_tail;
715 descqp = &dd->ipath_sdma_descq[tail].qw[0];
723 if (++tail == dd->ipath_sdma_descq_cnt) {
725 descqp = &dd->ipath_sdma_descq[0].qw[0];
726 ++dd->ipath_sdma_generation;
741 addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
743 if (dma_mapping_error(&dd->pcidev->dev, addr))
745 make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
754 if (++tail == dd->ipath_sdma_descq_cnt) {
756 descqp = &dd->ipath_sdma_descq[0].qw[0];
757 ++dd->ipath_sdma_generation;
782 descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
793 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
797 dd->ipath_sdma_descq_tail = tail;
798 dd->ipath_sdma_descq_added += tx->txreq.sg_count;
799 list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
801 vl15_watchdog_enq(dd);
805 while (tail != dd->ipath_sdma_descq_tail) {
807 tail = dd->ipath_sdma_descq_cnt - 1;
810 unmap_desc(dd, tail);
815 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
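
The loop at lines 805-810 is the error path of ipath_sdma_verbs_send(): a provisional tail is walked back to the committed ipath_sdma_descq_tail, unmapping every descriptor that had been filled so far. A compact stand-alone sketch of that backward walk with wraparound; the ring size and the unmap stand-in are illustrative:

#include <stdio.h>

#define DESCQ_CNT 8u            /* illustrative ring size */

static void unmap_slot(unsigned idx)
{
        printf("unmap descriptor %u\n", idx);
}

/* Walk a provisional tail back to the last committed tail, releasing
 * every slot filled so far; mirrors the error path after a failed
 * mapping in the listing. */
static void unwind(unsigned tentative_tail, unsigned committed_tail)
{
        unsigned tail = tentative_tail;

        while (tail != committed_tail) {
                tail = tail ? tail - 1 : DESCQ_CNT - 1;   /* step back, wrap */
                unmap_slot(tail);
        }
}

int main(void)
{
        unwind(2, 6);           /* wraps: releases slots 1, 0, 7, 6 */
        return 0;
}
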