Lines matching refs: ohci  (identifier cross-reference; the numbered fragments below appear to come from the Linux OHCI host-controller driver, drivers/usb/host/ohci-q.c)

37  * PRECONDITION:  ohci lock held, irqs blocked.
40 finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
41 __releases(ohci->lock)
42 __acquires(ohci->lock)
46 urb_free_priv (ohci, urb->hcpriv);
52 ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
53 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
54 if (quirk_amdiso(ohci))
56 if (quirk_amdprefetch(ohci))
57 sb800_prefetch(ohci, 0);
61 ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
70 usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
71 spin_unlock (&ohci->lock);
72 usb_hcd_giveback_urb(ohci_to_hcd(ohci), urb, status);
73 spin_lock (&ohci->lock);
76 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
77 && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0) {
78 ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
79 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
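
The finish_urb() matches above cover two jobs: bandwidth bookkeeping (the isoc/interrupt request counters at lines 52-61, the AMD isochronous quirk hooks, and the periodic list enables OHCI_CTRL_PLE/OHCI_CTRL_IE cleared at lines 76-79 once the last periodic URB is gone) and the standard HCD giveback pattern, where ohci->lock is dropped across usb_hcd_giveback_urb() so the completion handler may resubmit. A minimal sketch of that pattern follows; the function name is illustrative, and only the HCD core APIs already visible in the listing are assumed:

/* Sketch of lines 70-73, not the full finish_urb(): caller holds
 * ohci->lock with IRQs blocked, and the __releases/__acquires
 * annotations (lines 41-42) tell sparse about the window where
 * the lock is dropped.
 */
static void giveback_sketch(struct ohci_hcd *ohci, struct urb *urb,
		int status)
__releases(ohci->lock)
__acquires(ohci->lock)
{
	usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
	spin_unlock(&ohci->lock);	/* complete() may reenter the HCD */
	usb_hcd_giveback_urb(ohci_to_hcd(ohci), urb, status);
	spin_lock(&ohci->lock);
}
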
91 static int balance (struct ohci_hcd *ohci, int interval, int load)
103 if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
108 if ((ohci->load [j] + load) > 900)
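
balance() chooses which of an interval's possible start slots ("branches") a periodic ED should occupy: line 103 prefers the least-loaded candidate, and line 108 rejects any branch that would push some frame past 900 of its 1000 microseconds, the USB 1.1 rule that periodic transfers may claim at most 90% of a frame. A standalone sketch of the selection loop, assuming the driver's 32-entry schedule and made-up demo numbers:

#include <stdio.h>

#define NUM_INTS 32	/* slots in the interrupt schedule */

/* sketch of balance(); the driver returns -ENOSPC, not -1 */
static int balance(const int *load, int interval, int new_load)
{
	int i, branch = -1;

	for (i = 0; i < interval; i++) {
		/* prefer the least-loaded candidate (line 103) */
		if (branch < 0 || load[branch] > load[i]) {
			int j;

			/* every slot this branch hits must stay
			 * within the 900us budget (line 108)
			 */
			for (j = i; j < NUM_INTS; j += interval)
				if (load[j] + new_load > 900)
					break;
			if (j < NUM_INTS)
				continue;
			branch = i;
		}
	}
	return branch;
}

int main(void)
{
	int load[NUM_INTS] = { [0] = 880 };	/* slot 0 nearly full */

	/* a 50us ED with an 8-frame interval avoids slot 0 */
	printf("branch = %d\n", balance(load, 8, 50));	/* prints 1 */
	return 0;
}
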
125 static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
129 ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
130 (ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
134 struct ed **prev = &ohci->periodic [i];
135 __hc32 *prev_p = &ohci->hcca->int_table [i];
155 *prev_p = cpu_to_hc32(ohci, ed->dma);
158 ohci->load [i] += ed->load;
160 ohci_to_hcd(ohci)->self.bandwidth_allocated += ed->load / ed->interval;
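
periodic_link() enters the ED into every slot its interval touches, stepping ed->branch, ed->branch + interval, ... through the table while updating the CPU-side shadow list (ohci->periodic[i], line 134) and the hardware list (hcca->int_table[i], line 135) in step; each slot's chain is kept ordered so slower EDs come first and their tails can be shared between slots, which is how the flat table encodes the tree mentioned at line 185. Line 158 charges the ED's microseconds to each slot it occupies, and line 160 reports the per-frame average (load / interval) to usbcore's bandwidth accounting.
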
165 static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
173 if (quirk_zfmicro(ohci)
175 && !(ohci->eds_scheduled++))
176 mod_timer(&ohci->unlink_watchdog, round_jiffies(jiffies + HZ));
185 * periodic schedule encodes a tree like figure 3-5 in the ohci
191 if (ohci->ed_controltail == NULL) {
192 WARN_ON (ohci->hc_control & OHCI_CTRL_CLE);
193 ohci_writel (ohci, ed->dma,
194 &ohci->regs->ed_controlhead);
196 ohci->ed_controltail->ed_next = ed;
197 ohci->ed_controltail->hwNextED = cpu_to_hc32 (ohci,
200 ed->ed_prev = ohci->ed_controltail;
201 if (!ohci->ed_controltail && !ohci->ed_rm_list) {
203 ohci->hc_control |= OHCI_CTRL_CLE;
204 ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent);
205 ohci_writel (ohci, ohci->hc_control,
206 &ohci->regs->control);
208 ohci->ed_controltail = ed;
212 if (ohci->ed_bulktail == NULL) {
213 WARN_ON (ohci->hc_control & OHCI_CTRL_BLE);
214 ohci_writel (ohci, ed->dma, &ohci->regs->ed_bulkhead);
216 ohci->ed_bulktail->ed_next = ed;
217 ohci->ed_bulktail->hwNextED = cpu_to_hc32 (ohci,
220 ed->ed_prev = ohci->ed_bulktail;
221 if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
223 ohci->hc_control |= OHCI_CTRL_BLE;
224 ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent);
225 ohci_writel (ohci, ohci->hc_control,
226 &ohci->regs->control);
228 ohci->ed_bulktail = ed;
234 branch = balance (ohci, ed->interval, ed->load);
236 ohci_dbg (ohci,
243 periodic_link (ohci, ed);
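
ed_schedule() splits on ed->type. Control and bulk EDs join software lists that are doubly linked through ed_next/ed_prev while the controller follows hwNextED: an empty list means writing the head register directly (ed_controlhead at lines 193-194, ed_bulkhead at line 214), otherwise the ED is chained after the current tail (lines 196-200, 216-220). The list enables CLE/BLE (lines 203, 223) are switched on only if the list was empty and nothing is pending on ed_rm_list (lines 201, 221), since finish_unlinks() is responsible for restarting otherwise. Periodic EDs instead get a branch from balance() (line 234, failure logged at line 236) and are woven into the interrupt table by periodic_link() (line 243). The quirk at lines 173-176 arms ohci->unlink_watchdog for interrupt EDs, a workaround for a ZF Micro controller bug.
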
255 static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
261 struct ed **prev = &ohci->periodic [i];
262 __hc32 *prev_p = &ohci->hcca->int_table [i];
272 ohci->load [i] -= ed->load;
274 ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;
276 ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
277 (ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
303 static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
305 ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
324 ohci->hc_control &= ~OHCI_CTRL_CLE;
325 ohci_writel (ohci, ohci->hc_control,
326 &ohci->regs->control);
329 ohci_writel (ohci,
330 hc32_to_cpup (ohci, &ed->hwNextED),
331 &ohci->regs->ed_controlhead);
337 if (ohci->ed_controltail == ed) {
338 ohci->ed_controltail = ed->ed_prev;
339 if (ohci->ed_controltail)
340 ohci->ed_controltail->ed_next = NULL;
350 ohci->hc_control &= ~OHCI_CTRL_BLE;
351 ohci_writel (ohci, ohci->hc_control,
352 &ohci->regs->control);
355 ohci_writel (ohci,
356 hc32_to_cpup (ohci, &ed->hwNextED),
357 &ohci->regs->ed_bulkhead);
363 if (ohci->ed_bulktail == ed) {
364 ohci->ed_bulktail = ed->ed_prev;
365 if (ohci->ed_bulktail)
366 ohci->ed_bulktail->ed_next = NULL;
375 periodic_unlink (ohci, ed);
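
periodic_unlink() (lines 255-277) mirrors periodic_link(), detaching the ED from each slot's shadow and hardware chains and returning its load at lines 272-274, with the vdbg at lines 276-277 matching the link-time message. ed_deschedule() first sets ED_SKIP (line 305) so the controller passes the ED by before any pointers change. For a control or bulk ED at the head of its list, it either clears CLE/BLE when the ED was the only entry (lines 324-326, 350-352) or points the head register at the successor taken from hwNextED (lines 329-331, 355-357); mid-list EDs are simply spliced out, and the software tail is fixed up at lines 337-340 and 363-366. Periodic EDs are handed to periodic_unlink() at line 375.
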
387 struct ohci_hcd *ohci,
396 spin_lock_irqsave (&ohci->lock, flags);
403 ed = ed_alloc (ohci, GFP_ATOMIC);
410 td = td_alloc (ohci, GFP_ATOMIC);
413 ed_free (ohci, ed);
418 ed->hwTailP = cpu_to_hc32 (ohci, td->td_dma);
451 ed->hwINFO = cpu_to_hc32(ohci, info);
457 spin_unlock_irqrestore (&ohci->lock, flags);
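
The fragments at lines 387-457 are from the ED lookup/creation path (ed_get), which runs under ohci->lock (lines 396, 457). A new ED is allocated together with one TD (lines 403-413, freeing the ED again if the TD allocation fails), and hwTailP is pointed at that TD (line 418): this looks like the usual OHCI dummy-TD convention, where the queue always ends in an empty placeholder so the controller's head/tail view never goes empty, and td_fill() later fills the old dummy in place while appending a fresh one. The endpoint description assembled in info lands in hwINFO at line 451.
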
469 static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
471 ed->hwINFO |= cpu_to_hc32 (ohci, ED_DEQUEUE);
472 ed_deschedule (ohci, ed);
475 ed->ed_next = ohci->ed_rm_list;
477 ohci->ed_rm_list = ed;
480 ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus);
481 ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable);
483 (void) ohci_readl (ohci, &ohci->regs->control);
490 ed->tick = ohci_frame_no(ohci) + 1;
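
start_ed_unlink() marks the ED with ED_DEQUEUE (line 471), deschedules it (line 472), and parks it on the singly linked ohci->ed_rm_list (lines 475-477). It then requests a start-of-frame interrupt by clearing and enabling OHCI_INTR_SF (lines 480-481; writing the bit to intrstatus acknowledges any stale event) and flushes the posted writes with the ohci_readl() at line 483. Line 490 records the next frame number in ed->tick: once the frame counter passes it, the controller can no longer hold a cached copy of the ED, and finish_unlinks() may reclaim it.
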
501 td_fill (struct ohci_hcd *ohci, u32 info,
542 td->hwINFO = cpu_to_hc32 (ohci, info);
544 td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
545 *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
549 td->hwCBP = cpu_to_hc32 (ohci, data);
552 td->hwBE = cpu_to_hc32 (ohci, data + len - 1);
555 td->hwNextTD = cpu_to_hc32 (ohci, td_pt->td_dma);
562 td->td_hash = ohci->td_hash [hash];
563 ohci->td_hash [hash] = td;
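
td_fill() (line 501) turns one chunk of an URB into a hardware TD. The request flags go into hwINFO (line 542). Isochronous TDs keep a page-aligned buffer pointer (the 0xFFFFF000 mask at line 544) and store the packet's offset in the first PSW entry (line 545), while general TDs address the buffer by first byte (hwCBP, line 549) and last byte (hwBE = data + len - 1, line 552). Line 555 points hwNextTD at td_pt, the TD that becomes the queue's new dummy, and lines 562-563 push the TD onto the td_hash chain so the DMA address the controller later reports can be mapped back to its struct td (compare dma_to_td at line 874).
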
578 struct ohci_hcd *ohci,
596 urb_priv->ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_C);
600 list_add (&urb_priv->pending, &ohci->pending);
618 periodic = ohci_to_hcd(ohci)->self.bandwidth_int_reqs++ == 0
619 && ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0;
627 td_fill (ohci, info, data, 4096, urb, cnt);
635 td_fill (ohci, info, data, data_len, urb, cnt);
639 td_fill (ohci, info, 0, 0, urb, cnt);
645 ohci_writel (ohci, OHCI_BLF, &ohci->regs->cmdstatus);
654 td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
659 td_fill (ohci, info, data, data_len, urb, cnt++);
664 td_fill (ohci, info, data, 0, urb, cnt++);
667 ohci_writel (ohci, OHCI_CLF, &ohci->regs->cmdstatus);
683 td_fill (ohci, TD_CC | TD_ISO | frame,
687 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
688 if (quirk_amdiso(ohci))
690 if (quirk_amdprefetch(ohci))
691 sb800_prefetch(ohci, 1);
693 periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
694 && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
701 ohci->hc_control |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
702 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
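
td_submit_urb() (line 578) builds the TD chain. Line 596 clears the ED's toggle carry (ED_C) so the queue restarts at DATA0 when usbcore has reset the endpoint toggle, and line 600 tracks the URB on ohci->pending. Bulk/interrupt data is cut into 4096-byte TDs (line 627) with a final short TD (line 635) and, when needed, a zero-length terminator (line 639), after which OHCI_BLF (line 645) tells the controller the bulk list has work. Control transfers queue the 8-byte SETUP from urb->setup_dma (line 654), an optional data stage (line 659), and a zero-length status stage (line 664) before ringing OHCI_CLF (line 667). Isochronous URBs get one TD per packet, stamped with its frame number (line 683), plus the AMD quirk handling at lines 687-691. Lines 618-619 and 693-694 detect the first periodic request, in which case lines 701-702 set OHCI_CTRL_PLE|OHCI_CTRL_IE, the counterpart of the disable seen in finish_urb().
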
713 static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
715 u32 tdINFO = hc32_to_cpup (ohci, &td->hwINFO);
723 u16 tdPSW = ohci_hwPSW(ohci, td, 0);
747 ohci_vdbg (ohci,
757 u32 tdBE = hc32_to_cpup (ohci, &td->hwBE);
774 hc32_to_cpup (ohci, &td->hwCBP)
779 ohci_vdbg (ohci,
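
td_done() (line 713) recovers status and byte counts from a retired TD, starting from hwINFO (line 715). Isochronous TDs carry per-packet results in the PSW (line 723), each with its own condition code and size. For general TDs, lines 757-774 derive the actual length from the final buffer pointers: the controller advances hwCBP as it transfers and zeroes it on completion, so a zero CBP means everything up to hwBE moved, while a nonzero CBP marks where a short transfer stopped.
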
790 static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc)
796 __hc32 toggle = ed->hwHeadP & cpu_to_hc32 (ohci, ED_C);
801 ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
803 ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_H);
845 ohci_dbg (ohci,
850 hc32_to_cpu (ohci, td->hwINFO),
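
ed_halted() (line 790) runs when a TD retires with an error serious enough that the controller halted the endpoint (ED_H set in hwHeadP). It saves the toggle carry (line 796), sets ED_SKIP (line 801) so the endpoint stays paused until the URB's completion handler has cleaned up, then clears the halt bit (line 803); the failed URB's remaining TDs are spliced out of the queue with the saved toggle reapplied. Halts that were not expected for the transfer type end up in the ohci_dbg report at lines 845-850, which dumps the raw hwINFO and condition code.
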
858 static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
864 td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head);
865 ohci->hcca->done_head = 0;
874 td = dma_to_td (ohci, td_dma);
876 ohci_err (ohci, "bad entry %8x\n", td_dma);
880 td->hwINFO |= cpu_to_hc32 (ohci, TD_DONE);
881 cc = TD_CC_GET (hc32_to_cpup (ohci, &td->hwINFO));
888 && (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
889 ed_halted(ohci, td, cc);
893 td_dma = hc32_to_cpup (ohci, &td->hwNextTD);
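
The controller retires TDs by prepending each one to a chain whose head DMA address it leaves in hcca->done_head (lines 864-865, which also clear the field for the next batch), so software sees completions newest-first. dl_reverse_done_list() resolves every DMA address through the hash table (dma_to_td, line 874; unknown addresses are reported at line 876), marks each TD done (line 880), diverts halted endpoints to ed_halted() (lines 888-889), and reverses the chain. A standalone sketch of just the reversal, using plain pointers (and made-up sequence numbers) in place of the hc32 DMA links:

#include <stdio.h>

/* stand-in for struct td; the real next_dl_td is found by mapping
 * the hwNextTD DMA address through ohci->td_hash
 */
struct td {
	int		seq;		/* completion order, demo only */
	struct td	*next_dl_td;
};

static struct td *reverse_done_list(struct td *head)
{
	struct td *rev = NULL;

	while (head) {
		struct td *next = head->next_dl_td;

		head->next_dl_td = rev;	/* prepend onto reversed chain */
		rev = head;
		head = next;
	}
	return rev;
}

int main(void)
{
	/* the HC finished TDs 1, 2, 3; done_head points at 3 */
	struct td a = { 1, NULL }, b = { 2, &a }, c = { 3, &b };
	struct td *td;

	for (td = reverse_done_list(&c); td; td = td->next_dl_td)
		printf("td %d\n", td->seq);	/* 1, then 2, then 3 */
	return 0;
}
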
902 finish_unlinks (struct ohci_hcd *ohci, u16 tick)
907 for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
915 if (likely(ohci->rh_state == OHCI_RH_RUNNING)) {
928 head = hc32_to_cpu (ohci, ed->hwHeadP) &
933 if (ed == ohci->ed_to_check)
934 ohci->ed_to_check = NULL;
977 savebits = *prev & ~cpu_to_hc32 (ohci, TD_MASK);
985 tdINFO = hc32_to_cpup(ohci, &td->hwINFO);
987 ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_C);
989 ed->hwHeadP |= cpu_to_hc32(ohci, ED_C);
992 td_done (ohci, urb, td);
998 finish_urb(ohci, urb, 0);
1006 if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
1007 ohci->eds_scheduled--;
1008 ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
1011 ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE);
1015 if (ohci->rh_state == OHCI_RH_RUNNING)
1016 ed_schedule (ohci, ed);
1024 if (ohci->rh_state == OHCI_RH_RUNNING && !ohci->ed_rm_list) {
1027 if (ohci->ed_controltail) {
1029 if (quirk_zfmicro(ohci))
1031 if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
1033 ohci_writel (ohci, 0,
1034 &ohci->regs->ed_controlcurrent);
1037 if (ohci->ed_bulktail) {
1039 if (quirk_zfmicro(ohci))
1041 if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
1043 ohci_writel (ohci, 0,
1044 &ohci->regs->ed_bulkcurrent);
1050 ohci->hc_control |= control;
1051 if (quirk_zfmicro(ohci))
1053 ohci_writel (ohci, ohci->hc_control,
1054 &ohci->regs->control);
1057 if (quirk_zfmicro(ohci))
1059 ohci_writel (ohci, command, &ohci->regs->cmdstatus);
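
finish_unlinks() runs from the SOF interrupt and rescans ohci->ed_rm_list (line 907); while the root hub is running (line 915), EDs whose tick hasn't been reached yet are left for a later frame. For each reclaimable ED it walks the TD queue: line 977 saves the flag bits (toggle/halt) of the pointer word so they survive when a TD is spliced out, lines 985-989 recompute the toggle carry from the last TD's state, and the unlinked TDs are retired through td_done() and finish_urb() (lines 992-998). An ED that still has work has its SKIP/DEQUEUE and halt bits cleared (lines 1008-1011) and is put back on the schedule if the controller is running (lines 1015-1016). Once the list drains (line 1024), lines 1027-1059 restart control and bulk processing, zeroing the current-ED registers, re-enabling CLE/BLE, and ringing the CLF/BLF doorbells; the quirk_zfmicro() checks insert small delays around the register writes on that hardware.
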
1074 static void takeback_td(struct ohci_hcd *ohci, struct td *td)
1082 status = td_done(ohci, urb, td);
1087 finish_urb(ohci, urb, status);
1092 start_ed_unlink(ohci, ed);
1095 } else if ((ed->hwINFO & cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE))
1096 == cpu_to_hc32(ohci, ED_SKIP)) {
1098 if (!(td->hwINFO & cpu_to_hc32(ohci, TD_DONE))) {
1099 ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP);
1103 ohci_writel(ohci, OHCI_CLF,
1104 &ohci->regs->cmdstatus);
1107 ohci_writel(ohci, OHCI_BLF,
1108 &ohci->regs->cmdstatus);
1125 dl_done_list (struct ohci_hcd *ohci)
1127 struct td *td = dl_reverse_done_list (ohci);
1131 takeback_td(ohci, td);
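
takeback_td() (line 1074) is the per-TD completion path: td_done() accumulates status (line 1082), finish_urb() gives the URB back once its last TD has retired (line 1087), and an ED whose queue has emptied is unlinked at line 1092. The branch at lines 1095-1096 catches an ED left with ED_SKIP set but not being dequeued, as ed_halted() leaves it: if the next TD is still pending (line 1098), SKIP is cleared (line 1099) and the control or bulk list is kicked awake with OHCI_CLF (lines 1103-1104) or OHCI_BLF (lines 1107-1108). dl_done_list() (line 1125) simply takes the reversed chain from dl_reverse_done_list() (line 1127) and feeds each TD to takeback_td() (line 1131).
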