Lines matching refs:ep — symbol references to "ep" in the GRUSBDC USB device controller (UDC) gadget driver, gr_udc.c; the number leading each line is the source line of the match.

95 static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
98 int buflen = ep->is_in ? req->req.length : req->req.actual;
102 dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
116 static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
129 static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
131 u32 epctrl = gr_read32(&ep->regs->epctrl);
132 u32 epstat = gr_read32(&ep->regs->epstat);
136 seq_printf(seq, "%s:\n", ep->ep.name);
141 seq_printf(seq, " dma_start = %d\n", ep->dma_start);
142 seq_printf(seq, " stopped = %d\n", ep->stopped);
143 seq_printf(seq, " wedged = %d\n", ep->wedged);
144 seq_printf(seq, " callback = %d\n", ep->callback);
145 seq_printf(seq, " maxpacket = %d\n", ep->ep.maxpacket);
146 seq_printf(seq, " maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
147 seq_printf(seq, " bytes_per_buffer = %d\n", ep->bytes_per_buffer);
161 if (list_empty(&ep->queue)) {
167 list_for_each_entry(req, &ep->queue, queue) {
192 struct gr_ep *ep;
205 list_for_each_entry(ep, &dev->ep_list, ep_list)
206 gr_seq_ep_show(seq, ep);
251 static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
256 dma_desc = dma_pool_alloc(ep->dev->desc_pool, gfp_flags, &paddr);
258 dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
303 static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
317 dev = ep->dev;
318 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
321 if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
326 * divisible by ep->ep.maxpacket and the last descriptor was
331 memcpy(buftail, ep->tailbuf, req->oddlen);
335 dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
336 ep->ep.name);
337 gr_dbgprint_request("OVFL", ep, req);
343 if (ep->is_in)
344 gr_dbgprint_request("SENT", ep, req);
346 gr_dbgprint_request("RECV", ep, req);
349 /* Prevent changes to ep->queue during callback */
350 ep->callback = 1;
360 usb_gadget_giveback_request(&ep->ep, &req->req);
364 ep->callback = 0;
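Condensed from the matched lines of the request-completion path: the giveback to the gadget driver is bracketed by ep->callback, which the interrupt loops further down test (!ep->callback) so they leave the endpoint alone while the completion handler may still be changing its queue. Unmatched lines in between are elided:

        /* Prevent changes to ep->queue during callback */
        ep->callback = 1;
        /* ... */
        usb_gadget_giveback_request(&ep->ep, &req->req);
        /* ... */
        ep->callback = 0;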
381 * Starts DMA for endpoint ep if there are requests in the queue.
383 * Must be called with dev->lock held and with !ep->stopped.
385 static void gr_start_dma(struct gr_ep *ep)
390 if (list_empty(&ep->queue)) {
391 ep->dma_start = 0;
395 req = list_first_entry(&ep->queue, struct gr_request, queue);
402 * ep->ep.maxpacket. It could lead to buffer overruns if an unexpectedly
406 if (!ep->is_in && req->oddlen)
407 req->last_desc->data = ep->tailbuf_paddr;
412 gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);
415 dmactrl = gr_read32(&ep->regs->dmactrl);
416 gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);
418 ep->dma_start = 1;
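A sketch of gr_start_dma() assembled from the matched lines; the local declarations, braces and the early return are filled in as assumptions, the rest is as matched:

        static void gr_start_dma(struct gr_ep *ep)
        {
                struct gr_request *req;
                u32 dmactrl;

                if (list_empty(&ep->queue)) {
                        ep->dma_start = 0;
                        return;
                }

                req = list_first_entry(&ep->queue, struct gr_request, queue);

                /* For an OUT ep with an odd-length last buffer, point the last
                 * descriptor at the per-ep tail buffer so a host that sends
                 * more data than expected cannot overrun the request buffer */
                if (!ep->is_in && req->oddlen)
                        req->last_desc->data = ep->tailbuf_paddr;

                /* Set the current descriptor and enable DMA for this ep */
                gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);
                dmactrl = gr_read32(&ep->regs->dmactrl);
                gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);

                ep->dma_start = 1;
        }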
422 * Finishes the first request in the ep's queue and, if available, starts the
425 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
427 static void gr_dma_advance(struct gr_ep *ep, int status)
431 req = list_first_entry(&ep->queue, struct gr_request, queue);
432 gr_finish_request(ep, req, status);
433 gr_start_dma(ep); /* Regardless of ep->dma_start */
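gr_dma_advance() is almost fully visible in the matches; as one unit (only the declaration line and braces are assumed):

        static void gr_dma_advance(struct gr_ep *ep, int status)
        {
                struct gr_request *req;

                req = list_first_entry(&ep->queue, struct gr_request, queue);
                gr_finish_request(ep, req, status);
                gr_start_dma(ep); /* Regardless of ep->dma_start */
        }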
442 static void gr_abort_dma(struct gr_ep *ep)
446 dmactrl = gr_read32(&ep->regs->dmactrl);
447 gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
457 static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
462 desc = gr_alloc_dma_desc(ep, gfp_flags);
467 if (ep->is_in)
496 static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
508 u16 size = min(bytes_left, ep->bytes_per_buffer);
510 if (size < ep->bytes_per_buffer) {
516 ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
529 gr_free_dma_desc_chain(ep->dev, req);
540 * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
549 static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
561 u16 size = min(bytes_left, ep->bytes_per_buffer);
563 ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
574 * multiples of ep->ep.maxpacket.
576 if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
577 ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
591 gr_free_dma_desc_chain(ep->dev, req);
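The zero-length-packet case from the matched IN descriptor-list lines: when the gadget sets req.zero and the request length is an exact multiple of maxpacket, one extra empty descriptor is chained so the transfer terminates with a short packet. The error label name and the condensed cleanup path are assumptions:

        if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
                ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
                if (ret)
                        goto alloc_err; /* label name assumed */
        }
        /* ... */
alloc_err:
        gr_free_dma_desc_chain(ep->dev, req);
        return ret;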
597 static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
599 struct gr_udc *dev = ep->dev;
602 if (unlikely(!ep->ep.desc && ep->num != 0)) {
603 dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
610 ep->ep.name, req->req.buf, list_empty(&req->queue));
626 ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
632 if (ep->is_in)
633 ret = gr_setup_in_desc_list(ep, req, gfp_flags);
635 ret = gr_setup_out_desc_list(ep, req, gfp_flags);
641 list_add_tail(&req->queue, &ep->queue);
644 if (!ep->dma_start && likely(!ep->stopped))
645 gr_start_dma(ep);
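The queueing flow visible in the gr_queue() matches, with error reporting and the usual request bookkeeping between the steps condensed: map the request for DMA, build the IN or OUT descriptor chain, put the request on the ep queue, and only kick DMA if the channel is idle and the ep is not halted:

        ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
        if (ret)
                return ret;

        if (ep->is_in)
                ret = gr_setup_in_desc_list(ep, req, gfp_flags);
        else
                ret = gr_setup_out_desc_list(ep, req, gfp_flags);
        if (ret)
                return ret;

        list_add_tail(&req->queue, &ep->queue);

        if (!ep->dma_start && likely(!ep->stopped))
                gr_start_dma(ep);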
655 static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
658 if (ep->is_in)
659 gr_dbgprint_request("RESP", ep, req);
661 return gr_queue(ep, req, gfp_flags);
672 static void gr_ep_nuke(struct gr_ep *ep)
676 ep->stopped = 1;
677 ep->dma_start = 0;
678 gr_abort_dma(ep);
680 while (!list_empty(&ep->queue)) {
681 req = list_first_entry(&ep->queue, struct gr_request, queue);
682 gr_finish_request(ep, req, -ESHUTDOWN);
691 static void gr_ep_reset(struct gr_ep *ep)
693 gr_write32(&ep->regs->epctrl, 0);
694 gr_write32(&ep->regs->dmactrl, 0);
696 ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
697 ep->ep.desc = NULL;
698 ep->stopped = 1;
699 ep->dma_start = 0;
724 static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
729 if (ep->num && !ep->ep.desc)
732 if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
736 if (!ep->num) {
739 gr_control_stall(ep->dev);
740 dev_dbg(ep->dev->dev, "EP: stall ep0\n");
746 dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
747 (halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);
749 epctrl = gr_read32(&ep->regs->epctrl);
752 gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
753 ep->stopped = 1;
755 ep->wedged = 1;
757 gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
758 ep->stopped = 0;
759 ep->wedged = 0;
762 if (!ep->dma_start)
763 gr_start_dma(ep);
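Halt/wedge handling as it appears from the matches. A wedged endpoint is a halted one that only the function driver may un-halt; the host's ClearFeature(ENDPOINT_HALT) must not clear it, which is why wedged is tracked alongside stopped. The `if (wedge)` guard is inferred from the matched assignments:

        epctrl = gr_read32(&ep->regs->epctrl);
        if (halt) {
                gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
                ep->stopped = 1;
                if (wedge)
                        ep->wedged = 1;
        } else {
                gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
                ep->stopped = 0;
                ep->wedged = 0;

                /* Anything queued while the ep was halted can start now */
                if (!ep->dma_start)
                        gr_start_dma(ep);
        }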
797 struct gr_ep *ep;
799 list_for_each_entry(ep, &dev->ep_list, ep_list)
800 gr_ep_nuke(ep);
814 struct gr_ep *ep;
818 ep = container_of(_ep, struct gr_ep, ep);
819 dev = ep->dev;
841 void (*complete)(struct usb_ep *ep,
1002 struct gr_ep *ep;
1014 ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);
1018 halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
1024 status = gr_ep_halt_wedge(ep, 1, 0, 1);
1034 if (ep->wedged)
1036 status = gr_ep_halt_wedge(ep, 0, 0, 1);
1255 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
1257 static int gr_handle_in_ep(struct gr_ep *ep)
1261 req = list_first_entry(&ep->queue, struct gr_request, queue);
1268 if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
1272 gr_dma_advance(ep, 0);
1280 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
1282 static int gr_handle_out_ep(struct gr_ep *ep)
1288 struct gr_udc *dev = ep->dev;
1290 req = list_first_entry(&ep->queue, struct gr_request, queue);
1304 if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
1307 if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
1318 gr_dma_advance(ep, 0);
1324 ep_dmactrl = gr_read32(&ep->regs->dmactrl);
1325 gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
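The OUT completion decision from the gr_handle_out_ep() matches: a short packet, or a request that has reached its full length, ends the request; otherwise DMA is re-enabled for the next buffer. Descriptor advancing and the ep0 status-stage special case are omitted here:

        if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
                /* Short packet or expected length reached: request is done */
                gr_dma_advance(ep, 0);
        } else {
                /* More data expected: re-enable DMA for the next descriptor */
                ep_dmactrl = gr_read32(&ep->regs->dmactrl);
                gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
        }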
1419 struct gr_ep *ep;
1430 * Check IN ep interrupts. We check these before the OUT eps because
1435 ep = &dev->epi[i];
1436 if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
1437 handled = gr_handle_in_ep(ep) || handled;
1440 /* Check OUT ep interrupts */
1442 ep = &dev->epo[i];
1443 if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
1444 handled = gr_handle_out_ep(ep) || handled;
1455 list_for_each_entry(ep, &dev->ep_list, ep_list) {
1456 if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
1459 ep->ep.name);
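The per-endpoint interrupt sweep assembled from the matches. IN endpoints are serviced before OUT endpoints, and an ep that is stopped, currently inside a completion callback, or has an empty queue is skipped. The loop bounds dev->nepi/dev->nepo are an assumption based on the fields referenced further down in this listing:

        /* Check IN ep interrupts before the OUT eps */
        for (i = 0; i < dev->nepi; i++) {
                ep = &dev->epi[i];
                if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
                        handled = gr_handle_in_ep(ep) || handled;
        }

        /* Check OUT ep interrupts */
        for (i = 0; i < dev->nepo; i++) {
                ep = &dev->epo[i];
                if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
                        handled = gr_handle_out_ep(ep) || handled;
        }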
1483 /* USB ep ops */
1490 struct gr_ep *ep;
1497 ep = container_of(_ep, struct gr_ep, ep);
1501 dev = ep->dev;
1504 if (ep == &dev->epo[0] || ep == &dev->epi[0])
1511 epctrl = gr_read32(&ep->regs->epctrl);
1516 if (!ep->is_in != !usb_endpoint_dir_in(desc))
1519 /* Check ep num */
1520 if ((!ep->is_in && ep->num >= dev->nepo) ||
1521 (ep->is_in && ep->num >= dev->nepi))
1534 ep->ep.name);
1561 } else if (max > ep->ep.maxpacket_limit) {
1563 max, ep->ep.maxpacket_limit);
1567 spin_lock(&ep->dev->lock);
1569 if (!ep->stopped) {
1570 spin_unlock(&ep->dev->lock);
1574 ep->stopped = 0;
1575 ep->wedged = 0;
1576 ep->ep.desc = desc;
1577 ep->ep.maxpacket = max;
1578 ep->dma_start = 0;
1586 ep->bytes_per_buffer = (nt + 1) * max;
1587 } else if (ep->is_in) {
1593 ep->bytes_per_buffer = (buffer_size / max) * max;
1599 ep->bytes_per_buffer = max;
1606 if (ep->is_in)
1608 gr_write32(&ep->regs->epctrl, epctrl);
1610 gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);
1612 spin_unlock(&ep->dev->lock);
1614 dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
1615 ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
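Buffer sizing at ep_enable, reconstructed from the matched branches; here `nt` (additional transactions per microframe taken from wMaxPacketSize), `buffer_size` (the per-ep hardware buffer size) and the exact branch ordering are assumptions beyond the matched lines:

        if (nt) {
                /* High-bandwidth transfers: each extra transaction per
                 * microframe needs its own buffer */
                ep->bytes_per_buffer = (nt + 1) * max;
        } else if (ep->is_in) {
                /* IN: fit as many whole max-sized packets as the buffer holds */
                ep->bytes_per_buffer = (buffer_size / max) * max;
        } else {
                /* OUT: one packet per buffer */
                ep->bytes_per_buffer = max;
        }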
1622 struct gr_ep *ep;
1626 ep = container_of(_ep, struct gr_ep, ep);
1627 if (!_ep || !ep->ep.desc)
1630 dev = ep->dev;
1633 if (ep == &dev->epo[0] || ep == &dev->epi[0])
1639 dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);
1643 gr_ep_nuke(ep);
1644 gr_ep_reset(ep);
1645 ep->ep.desc = NULL;
1675 struct gr_ep *ep;
1683 ep = container_of(_ep, struct gr_ep, ep);
1685 dev = ep->dev;
1687 spin_lock(&ep->dev->lock);
1695 if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
1696 ep = &dev->epo[0];
1697 ep->ep.driver_data = dev->epi[0].ep.driver_data;
1700 if (ep->is_in)
1701 gr_dbgprint_request("EXTERN", ep, req);
1703 ret = gr_queue(ep, req, GFP_ATOMIC);
1705 spin_unlock(&ep->dev->lock);
1714 struct gr_ep *ep;
1719 ep = container_of(_ep, struct gr_ep, ep);
1720 if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
1722 dev = ep->dev;
1733 list_for_each_entry(req, &ep->queue, queue) {
1742 if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
1744 gr_abort_dma(ep);
1745 if (ep->stopped)
1746 gr_finish_request(ep, req, -ECONNRESET);
1748 gr_dma_advance(ep, -ECONNRESET);
1751 gr_finish_request(ep, req, -ECONNRESET);
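Dequeue handling from the matches: a request already under DMA is aborted first and then completed with -ECONNRESET, either directly when the endpoint is stopped or via gr_dma_advance(), which also restarts the queue; a request not yet started is simply unlinked and finished. The list_del_init() step in the second branch is assumed:

        if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
                /* The request is currently being processed by DMA */
                gr_abort_dma(ep);
                if (ep->stopped)
                        gr_finish_request(ep, req, -ECONNRESET);
                else
                        gr_dma_advance(ep, -ECONNRESET);
        } else if (!list_empty(&req->queue)) {
                /* Not started yet: unlink and complete it */
                list_del_init(&req->queue); /* assumed */
                gr_finish_request(ep, req, -ECONNRESET);
        }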
1766 struct gr_ep *ep;
1770 ep = container_of(_ep, struct gr_ep, ep);
1772 spin_lock(&ep->dev->lock);
1775 if (halt && ep->is_in && !list_empty(&ep->queue)) {
1780 ret = gr_ep_halt_wedge(ep, halt, wedge, 0);
1783 spin_unlock(&ep->dev->lock);
1806 struct gr_ep *ep;
1812 ep = container_of(_ep, struct gr_ep, ep);
1814 epstat = gr_read32(&ep->regs->epstat);
1828 struct gr_ep *ep;
1833 ep = container_of(_ep, struct gr_ep, ep);
1834 dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);
1836 spin_lock(&ep->dev->lock);
1838 epctrl = gr_read32(&ep->regs->epctrl);
1840 gr_write32(&ep->regs->epctrl, epctrl);
1842 spin_unlock(&ep->dev->lock);
1986 struct gr_ep *ep;
1992 ep = &dev->epi[num];
1993 ep->ep.name = inames[num];
1994 ep->regs = &dev->regs->epi[num];
1996 ep = &dev->epo[num];
1997 ep->ep.name = onames[num];
1998 ep->regs = &dev->regs->epo[num];
2001 gr_ep_reset(ep);
2002 ep->num = num;
2003 ep->is_in = is_in;
2004 ep->dev = dev;
2005 ep->ep.ops = &gr_ep_ops;
2006 INIT_LIST_HEAD(&ep->queue);
2009 _req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
2025 usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
2026 ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;
2028 usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
2029 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2031 list_add_tail(&ep->ep_list, &dev->ep_list);
2033 ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
2034 &ep->tailbuf_paddr, GFP_ATOMIC);
2035 if (!ep->tailbuf)
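Each endpoint is given a small coherent bounce buffer at init time; this is the tailbuf that the OUT odd-length handling above points the last descriptor at, so a host sending more than expected spills into the bounce buffer instead of past the request buffer. The error path beyond the NULL check is assumed:

        ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
                                         &ep->tailbuf_paddr, GFP_ATOMIC);
        if (!ep->tailbuf)
                return -ENOMEM; /* assumed error path */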
2055 dev->gadget.ep0 = &dev->epi[0].ep;
2092 struct gr_ep *ep;
2095 ep = &dev->epi[num];
2097 ep = &dev->epo[num];
2099 if (ep->tailbuf)
2100 dma_free_coherent(dev->dev, ep->ep.maxpacket_limit,
2101 ep->tailbuf, ep->tailbuf_paddr);
2119 gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
2120 gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);