gadget.c revision f9c56cdd3905c96c600456203637bd7ec8ec6383
1/** 2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link 3 * 4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com 5 * 6 * Authors: Felipe Balbi <balbi@ti.com>, 7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions, and the following disclaimer, 14 * without modification. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. The names of the above-listed copyright holders may not be used 19 * to endorse or promote products derived from this software without 20 * specific prior written permission. 21 * 22 * ALTERNATIVELY, this software may be distributed under the terms of the 23 * GNU General Public License ("GPL") version 2, as published by the Free 24 * Software Foundation. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39#include <linux/kernel.h> 40#include <linux/delay.h> 41#include <linux/slab.h> 42#include <linux/spinlock.h> 43#include <linux/platform_device.h> 44#include <linux/pm_runtime.h> 45#include <linux/interrupt.h> 46#include <linux/io.h> 47#include <linux/list.h> 48#include <linux/dma-mapping.h> 49 50#include <linux/usb/ch9.h> 51#include <linux/usb/gadget.h> 52 53#include "core.h" 54#include "gadget.h" 55#include "io.h" 56 57#define DMA_ADDR_INVALID (~(dma_addr_t)0) 58 59void dwc3_map_buffer_to_dma(struct dwc3_request *req) 60{ 61 struct dwc3 *dwc = req->dep->dwc; 62 63 if (req->request.length == 0) { 64 /* req->request.dma = dwc->setup_buf_addr; */ 65 return; 66 } 67 68 if (req->request.num_sgs) { 69 int mapped; 70 71 mapped = dma_map_sg(dwc->dev, req->request.sg, 72 req->request.num_sgs, 73 req->direction ? DMA_TO_DEVICE 74 : DMA_FROM_DEVICE); 75 if (mapped < 0) { 76 dev_err(dwc->dev, "failed to map SGs\n"); 77 return; 78 } 79 80 req->request.num_mapped_sgs = mapped; 81 return; 82 } 83 84 if (req->request.dma == DMA_ADDR_INVALID) { 85 req->request.dma = dma_map_single(dwc->dev, req->request.buf, 86 req->request.length, req->direction 87 ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); 88 req->mapped = true; 89 } 90} 91 92void dwc3_unmap_buffer_from_dma(struct dwc3_request *req) 93{ 94 struct dwc3 *dwc = req->dep->dwc; 95 96 if (req->request.length == 0) { 97 req->request.dma = DMA_ADDR_INVALID; 98 return; 99 } 100 101 if (req->request.num_mapped_sgs) { 102 req->request.dma = DMA_ADDR_INVALID; 103 dma_unmap_sg(dwc->dev, req->request.sg, 104 req->request.num_sgs, 105 req->direction ? DMA_TO_DEVICE 106 : DMA_FROM_DEVICE); 107 108 req->request.num_mapped_sgs = 0; 109 return; 110 } 111 112 if (req->mapped) { 113 dma_unmap_single(dwc->dev, req->request.dma, 114 req->request.length, req->direction 115 ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 116 req->mapped = 0; 117 req->request.dma = DMA_ADDR_INVALID; 118 } 119} 120 121void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, 122 int status) 123{ 124 struct dwc3 *dwc = dep->dwc; 125 126 if (req->queued) { 127 if (req->request.num_mapped_sgs) 128 dep->busy_slot += req->request.num_mapped_sgs; 129 else 130 dep->busy_slot++; 131 132 /* 133 * Skip LINK TRB. We can't use req->trb and check for 134 * DWC3_TRBCTL_LINK_TRB because it points the TRB we just 135 * completed (not the LINK TRB). 136 */ 137 if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && 138 usb_endpoint_xfer_isoc(dep->desc)) 139 dep->busy_slot++; 140 } 141 list_del(&req->list); 142 req->trb = NULL; 143 144 if (req->request.status == -EINPROGRESS) 145 req->request.status = status; 146 147 dwc3_unmap_buffer_from_dma(req); 148 149 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", 150 req, dep->name, req->request.actual, 151 req->request.length, status); 152 153 spin_unlock(&dwc->lock); 154 req->request.complete(&req->dep->endpoint, &req->request); 155 spin_lock(&dwc->lock); 156} 157 158static const char *dwc3_gadget_ep_cmd_string(u8 cmd) 159{ 160 switch (cmd) { 161 case DWC3_DEPCMD_DEPSTARTCFG: 162 return "Start New Configuration"; 163 case DWC3_DEPCMD_ENDTRANSFER: 164 return "End Transfer"; 165 case DWC3_DEPCMD_UPDATETRANSFER: 166 return "Update Transfer"; 167 case DWC3_DEPCMD_STARTTRANSFER: 168 return "Start Transfer"; 169 case DWC3_DEPCMD_CLEARSTALL: 170 return "Clear Stall"; 171 case DWC3_DEPCMD_SETSTALL: 172 return "Set Stall"; 173 case DWC3_DEPCMD_GETSEQNUMBER: 174 return "Get Data Sequence Number"; 175 case DWC3_DEPCMD_SETTRANSFRESOURCE: 176 return "Set Endpoint Transfer Resource"; 177 case DWC3_DEPCMD_SETEPCONFIG: 178 return "Set Endpoint Configuration"; 179 default: 180 return "UNKNOWN command"; 181 } 182} 183 184int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, 185 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params) 186{ 187 struct dwc3_ep *dep = dwc->eps[ep]; 188 u32 timeout = 500; 189 u32 reg; 190 191 dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n", 192 dep->name, 193 dwc3_gadget_ep_cmd_string(cmd), params->param0, 194 params->param1, params->param2); 195 196 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0); 197 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1); 198 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2); 199 200 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT); 201 do { 202 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep)); 203 if (!(reg & DWC3_DEPCMD_CMDACT)) { 204 dev_vdbg(dwc->dev, "Command Complete --> %d\n", 205 DWC3_DEPCMD_STATUS(reg)); 206 return 0; 207 } 208 209 /* 210 * We can't sleep here, because it is also called from 211 * interrupt context. 
212 */ 213 timeout--; 214 if (!timeout) 215 return -ETIMEDOUT; 216 217 udelay(1); 218 } while (1); 219} 220 221static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 222 struct dwc3_trb_hw *trb) 223{ 224 u32 offset = (char *) trb - (char *) dep->trb_pool; 225 226 return dep->trb_pool_dma + offset; 227} 228 229static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) 230{ 231 struct dwc3 *dwc = dep->dwc; 232 233 if (dep->trb_pool) 234 return 0; 235 236 if (dep->number == 0 || dep->number == 1) 237 return 0; 238 239 dep->trb_pool = dma_alloc_coherent(dwc->dev, 240 sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 241 &dep->trb_pool_dma, GFP_KERNEL); 242 if (!dep->trb_pool) { 243 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", 244 dep->name); 245 return -ENOMEM; 246 } 247 248 return 0; 249} 250 251static void dwc3_free_trb_pool(struct dwc3_ep *dep) 252{ 253 struct dwc3 *dwc = dep->dwc; 254 255 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 256 dep->trb_pool, dep->trb_pool_dma); 257 258 dep->trb_pool = NULL; 259 dep->trb_pool_dma = 0; 260} 261 262static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) 263{ 264 struct dwc3_gadget_ep_cmd_params params; 265 u32 cmd; 266 267 memset(¶ms, 0x00, sizeof(params)); 268 269 if (dep->number != 1) { 270 cmd = DWC3_DEPCMD_DEPSTARTCFG; 271 /* XferRscIdx == 0 for ep0 and 2 for the remaining */ 272 if (dep->number > 1) { 273 if (dwc->start_config_issued) 274 return 0; 275 dwc->start_config_issued = true; 276 cmd |= DWC3_DEPCMD_PARAM(2); 277 } 278 279 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, ¶ms); 280 } 281 282 return 0; 283} 284 285static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, 286 const struct usb_endpoint_descriptor *desc, 287 const struct usb_ss_ep_comp_descriptor *comp_desc) 288{ 289 struct dwc3_gadget_ep_cmd_params params; 290 291 memset(¶ms, 0x00, sizeof(params)); 292 293 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) 294 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)) 295 | DWC3_DEPCFG_BURST_SIZE(dep->endpoint.maxburst); 296 297 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN 298 | DWC3_DEPCFG_XFER_NOT_READY_EN; 299 300 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 301 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE 302 | DWC3_DEPCFG_STREAM_EVENT_EN; 303 dep->stream_capable = true; 304 } 305 306 if (usb_endpoint_xfer_isoc(desc)) 307 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; 308 309 /* 310 * We are doing 1:1 mapping for endpoints, meaning 311 * Physical Endpoints 2 maps to Logical Endpoint 2 and 312 * so on. We consider the direction bit as part of the physical 313 * endpoint number. So USB endpoint 0x81 is 0x03. 
314 */ 315 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number); 316 317 /* 318 * We must use the lower 16 TX FIFOs even though 319 * HW might have more 320 */ 321 if (dep->direction) 322 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); 323 324 if (desc->bInterval) { 325 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); 326 dep->interval = 1 << (desc->bInterval - 1); 327 } 328 329 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 330 DWC3_DEPCMD_SETEPCONFIG, &params); 331} 332 333static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) 334{ 335 struct dwc3_gadget_ep_cmd_params params; 336 337 memset(&params, 0x00, sizeof(params)); 338 339 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); 340 341 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 342 DWC3_DEPCMD_SETTRANSFRESOURCE, &params); 343} 344 345/** 346 * __dwc3_gadget_ep_enable - Initializes a HW endpoint 347 * @dep: endpoint to be initialized 348 * @desc: USB Endpoint Descriptor 349 * 350 * Caller should take care of locking 351 */ 352static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, 353 const struct usb_endpoint_descriptor *desc, 354 const struct usb_ss_ep_comp_descriptor *comp_desc) 355{ 356 struct dwc3 *dwc = dep->dwc; 357 u32 reg; 358 int ret = -ENOMEM; 359 360 if (!(dep->flags & DWC3_EP_ENABLED)) { 361 ret = dwc3_gadget_start_config(dwc, dep); 362 if (ret) 363 return ret; 364 } 365 366 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc); 367 if (ret) 368 return ret; 369 370 if (!(dep->flags & DWC3_EP_ENABLED)) { 371 struct dwc3_trb_hw *trb_st_hw; 372 struct dwc3_trb_hw *trb_link_hw; 373 struct dwc3_trb trb_link; 374 375 ret = dwc3_gadget_set_xfer_resource(dwc, dep); 376 if (ret) 377 return ret; 378 379 dep->desc = desc; 380 dep->comp_desc = comp_desc; 381 dep->type = usb_endpoint_type(desc); 382 dep->flags |= DWC3_EP_ENABLED; 383 384 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 385 reg |= DWC3_DALEPENA_EP(dep->number); 386 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 387 388 if (!usb_endpoint_xfer_isoc(desc)) 389 return 0; 390 391 memset(&trb_link, 0, sizeof(trb_link)); 392 393 /* Link TRB for ISOC. The HWO bit is never reset */ 394 trb_st_hw = &dep->trb_pool[0]; 395 396 trb_link.bplh = dwc3_trb_dma_offset(dep, trb_st_hw); 397 trb_link.trbctl = DWC3_TRBCTL_LINK_TRB; 398 trb_link.hwo = true; 399 400 trb_link_hw = &dep->trb_pool[DWC3_TRB_NUM - 1]; 401 dwc3_trb_to_hw(&trb_link, trb_link_hw); 402 } 403 404 return 0; 405} 406 407static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum); 408static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) 409{ 410 struct dwc3_request *req; 411 412 if (!list_empty(&dep->req_queued)) 413 dwc3_stop_active_transfer(dwc, dep->number); 414 415 while (!list_empty(&dep->request_list)) { 416 req = next_request(&dep->request_list); 417 418 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 419 } 420} 421 422/** 423 * __dwc3_gadget_ep_disable - Disables a HW endpoint 424 * @dep: the endpoint to disable 425 * 426 * This function also removes requests which are currently processed by the 427 * hardware and those which are not yet scheduled. 428 * Caller should take care of locking. 
429 */ 430static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) 431{ 432 struct dwc3 *dwc = dep->dwc; 433 u32 reg; 434 435 dwc3_remove_requests(dwc, dep); 436 437 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 438 reg &= ~DWC3_DALEPENA_EP(dep->number); 439 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 440 441 dep->stream_capable = false; 442 dep->desc = NULL; 443 dep->endpoint.desc = NULL; 444 dep->comp_desc = NULL; 445 dep->type = 0; 446 dep->flags = 0; 447 448 return 0; 449} 450 451/* -------------------------------------------------------------------------- */ 452 453static int dwc3_gadget_ep0_enable(struct usb_ep *ep, 454 const struct usb_endpoint_descriptor *desc) 455{ 456 return -EINVAL; 457} 458 459static int dwc3_gadget_ep0_disable(struct usb_ep *ep) 460{ 461 return -EINVAL; 462} 463 464/* -------------------------------------------------------------------------- */ 465 466static int dwc3_gadget_ep_enable(struct usb_ep *ep, 467 const struct usb_endpoint_descriptor *desc) 468{ 469 struct dwc3_ep *dep; 470 struct dwc3 *dwc; 471 unsigned long flags; 472 int ret; 473 474 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 475 pr_debug("dwc3: invalid parameters\n"); 476 return -EINVAL; 477 } 478 479 if (!desc->wMaxPacketSize) { 480 pr_debug("dwc3: missing wMaxPacketSize\n"); 481 return -EINVAL; 482 } 483 484 dep = to_dwc3_ep(ep); 485 dwc = dep->dwc; 486 487 switch (usb_endpoint_type(desc)) { 488 case USB_ENDPOINT_XFER_CONTROL: 489 strncat(dep->name, "-control", sizeof(dep->name)); 490 break; 491 case USB_ENDPOINT_XFER_ISOC: 492 strncat(dep->name, "-isoc", sizeof(dep->name)); 493 break; 494 case USB_ENDPOINT_XFER_BULK: 495 strncat(dep->name, "-bulk", sizeof(dep->name)); 496 break; 497 case USB_ENDPOINT_XFER_INT: 498 strncat(dep->name, "-int", sizeof(dep->name)); 499 break; 500 default: 501 dev_err(dwc->dev, "invalid endpoint transfer type\n"); 502 } 503 504 if (dep->flags & DWC3_EP_ENABLED) { 505 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n", 506 dep->name); 507 return 0; 508 } 509 510 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name); 511 512 spin_lock_irqsave(&dwc->lock, flags); 513 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc); 514 spin_unlock_irqrestore(&dwc->lock, flags); 515 516 return ret; 517} 518 519static int dwc3_gadget_ep_disable(struct usb_ep *ep) 520{ 521 struct dwc3_ep *dep; 522 struct dwc3 *dwc; 523 unsigned long flags; 524 int ret; 525 526 if (!ep) { 527 pr_debug("dwc3: invalid parameters\n"); 528 return -EINVAL; 529 } 530 531 dep = to_dwc3_ep(ep); 532 dwc = dep->dwc; 533 534 if (!(dep->flags & DWC3_EP_ENABLED)) { 535 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n", 536 dep->name); 537 return 0; 538 } 539 540 snprintf(dep->name, sizeof(dep->name), "ep%d%s", 541 dep->number >> 1, 542 (dep->number & 1) ? 
"in" : "out"); 543 544 spin_lock_irqsave(&dwc->lock, flags); 545 ret = __dwc3_gadget_ep_disable(dep); 546 spin_unlock_irqrestore(&dwc->lock, flags); 547 548 return ret; 549} 550 551static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, 552 gfp_t gfp_flags) 553{ 554 struct dwc3_request *req; 555 struct dwc3_ep *dep = to_dwc3_ep(ep); 556 struct dwc3 *dwc = dep->dwc; 557 558 req = kzalloc(sizeof(*req), gfp_flags); 559 if (!req) { 560 dev_err(dwc->dev, "not enough memory\n"); 561 return NULL; 562 } 563 564 req->epnum = dep->number; 565 req->dep = dep; 566 req->request.dma = DMA_ADDR_INVALID; 567 568 return &req->request; 569} 570 571static void dwc3_gadget_ep_free_request(struct usb_ep *ep, 572 struct usb_request *request) 573{ 574 struct dwc3_request *req = to_dwc3_request(request); 575 576 kfree(req); 577} 578 579/** 580 * dwc3_prepare_one_trb - setup one TRB from one request 581 * @dep: endpoint for which this request is prepared 582 * @req: dwc3_request pointer 583 */ 584static void dwc3_prepare_one_trb(struct dwc3_ep *dep, 585 struct dwc3_request *req, dma_addr_t dma, 586 unsigned length, unsigned last, unsigned chain) 587{ 588 struct dwc3 *dwc = dep->dwc; 589 struct dwc3_trb_hw *trb_hw; 590 struct dwc3_trb trb; 591 592 unsigned int cur_slot; 593 594 dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n", 595 dep->name, req, (unsigned long long) dma, 596 length, last ? " last" : "", 597 chain ? " chain" : ""); 598 599 trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; 600 cur_slot = dep->free_slot; 601 dep->free_slot++; 602 603 /* Skip the LINK-TRB on ISOC */ 604 if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && 605 usb_endpoint_xfer_isoc(dep->desc)) 606 return; 607 608 memset(&trb, 0, sizeof(trb)); 609 if (!req->trb) { 610 dwc3_gadget_move_request_queued(req); 611 req->trb = trb_hw; 612 req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw); 613 } 614 615 if (usb_endpoint_xfer_isoc(dep->desc)) { 616 trb.isp_imi = true; 617 trb.csp = true; 618 } else { 619 trb.chn = chain; 620 trb.lst = last; 621 } 622 623 if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable) 624 trb.sid_sofn = req->request.stream_id; 625 626 switch (usb_endpoint_type(dep->desc)) { 627 case USB_ENDPOINT_XFER_CONTROL: 628 trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP; 629 break; 630 631 case USB_ENDPOINT_XFER_ISOC: 632 trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 633 634 /* IOC every DWC3_TRB_NUM / 4 so we can refill */ 635 if (!(cur_slot % (DWC3_TRB_NUM / 4))) 636 trb.ioc = last; 637 break; 638 639 case USB_ENDPOINT_XFER_BULK: 640 case USB_ENDPOINT_XFER_INT: 641 trb.trbctl = DWC3_TRBCTL_NORMAL; 642 break; 643 default: 644 /* 645 * This is only possible with faulty memory because we 646 * checked it already :) 647 */ 648 BUG(); 649 } 650 651 trb.length = length; 652 trb.bplh = dma; 653 trb.hwo = true; 654 655 dwc3_trb_to_hw(&trb, trb_hw); 656} 657 658/* 659 * dwc3_prepare_trbs - setup TRBs from requests 660 * @dep: endpoint for which requests are being prepared 661 * @starting: true if the endpoint is idle and no requests are queued. 662 * 663 * The functions goes through the requests list and setups TRBs for the 664 * transfers. The functions returns once there are not more TRBs available or 665 * it run out of requests. 
666 */ 667static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting) 668{ 669 struct dwc3_request *req, *n; 670 u32 trbs_left; 671 unsigned int last_one = 0; 672 673 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); 674 675 /* the first request must not be queued */ 676 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK; 677 678 /* 679 * if the busy and free slots are equal then it is either full or empty. 680 * If we are starting to process requests then we are empty. Otherwise 681 * we are full and don't do anything 682 */ 683 if (!trbs_left) { 684 if (!starting) 685 return; 686 trbs_left = DWC3_TRB_NUM; 687 /* 688 * In case we start from scratch, we queue the ISOC requests 689 * starting from slot 1. This is done because we use a ring 690 * buffer and have no LST bit to stop us. Instead, we place the 691 * IOC bit every TRB_NUM/4 TRBs. We try to avoid having an 692 * interrupt after the first request so we start at slot 1 and 693 * have 7 requests processed before we hit the first IOC. 694 * Other transfer types don't use the ring buffer and are 695 * processed from the first TRB until the last one. Since we 696 * don't wrap around we have to start at the beginning. 697 */ 698 if (usb_endpoint_xfer_isoc(dep->desc)) { 699 dep->busy_slot = 1; 700 dep->free_slot = 1; 701 } else { 702 dep->busy_slot = 0; 703 dep->free_slot = 0; 704 } 705 } 706 707 /* The last TRB is a link TRB, not used for xfer */ 708 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc)) 709 return; 710 711 list_for_each_entry_safe(req, n, &dep->request_list, list) { 712 unsigned length; 713 dma_addr_t dma; 714 715 if (req->request.num_mapped_sgs > 0) { 716 struct usb_request *request = &req->request; 717 struct scatterlist *sg = request->sg; 718 struct scatterlist *s; 719 int i; 720 721 for_each_sg(sg, s, request->num_mapped_sgs, i) { 722 unsigned chain = true; 723 724 length = sg_dma_len(s); 725 dma = sg_dma_address(s); 726 727 if (i == (request->num_mapped_sgs - 1) 728 || sg_is_last(s)) { 729 last_one = true; 730 chain = false; 731 } 732 733 trbs_left--; 734 if (!trbs_left) 735 last_one = true; 736 737 if (last_one) 738 chain = false; 739 740 dwc3_prepare_one_trb(dep, req, dma, length, 741 last_one, chain); 742 743 if (last_one) 744 break; 745 } 746 } else { 747 dma = req->request.dma; 748 length = req->request.length; 749 trbs_left--; 750 751 if (!trbs_left) 752 last_one = 1; 753 754 /* Is this the last request? */ 755 if (list_is_last(&req->list, &dep->request_list)) 756 last_one = 1; 757 758 dwc3_prepare_one_trb(dep, req, dma, length, 759 last_one, false); 760 761 if (last_one) 762 break; 763 } 764 } 765} 766 767static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param, 768 int start_new) 769{ 770 struct dwc3_gadget_ep_cmd_params params; 771 struct dwc3_request *req; 772 struct dwc3 *dwc = dep->dwc; 773 int ret; 774 u32 cmd; 775 776 if (start_new && (dep->flags & DWC3_EP_BUSY)) { 777 dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name); 778 return -EBUSY; 779 } 780 dep->flags &= ~DWC3_EP_PENDING_REQUEST; 781 782 /* 783 * If we are getting here after a short-out-packet we don't enqueue any 784 * new requests as we try to set the IOC bit only on the last request. 
785 */ 786 if (start_new) { 787 if (list_empty(&dep->req_queued)) 788 dwc3_prepare_trbs(dep, start_new); 789 790 /* req points to the first request which will be sent */ 791 req = next_request(&dep->req_queued); 792 } else { 793 dwc3_prepare_trbs(dep, start_new); 794 795 /* 796 * req points to the first request where HWO changed 797 * from 0 to 1 798 */ 799 req = next_request(&dep->req_queued); 800 } 801 if (!req) { 802 dep->flags |= DWC3_EP_PENDING_REQUEST; 803 return 0; 804 } 805 806 memset(¶ms, 0, sizeof(params)); 807 params.param0 = upper_32_bits(req->trb_dma); 808 params.param1 = lower_32_bits(req->trb_dma); 809 810 if (start_new) 811 cmd = DWC3_DEPCMD_STARTTRANSFER; 812 else 813 cmd = DWC3_DEPCMD_UPDATETRANSFER; 814 815 cmd |= DWC3_DEPCMD_PARAM(cmd_param); 816 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 817 if (ret < 0) { 818 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n"); 819 820 /* 821 * FIXME we need to iterate over the list of requests 822 * here and stop, unmap, free and del each of the linked 823 * requests instead of we do now. 824 */ 825 dwc3_unmap_buffer_from_dma(req); 826 list_del(&req->list); 827 return ret; 828 } 829 830 dep->flags |= DWC3_EP_BUSY; 831 dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc, 832 dep->number); 833 834 WARN_ON_ONCE(!dep->res_trans_idx); 835 836 return 0; 837} 838 839static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) 840{ 841 req->request.actual = 0; 842 req->request.status = -EINPROGRESS; 843 req->direction = dep->direction; 844 req->epnum = dep->number; 845 846 /* 847 * We only add to our list of requests now and 848 * start consuming the list once we get XferNotReady 849 * IRQ. 850 * 851 * That way, we avoid doing anything that we don't need 852 * to do now and defer it until the point we receive a 853 * particular token from the Host side. 854 * 855 * This will also avoid Host cancelling URBs due to too 856 * many NACKs. 857 */ 858 dwc3_map_buffer_to_dma(req); 859 list_add_tail(&req->list, &dep->request_list); 860 861 /* 862 * There is one special case: XferNotReady with 863 * empty list of requests. We need to kick the 864 * transfer here in that situation, otherwise 865 * we will be NAKing forever. 866 * 867 * If we get XferNotReady before gadget driver 868 * has a chance to queue a request, we will ACK 869 * the IRQ but won't be able to receive the data 870 * until the next request is queued. The following 871 * code is handling exactly that. 
872 */ 873 if (dep->flags & DWC3_EP_PENDING_REQUEST) { 874 int ret; 875 int start_trans; 876 877 start_trans = 1; 878 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 879 dep->flags & DWC3_EP_BUSY) 880 start_trans = 0; 881 882 ret = __dwc3_gadget_kick_transfer(dep, 0, start_trans); 883 if (ret && ret != -EBUSY) { 884 struct dwc3 *dwc = dep->dwc; 885 886 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 887 dep->name); 888 } 889 }; 890 891 return 0; 892} 893 894static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 895 gfp_t gfp_flags) 896{ 897 struct dwc3_request *req = to_dwc3_request(request); 898 struct dwc3_ep *dep = to_dwc3_ep(ep); 899 struct dwc3 *dwc = dep->dwc; 900 901 unsigned long flags; 902 903 int ret; 904 905 if (!dep->desc) { 906 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n", 907 request, ep->name); 908 return -ESHUTDOWN; 909 } 910 911 dev_vdbg(dwc->dev, "queing request %p to %s length %d\n", 912 request, ep->name, request->length); 913 914 spin_lock_irqsave(&dwc->lock, flags); 915 ret = __dwc3_gadget_ep_queue(dep, req); 916 spin_unlock_irqrestore(&dwc->lock, flags); 917 918 return ret; 919} 920 921static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, 922 struct usb_request *request) 923{ 924 struct dwc3_request *req = to_dwc3_request(request); 925 struct dwc3_request *r = NULL; 926 927 struct dwc3_ep *dep = to_dwc3_ep(ep); 928 struct dwc3 *dwc = dep->dwc; 929 930 unsigned long flags; 931 int ret = 0; 932 933 spin_lock_irqsave(&dwc->lock, flags); 934 935 list_for_each_entry(r, &dep->request_list, list) { 936 if (r == req) 937 break; 938 } 939 940 if (r != req) { 941 list_for_each_entry(r, &dep->req_queued, list) { 942 if (r == req) 943 break; 944 } 945 if (r == req) { 946 /* wait until it is processed */ 947 dwc3_stop_active_transfer(dwc, dep->number); 948 goto out0; 949 } 950 dev_err(dwc->dev, "request %p was not queued to %s\n", 951 request, ep->name); 952 ret = -EINVAL; 953 goto out0; 954 } 955 956 /* giveback the request */ 957 dwc3_gadget_giveback(dep, req, -ECONNRESET); 958 959out0: 960 spin_unlock_irqrestore(&dwc->lock, flags); 961 962 return ret; 963} 964 965int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value) 966{ 967 struct dwc3_gadget_ep_cmd_params params; 968 struct dwc3 *dwc = dep->dwc; 969 int ret; 970 971 memset(¶ms, 0x00, sizeof(params)); 972 973 if (value) { 974 if (dep->number == 0 || dep->number == 1) { 975 /* 976 * Whenever EP0 is stalled, we will restart 977 * the state machine, thus moving back to 978 * Setup Phase 979 */ 980 dwc->ep0state = EP0_SETUP_PHASE; 981 } 982 983 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 984 DWC3_DEPCMD_SETSTALL, ¶ms); 985 if (ret) 986 dev_err(dwc->dev, "failed to %s STALL on %s\n", 987 value ? "set" : "clear", 988 dep->name); 989 else 990 dep->flags |= DWC3_EP_STALL; 991 } else { 992 if (dep->flags & DWC3_EP_WEDGE) 993 return 0; 994 995 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 996 DWC3_DEPCMD_CLEARSTALL, ¶ms); 997 if (ret) 998 dev_err(dwc->dev, "failed to %s STALL on %s\n", 999 value ? 
"set" : "clear", 1000 dep->name); 1001 else 1002 dep->flags &= ~DWC3_EP_STALL; 1003 } 1004 1005 return ret; 1006} 1007 1008static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 1009{ 1010 struct dwc3_ep *dep = to_dwc3_ep(ep); 1011 struct dwc3 *dwc = dep->dwc; 1012 1013 unsigned long flags; 1014 1015 int ret; 1016 1017 spin_lock_irqsave(&dwc->lock, flags); 1018 1019 if (usb_endpoint_xfer_isoc(dep->desc)) { 1020 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 1021 ret = -EINVAL; 1022 goto out; 1023 } 1024 1025 ret = __dwc3_gadget_ep_set_halt(dep, value); 1026out: 1027 spin_unlock_irqrestore(&dwc->lock, flags); 1028 1029 return ret; 1030} 1031 1032static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 1033{ 1034 struct dwc3_ep *dep = to_dwc3_ep(ep); 1035 1036 dep->flags |= DWC3_EP_WEDGE; 1037 1038 return dwc3_gadget_ep_set_halt(ep, 1); 1039} 1040 1041/* -------------------------------------------------------------------------- */ 1042 1043static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1044 .bLength = USB_DT_ENDPOINT_SIZE, 1045 .bDescriptorType = USB_DT_ENDPOINT, 1046 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1047}; 1048 1049static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1050 .enable = dwc3_gadget_ep0_enable, 1051 .disable = dwc3_gadget_ep0_disable, 1052 .alloc_request = dwc3_gadget_ep_alloc_request, 1053 .free_request = dwc3_gadget_ep_free_request, 1054 .queue = dwc3_gadget_ep0_queue, 1055 .dequeue = dwc3_gadget_ep_dequeue, 1056 .set_halt = dwc3_gadget_ep_set_halt, 1057 .set_wedge = dwc3_gadget_ep_set_wedge, 1058}; 1059 1060static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1061 .enable = dwc3_gadget_ep_enable, 1062 .disable = dwc3_gadget_ep_disable, 1063 .alloc_request = dwc3_gadget_ep_alloc_request, 1064 .free_request = dwc3_gadget_ep_free_request, 1065 .queue = dwc3_gadget_ep_queue, 1066 .dequeue = dwc3_gadget_ep_dequeue, 1067 .set_halt = dwc3_gadget_ep_set_halt, 1068 .set_wedge = dwc3_gadget_ep_set_wedge, 1069}; 1070 1071/* -------------------------------------------------------------------------- */ 1072 1073static int dwc3_gadget_get_frame(struct usb_gadget *g) 1074{ 1075 struct dwc3 *dwc = gadget_to_dwc(g); 1076 u32 reg; 1077 1078 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1079 return DWC3_DSTS_SOFFN(reg); 1080} 1081 1082static int dwc3_gadget_wakeup(struct usb_gadget *g) 1083{ 1084 struct dwc3 *dwc = gadget_to_dwc(g); 1085 1086 unsigned long timeout; 1087 unsigned long flags; 1088 1089 u32 reg; 1090 1091 int ret = 0; 1092 1093 u8 link_state; 1094 u8 speed; 1095 1096 spin_lock_irqsave(&dwc->lock, flags); 1097 1098 /* 1099 * According to the Databook Remote wakeup request should 1100 * be issued only when the device is in early suspend state. 1101 * 1102 * We can check that via USB Link State bits in DSTS register. 1103 */ 1104 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1105 1106 speed = reg & DWC3_DSTS_CONNECTSPD; 1107 if (speed == DWC3_DSTS_SUPERSPEED) { 1108 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n"); 1109 ret = -EINVAL; 1110 goto out; 1111 } 1112 1113 link_state = DWC3_DSTS_USBLNKST(reg); 1114 1115 switch (link_state) { 1116 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1117 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1118 break; 1119 default: 1120 dev_dbg(dwc->dev, "can't wakeup from link state %d\n", 1121 link_state); 1122 ret = -EINVAL; 1123 goto out; 1124 } 1125 1126 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1127 1128 /* 1129 * Switch link state to Recovery. 
In HS/FS/LS this means 1130 * RemoteWakeup Request 1131 */ 1132 reg |= DWC3_DCTL_ULSTCHNG_RECOVERY; 1133 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1134 1135 /* wait for at least 2000us */ 1136 usleep_range(2000, 2500); 1137 1138 /* write zeroes to Link Change Request */ 1139 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 1140 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1141 1142 /* poll until Link State changes to ON */ 1143 timeout = jiffies + msecs_to_jiffies(100); 1144 1145 while (!(time_after(jiffies, timeout))) { 1146 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1147 1148 /* in HS, means ON */ 1149 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) 1150 break; 1151 } 1152 1153 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { 1154 dev_err(dwc->dev, "failed to send remote wakeup\n"); 1155 ret = -EINVAL; 1156 } 1157 1158out: 1159 spin_unlock_irqrestore(&dwc->lock, flags); 1160 1161 return ret; 1162} 1163 1164static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, 1165 int is_selfpowered) 1166{ 1167 struct dwc3 *dwc = gadget_to_dwc(g); 1168 1169 dwc->is_selfpowered = !!is_selfpowered; 1170 1171 return 0; 1172} 1173 1174static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on) 1175{ 1176 u32 reg; 1177 u32 timeout = 500; 1178 1179 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1180 if (is_on) 1181 reg |= DWC3_DCTL_RUN_STOP; 1182 else 1183 reg &= ~DWC3_DCTL_RUN_STOP; 1184 1185 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1186 1187 do { 1188 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1189 if (is_on) { 1190 if (!(reg & DWC3_DSTS_DEVCTRLHLT)) 1191 break; 1192 } else { 1193 if (reg & DWC3_DSTS_DEVCTRLHLT) 1194 break; 1195 } 1196 timeout--; 1197 if (!timeout) 1198 break; 1199 udelay(1); 1200 } while (1); 1201 1202 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n", 1203 dwc->gadget_driver 1204 ? dwc->gadget_driver->function : "no-function", 1205 is_on ? 
"connect" : "disconnect"); 1206} 1207 1208static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) 1209{ 1210 struct dwc3 *dwc = gadget_to_dwc(g); 1211 unsigned long flags; 1212 1213 is_on = !!is_on; 1214 1215 spin_lock_irqsave(&dwc->lock, flags); 1216 dwc3_gadget_run_stop(dwc, is_on); 1217 spin_unlock_irqrestore(&dwc->lock, flags); 1218 1219 return 0; 1220} 1221 1222static int dwc3_gadget_start(struct usb_gadget *g, 1223 struct usb_gadget_driver *driver) 1224{ 1225 struct dwc3 *dwc = gadget_to_dwc(g); 1226 struct dwc3_ep *dep; 1227 unsigned long flags; 1228 int ret = 0; 1229 u32 reg; 1230 1231 spin_lock_irqsave(&dwc->lock, flags); 1232 1233 if (dwc->gadget_driver) { 1234 dev_err(dwc->dev, "%s is already bound to %s\n", 1235 dwc->gadget.name, 1236 dwc->gadget_driver->driver.name); 1237 ret = -EBUSY; 1238 goto err0; 1239 } 1240 1241 dwc->gadget_driver = driver; 1242 dwc->gadget.dev.driver = &driver->driver; 1243 1244 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1245 reg &= ~(DWC3_DCFG_SPEED_MASK); 1246 reg |= dwc->maximum_speed; 1247 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1248 1249 dwc->start_config_issued = false; 1250 1251 /* Start with SuperSpeed Default */ 1252 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1253 1254 dep = dwc->eps[0]; 1255 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL); 1256 if (ret) { 1257 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1258 goto err0; 1259 } 1260 1261 dep = dwc->eps[1]; 1262 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL); 1263 if (ret) { 1264 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1265 goto err1; 1266 } 1267 1268 /* begin to receive SETUP packets */ 1269 dwc->ep0state = EP0_SETUP_PHASE; 1270 dwc3_ep0_out_start(dwc); 1271 1272 spin_unlock_irqrestore(&dwc->lock, flags); 1273 1274 return 0; 1275 1276err1: 1277 __dwc3_gadget_ep_disable(dwc->eps[0]); 1278 1279err0: 1280 spin_unlock_irqrestore(&dwc->lock, flags); 1281 1282 return ret; 1283} 1284 1285static int dwc3_gadget_stop(struct usb_gadget *g, 1286 struct usb_gadget_driver *driver) 1287{ 1288 struct dwc3 *dwc = gadget_to_dwc(g); 1289 unsigned long flags; 1290 1291 spin_lock_irqsave(&dwc->lock, flags); 1292 1293 __dwc3_gadget_ep_disable(dwc->eps[0]); 1294 __dwc3_gadget_ep_disable(dwc->eps[1]); 1295 1296 dwc->gadget_driver = NULL; 1297 dwc->gadget.dev.driver = NULL; 1298 1299 spin_unlock_irqrestore(&dwc->lock, flags); 1300 1301 return 0; 1302} 1303static const struct usb_gadget_ops dwc3_gadget_ops = { 1304 .get_frame = dwc3_gadget_get_frame, 1305 .wakeup = dwc3_gadget_wakeup, 1306 .set_selfpowered = dwc3_gadget_set_selfpowered, 1307 .pullup = dwc3_gadget_pullup, 1308 .udc_start = dwc3_gadget_start, 1309 .udc_stop = dwc3_gadget_stop, 1310}; 1311 1312/* -------------------------------------------------------------------------- */ 1313 1314static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc) 1315{ 1316 struct dwc3_ep *dep; 1317 u8 epnum; 1318 1319 INIT_LIST_HEAD(&dwc->gadget.ep_list); 1320 1321 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1322 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 1323 if (!dep) { 1324 dev_err(dwc->dev, "can't allocate endpoint %d\n", 1325 epnum); 1326 return -ENOMEM; 1327 } 1328 1329 dep->dwc = dwc; 1330 dep->number = epnum; 1331 dwc->eps[epnum] = dep; 1332 1333 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1, 1334 (epnum & 1) ? 
"in" : "out"); 1335 dep->endpoint.name = dep->name; 1336 dep->direction = (epnum & 1); 1337 1338 if (epnum == 0 || epnum == 1) { 1339 dep->endpoint.maxpacket = 512; 1340 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1341 if (!epnum) 1342 dwc->gadget.ep0 = &dep->endpoint; 1343 } else { 1344 int ret; 1345 1346 dep->endpoint.maxpacket = 1024; 1347 dep->endpoint.max_streams = 15; 1348 dep->endpoint.ops = &dwc3_gadget_ep_ops; 1349 list_add_tail(&dep->endpoint.ep_list, 1350 &dwc->gadget.ep_list); 1351 1352 ret = dwc3_alloc_trb_pool(dep); 1353 if (ret) 1354 return ret; 1355 } 1356 1357 INIT_LIST_HEAD(&dep->request_list); 1358 INIT_LIST_HEAD(&dep->req_queued); 1359 } 1360 1361 return 0; 1362} 1363 1364static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) 1365{ 1366 struct dwc3_ep *dep; 1367 u8 epnum; 1368 1369 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1370 dep = dwc->eps[epnum]; 1371 dwc3_free_trb_pool(dep); 1372 1373 if (epnum != 0 && epnum != 1) 1374 list_del(&dep->endpoint.ep_list); 1375 1376 kfree(dep); 1377 } 1378} 1379 1380static void dwc3_gadget_release(struct device *dev) 1381{ 1382 dev_dbg(dev, "%s\n", __func__); 1383} 1384 1385/* -------------------------------------------------------------------------- */ 1386static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, 1387 const struct dwc3_event_depevt *event, int status) 1388{ 1389 struct dwc3_request *req; 1390 struct dwc3_trb trb; 1391 unsigned int count; 1392 unsigned int s_pkt = 0; 1393 1394 do { 1395 req = next_request(&dep->req_queued); 1396 if (!req) { 1397 WARN_ON_ONCE(1); 1398 return 1; 1399 } 1400 1401 dwc3_trb_to_nat(req->trb, &trb); 1402 1403 if (trb.hwo && status != -ESHUTDOWN) 1404 /* 1405 * We continue despite the error. There is not much we 1406 * can do. If we don't clean in up we loop for ever. If 1407 * we skip the TRB than it gets overwritten reused after 1408 * a while since we use them in a ring buffer. a BUG() 1409 * would help. Lets hope that if this occures, someone 1410 * fixes the root cause instead of looking away :) 1411 */ 1412 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", 1413 dep->name, req->trb); 1414 count = trb.length; 1415 1416 if (dep->direction) { 1417 if (count) { 1418 dev_err(dwc->dev, "incomplete IN transfer %s\n", 1419 dep->name); 1420 status = -ECONNRESET; 1421 } 1422 } else { 1423 if (count && (event->status & DEPEVT_STATUS_SHORT)) 1424 s_pkt = 1; 1425 } 1426 1427 /* 1428 * We assume here we will always receive the entire data block 1429 * which we should receive. Meaning, if we program RX to 1430 * receive 4K but we receive only 2K, we assume that's all we 1431 * should receive and we simply bounce the request back to the 1432 * gadget driver for further processing. 
1433 */ 1434 req->request.actual += req->request.length - count; 1435 dwc3_gadget_giveback(dep, req, status); 1436 if (s_pkt) 1437 break; 1438 if ((event->status & DEPEVT_STATUS_LST) && trb.lst) 1439 break; 1440 if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc) 1441 break; 1442 } while (1); 1443 1444 if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc) 1445 return 0; 1446 return 1; 1447} 1448 1449static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, 1450 struct dwc3_ep *dep, const struct dwc3_event_depevt *event, 1451 int start_new) 1452{ 1453 unsigned status = 0; 1454 int clean_busy; 1455 1456 if (event->status & DEPEVT_STATUS_BUSERR) 1457 status = -ECONNRESET; 1458 1459 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status); 1460 if (clean_busy) { 1461 dep->flags &= ~DWC3_EP_BUSY; 1462 dep->res_trans_idx = 0; 1463 } 1464 1465 /* 1466 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. 1467 * See dwc3_gadget_linksts_change_interrupt() for 1st half. 1468 */ 1469 if (dwc->revision < DWC3_REVISION_183A) { 1470 u32 reg; 1471 int i; 1472 1473 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 1474 struct dwc3_ep *dep = dwc->eps[i]; 1475 1476 if (!(dep->flags & DWC3_EP_ENABLED)) 1477 continue; 1478 1479 if (!list_empty(&dep->req_queued)) 1480 return; 1481 } 1482 1483 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1484 reg |= dwc->u1u2; 1485 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1486 1487 dwc->u1u2 = 0; 1488 } 1489} 1490 1491static void dwc3_gadget_start_isoc(struct dwc3 *dwc, 1492 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1493{ 1494 u32 uf; 1495 1496 if (list_empty(&dep->request_list)) { 1497 dev_vdbg(dwc->dev, "ISOC ep %s ran out of requests.\n", 1498 dep->name); 1499 return; 1500 } 1501 1502 if (event->parameters) { 1503 u32 mask; 1504 1505 mask = ~(dep->interval - 1); 1506 uf = event->parameters & mask; 1507 /* 4 micro frames in the future */ 1508 uf += dep->interval * 4; 1509 } else { 1510 uf = 0; 1511 } 1512 1513 __dwc3_gadget_kick_transfer(dep, uf, 1); 1514} 1515 1516static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep, 1517 const struct dwc3_event_depevt *event) 1518{ 1519 struct dwc3 *dwc = dep->dwc; 1520 struct dwc3_event_depevt mod_ev = *event; 1521 1522 /* 1523 * We were asked to remove one request. It is possible that this 1524 * request and a few others were started together and have the same 1525 * transfer index. Since we stopped the whole endpoint we don't 1526 * know how many requests have already completed (but not yet been 1527 * reported) and how many are still pending. We purge them all until 1528 * the end of the list. 
1529 */ 1530 mod_ev.status = DEPEVT_STATUS_LST; 1531 dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN); 1532 dep->flags &= ~DWC3_EP_BUSY; 1533 /* pending requests are ignored and are queued on XferNotReady */ 1534} 1535 1536static void dwc3_ep_cmd_compl(struct dwc3_ep *dep, 1537 const struct dwc3_event_depevt *event) 1538{ 1539 u32 param = event->parameters; 1540 u32 cmd_type = (param >> 8) & ((1 << 5) - 1); 1541 1542 switch (cmd_type) { 1543 case DWC3_DEPCMD_ENDTRANSFER: 1544 dwc3_process_ep_cmd_complete(dep, event); 1545 break; 1546 case DWC3_DEPCMD_STARTTRANSFER: 1547 dep->res_trans_idx = param & 0x7f; 1548 break; 1549 default: 1550 printk(KERN_ERR "%s() unknown/unexpected type: %d\n", 1551 __func__, cmd_type); 1552 break; 1553 } 1554} 1555 1556static void dwc3_endpoint_interrupt(struct dwc3 *dwc, 1557 const struct dwc3_event_depevt *event) 1558{ 1559 struct dwc3_ep *dep; 1560 u8 epnum = event->endpoint_number; 1561 1562 dep = dwc->eps[epnum]; 1563 1564 dev_vdbg(dwc->dev, "%s: %s\n", dep->name, 1565 dwc3_ep_event_string(event->endpoint_event)); 1566 1567 if (epnum == 0 || epnum == 1) { 1568 dwc3_ep0_interrupt(dwc, event); 1569 return; 1570 } 1571 1572 switch (event->endpoint_event) { 1573 case DWC3_DEPEVT_XFERCOMPLETE: 1574 if (usb_endpoint_xfer_isoc(dep->desc)) { 1575 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n", 1576 dep->name); 1577 return; 1578 } 1579 1580 dwc3_endpoint_transfer_complete(dwc, dep, event, 1); 1581 break; 1582 case DWC3_DEPEVT_XFERINPROGRESS: 1583 if (!usb_endpoint_xfer_isoc(dep->desc)) { 1584 dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n", 1585 dep->name); 1586 return; 1587 } 1588 1589 dwc3_endpoint_transfer_complete(dwc, dep, event, 0); 1590 break; 1591 case DWC3_DEPEVT_XFERNOTREADY: 1592 if (usb_endpoint_xfer_isoc(dep->desc)) { 1593 dwc3_gadget_start_isoc(dwc, dep, event); 1594 } else { 1595 int ret; 1596 1597 dev_vdbg(dwc->dev, "%s: reason %s\n", 1598 dep->name, event->status 1599 ? 
"Transfer Active" 1600 : "Transfer Not Active"); 1601 1602 ret = __dwc3_gadget_kick_transfer(dep, 0, 1); 1603 if (!ret || ret == -EBUSY) 1604 return; 1605 1606 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1607 dep->name); 1608 } 1609 1610 break; 1611 case DWC3_DEPEVT_STREAMEVT: 1612 if (!usb_endpoint_xfer_bulk(dep->desc)) { 1613 dev_err(dwc->dev, "Stream event for non-Bulk %s\n", 1614 dep->name); 1615 return; 1616 } 1617 1618 switch (event->status) { 1619 case DEPEVT_STREAMEVT_FOUND: 1620 dev_vdbg(dwc->dev, "Stream %d found and started\n", 1621 event->parameters); 1622 1623 break; 1624 case DEPEVT_STREAMEVT_NOTFOUND: 1625 /* FALLTHROUGH */ 1626 default: 1627 dev_dbg(dwc->dev, "Couldn't find suitable stream\n"); 1628 } 1629 break; 1630 case DWC3_DEPEVT_RXTXFIFOEVT: 1631 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name); 1632 break; 1633 case DWC3_DEPEVT_EPCMDCMPLT: 1634 dwc3_ep_cmd_compl(dep, event); 1635 break; 1636 } 1637} 1638 1639static void dwc3_disconnect_gadget(struct dwc3 *dwc) 1640{ 1641 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { 1642 spin_unlock(&dwc->lock); 1643 dwc->gadget_driver->disconnect(&dwc->gadget); 1644 spin_lock(&dwc->lock); 1645 } 1646} 1647 1648static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum) 1649{ 1650 struct dwc3_ep *dep; 1651 struct dwc3_gadget_ep_cmd_params params; 1652 u32 cmd; 1653 int ret; 1654 1655 dep = dwc->eps[epnum]; 1656 1657 WARN_ON(!dep->res_trans_idx); 1658 if (dep->res_trans_idx) { 1659 cmd = DWC3_DEPCMD_ENDTRANSFER; 1660 cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC; 1661 cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx); 1662 memset(¶ms, 0, sizeof(params)); 1663 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 1664 WARN_ON_ONCE(ret); 1665 dep->res_trans_idx = 0; 1666 } 1667} 1668 1669static void dwc3_stop_active_transfers(struct dwc3 *dwc) 1670{ 1671 u32 epnum; 1672 1673 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1674 struct dwc3_ep *dep; 1675 1676 dep = dwc->eps[epnum]; 1677 if (!(dep->flags & DWC3_EP_ENABLED)) 1678 continue; 1679 1680 dwc3_remove_requests(dwc, dep); 1681 } 1682} 1683 1684static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) 1685{ 1686 u32 epnum; 1687 1688 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1689 struct dwc3_ep *dep; 1690 struct dwc3_gadget_ep_cmd_params params; 1691 int ret; 1692 1693 dep = dwc->eps[epnum]; 1694 1695 if (!(dep->flags & DWC3_EP_STALL)) 1696 continue; 1697 1698 dep->flags &= ~DWC3_EP_STALL; 1699 1700 memset(¶ms, 0, sizeof(params)); 1701 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1702 DWC3_DEPCMD_CLEARSTALL, ¶ms); 1703 WARN_ON_ONCE(ret); 1704 } 1705} 1706 1707static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) 1708{ 1709 dev_vdbg(dwc->dev, "%s\n", __func__); 1710#if 0 1711 XXX 1712 U1/U2 is powersave optimization. Skip it for now. Anyway we need to 1713 enable it before we can disable it. 
1714 1715 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1716 reg &= ~DWC3_DCTL_INITU1ENA; 1717 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1718 1719 reg &= ~DWC3_DCTL_INITU2ENA; 1720 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1721#endif 1722 1723 dwc3_stop_active_transfers(dwc); 1724 dwc3_disconnect_gadget(dwc); 1725 dwc->start_config_issued = false; 1726 1727 dwc->gadget.speed = USB_SPEED_UNKNOWN; 1728 dwc->setup_packet_pending = false; 1729} 1730 1731static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on) 1732{ 1733 u32 reg; 1734 1735 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); 1736 1737 if (on) 1738 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY; 1739 else 1740 reg |= DWC3_GUSB3PIPECTL_SUSPHY; 1741 1742 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg); 1743} 1744 1745static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on) 1746{ 1747 u32 reg; 1748 1749 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 1750 1751 if (on) 1752 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; 1753 else 1754 reg |= DWC3_GUSB2PHYCFG_SUSPHY; 1755 1756 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 1757} 1758 1759static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) 1760{ 1761 u32 reg; 1762 1763 dev_vdbg(dwc->dev, "%s\n", __func__); 1764 1765 /* 1766 * WORKAROUND: DWC3 revisions <1.88a have an issue which 1767 * would cause a missing Disconnect Event if there's a 1768 * pending Setup Packet in the FIFO. 1769 * 1770 * There's no suggested workaround on the official Bug 1771 * report, which states that "unless the driver/application 1772 * is doing any special handling of a disconnect event, 1773 * there is no functional issue". 1774 * 1775 * Unfortunately, it turns out that we _do_ some special 1776 * handling of a disconnect event, namely complete all 1777 * pending transfers, notify gadget driver of the 1778 * disconnection, and so on. 1779 * 1780 * Our suggested workaround is to follow the Disconnect 1781 * Event steps here, instead, based on a setup_packet_pending 1782 * flag. Such flag gets set whenever we have a XferNotReady 1783 * event on EP0 and gets cleared on XferComplete for the 1784 * same endpoint. 1785 * 1786 * Refers to: 1787 * 1788 * STAR#9000466709: RTL: Device : Disconnect event not 1789 * generated if setup packet pending in FIFO 1790 */ 1791 if (dwc->revision < DWC3_REVISION_188A) { 1792 if (dwc->setup_packet_pending) 1793 dwc3_gadget_disconnect_interrupt(dwc); 1794 } 1795 1796 /* after reset -> Default State */ 1797 dwc->dev_state = DWC3_DEFAULT_STATE; 1798 1799 /* Enable PHYs */ 1800 dwc3_gadget_usb2_phy_power(dwc, true); 1801 dwc3_gadget_usb3_phy_power(dwc, true); 1802 1803 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) 1804 dwc3_disconnect_gadget(dwc); 1805 1806 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1807 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 1808 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1809 1810 dwc3_stop_active_transfers(dwc); 1811 dwc3_clear_stall_all_ep(dwc); 1812 dwc->start_config_issued = false; 1813 1814 /* Reset device address to zero */ 1815 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1816 reg &= ~(DWC3_DCFG_DEVADDR_MASK); 1817 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1818} 1819 1820static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed) 1821{ 1822 u32 reg; 1823 u32 usb30_clock = DWC3_GCTL_CLK_BUS; 1824 1825 /* 1826 * We change the clock only at SS but I dunno why I would want to do 1827 * this. Maybe it becomes part of the power saving plan. 
1828 */ 1829 1830 if (speed != DWC3_DSTS_SUPERSPEED) 1831 return; 1832 1833 /* 1834 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed 1835 * each time on Connect Done. 1836 */ 1837 if (!usb30_clock) 1838 return; 1839 1840 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 1841 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock); 1842 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 1843} 1844 1845static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed) 1846{ 1847 switch (speed) { 1848 case USB_SPEED_SUPER: 1849 dwc3_gadget_usb2_phy_power(dwc, false); 1850 break; 1851 case USB_SPEED_HIGH: 1852 case USB_SPEED_FULL: 1853 case USB_SPEED_LOW: 1854 dwc3_gadget_usb3_phy_power(dwc, false); 1855 break; 1856 } 1857} 1858 1859static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) 1860{ 1861 struct dwc3_gadget_ep_cmd_params params; 1862 struct dwc3_ep *dep; 1863 int ret; 1864 u32 reg; 1865 u8 speed; 1866 1867 dev_vdbg(dwc->dev, "%s\n", __func__); 1868 1869 memset(¶ms, 0x00, sizeof(params)); 1870 1871 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1872 speed = reg & DWC3_DSTS_CONNECTSPD; 1873 dwc->speed = speed; 1874 1875 dwc3_update_ram_clk_sel(dwc, speed); 1876 1877 switch (speed) { 1878 case DWC3_DCFG_SUPERSPEED: 1879 /* 1880 * WORKAROUND: DWC3 revisions <1.90a have an issue which 1881 * would cause a missing USB3 Reset event. 1882 * 1883 * In such situations, we should force a USB3 Reset 1884 * event by calling our dwc3_gadget_reset_interrupt() 1885 * routine. 1886 * 1887 * Refers to: 1888 * 1889 * STAR#9000483510: RTL: SS : USB3 reset event may 1890 * not be generated always when the link enters poll 1891 */ 1892 if (dwc->revision < DWC3_REVISION_190A) 1893 dwc3_gadget_reset_interrupt(dwc); 1894 1895 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1896 dwc->gadget.ep0->maxpacket = 512; 1897 dwc->gadget.speed = USB_SPEED_SUPER; 1898 break; 1899 case DWC3_DCFG_HIGHSPEED: 1900 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 1901 dwc->gadget.ep0->maxpacket = 64; 1902 dwc->gadget.speed = USB_SPEED_HIGH; 1903 break; 1904 case DWC3_DCFG_FULLSPEED2: 1905 case DWC3_DCFG_FULLSPEED1: 1906 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 1907 dwc->gadget.ep0->maxpacket = 64; 1908 dwc->gadget.speed = USB_SPEED_FULL; 1909 break; 1910 case DWC3_DCFG_LOWSPEED: 1911 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8); 1912 dwc->gadget.ep0->maxpacket = 8; 1913 dwc->gadget.speed = USB_SPEED_LOW; 1914 break; 1915 } 1916 1917 /* Disable unneded PHY */ 1918 dwc3_gadget_disable_phy(dwc, dwc->gadget.speed); 1919 1920 dep = dwc->eps[0]; 1921 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL); 1922 if (ret) { 1923 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1924 return; 1925 } 1926 1927 dep = dwc->eps[1]; 1928 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL); 1929 if (ret) { 1930 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1931 return; 1932 } 1933 1934 /* 1935 * Configure PHY via GUSB3PIPECTLn if required. 1936 * 1937 * Update GTXFIFOSIZn 1938 * 1939 * In both cases reset values should be sufficient. 1940 */ 1941} 1942 1943static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc) 1944{ 1945 dev_vdbg(dwc->dev, "%s\n", __func__); 1946 1947 /* 1948 * TODO take core out of low power mode when that's 1949 * implemented. 
1950 */ 1951 1952 dwc->gadget_driver->resume(&dwc->gadget); 1953} 1954 1955static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, 1956 unsigned int evtinfo) 1957{ 1958 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; 1959 1960 /* 1961 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending 1962 * on the link partner, the USB session might do multiple entry/exit 1963 * of low power states before a transfer takes place. 1964 * 1965 * Due to this problem, we might experience lower throughput. The 1966 * suggested workaround is to disable DCTL[12:9] bits if we're 1967 * transitioning from U1/U2 to U0 and enable those bits again 1968 * after a transfer completes and there are no pending transfers 1969 * on any of the enabled endpoints. 1970 * 1971 * This is the first half of that workaround. 1972 * 1973 * Refers to: 1974 * 1975 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us 1976 * core send LGO_Ux entering U0 1977 */ 1978 if (dwc->revision < DWC3_REVISION_183A) { 1979 if (next == DWC3_LINK_STATE_U0) { 1980 u32 u1u2; 1981 u32 reg; 1982 1983 switch (dwc->link_state) { 1984 case DWC3_LINK_STATE_U1: 1985 case DWC3_LINK_STATE_U2: 1986 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1987 u1u2 = reg & (DWC3_DCTL_INITU2ENA 1988 | DWC3_DCTL_ACCEPTU2ENA 1989 | DWC3_DCTL_INITU1ENA 1990 | DWC3_DCTL_ACCEPTU1ENA); 1991 1992 if (!dwc->u1u2) 1993 dwc->u1u2 = reg & u1u2; 1994 1995 reg &= ~u1u2; 1996 1997 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1998 break; 1999 default: 2000 /* do nothing */ 2001 break; 2002 } 2003 } 2004 } 2005 2006 dwc->link_state = next; 2007 2008 dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state); 2009} 2010 2011static void dwc3_gadget_interrupt(struct dwc3 *dwc, 2012 const struct dwc3_event_devt *event) 2013{ 2014 switch (event->type) { 2015 case DWC3_DEVICE_EVENT_DISCONNECT: 2016 dwc3_gadget_disconnect_interrupt(dwc); 2017 break; 2018 case DWC3_DEVICE_EVENT_RESET: 2019 dwc3_gadget_reset_interrupt(dwc); 2020 break; 2021 case DWC3_DEVICE_EVENT_CONNECT_DONE: 2022 dwc3_gadget_conndone_interrupt(dwc); 2023 break; 2024 case DWC3_DEVICE_EVENT_WAKEUP: 2025 dwc3_gadget_wakeup_interrupt(dwc); 2026 break; 2027 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE: 2028 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info); 2029 break; 2030 case DWC3_DEVICE_EVENT_EOPF: 2031 dev_vdbg(dwc->dev, "End of Periodic Frame\n"); 2032 break; 2033 case DWC3_DEVICE_EVENT_SOF: 2034 dev_vdbg(dwc->dev, "Start of Periodic Frame\n"); 2035 break; 2036 case DWC3_DEVICE_EVENT_ERRATIC_ERROR: 2037 dev_vdbg(dwc->dev, "Erratic Error\n"); 2038 break; 2039 case DWC3_DEVICE_EVENT_CMD_CMPL: 2040 dev_vdbg(dwc->dev, "Command Complete\n"); 2041 break; 2042 case DWC3_DEVICE_EVENT_OVERFLOW: 2043 dev_vdbg(dwc->dev, "Overflow\n"); 2044 break; 2045 default: 2046 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type); 2047 } 2048} 2049 2050static void dwc3_process_event_entry(struct dwc3 *dwc, 2051 const union dwc3_event *event) 2052{ 2053 /* Endpoint IRQ, handle it and return early */ 2054 if (event->type.is_devspec == 0) { 2055 /* depevt */ 2056 return dwc3_endpoint_interrupt(dwc, &event->depevt); 2057 } 2058 2059 switch (event->type.type) { 2060 case DWC3_EVENT_TYPE_DEV: 2061 dwc3_gadget_interrupt(dwc, &event->devt); 2062 break; 2063 /* REVISIT what to do with Carkit and I2C events ? 
*/ 2064 default: 2065 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw); 2066 } 2067} 2068 2069static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf) 2070{ 2071 struct dwc3_event_buffer *evt; 2072 int left; 2073 u32 count; 2074 2075 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf)); 2076 count &= DWC3_GEVNTCOUNT_MASK; 2077 if (!count) 2078 return IRQ_NONE; 2079 2080 evt = dwc->ev_buffs[buf]; 2081 left = count; 2082 2083 while (left > 0) { 2084 union dwc3_event event; 2085 2086 memcpy(&event.raw, (evt->buf + evt->lpos), sizeof(event.raw)); 2087 dwc3_process_event_entry(dwc, &event); 2088 /* 2089 * XXX we wrap around correctly to the next entry as almost all 2090 * entries are 4 bytes in size. There is one entry which has 12 2091 * bytes which is a regular entry followed by 8 bytes data. ATM 2092 * I don't know how things are organized if were get next to the 2093 * a boundary so I worry about that once we try to handle that. 2094 */ 2095 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE; 2096 left -= 4; 2097 2098 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4); 2099 } 2100 2101 return IRQ_HANDLED; 2102} 2103 2104static irqreturn_t dwc3_interrupt(int irq, void *_dwc) 2105{ 2106 struct dwc3 *dwc = _dwc; 2107 int i; 2108 irqreturn_t ret = IRQ_NONE; 2109 2110 spin_lock(&dwc->lock); 2111 2112 for (i = 0; i < dwc->num_event_buffers; i++) { 2113 irqreturn_t status; 2114 2115 status = dwc3_process_event_buf(dwc, i); 2116 if (status == IRQ_HANDLED) 2117 ret = status; 2118 } 2119 2120 spin_unlock(&dwc->lock); 2121 2122 return ret; 2123} 2124 2125/** 2126 * dwc3_gadget_init - Initializes gadget related registers 2127 * @dwc: Pointer to out controller context structure 2128 * 2129 * Returns 0 on success otherwise negative errno. 2130 */ 2131int __devinit dwc3_gadget_init(struct dwc3 *dwc) 2132{ 2133 u32 reg; 2134 int ret; 2135 int irq; 2136 2137 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2138 &dwc->ctrl_req_addr, GFP_KERNEL); 2139 if (!dwc->ctrl_req) { 2140 dev_err(dwc->dev, "failed to allocate ctrl request\n"); 2141 ret = -ENOMEM; 2142 goto err0; 2143 } 2144 2145 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2146 &dwc->ep0_trb_addr, GFP_KERNEL); 2147 if (!dwc->ep0_trb) { 2148 dev_err(dwc->dev, "failed to allocate ep0 trb\n"); 2149 ret = -ENOMEM; 2150 goto err1; 2151 } 2152 2153 dwc->setup_buf = dma_alloc_coherent(dwc->dev, 2154 sizeof(*dwc->setup_buf) * 2, 2155 &dwc->setup_buf_addr, GFP_KERNEL); 2156 if (!dwc->setup_buf) { 2157 dev_err(dwc->dev, "failed to allocate setup buffer\n"); 2158 ret = -ENOMEM; 2159 goto err2; 2160 } 2161 2162 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev, 2163 512, &dwc->ep0_bounce_addr, GFP_KERNEL); 2164 if (!dwc->ep0_bounce) { 2165 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n"); 2166 ret = -ENOMEM; 2167 goto err3; 2168 } 2169 2170 dev_set_name(&dwc->gadget.dev, "gadget"); 2171 2172 dwc->gadget.ops = &dwc3_gadget_ops; 2173 dwc->gadget.max_speed = USB_SPEED_SUPER; 2174 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2175 dwc->gadget.dev.parent = dwc->dev; 2176 dwc->gadget.sg_supported = true; 2177 2178 dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask); 2179 2180 dwc->gadget.dev.dma_parms = dwc->dev->dma_parms; 2181 dwc->gadget.dev.dma_mask = dwc->dev->dma_mask; 2182 dwc->gadget.dev.release = dwc3_gadget_release; 2183 dwc->gadget.name = "dwc3-gadget"; 2184 2185 /* 2186 * REVISIT: Here we should clear all pending IRQs to be 2187 * sure we're starting from a well known location. 
2188 */ 2189 2190 ret = dwc3_gadget_init_endpoints(dwc); 2191 if (ret) 2192 goto err4; 2193 2194 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 2195 2196 ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED, 2197 "dwc3", dwc); 2198 if (ret) { 2199 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 2200 irq, ret); 2201 goto err5; 2202 } 2203 2204 /* Enable all but Start and End of Frame IRQs */ 2205 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | 2206 DWC3_DEVTEN_EVNTOVERFLOWEN | 2207 DWC3_DEVTEN_CMDCMPLTEN | 2208 DWC3_DEVTEN_ERRTICERREN | 2209 DWC3_DEVTEN_WKUPEVTEN | 2210 DWC3_DEVTEN_ULSTCNGEN | 2211 DWC3_DEVTEN_CONNECTDONEEN | 2212 DWC3_DEVTEN_USBRSTEN | 2213 DWC3_DEVTEN_DISCONNEVTEN); 2214 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); 2215 2216 ret = device_register(&dwc->gadget.dev); 2217 if (ret) { 2218 dev_err(dwc->dev, "failed to register gadget device\n"); 2219 put_device(&dwc->gadget.dev); 2220 goto err6; 2221 } 2222 2223 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 2224 if (ret) { 2225 dev_err(dwc->dev, "failed to register udc\n"); 2226 goto err7; 2227 } 2228 2229 return 0; 2230 2231err7: 2232 device_unregister(&dwc->gadget.dev); 2233 2234err6: 2235 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 2236 free_irq(irq, dwc); 2237 2238err5: 2239 dwc3_gadget_free_endpoints(dwc); 2240 2241err4: 2242 dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce, 2243 dwc->ep0_bounce_addr); 2244 2245err3: 2246 dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2, 2247 dwc->setup_buf, dwc->setup_buf_addr); 2248 2249err2: 2250 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2251 dwc->ep0_trb, dwc->ep0_trb_addr); 2252 2253err1: 2254 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2255 dwc->ctrl_req, dwc->ctrl_req_addr); 2256 2257err0: 2258 return ret; 2259} 2260 2261void dwc3_gadget_exit(struct dwc3 *dwc) 2262{ 2263 int irq; 2264 2265 usb_del_gadget_udc(&dwc->gadget); 2266 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 2267 2268 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 2269 free_irq(irq, dwc); 2270 2271 dwc3_gadget_free_endpoints(dwc); 2272 2273 dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce, 2274 dwc->ep0_bounce_addr); 2275 2276 dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2, 2277 dwc->setup_buf, dwc->setup_buf_addr); 2278 2279 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2280 dwc->ep0_trb, dwc->ep0_trb_addr); 2281 2282 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2283 dwc->ctrl_req, dwc->ctrl_req_addr); 2284 2285 device_unregister(&dwc->gadget.dev); 2286} 2287
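The trickiest logic in this file is the free-running busy_slot/free_slot ring accounting used by dwc3_prepare_trbs(), dwc3_prepare_one_trb() and dwc3_gadget_giveback(). The following standalone C sketch (not part of gadget.c) models only that arithmetic so it can be stepped through in isolation. It assumes DWC3_TRB_NUM is 32, matching gadget.h of this revision; the helper trbs_left() and the Link-TRB slot skip mirror the driver's logic but are an illustration, not the driver itself.

/* standalone illustration only -- not part of gadget.c */
#include <stdio.h>

#define DWC3_TRB_NUM	32			/* assumed to match gadget.h */
#define DWC3_TRB_MASK	(DWC3_TRB_NUM - 1)

/* same modulo arithmetic dwc3_prepare_trbs() uses to size its work;
 * 0 means the ring is either completely full or completely empty */
static unsigned int trbs_left(unsigned int busy_slot, unsigned int free_slot)
{
	return (busy_slot - free_slot) & DWC3_TRB_MASK;
}

int main(void)
{
	/* isochronous endpoints start at slot 1; slot 31 holds the Link TRB */
	unsigned int busy_slot = 1, free_slot = 1;
	int i;

	for (i = 0; i < 5; i++) {		/* software prepares five TRBs */
		if ((free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1)
			free_slot++;		/* skip the Link TRB slot */
		free_slot++;
	}

	busy_slot += 2;				/* hardware completes two of them */

	printf("TRBs still owned by hardware: %u\n",
			(free_slot - busy_slot) & DWC3_TRB_MASK);
	printf("TRB slots free for new requests: %u\n",
			trbs_left(busy_slot, free_slot));
	return 0;
}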