gadget.c revision 78c58a53c9864447f2a46d4c06dd3c2616823ad2
1/** 2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link 3 * 4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com 5 * All rights reserved. 6 * 7 * Authors: Felipe Balbi <balbi@ti.com>, 8 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. The names of the above-listed copyright holders may not be used 20 * to endorse or promote products derived from this software without 21 * specific prior written permission. 22 * 23 * ALTERNATIVELY, this software may be distributed under the terms of the 24 * GNU General Public License ("GPL") version 2, as published by the Free 25 * Software Foundation. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 28 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 29 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 30 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR 31 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 32 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 33 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 34 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 35 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 36 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40#include <linux/kernel.h> 41#include <linux/delay.h> 42#include <linux/slab.h> 43#include <linux/spinlock.h> 44#include <linux/platform_device.h> 45#include <linux/pm_runtime.h> 46#include <linux/interrupt.h> 47#include <linux/io.h> 48#include <linux/list.h> 49#include <linux/dma-mapping.h> 50 51#include <linux/usb/ch9.h> 52#include <linux/usb/gadget.h> 53 54#include "core.h" 55#include "gadget.h" 56#include "io.h" 57 58#define DMA_ADDR_INVALID (~(dma_addr_t)0) 59 60void dwc3_map_buffer_to_dma(struct dwc3_request *req) 61{ 62 struct dwc3 *dwc = req->dep->dwc; 63 64 if (req->request.length == 0) { 65 /* req->request.dma = dwc->setup_buf_addr; */ 66 return; 67 } 68 69 if (req->request.dma == DMA_ADDR_INVALID) { 70 req->request.dma = dma_map_single(dwc->dev, req->request.buf, 71 req->request.length, req->direction 72 ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 73 req->mapped = true; 74 } else { 75 dma_sync_single_for_device(dwc->dev, req->request.dma, 76 req->request.length, req->direction 77 ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 78 req->mapped = false; 79 } 80} 81 82void dwc3_unmap_buffer_from_dma(struct dwc3_request *req) 83{ 84 struct dwc3 *dwc = req->dep->dwc; 85 86 if (req->request.length == 0) { 87 req->request.dma = DMA_ADDR_INVALID; 88 return; 89 } 90 91 if (req->mapped) { 92 dma_unmap_single(dwc->dev, req->request.dma, 93 req->request.length, req->direction 94 ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); 95 req->mapped = 0; 96 req->request.dma = DMA_ADDR_INVALID; 97 } else { 98 dma_sync_single_for_cpu(dwc->dev, req->request.dma, 99 req->request.length, req->direction 100 ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 101 } 102} 103 104void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, 105 int status) 106{ 107 struct dwc3 *dwc = dep->dwc; 108 109 if (req->queued) { 110 dep->busy_slot++; 111 /* 112 * Skip LINK TRB. We can't use req->trb and check for 113 * DWC3_TRBCTL_LINK_TRB because it points the TRB we just 114 * completed (not the LINK TRB). 115 */ 116 if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && 117 usb_endpoint_xfer_isoc(dep->desc)) 118 dep->busy_slot++; 119 } 120 list_del(&req->list); 121 122 if (req->request.status == -EINPROGRESS) 123 req->request.status = status; 124 125 dwc3_unmap_buffer_from_dma(req); 126 127 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", 128 req, dep->name, req->request.actual, 129 req->request.length, status); 130 131 spin_unlock(&dwc->lock); 132 req->request.complete(&req->dep->endpoint, &req->request); 133 spin_lock(&dwc->lock); 134} 135 136static const char *dwc3_gadget_ep_cmd_string(u8 cmd) 137{ 138 switch (cmd) { 139 case DWC3_DEPCMD_DEPSTARTCFG: 140 return "Start New Configuration"; 141 case DWC3_DEPCMD_ENDTRANSFER: 142 return "End Transfer"; 143 case DWC3_DEPCMD_UPDATETRANSFER: 144 return "Update Transfer"; 145 case DWC3_DEPCMD_STARTTRANSFER: 146 return "Start Transfer"; 147 case DWC3_DEPCMD_CLEARSTALL: 148 return "Clear Stall"; 149 case DWC3_DEPCMD_SETSTALL: 150 return "Set Stall"; 151 case DWC3_DEPCMD_GETSEQNUMBER: 152 return "Get Data Sequence Number"; 153 case DWC3_DEPCMD_SETTRANSFRESOURCE: 154 return "Set Endpoint Transfer Resource"; 155 case DWC3_DEPCMD_SETEPCONFIG: 156 return "Set Endpoint Configuration"; 157 default: 158 return "UNKNOWN command"; 159 } 160} 161 162int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, 163 unsigned cmd, 
struct dwc3_gadget_ep_cmd_params *params) 164{ 165 struct dwc3_ep *dep = dwc->eps[ep]; 166 u32 timeout = 500; 167 u32 reg; 168 169 dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n", 170 dep->name, 171 dwc3_gadget_ep_cmd_string(cmd), params->param0.raw, 172 params->param1.raw, params->param2.raw); 173 174 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0.raw); 175 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1.raw); 176 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2.raw); 177 178 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT); 179 do { 180 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep)); 181 if (!(reg & DWC3_DEPCMD_CMDACT)) { 182 dev_vdbg(dwc->dev, "Command Complete --> %d\n", 183 DWC3_DEPCMD_STATUS(reg)); 184 return 0; 185 } 186 187 /* 188 * We can't sleep here, because it is also called from 189 * interrupt context. 190 */ 191 timeout--; 192 if (!timeout) 193 return -ETIMEDOUT; 194 195 udelay(1); 196 } while (1); 197} 198 199static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 200 struct dwc3_trb_hw *trb) 201{ 202 u32 offset = trb - dep->trb_pool; 203 204 return dep->trb_pool_dma + offset; 205} 206 207static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) 208{ 209 struct dwc3 *dwc = dep->dwc; 210 211 if (dep->trb_pool) 212 return 0; 213 214 if (dep->number == 0 || dep->number == 1) 215 return 0; 216 217 dep->trb_pool = dma_alloc_coherent(dwc->dev, 218 sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 219 &dep->trb_pool_dma, GFP_KERNEL); 220 if (!dep->trb_pool) { 221 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", 222 dep->name); 223 return -ENOMEM; 224 } 225 226 return 0; 227} 228 229static void dwc3_free_trb_pool(struct dwc3_ep *dep) 230{ 231 struct dwc3 *dwc = dep->dwc; 232 233 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 234 dep->trb_pool, dep->trb_pool_dma); 235 236 dep->trb_pool = NULL; 237 dep->trb_pool_dma = 0; 238} 239 240static int dwc3_gadget_start_config(struct dwc3 
*dwc, struct dwc3_ep *dep) 241{ 242 struct dwc3_gadget_ep_cmd_params params; 243 u32 cmd; 244 245 memset(¶ms, 0x00, sizeof(params)); 246 247 if (dep->number != 1) { 248 cmd = DWC3_DEPCMD_DEPSTARTCFG; 249 /* XferRscIdx == 0 for ep0 and 2 for the remaining */ 250 if (dep->number > 1) 251 cmd |= DWC3_DEPCMD_PARAM(2); 252 253 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, ¶ms); 254 } 255 256 return 0; 257} 258 259static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, 260 const struct usb_endpoint_descriptor *desc) 261{ 262 struct dwc3_gadget_ep_cmd_params params; 263 264 memset(¶ms, 0x00, sizeof(params)); 265 266 params.param0.depcfg.ep_type = usb_endpoint_type(desc); 267 params.param0.depcfg.max_packet_size = usb_endpoint_maxp(desc); 268 269 params.param1.depcfg.xfer_complete_enable = true; 270 params.param1.depcfg.xfer_not_ready_enable = true; 271 272 if (usb_endpoint_xfer_isoc(desc)) 273 params.param1.depcfg.xfer_in_progress_enable = true; 274 275 /* 276 * We are doing 1:1 mapping for endpoints, meaning 277 * Physical Endpoints 2 maps to Logical Endpoint 2 and 278 * so on. We consider the direction bit as part of the physical 279 * endpoint number. So USB endpoint 0x81 is 0x03. 
280 */ 281 params.param1.depcfg.ep_number = dep->number; 282 283 /* 284 * We must use the lower 16 TX FIFOs even though 285 * HW might have more 286 */ 287 if (dep->direction) 288 params.param0.depcfg.fifo_number = dep->number >> 1; 289 290 if (desc->bInterval) { 291 params.param1.depcfg.binterval_m1 = desc->bInterval - 1; 292 dep->interval = 1 << (desc->bInterval - 1); 293 } 294 295 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 296 DWC3_DEPCMD_SETEPCONFIG, ¶ms); 297} 298 299static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) 300{ 301 struct dwc3_gadget_ep_cmd_params params; 302 303 memset(¶ms, 0x00, sizeof(params)); 304 305 params.param0.depxfercfg.number_xfer_resources = 1; 306 307 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 308 DWC3_DEPCMD_SETTRANSFRESOURCE, ¶ms); 309} 310 311/** 312 * __dwc3_gadget_ep_enable - Initializes a HW endpoint 313 * @dep: endpoint to be initialized 314 * @desc: USB Endpoint Descriptor 315 * 316 * Caller should take care of locking 317 */ 318static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, 319 const struct usb_endpoint_descriptor *desc) 320{ 321 struct dwc3 *dwc = dep->dwc; 322 u32 reg; 323 int ret = -ENOMEM; 324 325 if (!(dep->flags & DWC3_EP_ENABLED)) { 326 ret = dwc3_gadget_start_config(dwc, dep); 327 if (ret) 328 return ret; 329 } 330 331 ret = dwc3_gadget_set_ep_config(dwc, dep, desc); 332 if (ret) 333 return ret; 334 335 if (!(dep->flags & DWC3_EP_ENABLED)) { 336 struct dwc3_trb_hw *trb_st_hw; 337 struct dwc3_trb_hw *trb_link_hw; 338 struct dwc3_trb trb_link; 339 340 ret = dwc3_gadget_set_xfer_resource(dwc, dep); 341 if (ret) 342 return ret; 343 344 dep->desc = desc; 345 dep->type = usb_endpoint_type(desc); 346 dep->flags |= DWC3_EP_ENABLED; 347 348 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 349 reg |= DWC3_DALEPENA_EP(dep->number); 350 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 351 352 if (!usb_endpoint_xfer_isoc(desc)) 353 return 0; 354 355 memset(&trb_link, 0, sizeof(trb_link)); 356 
357 /* Link TRB for ISOC. The HWO but is never reset */ 358 trb_st_hw = &dep->trb_pool[0]; 359 360 trb_link.bplh = dwc3_trb_dma_offset(dep, trb_st_hw); 361 trb_link.trbctl = DWC3_TRBCTL_LINK_TRB; 362 trb_link.hwo = true; 363 364 trb_link_hw = &dep->trb_pool[DWC3_TRB_NUM - 1]; 365 dwc3_trb_to_hw(&trb_link, trb_link_hw); 366 } 367 368 return 0; 369} 370 371static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum); 372static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) 373{ 374 struct dwc3_request *req; 375 376 if (!list_empty(&dep->req_queued)) 377 dwc3_stop_active_transfer(dwc, dep->number); 378 379 while (!list_empty(&dep->request_list)) { 380 req = next_request(&dep->request_list); 381 382 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 383 } 384} 385 386/** 387 * __dwc3_gadget_ep_disable - Disables a HW endpoint 388 * @dep: the endpoint to disable 389 * 390 * This function also removes requests which are currently processed ny the 391 * hardware and those which are not yet scheduled. 392 * Caller should take care of locking. 
393 */ 394static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) 395{ 396 struct dwc3 *dwc = dep->dwc; 397 u32 reg; 398 399 dep->flags &= ~DWC3_EP_ENABLED; 400 dwc3_remove_requests(dwc, dep); 401 402 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 403 reg &= ~DWC3_DALEPENA_EP(dep->number); 404 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 405 406 dep->desc = NULL; 407 dep->type = 0; 408 409 return 0; 410} 411 412/* -------------------------------------------------------------------------- */ 413 414static int dwc3_gadget_ep0_enable(struct usb_ep *ep, 415 const struct usb_endpoint_descriptor *desc) 416{ 417 return -EINVAL; 418} 419 420static int dwc3_gadget_ep0_disable(struct usb_ep *ep) 421{ 422 return -EINVAL; 423} 424 425/* -------------------------------------------------------------------------- */ 426 427static int dwc3_gadget_ep_enable(struct usb_ep *ep, 428 const struct usb_endpoint_descriptor *desc) 429{ 430 struct dwc3_ep *dep; 431 struct dwc3 *dwc; 432 unsigned long flags; 433 int ret; 434 435 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 436 pr_debug("dwc3: invalid parameters\n"); 437 return -EINVAL; 438 } 439 440 if (!desc->wMaxPacketSize) { 441 pr_debug("dwc3: missing wMaxPacketSize\n"); 442 return -EINVAL; 443 } 444 445 dep = to_dwc3_ep(ep); 446 dwc = dep->dwc; 447 448 switch (usb_endpoint_type(desc)) { 449 case USB_ENDPOINT_XFER_CONTROL: 450 strncat(dep->name, "-control", sizeof(dep->name)); 451 break; 452 case USB_ENDPOINT_XFER_ISOC: 453 strncat(dep->name, "-isoc", sizeof(dep->name)); 454 break; 455 case USB_ENDPOINT_XFER_BULK: 456 strncat(dep->name, "-bulk", sizeof(dep->name)); 457 break; 458 case USB_ENDPOINT_XFER_INT: 459 strncat(dep->name, "-int", sizeof(dep->name)); 460 break; 461 default: 462 dev_err(dwc->dev, "invalid endpoint transfer type\n"); 463 } 464 465 if (dep->flags & DWC3_EP_ENABLED) { 466 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n", 467 dep->name); 468 return 0; 469 } 470 471 dev_vdbg(dwc->dev, "Enabling 
%s\n", dep->name); 472 473 spin_lock_irqsave(&dwc->lock, flags); 474 ret = __dwc3_gadget_ep_enable(dep, desc); 475 spin_unlock_irqrestore(&dwc->lock, flags); 476 477 return ret; 478} 479 480static int dwc3_gadget_ep_disable(struct usb_ep *ep) 481{ 482 struct dwc3_ep *dep; 483 struct dwc3 *dwc; 484 unsigned long flags; 485 int ret; 486 487 if (!ep) { 488 pr_debug("dwc3: invalid parameters\n"); 489 return -EINVAL; 490 } 491 492 dep = to_dwc3_ep(ep); 493 dwc = dep->dwc; 494 495 if (!(dep->flags & DWC3_EP_ENABLED)) { 496 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n", 497 dep->name); 498 return 0; 499 } 500 501 snprintf(dep->name, sizeof(dep->name), "ep%d%s", 502 dep->number >> 1, 503 (dep->number & 1) ? "in" : "out"); 504 505 spin_lock_irqsave(&dwc->lock, flags); 506 ret = __dwc3_gadget_ep_disable(dep); 507 spin_unlock_irqrestore(&dwc->lock, flags); 508 509 return ret; 510} 511 512static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, 513 gfp_t gfp_flags) 514{ 515 struct dwc3_request *req; 516 struct dwc3_ep *dep = to_dwc3_ep(ep); 517 struct dwc3 *dwc = dep->dwc; 518 519 req = kzalloc(sizeof(*req), gfp_flags); 520 if (!req) { 521 dev_err(dwc->dev, "not enough memory\n"); 522 return NULL; 523 } 524 525 req->epnum = dep->number; 526 req->dep = dep; 527 req->request.dma = DMA_ADDR_INVALID; 528 529 return &req->request; 530} 531 532static void dwc3_gadget_ep_free_request(struct usb_ep *ep, 533 struct usb_request *request) 534{ 535 struct dwc3_request *req = to_dwc3_request(request); 536 537 kfree(req); 538} 539 540/* 541 * dwc3_prepare_trbs - setup TRBs from requests 542 * @dep: endpoint for which requests are being prepared 543 * @starting: true if the endpoint is idle and no requests are queued. 544 * 545 * The functions goes through the requests list and setups TRBs for the 546 * transfers. The functions returns once there are not more TRBs available or 547 * it run out of requests. 
548 */ 549static struct dwc3_request *dwc3_prepare_trbs(struct dwc3_ep *dep, 550 bool starting) 551{ 552 struct dwc3_request *req, *n, *ret = NULL; 553 struct dwc3_trb_hw *trb_hw; 554 struct dwc3_trb trb; 555 u32 trbs_left; 556 557 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); 558 559 /* the first request must not be queued */ 560 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK; 561 /* 562 * if busy & slot are equal than it is either full or empty. If we are 563 * starting to proceed requests then we are empty. Otherwise we ar 564 * full and don't do anything 565 */ 566 if (!trbs_left) { 567 if (!starting) 568 return NULL; 569 trbs_left = DWC3_TRB_NUM; 570 /* 571 * In case we start from scratch, we queue the ISOC requests 572 * starting from slot 1. This is done because we use ring 573 * buffer and have no LST bit to stop us. Instead, we place 574 * IOC bit TRB_NUM/4. We try to avoid to having an interrupt 575 * after the first request so we start at slot 1 and have 576 * 7 requests proceed before we hit the first IOC. 577 * Other transfer types don't use the ring buffer and are 578 * processed from the first TRB until the last one. Since we 579 * don't wrap around we have to start at the beginning. 
580 */ 581 if (usb_endpoint_xfer_isoc(dep->desc)) { 582 dep->busy_slot = 1; 583 dep->free_slot = 1; 584 } else { 585 dep->busy_slot = 0; 586 dep->free_slot = 0; 587 } 588 } 589 590 /* The last TRB is a link TRB, not used for xfer */ 591 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc)) 592 return NULL; 593 594 list_for_each_entry_safe(req, n, &dep->request_list, list) { 595 unsigned int last_one = 0; 596 unsigned int cur_slot; 597 598 trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; 599 cur_slot = dep->free_slot; 600 dep->free_slot++; 601 602 /* Skip the LINK-TRB on ISOC */ 603 if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && 604 usb_endpoint_xfer_isoc(dep->desc)) 605 continue; 606 607 dwc3_gadget_move_request_queued(req); 608 memset(&trb, 0, sizeof(trb)); 609 trbs_left--; 610 611 /* Is our TRB pool empty? */ 612 if (!trbs_left) 613 last_one = 1; 614 /* Is this the last request? */ 615 if (list_empty(&dep->request_list)) 616 last_one = 1; 617 618 /* 619 * FIXME we shouldn't need to set LST bit always but we are 620 * facing some weird problem with the Hardware where it doesn't 621 * complete even though it has been previously started. 622 * 623 * While we're debugging the problem, as a workaround to 624 * multiple TRBs handling, use only one TRB at a time. 
625 */ 626 last_one = 1; 627 628 req->trb = trb_hw; 629 if (!ret) 630 ret = req; 631 632 trb.bplh = req->request.dma; 633 634 if (usb_endpoint_xfer_isoc(dep->desc)) { 635 trb.isp_imi = true; 636 trb.csp = true; 637 } else { 638 trb.lst = last_one; 639 } 640 641 switch (usb_endpoint_type(dep->desc)) { 642 case USB_ENDPOINT_XFER_CONTROL: 643 trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP; 644 break; 645 646 case USB_ENDPOINT_XFER_ISOC: 647 trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 648 649 /* IOC every DWC3_TRB_NUM / 4 so we can refill */ 650 if (!(cur_slot % (DWC3_TRB_NUM / 4))) 651 trb.ioc = last_one; 652 break; 653 654 case USB_ENDPOINT_XFER_BULK: 655 case USB_ENDPOINT_XFER_INT: 656 trb.trbctl = DWC3_TRBCTL_NORMAL; 657 break; 658 default: 659 /* 660 * This is only possible with faulty memory because we 661 * checked it already :) 662 */ 663 BUG(); 664 } 665 666 trb.length = req->request.length; 667 trb.hwo = true; 668 669 dwc3_trb_to_hw(&trb, trb_hw); 670 req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw); 671 672 if (last_one) 673 break; 674 } 675 676 return ret; 677} 678 679static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param, 680 int start_new) 681{ 682 struct dwc3_gadget_ep_cmd_params params; 683 struct dwc3_request *req; 684 struct dwc3 *dwc = dep->dwc; 685 int ret; 686 u32 cmd; 687 688 if (start_new && (dep->flags & DWC3_EP_BUSY)) { 689 dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name); 690 return -EBUSY; 691 } 692 dep->flags &= ~DWC3_EP_PENDING_REQUEST; 693 694 /* 695 * If we are getting here after a short-out-packet we don't enqueue any 696 * new requests as we try to set the IOC bit only on the last request. 
697 */ 698 if (start_new) { 699 if (list_empty(&dep->req_queued)) 700 dwc3_prepare_trbs(dep, start_new); 701 702 /* req points to the first request which will be sent */ 703 req = next_request(&dep->req_queued); 704 } else { 705 /* 706 * req points to the first request where HWO changed 707 * from 0 to 1 708 */ 709 req = dwc3_prepare_trbs(dep, start_new); 710 } 711 if (!req) { 712 dep->flags |= DWC3_EP_PENDING_REQUEST; 713 return 0; 714 } 715 716 memset(¶ms, 0, sizeof(params)); 717 params.param0.depstrtxfer.transfer_desc_addr_high = 718 upper_32_bits(req->trb_dma); 719 params.param1.depstrtxfer.transfer_desc_addr_low = 720 lower_32_bits(req->trb_dma); 721 722 if (start_new) 723 cmd = DWC3_DEPCMD_STARTTRANSFER; 724 else 725 cmd = DWC3_DEPCMD_UPDATETRANSFER; 726 727 cmd |= DWC3_DEPCMD_PARAM(cmd_param); 728 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 729 if (ret < 0) { 730 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n"); 731 732 /* 733 * FIXME we need to iterate over the list of requests 734 * here and stop, unmap, free and del each of the linked 735 * requests instead of we do now. 736 */ 737 dwc3_unmap_buffer_from_dma(req); 738 list_del(&req->list); 739 return ret; 740 } 741 742 dep->flags |= DWC3_EP_BUSY; 743 dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc, 744 dep->number); 745 if (!dep->res_trans_idx) 746 printk_once(KERN_ERR "%s() res_trans_idx is invalid\n", __func__); 747 return 0; 748} 749 750static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) 751{ 752 req->request.actual = 0; 753 req->request.status = -EINPROGRESS; 754 req->direction = dep->direction; 755 req->epnum = dep->number; 756 757 /* 758 * We only add to our list of requests now and 759 * start consuming the list once we get XferNotReady 760 * IRQ. 761 * 762 * That way, we avoid doing anything that we don't need 763 * to do now and defer it until the point we receive a 764 * particular token from the Host side. 
765 * 766 * This will also avoid Host cancelling URBs due to too 767 * many NACKs. 768 */ 769 dwc3_map_buffer_to_dma(req); 770 list_add_tail(&req->list, &dep->request_list); 771 772 /* 773 * There is one special case: XferNotReady with 774 * empty list of requests. We need to kick the 775 * transfer here in that situation, otherwise 776 * we will be NAKing forever. 777 * 778 * If we get XferNotReady before gadget driver 779 * has a chance to queue a request, we will ACK 780 * the IRQ but won't be able to receive the data 781 * until the next request is queued. The following 782 * code is handling exactly that. 783 */ 784 if (dep->flags & DWC3_EP_PENDING_REQUEST) { 785 int ret; 786 int start_trans; 787 788 start_trans = 1; 789 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 790 dep->flags & DWC3_EP_BUSY) 791 start_trans = 0; 792 793 ret = __dwc3_gadget_kick_transfer(dep, 0, start_trans); 794 if (ret && ret != -EBUSY) { 795 struct dwc3 *dwc = dep->dwc; 796 797 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 798 dep->name); 799 } 800 }; 801 802 return 0; 803} 804 805static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 806 gfp_t gfp_flags) 807{ 808 struct dwc3_request *req = to_dwc3_request(request); 809 struct dwc3_ep *dep = to_dwc3_ep(ep); 810 struct dwc3 *dwc = dep->dwc; 811 812 unsigned long flags; 813 814 int ret; 815 816 if (!dep->desc) { 817 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n", 818 request, ep->name); 819 return -ESHUTDOWN; 820 } 821 822 dev_vdbg(dwc->dev, "queing request %p to %s length %d\n", 823 request, ep->name, request->length); 824 825 spin_lock_irqsave(&dwc->lock, flags); 826 ret = __dwc3_gadget_ep_queue(dep, req); 827 spin_unlock_irqrestore(&dwc->lock, flags); 828 829 return ret; 830} 831 832static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, 833 struct usb_request *request) 834{ 835 struct dwc3_request *req = to_dwc3_request(request); 836 struct dwc3_request *r = NULL; 837 838 struct dwc3_ep 
*dep = to_dwc3_ep(ep); 839 struct dwc3 *dwc = dep->dwc; 840 841 unsigned long flags; 842 int ret = 0; 843 844 spin_lock_irqsave(&dwc->lock, flags); 845 846 list_for_each_entry(r, &dep->request_list, list) { 847 if (r == req) 848 break; 849 } 850 851 if (r != req) { 852 list_for_each_entry(r, &dep->req_queued, list) { 853 if (r == req) 854 break; 855 } 856 if (r == req) { 857 /* wait until it is processed */ 858 dwc3_stop_active_transfer(dwc, dep->number); 859 goto out0; 860 } 861 dev_err(dwc->dev, "request %p was not queued to %s\n", 862 request, ep->name); 863 ret = -EINVAL; 864 goto out0; 865 } 866 867 /* giveback the request */ 868 dwc3_gadget_giveback(dep, req, -ECONNRESET); 869 870out0: 871 spin_unlock_irqrestore(&dwc->lock, flags); 872 873 return ret; 874} 875 876int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value) 877{ 878 struct dwc3_gadget_ep_cmd_params params; 879 struct dwc3 *dwc = dep->dwc; 880 int ret; 881 882 memset(¶ms, 0x00, sizeof(params)); 883 884 if (value) { 885 if (dep->number == 0 || dep->number == 1) { 886 /* 887 * Whenever EP0 is stalled, we will restart 888 * the state machine, thus moving back to 889 * Setup Phase 890 */ 891 dwc->ep0state = EP0_SETUP_PHASE; 892 } 893 894 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 895 DWC3_DEPCMD_SETSTALL, ¶ms); 896 if (ret) 897 dev_err(dwc->dev, "failed to %s STALL on %s\n", 898 value ? "set" : "clear", 899 dep->name); 900 else 901 dep->flags |= DWC3_EP_STALL; 902 } else { 903 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 904 DWC3_DEPCMD_CLEARSTALL, ¶ms); 905 if (ret) 906 dev_err(dwc->dev, "failed to %s STALL on %s\n", 907 value ? 
"set" : "clear", 908 dep->name); 909 else 910 dep->flags &= ~DWC3_EP_STALL; 911 } 912 return ret; 913} 914 915static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 916{ 917 struct dwc3_ep *dep = to_dwc3_ep(ep); 918 struct dwc3 *dwc = dep->dwc; 919 920 unsigned long flags; 921 922 int ret; 923 924 spin_lock_irqsave(&dwc->lock, flags); 925 926 if (usb_endpoint_xfer_isoc(dep->desc)) { 927 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 928 ret = -EINVAL; 929 goto out; 930 } 931 932 ret = __dwc3_gadget_ep_set_halt(dep, value); 933out: 934 spin_unlock_irqrestore(&dwc->lock, flags); 935 936 return ret; 937} 938 939static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 940{ 941 struct dwc3_ep *dep = to_dwc3_ep(ep); 942 943 dep->flags |= DWC3_EP_WEDGE; 944 945 return usb_ep_set_halt(ep); 946} 947 948/* -------------------------------------------------------------------------- */ 949 950static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 951 .bLength = USB_DT_ENDPOINT_SIZE, 952 .bDescriptorType = USB_DT_ENDPOINT, 953 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 954}; 955 956static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 957 .enable = dwc3_gadget_ep0_enable, 958 .disable = dwc3_gadget_ep0_disable, 959 .alloc_request = dwc3_gadget_ep_alloc_request, 960 .free_request = dwc3_gadget_ep_free_request, 961 .queue = dwc3_gadget_ep0_queue, 962 .dequeue = dwc3_gadget_ep_dequeue, 963 .set_halt = dwc3_gadget_ep_set_halt, 964 .set_wedge = dwc3_gadget_ep_set_wedge, 965}; 966 967static const struct usb_ep_ops dwc3_gadget_ep_ops = { 968 .enable = dwc3_gadget_ep_enable, 969 .disable = dwc3_gadget_ep_disable, 970 .alloc_request = dwc3_gadget_ep_alloc_request, 971 .free_request = dwc3_gadget_ep_free_request, 972 .queue = dwc3_gadget_ep_queue, 973 .dequeue = dwc3_gadget_ep_dequeue, 974 .set_halt = dwc3_gadget_ep_set_halt, 975 .set_wedge = dwc3_gadget_ep_set_wedge, 976}; 977 978/* -------------------------------------------------------------------------- 
*/ 979 980static int dwc3_gadget_get_frame(struct usb_gadget *g) 981{ 982 struct dwc3 *dwc = gadget_to_dwc(g); 983 u32 reg; 984 985 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 986 return DWC3_DSTS_SOFFN(reg); 987} 988 989static int dwc3_gadget_wakeup(struct usb_gadget *g) 990{ 991 struct dwc3 *dwc = gadget_to_dwc(g); 992 993 unsigned long timeout; 994 unsigned long flags; 995 996 u32 reg; 997 998 int ret = 0; 999 1000 u8 link_state; 1001 u8 speed; 1002 1003 spin_lock_irqsave(&dwc->lock, flags); 1004 1005 /* 1006 * According to the Databook Remote wakeup request should 1007 * be issued only when the device is in early suspend state. 1008 * 1009 * We can check that via USB Link State bits in DSTS register. 1010 */ 1011 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1012 1013 speed = reg & DWC3_DSTS_CONNECTSPD; 1014 if (speed == DWC3_DSTS_SUPERSPEED) { 1015 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n"); 1016 ret = -EINVAL; 1017 goto out; 1018 } 1019 1020 link_state = DWC3_DSTS_USBLNKST(reg); 1021 1022 switch (link_state) { 1023 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1024 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1025 break; 1026 default: 1027 dev_dbg(dwc->dev, "can't wakeup from link state %d\n", 1028 link_state); 1029 ret = -EINVAL; 1030 goto out; 1031 } 1032 1033 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1034 1035 /* 1036 * Switch link state to Recovery. 
In HS/FS/LS this means
	 * RemoteWakeup Request
	 */
	reg |= DWC3_DCTL_ULSTCHNG_RECOVERY;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* wait for at least 2000us */
	usleep_range(2000, 2500);

	/* write zeroes to Link Change Request */
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* poll until the Link State changes to ON (U0), up to 100ms */
	timeout = jiffies + msecs_to_jiffies(100);

	while (!(time_after(jiffies, timeout))) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/*
 * usb_gadget_ops .set_selfpowered callback: records whether the gadget
 * reports itself as self-powered (used when answering GET_STATUS).
 */
static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	struct dwc3	*dwc = gadget_to_dwc(g);

	dwc->is_selfpowered = !!is_selfpowered;

	return 0;
}

/*
 * Set or clear the Run/Stop bit in DCTL, then poll DSTS.DEVCTRLHLT until
 * the core reflects the new state. The poll is bounded: at most 500
 * iterations of udelay(1); on timeout we fall through without reporting
 * an error (only the vdbg message below is emitted either way).
 *
 * Caller must hold dwc->lock.
 */
static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
{
	u32			reg;
	u32			timeout = 500;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on)
		reg |= DWC3_DCTL_RUN_STOP;
	else
		reg &= ~DWC3_DCTL_RUN_STOP;

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (is_on) {
			/* DEVCTRLHLT clears when the controller is running */
			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
				break;
		} else {
			/* DEVCTRLHLT sets when the controller has halted */
			if (reg & DWC3_DSTS_DEVCTRLHLT)
				break;
		}
		timeout--;
		if (!timeout)
			break;
		udelay(1);
	} while (1);

	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
			dwc->gadget_driver
			? dwc->gadget_driver->function : "no-function",
			is_on ? "connect" : "disconnect");
}

/*
 * usb_gadget_ops .pullup callback: soft-connect/disconnect by toggling
 * Run/Stop under the controller lock.
 */
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;

	is_on = !!is_on;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc3_gadget_run_stop(dwc, is_on);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

/*
 * usb_gadget_ops .udc_start callback: binds @driver to the controller,
 * programs GCTL (power-down scale, device port direction, scrambling)
 * and DCFG (SuperSpeed default), enables both directions of endpoint 0
 * and arms it for the first SETUP packet.
 *
 * Returns 0 on success, -EBUSY if a driver is already bound, or the
 * error from enabling ep0.
 */
static int dwc3_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	struct dwc3_ep		*dep;
	unsigned long		flags;
	int			ret = 0;
	u32			reg;

	spin_lock_irqsave(&dwc->lock, flags);

	if (dwc->gadget_driver) {
		dev_err(dwc->dev, "%s is already bound to %s\n",
				dwc->gadget.name,
				dwc->gadget_driver->driver.name);
		ret = -EBUSY;
		goto err0;
	}

	dwc->gadget_driver	= driver;
	dwc->gadget.dev.driver	= &driver->driver;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);

	/*
	 * REVISIT: power down scale might be different
	 * depending on PHY used, need to pass that via platform_data
	 */
	reg |= DWC3_GCTL_PWRDNSCALE(0x61a)
		| DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_DEVICE);
	reg &= ~DWC3_GCTL_DISSCRAMBLE;

	/*
	 * WORKAROUND: DWC3 revisions <1.90a have a bug
	 * when The device fails to connect at SuperSpeed
	 * and falls back to high-speed mode which causes
	 * the device to enter in a Connect/Disconnect loop
	 */
	if (dwc->revision < DWC3_REVISION_190A)
		reg |= DWC3_GCTL_U2RSTECN;

	dwc3_writel(dwc->regs, DWC3_GCTL, reg);

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_SPEED_MASK);
	reg |= DWC3_DCFG_SUPERSPEED;
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err0;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err1;
	}

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;

err1:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/*
 * usb_gadget_ops .udc_stop callback: disables both directions of
 * endpoint 0 and drops the reference to the bound gadget driver.
 */
static int dwc3_gadget_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);

	dwc->gadget_driver	= NULL;
	dwc->gadget.dev.driver	= NULL;

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
};

/* -------------------------------------------------------------------------- */

/*
 * Allocate one struct dwc3_ep per hardware endpoint, name it
 * ("ep<num>in"/"ep<num>out"), wire up the gadget ep_list and, for all
 * endpoints except ep0in/ep0out, allocate its TRB pool.
 *
 * NOTE(review): on a mid-loop allocation failure this returns without
 * freeing the endpoints already allocated in previous iterations, and
 * the caller's err4 path does not call dwc3_gadget_free_endpoints()
 * either — looks like a leak on the error path; verify against callers.
 */
static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep			*dep;
	u8				epnum;

	INIT_LIST_HEAD(&dwc->gadget.ep_list);

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
		if (!dep) {
			dev_err(dwc->dev, "can't allocate endpoint %d\n",
					epnum);
			return -ENOMEM;
		}

		dep->dwc = dwc;
		dep->number = epnum;
		dwc->eps[epnum] = dep;

		/* even numbers are OUT, odd are IN */
		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
				(epnum & 1) ? "in" : "out");
		dep->endpoint.name = dep->name;
		dep->direction = (epnum & 1);

		if (epnum == 0 || epnum == 1) {
			dep->endpoint.maxpacket = 512;
			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
			if (!epnum)
				dwc->gadget.ep0 = &dep->endpoint;
		} else {
			int		ret;

			dep->endpoint.maxpacket = 1024;
			dep->endpoint.ops = &dwc3_gadget_ep_ops;
			list_add_tail(&dep->endpoint.ep_list,
					&dwc->gadget.ep_list);

			ret = dwc3_alloc_trb_pool(dep);
			if (ret) {
				dev_err(dwc->dev, "%s: failed to allocate TRB pool\n", dep->name);
				return ret;
			}
		}
		INIT_LIST_HEAD(&dep->request_list);
		INIT_LIST_HEAD(&dep->req_queued);
	}

	return 0;
}

/*
 * Mirror of dwc3_gadget_init_endpoints(): frees TRB pools, unlinks
 * non-control endpoints from the gadget ep_list and frees the structs.
 */
static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep			*dep;
	u8				epnum;

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = dwc->eps[epnum];
		dwc3_free_trb_pool(dep);

		/* ep0in/ep0out were never added to gadget.ep_list */
		if (epnum != 0 && epnum != 1)
			list_del(&dep->endpoint.ep_list);

		kfree(dep);
	}
}

/* release callback for dwc->gadget.dev; nothing to free here */
static void dwc3_gadget_release(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
}

/* -------------------------------------------------------------------------- */
/*
 * Walk dep->req_queued, giving completed requests back to the gadget
 * driver with @status, until the queue is empty or the TRB flags/event
 * status indicate the last relevant transfer was reached.
 *
 * Returns 0 when the endpoint should stay busy (IOC seen), 1 when the
 * caller may clear the BUSY flag.
 */
static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request	*req;
	struct dwc3_trb         trb;
	unsigned int		count;
	unsigned int		s_pkt = 0;

	do {
		req = next_request(&dep->req_queued);
		if (!req)
			break;

		dwc3_trb_to_nat(req->trb, &trb);

		if (trb.hwo && status != -ESHUTDOWN)
			/*
			 * We continue despite the error. There is not much
			 * we can do. If we don't clean it up we loop for
			 * ever. If we skip the TRB then it gets overwritten
			 * and reused after a while since we use them in a
			 * ring buffer. A BUG() would help. Let's hope that
			 * if this occurs, someone fixes the root cause
			 * instead of looking away :)
			 */
			dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
					dep->name, req->trb);
		count = trb.length;

		if (dep->direction) {
			/* residual bytes on IN mean the transfer was cut short */
			if (count) {
				dev_err(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				status = -ECONNRESET;
			}
		} else {
			if (count && (event->status & DEPEVT_STATUS_SHORT))
				s_pkt = 1;
		}

		/*
		 * We assume here we will always receive the entire data block
		 * which we should receive. Meaning, if we program RX to
		 * receive 4K but we receive only 2K, we assume that's all we
		 * should receive and we simply bounce the request back to the
		 * gadget driver for further processing.
		 */
		req->request.actual += req->request.length - count;
		dwc3_gadget_giveback(dep, req, status);
		if (s_pkt)
			break;
		if ((event->status & DEPEVT_STATUS_LST) && trb.lst)
			break;
		if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
			break;
	} while (1);

	if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
		return 0;
	return 1;
}

/*
 * Common handler for XferComplete/XferInProgress: reap completed
 * requests and, when nothing is pending, clear the endpoint BUSY state
 * and its cached transfer resource index.
 */
static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
		int start_new)
{
	unsigned		status = 0;
	int			clean_busy;

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
	if (clean_busy) {
		dep->flags &= ~DWC3_EP_BUSY;
		dep->res_trans_idx = 0;
	}
}

/*
 * Kick an isochronous transfer on XferNotReady: compute the target
 * (micro)frame from the event parameters, aligned down to the endpoint
 * interval and pushed 4 intervals into the future.
 */
static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	u32 uf;

	if (list_empty(&dep->request_list)) {
		dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
			dep->name);
		return;
	}

	if (event->parameters) {
		u32 mask;

		/* assumes dep->interval is a power of two — TODO confirm */
		mask = ~(dep->interval - 1);
		uf = event->parameters & mask;
		/* 4 micro frames in the future */
		uf += dep->interval * 4;
	} else {
		uf = 0;
	}

	__dwc3_gadget_kick_transfer(dep, uf, 1);
}

/*
 * Completion of an End Transfer command: purge every queued request on
 * the endpoint (they may share the stopped transfer index) and clear
 * the BUSY flag.
 */
static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_event_depevt mod_ev = *event;

	/*
	 * We were asked to remove one request. It is possible that this
	 * request and a few others were started together and have the same
	 * transfer index. Since we stopped the complete endpoint we don't
	 * know how many requests were already completed (and not yet)
	 * reported and how many could be done (later). We purge them all
	 * until the end of the list.
	 */
	mod_ev.status = DEPEVT_STATUS_LST;
	dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN);
	dep->flags &= ~DWC3_EP_BUSY;
	/* pending requests are ignored and are queued on XferNotReady */
}

/*
 * Endpoint Command Complete event: the command type is encoded in
 * bits 12:8 of the event parameters, the transfer resource index (for
 * Start Transfer) in bits 6:0.
 */
static void dwc3_ep_cmd_compl(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	u32 param = event->parameters;
	u32 cmd_type = (param >> 8) & ((1 << 5) - 1);

	switch (cmd_type) {
	case DWC3_DEPCMD_ENDTRANSFER:
		dwc3_process_ep_cmd_complete(dep, event);
		break;
	case DWC3_DEPCMD_STARTTRANSFER:
		dep->res_trans_idx = param & 0x7f;
		break;
	default:
		printk(KERN_ERR "%s() unknown /unexpected type: %d\n",
				__func__, cmd_type);
		break;
	};
}

/*
 * Top-level dispatcher for endpoint events. ep0/ep1 events are handed
 * to the control-transfer state machine; all others are dispatched by
 * endpoint_event type.
 */
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep		*dep;
	u8			epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
			dwc3_ep_event_string(event->endpoint_event));

	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		/* XferComplete is only expected on non-isoc endpoints */
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
		break;
	case DWC3_DEPEVT_XFERINPROGRESS:
		/* XferInProgress is only expected on isoc endpoints */
		if (!usb_endpoint_xfer_isoc(dep->desc)) {
			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dwc3_gadget_start_isoc(dwc, dep, event);
		} else {
			int ret;

			dev_vdbg(dwc->dev, "%s: reason %s\n",
					dep->name, event->status
					? "Transfer Active"
					: "Transfer Not Active");

			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
			if (!ret || ret == -EBUSY)
				return;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}

		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
		break;
	case DWC3_DEPEVT_STREAMEVT:
		dev_dbg(dwc->dev, "%s Stream Event\n", dep->name);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dwc3_ep_cmd_compl(dep, event);
		break;
	}
}

/*
 * Notify the gadget driver of a disconnect. The lock is dropped around
 * the callback because the driver may call back into us.
 */
static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->disconnect(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

/*
 * Issue an End Transfer command (ForceRM | CmdIOC) for the endpoint's
 * active transfer resource index, if it has one, then clear the index.
 */
static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
{
	struct dwc3_ep *dep;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	dep = dwc->eps[epnum];

	WARN_ON(!dep->res_trans_idx);
	if (dep->res_trans_idx) {
		cmd = DWC3_DEPCMD_ENDTRANSFER;
		cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
		cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx);
		memset(&params, 0, sizeof(params));
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
		WARN_ON_ONCE(ret);
		dep->res_trans_idx = 0;
	}
}

/*
 * Remove all queued requests from every enabled non-control endpoint
 * (epnum 2 and up); used on disconnect and USB reset.
 */
static void dwc3_stop_active_transfers(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;

		dep = dwc->eps[epnum];
		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		dwc3_remove_requests(dwc, dep);
	}
}

/*
 * Send a Clear Stall command to every endpoint currently flagged as
 * stalled and clear the software STALL flag.
 */
static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;
		struct dwc3_gadget_ep_cmd_params params;
		int ret;

		dep = dwc->eps[epnum];

		if (!(dep->flags & DWC3_EP_STALL))
			continue;

		dep->flags &= ~DWC3_EP_STALL;

		memset(&params, 0, sizeof(params));
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
				DWC3_DEPCMD_CLEARSTALL, &params);
		WARN_ON_ONCE(ret);
	}
}

/*
 * Disconnect device event: stop active transfers, notify the gadget
 * driver and mark the link speed unknown.
 */
static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	dev_vdbg(dwc->dev, "%s\n", __func__);
#if 0
	XXX
	U1/U2 is powersave optimization. Skip it for now. Anyway we need to
	enable it before we can disable it.

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
#endif

	dwc3_stop_active_transfers(dwc);
	dwc3_disconnect_gadget(dwc);

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
}

/*
 * Power the USB3 (PIPE3) PHY up or down by toggling the Suspend-PHY
 * bit in GUSB3PIPECTL(0). @on means "not suspended".
 */
static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
{
	u32			reg;

	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));

	if (on)
		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
	else
		reg |= DWC3_GUSB3PIPECTL_SUSPHY;

	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
}

/*
 * Power the USB2 (UTMI/ULPI) PHY up or down by toggling the
 * Suspend-PHY bit in GUSB2PHYCFG(0). @on means "not suspended".
 */
static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on)
{
	u32			reg;

	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));

	if (on)
		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
	else
		reg |= DWC3_GUSB2PHYCFG_SUSPHY;

	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}

/*
 * USB Reset device event: re-enable both PHYs, notify a possibly
 * connected gadget driver, clear test mode, stop transfers, clear
 * stalls, reset the device address and wait for the RX FIFO to drain.
 */
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32			reg;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	/* Enable PHYs */
	dwc3_gadget_usb2_phy_power(dwc, true);
	dwc3_gadget_usb3_phy_power(dwc, true);

	if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
		dwc3_disconnect_gadget(dwc);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	dwc3_stop_active_transfers(dwc);
	dwc3_clear_stall_all_ep(dwc);

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	/*
	 * Wait for RxFifo to drain
	 *
	 * REVISIT probably shouldn't wait forever.
	 * In case Hardware ends up in a screwed up
	 * case, we error out, notify the user and,
	 * maybe, WARN() or BUG() but leave the rest
	 * of the kernel working fine.
	 *
	 * REVISIT the below is rather CPU intensive,
	 * maybe we should read and if it doesn't work
	 * sleep (not busy wait) for a few useconds.
	 *
	 * REVISIT why wait until the RXFIFO is empty anyway?
	 */
	while (!(dwc3_readl(dwc->regs, DWC3_DSTS)
				& DWC3_DSTS_RXFIFOEMPTY))
		cpu_relax();
}

/*
 * Reprogram GCTL.RAMClkSel after Connect Done; only relevant when the
 * new connection speed is SuperSpeed.
 */
static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
{
	u32 reg;
	u32 usb30_clock = DWC3_GCTL_CLK_BUS;

	/*
	 * We change the clock only at SS but I dunno why I would want to do
	 * this. Maybe it becomes part of the power saving plan.
	 */

	if (speed != DWC3_DSTS_SUPERSPEED)
		return;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 */
	if (!usb30_clock)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}

/*
 * Suspend the PHY that is not used by the negotiated connection speed:
 * the USB2 PHY when running SuperSpeed, the USB3 PHY otherwise.
 */
static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed)
{
	switch (speed) {
	case USB_SPEED_SUPER:
		dwc3_gadget_usb2_phy_power(dwc, false);
		break;
	case USB_SPEED_HIGH:
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		dwc3_gadget_usb3_phy_power(dwc, false);
		break;
	}
}

/*
 * Connect Done device event: read the negotiated speed from DSTS,
 * update ep0's max packet size and gadget.speed accordingly, suspend
 * the unused PHY and re-enable both directions of endpoint 0.
 */
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_ep		*dep;
	int			ret;
	u32			reg;
	u8			speed;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	memset(&params, 0x00, sizeof(params));

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	dwc3_update_ram_clk_sel(dwc, speed);

	switch (speed) {
	case DWC3_DCFG_SUPERSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DCFG_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DCFG_FULLSPEED2:
	case DWC3_DCFG_FULLSPEED1:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DCFG_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	/* Disable unneeded PHY */
	dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}

/*
 * Wakeup device event: forward to the gadget driver's resume callback.
 *
 * NOTE(review): dwc->gadget_driver and ->resume are dereferenced
 * without a NULL check here, unlike dwc3_disconnect_gadget() above —
 * confirm a driver is always bound when this event can fire.
 */
static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	dev_vdbg(dwc->dev, "%s\n", __func__);

	/*
	 * TODO take core out of low power mode when that's
	 * implemented.
	 */

	dwc->gadget_driver->resume(&dwc->gadget);
}

/*
 * Link Status Change device event: cache the new link state.
 */
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	dev_vdbg(dwc->dev, "%s\n", __func__);

	/* The fifth bit says SuperSpeed yes or no. */
	dwc->link_state = evtinfo & DWC3_LINK_STATE_MASK;
}

/*
 * Dispatcher for device-specific (non-endpoint) events.
 */
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_SOF:
		dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
		dev_vdbg(dwc->dev, "Erratic Error\n");
		break;
	case DWC3_DEVICE_EVENT_CMD_CMPL:
		dev_vdbg(dwc->dev, "Command Complete\n");
		break;
	case DWC3_DEVICE_EVENT_OVERFLOW:
		dev_vdbg(dwc->dev, "Overflow\n");
		break;
	default:
		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}

/*
 * Route one raw event-buffer entry to either the endpoint or the
 * device event handler.
 */
static void dwc3_process_event_entry(struct dwc3 *dwc,
		const union dwc3_event *event)
{
	/* Endpoint IRQ, handle it and return early */
	if (event->type.is_devspec == 0) {
		/* depevt */
		return dwc3_endpoint_interrupt(dwc, &event->depevt);
	}

	switch (event->type.type) {
	case DWC3_EVENT_TYPE_DEV:
		dwc3_gadget_interrupt(dwc, &event->devt);
		break;
	/* REVISIT what to do with Carkit and I2C events ? */
	default:
		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
	}
}

/*
 * Drain one hardware event buffer: process each 4-byte entry and
 * acknowledge it by writing 4 back to GEVNTCOUNT.
 *
 * Returns IRQ_HANDLED if any events were consumed, IRQ_NONE otherwise.
 */
static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	int left;
	u32 count;

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt = dwc->ev_buffs[buf];
	left = count;

	while (left > 0) {
		union dwc3_event event;

		memcpy(&event.raw, (evt->buf + evt->lpos), sizeof(event.raw));
		dwc3_process_event_entry(dwc, &event);
		/*
		 * XXX we wrap around correctly to the next entry as almost all
		 * entries are 4 bytes in size. There is one entry which has 12
		 * bytes which is a regular entry followed by 8 bytes data. ATM
		 * I don't know how things are organized if we get next to the
		 * a boundary so I worry about that once we try to handle that.
		 */
		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
		left -= 4;

		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
	}

	return IRQ_HANDLED;
}

/*
 * Top-level (shared) interrupt handler: processes every event buffer
 * under the controller lock.
 */
static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
{
	struct dwc3			*dwc = _dwc;
	int				i;
	irqreturn_t			ret = IRQ_NONE;

	spin_lock(&dwc->lock);

	for (i = 0; i < DWC3_EVENT_BUFFERS_NUM; i++) {
		irqreturn_t status;

		status = dwc3_process_event_buf(dwc, i);
		if (status == IRQ_HANDLED)
			ret = status;
	}

	spin_unlock(&dwc->lock);

	return ret;
}

/**
 * dwc3_gadget_init - Initializes gadget related registers
 * @dwc: Pointer to out controller context structure
 *
 * Allocates the coherent buffers used for control transfers (ctrl_req,
 * ep0 TRB, setup buffer, ep0 bounce buffer), sets up the usb_gadget
 * device, creates the endpoints, requests the IRQ, enables device
 * events and registers the UDC.
 *
 * Returns 0 on success otherwise negative errno.
 */
int __devinit dwc3_gadget_init(struct dwc3 *dwc)
{
	u32					reg;
	int					ret;
	int					irq;

	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			&dwc->ctrl_req_addr, GFP_KERNEL);
	if (!dwc->ctrl_req) {
		dev_err(dwc->dev, "failed to allocate ctrl request\n");
		ret = -ENOMEM;
		goto err0;
	}

	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			&dwc->ep0_trb_addr, GFP_KERNEL);
	if (!dwc->ep0_trb) {
		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
		ret = -ENOMEM;
		goto err1;
	}

	dwc->setup_buf = dma_alloc_coherent(dwc->dev,
			sizeof(*dwc->setup_buf) * 2,
			&dwc->setup_buf_addr, GFP_KERNEL);
	if (!dwc->setup_buf) {
		dev_err(dwc->dev, "failed to allocate setup buffer\n");
		ret = -ENOMEM;
		goto err2;
	}

	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
			512, &dwc->ep0_bounce_addr, GFP_KERNEL);
	if (!dwc->ep0_bounce) {
		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
		ret = -ENOMEM;
		goto err3;
	}

	dev_set_name(&dwc->gadget.dev, "gadget");

	dwc->gadget.ops			= &dwc3_gadget_ops;
	dwc->gadget.is_dualspeed	= true;
	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
	dwc->gadget.dev.parent		= dwc->dev;

	dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);

	dwc->gadget.dev.dma_parms	= dwc->dev->dma_parms;
	dwc->gadget.dev.dma_mask	= dwc->dev->dma_mask;
	dwc->gadget.dev.release		= dwc3_gadget_release;
	dwc->gadget.name		= "dwc3-gadget";

	/*
	 * REVISIT: Here we should clear all pending IRQs to be
	 * sure we're starting from a well known location.
	 */

	/*
	 * NOTE(review): on failure this jumps to err4, which does not free
	 * endpoints already allocated by dwc3_gadget_init_endpoints() —
	 * see the note on that function.
	 */
	ret = dwc3_gadget_init_endpoints(dwc);
	if (ret)
		goto err4;

	irq = platform_get_irq(to_platform_device(dwc->dev), 0);

	ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED,
			"dwc3", dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
				irq, ret);
		goto err5;
	}

	/* Enable all but Start and End of Frame IRQs */
	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
			DWC3_DEVTEN_EVNTOVERFLOWEN |
			DWC3_DEVTEN_CMDCMPLTEN |
			DWC3_DEVTEN_ERRTICERREN |
			DWC3_DEVTEN_WKUPEVTEN |
			DWC3_DEVTEN_ULSTCNGEN |
			DWC3_DEVTEN_CONNECTDONEEN |
			DWC3_DEVTEN_USBRSTEN |
			DWC3_DEVTEN_DISCONNEVTEN);
	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);

	ret = device_register(&dwc->gadget.dev);
	if (ret) {
		dev_err(dwc->dev, "failed to register gadget device\n");
		put_device(&dwc->gadget.dev);
		goto err6;
	}

	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
	if (ret) {
		dev_err(dwc->dev, "failed to register udc\n");
		goto err7;
	}

	return 0;

err7:
	device_unregister(&dwc->gadget.dev);

err6:
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
	free_irq(irq, dwc);

err5:
	dwc3_gadget_free_endpoints(dwc);

err4:
	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
			dwc->ep0_bounce_addr);

err3:
	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
			dwc->setup_buf, dwc->setup_buf_addr);

err2:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			dwc->ep0_trb, dwc->ep0_trb_addr);

err1:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);

err0:
	return ret;
}

/*
 * Tear down everything dwc3_gadget_init() set up, in reverse order:
 * UDC, device events + IRQ, endpoints, coherent buffers, gadget device.
 */
void dwc3_gadget_exit(struct dwc3 *dwc)
{
	int			irq;
	int			i;

	usb_del_gadget_udc(&dwc->gadget);
	irq = platform_get_irq(to_platform_device(dwc->dev), 0);

	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
	free_irq(irq, dwc);

	for (i = 0; i < ARRAY_SIZE(dwc->eps); i++)
		__dwc3_gadget_ep_disable(dwc->eps[i]);

	dwc3_gadget_free_endpoints(dwc);

	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
			dwc->ep0_bounce_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
			dwc->setup_buf, dwc->setup_buf_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			dwc->ep0_trb, dwc->ep0_trb_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);

	device_unregister(&dwc->gadget.dev);
}