gadget.c revision 47a1685f139271de401212bd69d17374ca5a5270
1/** 2 * linux/drivers/usb/gadget/s3c-hsotg.c 3 * 4 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 5 * http://www.samsung.com 6 * 7 * Copyright 2008 Openmoko, Inc. 8 * Copyright 2008 Simtec Electronics 9 * Ben Dooks <ben@simtec.co.uk> 10 * http://armlinux.simtec.co.uk/ 11 * 12 * S3C USB2.0 High-speed / OtG driver 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License version 2 as 16 * published by the Free Software Foundation. 17 */ 18 19#include <linux/kernel.h> 20#include <linux/module.h> 21#include <linux/spinlock.h> 22#include <linux/interrupt.h> 23#include <linux/platform_device.h> 24#include <linux/dma-mapping.h> 25#include <linux/debugfs.h> 26#include <linux/seq_file.h> 27#include <linux/delay.h> 28#include <linux/io.h> 29#include <linux/slab.h> 30#include <linux/clk.h> 31#include <linux/regulator/consumer.h> 32#include <linux/of_platform.h> 33#include <linux/phy/phy.h> 34 35#include <linux/usb/ch9.h> 36#include <linux/usb/gadget.h> 37#include <linux/usb/phy.h> 38#include <linux/platform_data/s3c-hsotg.h> 39 40#include "hw.h" 41 42static const char * const s3c_hsotg_supply_names[] = { 43 "vusb_d", /* digital USB supply, 1.2V */ 44 "vusb_a", /* analog USB supply, 1.1V */ 45}; 46 47/* 48 * EP0_MPS_LIMIT 49 * 50 * Unfortunately there seems to be a limit of the amount of data that can 51 * be transferred by IN transactions on EP0. This is either 127 bytes or 3 52 * packets (which practically means 1 packet and 63 bytes of data) when the 53 * MPS is set to 64. 54 * 55 * This means if we are wanting to move >127 bytes of data, we need to 56 * split the transactions up, but just doing one packet at a time does 57 * not work (this may be an implicit DATA0 PID on first packet of the 58 * transaction) and doing 2 packets is outside the controller's limits. 
59 * 60 * If we try to lower the MPS size for EP0, then no transfers work properly 61 * for EP0, and the system will fail basic enumeration. As no cause for this 62 * has currently been found, we cannot support any large IN transfers for 63 * EP0. 64 */ 65#define EP0_MPS_LIMIT 64 66 67struct s3c_hsotg; 68struct s3c_hsotg_req; 69 70/** 71 * struct s3c_hsotg_ep - driver endpoint definition. 72 * @ep: The gadget layer representation of the endpoint. 73 * @name: The driver generated name for the endpoint. 74 * @queue: Queue of requests for this endpoint. 75 * @parent: Reference back to the parent device structure. 76 * @req: The current request that the endpoint is processing. This is 77 * used to indicate an request has been loaded onto the endpoint 78 * and has yet to be completed (maybe due to data move, or simply 79 * awaiting an ack from the core all the data has been completed). 80 * @debugfs: File entry for debugfs file for this endpoint. 81 * @lock: State lock to protect contents of endpoint. 82 * @dir_in: Set to true if this endpoint is of the IN direction, which 83 * means that it is sending data to the Host. 84 * @index: The index for the endpoint registers. 85 * @mc: Multi Count - number of transactions per microframe 86 * @interval - Interval for periodic endpoints 87 * @name: The name array passed to the USB core. 88 * @halted: Set if the endpoint has been halted. 89 * @periodic: Set if this is a periodic ep, such as Interrupt 90 * @isochronous: Set if this is a isochronous ep 91 * @sent_zlp: Set if we've sent a zero-length packet. 92 * @total_data: The total number of data bytes done. 93 * @fifo_size: The size of the FIFO (for periodic IN endpoints) 94 * @fifo_load: The amount of data loaded into the FIFO (periodic IN) 95 * @last_load: The offset of data for the last start of request. 
96 * @size_loaded: The last loaded size for DxEPTSIZE for periodic IN 97 * 98 * This is the driver's state for each registered enpoint, allowing it 99 * to keep track of transactions that need doing. Each endpoint has a 100 * lock to protect the state, to try and avoid using an overall lock 101 * for the host controller as much as possible. 102 * 103 * For periodic IN endpoints, we have fifo_size and fifo_load to try 104 * and keep track of the amount of data in the periodic FIFO for each 105 * of these as we don't have a status register that tells us how much 106 * is in each of them. (note, this may actually be useless information 107 * as in shared-fifo mode periodic in acts like a single-frame packet 108 * buffer than a fifo) 109 */ 110struct s3c_hsotg_ep { 111 struct usb_ep ep; 112 struct list_head queue; 113 struct s3c_hsotg *parent; 114 struct s3c_hsotg_req *req; 115 struct dentry *debugfs; 116 117 118 unsigned long total_data; 119 unsigned int size_loaded; 120 unsigned int last_load; 121 unsigned int fifo_load; 122 unsigned short fifo_size; 123 124 unsigned char dir_in; 125 unsigned char index; 126 unsigned char mc; 127 unsigned char interval; 128 129 unsigned int halted:1; 130 unsigned int periodic:1; 131 unsigned int isochronous:1; 132 unsigned int sent_zlp:1; 133 134 char name[10]; 135}; 136 137/** 138 * struct s3c_hsotg - driver state. 139 * @dev: The parent device supplied to the probe function 140 * @driver: USB gadget driver 141 * @phy: The otg phy transceiver structure for phy control. 142 * @uphy: The otg phy transceiver structure for old USB phy control. 143 * @plat: The platform specific configuration data. This can be removed once 144 * all SoCs support usb transceiver. 145 * @regs: The memory area mapped for accessing registers. 146 * @irq: The IRQ number we are using 147 * @supplies: Definition of USB power supplies 148 * @phyif: PHY interface width 149 * @dedicated_fifos: Set if the hardware has dedicated IN-EP fifos. 
150 * @num_of_eps: Number of available EPs (excluding EP0) 151 * @debug_root: root directrory for debugfs. 152 * @debug_file: main status file for debugfs. 153 * @debug_fifo: FIFO status file for debugfs. 154 * @ep0_reply: Request used for ep0 reply. 155 * @ep0_buff: Buffer for EP0 reply data, if needed. 156 * @ctrl_buff: Buffer for EP0 control requests. 157 * @ctrl_req: Request for EP0 control packets. 158 * @setup: NAK management for EP0 SETUP 159 * @last_rst: Time of last reset 160 * @eps: The endpoints being supplied to the gadget framework 161 */ 162struct s3c_hsotg { 163 struct device *dev; 164 struct usb_gadget_driver *driver; 165 struct phy *phy; 166 struct usb_phy *uphy; 167 struct s3c_hsotg_plat *plat; 168 169 spinlock_t lock; 170 171 void __iomem *regs; 172 int irq; 173 struct clk *clk; 174 175 struct regulator_bulk_data supplies[ARRAY_SIZE(s3c_hsotg_supply_names)]; 176 177 u32 phyif; 178 unsigned int dedicated_fifos:1; 179 unsigned char num_of_eps; 180 181 struct dentry *debug_root; 182 struct dentry *debug_file; 183 struct dentry *debug_fifo; 184 185 struct usb_request *ep0_reply; 186 struct usb_request *ctrl_req; 187 u8 ep0_buff[8]; 188 u8 ctrl_buff[8]; 189 190 struct usb_gadget gadget; 191 unsigned int setup; 192 unsigned long last_rst; 193 struct s3c_hsotg_ep *eps; 194}; 195 196/** 197 * struct s3c_hsotg_req - data transfer request 198 * @req: The USB gadget request 199 * @queue: The list of requests for the endpoint this is queued for. 200 * @in_progress: Has already had size/packets written to core 201 * @mapped: DMA buffer for this request has been mapped via dma_map_single(). 
202 */ 203struct s3c_hsotg_req { 204 struct usb_request req; 205 struct list_head queue; 206 unsigned char in_progress; 207 unsigned char mapped; 208}; 209 210/* conversion functions */ 211static inline struct s3c_hsotg_req *our_req(struct usb_request *req) 212{ 213 return container_of(req, struct s3c_hsotg_req, req); 214} 215 216static inline struct s3c_hsotg_ep *our_ep(struct usb_ep *ep) 217{ 218 return container_of(ep, struct s3c_hsotg_ep, ep); 219} 220 221static inline struct s3c_hsotg *to_hsotg(struct usb_gadget *gadget) 222{ 223 return container_of(gadget, struct s3c_hsotg, gadget); 224} 225 226static inline void __orr32(void __iomem *ptr, u32 val) 227{ 228 writel(readl(ptr) | val, ptr); 229} 230 231static inline void __bic32(void __iomem *ptr, u32 val) 232{ 233 writel(readl(ptr) & ~val, ptr); 234} 235 236/* forward decleration of functions */ 237static void s3c_hsotg_dump(struct s3c_hsotg *hsotg); 238 239/** 240 * using_dma - return the DMA status of the driver. 241 * @hsotg: The driver state. 242 * 243 * Return true if we're using DMA. 244 * 245 * Currently, we have the DMA support code worked into everywhere 246 * that needs it, but the AMBA DMA implementation in the hardware can 247 * only DMA from 32bit aligned addresses. This means that gadgets such 248 * as the CDC Ethernet cannot work as they often pass packets which are 249 * not 32bit aligned. 250 * 251 * Unfortunately the choice to use DMA or not is global to the controller 252 * and seems to be only settable when the controller is being put through 253 * a core reset. This means we either need to fix the gadgets to take 254 * account of DMA alignment, or add bounce buffers (yuerk). 255 * 256 * Until this issue is sorted out, we always return 'false'. 
257 */ 258static inline bool using_dma(struct s3c_hsotg *hsotg) 259{ 260 return false; /* support is not complete */ 261} 262 263/** 264 * s3c_hsotg_en_gsint - enable one or more of the general interrupt 265 * @hsotg: The device state 266 * @ints: A bitmask of the interrupts to enable 267 */ 268static void s3c_hsotg_en_gsint(struct s3c_hsotg *hsotg, u32 ints) 269{ 270 u32 gsintmsk = readl(hsotg->regs + GINTMSK); 271 u32 new_gsintmsk; 272 273 new_gsintmsk = gsintmsk | ints; 274 275 if (new_gsintmsk != gsintmsk) { 276 dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk); 277 writel(new_gsintmsk, hsotg->regs + GINTMSK); 278 } 279} 280 281/** 282 * s3c_hsotg_disable_gsint - disable one or more of the general interrupt 283 * @hsotg: The device state 284 * @ints: A bitmask of the interrupts to enable 285 */ 286static void s3c_hsotg_disable_gsint(struct s3c_hsotg *hsotg, u32 ints) 287{ 288 u32 gsintmsk = readl(hsotg->regs + GINTMSK); 289 u32 new_gsintmsk; 290 291 new_gsintmsk = gsintmsk & ~ints; 292 293 if (new_gsintmsk != gsintmsk) 294 writel(new_gsintmsk, hsotg->regs + GINTMSK); 295} 296 297/** 298 * s3c_hsotg_ctrl_epint - enable/disable an endpoint irq 299 * @hsotg: The device state 300 * @ep: The endpoint index 301 * @dir_in: True if direction is in. 302 * @en: The enable value, true to enable 303 * 304 * Set or clear the mask for an individual endpoint's interrupt 305 * request. 306 */ 307static void s3c_hsotg_ctrl_epint(struct s3c_hsotg *hsotg, 308 unsigned int ep, unsigned int dir_in, 309 unsigned int en) 310{ 311 unsigned long flags; 312 u32 bit = 1 << ep; 313 u32 daint; 314 315 if (!dir_in) 316 bit <<= 16; 317 318 local_irq_save(flags); 319 daint = readl(hsotg->regs + DAINTMSK); 320 if (en) 321 daint |= bit; 322 else 323 daint &= ~bit; 324 writel(daint, hsotg->regs + DAINTMSK); 325 local_irq_restore(flags); 326} 327 328/** 329 * s3c_hsotg_init_fifo - initialise non-periodic FIFOs 330 * @hsotg: The device instance. 
331 */ 332static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg) 333{ 334 unsigned int ep; 335 unsigned int addr; 336 unsigned int size; 337 int timeout; 338 u32 val; 339 340 /* set FIFO sizes to 2048/1024 */ 341 342 writel(2048, hsotg->regs + GRXFSIZ); 343 writel((2048 << FIFOSIZE_STARTADDR_SHIFT) | 344 (1024 << FIFOSIZE_DEPTH_SHIFT), hsotg->regs + GNPTXFSIZ); 345 346 /* 347 * arange all the rest of the TX FIFOs, as some versions of this 348 * block have overlapping default addresses. This also ensures 349 * that if the settings have been changed, then they are set to 350 * known values. 351 */ 352 353 /* start at the end of the GNPTXFSIZ, rounded up */ 354 addr = 2048 + 1024; 355 size = 768; 356 357 /* 358 * currently we allocate TX FIFOs for all possible endpoints, 359 * and assume that they are all the same size. 360 */ 361 362 for (ep = 1; ep <= 15; ep++) { 363 val = addr; 364 val |= size << FIFOSIZE_DEPTH_SHIFT; 365 addr += size; 366 367 writel(val, hsotg->regs + DPTXFSIZN(ep)); 368 } 369 370 /* 371 * according to p428 of the design guide, we need to ensure that 372 * all fifos are flushed before continuing 373 */ 374 375 writel(GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH | 376 GRSTCTL_RXFFLSH, hsotg->regs + GRSTCTL); 377 378 /* wait until the fifos are both flushed */ 379 timeout = 100; 380 while (1) { 381 val = readl(hsotg->regs + GRSTCTL); 382 383 if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0) 384 break; 385 386 if (--timeout == 0) { 387 dev_err(hsotg->dev, 388 "%s: timeout flushing fifos (GRSTCTL=%08x)\n", 389 __func__, val); 390 } 391 392 udelay(1); 393 } 394 395 dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout); 396} 397 398/** 399 * @ep: USB endpoint to allocate request for. 
400 * @flags: Allocation flags 401 * 402 * Allocate a new USB request structure appropriate for the specified endpoint 403 */ 404static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep, 405 gfp_t flags) 406{ 407 struct s3c_hsotg_req *req; 408 409 req = kzalloc(sizeof(struct s3c_hsotg_req), flags); 410 if (!req) 411 return NULL; 412 413 INIT_LIST_HEAD(&req->queue); 414 415 return &req->req; 416} 417 418/** 419 * is_ep_periodic - return true if the endpoint is in periodic mode. 420 * @hs_ep: The endpoint to query. 421 * 422 * Returns true if the endpoint is in periodic mode, meaning it is being 423 * used for an Interrupt or ISO transfer. 424 */ 425static inline int is_ep_periodic(struct s3c_hsotg_ep *hs_ep) 426{ 427 return hs_ep->periodic; 428} 429 430/** 431 * s3c_hsotg_unmap_dma - unmap the DMA memory being used for the request 432 * @hsotg: The device state. 433 * @hs_ep: The endpoint for the request 434 * @hs_req: The request being processed. 435 * 436 * This is the reverse of s3c_hsotg_map_dma(), called for the completion 437 * of a request to ensure the buffer is ready for access by the caller. 438 */ 439static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg, 440 struct s3c_hsotg_ep *hs_ep, 441 struct s3c_hsotg_req *hs_req) 442{ 443 struct usb_request *req = &hs_req->req; 444 445 /* ignore this if we're not moving any data */ 446 if (hs_req->req.length == 0) 447 return; 448 449 usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in); 450} 451 452/** 453 * s3c_hsotg_write_fifo - write packet Data to the TxFIFO 454 * @hsotg: The controller state. 455 * @hs_ep: The endpoint we're going to write for. 456 * @hs_req: The request to write data for. 457 * 458 * This is called when the TxFIFO has some space in it to hold a new 459 * transmission and we have something to give it. The actual setup of 460 * the data size is done elsewhere, so all we have to do is to actually 461 * write the data. 
462 * 463 * The return value is zero if there is more space (or nothing was done) 464 * otherwise -ENOSPC is returned if the FIFO space was used up. 465 * 466 * This routine is only needed for PIO 467 */ 468static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg, 469 struct s3c_hsotg_ep *hs_ep, 470 struct s3c_hsotg_req *hs_req) 471{ 472 bool periodic = is_ep_periodic(hs_ep); 473 u32 gnptxsts = readl(hsotg->regs + GNPTXSTS); 474 int buf_pos = hs_req->req.actual; 475 int to_write = hs_ep->size_loaded; 476 void *data; 477 int can_write; 478 int pkt_round; 479 int max_transfer; 480 481 to_write -= (buf_pos - hs_ep->last_load); 482 483 /* if there's nothing to write, get out early */ 484 if (to_write == 0) 485 return 0; 486 487 if (periodic && !hsotg->dedicated_fifos) { 488 u32 epsize = readl(hsotg->regs + DIEPTSIZ(hs_ep->index)); 489 int size_left; 490 int size_done; 491 492 /* 493 * work out how much data was loaded so we can calculate 494 * how much data is left in the fifo. 495 */ 496 497 size_left = DXEPTSIZ_XFERSIZE_GET(epsize); 498 499 /* 500 * if shared fifo, we cannot write anything until the 501 * previous data has been completely sent. 
502 */ 503 if (hs_ep->fifo_load != 0) { 504 s3c_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP); 505 return -ENOSPC; 506 } 507 508 dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n", 509 __func__, size_left, 510 hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size); 511 512 /* how much of the data has moved */ 513 size_done = hs_ep->size_loaded - size_left; 514 515 /* how much data is left in the fifo */ 516 can_write = hs_ep->fifo_load - size_done; 517 dev_dbg(hsotg->dev, "%s: => can_write1=%d\n", 518 __func__, can_write); 519 520 can_write = hs_ep->fifo_size - can_write; 521 dev_dbg(hsotg->dev, "%s: => can_write2=%d\n", 522 __func__, can_write); 523 524 if (can_write <= 0) { 525 s3c_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP); 526 return -ENOSPC; 527 } 528 } else if (hsotg->dedicated_fifos && hs_ep->index != 0) { 529 can_write = readl(hsotg->regs + DTXFSTS(hs_ep->index)); 530 531 can_write &= 0xffff; 532 can_write *= 4; 533 } else { 534 if (GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(gnptxsts) == 0) { 535 dev_dbg(hsotg->dev, 536 "%s: no queue slots available (0x%08x)\n", 537 __func__, gnptxsts); 538 539 s3c_hsotg_en_gsint(hsotg, GINTSTS_NPTXFEMP); 540 return -ENOSPC; 541 } 542 543 can_write = GNPTXSTS_NP_TXF_SPC_AVAIL_GET(gnptxsts); 544 can_write *= 4; /* fifo size is in 32bit quantities. */ 545 } 546 547 max_transfer = hs_ep->ep.maxpacket * hs_ep->mc; 548 549 dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n", 550 __func__, gnptxsts, can_write, to_write, max_transfer); 551 552 /* 553 * limit to 512 bytes of data, it seems at least on the non-periodic 554 * FIFO, requests of >512 cause the endpoint to get stuck with a 555 * fragment of the end of the transfer in it. 556 */ 557 if (can_write > 512 && !periodic) 558 can_write = 512; 559 560 /* 561 * limit the write to one max-packet size worth of data, but allow 562 * the transfer to return that it did not run out of fifo space 563 * doing it. 
564 */ 565 if (to_write > max_transfer) { 566 to_write = max_transfer; 567 568 /* it's needed only when we do not use dedicated fifos */ 569 if (!hsotg->dedicated_fifos) 570 s3c_hsotg_en_gsint(hsotg, 571 periodic ? GINTSTS_PTXFEMP : 572 GINTSTS_NPTXFEMP); 573 } 574 575 /* see if we can write data */ 576 577 if (to_write > can_write) { 578 to_write = can_write; 579 pkt_round = to_write % max_transfer; 580 581 /* 582 * Round the write down to an 583 * exact number of packets. 584 * 585 * Note, we do not currently check to see if we can ever 586 * write a full packet or not to the FIFO. 587 */ 588 589 if (pkt_round) 590 to_write -= pkt_round; 591 592 /* 593 * enable correct FIFO interrupt to alert us when there 594 * is more room left. 595 */ 596 597 /* it's needed only when we do not use dedicated fifos */ 598 if (!hsotg->dedicated_fifos) 599 s3c_hsotg_en_gsint(hsotg, 600 periodic ? GINTSTS_PTXFEMP : 601 GINTSTS_NPTXFEMP); 602 } 603 604 dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n", 605 to_write, hs_req->req.length, can_write, buf_pos); 606 607 if (to_write <= 0) 608 return -ENOSPC; 609 610 hs_req->req.actual = buf_pos + to_write; 611 hs_ep->total_data += to_write; 612 613 if (periodic) 614 hs_ep->fifo_load += to_write; 615 616 to_write = DIV_ROUND_UP(to_write, 4); 617 data = hs_req->req.buf + buf_pos; 618 619 iowrite32_rep(hsotg->regs + EPFIFO(hs_ep->index), data, to_write); 620 621 return (to_write >= can_write) ? -ENOSPC : 0; 622} 623 624/** 625 * get_ep_limit - get the maximum data legnth for this endpoint 626 * @hs_ep: The endpoint 627 * 628 * Return the maximum data that can be queued in one go on a given endpoint 629 * so that transfers that are too long can be split. 
630 */ 631static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep) 632{ 633 int index = hs_ep->index; 634 unsigned maxsize; 635 unsigned maxpkt; 636 637 if (index != 0) { 638 maxsize = DXEPTSIZ_XFERSIZE_LIMIT + 1; 639 maxpkt = DXEPTSIZ_PKTCNT_LIMIT + 1; 640 } else { 641 maxsize = 64+64; 642 if (hs_ep->dir_in) 643 maxpkt = DIEPTSIZ0_PKTCNT_LIMIT + 1; 644 else 645 maxpkt = 2; 646 } 647 648 /* we made the constant loading easier above by using +1 */ 649 maxpkt--; 650 maxsize--; 651 652 /* 653 * constrain by packet count if maxpkts*pktsize is greater 654 * than the length register size. 655 */ 656 657 if ((maxpkt * hs_ep->ep.maxpacket) < maxsize) 658 maxsize = maxpkt * hs_ep->ep.maxpacket; 659 660 return maxsize; 661} 662 663/** 664 * s3c_hsotg_start_req - start a USB request from an endpoint's queue 665 * @hsotg: The controller state. 666 * @hs_ep: The endpoint to process a request for 667 * @hs_req: The request to start. 668 * @continuing: True if we are doing more for the current request. 669 * 670 * Start the given request running by setting the endpoint registers 671 * appropriately, and writing any data to the FIFOs. 672 */ 673static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg, 674 struct s3c_hsotg_ep *hs_ep, 675 struct s3c_hsotg_req *hs_req, 676 bool continuing) 677{ 678 struct usb_request *ureq = &hs_req->req; 679 int index = hs_ep->index; 680 int dir_in = hs_ep->dir_in; 681 u32 epctrl_reg; 682 u32 epsize_reg; 683 u32 epsize; 684 u32 ctrl; 685 unsigned length; 686 unsigned packets; 687 unsigned maxreq; 688 689 if (index != 0) { 690 if (hs_ep->req && !continuing) { 691 dev_err(hsotg->dev, "%s: active request\n", __func__); 692 WARN_ON(1); 693 return; 694 } else if (hs_ep->req != hs_req && continuing) { 695 dev_err(hsotg->dev, 696 "%s: continue different req\n", __func__); 697 WARN_ON(1); 698 return; 699 } 700 } 701 702 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index); 703 epsize_reg = dir_in ? 
DIEPTSIZ(index) : DOEPTSIZ(index); 704 705 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n", 706 __func__, readl(hsotg->regs + epctrl_reg), index, 707 hs_ep->dir_in ? "in" : "out"); 708 709 /* If endpoint is stalled, we will restart request later */ 710 ctrl = readl(hsotg->regs + epctrl_reg); 711 712 if (ctrl & DXEPCTL_STALL) { 713 dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index); 714 return; 715 } 716 717 length = ureq->length - ureq->actual; 718 dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n", 719 ureq->length, ureq->actual); 720 if (0) 721 dev_dbg(hsotg->dev, 722 "REQ buf %p len %d dma 0x%pad noi=%d zp=%d snok=%d\n", 723 ureq->buf, length, &ureq->dma, 724 ureq->no_interrupt, ureq->zero, ureq->short_not_ok); 725 726 maxreq = get_ep_limit(hs_ep); 727 if (length > maxreq) { 728 int round = maxreq % hs_ep->ep.maxpacket; 729 730 dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n", 731 __func__, length, maxreq, round); 732 733 /* round down to multiple of packets */ 734 if (round) 735 maxreq -= round; 736 737 length = maxreq; 738 } 739 740 if (length) 741 packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket); 742 else 743 packets = 1; /* send one packet if length is zero. 
*/ 744 745 if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) { 746 dev_err(hsotg->dev, "req length > maxpacket*mc\n"); 747 return; 748 } 749 750 if (dir_in && index != 0) 751 if (hs_ep->isochronous) 752 epsize = DXEPTSIZ_MC(packets); 753 else 754 epsize = DXEPTSIZ_MC(1); 755 else 756 epsize = 0; 757 758 if (index != 0 && ureq->zero) { 759 /* 760 * test for the packets being exactly right for the 761 * transfer 762 */ 763 764 if (length == (packets * hs_ep->ep.maxpacket)) 765 packets++; 766 } 767 768 epsize |= DXEPTSIZ_PKTCNT(packets); 769 epsize |= DXEPTSIZ_XFERSIZE(length); 770 771 dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n", 772 __func__, packets, length, ureq->length, epsize, epsize_reg); 773 774 /* store the request as the current one we're doing */ 775 hs_ep->req = hs_req; 776 777 /* write size / packets */ 778 writel(epsize, hsotg->regs + epsize_reg); 779 780 if (using_dma(hsotg) && !continuing) { 781 unsigned int dma_reg; 782 783 /* 784 * write DMA address to control register, buffer already 785 * synced by s3c_hsotg_ep_queue(). 786 */ 787 788 dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index); 789 writel(ureq->dma, hsotg->regs + dma_reg); 790 791 dev_dbg(hsotg->dev, "%s: 0x%pad => 0x%08x\n", 792 __func__, &ureq->dma, dma_reg); 793 } 794 795 ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */ 796 ctrl |= DXEPCTL_USBACTEP; 797 798 dev_dbg(hsotg->dev, "setup req:%d\n", hsotg->setup); 799 800 /* For Setup request do not clear NAK */ 801 if (hsotg->setup && index == 0) 802 hsotg->setup = 0; 803 else 804 ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */ 805 806 807 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl); 808 writel(ctrl, hsotg->regs + epctrl_reg); 809 810 /* 811 * set these, it seems that DMA support increments past the end 812 * of the packet buffer so we need to calculate the length from 813 * this information. 
814 */ 815 hs_ep->size_loaded = length; 816 hs_ep->last_load = ureq->actual; 817 818 if (dir_in && !using_dma(hsotg)) { 819 /* set these anyway, we may need them for non-periodic in */ 820 hs_ep->fifo_load = 0; 821 822 s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req); 823 } 824 825 /* 826 * clear the INTknTXFEmpMsk when we start request, more as a aide 827 * to debugging to see what is going on. 828 */ 829 if (dir_in) 830 writel(DIEPMSK_INTKNTXFEMPMSK, 831 hsotg->regs + DIEPINT(index)); 832 833 /* 834 * Note, trying to clear the NAK here causes problems with transmit 835 * on the S3C6400 ending up with the TXFIFO becoming full. 836 */ 837 838 /* check ep is enabled */ 839 if (!(readl(hsotg->regs + epctrl_reg) & DXEPCTL_EPENA)) 840 dev_warn(hsotg->dev, 841 "ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n", 842 index, readl(hsotg->regs + epctrl_reg)); 843 844 dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n", 845 __func__, readl(hsotg->regs + epctrl_reg)); 846 847 /* enable ep interrupts */ 848 s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1); 849} 850 851/** 852 * s3c_hsotg_map_dma - map the DMA memory being used for the request 853 * @hsotg: The device state. 854 * @hs_ep: The endpoint the request is on. 855 * @req: The request being processed. 856 * 857 * We've been asked to queue a request, so ensure that the memory buffer 858 * is correctly setup for DMA. If we've been passed an extant DMA address 859 * then ensure the buffer has been synced to memory. If our buffer has no 860 * DMA memory, then we map the memory and mark our request to allow us to 861 * cleanup on completion. 
862 */ 863static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg, 864 struct s3c_hsotg_ep *hs_ep, 865 struct usb_request *req) 866{ 867 struct s3c_hsotg_req *hs_req = our_req(req); 868 int ret; 869 870 /* if the length is zero, ignore the DMA data */ 871 if (hs_req->req.length == 0) 872 return 0; 873 874 ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in); 875 if (ret) 876 goto dma_error; 877 878 return 0; 879 880dma_error: 881 dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n", 882 __func__, req->buf, req->length); 883 884 return -EIO; 885} 886 887static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req, 888 gfp_t gfp_flags) 889{ 890 struct s3c_hsotg_req *hs_req = our_req(req); 891 struct s3c_hsotg_ep *hs_ep = our_ep(ep); 892 struct s3c_hsotg *hs = hs_ep->parent; 893 bool first; 894 895 dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n", 896 ep->name, req, req->length, req->buf, req->no_interrupt, 897 req->zero, req->short_not_ok); 898 899 /* initialise status of the request */ 900 INIT_LIST_HEAD(&hs_req->queue); 901 req->actual = 0; 902 req->status = -EINPROGRESS; 903 904 /* if we're using DMA, sync the buffers as necessary */ 905 if (using_dma(hs)) { 906 int ret = s3c_hsotg_map_dma(hs, hs_ep, req); 907 if (ret) 908 return ret; 909 } 910 911 first = list_empty(&hs_ep->queue); 912 list_add_tail(&hs_req->queue, &hs_ep->queue); 913 914 if (first) 915 s3c_hsotg_start_req(hs, hs_ep, hs_req, false); 916 917 return 0; 918} 919 920static int s3c_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req, 921 gfp_t gfp_flags) 922{ 923 struct s3c_hsotg_ep *hs_ep = our_ep(ep); 924 struct s3c_hsotg *hs = hs_ep->parent; 925 unsigned long flags = 0; 926 int ret = 0; 927 928 spin_lock_irqsave(&hs->lock, flags); 929 ret = s3c_hsotg_ep_queue(ep, req, gfp_flags); 930 spin_unlock_irqrestore(&hs->lock, flags); 931 932 return ret; 933} 934 935static void s3c_hsotg_ep_free_request(struct usb_ep *ep, 936 struct usb_request *req) 
937{ 938 struct s3c_hsotg_req *hs_req = our_req(req); 939 940 kfree(hs_req); 941} 942 943/** 944 * s3c_hsotg_complete_oursetup - setup completion callback 945 * @ep: The endpoint the request was on. 946 * @req: The request completed. 947 * 948 * Called on completion of any requests the driver itself 949 * submitted that need cleaning up. 950 */ 951static void s3c_hsotg_complete_oursetup(struct usb_ep *ep, 952 struct usb_request *req) 953{ 954 struct s3c_hsotg_ep *hs_ep = our_ep(ep); 955 struct s3c_hsotg *hsotg = hs_ep->parent; 956 957 dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req); 958 959 s3c_hsotg_ep_free_request(ep, req); 960} 961 962/** 963 * ep_from_windex - convert control wIndex value to endpoint 964 * @hsotg: The driver state. 965 * @windex: The control request wIndex field (in host order). 966 * 967 * Convert the given wIndex into a pointer to an driver endpoint 968 * structure, or return NULL if it is not a valid endpoint. 969 */ 970static struct s3c_hsotg_ep *ep_from_windex(struct s3c_hsotg *hsotg, 971 u32 windex) 972{ 973 struct s3c_hsotg_ep *ep = &hsotg->eps[windex & 0x7F]; 974 int dir = (windex & USB_DIR_IN) ? 1 : 0; 975 int idx = windex & 0x7F; 976 977 if (windex >= 0x100) 978 return NULL; 979 980 if (idx > hsotg->num_of_eps) 981 return NULL; 982 983 if (idx && ep->dir_in != dir) 984 return NULL; 985 986 return ep; 987} 988 989/** 990 * s3c_hsotg_send_reply - send reply to control request 991 * @hsotg: The device state 992 * @ep: Endpoint 0 993 * @buff: Buffer for request 994 * @length: Length of reply. 995 * 996 * Create a request and queue it on the given endpoint. This is useful as 997 * an internal method of sending replies to certain control requests, etc. 
998 */ 999static int s3c_hsotg_send_reply(struct s3c_hsotg *hsotg, 1000 struct s3c_hsotg_ep *ep, 1001 void *buff, 1002 int length) 1003{ 1004 struct usb_request *req; 1005 int ret; 1006 1007 dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length); 1008 1009 req = s3c_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC); 1010 hsotg->ep0_reply = req; 1011 if (!req) { 1012 dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__); 1013 return -ENOMEM; 1014 } 1015 1016 req->buf = hsotg->ep0_buff; 1017 req->length = length; 1018 req->zero = 1; /* always do zero-length final transfer */ 1019 req->complete = s3c_hsotg_complete_oursetup; 1020 1021 if (length) 1022 memcpy(req->buf, buff, length); 1023 else 1024 ep->sent_zlp = 1; 1025 1026 ret = s3c_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC); 1027 if (ret) { 1028 dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__); 1029 return ret; 1030 } 1031 1032 return 0; 1033} 1034 1035/** 1036 * s3c_hsotg_process_req_status - process request GET_STATUS 1037 * @hsotg: The device state 1038 * @ctrl: USB control request 1039 */ 1040static int s3c_hsotg_process_req_status(struct s3c_hsotg *hsotg, 1041 struct usb_ctrlrequest *ctrl) 1042{ 1043 struct s3c_hsotg_ep *ep0 = &hsotg->eps[0]; 1044 struct s3c_hsotg_ep *ep; 1045 __le16 reply; 1046 int ret; 1047 1048 dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__); 1049 1050 if (!ep0->dir_in) { 1051 dev_warn(hsotg->dev, "%s: direction out?\n", __func__); 1052 return -EINVAL; 1053 } 1054 1055 switch (ctrl->bRequestType & USB_RECIP_MASK) { 1056 case USB_RECIP_DEVICE: 1057 reply = cpu_to_le16(0); /* bit 0 => self powered, 1058 * bit 1 => remote wakeup */ 1059 break; 1060 1061 case USB_RECIP_INTERFACE: 1062 /* currently, the data result should be zero */ 1063 reply = cpu_to_le16(0); 1064 break; 1065 1066 case USB_RECIP_ENDPOINT: 1067 ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex)); 1068 if (!ep) 1069 return -ENOENT; 1070 1071 reply = cpu_to_le16(ep->halted ? 
1 : 0); 1072 break; 1073 1074 default: 1075 return 0; 1076 } 1077 1078 if (le16_to_cpu(ctrl->wLength) != 2) 1079 return -EINVAL; 1080 1081 ret = s3c_hsotg_send_reply(hsotg, ep0, &reply, 2); 1082 if (ret) { 1083 dev_err(hsotg->dev, "%s: failed to send reply\n", __func__); 1084 return ret; 1085 } 1086 1087 return 1; 1088} 1089 1090static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value); 1091 1092/** 1093 * get_ep_head - return the first request on the endpoint 1094 * @hs_ep: The controller endpoint to get 1095 * 1096 * Get the first request on the endpoint. 1097 */ 1098static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep) 1099{ 1100 if (list_empty(&hs_ep->queue)) 1101 return NULL; 1102 1103 return list_first_entry(&hs_ep->queue, struct s3c_hsotg_req, queue); 1104} 1105 1106/** 1107 * s3c_hsotg_process_req_featire - process request {SET,CLEAR}_FEATURE 1108 * @hsotg: The device state 1109 * @ctrl: USB control request 1110 */ 1111static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg, 1112 struct usb_ctrlrequest *ctrl) 1113{ 1114 struct s3c_hsotg_ep *ep0 = &hsotg->eps[0]; 1115 struct s3c_hsotg_req *hs_req; 1116 bool restart; 1117 bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE); 1118 struct s3c_hsotg_ep *ep; 1119 int ret; 1120 bool halted; 1121 1122 dev_dbg(hsotg->dev, "%s: %s_FEATURE\n", 1123 __func__, set ? 
"SET" : "CLEAR"); 1124 1125 if (ctrl->bRequestType == USB_RECIP_ENDPOINT) { 1126 ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex)); 1127 if (!ep) { 1128 dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n", 1129 __func__, le16_to_cpu(ctrl->wIndex)); 1130 return -ENOENT; 1131 } 1132 1133 switch (le16_to_cpu(ctrl->wValue)) { 1134 case USB_ENDPOINT_HALT: 1135 halted = ep->halted; 1136 1137 s3c_hsotg_ep_sethalt(&ep->ep, set); 1138 1139 ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0); 1140 if (ret) { 1141 dev_err(hsotg->dev, 1142 "%s: failed to send reply\n", __func__); 1143 return ret; 1144 } 1145 1146 /* 1147 * we have to complete all requests for ep if it was 1148 * halted, and the halt was cleared by CLEAR_FEATURE 1149 */ 1150 1151 if (!set && halted) { 1152 /* 1153 * If we have request in progress, 1154 * then complete it 1155 */ 1156 if (ep->req) { 1157 hs_req = ep->req; 1158 ep->req = NULL; 1159 list_del_init(&hs_req->queue); 1160 hs_req->req.complete(&ep->ep, 1161 &hs_req->req); 1162 } 1163 1164 /* If we have pending request, then start it */ 1165 restart = !list_empty(&ep->queue); 1166 if (restart) { 1167 hs_req = get_ep_head(ep); 1168 s3c_hsotg_start_req(hsotg, ep, 1169 hs_req, false); 1170 } 1171 } 1172 1173 break; 1174 1175 default: 1176 return -ENOENT; 1177 } 1178 } else 1179 return -ENOENT; /* currently only deal with endpoint */ 1180 1181 return 1; 1182} 1183 1184static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg); 1185static void s3c_hsotg_disconnect(struct s3c_hsotg *hsotg); 1186 1187/** 1188 * s3c_hsotg_stall_ep0 - stall ep0 1189 * @hsotg: The device state 1190 * 1191 * Set stall for ep0 as response for setup request. 1192 */ 1193static void s3c_hsotg_stall_ep0(struct s3c_hsotg *hsotg) { 1194 struct s3c_hsotg_ep *ep0 = &hsotg->eps[0]; 1195 u32 reg; 1196 u32 ctrl; 1197 1198 dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in); 1199 reg = (ep0->dir_in) ? 
DIEPCTL0 : DOEPCTL0; 1200 1201 /* 1202 * DxEPCTL_Stall will be cleared by EP once it has 1203 * taken effect, so no need to clear later. 1204 */ 1205 1206 ctrl = readl(hsotg->regs + reg); 1207 ctrl |= DXEPCTL_STALL; 1208 ctrl |= DXEPCTL_CNAK; 1209 writel(ctrl, hsotg->regs + reg); 1210 1211 dev_dbg(hsotg->dev, 1212 "written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n", 1213 ctrl, reg, readl(hsotg->regs + reg)); 1214 1215 /* 1216 * complete won't be called, so we enqueue 1217 * setup request here 1218 */ 1219 s3c_hsotg_enqueue_setup(hsotg); 1220} 1221 1222/** 1223 * s3c_hsotg_process_control - process a control request 1224 * @hsotg: The device state 1225 * @ctrl: The control request received 1226 * 1227 * The controller has received the SETUP phase of a control request, and 1228 * needs to work out what to do next (and whether to pass it on to the 1229 * gadget driver). 1230 */ 1231static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg, 1232 struct usb_ctrlrequest *ctrl) 1233{ 1234 struct s3c_hsotg_ep *ep0 = &hsotg->eps[0]; 1235 int ret = 0; 1236 u32 dcfg; 1237 1238 ep0->sent_zlp = 0; 1239 1240 dev_dbg(hsotg->dev, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n", 1241 ctrl->bRequest, ctrl->bRequestType, 1242 ctrl->wValue, ctrl->wLength); 1243 1244 /* 1245 * record the direction of the request, for later use when enquing 1246 * packets onto EP0. 1247 */ 1248 1249 ep0->dir_in = (ctrl->bRequestType & USB_DIR_IN) ? 1 : 0; 1250 dev_dbg(hsotg->dev, "ctrl: dir_in=%d\n", ep0->dir_in); 1251 1252 /* 1253 * if we've no data with this request, then the last part of the 1254 * transaction is going to implicitly be IN. 
1255 */ 1256 if (ctrl->wLength == 0) 1257 ep0->dir_in = 1; 1258 1259 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { 1260 switch (ctrl->bRequest) { 1261 case USB_REQ_SET_ADDRESS: 1262 s3c_hsotg_disconnect(hsotg); 1263 dcfg = readl(hsotg->regs + DCFG); 1264 dcfg &= ~DCFG_DEVADDR_MASK; 1265 dcfg |= ctrl->wValue << DCFG_DEVADDR_SHIFT; 1266 writel(dcfg, hsotg->regs + DCFG); 1267 1268 dev_info(hsotg->dev, "new address %d\n", ctrl->wValue); 1269 1270 ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0); 1271 return; 1272 1273 case USB_REQ_GET_STATUS: 1274 ret = s3c_hsotg_process_req_status(hsotg, ctrl); 1275 break; 1276 1277 case USB_REQ_CLEAR_FEATURE: 1278 case USB_REQ_SET_FEATURE: 1279 ret = s3c_hsotg_process_req_feature(hsotg, ctrl); 1280 break; 1281 } 1282 } 1283 1284 /* as a fallback, try delivering it to the driver to deal with */ 1285 1286 if (ret == 0 && hsotg->driver) { 1287 spin_unlock(&hsotg->lock); 1288 ret = hsotg->driver->setup(&hsotg->gadget, ctrl); 1289 spin_lock(&hsotg->lock); 1290 if (ret < 0) 1291 dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret); 1292 } 1293 1294 /* 1295 * the request is either unhandlable, or is not formatted correctly 1296 * so respond with a STALL for the status stage to indicate failure. 1297 */ 1298 1299 if (ret < 0) 1300 s3c_hsotg_stall_ep0(hsotg); 1301} 1302 1303/** 1304 * s3c_hsotg_complete_setup - completion of a setup transfer 1305 * @ep: The endpoint the request was on. 1306 * @req: The request completed. 
 *
 * Called on completion of any requests the driver itself submitted for
 * EP0 setup packets
 */
static void s3c_hsotg_complete_setup(struct usb_ep *ep,
				     struct usb_request *req)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hsotg = hs_ep->parent;

	if (req->status < 0) {
		dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
		return;
	}

	spin_lock(&hsotg->lock);
	if (req->actual == 0)
		/* nothing received: just re-arm for the next SETUP packet */
		s3c_hsotg_enqueue_setup(hsotg);
	else
		s3c_hsotg_process_control(hsotg, req->buf);
	spin_unlock(&hsotg->lock);
}

/**
 * s3c_hsotg_enqueue_setup - start a request for EP0 packets
 * @hsotg: The device state.
 *
 * Enqueue a request on EP0 if necessary to receive any SETUP packets
 * from the host.
 */
static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg)
{
	struct usb_request *req = hsotg->ctrl_req;
	struct s3c_hsotg_req *hs_req = our_req(req);
	int ret;

	dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);

	req->zero = 0;
	req->length = 8;	/* a SETUP packet is always 8 bytes */
	req->buf = hsotg->ctrl_buff;
	req->complete = s3c_hsotg_complete_setup;

	if (!list_empty(&hs_req->queue)) {
		dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
		return;
	}

	/* SETUP packets always arrive on the OUT direction of ep0 */
	hsotg->eps[0].dir_in = 0;

	ret = s3c_hsotg_ep_queue(&hsotg->eps[0].ep, req, GFP_ATOMIC);
	if (ret < 0) {
		dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
		/*
		 * Don't think there's much we can do other than watch the
		 * driver fail.
		 */
	}
}

/**
 * s3c_hsotg_complete_request - complete a request given to us
 * @hsotg: The device state.
 * @hs_ep: The endpoint the request was on.
 * @hs_req: The request to complete.
 * @result: The result code (0 => Ok, otherwise errno)
 *
 * The given request has finished, so call the necessary completion
 * if it has one and then look to see if we can start a new request
 * on the endpoint.
 *
 * Note, expects the ep to already be locked as appropriate.
 */
static void s3c_hsotg_complete_request(struct s3c_hsotg *hsotg,
				       struct s3c_hsotg_ep *hs_ep,
				       struct s3c_hsotg_req *hs_req,
				       int result)
{
	bool restart;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
		return;
	}

	dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);

	/*
	 * only replace the status if we've not already set an error
	 * from a previous transaction
	 */

	if (hs_req->req.status == -EINPROGRESS)
		hs_req->req.status = result;

	/* detach the request from the endpoint before calling back */
	hs_ep->req = NULL;
	list_del_init(&hs_req->queue);

	if (using_dma(hsotg))
		s3c_hsotg_unmap_dma(hsotg, hs_ep, hs_req);

	/*
	 * call the complete request with the locks off, just in case the
	 * request tries to queue more work for this endpoint.
	 */

	if (hs_req->req.complete) {
		spin_unlock(&hsotg->lock);
		hs_req->req.complete(&hs_ep->ep, &hs_req->req);
		spin_lock(&hsotg->lock);
	}

	/*
	 * Look to see if there is anything else to do. Note, the completion
	 * of the previous request may have caused a new request to be started
	 * so be careful when doing this.
	 */

	if (!hs_ep->req && result >= 0) {
		restart = !list_empty(&hs_ep->queue);
		if (restart) {
			hs_req = get_ep_head(hs_ep);
			s3c_hsotg_start_req(hsotg, hs_ep, hs_req, false);
		}
	}
}

/**
 * s3c_hsotg_rx_data - receive data from the FIFO for an endpoint
 * @hsotg: The device state.
 * @ep_idx: The endpoint index for the data
 * @size: The size of data in the fifo, in bytes
 *
 * The FIFO status shows there is data to read from the FIFO for a given
 * endpoint, so sort out whether we need to read the data into a request
 * that has been made for that endpoint.
 */
static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
{
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep_idx];
	struct s3c_hsotg_req *hs_req = hs_ep->req;
	void __iomem *fifo = hsotg->regs + EPFIFO(ep_idx);
	int to_read;
	int max_req;
	int read_ptr;


	if (!hs_req) {
		u32 epctl = readl(hsotg->regs + DOEPCTL(ep_idx));
		int ptr;

		dev_warn(hsotg->dev,
			 "%s: FIFO %d bytes on ep%d but no req (DXEPCTl=0x%08x)\n",
			 __func__, size, ep_idx, epctl);

		/* dump the data from the FIFO, we've nothing we can do */
		for (ptr = 0; ptr < size; ptr += 4)
			(void)readl(fifo);

		return;
	}

	to_read = size;
	read_ptr = hs_req->req.actual;
	max_req = hs_req->req.length - read_ptr;

	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
		__func__, to_read, max_req, read_ptr, hs_req->req.length);

	if (to_read > max_req) {
		/*
		 * more data appeared than we were willing
		 * to deal with in this request.
		 */

		/* currently we don't deal this */
		WARN_ON_ONCE(1);
	}

	hs_ep->total_data += to_read;
	hs_req->req.actual += to_read;
	/* reads are whole 32bit words from the FIFO, so round up */
	to_read = DIV_ROUND_UP(to_read, 4);

	/*
	 * note, we might over-write the buffer end by 3 bytes depending on
	 * alignment of the data.
	 */
	ioread32_rep(fifo, hs_req->req.buf + read_ptr, to_read);
}

/**
 * s3c_hsotg_send_zlp - send zero-length packet on control endpoint
 * @hsotg: The device instance
 * @req: The request currently on this endpoint
 *
 * Generate a zero-length IN packet request for terminating a SETUP
 * transaction.
 *
 * Note, since we don't write any data to the TxFIFO, then it is
 * currently believed that we do not need to wait for any space in
 * the TxFIFO.
 */
static void s3c_hsotg_send_zlp(struct s3c_hsotg *hsotg,
			       struct s3c_hsotg_req *req)
{
	u32 ctrl;

	if (!req) {
		dev_warn(hsotg->dev, "%s: no request?\n", __func__);
		return;
	}

	if (req->req.length == 0) {
		/* request was already zero-length: just re-arm for SETUP */
		hsotg->eps[0].sent_zlp = 1;
		s3c_hsotg_enqueue_setup(hsotg);
		return;
	}

	hsotg->eps[0].dir_in = 1;
	hsotg->eps[0].sent_zlp = 1;

	dev_dbg(hsotg->dev, "sending zero-length packet\n");

	/* issue a zero-sized packet to terminate this */
	writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
	       DXEPTSIZ_XFERSIZE(0), hsotg->regs + DIEPTSIZ(0));

	ctrl = readl(hsotg->regs + DIEPCTL0);
	ctrl |= DXEPCTL_CNAK;  /* clear NAK set by core */
	ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
	ctrl |= DXEPCTL_USBACTEP;
	writel(ctrl, hsotg->regs + DIEPCTL0);
}

/**
 * s3c_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
 * @hsotg: The device instance
 * @epnum: The endpoint received from
 * @was_setup: Set if processing a SetupDone event.
 *
 * The RXFIFO has delivered an OutDone event, which means that the data
 * transfer for an OUT endpoint has been completed, either by a short
 * packet or by the finish of a transfer.
 */
static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg,
				     int epnum, bool was_setup)
{
	u32 epsize = readl(hsotg->regs + DOEPTSIZ(epnum));
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[epnum];
	struct s3c_hsotg_req *hs_req = hs_ep->req;
	/* note: address-of only, no dereference, so safe before NULL check */
	struct usb_request *req = &hs_req->req;
	unsigned size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
	int result = 0;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
		return;
	}

	if (using_dma(hsotg)) {
		unsigned size_done;

		/*
		 * Calculate the size of the transfer by checking how much
		 * is left in the endpoint size register and then working it
		 * out from the amount we loaded for the transfer.
		 *
		 * We need to do this as DMA pointers are always 32bit aligned
		 * so may overshoot/undershoot the transfer.
		 */

		size_done = hs_ep->size_loaded - size_left;
		size_done += hs_ep->last_load;

		req->actual = size_done;
	}

	/* if there is more request to do, schedule new transfer */
	if (req->actual < req->length && size_left == 0) {
		s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
		return;
	} else if (epnum == 0) {
		/*
		 * After was_setup = 1 =>
		 * set CNAK for non Setup requests
		 */
		hsotg->setup = was_setup ? 0 : 1;
	}

	if (req->actual < req->length && req->short_not_ok) {
		dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
			__func__, req->actual, req->length);

		/*
		 * todo - what should we return here? there's no one else
		 * even bothering to check the status.
		 */
	}

	if (epnum == 0) {
		/*
		 * Condition req->complete != s3c_hsotg_complete_setup says:
		 * send ZLP when we have an asynchronous request from gadget
		 */
		if (!was_setup && req->complete != s3c_hsotg_complete_setup)
			s3c_hsotg_send_zlp(hsotg, hs_req);
	}

	s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
}

/**
 * s3c_hsotg_read_frameno - read current frame number
 * @hsotg: The device instance
 *
 * Return the current frame number
 */
static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)
{
	u32 dsts;

	dsts = readl(hsotg->regs + DSTS);
	dsts &= DSTS_SOFFN_MASK;
	dsts >>= DSTS_SOFFN_SHIFT;

	return dsts;
}

/**
 * s3c_hsotg_handle_rx - RX FIFO has data
 * @hsotg: The device instance
 *
 * The IRQ handler has detected that the RX FIFO has some data in it
 * that requires processing, so find out what is in there and do the
 * appropriate read.
 *
 * The RXFIFO is a true FIFO, the packets coming out are still in packet
 * chunks, so if you have x packets received on an endpoint you'll get x
 * FIFO events delivered, each with a packet's worth of data in it.
 *
 * When using DMA, we should not be processing events from the RXFIFO
 * as the actual data should be sent to the memory directly and we turn
 * on the completion interrupts to get notifications of transfer completion.
1651 */ 1652static void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg) 1653{ 1654 u32 grxstsr = readl(hsotg->regs + GRXSTSP); 1655 u32 epnum, status, size; 1656 1657 WARN_ON(using_dma(hsotg)); 1658 1659 epnum = grxstsr & GRXSTS_EPNUM_MASK; 1660 status = grxstsr & GRXSTS_PKTSTS_MASK; 1661 1662 size = grxstsr & GRXSTS_BYTECNT_MASK; 1663 size >>= GRXSTS_BYTECNT_SHIFT; 1664 1665 if (1) 1666 dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n", 1667 __func__, grxstsr, size, epnum); 1668 1669 switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) { 1670 case GRXSTS_PKTSTS_GLOBALOUTNAK: 1671 dev_dbg(hsotg->dev, "GLOBALOUTNAK\n"); 1672 break; 1673 1674 case GRXSTS_PKTSTS_OUTDONE: 1675 dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n", 1676 s3c_hsotg_read_frameno(hsotg)); 1677 1678 if (!using_dma(hsotg)) 1679 s3c_hsotg_handle_outdone(hsotg, epnum, false); 1680 break; 1681 1682 case GRXSTS_PKTSTS_SETUPDONE: 1683 dev_dbg(hsotg->dev, 1684 "SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n", 1685 s3c_hsotg_read_frameno(hsotg), 1686 readl(hsotg->regs + DOEPCTL(0))); 1687 1688 s3c_hsotg_handle_outdone(hsotg, epnum, true); 1689 break; 1690 1691 case GRXSTS_PKTSTS_OUTRX: 1692 s3c_hsotg_rx_data(hsotg, epnum, size); 1693 break; 1694 1695 case GRXSTS_PKTSTS_SETUPRX: 1696 dev_dbg(hsotg->dev, 1697 "SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n", 1698 s3c_hsotg_read_frameno(hsotg), 1699 readl(hsotg->regs + DOEPCTL(0))); 1700 1701 s3c_hsotg_rx_data(hsotg, epnum, size); 1702 break; 1703 1704 default: 1705 dev_warn(hsotg->dev, "%s: unknown status %08x\n", 1706 __func__, grxstsr); 1707 1708 s3c_hsotg_dump(hsotg); 1709 break; 1710 } 1711} 1712 1713/** 1714 * s3c_hsotg_ep0_mps - turn max packet size into register setting 1715 * @mps: The maximum packet size in bytes. 
1716 */ 1717static u32 s3c_hsotg_ep0_mps(unsigned int mps) 1718{ 1719 switch (mps) { 1720 case 64: 1721 return D0EPCTL_MPS_64; 1722 case 32: 1723 return D0EPCTL_MPS_32; 1724 case 16: 1725 return D0EPCTL_MPS_16; 1726 case 8: 1727 return D0EPCTL_MPS_8; 1728 } 1729 1730 /* bad max packet size, warn and return invalid result */ 1731 WARN_ON(1); 1732 return (u32)-1; 1733} 1734 1735/** 1736 * s3c_hsotg_set_ep_maxpacket - set endpoint's max-packet field 1737 * @hsotg: The driver state. 1738 * @ep: The index number of the endpoint 1739 * @mps: The maximum packet size in bytes 1740 * 1741 * Configure the maximum packet size for the given endpoint, updating 1742 * the hardware control registers to reflect this. 1743 */ 1744static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg, 1745 unsigned int ep, unsigned int mps) 1746{ 1747 struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep]; 1748 void __iomem *regs = hsotg->regs; 1749 u32 mpsval; 1750 u32 mcval; 1751 u32 reg; 1752 1753 if (ep == 0) { 1754 /* EP0 is a special case */ 1755 mpsval = s3c_hsotg_ep0_mps(mps); 1756 if (mpsval > 3) 1757 goto bad_mps; 1758 hs_ep->ep.maxpacket = mps; 1759 hs_ep->mc = 1; 1760 } else { 1761 mpsval = mps & DXEPCTL_MPS_MASK; 1762 if (mpsval > 1024) 1763 goto bad_mps; 1764 mcval = ((mps >> 11) & 0x3) + 1; 1765 hs_ep->mc = mcval; 1766 if (mcval > 3) 1767 goto bad_mps; 1768 hs_ep->ep.maxpacket = mpsval; 1769 } 1770 1771 /* 1772 * update both the in and out endpoint controldir_ registers, even 1773 * if one of the directions may not be in use. 
1774 */ 1775 1776 reg = readl(regs + DIEPCTL(ep)); 1777 reg &= ~DXEPCTL_MPS_MASK; 1778 reg |= mpsval; 1779 writel(reg, regs + DIEPCTL(ep)); 1780 1781 if (ep) { 1782 reg = readl(regs + DOEPCTL(ep)); 1783 reg &= ~DXEPCTL_MPS_MASK; 1784 reg |= mpsval; 1785 writel(reg, regs + DOEPCTL(ep)); 1786 } 1787 1788 return; 1789 1790bad_mps: 1791 dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps); 1792} 1793 1794/** 1795 * s3c_hsotg_txfifo_flush - flush Tx FIFO 1796 * @hsotg: The driver state 1797 * @idx: The index for the endpoint (0..15) 1798 */ 1799static void s3c_hsotg_txfifo_flush(struct s3c_hsotg *hsotg, unsigned int idx) 1800{ 1801 int timeout; 1802 int val; 1803 1804 writel(GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH, 1805 hsotg->regs + GRSTCTL); 1806 1807 /* wait until the fifo is flushed */ 1808 timeout = 100; 1809 1810 while (1) { 1811 val = readl(hsotg->regs + GRSTCTL); 1812 1813 if ((val & (GRSTCTL_TXFFLSH)) == 0) 1814 break; 1815 1816 if (--timeout == 0) { 1817 dev_err(hsotg->dev, 1818 "%s: timeout flushing fifo (GRSTCTL=%08x)\n", 1819 __func__, val); 1820 } 1821 1822 udelay(1); 1823 } 1824} 1825 1826/** 1827 * s3c_hsotg_trytx - check to see if anything needs transmitting 1828 * @hsotg: The driver state 1829 * @hs_ep: The driver endpoint to check. 1830 * 1831 * Check to see if there is a request that has data to send, and if so 1832 * make an attempt to write data into the FIFO. 
 */
static int s3c_hsotg_trytx(struct s3c_hsotg *hsotg,
			   struct s3c_hsotg_ep *hs_ep)
{
	struct s3c_hsotg_req *hs_req = hs_ep->req;

	if (!hs_ep->dir_in || !hs_req) {
		/*
		 * if request is not enqueued, we disable interrupts
		 * for endpoints, excepting ep0
		 */
		if (hs_ep->index != 0)
			s3c_hsotg_ctrl_epint(hsotg, hs_ep->index,
					     hs_ep->dir_in, 0);
		return 0;
	}

	if (hs_req->req.actual < hs_req->req.length) {
		dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
			hs_ep->index);
		return s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
	}

	return 0;
}

/**
 * s3c_hsotg_complete_in - complete IN transfer
 * @hsotg: The device state.
 * @hs_ep: The endpoint that has just completed.
 *
 * An IN transfer has been completed, update the transfer's state and then
 * call the relevant completion routines.
 */
static void s3c_hsotg_complete_in(struct s3c_hsotg *hsotg,
				  struct s3c_hsotg_ep *hs_ep)
{
	struct s3c_hsotg_req *hs_req = hs_ep->req;
	u32 epsize = readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
	int size_left, size_done;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "XferCompl but no req\n");
		return;
	}

	/* Finish ZLP handling for IN EP0 transactions */
	if (hsotg->eps[0].sent_zlp) {
		dev_dbg(hsotg->dev, "zlp packet received\n");
		s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
		return;
	}

	/*
	 * Calculate the size of the transfer by checking how much is left
	 * in the endpoint size register and then working it out from
	 * the amount we loaded for the transfer.
	 *
	 * We do this even for DMA, as the transfer may have incremented
	 * past the end of the buffer (DMA transfers are always 32bit
	 * aligned).
	 */

	size_left = DXEPTSIZ_XFERSIZE_GET(epsize);

	size_done = hs_ep->size_loaded - size_left;
	size_done += hs_ep->last_load;

	if (hs_req->req.actual != size_done)
		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
			__func__, hs_req->req.actual, size_done);

	hs_req->req.actual = size_done;
	dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
		hs_req->req.length, hs_req->req.actual, hs_req->req.zero);

	/*
	 * Check if dealing with Maximum Packet Size(MPS) IN transfer at EP0
	 * When sent data is a multiple MPS size (e.g. 64B ,128B ,192B
	 * ,256B ... ), after last MPS sized packet send IN ZLP packet to
	 * inform the host that no more data is available.
	 * The state of req.zero member is checked to be sure that the value to
	 * send is smaller than wValue expected from host.
	 * Check req.length to NOT send another ZLP when the current one is
	 * under completion (the one for which this completion has been called).
	 */
	if (hs_req->req.length && hs_ep->index == 0 && hs_req->req.zero &&
	    hs_req->req.length == hs_req->req.actual &&
	    !(hs_req->req.length % hs_ep->ep.maxpacket)) {

		dev_dbg(hsotg->dev, "ep0 zlp IN packet sent\n");
		s3c_hsotg_send_zlp(hsotg, hs_req);

		return;
	}

	if (!size_left && hs_req->req.actual < hs_req->req.length) {
		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
		s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
	} else
		s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
}

/**
 * s3c_hsotg_epint - handle an in/out endpoint interrupt
 * @hsotg: The driver state
 * @idx: The index for the endpoint (0..15)
 * @dir_in: Set if this is an IN endpoint
 *
 * Process and clear any interrupt pending for an individual endpoint
 */
static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
			    int dir_in)
{
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[idx];
	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
	u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
	u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
	u32 ints;
	u32 ctrl;

	ints = readl(hsotg->regs + epint_reg);
	ctrl = readl(hsotg->regs + epctl_reg);

	/* Clear endpoint interrupts */
	writel(ints, hsotg->regs + epint_reg);

	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
		__func__, idx, dir_in ? "in" : "out", ints);

	if (ints & DXEPINT_XFERCOMPL) {
		/* high-bandwidth isoc: toggle even/odd frame for next xfer */
		if (hs_ep->isochronous && hs_ep->interval == 1) {
			if (ctrl & DXEPCTL_EOFRNUM)
				ctrl |= DXEPCTL_SETEVENFR;
			else
				ctrl |= DXEPCTL_SETODDFR;
			writel(ctrl, hsotg->regs + epctl_reg);
		}

		dev_dbg(hsotg->dev,
			"%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
			__func__, readl(hsotg->regs + epctl_reg),
			readl(hsotg->regs + epsiz_reg));

		/*
		 * we get OutDone from the FIFO, so we only need to look
		 * at completing IN requests here
		 */
		if (dir_in) {
			s3c_hsotg_complete_in(hsotg, hs_ep);

			if (idx == 0 && !hs_ep->req)
				s3c_hsotg_enqueue_setup(hsotg);
		} else if (using_dma(hsotg)) {
			/*
			 * We're using DMA, we need to fire an OutDone here
			 * as we ignore the RXFIFO.
			 */

			s3c_hsotg_handle_outdone(hsotg, idx, false);
		}
	}

	if (ints & DXEPINT_EPDISBLD) {
		dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);

		if (dir_in) {
			int epctl = readl(hsotg->regs + epctl_reg);

			s3c_hsotg_txfifo_flush(hsotg, idx);

			if ((epctl & DXEPCTL_STALL) &&
			    (epctl & DXEPCTL_EPTYPE_BULK)) {
				int dctl = readl(hsotg->regs + DCTL);

				/* clear global non-periodic IN NAK */
				dctl |= DCTL_CGNPINNAK;
				writel(dctl, hsotg->regs + DCTL);
			}
		}
	}

	if (ints & DXEPINT_AHBERR)
		dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);

	if (ints & DXEPINT_SETUP) {  /* Setup or Timeout */
		dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__);

		if (using_dma(hsotg) && idx == 0) {
			/*
			 * this is the notification we've received a
			 * setup packet. In non-DMA mode we'd get this
			 * from the RXFIFO, instead we need to process
			 * the setup here.
			 */

			if (dir_in)
				WARN_ON_ONCE(1);
			else
				s3c_hsotg_handle_outdone(hsotg, 0, true);
		}
	}

	if (ints & DXEPINT_BACK2BACKSETUP)
		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);

	if (dir_in && !hs_ep->isochronous) {
		/* not sure if this is important, but we'll clear it anyway */
		if (ints & DIEPMSK_INTKNTXFEMPMSK) {
			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
				__func__, idx);
		}

		/* this probably means something bad is happening */
		if (ints & DIEPMSK_INTKNEPMISMSK) {
			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
				 __func__, idx);
		}

		/* FIFO has space or is empty (see GAHBCFG) */
		if (hsotg->dedicated_fifos &&
		    ints & DIEPMSK_TXFIFOEMPTY) {
			dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
				__func__, idx);
			if (!using_dma(hsotg))
				s3c_hsotg_trytx(hsotg, hs_ep);
		}
	}
}

/**
 * s3c_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
 * @hsotg: The device state.
 *
 * Handle updating the device settings after the enumeration phase has
 * been completed.
 */
static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)
{
	u32 dsts = readl(hsotg->regs + DSTS);
	int ep0_mps = 0, ep_mps;

	/*
	 * This should signal the finish of the enumeration phase
	 * of the USB handshaking, so we should now know what rate
	 * we connected at.
	 */

	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);

	/*
	 * note, since we're limited by the size of transfer on EP0, and
	 * it seems IN transfers must be a even number of packets we do
	 * not advertise a 64byte MPS on EP0.
	 */

	/* catch both EnumSpd_FS and EnumSpd_FS48 */
	switch (dsts & DSTS_ENUMSPD_MASK) {
	case DSTS_ENUMSPD_FS:
	case DSTS_ENUMSPD_FS48:
		hsotg->gadget.speed = USB_SPEED_FULL;
		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 1023;
		break;

	case DSTS_ENUMSPD_HS:
		hsotg->gadget.speed = USB_SPEED_HIGH;
		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 1024;
		break;

	case DSTS_ENUMSPD_LS:
		hsotg->gadget.speed = USB_SPEED_LOW;
		/*
		 * note, we don't actually support LS in this driver at the
		 * moment, and the documentation seems to imply that it isn't
		 * supported by the PHYs on some of the devices.
		 * (ep_mps stays unset here but is guarded by ep0_mps == 0
		 * below, so it is never read.)
		 */
		break;
	}
	dev_info(hsotg->dev, "new device is %s\n",
		 usb_speed_string(hsotg->gadget.speed));

	/*
	 * we should now know the maximum packet size for an
	 * endpoint, so set the endpoints to a default value.
	 */

	if (ep0_mps) {
		int i;
		s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps);
		for (i = 1; i < hsotg->num_of_eps; i++)
			s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps);
	}

	/* ensure after enumeration our EP0 is active */

	s3c_hsotg_enqueue_setup(hsotg);

	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
		readl(hsotg->regs + DIEPCTL0),
		readl(hsotg->regs + DOEPCTL0));
}

/**
 * kill_all_requests - remove all requests from the endpoint's queue
 * @hsotg: The device state.
 * @ep: The endpoint the requests may be on.
 * @result: The result code to use.
 * @force: Force removal of any current requests
 *
 * Go through the requests on the given endpoint and mark them
 * completed with the given result code.
2145 */ 2146static void kill_all_requests(struct s3c_hsotg *hsotg, 2147 struct s3c_hsotg_ep *ep, 2148 int result, bool force) 2149{ 2150 struct s3c_hsotg_req *req, *treq; 2151 2152 list_for_each_entry_safe(req, treq, &ep->queue, queue) { 2153 /* 2154 * currently, we can't do much about an already 2155 * running request on an in endpoint 2156 */ 2157 2158 if (ep->req == req && ep->dir_in && !force) 2159 continue; 2160 2161 s3c_hsotg_complete_request(hsotg, ep, req, 2162 result); 2163 } 2164 if(hsotg->dedicated_fifos) 2165 if ((readl(hsotg->regs + DTXFSTS(ep->index)) & 0xffff) * 4 < 3072) 2166 s3c_hsotg_txfifo_flush(hsotg, ep->index); 2167} 2168 2169#define call_gadget(_hs, _entry) \ 2170do { \ 2171 if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN && \ 2172 (_hs)->driver && (_hs)->driver->_entry) { \ 2173 spin_unlock(&_hs->lock); \ 2174 (_hs)->driver->_entry(&(_hs)->gadget); \ 2175 spin_lock(&_hs->lock); \ 2176 } \ 2177} while (0) 2178 2179/** 2180 * s3c_hsotg_disconnect - disconnect service 2181 * @hsotg: The device state. 2182 * 2183 * The device has been disconnected. Remove all current 2184 * transactions and signal the gadget driver that this 2185 * has happened. 
 */
static void s3c_hsotg_disconnect(struct s3c_hsotg *hsotg)
{
	unsigned ep;

	/* fail every outstanding request on every endpoint */
	for (ep = 0; ep < hsotg->num_of_eps; ep++)
		kill_all_requests(hsotg, &hsotg->eps[ep], -ESHUTDOWN, true);

	call_gadget(hsotg, disconnect);
}

/**
 * s3c_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
 * @hsotg: The device state
 * @periodic: True if this is a periodic FIFO interrupt
 */
static void s3c_hsotg_irq_fifoempty(struct s3c_hsotg *hsotg, bool periodic)
{
	struct s3c_hsotg_ep *ep;
	int epno, ret;

	/* look through for any more data to transmit */

	for (epno = 0; epno < hsotg->num_of_eps; epno++) {
		ep = &hsotg->eps[epno];

		if (!ep->dir_in)
			continue;

		/* only service endpoints matching this interrupt's class */
		if ((periodic && !ep->periodic) ||
		    (!periodic && ep->periodic))
			continue;

		ret = s3c_hsotg_trytx(hsotg, ep);
		if (ret < 0)
			break;
	}
}

/* IRQ flags which will trigger a retry around the IRQ loop */
#define IRQ_RETRY_MASK (GINTSTS_NPTXFEMP | \
			GINTSTS_PTXFEMP | \
			GINTSTS_RXFLVL)

/**
 * s3c_hsotg_corereset - issue softreset to the core
 * @hsotg: The device state
 *
 * Issue a soft reset to the core, and await the core finishing it.
2235 */ 2236static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg) 2237{ 2238 int timeout; 2239 u32 grstctl; 2240 2241 dev_dbg(hsotg->dev, "resetting core\n"); 2242 2243 /* issue soft reset */ 2244 writel(GRSTCTL_CSFTRST, hsotg->regs + GRSTCTL); 2245 2246 timeout = 10000; 2247 do { 2248 grstctl = readl(hsotg->regs + GRSTCTL); 2249 } while ((grstctl & GRSTCTL_CSFTRST) && timeout-- > 0); 2250 2251 if (grstctl & GRSTCTL_CSFTRST) { 2252 dev_err(hsotg->dev, "Failed to get CSftRst asserted\n"); 2253 return -EINVAL; 2254 } 2255 2256 timeout = 10000; 2257 2258 while (1) { 2259 u32 grstctl = readl(hsotg->regs + GRSTCTL); 2260 2261 if (timeout-- < 0) { 2262 dev_info(hsotg->dev, 2263 "%s: reset failed, GRSTCTL=%08x\n", 2264 __func__, grstctl); 2265 return -ETIMEDOUT; 2266 } 2267 2268 if (!(grstctl & GRSTCTL_AHBIDLE)) 2269 continue; 2270 2271 break; /* reset done */ 2272 } 2273 2274 dev_dbg(hsotg->dev, "reset successful\n"); 2275 return 0; 2276} 2277 2278/** 2279 * s3c_hsotg_core_init - issue softreset to the core 2280 * @hsotg: The device state 2281 * 2282 * Issue a soft reset to the core, and await the core finishing it. 2283 */ 2284static void s3c_hsotg_core_init(struct s3c_hsotg *hsotg) 2285{ 2286 s3c_hsotg_corereset(hsotg); 2287 2288 /* 2289 * we must now enable ep0 ready for host detection and then 2290 * set configuration. 
2291 */ 2292 2293 /* set the PLL on, remove the HNP/SRP and set the PHY */ 2294 writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) | 2295 (0x5 << 10), hsotg->regs + GUSBCFG); 2296 2297 s3c_hsotg_init_fifo(hsotg); 2298 2299 __orr32(hsotg->regs + DCTL, DCTL_SFTDISCON); 2300 2301 writel(1 << 18 | DCFG_DEVSPD_HS, hsotg->regs + DCFG); 2302 2303 /* Clear any pending OTG interrupts */ 2304 writel(0xffffffff, hsotg->regs + GOTGINT); 2305 2306 /* Clear any pending interrupts */ 2307 writel(0xffffffff, hsotg->regs + GINTSTS); 2308 2309 writel(GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT | 2310 GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF | 2311 GINTSTS_CONIDSTSCHNG | GINTSTS_USBRST | 2312 GINTSTS_ENUMDONE | GINTSTS_OTGINT | 2313 GINTSTS_USBSUSP | GINTSTS_WKUPINT, 2314 hsotg->regs + GINTMSK); 2315 2316 if (using_dma(hsotg)) 2317 writel(GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN | 2318 GAHBCFG_HBSTLEN_INCR4, 2319 hsotg->regs + GAHBCFG); 2320 else 2321 writel(((hsotg->dedicated_fifos) ? (GAHBCFG_NP_TXF_EMP_LVL | 2322 GAHBCFG_P_TXF_EMP_LVL) : 0) | 2323 GAHBCFG_GLBL_INTR_EN, 2324 hsotg->regs + GAHBCFG); 2325 2326 /* 2327 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts 2328 * when we have no data to transfer. Otherwise we get being flooded by 2329 * interrupts. 2330 */ 2331 2332 writel(((hsotg->dedicated_fifos) ? DIEPMSK_TXFIFOEMPTY | 2333 DIEPMSK_INTKNTXFEMPMSK : 0) | 2334 DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK | 2335 DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK | 2336 DIEPMSK_INTKNEPMISMSK, 2337 hsotg->regs + DIEPMSK); 2338 2339 /* 2340 * don't need XferCompl, we get that from RXFIFO in slave mode. In 2341 * DMA mode we may need this. 2342 */ 2343 writel((using_dma(hsotg) ? 
(DIEPMSK_XFERCOMPLMSK | 2344 DIEPMSK_TIMEOUTMSK) : 0) | 2345 DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK | 2346 DOEPMSK_SETUPMSK, 2347 hsotg->regs + DOEPMSK); 2348 2349 writel(0, hsotg->regs + DAINTMSK); 2350 2351 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", 2352 readl(hsotg->regs + DIEPCTL0), 2353 readl(hsotg->regs + DOEPCTL0)); 2354 2355 /* enable in and out endpoint interrupts */ 2356 s3c_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT); 2357 2358 /* 2359 * Enable the RXFIFO when in slave mode, as this is how we collect 2360 * the data. In DMA mode, we get events from the FIFO but also 2361 * things we cannot process, so do not use it. 2362 */ 2363 if (!using_dma(hsotg)) 2364 s3c_hsotg_en_gsint(hsotg, GINTSTS_RXFLVL); 2365 2366 /* Enable interrupts for EP0 in and out */ 2367 s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1); 2368 s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1); 2369 2370 __orr32(hsotg->regs + DCTL, DCTL_PWRONPRGDONE); 2371 udelay(10); /* see openiboot */ 2372 __bic32(hsotg->regs + DCTL, DCTL_PWRONPRGDONE); 2373 2374 dev_dbg(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + DCTL)); 2375 2376 /* 2377 * DxEPCTL_USBActEp says RO in manual, but seems to be set by 2378 * writing to the EPCTL register.. 
2379 */ 2380 2381 /* set to read 1 8byte packet */ 2382 writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | 2383 DXEPTSIZ_XFERSIZE(8), hsotg->regs + DOEPTSIZ0); 2384 2385 writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) | 2386 DXEPCTL_CNAK | DXEPCTL_EPENA | 2387 DXEPCTL_USBACTEP, 2388 hsotg->regs + DOEPCTL0); 2389 2390 /* enable, but don't activate EP0in */ 2391 writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) | 2392 DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0); 2393 2394 s3c_hsotg_enqueue_setup(hsotg); 2395 2396 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", 2397 readl(hsotg->regs + DIEPCTL0), 2398 readl(hsotg->regs + DOEPCTL0)); 2399 2400 /* clear global NAKs */ 2401 writel(DCTL_CGOUTNAK | DCTL_CGNPINNAK, 2402 hsotg->regs + DCTL); 2403 2404 /* must be at-least 3ms to allow bus to see disconnect */ 2405 mdelay(3); 2406 2407 /* remove the soft-disconnect and let's go */ 2408 __bic32(hsotg->regs + DCTL, DCTL_SFTDISCON); 2409} 2410 2411/** 2412 * s3c_hsotg_irq - handle device interrupt 2413 * @irq: The IRQ number triggered 2414 * @pw: The pw value when registered the handler. 
 */
static irqreturn_t s3c_hsotg_irq(int irq, void *pw)
{
	struct s3c_hsotg *hsotg = pw;
	int retry_count = 8;
	u32 gintsts;
	u32 gintmsk;

	spin_lock(&hsotg->lock);
irq_retry:
	gintsts = readl(hsotg->regs + GINTSTS);
	gintmsk = readl(hsotg->regs + GINTMSK);

	dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
		__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);

	/* only act on interrupts which are currently unmasked */
	gintsts &= gintmsk;

	if (gintsts & GINTSTS_OTGINT) {
		u32 otgint = readl(hsotg->regs + GOTGINT);

		dev_info(hsotg->dev, "OTGInt: %08x\n", otgint);

		/* write-1-to-clear the OTG interrupt bits we just read */
		writel(otgint, hsotg->regs + GOTGINT);
	}

	if (gintsts & GINTSTS_SESSREQINT) {
		dev_dbg(hsotg->dev, "%s: SessReqInt\n", __func__);
		writel(GINTSTS_SESSREQINT, hsotg->regs + GINTSTS);
	}

	if (gintsts & GINTSTS_ENUMDONE) {
		writel(GINTSTS_ENUMDONE, hsotg->regs + GINTSTS);

		s3c_hsotg_irq_enumdone(hsotg);
	}

	if (gintsts & GINTSTS_CONIDSTSCHNG) {
		dev_dbg(hsotg->dev, "ConIDStsChg (DSTS=0x%08x, GOTCTL=%08x)\n",
			readl(hsotg->regs + DSTS),
			readl(hsotg->regs + GOTGCTL));

		writel(GINTSTS_CONIDSTSCHNG, hsotg->regs + GINTSTS);
	}

	if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) {
		u32 daint = readl(hsotg->regs + DAINT);
		u32 daintmsk = readl(hsotg->regs + DAINTMSK);
		u32 daint_out, daint_in;
		int ep;

		daint &= daintmsk;
		/* DAINT holds OUT endpoint bits above DAINT_OUTEP_SHIFT,
		 * IN endpoint bits below it; split before dispatching */
		daint_out = daint >> DAINT_OUTEP_SHIFT;
		daint_in = daint & ~(daint_out << DAINT_OUTEP_SHIFT);

		dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);

		for (ep = 0; ep < 15 && daint_out; ep++, daint_out >>= 1) {
			if (daint_out & 1)
				s3c_hsotg_epint(hsotg, ep, 0);
		}

		for (ep = 0; ep < 15 && daint_in; ep++, daint_in >>= 1) {
			if (daint_in & 1)
				s3c_hsotg_epint(hsotg, ep, 1);
		}
	}

	if (gintsts & GINTSTS_USBRST) {

		u32 usb_status = readl(hsotg->regs + GOTGCTL);

		dev_info(hsotg->dev, "%s: USBRst\n", __func__);
		dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
			readl(hsotg->regs + GNPTXSTS));

		writel(GINTSTS_USBRST, hsotg->regs + GINTSTS);

		if (usb_status & GOTGCTL_BSESVLD) {
			/* debounce: only re-init the core when the last
			 * reset was more than 200ms ago */
			if (time_after(jiffies, hsotg->last_rst +
				       msecs_to_jiffies(200))) {

				kill_all_requests(hsotg, &hsotg->eps[0],
						  -ECONNRESET, true);

				s3c_hsotg_core_init(hsotg);
				hsotg->last_rst = jiffies;
			}
		}
	}

	/* check both FIFOs */

	if (gintsts & GINTSTS_NPTXFEMP) {
		dev_dbg(hsotg->dev, "NPTxFEmp\n");

		/*
		 * Disable the interrupt to stop it happening again
		 * unless one of these endpoint routines decides that
		 * it needs re-enabling
		 */

		s3c_hsotg_disable_gsint(hsotg, GINTSTS_NPTXFEMP);
		s3c_hsotg_irq_fifoempty(hsotg, false);
	}

	if (gintsts & GINTSTS_PTXFEMP) {
		dev_dbg(hsotg->dev, "PTxFEmp\n");

		/* See note in GINTSTS_NPTxFEmp */

		s3c_hsotg_disable_gsint(hsotg, GINTSTS_PTXFEMP);
		s3c_hsotg_irq_fifoempty(hsotg, true);
	}

	if (gintsts & GINTSTS_RXFLVL) {
		/*
		 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
		 * we need to retry s3c_hsotg_handle_rx if this is still
		 * set.
		 */

		s3c_hsotg_handle_rx(hsotg);
	}

	if (gintsts & GINTSTS_MODEMIS) {
		dev_warn(hsotg->dev, "warning, mode mismatch triggered\n");
		writel(GINTSTS_MODEMIS, hsotg->regs + GINTSTS);
	}

	if (gintsts & GINTSTS_USBSUSP) {
		dev_info(hsotg->dev, "GINTSTS_USBSusp\n");
		writel(GINTSTS_USBSUSP, hsotg->regs + GINTSTS);

		call_gadget(hsotg, suspend);
	}

	if (gintsts & GINTSTS_WKUPINT) {
		dev_info(hsotg->dev, "GINTSTS_WkUpIn\n");
		writel(GINTSTS_WKUPINT, hsotg->regs + GINTSTS);

		call_gadget(hsotg, resume);
	}

	if (gintsts & GINTSTS_ERLYSUSP) {
		dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
		writel(GINTSTS_ERLYSUSP, hsotg->regs + GINTSTS);
	}

	/*
	 * these next two seem to crop-up occasionally causing the core
	 * to shutdown the USB transfer, so try clearing them and logging
	 * the occurrence.
	 */

	if (gintsts & GINTSTS_GOUTNAKEFF) {
		dev_info(hsotg->dev, "GOUTNakEff triggered\n");

		writel(DCTL_CGOUTNAK, hsotg->regs + DCTL);

		s3c_hsotg_dump(hsotg);
	}

	if (gintsts & GINTSTS_GINNAKEFF) {
		dev_info(hsotg->dev, "GINNakEff triggered\n");

		writel(DCTL_CGNPINNAK, hsotg->regs + DCTL);

		s3c_hsotg_dump(hsotg);
	}

	/*
	 * if we've had fifo events, we should try and go around the
	 * loop again to see if there's any point in returning yet.
	 */

	if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
		goto irq_retry;

	spin_unlock(&hsotg->lock);

	return IRQ_HANDLED;
}

/**
 * s3c_hsotg_ep_enable - enable the given endpoint
 * @ep: The USB endpoint to configure
 * @desc: The USB endpoint descriptor to configure with.
 *
 * This is called from the USB gadget code's usb_ep_enable().
 */
static int s3c_hsotg_ep_enable(struct usb_ep *ep,
			       const struct usb_endpoint_descriptor *desc)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hsotg = hs_ep->parent;
	unsigned long flags;
	int index = hs_ep->index;
	u32 epctrl_reg;
	u32 epctrl;
	u32 mps;
	int dir_in;
	int ret = 0;

	dev_dbg(hsotg->dev,
		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
		__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
		desc->wMaxPacketSize, desc->bInterval);

	/* not to be called for EP0 */
	WARN_ON(index == 0);

	/* the descriptor's direction must match the endpoint we manage */
	dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
	if (dir_in != hs_ep->dir_in) {
		dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
		return -EINVAL;
	}

	mps = usb_endpoint_maxp(desc);

	/* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */

	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
	epctrl = readl(hsotg->regs + epctrl_reg);

	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
		__func__, epctrl, epctrl_reg);

	spin_lock_irqsave(&hsotg->lock, flags);

	epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
	epctrl |= DXEPCTL_MPS(mps);

	/*
	 * mark the endpoint as active, otherwise the core may ignore
	 * transactions entirely for this endpoint
	 */
	epctrl |= DXEPCTL_USBACTEP;

	/*
	 * set the NAK status on the endpoint, otherwise we might try and
	 * do something with data that we've not yet got a request to
	 * process, since the RXFIFO will take data for an endpoint even
	 * if the size register hasn't been set.
	 */

	epctrl |= DXEPCTL_SNAK;

	/* update the endpoint state */
	s3c_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps);

	/* default, set to non-periodic */
	hs_ep->isochronous = 0;
	hs_ep->periodic = 0;
	hs_ep->halted = 0;
	hs_ep->interval = desc->bInterval;

	if (hs_ep->interval > 1 && hs_ep->mc > 1)
		dev_err(hsotg->dev, "MC > 1 when interval is not 1\n");

	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_ISOC:
		epctrl |= DXEPCTL_EPTYPE_ISO;
		epctrl |= DXEPCTL_SETEVENFR;
		hs_ep->isochronous = 1;
		if (dir_in)
			hs_ep->periodic = 1;
		break;

	case USB_ENDPOINT_XFER_BULK:
		epctrl |= DXEPCTL_EPTYPE_BULK;
		break;

	case USB_ENDPOINT_XFER_INT:
		if (dir_in) {
			/*
			 * Allocate our TxFNum by simply using the index
			 * of the endpoint for the moment. We could do
			 * something better if the host indicates how
			 * many FIFOs we are expecting to use.
			 */

			hs_ep->periodic = 1;
			epctrl |= DXEPCTL_TXFNUM(index);
		}

		epctrl |= DXEPCTL_EPTYPE_INTERRUPT;
		break;

	case USB_ENDPOINT_XFER_CONTROL:
		epctrl |= DXEPCTL_EPTYPE_CONTROL;
		break;
	}

	/*
	 * if the hardware has dedicated fifos, we must give each IN EP
	 * a unique tx-fifo even if it is non-periodic.
	 */
	if (dir_in && hsotg->dedicated_fifos)
		epctrl |= DXEPCTL_TXFNUM(index);

	/* for non control endpoints, set PID to D0 */
	if (index)
		epctrl |= DXEPCTL_SETD0PID;

	dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
		__func__, epctrl);

	writel(epctrl, hsotg->regs + epctrl_reg);
	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
		__func__, readl(hsotg->regs + epctrl_reg));

	/* enable the endpoint interrupt */
	s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);

	spin_unlock_irqrestore(&hsotg->lock, flags);
	return ret;
}

/**
 * s3c_hsotg_ep_disable - disable given endpoint
 * @ep: The endpoint to disable.
 */
static int s3c_hsotg_ep_disable(struct usb_ep *ep)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hsotg = hs_ep->parent;
	int dir_in = hs_ep->dir_in;
	int index = hs_ep->index;
	unsigned long flags;
	u32 epctrl_reg;
	u32 ctrl;

	dev_info(hsotg->dev, "%s(ep %p)\n", __func__, ep);

	/* ep0 must stay usable for control traffic */
	if (ep == &hsotg->eps[0].ep) {
		dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
		return -EINVAL;
	}

	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);

	spin_lock_irqsave(&hsotg->lock, flags);
	/* terminate all requests with shutdown */
	kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);


	/* deactivate the endpoint and set it to NAK any further traffic */
	ctrl = readl(hsotg->regs + epctrl_reg);
	ctrl &= ~DXEPCTL_EPENA;
	ctrl &= ~DXEPCTL_USBACTEP;
	ctrl |= DXEPCTL_SNAK;

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
	writel(ctrl, hsotg->regs + epctrl_reg);

	/* disable endpoint interrupts */
	s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);

	spin_unlock_irqrestore(&hsotg->lock, flags);
	return 0;
}

/**
 * on_list - check request is on the given endpoint
 * @ep: The endpoint to check.
2780 * @test: The request to test if it is on the endpoint. 2781 */ 2782static bool on_list(struct s3c_hsotg_ep *ep, struct s3c_hsotg_req *test) 2783{ 2784 struct s3c_hsotg_req *req, *treq; 2785 2786 list_for_each_entry_safe(req, treq, &ep->queue, queue) { 2787 if (req == test) 2788 return true; 2789 } 2790 2791 return false; 2792} 2793 2794/** 2795 * s3c_hsotg_ep_dequeue - dequeue given endpoint 2796 * @ep: The endpoint to dequeue. 2797 * @req: The request to be removed from a queue. 2798 */ 2799static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req) 2800{ 2801 struct s3c_hsotg_req *hs_req = our_req(req); 2802 struct s3c_hsotg_ep *hs_ep = our_ep(ep); 2803 struct s3c_hsotg *hs = hs_ep->parent; 2804 unsigned long flags; 2805 2806 dev_info(hs->dev, "ep_dequeue(%p,%p)\n", ep, req); 2807 2808 spin_lock_irqsave(&hs->lock, flags); 2809 2810 if (!on_list(hs_ep, hs_req)) { 2811 spin_unlock_irqrestore(&hs->lock, flags); 2812 return -EINVAL; 2813 } 2814 2815 s3c_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET); 2816 spin_unlock_irqrestore(&hs->lock, flags); 2817 2818 return 0; 2819} 2820 2821/** 2822 * s3c_hsotg_ep_sethalt - set halt on a given endpoint 2823 * @ep: The endpoint to set halt. 2824 * @value: Set or unset the halt. 
2825 */ 2826static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value) 2827{ 2828 struct s3c_hsotg_ep *hs_ep = our_ep(ep); 2829 struct s3c_hsotg *hs = hs_ep->parent; 2830 int index = hs_ep->index; 2831 u32 epreg; 2832 u32 epctl; 2833 u32 xfertype; 2834 2835 dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value); 2836 2837 if (index == 0) { 2838 if (value) 2839 s3c_hsotg_stall_ep0(hs); 2840 else 2841 dev_warn(hs->dev, 2842 "%s: can't clear halt on ep0\n", __func__); 2843 return 0; 2844 } 2845 2846 /* write both IN and OUT control registers */ 2847 2848 epreg = DIEPCTL(index); 2849 epctl = readl(hs->regs + epreg); 2850 2851 if (value) { 2852 epctl |= DXEPCTL_STALL + DXEPCTL_SNAK; 2853 if (epctl & DXEPCTL_EPENA) 2854 epctl |= DXEPCTL_EPDIS; 2855 } else { 2856 epctl &= ~DXEPCTL_STALL; 2857 xfertype = epctl & DXEPCTL_EPTYPE_MASK; 2858 if (xfertype == DXEPCTL_EPTYPE_BULK || 2859 xfertype == DXEPCTL_EPTYPE_INTERRUPT) 2860 epctl |= DXEPCTL_SETD0PID; 2861 } 2862 2863 writel(epctl, hs->regs + epreg); 2864 2865 epreg = DOEPCTL(index); 2866 epctl = readl(hs->regs + epreg); 2867 2868 if (value) 2869 epctl |= DXEPCTL_STALL; 2870 else { 2871 epctl &= ~DXEPCTL_STALL; 2872 xfertype = epctl & DXEPCTL_EPTYPE_MASK; 2873 if (xfertype == DXEPCTL_EPTYPE_BULK || 2874 xfertype == DXEPCTL_EPTYPE_INTERRUPT) 2875 epctl |= DXEPCTL_SETD0PID; 2876 } 2877 2878 writel(epctl, hs->regs + epreg); 2879 2880 hs_ep->halted = value; 2881 2882 return 0; 2883} 2884 2885/** 2886 * s3c_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held 2887 * @ep: The endpoint to set halt. 2888 * @value: Set or unset the halt. 
2889 */ 2890static int s3c_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value) 2891{ 2892 struct s3c_hsotg_ep *hs_ep = our_ep(ep); 2893 struct s3c_hsotg *hs = hs_ep->parent; 2894 unsigned long flags = 0; 2895 int ret = 0; 2896 2897 spin_lock_irqsave(&hs->lock, flags); 2898 ret = s3c_hsotg_ep_sethalt(ep, value); 2899 spin_unlock_irqrestore(&hs->lock, flags); 2900 2901 return ret; 2902} 2903 2904static struct usb_ep_ops s3c_hsotg_ep_ops = { 2905 .enable = s3c_hsotg_ep_enable, 2906 .disable = s3c_hsotg_ep_disable, 2907 .alloc_request = s3c_hsotg_ep_alloc_request, 2908 .free_request = s3c_hsotg_ep_free_request, 2909 .queue = s3c_hsotg_ep_queue_lock, 2910 .dequeue = s3c_hsotg_ep_dequeue, 2911 .set_halt = s3c_hsotg_ep_sethalt_lock, 2912 /* note, don't believe we have any call for the fifo routines */ 2913}; 2914 2915/** 2916 * s3c_hsotg_phy_enable - enable platform phy dev 2917 * @hsotg: The driver state 2918 * 2919 * A wrapper for platform code responsible for controlling 2920 * low-level USB code 2921 */ 2922static void s3c_hsotg_phy_enable(struct s3c_hsotg *hsotg) 2923{ 2924 struct platform_device *pdev = to_platform_device(hsotg->dev); 2925 2926 dev_dbg(hsotg->dev, "pdev 0x%p\n", pdev); 2927 2928 if (hsotg->phy) { 2929 phy_init(hsotg->phy); 2930 phy_power_on(hsotg->phy); 2931 } else if (hsotg->uphy) 2932 usb_phy_init(hsotg->uphy); 2933 else if (hsotg->plat->phy_init) 2934 hsotg->plat->phy_init(pdev, hsotg->plat->phy_type); 2935} 2936 2937/** 2938 * s3c_hsotg_phy_disable - disable platform phy dev 2939 * @hsotg: The driver state 2940 * 2941 * A wrapper for platform code responsible for controlling 2942 * low-level USB code 2943 */ 2944static void s3c_hsotg_phy_disable(struct s3c_hsotg *hsotg) 2945{ 2946 struct platform_device *pdev = to_platform_device(hsotg->dev); 2947 2948 if (hsotg->phy) { 2949 phy_power_off(hsotg->phy); 2950 phy_exit(hsotg->phy); 2951 } else if (hsotg->uphy) 2952 usb_phy_shutdown(hsotg->uphy); 2953 else if (hsotg->plat->phy_exit) 2954 
hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type); 2955} 2956 2957/** 2958 * s3c_hsotg_init - initalize the usb core 2959 * @hsotg: The driver state 2960 */ 2961static void s3c_hsotg_init(struct s3c_hsotg *hsotg) 2962{ 2963 /* unmask subset of endpoint interrupts */ 2964 2965 writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK | 2966 DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK, 2967 hsotg->regs + DIEPMSK); 2968 2969 writel(DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK | 2970 DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK, 2971 hsotg->regs + DOEPMSK); 2972 2973 writel(0, hsotg->regs + DAINTMSK); 2974 2975 /* Be in disconnected state until gadget is registered */ 2976 __orr32(hsotg->regs + DCTL, DCTL_SFTDISCON); 2977 2978 if (0) { 2979 /* post global nak until we're ready */ 2980 writel(DCTL_SGNPINNAK | DCTL_SGOUTNAK, 2981 hsotg->regs + DCTL); 2982 } 2983 2984 /* setup fifos */ 2985 2986 dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n", 2987 readl(hsotg->regs + GRXFSIZ), 2988 readl(hsotg->regs + GNPTXFSIZ)); 2989 2990 s3c_hsotg_init_fifo(hsotg); 2991 2992 /* set the PLL on, remove the HNP/SRP and set the PHY */ 2993 writel(GUSBCFG_PHYIF16 | GUSBCFG_TOUTCAL(7) | (0x5 << 10), 2994 hsotg->regs + GUSBCFG); 2995 2996 writel(using_dma(hsotg) ? GAHBCFG_DMA_EN : 0x0, 2997 hsotg->regs + GAHBCFG); 2998} 2999 3000/** 3001 * s3c_hsotg_udc_start - prepare the udc for work 3002 * @gadget: The usb gadget state 3003 * @driver: The usb gadget driver 3004 * 3005 * Perform initialization to prepare udc device and driver 3006 * to work. 
 */
static int s3c_hsotg_udc_start(struct usb_gadget *gadget,
			       struct usb_gadget_driver *driver)
{
	struct s3c_hsotg *hsotg = to_hsotg(gadget);
	int ret;

	if (!hsotg) {
		pr_err("%s: called with no device\n", __func__);
		return -ENODEV;
	}

	if (!driver) {
		dev_err(hsotg->dev, "%s: no driver\n", __func__);
		return -EINVAL;
	}

	/* NOTE(review): a too-low max_speed is only logged, the driver is
	 * still accepted - confirm this is intended */
	if (driver->max_speed < USB_SPEED_FULL)
		dev_err(hsotg->dev, "%s: bad speed\n", __func__);

	if (!driver->setup) {
		dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
		return -EINVAL;
	}

	WARN_ON(hsotg->driver);

	driver->driver.bus = NULL;
	hsotg->driver = driver;
	hsotg->gadget.dev.of_node = hsotg->dev->of_node;
	hsotg->gadget.speed = USB_SPEED_UNKNOWN;

	ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
				    hsotg->supplies);
	if (ret) {
		dev_err(hsotg->dev, "failed to enable supplies: %d\n", ret);
		goto err;
	}

	hsotg->last_rst = jiffies;
	dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
	return 0;

err:
	hsotg->driver = NULL;
	return ret;
}

/**
 * s3c_hsotg_udc_stop - stop the udc
 * @gadget: The usb gadget state
 * @driver: The usb gadget driver
 *
 * Stop udc hw block and stay tuned for future transmissions
 */
static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
			      struct usb_gadget_driver *driver)
{
	struct s3c_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags = 0;
	int ep;

	if (!hsotg)
		return -ENODEV;

	/* all endpoints should be shutdown */
	for (ep = 0; ep < hsotg->num_of_eps; ep++)
		s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);

	spin_lock_irqsave(&hsotg->lock, flags);

	s3c_hsotg_phy_disable(hsotg);

	/* NOTE(review): the bound driver is only forgotten when called
	 * with a NULL driver argument - confirm this asymmetry is wanted */
	if (!driver)
		hsotg->driver = NULL;

	hsotg->gadget.speed = USB_SPEED_UNKNOWN;

	spin_unlock_irqrestore(&hsotg->lock, flags);

	regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);

	return 0;
}

/**
 * s3c_hsotg_gadget_getframe - read the frame number
 * @gadget: The usb gadget state
 *
 * Read the {micro} frame number
 */
static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
{
	return s3c_hsotg_read_frameno(to_hsotg(gadget));
}

/**
 * s3c_hsotg_pullup - connect/disconnect the USB PHY
 * @gadget: The usb gadget state
 * @is_on: Current state of the USB PHY
 *
 * Connect/Disconnect the USB PHY pullup
 */
static int s3c_hsotg_pullup(struct usb_gadget *gadget, int is_on)
{
	struct s3c_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags = 0;

	dev_dbg(hsotg->dev, "%s: is_in: %d\n", __func__, is_on);

	spin_lock_irqsave(&hsotg->lock, flags);
	if (is_on) {
		/* bring the PHY up and re-initialise the core */
		s3c_hsotg_phy_enable(hsotg);
		s3c_hsotg_core_init(hsotg);
	} else {
		/* drop all traffic and power the PHY back down */
		s3c_hsotg_disconnect(hsotg);
		s3c_hsotg_phy_disable(hsotg);
	}

	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
	spin_unlock_irqrestore(&hsotg->lock, flags);

	return 0;
}

static const struct usb_gadget_ops s3c_hsotg_gadget_ops = {
	.get_frame	= s3c_hsotg_gadget_getframe,
	.udc_start	= s3c_hsotg_udc_start,
	.udc_stop	= s3c_hsotg_udc_stop,
	.pullup		= s3c_hsotg_pullup,
};

/**
 * s3c_hsotg_initep - initialise a single endpoint
 * @hsotg: The device state.
 * @hs_ep: The endpoint to be initialised.
 * @epnum: The endpoint number
 *
 * Initialise the given endpoint (as part of the probe and device state
 * creation) to give to the gadget driver. Setup the endpoint name, any
 * direction information and other state that may be required.
 */
static void s3c_hsotg_initep(struct s3c_hsotg *hsotg,
			     struct s3c_hsotg_ep *hs_ep,
			     int epnum)
{
	u32 ptxfifo;
	char *dir;

	/* odd endpoint numbers are IN, even are OUT; ep0 is bidirectional */
	if (epnum == 0)
		dir = "";
	else if ((epnum % 2) == 0) {
		dir = "out";
	} else {
		dir = "in";
		hs_ep->dir_in = 1;
	}

	hs_ep->index = epnum;

	snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);

	INIT_LIST_HEAD(&hs_ep->queue);
	INIT_LIST_HEAD(&hs_ep->ep.ep_list);

	/* add to the list of endpoints known by the gadget driver */
	if (epnum)
		list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);

	hs_ep->parent = hsotg;
	hs_ep->ep.name = hs_ep->name;
	usb_ep_set_maxpacket_limit(&hs_ep->ep, epnum ? 1024 : EP0_MPS_LIMIT);
	hs_ep->ep.ops = &s3c_hsotg_ep_ops;

	/*
	 * Read the FIFO size for the Periodic TX FIFO, even if we're
	 * an OUT endpoint, we may as well do this if in future the
	 * code is changed to make each endpoint's direction changeable.
	 */

	ptxfifo = readl(hsotg->regs + DPTXFSIZN(epnum));
	hs_ep->fifo_size = FIFOSIZE_DEPTH_GET(ptxfifo) * 4;

	/*
	 * if we're using dma, we need to set the next-endpoint pointer
	 * to be something valid.
	 */

	if (using_dma(hsotg)) {
		u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);
		writel(next, hsotg->regs + DIEPCTL(epnum));
		writel(next, hsotg->regs + DOEPCTL(epnum));
	}
}

/**
 * s3c_hsotg_hw_cfg - read HW configuration registers
 * @hsotg: The device state
 *
 * Read the USB core HW configuration registers
 */
static void s3c_hsotg_hw_cfg(struct s3c_hsotg *hsotg)
{
	u32 cfg2, cfg4;
	/* check hardware configuration */

	/* offset 0x48 is presumably GHWCFG2; endpoint count in bits 13:10
	 * - TODO confirm and replace raw offsets with named constants */
	cfg2 = readl(hsotg->regs + 0x48);
	hsotg->num_of_eps = (cfg2 >> 10) & 0xF;

	dev_info(hsotg->dev, "EPs:%d\n", hsotg->num_of_eps);

	/* offset 0x50 is presumably GHWCFG4; dedicated-FIFO flag in bit 25 */
	cfg4 = readl(hsotg->regs + 0x50);
	hsotg->dedicated_fifos = (cfg4 >> 25) & 1;

	dev_info(hsotg->dev, "%s fifos\n",
		 hsotg->dedicated_fifos ? "dedicated" : "shared");
}

/**
 * s3c_hsotg_dump - dump state of the udc
 * @hsotg: The device state
 */
static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
{
#ifdef DEBUG
	struct device *dev = hsotg->dev;
	void __iomem *regs = hsotg->regs;
	u32 val;
	int idx;

	dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
		 readl(regs + DCFG), readl(regs + DCTL),
		 readl(regs + DIEPMSK));

	dev_info(dev, "GAHBCFG=0x%08x, 0x44=0x%08x\n",
		 readl(regs + GAHBCFG), readl(regs + 0x44));

	dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
		 readl(regs + GRXFSIZ), readl(regs + GNPTXFSIZ));

	/* show periodic fifo settings */

	for (idx = 1; idx <= 15; idx++) {
		val = readl(regs + DPTXFSIZN(idx));
		dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
			 val >> FIFOSIZE_DEPTH_SHIFT,
			 val & FIFOSIZE_STARTADDR_MASK);
	}

	for (idx = 0; idx < 15; idx++) {
		dev_info(dev,
			 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
			 readl(regs + DIEPCTL(idx)),
			 readl(regs + DIEPTSIZ(idx)),
			 readl(regs + DIEPDMA(idx)));

		/* NOTE(review): val is assigned here but the print below
		 * re-reads DOEPCTL, so this read is redundant */
		val = readl(regs + DOEPCTL(idx));
		dev_info(dev,
			 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
			 idx, readl(regs + DOEPCTL(idx)),
			 readl(regs + DOEPTSIZ(idx)),
			 readl(regs + DOEPDMA(idx)));

	}

	dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
		 readl(regs + DVBUSDIS), readl(regs + DVBUSPULSE));
#endif
}

/**
 * state_show - debugfs: show overall driver and device state.
 * @seq: The seq file to write to.
 * @v: Unused parameter.
 *
 * This debugfs entry shows the overall state of the hardware and
 * some general information about each of the endpoints available
 * to the system.
 */
static int state_show(struct seq_file *seq, void *v)
{
	struct s3c_hsotg *hsotg = seq->private;
	void __iomem *regs = hsotg->regs;
	int idx;

	seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
		   readl(regs + DCFG),
		   readl(regs + DCTL),
		   readl(regs + DSTS));

	seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n",
		   readl(regs + DIEPMSK), readl(regs + DOEPMSK));

	seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
		   readl(regs + GINTMSK),
		   readl(regs + GINTSTS));

	seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
		   readl(regs + DAINTMSK),
		   readl(regs + DAINT));

	seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n",
		   readl(regs + GNPTXSTS),
		   readl(regs + GRXSTSR));

	seq_puts(seq, "\nEndpoint status:\n");

	for (idx = 0; idx < 15; idx++) {
		u32 in, out;

		in = readl(regs + DIEPCTL(idx));
		out = readl(regs + DOEPCTL(idx));

		seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
			   idx, in, out);

		in = readl(regs + DIEPTSIZ(idx));
		out = readl(regs + DOEPTSIZ(idx));

		seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
			   in, out);

		seq_puts(seq, "\n");
	}

	return 0;
}

static int state_open(struct inode *inode, struct file *file)
{
	return single_open(file, state_show, inode->i_private);
}

static const struct file_operations state_fops = {
	.owner		= THIS_MODULE,
	.open		= state_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/**
 * fifo_show - debugfs: show the fifo information
 * @seq: The seq_file to write data to.
 * @v: Unused parameter.
 *
 * Show the FIFO information for the overall fifo and all the
 * periodic transmission FIFOs.
 */
static int fifo_show(struct seq_file *seq, void *v)
{
	struct s3c_hsotg *hsotg = seq->private;
	void __iomem *regs = hsotg->regs;
	u32 val;
	int idx;

	seq_puts(seq, "Non-periodic FIFOs:\n");
	seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + GRXFSIZ));

	val = readl(regs + GNPTXFSIZ);
	seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
		   val >> FIFOSIZE_DEPTH_SHIFT,
		   val & FIFOSIZE_DEPTH_MASK);

	seq_puts(seq, "\nPeriodic TXFIFOs:\n");

	for (idx = 1; idx <= 15; idx++) {
		val = readl(regs + DPTXFSIZN(idx));

		seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
			   val >> FIFOSIZE_DEPTH_SHIFT,
			   val & FIFOSIZE_STARTADDR_MASK);
	}

	return 0;
}

static int fifo_open(struct inode *inode, struct file *file)
{
	return single_open(file, fifo_show, inode->i_private);
}

static const struct file_operations fifo_fops = {
	.owner		= THIS_MODULE,
	.open		= fifo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};


/* map an endpoint direction flag to its debugfs display string */
static const char *decode_direction(int is_in)
{
	return is_in ? "in" : "out";
}

/**
 * ep_show - debugfs: show the state of an endpoint.
 * @seq: The seq_file to write data to.
 * @v: Unused parameter.
 *
 * This debugfs entry shows the state of the given endpoint (one is
 * registered for each available).
 */
static int ep_show(struct seq_file *seq, void *v)
{
	struct s3c_hsotg_ep *ep = seq->private;
	struct s3c_hsotg *hsotg = ep->parent;
	struct s3c_hsotg_req *req;
	void __iomem *regs = hsotg->regs;
	int index = ep->index;
	int show_limit = 15;
	unsigned long flags;

	seq_printf(seq, "Endpoint index %d, named %s, dir %s:\n",
		   ep->index, ep->ep.name, decode_direction(ep->dir_in));

	/* first show the register state */

	seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
		   readl(regs + DIEPCTL(index)),
		   readl(regs + DOEPCTL(index)));

	seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
		   readl(regs + DIEPDMA(index)),
		   readl(regs + DOEPDMA(index)));

	seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
		   readl(regs + DIEPINT(index)),
		   readl(regs + DOEPINT(index)));

	seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
		   readl(regs + DIEPTSIZ(index)),
		   readl(regs + DOEPTSIZ(index)));

	seq_puts(seq, "\n");
	seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
	seq_printf(seq, "total_data=%ld\n", ep->total_data);

	seq_printf(seq, "request list (%p,%p):\n",
		   ep->queue.next, ep->queue.prev);

	spin_lock_irqsave(&hsotg->lock, flags);

	/* cap output; '*' marks the request currently loaded on the ep */
	list_for_each_entry(req, &ep->queue, queue) {
		if (--show_limit < 0) {
			seq_puts(seq, "not showing more requests...\n");
			break;
		}

		seq_printf(seq, "%c req %p: %d bytes @%p, ",
			   req == ep->req ? '*' : ' ',
			   req, req->req.length, req->req.buf);
		seq_printf(seq, "%d done, res %d\n",
			   req->req.actual, req->req.status);
	}

	spin_unlock_irqrestore(&hsotg->lock, flags);

	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	return single_open(file, ep_show, inode->i_private);
}

static const struct file_operations ep_fops = {
	.owner		= THIS_MODULE,
	.open		= ep_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/**
 * s3c_hsotg_create_debug - create debugfs directory and files
 * @hsotg: The driver state
 *
 * Create the debugfs files to allow the user to get information
 * about the state of the system. The directory name is created
 * with the same name as the device itself, in case we end up
 * with multiple blocks in future systems.
 */
static void s3c_hsotg_create_debug(struct s3c_hsotg *hsotg)
{
	struct dentry *root;
	unsigned epidx;

	root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
	hsotg->debug_root = root;
	if (IS_ERR(root)) {
		dev_err(hsotg->dev, "cannot create debug root\n");
		return;
	}

	/* create general state file */

	hsotg->debug_file = debugfs_create_file("state", 0444, root,
						hsotg, &state_fops);

	if (IS_ERR(hsotg->debug_file))
		dev_err(hsotg->dev, "%s: failed to create state\n", __func__);

	hsotg->debug_fifo = debugfs_create_file("fifo", 0444, root,
						hsotg, &fifo_fops);

	if (IS_ERR(hsotg->debug_fifo))
		dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);

	/* create one file for each endpoint */

	for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
		struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];

		ep->debugfs = debugfs_create_file(ep->name, 0444,
						  root, ep, &ep_fops);

		if (IS_ERR(ep->debugfs))
			dev_err(hsotg->dev, "failed to create %s debug file\n",
				ep->name);
	}
}

/**
 * s3c_hsotg_delete_debug - cleanup debugfs entries
 * @hsotg: The driver state
 *
 * Cleanup (remove) the debugfs files for use on module exit.
 */
static void s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
{
	unsigned epidx;

	/* remove the per-endpoint files first, then the common files
	 * and finally the directory itself */
	for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
		struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
		debugfs_remove(ep->debugfs);
	}

	debugfs_remove(hsotg->debug_file);
	debugfs_remove(hsotg->debug_fifo);
	debugfs_remove(hsotg->debug_root);
}

/**
 * s3c_hsotg_probe - probe function for hsotg driver
 * @pdev: The platform information for the driver
 */

static int s3c_hsotg_probe(struct platform_device *pdev)
{
	struct s3c_hsotg_plat *plat = dev_get_platdata(&pdev->dev);
	struct phy *phy;
	struct usb_phy *uphy;
	struct device *dev = &pdev->dev;
	struct s3c_hsotg_ep *eps;
	struct s3c_hsotg *hsotg;
	struct resource *res;
	int epnum;
	int ret;
	int i;

	/* device-managed allocation: freed automatically on detach */
	hsotg = devm_kzalloc(&pdev->dev, sizeof(struct s3c_hsotg), GFP_KERNEL);
	if (!hsotg) {
		dev_err(dev, "cannot get memory\n");
		return -ENOMEM;
	}

	/*
	 * Attempt to find a generic PHY, then look for an old style
	 * USB PHY, finally fall back to pdata
	 */
	phy = devm_phy_get(&pdev->dev, "usb2-phy");
	if (IS_ERR(phy)) {
		uphy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
		if (IS_ERR(uphy)) {
			/* Fallback for pdata */
			plat = dev_get_platdata(&pdev->dev);
			if (!plat) {
				/* defer in case a transceiver registers later */
				dev_err(&pdev->dev,
					"no platform data or transceiver defined\n");
				return -EPROBE_DEFER;
			}
			hsotg->plat = plat;
		} else
			hsotg->uphy = uphy;
	} else
		hsotg->phy = phy;

	hsotg->dev = dev;

	hsotg->clk = devm_clk_get(&pdev->dev, "otg");
	if (IS_ERR(hsotg->clk)) {
		dev_err(dev, "cannot get otg clock\n");
		return PTR_ERR(hsotg->clk);
	}

	platform_set_drvdata(pdev, hsotg);

	/* map the controller registers; devm_ioremap_resource also
	 * validates a NULL resource */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	hsotg->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(hsotg->regs)) {
		ret = PTR_ERR(hsotg->regs);
		goto err_clk;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "cannot find IRQ\n");
		goto err_clk;
	}

	/* lock must be initialised before the IRQ handler can run */
	spin_lock_init(&hsotg->lock);

	hsotg->irq = ret;

	ret = devm_request_irq(&pdev->dev, hsotg->irq, s3c_hsotg_irq, 0,
				dev_name(dev), hsotg);
	if (ret < 0) {
		dev_err(dev, "cannot claim IRQ\n");
		goto err_clk;
	}

	dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq);

	hsotg->gadget.max_speed = USB_SPEED_HIGH;
	hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
	hsotg->gadget.name = dev_name(dev);

	/* reset the system */

	clk_prepare_enable(hsotg->clk);

	/* regulators */

	for (i = 0; i < ARRAY_SIZE(hsotg->supplies); i++)
		hsotg->supplies[i].supply = s3c_hsotg_supply_names[i];

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hsotg->supplies),
				 hsotg->supplies);
	if (ret) {
		dev_err(dev, "failed to request supplies: %d\n", ret);
		goto err_clk;
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
				    hsotg->supplies);

	if (ret) {
		dev_err(hsotg->dev, "failed to enable supplies: %d\n", ret);
		goto err_supplies;
	}

	/* Set default UTMI width */
	hsotg->phyif = GUSBCFG_PHYIF16;

	/*
	 * If using the generic PHY framework, check if the PHY bus
	 * width is 8-bit and set the phyif appropriately.
	 */
	if (hsotg->phy && (phy_get_bus_width(phy) == 8))
		hsotg->phyif = GUSBCFG_PHYIF8;

	if (hsotg->phy)
		phy_init(hsotg->phy);

	/* usb phy enable */
	s3c_hsotg_phy_enable(hsotg);

	s3c_hsotg_corereset(hsotg);
	s3c_hsotg_init(hsotg);
	s3c_hsotg_hw_cfg(hsotg);

	/* hsotg->num_of_eps holds number of EPs other than ep0 */

	if (hsotg->num_of_eps == 0) {
		dev_err(dev, "wrong number of EPs (zero)\n");
		ret = -EINVAL;
		goto err_supplies;
	}

	/* +1 slot so eps[0] can represent ep0 alongside the others */
	eps = kcalloc(hsotg->num_of_eps + 1, sizeof(struct s3c_hsotg_ep),
		      GFP_KERNEL);
	if (!eps) {
		dev_err(dev, "cannot get memory\n");
		ret = -ENOMEM;
		goto err_supplies;
	}

	hsotg->eps = eps;

	/* setup endpoint information */

	INIT_LIST_HEAD(&hsotg->gadget.ep_list);
	hsotg->gadget.ep0 = &hsotg->eps[0].ep;

	/* allocate EP0 request */

	hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps[0].ep,
						     GFP_KERNEL);
	if (!hsotg->ctrl_req) {
		dev_err(dev, "failed to allocate ctrl req\n");
		ret = -ENOMEM;
		goto err_ep_mem;
	}

	/* initialise the endpoints now the core has been initialised */
	for (epnum = 0; epnum < hsotg->num_of_eps; epnum++)
		s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum);

	/* disable power and clock */

	ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
				    hsotg->supplies);
	if (ret) {
		dev_err(hsotg->dev, "failed to disable supplies: %d\n", ret);
		goto err_ep_mem;
	}

	s3c_hsotg_phy_disable(hsotg);

	ret = usb_add_gadget_udc(&pdev->dev, &hsotg->gadget);
	if (ret)
		goto err_ep_mem;

	s3c_hsotg_create_debug(hsotg);

	s3c_hsotg_dump(hsotg);

	return 0;

err_ep_mem:
	/*
	 * NOTE(review): this path frees eps but never releases
	 * hsotg->ctrl_req (allocated above) when a later step fails, and
	 * phy_init() has no matching phy_exit() on any error path —
	 * confirm whether both should be unwound here.
	 */
	kfree(eps);
err_supplies:
	s3c_hsotg_phy_disable(hsotg);
err_clk:
	clk_disable_unprepare(hsotg->clk);

	return ret;
}

/**
 * s3c_hsotg_remove - remove function for hsotg driver
 * @pdev: The platform information for the driver
 *
 * Unregister the gadget UDC, tear down the debugfs entries and power
 * down the PHY and clock. Always returns 0.
 */
static int s3c_hsotg_remove(struct platform_device *pdev)
{
	struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);

	usb_del_gadget_udc(&hsotg->gadget);

	s3c_hsotg_delete_debug(hsotg);

	if (hsotg->driver) {
		/* should have been done already by driver model core */
		usb_gadget_unregister_driver(hsotg->driver);
	}

	/* power down the PHY (phy_exit only when using the generic
	 * PHY framework) and stop the clock */
	s3c_hsotg_phy_disable(hsotg);
	if (hsotg->phy)
		phy_exit(hsotg->phy);
	clk_disable_unprepare(hsotg->clk);

	return 0;
}

/**
 * s3c_hsotg_suspend - platform suspend callback
 * @pdev: The platform information for the driver
 * @state: The suspend power state (unused here).
 *
 * Disconnect from the host, power down the PHY, disable every endpoint
 * and cut the regulators while a function driver is bound. Returns 0 or
 * the regulator_bulk_disable() error.
 */
static int s3c_hsotg_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
	unsigned long flags;
	int ret = 0;

	if (hsotg->driver)
		dev_info(hsotg->dev, "suspending usb gadget %s\n",
			 hsotg->driver->driver.name);

	/*
	 * NOTE(review): s3c_hsotg_disconnect() and s3c_hsotg_phy_disable()
	 * are called with the spinlock held and interrupts off — confirm
	 * both are safe in atomic context.
	 */
	spin_lock_irqsave(&hsotg->lock, flags);
	s3c_hsotg_disconnect(hsotg);
	s3c_hsotg_phy_disable(hsotg);
	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
	spin_unlock_irqrestore(&hsotg->lock, flags);

	if (hsotg->driver) {
		int ep;
		/* num_of_eps excludes ep0, so ep0 is left enabled here */
		for (ep = 0; ep < hsotg->num_of_eps; ep++)
			s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);

		ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
					     hsotg->supplies);
	}

	return ret;
}

/**
 * s3c_hsotg_resume - platform resume callback
 * @pdev: The platform information for the driver
 *
 * Re-enable the regulators when a function driver is bound, then power
 * the PHY back up and re-initialise the core. Returns 0 or the
 * regulator_bulk_enable() error.
 */
static int s3c_hsotg_resume(struct platform_device *pdev)
{
	struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
	unsigned long flags;
	int ret = 0;

	if (hsotg->driver) {
		dev_info(hsotg->dev, "resuming usb gadget %s\n",
			 hsotg->driver->driver.name);
		ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
					    hsotg->supplies);
	}

	spin_lock_irqsave(&hsotg->lock, flags);
	/* restart the reset-detection timestamp from now */
	hsotg->last_rst = jiffies;
	s3c_hsotg_phy_enable(hsotg);
	s3c_hsotg_core_init(hsotg);
	spin_unlock_irqrestore(&hsotg->lock, flags);

	return ret;
}

#ifdef CONFIG_OF
static const struct of_device_id s3c_hsotg_of_ids[] = {
	{ .compatible = "samsung,s3c6400-hsotg", },
	{ .compatible = "snps,dwc2", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, s3c_hsotg_of_ids);
#endif

static struct platform_driver s3c_hsotg_driver = {
	.driver		= {
		.name	= "s3c-hsotg",
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(s3c_hsotg_of_ids),
	},
	.probe		= s3c_hsotg_probe,
	.remove		= s3c_hsotg_remove,
	.suspend	= s3c_hsotg_suspend,
	.resume		= s3c_hsotg_resume,
};

module_platform_driver(s3c_hsotg_driver);

MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:s3c-hsotg");