file_storage.c revision 62fd2cac5bf5cf9e6fcb2fc40b32e7271e605c53
1/* 2 * file_storage.c -- File-backed USB Storage Gadget, for USB development 3 * 4 * Copyright (C) 2003-2007 Alan Stern 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. The names of the above-listed copyright holders may not be used 17 * to endorse or promote products derived from this software without 18 * specific prior written permission. 19 * 20 * ALTERNATIVELY, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") as published by the Free Software 22 * Foundation, either version 2 of that License or (at your option) any 23 * later version. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 26 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 27 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 29 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 30 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 31 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 32 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 39/* 40 * The File-backed Storage Gadget acts as a USB Mass Storage device, 41 * appearing to the host as a disk drive. In addition to providing an 42 * example of a genuinely useful gadget driver for a USB device, it also 43 * illustrates a technique of double-buffering for increased throughput. 44 * Last but not least, it gives an easy way to probe the behavior of the 45 * Mass Storage drivers in a USB host. 46 * 47 * Backing storage is provided by a regular file or a block device, specified 48 * by the "file" module parameter. Access can be limited to read-only by 49 * setting the optional "ro" module parameter. The gadget will indicate that 50 * it has removable media if the optional "removable" module parameter is set. 51 * 52 * The gadget supports the Control-Bulk (CB), Control-Bulk-Interrupt (CBI), 53 * and Bulk-Only (also known as Bulk-Bulk-Bulk or BBB) transports, selected 54 * by the optional "transport" module parameter. It also supports the 55 * following protocols: RBC (0x01), ATAPI or SFF-8020i (0x02), QIC-157 (0c03), 56 * UFI (0x04), SFF-8070i (0x05), and transparent SCSI (0x06), selected by 57 * the optional "protocol" module parameter. In addition, the default 58 * Vendor ID, Product ID, and release number can be overridden. 59 * 60 * There is support for multiple logical units (LUNs), each of which has 61 * its own backing file. The number of LUNs can be set using the optional 62 * "luns" module parameter (anywhere from 1 to 8), and the corresponding 63 * files are specified using comma-separated lists for "file" and "ro". 64 * The default number of LUNs is taken from the number of "file" elements; 65 * it is 1 if "file" is not given. If "removable" is not set then a backing 66 * file must be specified for each LUN. If it is set, then an unspecified 67 * or empty backing filename means the LUN's medium is not loaded. 68 * 69 * Requirements are modest; only a bulk-in and a bulk-out endpoint are 70 * needed (an interrupt-out endpoint is also needed for CBI). The memory 71 * requirement amounts to two 16K buffers, size configurable by a parameter. 72 * Support is included for both full-speed and high-speed operation. 73 * 74 * Note that the driver is slightly non-portable in that it assumes a 75 * single memory/DMA buffer will be useable for bulk-in, bulk-out, and 76 * interrupt-in endpoints. With most device controllers this isn't an 77 * issue, but there may be some with hardware restrictions that prevent 78 * a buffer from being used by more than one endpoint. 79 * 80 * Module options: 81 * 82 * file=filename[,filename...] 83 * Required if "removable" is not set, names of 84 * the files or block devices used for 85 * backing storage 86 * ro=b[,b...] Default false, booleans for read-only access 87 * removable Default false, boolean for removable media 88 * luns=N Default N = number of filenames, number of 89 * LUNs to support 90 * stall Default determined according to the type of 91 * USB device controller (usually true), 92 * boolean to permit the driver to halt 93 * bulk endpoints 94 * transport=XXX Default BBB, transport name (CB, CBI, or BBB) 95 * protocol=YYY Default SCSI, protocol name (RBC, 8020 or 96 * ATAPI, QIC, UFI, 8070, or SCSI; 97 * also 1 - 6) 98 * vendor=0xVVVV Default 0x0525 (NetChip), USB Vendor ID 99 * product=0xPPPP Default 0xa4a5 (FSG), USB Product ID 100 * release=0xRRRR Override the USB release number (bcdDevice) 101 * buflen=N Default N=16384, buffer size used (will be 102 * rounded down to a multiple of 103 * PAGE_CACHE_SIZE) 104 * 105 * If CONFIG_USB_FILE_STORAGE_TEST is not set, only the "file", "ro", 106 * "removable", "luns", and "stall" options are available; default values 107 * are used for everything else. 108 * 109 * The pathnames of the backing files and the ro settings are available in 110 * the attribute files "file" and "ro" in the lun<n> subdirectory of the 111 * gadget's sysfs directory. If the "removable" option is set, writing to 112 * these files will simulate ejecting/loading the medium (writing an empty 113 * line means eject) and adjusting a write-enable tab. Changes to the ro 114 * setting are not allowed when the medium is loaded. 115 * 116 * This gadget driver is heavily based on "Gadget Zero" by David Brownell. 117 * The driver's SCSI command interface was based on the "Information 118 * technology - Small Computer System Interface - 2" document from 119 * X3T9.2 Project 375D, Revision 10L, 7-SEP-93, available at 120 * <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>. The single exception 121 * is opcode 0x23 (READ FORMAT CAPACITIES), which was based on the 122 * "Universal Serial Bus Mass Storage Class UFI Command Specification" 123 * document, Revision 1.0, December 14, 1998, available at 124 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>. 125 */ 126 127 128/* 129 * Driver Design 130 * 131 * The FSG driver is fairly straightforward. There is a main kernel 132 * thread that handles most of the work. Interrupt routines field 133 * callbacks from the controller driver: bulk- and interrupt-request 134 * completion notifications, endpoint-0 events, and disconnect events. 135 * Completion events are passed to the main thread by wakeup calls. Many 136 * ep0 requests are handled at interrupt time, but SetInterface, 137 * SetConfiguration, and device reset requests are forwarded to the 138 * thread in the form of "exceptions" using SIGUSR1 signals (since they 139 * should interrupt any ongoing file I/O operations). 140 * 141 * The thread's main routine implements the standard command/data/status 142 * parts of a SCSI interaction. It and its subroutines are full of tests 143 * for pending signals/exceptions -- all this polling is necessary since 144 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an 145 * indication that the driver really wants to be running in userspace.) 146 * An important point is that so long as the thread is alive it keeps an 147 * open reference to the backing file. This will prevent unmounting 148 * the backing file's underlying filesystem and could cause problems 149 * during system shutdown, for example. To prevent such problems, the 150 * thread catches INT, TERM, and KILL signals and converts them into 151 * an EXIT exception. 152 * 153 * In normal operation the main thread is started during the gadget's 154 * fsg_bind() callback and stopped during fsg_unbind(). But it can also 155 * exit when it receives a signal, and there's no point leaving the 156 * gadget running when the thread is dead. So just before the thread 157 * exits, it deregisters the gadget driver. This makes things a little 158 * tricky: The driver is deregistered at two places, and the exiting 159 * thread can indirectly call fsg_unbind() which in turn can tell the 160 * thread to exit. The first problem is resolved through the use of the 161 * REGISTERED atomic bitflag; the driver will only be deregistered once. 162 * The second problem is resolved by having fsg_unbind() check 163 * fsg->state; it won't try to stop the thread if the state is already 164 * FSG_STATE_TERMINATED. 165 * 166 * To provide maximum throughput, the driver uses a circular pipeline of 167 * buffer heads (struct fsg_buffhd). In principle the pipeline can be 168 * arbitrarily long; in practice the benefits don't justify having more 169 * than 2 stages (i.e., double buffering). But it helps to think of the 170 * pipeline as being a long one. Each buffer head contains a bulk-in and 171 * a bulk-out request pointer (since the buffer can be used for both 172 * output and input -- directions always are given from the host's 173 * point of view) as well as a pointer to the buffer and various state 174 * variables. 175 * 176 * Use of the pipeline follows a simple protocol. There is a variable 177 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use. 178 * At any time that buffer head may still be in use from an earlier 179 * request, so each buffer head has a state variable indicating whether 180 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the 181 * buffer head to be EMPTY, filling the buffer either by file I/O or by 182 * USB I/O (during which the buffer head is BUSY), and marking the buffer 183 * head FULL when the I/O is complete. Then the buffer will be emptied 184 * (again possibly by USB I/O, during which it is marked BUSY) and 185 * finally marked EMPTY again (possibly by a completion routine). 186 * 187 * A module parameter tells the driver to avoid stalling the bulk 188 * endpoints wherever the transport specification allows. This is 189 * necessary for some UDCs like the SuperH, which cannot reliably clear a 190 * halt on a bulk endpoint. However, under certain circumstances the 191 * Bulk-only specification requires a stall. In such cases the driver 192 * will halt the endpoint and set a flag indicating that it should clear 193 * the halt in software during the next device reset. Hopefully this 194 * will permit everything to work correctly. Furthermore, although the 195 * specification allows the bulk-out endpoint to halt when the host sends 196 * too much data, implementing this would cause an unavoidable race. 197 * The driver will always use the "no-stall" approach for OUT transfers. 198 * 199 * One subtle point concerns sending status-stage responses for ep0 200 * requests. Some of these requests, such as device reset, can involve 201 * interrupting an ongoing file I/O operation, which might take an 202 * arbitrarily long time. During that delay the host might give up on 203 * the original ep0 request and issue a new one. When that happens the 204 * driver should not notify the host about completion of the original 205 * request, as the host will no longer be waiting for it. So the driver 206 * assigns to each ep0 request a unique tag, and it keeps track of the 207 * tag value of the request associated with a long-running exception 208 * (device-reset, interface-change, or configuration-change). When the 209 * exception handler is finished, the status-stage response is submitted 210 * only if the current ep0 request tag is equal to the exception request 211 * tag. Thus only the most recently received ep0 request will get a 212 * status-stage response. 213 * 214 * Warning: This driver source file is too long. It ought to be split up 215 * into a header file plus about 3 separate .c files, to handle the details 216 * of the Gadget, USB Mass Storage, and SCSI protocols. 217 */ 218 219 220/* #define VERBOSE_DEBUG */ 221/* #define DUMP_MSGS */ 222 223 224#include <linux/blkdev.h> 225#include <linux/completion.h> 226#include <linux/dcache.h> 227#include <linux/delay.h> 228#include <linux/device.h> 229#include <linux/fcntl.h> 230#include <linux/file.h> 231#include <linux/fs.h> 232#include <linux/kref.h> 233#include <linux/kthread.h> 234#include <linux/limits.h> 235#include <linux/rwsem.h> 236#include <linux/slab.h> 237#include <linux/spinlock.h> 238#include <linux/string.h> 239#include <linux/freezer.h> 240#include <linux/utsname.h> 241 242#include <linux/usb/ch9.h> 243#include <linux/usb/gadget.h> 244 245#include "gadget_chips.h" 246 247 248/*-------------------------------------------------------------------------*/ 249 250#define DRIVER_DESC "File-backed Storage Gadget" 251#define DRIVER_NAME "g_file_storage" 252#define DRIVER_VERSION "7 August 2007" 253 254static const char longname[] = DRIVER_DESC; 255static const char shortname[] = DRIVER_NAME; 256 257MODULE_DESCRIPTION(DRIVER_DESC); 258MODULE_AUTHOR("Alan Stern"); 259MODULE_LICENSE("Dual BSD/GPL"); 260 261/* Thanks to NetChip Technologies for donating this product ID. 262 * 263 * DO NOT REUSE THESE IDs with any other driver!! Ever!! 264 * Instead: allocate your own, using normal USB-IF procedures. */ 265#define DRIVER_VENDOR_ID 0x0525 // NetChip 266#define DRIVER_PRODUCT_ID 0xa4a5 // Linux-USB File-backed Storage Gadget 267 268 269/* 270 * This driver assumes self-powered hardware and has no way for users to 271 * trigger remote wakeup. It uses autoconfiguration to select endpoints 272 * and endpoint addresses. 273 */ 274 275 276/*-------------------------------------------------------------------------*/ 277 278#define LDBG(lun,fmt,args...) \ 279 dev_dbg(&(lun)->dev , fmt , ## args) 280#define MDBG(fmt,args...) \ 281 pr_debug(DRIVER_NAME ": " fmt , ## args) 282 283#ifndef DEBUG 284#undef VERBOSE_DEBUG 285#undef DUMP_MSGS 286#endif /* !DEBUG */ 287 288#ifdef VERBOSE_DEBUG 289#define VLDBG LDBG 290#else 291#define VLDBG(lun,fmt,args...) \ 292 do { } while (0) 293#endif /* VERBOSE_DEBUG */ 294 295#define LERROR(lun,fmt,args...) \ 296 dev_err(&(lun)->dev , fmt , ## args) 297#define LWARN(lun,fmt,args...) \ 298 dev_warn(&(lun)->dev , fmt , ## args) 299#define LINFO(lun,fmt,args...) \ 300 dev_info(&(lun)->dev , fmt , ## args) 301 302#define MINFO(fmt,args...) \ 303 pr_info(DRIVER_NAME ": " fmt , ## args) 304 305#define DBG(d, fmt, args...) \ 306 dev_dbg(&(d)->gadget->dev , fmt , ## args) 307#define VDBG(d, fmt, args...) \ 308 dev_vdbg(&(d)->gadget->dev , fmt , ## args) 309#define ERROR(d, fmt, args...) \ 310 dev_err(&(d)->gadget->dev , fmt , ## args) 311#define WARN(d, fmt, args...) \ 312 dev_warn(&(d)->gadget->dev , fmt , ## args) 313#define INFO(d, fmt, args...) \ 314 dev_info(&(d)->gadget->dev , fmt , ## args) 315 316 317/*-------------------------------------------------------------------------*/ 318 319/* Encapsulate the module parameter settings */ 320 321#define MAX_LUNS 8 322 323static struct { 324 char *file[MAX_LUNS]; 325 int ro[MAX_LUNS]; 326 unsigned int num_filenames; 327 unsigned int num_ros; 328 unsigned int nluns; 329 330 int removable; 331 int can_stall; 332 333 char *transport_parm; 334 char *protocol_parm; 335 unsigned short vendor; 336 unsigned short product; 337 unsigned short release; 338 unsigned int buflen; 339 340 int transport_type; 341 char *transport_name; 342 int protocol_type; 343 char *protocol_name; 344 345} mod_data = { // Default values 346 .transport_parm = "BBB", 347 .protocol_parm = "SCSI", 348 .removable = 0, 349 .can_stall = 1, 350 .vendor = DRIVER_VENDOR_ID, 351 .product = DRIVER_PRODUCT_ID, 352 .release = 0xffff, // Use controller chip type 353 .buflen = 16384, 354 }; 355 356 357module_param_array_named(file, mod_data.file, charp, &mod_data.num_filenames, 358 S_IRUGO); 359MODULE_PARM_DESC(file, "names of backing files or devices"); 360 361module_param_array_named(ro, mod_data.ro, bool, &mod_data.num_ros, S_IRUGO); 362MODULE_PARM_DESC(ro, "true to force read-only"); 363 364module_param_named(luns, mod_data.nluns, uint, S_IRUGO); 365MODULE_PARM_DESC(luns, "number of LUNs"); 366 367module_param_named(removable, mod_data.removable, bool, S_IRUGO); 368MODULE_PARM_DESC(removable, "true to simulate removable media"); 369 370module_param_named(stall, mod_data.can_stall, bool, S_IRUGO); 371MODULE_PARM_DESC(stall, "false to prevent bulk stalls"); 372 373 374/* In the non-TEST version, only the module parameters listed above 375 * are available. */ 376#ifdef CONFIG_USB_FILE_STORAGE_TEST 377 378module_param_named(transport, mod_data.transport_parm, charp, S_IRUGO); 379MODULE_PARM_DESC(transport, "type of transport (BBB, CBI, or CB)"); 380 381module_param_named(protocol, mod_data.protocol_parm, charp, S_IRUGO); 382MODULE_PARM_DESC(protocol, "type of protocol (RBC, 8020, QIC, UFI, " 383 "8070, or SCSI)"); 384 385module_param_named(vendor, mod_data.vendor, ushort, S_IRUGO); 386MODULE_PARM_DESC(vendor, "USB Vendor ID"); 387 388module_param_named(product, mod_data.product, ushort, S_IRUGO); 389MODULE_PARM_DESC(product, "USB Product ID"); 390 391module_param_named(release, mod_data.release, ushort, S_IRUGO); 392MODULE_PARM_DESC(release, "USB release number"); 393 394module_param_named(buflen, mod_data.buflen, uint, S_IRUGO); 395MODULE_PARM_DESC(buflen, "I/O buffer size"); 396 397#endif /* CONFIG_USB_FILE_STORAGE_TEST */ 398 399 400/*-------------------------------------------------------------------------*/ 401 402/* USB protocol value = the transport method */ 403#define USB_PR_CBI 0x00 // Control/Bulk/Interrupt 404#define USB_PR_CB 0x01 // Control/Bulk w/o interrupt 405#define USB_PR_BULK 0x50 // Bulk-only 406 407/* USB subclass value = the protocol encapsulation */ 408#define USB_SC_RBC 0x01 // Reduced Block Commands (flash) 409#define USB_SC_8020 0x02 // SFF-8020i, MMC-2, ATAPI (CD-ROM) 410#define USB_SC_QIC 0x03 // QIC-157 (tape) 411#define USB_SC_UFI 0x04 // UFI (floppy) 412#define USB_SC_8070 0x05 // SFF-8070i (removable) 413#define USB_SC_SCSI 0x06 // Transparent SCSI 414 415/* Bulk-only data structures */ 416 417/* Command Block Wrapper */ 418struct bulk_cb_wrap { 419 __le32 Signature; // Contains 'USBC' 420 u32 Tag; // Unique per command id 421 __le32 DataTransferLength; // Size of the data 422 u8 Flags; // Direction in bit 7 423 u8 Lun; // LUN (normally 0) 424 u8 Length; // Of the CDB, <= MAX_COMMAND_SIZE 425 u8 CDB[16]; // Command Data Block 426}; 427 428#define USB_BULK_CB_WRAP_LEN 31 429#define USB_BULK_CB_SIG 0x43425355 // Spells out USBC 430#define USB_BULK_IN_FLAG 0x80 431 432/* Command Status Wrapper */ 433struct bulk_cs_wrap { 434 __le32 Signature; // Should = 'USBS' 435 u32 Tag; // Same as original command 436 __le32 Residue; // Amount not transferred 437 u8 Status; // See below 438}; 439 440#define USB_BULK_CS_WRAP_LEN 13 441#define USB_BULK_CS_SIG 0x53425355 // Spells out 'USBS' 442#define USB_STATUS_PASS 0 443#define USB_STATUS_FAIL 1 444#define USB_STATUS_PHASE_ERROR 2 445 446/* Bulk-only class specific requests */ 447#define USB_BULK_RESET_REQUEST 0xff 448#define USB_BULK_GET_MAX_LUN_REQUEST 0xfe 449 450 451/* CBI Interrupt data structure */ 452struct interrupt_data { 453 u8 bType; 454 u8 bValue; 455}; 456 457#define CBI_INTERRUPT_DATA_LEN 2 458 459/* CBI Accept Device-Specific Command request */ 460#define USB_CBI_ADSC_REQUEST 0x00 461 462 463#define MAX_COMMAND_SIZE 16 // Length of a SCSI Command Data Block 464 465/* SCSI commands that we recognize */ 466#define SC_FORMAT_UNIT 0x04 467#define SC_INQUIRY 0x12 468#define SC_MODE_SELECT_6 0x15 469#define SC_MODE_SELECT_10 0x55 470#define SC_MODE_SENSE_6 0x1a 471#define SC_MODE_SENSE_10 0x5a 472#define SC_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e 473#define SC_READ_6 0x08 474#define SC_READ_10 0x28 475#define SC_READ_12 0xa8 476#define SC_READ_CAPACITY 0x25 477#define SC_READ_FORMAT_CAPACITIES 0x23 478#define SC_RELEASE 0x17 479#define SC_REQUEST_SENSE 0x03 480#define SC_RESERVE 0x16 481#define SC_SEND_DIAGNOSTIC 0x1d 482#define SC_START_STOP_UNIT 0x1b 483#define SC_SYNCHRONIZE_CACHE 0x35 484#define SC_TEST_UNIT_READY 0x00 485#define SC_VERIFY 0x2f 486#define SC_WRITE_6 0x0a 487#define SC_WRITE_10 0x2a 488#define SC_WRITE_12 0xaa 489 490/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */ 491#define SS_NO_SENSE 0 492#define SS_COMMUNICATION_FAILURE 0x040800 493#define SS_INVALID_COMMAND 0x052000 494#define SS_INVALID_FIELD_IN_CDB 0x052400 495#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE 0x052100 496#define SS_LOGICAL_UNIT_NOT_SUPPORTED 0x052500 497#define SS_MEDIUM_NOT_PRESENT 0x023a00 498#define SS_MEDIUM_REMOVAL_PREVENTED 0x055302 499#define SS_NOT_READY_TO_READY_TRANSITION 0x062800 500#define SS_RESET_OCCURRED 0x062900 501#define SS_SAVING_PARAMETERS_NOT_SUPPORTED 0x053900 502#define SS_UNRECOVERED_READ_ERROR 0x031100 503#define SS_WRITE_ERROR 0x030c02 504#define SS_WRITE_PROTECTED 0x072700 505 506#define SK(x) ((u8) ((x) >> 16)) // Sense Key byte, etc. 507#define ASC(x) ((u8) ((x) >> 8)) 508#define ASCQ(x) ((u8) (x)) 509 510 511/*-------------------------------------------------------------------------*/ 512 513/* 514 * These definitions will permit the compiler to avoid generating code for 515 * parts of the driver that aren't used in the non-TEST version. Even gcc 516 * can recognize when a test of a constant expression yields a dead code 517 * path. 518 */ 519 520#ifdef CONFIG_USB_FILE_STORAGE_TEST 521 522#define transport_is_bbb() (mod_data.transport_type == USB_PR_BULK) 523#define transport_is_cbi() (mod_data.transport_type == USB_PR_CBI) 524#define protocol_is_scsi() (mod_data.protocol_type == USB_SC_SCSI) 525 526#else 527 528#define transport_is_bbb() 1 529#define transport_is_cbi() 0 530#define protocol_is_scsi() 1 531 532#endif /* CONFIG_USB_FILE_STORAGE_TEST */ 533 534 535struct lun { 536 struct file *filp; 537 loff_t file_length; 538 loff_t num_sectors; 539 540 unsigned int ro : 1; 541 unsigned int prevent_medium_removal : 1; 542 unsigned int registered : 1; 543 unsigned int info_valid : 1; 544 545 u32 sense_data; 546 u32 sense_data_info; 547 u32 unit_attention_data; 548 549 struct device dev; 550}; 551 552#define backing_file_is_open(curlun) ((curlun)->filp != NULL) 553 554static struct lun *dev_to_lun(struct device *dev) 555{ 556 return container_of(dev, struct lun, dev); 557} 558 559 560/* Big enough to hold our biggest descriptor */ 561#define EP0_BUFSIZE 256 562#define DELAYED_STATUS (EP0_BUFSIZE + 999) // An impossibly large value 563 564/* Number of buffers we will use. 2 is enough for double-buffering */ 565#define NUM_BUFFERS 2 566 567enum fsg_buffer_state { 568 BUF_STATE_EMPTY = 0, 569 BUF_STATE_FULL, 570 BUF_STATE_BUSY 571}; 572 573struct fsg_buffhd { 574 void *buf; 575 enum fsg_buffer_state state; 576 struct fsg_buffhd *next; 577 578 /* The NetChip 2280 is faster, and handles some protocol faults 579 * better, if we don't submit any short bulk-out read requests. 580 * So we will record the intended request length here. */ 581 unsigned int bulk_out_intended_length; 582 583 struct usb_request *inreq; 584 int inreq_busy; 585 struct usb_request *outreq; 586 int outreq_busy; 587}; 588 589enum fsg_state { 590 FSG_STATE_COMMAND_PHASE = -10, // This one isn't used anywhere 591 FSG_STATE_DATA_PHASE, 592 FSG_STATE_STATUS_PHASE, 593 594 FSG_STATE_IDLE = 0, 595 FSG_STATE_ABORT_BULK_OUT, 596 FSG_STATE_RESET, 597 FSG_STATE_INTERFACE_CHANGE, 598 FSG_STATE_CONFIG_CHANGE, 599 FSG_STATE_DISCONNECT, 600 FSG_STATE_EXIT, 601 FSG_STATE_TERMINATED 602}; 603 604enum data_direction { 605 DATA_DIR_UNKNOWN = 0, 606 DATA_DIR_FROM_HOST, 607 DATA_DIR_TO_HOST, 608 DATA_DIR_NONE 609}; 610 611struct fsg_dev { 612 /* lock protects: state, all the req_busy's, and cbbuf_cmnd */ 613 spinlock_t lock; 614 struct usb_gadget *gadget; 615 616 /* filesem protects: backing files in use */ 617 struct rw_semaphore filesem; 618 619 /* reference counting: wait until all LUNs are released */ 620 struct kref ref; 621 622 struct usb_ep *ep0; // Handy copy of gadget->ep0 623 struct usb_request *ep0req; // For control responses 624 unsigned int ep0_req_tag; 625 const char *ep0req_name; 626 627 struct usb_request *intreq; // For interrupt responses 628 int intreq_busy; 629 struct fsg_buffhd *intr_buffhd; 630 631 unsigned int bulk_out_maxpacket; 632 enum fsg_state state; // For exception handling 633 unsigned int exception_req_tag; 634 635 u8 config, new_config; 636 637 unsigned int running : 1; 638 unsigned int bulk_in_enabled : 1; 639 unsigned int bulk_out_enabled : 1; 640 unsigned int intr_in_enabled : 1; 641 unsigned int phase_error : 1; 642 unsigned int short_packet_received : 1; 643 unsigned int bad_lun_okay : 1; 644 645 unsigned long atomic_bitflags; 646#define REGISTERED 0 647#define IGNORE_BULK_OUT 1 648#define SUSPENDED 2 649 650 struct usb_ep *bulk_in; 651 struct usb_ep *bulk_out; 652 struct usb_ep *intr_in; 653 654 struct fsg_buffhd *next_buffhd_to_fill; 655 struct fsg_buffhd *next_buffhd_to_drain; 656 struct fsg_buffhd buffhds[NUM_BUFFERS]; 657 658 int thread_wakeup_needed; 659 struct completion thread_notifier; 660 struct task_struct *thread_task; 661 662 int cmnd_size; 663 u8 cmnd[MAX_COMMAND_SIZE]; 664 enum data_direction data_dir; 665 u32 data_size; 666 u32 data_size_from_cmnd; 667 u32 tag; 668 unsigned int lun; 669 u32 residue; 670 u32 usb_amount_left; 671 672 /* The CB protocol offers no way for a host to know when a command 673 * has completed. As a result the next command may arrive early, 674 * and we will still have to handle it. For that reason we need 675 * a buffer to store new commands when using CB (or CBI, which 676 * does not oblige a host to wait for command completion either). */ 677 int cbbuf_cmnd_size; 678 u8 cbbuf_cmnd[MAX_COMMAND_SIZE]; 679 680 unsigned int nluns; 681 struct lun *luns; 682 struct lun *curlun; 683}; 684 685typedef void (*fsg_routine_t)(struct fsg_dev *); 686 687static int exception_in_progress(struct fsg_dev *fsg) 688{ 689 return (fsg->state > FSG_STATE_IDLE); 690} 691 692/* Make bulk-out requests be divisible by the maxpacket size */ 693static void set_bulk_out_req_length(struct fsg_dev *fsg, 694 struct fsg_buffhd *bh, unsigned int length) 695{ 696 unsigned int rem; 697 698 bh->bulk_out_intended_length = length; 699 rem = length % fsg->bulk_out_maxpacket; 700 if (rem > 0) 701 length += fsg->bulk_out_maxpacket - rem; 702 bh->outreq->length = length; 703} 704 705static struct fsg_dev *the_fsg; 706static struct usb_gadget_driver fsg_driver; 707 708static void close_backing_file(struct lun *curlun); 709static void close_all_backing_files(struct fsg_dev *fsg); 710 711 712/*-------------------------------------------------------------------------*/ 713 714#ifdef DUMP_MSGS 715 716static void dump_msg(struct fsg_dev *fsg, const char *label, 717 const u8 *buf, unsigned int length) 718{ 719 if (length < 512) { 720 DBG(fsg, "%s, length %u:\n", label, length); 721 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 722 16, 1, buf, length, 0); 723 } 724} 725 726static void dump_cdb(struct fsg_dev *fsg) 727{} 728 729#else 730 731static void dump_msg(struct fsg_dev *fsg, const char *label, 732 const u8 *buf, unsigned int length) 733{} 734 735#ifdef VERBOSE_DEBUG 736 737static void dump_cdb(struct fsg_dev *fsg) 738{ 739 print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE, 740 16, 1, fsg->cmnd, fsg->cmnd_size, 0); 741} 742 743#else 744 745static void dump_cdb(struct fsg_dev *fsg) 746{} 747 748#endif /* VERBOSE_DEBUG */ 749#endif /* DUMP_MSGS */ 750 751 752static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep) 753{ 754 const char *name; 755 756 if (ep == fsg->bulk_in) 757 name = "bulk-in"; 758 else if (ep == fsg->bulk_out) 759 name = "bulk-out"; 760 else 761 name = ep->name; 762 DBG(fsg, "%s set halt\n", name); 763 return usb_ep_set_halt(ep); 764} 765 766 767/*-------------------------------------------------------------------------*/ 768 769/* Routines for unaligned data access */ 770 771static u16 get_be16(u8 *buf) 772{ 773 return ((u16) buf[0] << 8) | ((u16) buf[1]); 774} 775 776static u32 get_be32(u8 *buf) 777{ 778 return ((u32) buf[0] << 24) | ((u32) buf[1] << 16) | 779 ((u32) buf[2] << 8) | ((u32) buf[3]); 780} 781 782static void put_be16(u8 *buf, u16 val) 783{ 784 buf[0] = val >> 8; 785 buf[1] = val; 786} 787 788static void put_be32(u8 *buf, u32 val) 789{ 790 buf[0] = val >> 24; 791 buf[1] = val >> 16; 792 buf[2] = val >> 8; 793 buf[3] = val & 0xff; 794} 795 796 797/*-------------------------------------------------------------------------*/ 798 799/* 800 * DESCRIPTORS ... most are static, but strings and (full) configuration 801 * descriptors are built on demand. Also the (static) config and interface 802 * descriptors are adjusted during fsg_bind(). 803 */ 804#define STRING_MANUFACTURER 1 805#define STRING_PRODUCT 2 806#define STRING_SERIAL 3 807#define STRING_CONFIG 4 808#define STRING_INTERFACE 5 809 810/* There is only one configuration. */ 811#define CONFIG_VALUE 1 812 813static struct usb_device_descriptor 814device_desc = { 815 .bLength = sizeof device_desc, 816 .bDescriptorType = USB_DT_DEVICE, 817 818 .bcdUSB = __constant_cpu_to_le16(0x0200), 819 .bDeviceClass = USB_CLASS_PER_INTERFACE, 820 821 /* The next three values can be overridden by module parameters */ 822 .idVendor = __constant_cpu_to_le16(DRIVER_VENDOR_ID), 823 .idProduct = __constant_cpu_to_le16(DRIVER_PRODUCT_ID), 824 .bcdDevice = __constant_cpu_to_le16(0xffff), 825 826 .iManufacturer = STRING_MANUFACTURER, 827 .iProduct = STRING_PRODUCT, 828 .iSerialNumber = STRING_SERIAL, 829 .bNumConfigurations = 1, 830}; 831 832static struct usb_config_descriptor 833config_desc = { 834 .bLength = sizeof config_desc, 835 .bDescriptorType = USB_DT_CONFIG, 836 837 /* wTotalLength computed by usb_gadget_config_buf() */ 838 .bNumInterfaces = 1, 839 .bConfigurationValue = CONFIG_VALUE, 840 .iConfiguration = STRING_CONFIG, 841 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER, 842 .bMaxPower = 1, // self-powered 843}; 844 845static struct usb_otg_descriptor 846otg_desc = { 847 .bLength = sizeof(otg_desc), 848 .bDescriptorType = USB_DT_OTG, 849 850 .bmAttributes = USB_OTG_SRP, 851}; 852 853/* There is only one interface. */ 854 855static struct usb_interface_descriptor 856intf_desc = { 857 .bLength = sizeof intf_desc, 858 .bDescriptorType = USB_DT_INTERFACE, 859 860 .bNumEndpoints = 2, // Adjusted during fsg_bind() 861 .bInterfaceClass = USB_CLASS_MASS_STORAGE, 862 .bInterfaceSubClass = USB_SC_SCSI, // Adjusted during fsg_bind() 863 .bInterfaceProtocol = USB_PR_BULK, // Adjusted during fsg_bind() 864 .iInterface = STRING_INTERFACE, 865}; 866 867/* Three full-speed endpoint descriptors: bulk-in, bulk-out, 868 * and interrupt-in. */ 869 870static struct usb_endpoint_descriptor 871fs_bulk_in_desc = { 872 .bLength = USB_DT_ENDPOINT_SIZE, 873 .bDescriptorType = USB_DT_ENDPOINT, 874 875 .bEndpointAddress = USB_DIR_IN, 876 .bmAttributes = USB_ENDPOINT_XFER_BULK, 877 /* wMaxPacketSize set by autoconfiguration */ 878}; 879 880static struct usb_endpoint_descriptor 881fs_bulk_out_desc = { 882 .bLength = USB_DT_ENDPOINT_SIZE, 883 .bDescriptorType = USB_DT_ENDPOINT, 884 885 .bEndpointAddress = USB_DIR_OUT, 886 .bmAttributes = USB_ENDPOINT_XFER_BULK, 887 /* wMaxPacketSize set by autoconfiguration */ 888}; 889 890static struct usb_endpoint_descriptor 891fs_intr_in_desc = { 892 .bLength = USB_DT_ENDPOINT_SIZE, 893 .bDescriptorType = USB_DT_ENDPOINT, 894 895 .bEndpointAddress = USB_DIR_IN, 896 .bmAttributes = USB_ENDPOINT_XFER_INT, 897 .wMaxPacketSize = __constant_cpu_to_le16(2), 898 .bInterval = 32, // frames -> 32 ms 899}; 900 901static const struct usb_descriptor_header *fs_function[] = { 902 (struct usb_descriptor_header *) &otg_desc, 903 (struct usb_descriptor_header *) &intf_desc, 904 (struct usb_descriptor_header *) &fs_bulk_in_desc, 905 (struct usb_descriptor_header *) &fs_bulk_out_desc, 906 (struct usb_descriptor_header *) &fs_intr_in_desc, 907 NULL, 908}; 909#define FS_FUNCTION_PRE_EP_ENTRIES 2 910 911 912/* 913 * USB 2.0 devices need to expose both high speed and full speed 914 * descriptors, unless they only run at full speed. 915 * 916 * That means alternate endpoint descriptors (bigger packets) 917 * and a "device qualifier" ... plus more construction options 918 * for the config descriptor. 919 */ 920static struct usb_qualifier_descriptor 921dev_qualifier = { 922 .bLength = sizeof dev_qualifier, 923 .bDescriptorType = USB_DT_DEVICE_QUALIFIER, 924 925 .bcdUSB = __constant_cpu_to_le16(0x0200), 926 .bDeviceClass = USB_CLASS_PER_INTERFACE, 927 928 .bNumConfigurations = 1, 929}; 930 931static struct usb_endpoint_descriptor 932hs_bulk_in_desc = { 933 .bLength = USB_DT_ENDPOINT_SIZE, 934 .bDescriptorType = USB_DT_ENDPOINT, 935 936 /* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */ 937 .bmAttributes = USB_ENDPOINT_XFER_BULK, 938 .wMaxPacketSize = __constant_cpu_to_le16(512), 939}; 940 941static struct usb_endpoint_descriptor 942hs_bulk_out_desc = { 943 .bLength = USB_DT_ENDPOINT_SIZE, 944 .bDescriptorType = USB_DT_ENDPOINT, 945 946 /* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */ 947 .bmAttributes = USB_ENDPOINT_XFER_BULK, 948 .wMaxPacketSize = __constant_cpu_to_le16(512), 949 .bInterval = 1, // NAK every 1 uframe 950}; 951 952static struct usb_endpoint_descriptor 953hs_intr_in_desc = { 954 .bLength = USB_DT_ENDPOINT_SIZE, 955 .bDescriptorType = USB_DT_ENDPOINT, 956 957 /* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */ 958 .bmAttributes = USB_ENDPOINT_XFER_INT, 959 .wMaxPacketSize = __constant_cpu_to_le16(2), 960 .bInterval = 9, // 2**(9-1) = 256 uframes -> 32 ms 961}; 962 963static const struct usb_descriptor_header *hs_function[] = { 964 (struct usb_descriptor_header *) &otg_desc, 965 (struct usb_descriptor_header *) &intf_desc, 966 (struct usb_descriptor_header *) &hs_bulk_in_desc, 967 (struct usb_descriptor_header *) &hs_bulk_out_desc, 968 (struct usb_descriptor_header *) &hs_intr_in_desc, 969 NULL, 970}; 971#define HS_FUNCTION_PRE_EP_ENTRIES 2 972 973/* Maxpacket and other transfer characteristics vary by speed. */ 974static struct usb_endpoint_descriptor * 975ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs, 976 struct usb_endpoint_descriptor *hs) 977{ 978 if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) 979 return hs; 980 return fs; 981} 982 983 984/* The CBI specification limits the serial string to 12 uppercase hexadecimal 985 * characters. */ 986static char manufacturer[64]; 987static char serial[13]; 988 989/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */ 990static struct usb_string strings[] = { 991 {STRING_MANUFACTURER, manufacturer}, 992 {STRING_PRODUCT, longname}, 993 {STRING_SERIAL, serial}, 994 {STRING_CONFIG, "Self-powered"}, 995 {STRING_INTERFACE, "Mass Storage"}, 996 {} 997}; 998 999static struct usb_gadget_strings stringtab = { 1000 .language = 0x0409, // en-us 1001 .strings = strings, 1002}; 1003 1004 1005/* 1006 * Config descriptors must agree with the code that sets configurations 1007 * and with code managing interfaces and their altsettings. They must 1008 * also handle different speeds and other-speed requests. 1009 */ 1010static int populate_config_buf(struct usb_gadget *gadget, 1011 u8 *buf, u8 type, unsigned index) 1012{ 1013 enum usb_device_speed speed = gadget->speed; 1014 int len; 1015 const struct usb_descriptor_header **function; 1016 1017 if (index > 0) 1018 return -EINVAL; 1019 1020 if (gadget_is_dualspeed(gadget) && type == USB_DT_OTHER_SPEED_CONFIG) 1021 speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed; 1022 if (gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH) 1023 function = hs_function; 1024 else 1025 function = fs_function; 1026 1027 /* for now, don't advertise srp-only devices */ 1028 if (!gadget_is_otg(gadget)) 1029 function++; 1030 1031 len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function); 1032 ((struct usb_config_descriptor *) buf)->bDescriptorType = type; 1033 return len; 1034} 1035 1036 1037/*-------------------------------------------------------------------------*/ 1038 1039/* These routines may be called in process context or in_irq */ 1040 1041/* Caller must hold fsg->lock */ 1042static void wakeup_thread(struct fsg_dev *fsg) 1043{ 1044 /* Tell the main thread that something has happened */ 1045 fsg->thread_wakeup_needed = 1; 1046 if (fsg->thread_task) 1047 wake_up_process(fsg->thread_task); 1048} 1049 1050 1051static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state) 1052{ 1053 unsigned long flags; 1054 1055 /* Do nothing if a higher-priority exception is already in progress. 1056 * If a lower-or-equal priority exception is in progress, preempt it 1057 * and notify the main thread by sending it a signal. */ 1058 spin_lock_irqsave(&fsg->lock, flags); 1059 if (fsg->state <= new_state) { 1060 fsg->exception_req_tag = fsg->ep0_req_tag; 1061 fsg->state = new_state; 1062 if (fsg->thread_task) 1063 send_sig_info(SIGUSR1, SEND_SIG_FORCED, 1064 fsg->thread_task); 1065 } 1066 spin_unlock_irqrestore(&fsg->lock, flags); 1067} 1068 1069 1070/*-------------------------------------------------------------------------*/ 1071 1072/* The disconnect callback and ep0 routines. These always run in_irq, 1073 * except that ep0_queue() is called in the main thread to acknowledge 1074 * completion of various requests: set config, set interface, and 1075 * Bulk-only device reset. */ 1076 1077static void fsg_disconnect(struct usb_gadget *gadget) 1078{ 1079 struct fsg_dev *fsg = get_gadget_data(gadget); 1080 1081 DBG(fsg, "disconnect or port reset\n"); 1082 raise_exception(fsg, FSG_STATE_DISCONNECT); 1083} 1084 1085 1086static int ep0_queue(struct fsg_dev *fsg) 1087{ 1088 int rc; 1089 1090 rc = usb_ep_queue(fsg->ep0, fsg->ep0req, GFP_ATOMIC); 1091 if (rc != 0 && rc != -ESHUTDOWN) { 1092 1093 /* We can't do much more than wait for a reset */ 1094 WARN(fsg, "error in submission: %s --> %d\n", 1095 fsg->ep0->name, rc); 1096 } 1097 return rc; 1098} 1099 1100static void ep0_complete(struct usb_ep *ep, struct usb_request *req) 1101{ 1102 struct fsg_dev *fsg = ep->driver_data; 1103 1104 if (req->actual > 0) 1105 dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual); 1106 if (req->status || req->actual != req->length) 1107 DBG(fsg, "%s --> %d, %u/%u\n", __func__, 1108 req->status, req->actual, req->length); 1109 if (req->status == -ECONNRESET) // Request was cancelled 1110 usb_ep_fifo_flush(ep); 1111 1112 if (req->status == 0 && req->context) 1113 ((fsg_routine_t) (req->context))(fsg); 1114} 1115 1116 1117/*-------------------------------------------------------------------------*/ 1118 1119/* Bulk and interrupt endpoint completion handlers. 1120 * These always run in_irq. */ 1121 1122static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req) 1123{ 1124 struct fsg_dev *fsg = ep->driver_data; 1125 struct fsg_buffhd *bh = req->context; 1126 1127 if (req->status || req->actual != req->length) 1128 DBG(fsg, "%s --> %d, %u/%u\n", __func__, 1129 req->status, req->actual, req->length); 1130 if (req->status == -ECONNRESET) // Request was cancelled 1131 usb_ep_fifo_flush(ep); 1132 1133 /* Hold the lock while we update the request and buffer states */ 1134 smp_wmb(); 1135 spin_lock(&fsg->lock); 1136 bh->inreq_busy = 0; 1137 bh->state = BUF_STATE_EMPTY; 1138 wakeup_thread(fsg); 1139 spin_unlock(&fsg->lock); 1140} 1141 1142static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req) 1143{ 1144 struct fsg_dev *fsg = ep->driver_data; 1145 struct fsg_buffhd *bh = req->context; 1146 1147 dump_msg(fsg, "bulk-out", req->buf, req->actual); 1148 if (req->status || req->actual != bh->bulk_out_intended_length) 1149 DBG(fsg, "%s --> %d, %u/%u\n", __func__, 1150 req->status, req->actual, 1151 bh->bulk_out_intended_length); 1152 if (req->status == -ECONNRESET) // Request was cancelled 1153 usb_ep_fifo_flush(ep); 1154 1155 /* Hold the lock while we update the request and buffer states */ 1156 smp_wmb(); 1157 spin_lock(&fsg->lock); 1158 bh->outreq_busy = 0; 1159 bh->state = BUF_STATE_FULL; 1160 wakeup_thread(fsg); 1161 spin_unlock(&fsg->lock); 1162} 1163 1164 1165#ifdef CONFIG_USB_FILE_STORAGE_TEST 1166static void intr_in_complete(struct usb_ep *ep, struct usb_request *req) 1167{ 1168 struct fsg_dev *fsg = ep->driver_data; 1169 struct fsg_buffhd *bh = req->context; 1170 1171 if (req->status || req->actual != req->length) 1172 DBG(fsg, "%s --> %d, %u/%u\n", __func__, 1173 req->status, req->actual, req->length); 1174 if (req->status == -ECONNRESET) // Request was cancelled 1175 usb_ep_fifo_flush(ep); 1176 1177 /* Hold the lock while we update the request and buffer states */ 1178 smp_wmb(); 1179 spin_lock(&fsg->lock); 1180 fsg->intreq_busy = 0; 1181 bh->state = BUF_STATE_EMPTY; 1182 wakeup_thread(fsg); 1183 spin_unlock(&fsg->lock); 1184} 1185 1186#else 1187static void intr_in_complete(struct usb_ep *ep, struct usb_request *req) 1188{} 1189#endif /* CONFIG_USB_FILE_STORAGE_TEST */ 1190 1191 1192/*-------------------------------------------------------------------------*/ 1193 1194/* Ep0 class-specific handlers. These always run in_irq. */ 1195 1196#ifdef CONFIG_USB_FILE_STORAGE_TEST 1197static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh) 1198{ 1199 struct usb_request *req = fsg->ep0req; 1200 static u8 cbi_reset_cmnd[6] = { 1201 SC_SEND_DIAGNOSTIC, 4, 0xff, 0xff, 0xff, 0xff}; 1202 1203 /* Error in command transfer? */ 1204 if (req->status || req->length != req->actual || 1205 req->actual < 6 || req->actual > MAX_COMMAND_SIZE) { 1206 1207 /* Not all controllers allow a protocol stall after 1208 * receiving control-out data, but we'll try anyway. */ 1209 fsg_set_halt(fsg, fsg->ep0); 1210 return; // Wait for reset 1211 } 1212 1213 /* Is it the special reset command? */ 1214 if (req->actual >= sizeof cbi_reset_cmnd && 1215 memcmp(req->buf, cbi_reset_cmnd, 1216 sizeof cbi_reset_cmnd) == 0) { 1217 1218 /* Raise an exception to stop the current operation 1219 * and reinitialize our state. */ 1220 DBG(fsg, "cbi reset request\n"); 1221 raise_exception(fsg, FSG_STATE_RESET); 1222 return; 1223 } 1224 1225 VDBG(fsg, "CB[I] accept device-specific command\n"); 1226 spin_lock(&fsg->lock); 1227 1228 /* Save the command for later */ 1229 if (fsg->cbbuf_cmnd_size) 1230 WARN(fsg, "CB[I] overwriting previous command\n"); 1231 fsg->cbbuf_cmnd_size = req->actual; 1232 memcpy(fsg->cbbuf_cmnd, req->buf, fsg->cbbuf_cmnd_size); 1233 1234 wakeup_thread(fsg); 1235 spin_unlock(&fsg->lock); 1236} 1237 1238#else 1239static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh) 1240{} 1241#endif /* CONFIG_USB_FILE_STORAGE_TEST */ 1242 1243 1244static int class_setup_req(struct fsg_dev *fsg, 1245 const struct usb_ctrlrequest *ctrl) 1246{ 1247 struct usb_request *req = fsg->ep0req; 1248 int value = -EOPNOTSUPP; 1249 u16 w_index = le16_to_cpu(ctrl->wIndex); 1250 u16 w_value = le16_to_cpu(ctrl->wValue); 1251 u16 w_length = le16_to_cpu(ctrl->wLength); 1252 1253 if (!fsg->config) 1254 return value; 1255 1256 /* Handle Bulk-only class-specific requests */ 1257 if (transport_is_bbb()) { 1258 switch (ctrl->bRequest) { 1259 1260 case USB_BULK_RESET_REQUEST: 1261 if (ctrl->bRequestType != (USB_DIR_OUT | 1262 USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 1263 break; 1264 if (w_index != 0 || w_value != 0) { 1265 value = -EDOM; 1266 break; 1267 } 1268 1269 /* Raise an exception to stop the current operation 1270 * and reinitialize our state. */ 1271 DBG(fsg, "bulk reset request\n"); 1272 raise_exception(fsg, FSG_STATE_RESET); 1273 value = DELAYED_STATUS; 1274 break; 1275 1276 case USB_BULK_GET_MAX_LUN_REQUEST: 1277 if (ctrl->bRequestType != (USB_DIR_IN | 1278 USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 1279 break; 1280 if (w_index != 0 || w_value != 0) { 1281 value = -EDOM; 1282 break; 1283 } 1284 VDBG(fsg, "get max LUN\n"); 1285 *(u8 *) req->buf = fsg->nluns - 1; 1286 value = 1; 1287 break; 1288 } 1289 } 1290 1291 /* Handle CBI class-specific requests */ 1292 else { 1293 switch (ctrl->bRequest) { 1294 1295 case USB_CBI_ADSC_REQUEST: 1296 if (ctrl->bRequestType != (USB_DIR_OUT | 1297 USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 1298 break; 1299 if (w_index != 0 || w_value != 0) { 1300 value = -EDOM; 1301 break; 1302 } 1303 if (w_length > MAX_COMMAND_SIZE) { 1304 value = -EOVERFLOW; 1305 break; 1306 } 1307 value = w_length; 1308 fsg->ep0req->context = received_cbi_adsc; 1309 break; 1310 } 1311 } 1312 1313 if (value == -EOPNOTSUPP) 1314 VDBG(fsg, 1315 "unknown class-specific control req " 1316 "%02x.%02x v%04x i%04x l%u\n", 1317 ctrl->bRequestType, ctrl->bRequest, 1318 le16_to_cpu(ctrl->wValue), w_index, w_length); 1319 return value; 1320} 1321 1322 1323/*-------------------------------------------------------------------------*/ 1324 1325/* Ep0 standard request handlers. These always run in_irq. */ 1326 1327static int standard_setup_req(struct fsg_dev *fsg, 1328 const struct usb_ctrlrequest *ctrl) 1329{ 1330 struct usb_request *req = fsg->ep0req; 1331 int value = -EOPNOTSUPP; 1332 u16 w_index = le16_to_cpu(ctrl->wIndex); 1333 u16 w_value = le16_to_cpu(ctrl->wValue); 1334 1335 /* Usually this just stores reply data in the pre-allocated ep0 buffer, 1336 * but config change events will also reconfigure hardware. */ 1337 switch (ctrl->bRequest) { 1338 1339 case USB_REQ_GET_DESCRIPTOR: 1340 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD | 1341 USB_RECIP_DEVICE)) 1342 break; 1343 switch (w_value >> 8) { 1344 1345 case USB_DT_DEVICE: 1346 VDBG(fsg, "get device descriptor\n"); 1347 value = sizeof device_desc; 1348 memcpy(req->buf, &device_desc, value); 1349 break; 1350 case USB_DT_DEVICE_QUALIFIER: 1351 VDBG(fsg, "get device qualifier\n"); 1352 if (!gadget_is_dualspeed(fsg->gadget)) 1353 break; 1354 value = sizeof dev_qualifier; 1355 memcpy(req->buf, &dev_qualifier, value); 1356 break; 1357 1358 case USB_DT_OTHER_SPEED_CONFIG: 1359 VDBG(fsg, "get other-speed config descriptor\n"); 1360 if (!gadget_is_dualspeed(fsg->gadget)) 1361 break; 1362 goto get_config; 1363 case USB_DT_CONFIG: 1364 VDBG(fsg, "get configuration descriptor\n"); 1365get_config: 1366 value = populate_config_buf(fsg->gadget, 1367 req->buf, 1368 w_value >> 8, 1369 w_value & 0xff); 1370 break; 1371 1372 case USB_DT_STRING: 1373 VDBG(fsg, "get string descriptor\n"); 1374 1375 /* wIndex == language code */ 1376 value = usb_gadget_get_string(&stringtab, 1377 w_value & 0xff, req->buf); 1378 break; 1379 } 1380 break; 1381 1382 /* One config, two speeds */ 1383 case USB_REQ_SET_CONFIGURATION: 1384 if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD | 1385 USB_RECIP_DEVICE)) 1386 break; 1387 VDBG(fsg, "set configuration\n"); 1388 if (w_value == CONFIG_VALUE || w_value == 0) { 1389 fsg->new_config = w_value; 1390 1391 /* Raise an exception to wipe out previous transaction 1392 * state (queued bufs, etc) and set the new config. */ 1393 raise_exception(fsg, FSG_STATE_CONFIG_CHANGE); 1394 value = DELAYED_STATUS; 1395 } 1396 break; 1397 case USB_REQ_GET_CONFIGURATION: 1398 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD | 1399 USB_RECIP_DEVICE)) 1400 break; 1401 VDBG(fsg, "get configuration\n"); 1402 *(u8 *) req->buf = fsg->config; 1403 value = 1; 1404 break; 1405 1406 case USB_REQ_SET_INTERFACE: 1407 if (ctrl->bRequestType != (USB_DIR_OUT| USB_TYPE_STANDARD | 1408 USB_RECIP_INTERFACE)) 1409 break; 1410 if (fsg->config && w_index == 0) { 1411 1412 /* Raise an exception to wipe out previous transaction 1413 * state (queued bufs, etc) and install the new 1414 * interface altsetting. */ 1415 raise_exception(fsg, FSG_STATE_INTERFACE_CHANGE); 1416 value = DELAYED_STATUS; 1417 } 1418 break; 1419 case USB_REQ_GET_INTERFACE: 1420 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD | 1421 USB_RECIP_INTERFACE)) 1422 break; 1423 if (!fsg->config) 1424 break; 1425 if (w_index != 0) { 1426 value = -EDOM; 1427 break; 1428 } 1429 VDBG(fsg, "get interface\n"); 1430 *(u8 *) req->buf = 0; 1431 value = 1; 1432 break; 1433 1434 default: 1435 VDBG(fsg, 1436 "unknown control req %02x.%02x v%04x i%04x l%u\n", 1437 ctrl->bRequestType, ctrl->bRequest, 1438 w_value, w_index, le16_to_cpu(ctrl->wLength)); 1439 } 1440 1441 return value; 1442} 1443 1444 1445static int fsg_setup(struct usb_gadget *gadget, 1446 const struct usb_ctrlrequest *ctrl) 1447{ 1448 struct fsg_dev *fsg = get_gadget_data(gadget); 1449 int rc; 1450 int w_length = le16_to_cpu(ctrl->wLength); 1451 1452 ++fsg->ep0_req_tag; // Record arrival of a new request 1453 fsg->ep0req->context = NULL; 1454 fsg->ep0req->length = 0; 1455 dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl)); 1456 1457 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) 1458 rc = class_setup_req(fsg, ctrl); 1459 else 1460 rc = standard_setup_req(fsg, ctrl); 1461 1462 /* Respond with data/status or defer until later? */ 1463 if (rc >= 0 && rc != DELAYED_STATUS) { 1464 rc = min(rc, w_length); 1465 fsg->ep0req->length = rc; 1466 fsg->ep0req->zero = rc < w_length; 1467 fsg->ep0req_name = (ctrl->bRequestType & USB_DIR_IN ? 1468 "ep0-in" : "ep0-out"); 1469 rc = ep0_queue(fsg); 1470 } 1471 1472 /* Device either stalls (rc < 0) or reports success */ 1473 return rc; 1474} 1475 1476 1477/*-------------------------------------------------------------------------*/ 1478 1479/* All the following routines run in process context */ 1480 1481 1482/* Use this for bulk or interrupt transfers, not ep0 */ 1483static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep, 1484 struct usb_request *req, int *pbusy, 1485 enum fsg_buffer_state *state) 1486{ 1487 int rc; 1488 1489 if (ep == fsg->bulk_in) 1490 dump_msg(fsg, "bulk-in", req->buf, req->length); 1491 else if (ep == fsg->intr_in) 1492 dump_msg(fsg, "intr-in", req->buf, req->length); 1493 1494 spin_lock_irq(&fsg->lock); 1495 *pbusy = 1; 1496 *state = BUF_STATE_BUSY; 1497 spin_unlock_irq(&fsg->lock); 1498 rc = usb_ep_queue(ep, req, GFP_KERNEL); 1499 if (rc != 0) { 1500 *pbusy = 0; 1501 *state = BUF_STATE_EMPTY; 1502 1503 /* We can't do much more than wait for a reset */ 1504 1505 /* Note: currently the net2280 driver fails zero-length 1506 * submissions if DMA is enabled. */ 1507 if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP && 1508 req->length == 0)) 1509 WARN(fsg, "error in submission: %s --> %d\n", 1510 ep->name, rc); 1511 } 1512} 1513 1514 1515static int sleep_thread(struct fsg_dev *fsg) 1516{ 1517 int rc = 0; 1518 1519 /* Wait until a signal arrives or we are woken up */ 1520 for (;;) { 1521 try_to_freeze(); 1522 set_current_state(TASK_INTERRUPTIBLE); 1523 if (signal_pending(current)) { 1524 rc = -EINTR; 1525 break; 1526 } 1527 if (fsg->thread_wakeup_needed) 1528 break; 1529 schedule(); 1530 } 1531 __set_current_state(TASK_RUNNING); 1532 fsg->thread_wakeup_needed = 0; 1533 return rc; 1534} 1535 1536 1537/*-------------------------------------------------------------------------*/ 1538 1539static int do_read(struct fsg_dev *fsg) 1540{ 1541 struct lun *curlun = fsg->curlun; 1542 u32 lba; 1543 struct fsg_buffhd *bh; 1544 int rc; 1545 u32 amount_left; 1546 loff_t file_offset, file_offset_tmp; 1547 unsigned int amount; 1548 unsigned int partial_page; 1549 ssize_t nread; 1550 1551 /* Get the starting Logical Block Address and check that it's 1552 * not too big */ 1553 if (fsg->cmnd[0] == SC_READ_6) 1554 lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]); 1555 else { 1556 lba = get_be32(&fsg->cmnd[2]); 1557 1558 /* We allow DPO (Disable Page Out = don't save data in the 1559 * cache) and FUA (Force Unit Access = don't read from the 1560 * cache), but we don't implement them. */ 1561 if ((fsg->cmnd[1] & ~0x18) != 0) { 1562 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1563 return -EINVAL; 1564 } 1565 } 1566 if (lba >= curlun->num_sectors) { 1567 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1568 return -EINVAL; 1569 } 1570 file_offset = ((loff_t) lba) << 9; 1571 1572 /* Carry out the file reads */ 1573 amount_left = fsg->data_size_from_cmnd; 1574 if (unlikely(amount_left == 0)) 1575 return -EIO; // No default reply 1576 1577 for (;;) { 1578 1579 /* Figure out how much we need to read: 1580 * Try to read the remaining amount. 1581 * But don't read more than the buffer size. 1582 * And don't try to read past the end of the file. 1583 * Finally, if we're not at a page boundary, don't read past 1584 * the next page. 1585 * If this means reading 0 then we were asked to read past 1586 * the end of file. */ 1587 amount = min((unsigned int) amount_left, mod_data.buflen); 1588 amount = min((loff_t) amount, 1589 curlun->file_length - file_offset); 1590 partial_page = file_offset & (PAGE_CACHE_SIZE - 1); 1591 if (partial_page > 0) 1592 amount = min(amount, (unsigned int) PAGE_CACHE_SIZE - 1593 partial_page); 1594 1595 /* Wait for the next buffer to become available */ 1596 bh = fsg->next_buffhd_to_fill; 1597 while (bh->state != BUF_STATE_EMPTY) { 1598 rc = sleep_thread(fsg); 1599 if (rc) 1600 return rc; 1601 } 1602 1603 /* If we were asked to read past the end of file, 1604 * end with an empty buffer. */ 1605 if (amount == 0) { 1606 curlun->sense_data = 1607 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1608 curlun->sense_data_info = file_offset >> 9; 1609 curlun->info_valid = 1; 1610 bh->inreq->length = 0; 1611 bh->state = BUF_STATE_FULL; 1612 break; 1613 } 1614 1615 /* Perform the read */ 1616 file_offset_tmp = file_offset; 1617 nread = vfs_read(curlun->filp, 1618 (char __user *) bh->buf, 1619 amount, &file_offset_tmp); 1620 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, 1621 (unsigned long long) file_offset, 1622 (int) nread); 1623 if (signal_pending(current)) 1624 return -EINTR; 1625 1626 if (nread < 0) { 1627 LDBG(curlun, "error in file read: %d\n", 1628 (int) nread); 1629 nread = 0; 1630 } else if (nread < amount) { 1631 LDBG(curlun, "partial file read: %d/%u\n", 1632 (int) nread, amount); 1633 nread -= (nread & 511); // Round down to a block 1634 } 1635 file_offset += nread; 1636 amount_left -= nread; 1637 fsg->residue -= nread; 1638 bh->inreq->length = nread; 1639 bh->state = BUF_STATE_FULL; 1640 1641 /* If an error occurred, report it and its position */ 1642 if (nread < amount) { 1643 curlun->sense_data = SS_UNRECOVERED_READ_ERROR; 1644 curlun->sense_data_info = file_offset >> 9; 1645 curlun->info_valid = 1; 1646 break; 1647 } 1648 1649 if (amount_left == 0) 1650 break; // No more left to read 1651 1652 /* Send this buffer and go read some more */ 1653 bh->inreq->zero = 0; 1654 start_transfer(fsg, fsg->bulk_in, bh->inreq, 1655 &bh->inreq_busy, &bh->state); 1656 fsg->next_buffhd_to_fill = bh->next; 1657 } 1658 1659 return -EIO; // No default reply 1660} 1661 1662 1663/*-------------------------------------------------------------------------*/ 1664 1665static int do_write(struct fsg_dev *fsg) 1666{ 1667 struct lun *curlun = fsg->curlun; 1668 u32 lba; 1669 struct fsg_buffhd *bh; 1670 int get_some_more; 1671 u32 amount_left_to_req, amount_left_to_write; 1672 loff_t usb_offset, file_offset, file_offset_tmp; 1673 unsigned int amount; 1674 unsigned int partial_page; 1675 ssize_t nwritten; 1676 int rc; 1677 1678 if (curlun->ro) { 1679 curlun->sense_data = SS_WRITE_PROTECTED; 1680 return -EINVAL; 1681 } 1682 curlun->filp->f_flags &= ~O_SYNC; // Default is not to wait 1683 1684 /* Get the starting Logical Block Address and check that it's 1685 * not too big */ 1686 if (fsg->cmnd[0] == SC_WRITE_6) 1687 lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]); 1688 else { 1689 lba = get_be32(&fsg->cmnd[2]); 1690 1691 /* We allow DPO (Disable Page Out = don't save data in the 1692 * cache) and FUA (Force Unit Access = write directly to the 1693 * medium). We don't implement DPO; we implement FUA by 1694 * performing synchronous output. */ 1695 if ((fsg->cmnd[1] & ~0x18) != 0) { 1696 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1697 return -EINVAL; 1698 } 1699 if (fsg->cmnd[1] & 0x08) // FUA 1700 curlun->filp->f_flags |= O_SYNC; 1701 } 1702 if (lba >= curlun->num_sectors) { 1703 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1704 return -EINVAL; 1705 } 1706 1707 /* Carry out the file writes */ 1708 get_some_more = 1; 1709 file_offset = usb_offset = ((loff_t) lba) << 9; 1710 amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd; 1711 1712 while (amount_left_to_write > 0) { 1713 1714 /* Queue a request for more data from the host */ 1715 bh = fsg->next_buffhd_to_fill; 1716 if (bh->state == BUF_STATE_EMPTY && get_some_more) { 1717 1718 /* Figure out how much we want to get: 1719 * Try to get the remaining amount. 1720 * But don't get more than the buffer size. 1721 * And don't try to go past the end of the file. 1722 * If we're not at a page boundary, 1723 * don't go past the next page. 1724 * If this means getting 0, then we were asked 1725 * to write past the end of file. 1726 * Finally, round down to a block boundary. */ 1727 amount = min(amount_left_to_req, mod_data.buflen); 1728 amount = min((loff_t) amount, curlun->file_length - 1729 usb_offset); 1730 partial_page = usb_offset & (PAGE_CACHE_SIZE - 1); 1731 if (partial_page > 0) 1732 amount = min(amount, 1733 (unsigned int) PAGE_CACHE_SIZE - partial_page); 1734 1735 if (amount == 0) { 1736 get_some_more = 0; 1737 curlun->sense_data = 1738 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1739 curlun->sense_data_info = usb_offset >> 9; 1740 curlun->info_valid = 1; 1741 continue; 1742 } 1743 amount -= (amount & 511); 1744 if (amount == 0) { 1745 1746 /* Why were we were asked to transfer a 1747 * partial block? */ 1748 get_some_more = 0; 1749 continue; 1750 } 1751 1752 /* Get the next buffer */ 1753 usb_offset += amount; 1754 fsg->usb_amount_left -= amount; 1755 amount_left_to_req -= amount; 1756 if (amount_left_to_req == 0) 1757 get_some_more = 0; 1758 1759 /* amount is always divisible by 512, hence by 1760 * the bulk-out maxpacket size */ 1761 bh->outreq->length = bh->bulk_out_intended_length = 1762 amount; 1763 bh->outreq->short_not_ok = 1; 1764 start_transfer(fsg, fsg->bulk_out, bh->outreq, 1765 &bh->outreq_busy, &bh->state); 1766 fsg->next_buffhd_to_fill = bh->next; 1767 continue; 1768 } 1769 1770 /* Write the received data to the backing file */ 1771 bh = fsg->next_buffhd_to_drain; 1772 if (bh->state == BUF_STATE_EMPTY && !get_some_more) 1773 break; // We stopped early 1774 if (bh->state == BUF_STATE_FULL) { 1775 smp_rmb(); 1776 fsg->next_buffhd_to_drain = bh->next; 1777 bh->state = BUF_STATE_EMPTY; 1778 1779 /* Did something go wrong with the transfer? */ 1780 if (bh->outreq->status != 0) { 1781 curlun->sense_data = SS_COMMUNICATION_FAILURE; 1782 curlun->sense_data_info = file_offset >> 9; 1783 curlun->info_valid = 1; 1784 break; 1785 } 1786 1787 amount = bh->outreq->actual; 1788 if (curlun->file_length - file_offset < amount) { 1789 LERROR(curlun, 1790 "write %u @ %llu beyond end %llu\n", 1791 amount, (unsigned long long) file_offset, 1792 (unsigned long long) curlun->file_length); 1793 amount = curlun->file_length - file_offset; 1794 } 1795 1796 /* Perform the write */ 1797 file_offset_tmp = file_offset; 1798 nwritten = vfs_write(curlun->filp, 1799 (char __user *) bh->buf, 1800 amount, &file_offset_tmp); 1801 VLDBG(curlun, "file write %u @ %llu -> %d\n", amount, 1802 (unsigned long long) file_offset, 1803 (int) nwritten); 1804 if (signal_pending(current)) 1805 return -EINTR; // Interrupted! 1806 1807 if (nwritten < 0) { 1808 LDBG(curlun, "error in file write: %d\n", 1809 (int) nwritten); 1810 nwritten = 0; 1811 } else if (nwritten < amount) { 1812 LDBG(curlun, "partial file write: %d/%u\n", 1813 (int) nwritten, amount); 1814 nwritten -= (nwritten & 511); 1815 // Round down to a block 1816 } 1817 file_offset += nwritten; 1818 amount_left_to_write -= nwritten; 1819 fsg->residue -= nwritten; 1820 1821 /* If an error occurred, report it and its position */ 1822 if (nwritten < amount) { 1823 curlun->sense_data = SS_WRITE_ERROR; 1824 curlun->sense_data_info = file_offset >> 9; 1825 curlun->info_valid = 1; 1826 break; 1827 } 1828 1829 /* Did the host decide to stop early? */ 1830 if (bh->outreq->actual != bh->outreq->length) { 1831 fsg->short_packet_received = 1; 1832 break; 1833 } 1834 continue; 1835 } 1836 1837 /* Wait for something to happen */ 1838 rc = sleep_thread(fsg); 1839 if (rc) 1840 return rc; 1841 } 1842 1843 return -EIO; // No default reply 1844} 1845 1846 1847/*-------------------------------------------------------------------------*/ 1848 1849/* Sync the file data, don't bother with the metadata. 1850 * This code was copied from fs/buffer.c:sys_fdatasync(). */ 1851static int fsync_sub(struct lun *curlun) 1852{ 1853 struct file *filp = curlun->filp; 1854 struct inode *inode; 1855 int rc, err; 1856 1857 if (curlun->ro || !filp) 1858 return 0; 1859 if (!filp->f_op->fsync) 1860 return -EINVAL; 1861 1862 inode = filp->f_path.dentry->d_inode; 1863 mutex_lock(&inode->i_mutex); 1864 rc = filemap_fdatawrite(inode->i_mapping); 1865 err = filp->f_op->fsync(filp, filp->f_path.dentry, 1); 1866 if (!rc) 1867 rc = err; 1868 err = filemap_fdatawait(inode->i_mapping); 1869 if (!rc) 1870 rc = err; 1871 mutex_unlock(&inode->i_mutex); 1872 VLDBG(curlun, "fdatasync -> %d\n", rc); 1873 return rc; 1874} 1875 1876static void fsync_all(struct fsg_dev *fsg) 1877{ 1878 int i; 1879 1880 for (i = 0; i < fsg->nluns; ++i) 1881 fsync_sub(&fsg->luns[i]); 1882} 1883 1884static int do_synchronize_cache(struct fsg_dev *fsg) 1885{ 1886 struct lun *curlun = fsg->curlun; 1887 int rc; 1888 1889 /* We ignore the requested LBA and write out all file's 1890 * dirty data buffers. */ 1891 rc = fsync_sub(curlun); 1892 if (rc) 1893 curlun->sense_data = SS_WRITE_ERROR; 1894 return 0; 1895} 1896 1897 1898/*-------------------------------------------------------------------------*/ 1899 1900static void invalidate_sub(struct lun *curlun) 1901{ 1902 struct file *filp = curlun->filp; 1903 struct inode *inode = filp->f_path.dentry->d_inode; 1904 unsigned long rc; 1905 1906 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1); 1907 VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc); 1908} 1909 1910static int do_verify(struct fsg_dev *fsg) 1911{ 1912 struct lun *curlun = fsg->curlun; 1913 u32 lba; 1914 u32 verification_length; 1915 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill; 1916 loff_t file_offset, file_offset_tmp; 1917 u32 amount_left; 1918 unsigned int amount; 1919 ssize_t nread; 1920 1921 /* Get the starting Logical Block Address and check that it's 1922 * not too big */ 1923 lba = get_be32(&fsg->cmnd[2]); 1924 if (lba >= curlun->num_sectors) { 1925 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1926 return -EINVAL; 1927 } 1928 1929 /* We allow DPO (Disable Page Out = don't save data in the 1930 * cache) but we don't implement it. */ 1931 if ((fsg->cmnd[1] & ~0x10) != 0) { 1932 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1933 return -EINVAL; 1934 } 1935 1936 verification_length = get_be16(&fsg->cmnd[7]); 1937 if (unlikely(verification_length == 0)) 1938 return -EIO; // No default reply 1939 1940 /* Prepare to carry out the file verify */ 1941 amount_left = verification_length << 9; 1942 file_offset = ((loff_t) lba) << 9; 1943 1944 /* Write out all the dirty buffers before invalidating them */ 1945 fsync_sub(curlun); 1946 if (signal_pending(current)) 1947 return -EINTR; 1948 1949 invalidate_sub(curlun); 1950 if (signal_pending(current)) 1951 return -EINTR; 1952 1953 /* Just try to read the requested blocks */ 1954 while (amount_left > 0) { 1955 1956 /* Figure out how much we need to read: 1957 * Try to read the remaining amount, but not more than 1958 * the buffer size. 1959 * And don't try to read past the end of the file. 1960 * If this means reading 0 then we were asked to read 1961 * past the end of file. */ 1962 amount = min((unsigned int) amount_left, mod_data.buflen); 1963 amount = min((loff_t) amount, 1964 curlun->file_length - file_offset); 1965 if (amount == 0) { 1966 curlun->sense_data = 1967 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1968 curlun->sense_data_info = file_offset >> 9; 1969 curlun->info_valid = 1; 1970 break; 1971 } 1972 1973 /* Perform the read */ 1974 file_offset_tmp = file_offset; 1975 nread = vfs_read(curlun->filp, 1976 (char __user *) bh->buf, 1977 amount, &file_offset_tmp); 1978 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, 1979 (unsigned long long) file_offset, 1980 (int) nread); 1981 if (signal_pending(current)) 1982 return -EINTR; 1983 1984 if (nread < 0) { 1985 LDBG(curlun, "error in file verify: %d\n", 1986 (int) nread); 1987 nread = 0; 1988 } else if (nread < amount) { 1989 LDBG(curlun, "partial file verify: %d/%u\n", 1990 (int) nread, amount); 1991 nread -= (nread & 511); // Round down to a sector 1992 } 1993 if (nread == 0) { 1994 curlun->sense_data = SS_UNRECOVERED_READ_ERROR; 1995 curlun->sense_data_info = file_offset >> 9; 1996 curlun->info_valid = 1; 1997 break; 1998 } 1999 file_offset += nread; 2000 amount_left -= nread; 2001 } 2002 return 0; 2003} 2004 2005 2006/*-------------------------------------------------------------------------*/ 2007 2008static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh) 2009{ 2010 u8 *buf = (u8 *) bh->buf; 2011 2012 static char vendor_id[] = "Linux "; 2013 static char product_id[] = "File-Stor Gadget"; 2014 2015 if (!fsg->curlun) { // Unsupported LUNs are okay 2016 fsg->bad_lun_okay = 1; 2017 memset(buf, 0, 36); 2018 buf[0] = 0x7f; // Unsupported, no device-type 2019 return 36; 2020 } 2021 2022 memset(buf, 0, 8); // Non-removable, direct-access device 2023 if (mod_data.removable) 2024 buf[1] = 0x80; 2025 buf[2] = 2; // ANSI SCSI level 2 2026 buf[3] = 2; // SCSI-2 INQUIRY data format 2027 buf[4] = 31; // Additional length 2028 // No special options 2029 sprintf(buf + 8, "%-8s%-16s%04x", vendor_id, product_id, 2030 mod_data.release); 2031 return 36; 2032} 2033 2034 2035static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh) 2036{ 2037 struct lun *curlun = fsg->curlun; 2038 u8 *buf = (u8 *) bh->buf; 2039 u32 sd, sdinfo; 2040 int valid; 2041 2042 /* 2043 * From the SCSI-2 spec., section 7.9 (Unit attention condition): 2044 * 2045 * If a REQUEST SENSE command is received from an initiator 2046 * with a pending unit attention condition (before the target 2047 * generates the contingent allegiance condition), then the 2048 * target shall either: 2049 * a) report any pending sense data and preserve the unit 2050 * attention condition on the logical unit, or, 2051 * b) report the unit attention condition, may discard any 2052 * pending sense data, and clear the unit attention 2053 * condition on the logical unit for that initiator. 2054 * 2055 * FSG normally uses option a); enable this code to use option b). 2056 */ 2057#if 0 2058 if (curlun && curlun->unit_attention_data != SS_NO_SENSE) { 2059 curlun->sense_data = curlun->unit_attention_data; 2060 curlun->unit_attention_data = SS_NO_SENSE; 2061 } 2062#endif 2063 2064 if (!curlun) { // Unsupported LUNs are okay 2065 fsg->bad_lun_okay = 1; 2066 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED; 2067 sdinfo = 0; 2068 valid = 0; 2069 } else { 2070 sd = curlun->sense_data; 2071 sdinfo = curlun->sense_data_info; 2072 valid = curlun->info_valid << 7; 2073 curlun->sense_data = SS_NO_SENSE; 2074 curlun->sense_data_info = 0; 2075 curlun->info_valid = 0; 2076 } 2077 2078 memset(buf, 0, 18); 2079 buf[0] = valid | 0x70; // Valid, current error 2080 buf[2] = SK(sd); 2081 put_be32(&buf[3], sdinfo); // Sense information 2082 buf[7] = 18 - 8; // Additional sense length 2083 buf[12] = ASC(sd); 2084 buf[13] = ASCQ(sd); 2085 return 18; 2086} 2087 2088 2089static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh) 2090{ 2091 struct lun *curlun = fsg->curlun; 2092 u32 lba = get_be32(&fsg->cmnd[2]); 2093 int pmi = fsg->cmnd[8]; 2094 u8 *buf = (u8 *) bh->buf; 2095 2096 /* Check the PMI and LBA fields */ 2097 if (pmi > 1 || (pmi == 0 && lba != 0)) { 2098 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 2099 return -EINVAL; 2100 } 2101 2102 put_be32(&buf[0], curlun->num_sectors - 1); // Max logical block 2103 put_be32(&buf[4], 512); // Block length 2104 return 8; 2105} 2106 2107 2108static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh) 2109{ 2110 struct lun *curlun = fsg->curlun; 2111 int mscmnd = fsg->cmnd[0]; 2112 u8 *buf = (u8 *) bh->buf; 2113 u8 *buf0 = buf; 2114 int pc, page_code; 2115 int changeable_values, all_pages; 2116 int valid_page = 0; 2117 int len, limit; 2118 2119 if ((fsg->cmnd[1] & ~0x08) != 0) { // Mask away DBD 2120 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 2121 return -EINVAL; 2122 } 2123 pc = fsg->cmnd[2] >> 6; 2124 page_code = fsg->cmnd[2] & 0x3f; 2125 if (pc == 3) { 2126 curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED; 2127 return -EINVAL; 2128 } 2129 changeable_values = (pc == 1); 2130 all_pages = (page_code == 0x3f); 2131 2132 /* Write the mode parameter header. Fixed values are: default 2133 * medium type, no cache control (DPOFUA), and no block descriptors. 2134 * The only variable value is the WriteProtect bit. We will fill in 2135 * the mode data length later. */ 2136 memset(buf, 0, 8); 2137 if (mscmnd == SC_MODE_SENSE_6) { 2138 buf[2] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA 2139 buf += 4; 2140 limit = 255; 2141 } else { // SC_MODE_SENSE_10 2142 buf[3] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA 2143 buf += 8; 2144 limit = 65535; // Should really be mod_data.buflen 2145 } 2146 2147 /* No block descriptors */ 2148 2149 /* The mode pages, in numerical order. The only page we support 2150 * is the Caching page. */ 2151 if (page_code == 0x08 || all_pages) { 2152 valid_page = 1; 2153 buf[0] = 0x08; // Page code 2154 buf[1] = 10; // Page length 2155 memset(buf+2, 0, 10); // None of the fields are changeable 2156 2157 if (!changeable_values) { 2158 buf[2] = 0x04; // Write cache enable, 2159 // Read cache not disabled 2160 // No cache retention priorities 2161 put_be16(&buf[4], 0xffff); // Don't disable prefetch 2162 // Minimum prefetch = 0 2163 put_be16(&buf[8], 0xffff); // Maximum prefetch 2164 put_be16(&buf[10], 0xffff); // Maximum prefetch ceiling 2165 } 2166 buf += 12; 2167 } 2168 2169 /* Check that a valid page was requested and the mode data length 2170 * isn't too long. */ 2171 len = buf - buf0; 2172 if (!valid_page || len > limit) { 2173 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 2174 return -EINVAL; 2175 } 2176 2177 /* Store the mode data length */ 2178 if (mscmnd == SC_MODE_SENSE_6) 2179 buf0[0] = len - 1; 2180 else 2181 put_be16(buf0, len - 2); 2182 return len; 2183} 2184 2185 2186static int do_start_stop(struct fsg_dev *fsg) 2187{ 2188 struct lun *curlun = fsg->curlun; 2189 int loej, start; 2190 2191 if (!mod_data.removable) { 2192 curlun->sense_data = SS_INVALID_COMMAND; 2193 return -EINVAL; 2194 } 2195 2196 // int immed = fsg->cmnd[1] & 0x01; 2197 loej = fsg->cmnd[4] & 0x02; 2198 start = fsg->cmnd[4] & 0x01; 2199 2200#ifdef CONFIG_USB_FILE_STORAGE_TEST 2201 if ((fsg->cmnd[1] & ~0x01) != 0 || // Mask away Immed 2202 (fsg->cmnd[4] & ~0x03) != 0) { // Mask LoEj, Start 2203 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 2204 return -EINVAL; 2205 } 2206 2207 if (!start) { 2208 2209 /* Are we allowed to unload the media? */ 2210 if (curlun->prevent_medium_removal) { 2211 LDBG(curlun, "unload attempt prevented\n"); 2212 curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED; 2213 return -EINVAL; 2214 } 2215 if (loej) { // Simulate an unload/eject 2216 up_read(&fsg->filesem); 2217 down_write(&fsg->filesem); 2218 close_backing_file(curlun); 2219 up_write(&fsg->filesem); 2220 down_read(&fsg->filesem); 2221 } 2222 } else { 2223 2224 /* Our emulation doesn't support mounting; the medium is 2225 * available for use as soon as it is loaded. */ 2226 if (!backing_file_is_open(curlun)) { 2227 curlun->sense_data = SS_MEDIUM_NOT_PRESENT; 2228 return -EINVAL; 2229 } 2230 } 2231#endif 2232 return 0; 2233} 2234 2235 2236static int do_prevent_allow(struct fsg_dev *fsg) 2237{ 2238 struct lun *curlun = fsg->curlun; 2239 int prevent; 2240 2241 if (!mod_data.removable) { 2242 curlun->sense_data = SS_INVALID_COMMAND; 2243 return -EINVAL; 2244 } 2245 2246 prevent = fsg->cmnd[4] & 0x01; 2247 if ((fsg->cmnd[4] & ~0x01) != 0) { // Mask away Prevent 2248 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 2249 return -EINVAL; 2250 } 2251 2252 if (curlun->prevent_medium_removal && !prevent) 2253 fsync_sub(curlun); 2254 curlun->prevent_medium_removal = prevent; 2255 return 0; 2256} 2257 2258 2259static int do_read_format_capacities(struct fsg_dev *fsg, 2260 struct fsg_buffhd *bh) 2261{ 2262 struct lun *curlun = fsg->curlun; 2263 u8 *buf = (u8 *) bh->buf; 2264 2265 buf[0] = buf[1] = buf[2] = 0; 2266 buf[3] = 8; // Only the Current/Maximum Capacity Descriptor 2267 buf += 4; 2268 2269 put_be32(&buf[0], curlun->num_sectors); // Number of blocks 2270 put_be32(&buf[4], 512); // Block length 2271 buf[4] = 0x02; // Current capacity 2272 return 12; 2273} 2274 2275 2276static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh) 2277{ 2278 struct lun *curlun = fsg->curlun; 2279 2280 /* We don't support MODE SELECT */ 2281 curlun->sense_data = SS_INVALID_COMMAND; 2282 return -EINVAL; 2283} 2284 2285 2286/*-------------------------------------------------------------------------*/ 2287 2288static int halt_bulk_in_endpoint(struct fsg_dev *fsg) 2289{ 2290 int rc; 2291 2292 rc = fsg_set_halt(fsg, fsg->bulk_in); 2293 if (rc == -EAGAIN) 2294 VDBG(fsg, "delayed bulk-in endpoint halt\n"); 2295 while (rc != 0) { 2296 if (rc != -EAGAIN) { 2297 WARN(fsg, "usb_ep_set_halt -> %d\n", rc); 2298 rc = 0; 2299 break; 2300 } 2301 2302 /* Wait for a short time and then try again */ 2303 if (msleep_interruptible(100) != 0) 2304 return -EINTR; 2305 rc = usb_ep_set_halt(fsg->bulk_in); 2306 } 2307 return rc; 2308} 2309 2310static int wedge_bulk_in_endpoint(struct fsg_dev *fsg) 2311{ 2312 int rc; 2313 2314 DBG(fsg, "bulk-in set wedge\n"); 2315 rc = usb_ep_set_wedge(fsg->bulk_in); 2316 if (rc == -EAGAIN) 2317 VDBG(fsg, "delayed bulk-in endpoint wedge\n"); 2318 while (rc != 0) { 2319 if (rc != -EAGAIN) { 2320 WARN(fsg, "usb_ep_set_wedge -> %d\n", rc); 2321 rc = 0; 2322 break; 2323 } 2324 2325 /* Wait for a short time and then try again */ 2326 if (msleep_interruptible(100) != 0) 2327 return -EINTR; 2328 rc = usb_ep_set_wedge(fsg->bulk_in); 2329 } 2330 return rc; 2331} 2332 2333static int pad_with_zeros(struct fsg_dev *fsg) 2334{ 2335 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill; 2336 u32 nkeep = bh->inreq->length; 2337 u32 nsend; 2338 int rc; 2339 2340 bh->state = BUF_STATE_EMPTY; // For the first iteration 2341 fsg->usb_amount_left = nkeep + fsg->residue; 2342 while (fsg->usb_amount_left > 0) { 2343 2344 /* Wait for the next buffer to be free */ 2345 while (bh->state != BUF_STATE_EMPTY) { 2346 rc = sleep_thread(fsg); 2347 if (rc) 2348 return rc; 2349 } 2350 2351 nsend = min(fsg->usb_amount_left, (u32) mod_data.buflen); 2352 memset(bh->buf + nkeep, 0, nsend - nkeep); 2353 bh->inreq->length = nsend; 2354 bh->inreq->zero = 0; 2355 start_transfer(fsg, fsg->bulk_in, bh->inreq, 2356 &bh->inreq_busy, &bh->state); 2357 bh = fsg->next_buffhd_to_fill = bh->next; 2358 fsg->usb_amount_left -= nsend; 2359 nkeep = 0; 2360 } 2361 return 0; 2362} 2363 2364static int throw_away_data(struct fsg_dev *fsg) 2365{ 2366 struct fsg_buffhd *bh; 2367 u32 amount; 2368 int rc; 2369 2370 while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY || 2371 fsg->usb_amount_left > 0) { 2372 2373 /* Throw away the data in a filled buffer */ 2374 if (bh->state == BUF_STATE_FULL) { 2375 smp_rmb(); 2376 bh->state = BUF_STATE_EMPTY; 2377 fsg->next_buffhd_to_drain = bh->next; 2378 2379 /* A short packet or an error ends everything */ 2380 if (bh->outreq->actual != bh->outreq->length || 2381 bh->outreq->status != 0) { 2382 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT); 2383 return -EINTR; 2384 } 2385 continue; 2386 } 2387 2388 /* Try to submit another request if we need one */ 2389 bh = fsg->next_buffhd_to_fill; 2390 if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) { 2391 amount = min(fsg->usb_amount_left, 2392 (u32) mod_data.buflen); 2393 2394 /* amount is always divisible by 512, hence by 2395 * the bulk-out maxpacket size */ 2396 bh->outreq->length = bh->bulk_out_intended_length = 2397 amount; 2398 bh->outreq->short_not_ok = 1; 2399 start_transfer(fsg, fsg->bulk_out, bh->outreq, 2400 &bh->outreq_busy, &bh->state); 2401 fsg->next_buffhd_to_fill = bh->next; 2402 fsg->usb_amount_left -= amount; 2403 continue; 2404 } 2405 2406 /* Otherwise wait for something to happen */ 2407 rc = sleep_thread(fsg); 2408 if (rc) 2409 return rc; 2410 } 2411 return 0; 2412} 2413 2414 2415static int finish_reply(struct fsg_dev *fsg) 2416{ 2417 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill; 2418 int rc = 0; 2419 2420 switch (fsg->data_dir) { 2421 case DATA_DIR_NONE: 2422 break; // Nothing to send 2423 2424 /* If we don't know whether the host wants to read or write, 2425 * this must be CB or CBI with an unknown command. We mustn't 2426 * try to send or receive any data. So stall both bulk pipes 2427 * if we can and wait for a reset. */ 2428 case DATA_DIR_UNKNOWN: 2429 if (mod_data.can_stall) { 2430 fsg_set_halt(fsg, fsg->bulk_out); 2431 rc = halt_bulk_in_endpoint(fsg); 2432 } 2433 break; 2434 2435 /* All but the last buffer of data must have already been sent */ 2436 case DATA_DIR_TO_HOST: 2437 if (fsg->data_size == 0) 2438 ; // Nothing to send 2439 2440 /* If there's no residue, simply send the last buffer */ 2441 else if (fsg->residue == 0) { 2442 bh->inreq->zero = 0; 2443 start_transfer(fsg, fsg->bulk_in, bh->inreq, 2444 &bh->inreq_busy, &bh->state); 2445 fsg->next_buffhd_to_fill = bh->next; 2446 } 2447 2448 /* There is a residue. For CB and CBI, simply mark the end 2449 * of the data with a short packet. However, if we are 2450 * allowed to stall, there was no data at all (residue == 2451 * data_size), and the command failed (invalid LUN or 2452 * sense data is set), then halt the bulk-in endpoint 2453 * instead. */ 2454 else if (!transport_is_bbb()) { 2455 if (mod_data.can_stall && 2456 fsg->residue == fsg->data_size && 2457 (!fsg->curlun || fsg->curlun->sense_data != SS_NO_SENSE)) { 2458 bh->state = BUF_STATE_EMPTY; 2459 rc = halt_bulk_in_endpoint(fsg); 2460 } else { 2461 bh->inreq->zero = 1; 2462 start_transfer(fsg, fsg->bulk_in, bh->inreq, 2463 &bh->inreq_busy, &bh->state); 2464 fsg->next_buffhd_to_fill = bh->next; 2465 } 2466 } 2467 2468 /* For Bulk-only, if we're allowed to stall then send the 2469 * short packet and halt the bulk-in endpoint. If we can't 2470 * stall, pad out the remaining data with 0's. */ 2471 else { 2472 if (mod_data.can_stall) { 2473 bh->inreq->zero = 1; 2474 start_transfer(fsg, fsg->bulk_in, bh->inreq, 2475 &bh->inreq_busy, &bh->state); 2476 fsg->next_buffhd_to_fill = bh->next; 2477 rc = halt_bulk_in_endpoint(fsg); 2478 } else 2479 rc = pad_with_zeros(fsg); 2480 } 2481 break; 2482 2483 /* We have processed all we want from the data the host has sent. 2484 * There may still be outstanding bulk-out requests. */ 2485 case DATA_DIR_FROM_HOST: 2486 if (fsg->residue == 0) 2487 ; // Nothing to receive 2488 2489 /* Did the host stop sending unexpectedly early? */ 2490 else if (fsg->short_packet_received) { 2491 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT); 2492 rc = -EINTR; 2493 } 2494 2495 /* We haven't processed all the incoming data. Even though 2496 * we may be allowed to stall, doing so would cause a race. 2497 * The controller may already have ACK'ed all the remaining 2498 * bulk-out packets, in which case the host wouldn't see a 2499 * STALL. Not realizing the endpoint was halted, it wouldn't 2500 * clear the halt -- leading to problems later on. */ 2501#if 0 2502 else if (mod_data.can_stall) { 2503 fsg_set_halt(fsg, fsg->bulk_out); 2504 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT); 2505 rc = -EINTR; 2506 } 2507#endif 2508 2509 /* We can't stall. Read in the excess data and throw it 2510 * all away. */ 2511 else 2512 rc = throw_away_data(fsg); 2513 break; 2514 } 2515 return rc; 2516} 2517 2518 2519static int send_status(struct fsg_dev *fsg) 2520{ 2521 struct lun *curlun = fsg->curlun; 2522 struct fsg_buffhd *bh; 2523 int rc; 2524 u8 status = USB_STATUS_PASS; 2525 u32 sd, sdinfo = 0; 2526 2527 /* Wait for the next buffer to become available */ 2528 bh = fsg->next_buffhd_to_fill; 2529 while (bh->state != BUF_STATE_EMPTY) { 2530 rc = sleep_thread(fsg); 2531 if (rc) 2532 return rc; 2533 } 2534 2535 if (curlun) { 2536 sd = curlun->sense_data; 2537 sdinfo = curlun->sense_data_info; 2538 } else if (fsg->bad_lun_okay) 2539 sd = SS_NO_SENSE; 2540 else 2541 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED; 2542 2543 if (fsg->phase_error) { 2544 DBG(fsg, "sending phase-error status\n"); 2545 status = USB_STATUS_PHASE_ERROR; 2546 sd = SS_INVALID_COMMAND; 2547 } else if (sd != SS_NO_SENSE) { 2548 DBG(fsg, "sending command-failure status\n"); 2549 status = USB_STATUS_FAIL; 2550 VDBG(fsg, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;" 2551 " info x%x\n", 2552 SK(sd), ASC(sd), ASCQ(sd), sdinfo); 2553 } 2554 2555 if (transport_is_bbb()) { 2556 struct bulk_cs_wrap *csw = bh->buf; 2557 2558 /* Store and send the Bulk-only CSW */ 2559 csw->Signature = __constant_cpu_to_le32(USB_BULK_CS_SIG); 2560 csw->Tag = fsg->tag; 2561 csw->Residue = cpu_to_le32(fsg->residue); 2562 csw->Status = status; 2563 2564 bh->inreq->length = USB_BULK_CS_WRAP_LEN; 2565 bh->inreq->zero = 0; 2566 start_transfer(fsg, fsg->bulk_in, bh->inreq, 2567 &bh->inreq_busy, &bh->state); 2568 2569 } else if (mod_data.transport_type == USB_PR_CB) { 2570 2571 /* Control-Bulk transport has no status phase! */ 2572 return 0; 2573 2574 } else { // USB_PR_CBI 2575 struct interrupt_data *buf = bh->buf; 2576 2577 /* Store and send the Interrupt data. UFI sends the ASC 2578 * and ASCQ bytes. Everything else sends a Type (which 2579 * is always 0) and the status Value. */ 2580 if (mod_data.protocol_type == USB_SC_UFI) { 2581 buf->bType = ASC(sd); 2582 buf->bValue = ASCQ(sd); 2583 } else { 2584 buf->bType = 0; 2585 buf->bValue = status; 2586 } 2587 fsg->intreq->length = CBI_INTERRUPT_DATA_LEN; 2588 2589 fsg->intr_buffhd = bh; // Point to the right buffhd 2590 fsg->intreq->buf = bh->inreq->buf; 2591 fsg->intreq->context = bh; 2592 start_transfer(fsg, fsg->intr_in, fsg->intreq, 2593 &fsg->intreq_busy, &bh->state); 2594 } 2595 2596 fsg->next_buffhd_to_fill = bh->next; 2597 return 0; 2598} 2599 2600 2601/*-------------------------------------------------------------------------*/ 2602 2603/* Check whether the command is properly formed and whether its data size 2604 * and direction agree with the values we already have. */ 2605static int check_command(struct fsg_dev *fsg, int cmnd_size, 2606 enum data_direction data_dir, unsigned int mask, 2607 int needs_medium, const char *name) 2608{ 2609 int i; 2610 int lun = fsg->cmnd[1] >> 5; 2611 static const char dirletter[4] = {'u', 'o', 'i', 'n'}; 2612 char hdlen[20]; 2613 struct lun *curlun; 2614 2615 /* Adjust the expected cmnd_size for protocol encapsulation padding. 2616 * Transparent SCSI doesn't pad. */ 2617 if (protocol_is_scsi()) 2618 ; 2619 2620 /* There's some disagreement as to whether RBC pads commands or not. 2621 * We'll play it safe and accept either form. */ 2622 else if (mod_data.protocol_type == USB_SC_RBC) { 2623 if (fsg->cmnd_size == 12) 2624 cmnd_size = 12; 2625 2626 /* All the other protocols pad to 12 bytes */ 2627 } else 2628 cmnd_size = 12; 2629 2630 hdlen[0] = 0; 2631 if (fsg->data_dir != DATA_DIR_UNKNOWN) 2632 sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir], 2633 fsg->data_size); 2634 VDBG(fsg, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n", 2635 name, cmnd_size, dirletter[(int) data_dir], 2636 fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen); 2637 2638 /* We can't reply at all until we know the correct data direction 2639 * and size. */ 2640 if (fsg->data_size_from_cmnd == 0) 2641 data_dir = DATA_DIR_NONE; 2642 if (fsg->data_dir == DATA_DIR_UNKNOWN) { // CB or CBI 2643 fsg->data_dir = data_dir; 2644 fsg->data_size = fsg->data_size_from_cmnd; 2645 2646 } else { // Bulk-only 2647 if (fsg->data_size < fsg->data_size_from_cmnd) { 2648 2649 /* Host data size < Device data size is a phase error. 2650 * Carry out the command, but only transfer as much 2651 * as we are allowed. */ 2652 fsg->data_size_from_cmnd = fsg->data_size; 2653 fsg->phase_error = 1; 2654 } 2655 } 2656 fsg->residue = fsg->usb_amount_left = fsg->data_size; 2657 2658 /* Conflicting data directions is a phase error */ 2659 if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) { 2660 fsg->phase_error = 1; 2661 return -EINVAL; 2662 } 2663 2664 /* Verify the length of the command itself */ 2665 if (cmnd_size != fsg->cmnd_size) { 2666 2667 /* Special case workaround: MS-Windows issues REQUEST SENSE 2668 * with cbw->Length == 12 (it should be 6). */ 2669 if (fsg->cmnd[0] == SC_REQUEST_SENSE && fsg->cmnd_size == 12) 2670 cmnd_size = fsg->cmnd_size; 2671 else { 2672 fsg->phase_error = 1; 2673 return -EINVAL; 2674 } 2675 } 2676 2677 /* Check that the LUN values are consistent */ 2678 if (transport_is_bbb()) { 2679 if (fsg->lun != lun) 2680 DBG(fsg, "using LUN %d from CBW, " 2681 "not LUN %d from CDB\n", 2682 fsg->lun, lun); 2683 } else 2684 fsg->lun = lun; // Use LUN from the command 2685 2686 /* Check the LUN */ 2687 if (fsg->lun >= 0 && fsg->lun < fsg->nluns) { 2688 fsg->curlun = curlun = &fsg->luns[fsg->lun]; 2689 if (fsg->cmnd[0] != SC_REQUEST_SENSE) { 2690 curlun->sense_data = SS_NO_SENSE; 2691 curlun->sense_data_info = 0; 2692 curlun->info_valid = 0; 2693 } 2694 } else { 2695 fsg->curlun = curlun = NULL; 2696 fsg->bad_lun_okay = 0; 2697 2698 /* INQUIRY and REQUEST SENSE commands are explicitly allowed 2699 * to use unsupported LUNs; all others may not. */ 2700 if (fsg->cmnd[0] != SC_INQUIRY && 2701 fsg->cmnd[0] != SC_REQUEST_SENSE) { 2702 DBG(fsg, "unsupported LUN %d\n", fsg->lun); 2703 return -EINVAL; 2704 } 2705 } 2706 2707 /* If a unit attention condition exists, only INQUIRY and 2708 * REQUEST SENSE commands are allowed; anything else must fail. */ 2709 if (curlun && curlun->unit_attention_data != SS_NO_SENSE && 2710 fsg->cmnd[0] != SC_INQUIRY && 2711 fsg->cmnd[0] != SC_REQUEST_SENSE) { 2712 curlun->sense_data = curlun->unit_attention_data; 2713 curlun->unit_attention_data = SS_NO_SENSE; 2714 return -EINVAL; 2715 } 2716 2717 /* Check that only command bytes listed in the mask are non-zero */ 2718 fsg->cmnd[1] &= 0x1f; // Mask away the LUN 2719 for (i = 1; i < cmnd_size; ++i) { 2720 if (fsg->cmnd[i] && !(mask & (1 << i))) { 2721 if (curlun) 2722 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 2723 return -EINVAL; 2724 } 2725 } 2726 2727 /* If the medium isn't mounted and the command needs to access 2728 * it, return an error. */ 2729 if (curlun && !backing_file_is_open(curlun) && needs_medium) { 2730 curlun->sense_data = SS_MEDIUM_NOT_PRESENT; 2731 return -EINVAL; 2732 } 2733 2734 return 0; 2735} 2736 2737 2738static int do_scsi_command(struct fsg_dev *fsg) 2739{ 2740 struct fsg_buffhd *bh; 2741 int rc; 2742 int reply = -EINVAL; 2743 int i; 2744 static char unknown[16]; 2745 2746 dump_cdb(fsg); 2747 2748 /* Wait for the next buffer to become available for data or status */ 2749 bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill; 2750 while (bh->state != BUF_STATE_EMPTY) { 2751 rc = sleep_thread(fsg); 2752 if (rc) 2753 return rc; 2754 } 2755 fsg->phase_error = 0; 2756 fsg->short_packet_received = 0; 2757 2758 down_read(&fsg->filesem); // We're using the backing file 2759 switch (fsg->cmnd[0]) { 2760 2761 case SC_INQUIRY: 2762 fsg->data_size_from_cmnd = fsg->cmnd[4]; 2763 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST, 2764 (1<<4), 0, 2765 "INQUIRY")) == 0) 2766 reply = do_inquiry(fsg, bh); 2767 break; 2768 2769 case SC_MODE_SELECT_6: 2770 fsg->data_size_from_cmnd = fsg->cmnd[4]; 2771 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST, 2772 (1<<1) | (1<<4), 0, 2773 "MODE SELECT(6)")) == 0) 2774 reply = do_mode_select(fsg, bh); 2775 break; 2776 2777 case SC_MODE_SELECT_10: 2778 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]); 2779 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST, 2780 (1<<1) | (3<<7), 0, 2781 "MODE SELECT(10)")) == 0) 2782 reply = do_mode_select(fsg, bh); 2783 break; 2784 2785 case SC_MODE_SENSE_6: 2786 fsg->data_size_from_cmnd = fsg->cmnd[4]; 2787 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST, 2788 (1<<1) | (1<<2) | (1<<4), 0, 2789 "MODE SENSE(6)")) == 0) 2790 reply = do_mode_sense(fsg, bh); 2791 break; 2792 2793 case SC_MODE_SENSE_10: 2794 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]); 2795 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2796 (1<<1) | (1<<2) | (3<<7), 0, 2797 "MODE SENSE(10)")) == 0) 2798 reply = do_mode_sense(fsg, bh); 2799 break; 2800 2801 case SC_PREVENT_ALLOW_MEDIUM_REMOVAL: 2802 fsg->data_size_from_cmnd = 0; 2803 if ((reply = check_command(fsg, 6, DATA_DIR_NONE, 2804 (1<<4), 0, 2805 "PREVENT-ALLOW MEDIUM REMOVAL")) == 0) 2806 reply = do_prevent_allow(fsg); 2807 break; 2808 2809 case SC_READ_6: 2810 i = fsg->cmnd[4]; 2811 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9; 2812 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST, 2813 (7<<1) | (1<<4), 1, 2814 "READ(6)")) == 0) 2815 reply = do_read(fsg); 2816 break; 2817 2818 case SC_READ_10: 2819 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9; 2820 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2821 (1<<1) | (0xf<<2) | (3<<7), 1, 2822 "READ(10)")) == 0) 2823 reply = do_read(fsg); 2824 break; 2825 2826 case SC_READ_12: 2827 fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9; 2828 if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST, 2829 (1<<1) | (0xf<<2) | (0xf<<6), 1, 2830 "READ(12)")) == 0) 2831 reply = do_read(fsg); 2832 break; 2833 2834 case SC_READ_CAPACITY: 2835 fsg->data_size_from_cmnd = 8; 2836 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2837 (0xf<<2) | (1<<8), 1, 2838 "READ CAPACITY")) == 0) 2839 reply = do_read_capacity(fsg, bh); 2840 break; 2841 2842 case SC_READ_FORMAT_CAPACITIES: 2843 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]); 2844 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2845 (3<<7), 1, 2846 "READ FORMAT CAPACITIES")) == 0) 2847 reply = do_read_format_capacities(fsg, bh); 2848 break; 2849 2850 case SC_REQUEST_SENSE: 2851 fsg->data_size_from_cmnd = fsg->cmnd[4]; 2852 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST, 2853 (1<<4), 0, 2854 "REQUEST SENSE")) == 0) 2855 reply = do_request_sense(fsg, bh); 2856 break; 2857 2858 case SC_START_STOP_UNIT: 2859 fsg->data_size_from_cmnd = 0; 2860 if ((reply = check_command(fsg, 6, DATA_DIR_NONE, 2861 (1<<1) | (1<<4), 0, 2862 "START-STOP UNIT")) == 0) 2863 reply = do_start_stop(fsg); 2864 break; 2865 2866 case SC_SYNCHRONIZE_CACHE: 2867 fsg->data_size_from_cmnd = 0; 2868 if ((reply = check_command(fsg, 10, DATA_DIR_NONE, 2869 (0xf<<2) | (3<<7), 1, 2870 "SYNCHRONIZE CACHE")) == 0) 2871 reply = do_synchronize_cache(fsg); 2872 break; 2873 2874 case SC_TEST_UNIT_READY: 2875 fsg->data_size_from_cmnd = 0; 2876 reply = check_command(fsg, 6, DATA_DIR_NONE, 2877 0, 1, 2878 "TEST UNIT READY"); 2879 break; 2880 2881 /* Although optional, this command is used by MS-Windows. We 2882 * support a minimal version: BytChk must be 0. */ 2883 case SC_VERIFY: 2884 fsg->data_size_from_cmnd = 0; 2885 if ((reply = check_command(fsg, 10, DATA_DIR_NONE, 2886 (1<<1) | (0xf<<2) | (3<<7), 1, 2887 "VERIFY")) == 0) 2888 reply = do_verify(fsg); 2889 break; 2890 2891 case SC_WRITE_6: 2892 i = fsg->cmnd[4]; 2893 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9; 2894 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST, 2895 (7<<1) | (1<<4), 1, 2896 "WRITE(6)")) == 0) 2897 reply = do_write(fsg); 2898 break; 2899 2900 case SC_WRITE_10: 2901 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9; 2902 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST, 2903 (1<<1) | (0xf<<2) | (3<<7), 1, 2904 "WRITE(10)")) == 0) 2905 reply = do_write(fsg); 2906 break; 2907 2908 case SC_WRITE_12: 2909 fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9; 2910 if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST, 2911 (1<<1) | (0xf<<2) | (0xf<<6), 1, 2912 "WRITE(12)")) == 0) 2913 reply = do_write(fsg); 2914 break; 2915 2916 /* Some mandatory commands that we recognize but don't implement. 2917 * They don't mean much in this setting. It's left as an exercise 2918 * for anyone interested to implement RESERVE and RELEASE in terms 2919 * of Posix locks. */ 2920 case SC_FORMAT_UNIT: 2921 case SC_RELEASE: 2922 case SC_RESERVE: 2923 case SC_SEND_DIAGNOSTIC: 2924 // Fall through 2925 2926 default: 2927 fsg->data_size_from_cmnd = 0; 2928 sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]); 2929 if ((reply = check_command(fsg, fsg->cmnd_size, 2930 DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) { 2931 fsg->curlun->sense_data = SS_INVALID_COMMAND; 2932 reply = -EINVAL; 2933 } 2934 break; 2935 } 2936 up_read(&fsg->filesem); 2937 2938 if (reply == -EINTR || signal_pending(current)) 2939 return -EINTR; 2940 2941 /* Set up the single reply buffer for finish_reply() */ 2942 if (reply == -EINVAL) 2943 reply = 0; // Error reply length 2944 if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) { 2945 reply = min((u32) reply, fsg->data_size_from_cmnd); 2946 bh->inreq->length = reply; 2947 bh->state = BUF_STATE_FULL; 2948 fsg->residue -= reply; 2949 } // Otherwise it's already set 2950 2951 return 0; 2952} 2953 2954 2955/*-------------------------------------------------------------------------*/ 2956 2957static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh) 2958{ 2959 struct usb_request *req = bh->outreq; 2960 struct bulk_cb_wrap *cbw = req->buf; 2961 2962 /* Was this a real packet? Should it be ignored? */ 2963 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags)) 2964 return -EINVAL; 2965 2966 /* Is the CBW valid? */ 2967 if (req->actual != USB_BULK_CB_WRAP_LEN || 2968 cbw->Signature != __constant_cpu_to_le32( 2969 USB_BULK_CB_SIG)) { 2970 DBG(fsg, "invalid CBW: len %u sig 0x%x\n", 2971 req->actual, 2972 le32_to_cpu(cbw->Signature)); 2973 2974 /* The Bulk-only spec says we MUST stall the IN endpoint 2975 * (6.6.1), so it's unavoidable. It also says we must 2976 * retain this state until the next reset, but there's 2977 * no way to tell the controller driver it should ignore 2978 * Clear-Feature(HALT) requests. 2979 * 2980 * We aren't required to halt the OUT endpoint; instead 2981 * we can simply accept and discard any data received 2982 * until the next reset. */ 2983 wedge_bulk_in_endpoint(fsg); 2984 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); 2985 return -EINVAL; 2986 } 2987 2988 /* Is the CBW meaningful? */ 2989 if (cbw->Lun >= MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG || 2990 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) { 2991 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, " 2992 "cmdlen %u\n", 2993 cbw->Lun, cbw->Flags, cbw->Length); 2994 2995 /* We can do anything we want here, so let's stall the 2996 * bulk pipes if we are allowed to. */ 2997 if (mod_data.can_stall) { 2998 fsg_set_halt(fsg, fsg->bulk_out); 2999 halt_bulk_in_endpoint(fsg); 3000 } 3001 return -EINVAL; 3002 } 3003 3004 /* Save the command for later */ 3005 fsg->cmnd_size = cbw->Length; 3006 memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size); 3007 if (cbw->Flags & USB_BULK_IN_FLAG) 3008 fsg->data_dir = DATA_DIR_TO_HOST; 3009 else 3010 fsg->data_dir = DATA_DIR_FROM_HOST; 3011 fsg->data_size = le32_to_cpu(cbw->DataTransferLength); 3012 if (fsg->data_size == 0) 3013 fsg->data_dir = DATA_DIR_NONE; 3014 fsg->lun = cbw->Lun; 3015 fsg->tag = cbw->Tag; 3016 return 0; 3017} 3018 3019 3020static int get_next_command(struct fsg_dev *fsg) 3021{ 3022 struct fsg_buffhd *bh; 3023 int rc = 0; 3024 3025 if (transport_is_bbb()) { 3026 3027 /* Wait for the next buffer to become available */ 3028 bh = fsg->next_buffhd_to_fill; 3029 while (bh->state != BUF_STATE_EMPTY) { 3030 rc = sleep_thread(fsg); 3031 if (rc) 3032 return rc; 3033 } 3034 3035 /* Queue a request to read a Bulk-only CBW */ 3036 set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN); 3037 bh->outreq->short_not_ok = 1; 3038 start_transfer(fsg, fsg->bulk_out, bh->outreq, 3039 &bh->outreq_busy, &bh->state); 3040 3041 /* We will drain the buffer in software, which means we 3042 * can reuse it for the next filling. No need to advance 3043 * next_buffhd_to_fill. */ 3044 3045 /* Wait for the CBW to arrive */ 3046 while (bh->state != BUF_STATE_FULL) { 3047 rc = sleep_thread(fsg); 3048 if (rc) 3049 return rc; 3050 } 3051 smp_rmb(); 3052 rc = received_cbw(fsg, bh); 3053 bh->state = BUF_STATE_EMPTY; 3054 3055 } else { // USB_PR_CB or USB_PR_CBI 3056 3057 /* Wait for the next command to arrive */ 3058 while (fsg->cbbuf_cmnd_size == 0) { 3059 rc = sleep_thread(fsg); 3060 if (rc) 3061 return rc; 3062 } 3063 3064 /* Is the previous status interrupt request still busy? 3065 * The host is allowed to skip reading the status, 3066 * so we must cancel it. */ 3067 if (fsg->intreq_busy) 3068 usb_ep_dequeue(fsg->intr_in, fsg->intreq); 3069 3070 /* Copy the command and mark the buffer empty */ 3071 fsg->data_dir = DATA_DIR_UNKNOWN; 3072 spin_lock_irq(&fsg->lock); 3073 fsg->cmnd_size = fsg->cbbuf_cmnd_size; 3074 memcpy(fsg->cmnd, fsg->cbbuf_cmnd, fsg->cmnd_size); 3075 fsg->cbbuf_cmnd_size = 0; 3076 spin_unlock_irq(&fsg->lock); 3077 } 3078 return rc; 3079} 3080 3081 3082/*-------------------------------------------------------------------------*/ 3083 3084static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep, 3085 const struct usb_endpoint_descriptor *d) 3086{ 3087 int rc; 3088 3089 ep->driver_data = fsg; 3090 rc = usb_ep_enable(ep, d); 3091 if (rc) 3092 ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc); 3093 return rc; 3094} 3095 3096static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep, 3097 struct usb_request **preq) 3098{ 3099 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC); 3100 if (*preq) 3101 return 0; 3102 ERROR(fsg, "can't allocate request for %s\n", ep->name); 3103 return -ENOMEM; 3104} 3105 3106/* 3107 * Reset interface setting and re-init endpoint state (toggle etc). 3108 * Call with altsetting < 0 to disable the interface. The only other 3109 * available altsetting is 0, which enables the interface. 3110 */ 3111static int do_set_interface(struct fsg_dev *fsg, int altsetting) 3112{ 3113 int rc = 0; 3114 int i; 3115 const struct usb_endpoint_descriptor *d; 3116 3117 if (fsg->running) 3118 DBG(fsg, "reset interface\n"); 3119 3120reset: 3121 /* Deallocate the requests */ 3122 for (i = 0; i < NUM_BUFFERS; ++i) { 3123 struct fsg_buffhd *bh = &fsg->buffhds[i]; 3124 3125 if (bh->inreq) { 3126 usb_ep_free_request(fsg->bulk_in, bh->inreq); 3127 bh->inreq = NULL; 3128 } 3129 if (bh->outreq) { 3130 usb_ep_free_request(fsg->bulk_out, bh->outreq); 3131 bh->outreq = NULL; 3132 } 3133 } 3134 if (fsg->intreq) { 3135 usb_ep_free_request(fsg->intr_in, fsg->intreq); 3136 fsg->intreq = NULL; 3137 } 3138 3139 /* Disable the endpoints */ 3140 if (fsg->bulk_in_enabled) { 3141 usb_ep_disable(fsg->bulk_in); 3142 fsg->bulk_in_enabled = 0; 3143 } 3144 if (fsg->bulk_out_enabled) { 3145 usb_ep_disable(fsg->bulk_out); 3146 fsg->bulk_out_enabled = 0; 3147 } 3148 if (fsg->intr_in_enabled) { 3149 usb_ep_disable(fsg->intr_in); 3150 fsg->intr_in_enabled = 0; 3151 } 3152 3153 fsg->running = 0; 3154 if (altsetting < 0 || rc != 0) 3155 return rc; 3156 3157 DBG(fsg, "set interface %d\n", altsetting); 3158 3159 /* Enable the endpoints */ 3160 d = ep_desc(fsg->gadget, &fs_bulk_in_desc, &hs_bulk_in_desc); 3161 if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0) 3162 goto reset; 3163 fsg->bulk_in_enabled = 1; 3164 3165 d = ep_desc(fsg->gadget, &fs_bulk_out_desc, &hs_bulk_out_desc); 3166 if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0) 3167 goto reset; 3168 fsg->bulk_out_enabled = 1; 3169 fsg->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize); 3170 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); 3171 3172 if (transport_is_cbi()) { 3173 d = ep_desc(fsg->gadget, &fs_intr_in_desc, &hs_intr_in_desc); 3174 if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0) 3175 goto reset; 3176 fsg->intr_in_enabled = 1; 3177 } 3178 3179 /* Allocate the requests */ 3180 for (i = 0; i < NUM_BUFFERS; ++i) { 3181 struct fsg_buffhd *bh = &fsg->buffhds[i]; 3182 3183 if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0) 3184 goto reset; 3185 if ((rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq)) != 0) 3186 goto reset; 3187 bh->inreq->buf = bh->outreq->buf = bh->buf; 3188 bh->inreq->context = bh->outreq->context = bh; 3189 bh->inreq->complete = bulk_in_complete; 3190 bh->outreq->complete = bulk_out_complete; 3191 } 3192 if (transport_is_cbi()) { 3193 if ((rc = alloc_request(fsg, fsg->intr_in, &fsg->intreq)) != 0) 3194 goto reset; 3195 fsg->intreq->complete = intr_in_complete; 3196 } 3197 3198 fsg->running = 1; 3199 for (i = 0; i < fsg->nluns; ++i) 3200 fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED; 3201 return rc; 3202} 3203 3204 3205/* 3206 * Change our operational configuration. This code must agree with the code 3207 * that returns config descriptors, and with interface altsetting code. 3208 * 3209 * It's also responsible for power management interactions. Some 3210 * configurations might not work with our current power sources. 3211 * For now we just assume the gadget is always self-powered. 3212 */ 3213static int do_set_config(struct fsg_dev *fsg, u8 new_config) 3214{ 3215 int rc = 0; 3216 3217 /* Disable the single interface */ 3218 if (fsg->config != 0) { 3219 DBG(fsg, "reset config\n"); 3220 fsg->config = 0; 3221 rc = do_set_interface(fsg, -1); 3222 } 3223 3224 /* Enable the interface */ 3225 if (new_config != 0) { 3226 fsg->config = new_config; 3227 if ((rc = do_set_interface(fsg, 0)) != 0) 3228 fsg->config = 0; // Reset on errors 3229 else { 3230 char *speed; 3231 3232 switch (fsg->gadget->speed) { 3233 case USB_SPEED_LOW: speed = "low"; break; 3234 case USB_SPEED_FULL: speed = "full"; break; 3235 case USB_SPEED_HIGH: speed = "high"; break; 3236 default: speed = "?"; break; 3237 } 3238 INFO(fsg, "%s speed config #%d\n", speed, fsg->config); 3239 } 3240 } 3241 return rc; 3242} 3243 3244 3245/*-------------------------------------------------------------------------*/ 3246 3247static void handle_exception(struct fsg_dev *fsg) 3248{ 3249 siginfo_t info; 3250 int sig; 3251 int i; 3252 int num_active; 3253 struct fsg_buffhd *bh; 3254 enum fsg_state old_state; 3255 u8 new_config; 3256 struct lun *curlun; 3257 unsigned int exception_req_tag; 3258 int rc; 3259 3260 /* Clear the existing signals. Anything but SIGUSR1 is converted 3261 * into a high-priority EXIT exception. */ 3262 for (;;) { 3263 sig = dequeue_signal_lock(current, ¤t->blocked, &info); 3264 if (!sig) 3265 break; 3266 if (sig != SIGUSR1) { 3267 if (fsg->state < FSG_STATE_EXIT) 3268 DBG(fsg, "Main thread exiting on signal\n"); 3269 raise_exception(fsg, FSG_STATE_EXIT); 3270 } 3271 } 3272 3273 /* Cancel all the pending transfers */ 3274 if (fsg->intreq_busy) 3275 usb_ep_dequeue(fsg->intr_in, fsg->intreq); 3276 for (i = 0; i < NUM_BUFFERS; ++i) { 3277 bh = &fsg->buffhds[i]; 3278 if (bh->inreq_busy) 3279 usb_ep_dequeue(fsg->bulk_in, bh->inreq); 3280 if (bh->outreq_busy) 3281 usb_ep_dequeue(fsg->bulk_out, bh->outreq); 3282 } 3283 3284 /* Wait until everything is idle */ 3285 for (;;) { 3286 num_active = fsg->intreq_busy; 3287 for (i = 0; i < NUM_BUFFERS; ++i) { 3288 bh = &fsg->buffhds[i]; 3289 num_active += bh->inreq_busy + bh->outreq_busy; 3290 } 3291 if (num_active == 0) 3292 break; 3293 if (sleep_thread(fsg)) 3294 return; 3295 } 3296 3297 /* Clear out the controller's fifos */ 3298 if (fsg->bulk_in_enabled) 3299 usb_ep_fifo_flush(fsg->bulk_in); 3300 if (fsg->bulk_out_enabled) 3301 usb_ep_fifo_flush(fsg->bulk_out); 3302 if (fsg->intr_in_enabled) 3303 usb_ep_fifo_flush(fsg->intr_in); 3304 3305 /* Reset the I/O buffer states and pointers, the SCSI 3306 * state, and the exception. Then invoke the handler. */ 3307 spin_lock_irq(&fsg->lock); 3308 3309 for (i = 0; i < NUM_BUFFERS; ++i) { 3310 bh = &fsg->buffhds[i]; 3311 bh->state = BUF_STATE_EMPTY; 3312 } 3313 fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain = 3314 &fsg->buffhds[0]; 3315 3316 exception_req_tag = fsg->exception_req_tag; 3317 new_config = fsg->new_config; 3318 old_state = fsg->state; 3319 3320 if (old_state == FSG_STATE_ABORT_BULK_OUT) 3321 fsg->state = FSG_STATE_STATUS_PHASE; 3322 else { 3323 for (i = 0; i < fsg->nluns; ++i) { 3324 curlun = &fsg->luns[i]; 3325 curlun->prevent_medium_removal = 0; 3326 curlun->sense_data = curlun->unit_attention_data = 3327 SS_NO_SENSE; 3328 curlun->sense_data_info = 0; 3329 curlun->info_valid = 0; 3330 } 3331 fsg->state = FSG_STATE_IDLE; 3332 } 3333 spin_unlock_irq(&fsg->lock); 3334 3335 /* Carry out any extra actions required for the exception */ 3336 switch (old_state) { 3337 default: 3338 break; 3339 3340 case FSG_STATE_ABORT_BULK_OUT: 3341 send_status(fsg); 3342 spin_lock_irq(&fsg->lock); 3343 if (fsg->state == FSG_STATE_STATUS_PHASE) 3344 fsg->state = FSG_STATE_IDLE; 3345 spin_unlock_irq(&fsg->lock); 3346 break; 3347 3348 case FSG_STATE_RESET: 3349 /* In case we were forced against our will to halt a 3350 * bulk endpoint, clear the halt now. (The SuperH UDC 3351 * requires this.) */ 3352 if (test_and_clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags)) 3353 usb_ep_clear_halt(fsg->bulk_in); 3354 3355 if (transport_is_bbb()) { 3356 if (fsg->ep0_req_tag == exception_req_tag) 3357 ep0_queue(fsg); // Complete the status stage 3358 3359 } else if (transport_is_cbi()) 3360 send_status(fsg); // Status by interrupt pipe 3361 3362 /* Technically this should go here, but it would only be 3363 * a waste of time. Ditto for the INTERFACE_CHANGE and 3364 * CONFIG_CHANGE cases. */ 3365 // for (i = 0; i < fsg->nluns; ++i) 3366 // fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED; 3367 break; 3368 3369 case FSG_STATE_INTERFACE_CHANGE: 3370 rc = do_set_interface(fsg, 0); 3371 if (fsg->ep0_req_tag != exception_req_tag) 3372 break; 3373 if (rc != 0) // STALL on errors 3374 fsg_set_halt(fsg, fsg->ep0); 3375 else // Complete the status stage 3376 ep0_queue(fsg); 3377 break; 3378 3379 case FSG_STATE_CONFIG_CHANGE: 3380 rc = do_set_config(fsg, new_config); 3381 if (fsg->ep0_req_tag != exception_req_tag) 3382 break; 3383 if (rc != 0) // STALL on errors 3384 fsg_set_halt(fsg, fsg->ep0); 3385 else // Complete the status stage 3386 ep0_queue(fsg); 3387 break; 3388 3389 case FSG_STATE_DISCONNECT: 3390 fsync_all(fsg); 3391 do_set_config(fsg, 0); // Unconfigured state 3392 break; 3393 3394 case FSG_STATE_EXIT: 3395 case FSG_STATE_TERMINATED: 3396 do_set_config(fsg, 0); // Free resources 3397 spin_lock_irq(&fsg->lock); 3398 fsg->state = FSG_STATE_TERMINATED; // Stop the thread 3399 spin_unlock_irq(&fsg->lock); 3400 break; 3401 } 3402} 3403 3404 3405/*-------------------------------------------------------------------------*/ 3406 3407static int fsg_main_thread(void *fsg_) 3408{ 3409 struct fsg_dev *fsg = fsg_; 3410 3411 /* Allow the thread to be killed by a signal, but set the signal mask 3412 * to block everything but INT, TERM, KILL, and USR1. */ 3413 allow_signal(SIGINT); 3414 allow_signal(SIGTERM); 3415 allow_signal(SIGKILL); 3416 allow_signal(SIGUSR1); 3417 3418 /* Allow the thread to be frozen */ 3419 set_freezable(); 3420 3421 /* Arrange for userspace references to be interpreted as kernel 3422 * pointers. That way we can pass a kernel pointer to a routine 3423 * that expects a __user pointer and it will work okay. */ 3424 set_fs(get_ds()); 3425 3426 /* The main loop */ 3427 while (fsg->state != FSG_STATE_TERMINATED) { 3428 if (exception_in_progress(fsg) || signal_pending(current)) { 3429 handle_exception(fsg); 3430 continue; 3431 } 3432 3433 if (!fsg->running) { 3434 sleep_thread(fsg); 3435 continue; 3436 } 3437 3438 if (get_next_command(fsg)) 3439 continue; 3440 3441 spin_lock_irq(&fsg->lock); 3442 if (!exception_in_progress(fsg)) 3443 fsg->state = FSG_STATE_DATA_PHASE; 3444 spin_unlock_irq(&fsg->lock); 3445 3446 if (do_scsi_command(fsg) || finish_reply(fsg)) 3447 continue; 3448 3449 spin_lock_irq(&fsg->lock); 3450 if (!exception_in_progress(fsg)) 3451 fsg->state = FSG_STATE_STATUS_PHASE; 3452 spin_unlock_irq(&fsg->lock); 3453 3454 if (send_status(fsg)) 3455 continue; 3456 3457 spin_lock_irq(&fsg->lock); 3458 if (!exception_in_progress(fsg)) 3459 fsg->state = FSG_STATE_IDLE; 3460 spin_unlock_irq(&fsg->lock); 3461 } 3462 3463 spin_lock_irq(&fsg->lock); 3464 fsg->thread_task = NULL; 3465 spin_unlock_irq(&fsg->lock); 3466 3467 /* In case we are exiting because of a signal, unregister the 3468 * gadget driver and close the backing file. */ 3469 if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags)) { 3470 usb_gadget_unregister_driver(&fsg_driver); 3471 close_all_backing_files(fsg); 3472 } 3473 3474 /* Let the unbind and cleanup routines know the thread has exited */ 3475 complete_and_exit(&fsg->thread_notifier, 0); 3476} 3477 3478 3479/*-------------------------------------------------------------------------*/ 3480 3481/* If the next two routines are called while the gadget is registered, 3482 * the caller must own fsg->filesem for writing. */ 3483 3484static int open_backing_file(struct lun *curlun, const char *filename) 3485{ 3486 int ro; 3487 struct file *filp = NULL; 3488 int rc = -EINVAL; 3489 struct inode *inode = NULL; 3490 loff_t size; 3491 loff_t num_sectors; 3492 3493 /* R/W if we can, R/O if we must */ 3494 ro = curlun->ro; 3495 if (!ro) { 3496 filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0); 3497 if (-EROFS == PTR_ERR(filp)) 3498 ro = 1; 3499 } 3500 if (ro) 3501 filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0); 3502 if (IS_ERR(filp)) { 3503 LINFO(curlun, "unable to open backing file: %s\n", filename); 3504 return PTR_ERR(filp); 3505 } 3506 3507 if (!(filp->f_mode & FMODE_WRITE)) 3508 ro = 1; 3509 3510 if (filp->f_path.dentry) 3511 inode = filp->f_path.dentry->d_inode; 3512 if (inode && S_ISBLK(inode->i_mode)) { 3513 if (bdev_read_only(inode->i_bdev)) 3514 ro = 1; 3515 } else if (!inode || !S_ISREG(inode->i_mode)) { 3516 LINFO(curlun, "invalid file type: %s\n", filename); 3517 goto out; 3518 } 3519 3520 /* If we can't read the file, it's no good. 3521 * If we can't write the file, use it read-only. */ 3522 if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) { 3523 LINFO(curlun, "file not readable: %s\n", filename); 3524 goto out; 3525 } 3526 if (!(filp->f_op->write || filp->f_op->aio_write)) 3527 ro = 1; 3528 3529 size = i_size_read(inode->i_mapping->host); 3530 if (size < 0) { 3531 LINFO(curlun, "unable to find file size: %s\n", filename); 3532 rc = (int) size; 3533 goto out; 3534 } 3535 num_sectors = size >> 9; // File size in 512-byte sectors 3536 if (num_sectors == 0) { 3537 LINFO(curlun, "file too small: %s\n", filename); 3538 rc = -ETOOSMALL; 3539 goto out; 3540 } 3541 3542 get_file(filp); 3543 curlun->ro = ro; 3544 curlun->filp = filp; 3545 curlun->file_length = size; 3546 curlun->num_sectors = num_sectors; 3547 LDBG(curlun, "open backing file: %s\n", filename); 3548 rc = 0; 3549 3550out: 3551 filp_close(filp, current->files); 3552 return rc; 3553} 3554 3555 3556static void close_backing_file(struct lun *curlun) 3557{ 3558 if (curlun->filp) { 3559 LDBG(curlun, "close backing file\n"); 3560 fput(curlun->filp); 3561 curlun->filp = NULL; 3562 } 3563} 3564 3565static void close_all_backing_files(struct fsg_dev *fsg) 3566{ 3567 int i; 3568 3569 for (i = 0; i < fsg->nluns; ++i) 3570 close_backing_file(&fsg->luns[i]); 3571} 3572 3573 3574static ssize_t show_ro(struct device *dev, struct device_attribute *attr, char *buf) 3575{ 3576 struct lun *curlun = dev_to_lun(dev); 3577 3578 return sprintf(buf, "%d\n", curlun->ro); 3579} 3580 3581static ssize_t show_file(struct device *dev, struct device_attribute *attr, 3582 char *buf) 3583{ 3584 struct lun *curlun = dev_to_lun(dev); 3585 struct fsg_dev *fsg = dev_get_drvdata(dev); 3586 char *p; 3587 ssize_t rc; 3588 3589 down_read(&fsg->filesem); 3590 if (backing_file_is_open(curlun)) { // Get the complete pathname 3591 p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1); 3592 if (IS_ERR(p)) 3593 rc = PTR_ERR(p); 3594 else { 3595 rc = strlen(p); 3596 memmove(buf, p, rc); 3597 buf[rc] = '\n'; // Add a newline 3598 buf[++rc] = 0; 3599 } 3600 } else { // No file, return 0 bytes 3601 *buf = 0; 3602 rc = 0; 3603 } 3604 up_read(&fsg->filesem); 3605 return rc; 3606} 3607 3608 3609static ssize_t store_ro(struct device *dev, struct device_attribute *attr, 3610 const char *buf, size_t count) 3611{ 3612 ssize_t rc = count; 3613 struct lun *curlun = dev_to_lun(dev); 3614 struct fsg_dev *fsg = dev_get_drvdata(dev); 3615 int i; 3616 3617 if (sscanf(buf, "%d", &i) != 1) 3618 return -EINVAL; 3619 3620 /* Allow the write-enable status to change only while the backing file 3621 * is closed. */ 3622 down_read(&fsg->filesem); 3623 if (backing_file_is_open(curlun)) { 3624 LDBG(curlun, "read-only status change prevented\n"); 3625 rc = -EBUSY; 3626 } else { 3627 curlun->ro = !!i; 3628 LDBG(curlun, "read-only status set to %d\n", curlun->ro); 3629 } 3630 up_read(&fsg->filesem); 3631 return rc; 3632} 3633 3634static ssize_t store_file(struct device *dev, struct device_attribute *attr, 3635 const char *buf, size_t count) 3636{ 3637 struct lun *curlun = dev_to_lun(dev); 3638 struct fsg_dev *fsg = dev_get_drvdata(dev); 3639 int rc = 0; 3640 3641 if (curlun->prevent_medium_removal && backing_file_is_open(curlun)) { 3642 LDBG(curlun, "eject attempt prevented\n"); 3643 return -EBUSY; // "Door is locked" 3644 } 3645 3646 /* Remove a trailing newline */ 3647 if (count > 0 && buf[count-1] == '\n') 3648 ((char *) buf)[count-1] = 0; // Ugh! 3649 3650 /* Eject current medium */ 3651 down_write(&fsg->filesem); 3652 if (backing_file_is_open(curlun)) { 3653 close_backing_file(curlun); 3654 curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT; 3655 } 3656 3657 /* Load new medium */ 3658 if (count > 0 && buf[0]) { 3659 rc = open_backing_file(curlun, buf); 3660 if (rc == 0) 3661 curlun->unit_attention_data = 3662 SS_NOT_READY_TO_READY_TRANSITION; 3663 } 3664 up_write(&fsg->filesem); 3665 return (rc < 0 ? rc : count); 3666} 3667 3668 3669/* The write permissions and store_xxx pointers are set in fsg_bind() */ 3670static DEVICE_ATTR(ro, 0444, show_ro, NULL); 3671static DEVICE_ATTR(file, 0444, show_file, NULL); 3672 3673 3674/*-------------------------------------------------------------------------*/ 3675 3676static void fsg_release(struct kref *ref) 3677{ 3678 struct fsg_dev *fsg = container_of(ref, struct fsg_dev, ref); 3679 3680 kfree(fsg->luns); 3681 kfree(fsg); 3682} 3683 3684static void lun_release(struct device *dev) 3685{ 3686 struct fsg_dev *fsg = dev_get_drvdata(dev); 3687 3688 kref_put(&fsg->ref, fsg_release); 3689} 3690 3691static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget) 3692{ 3693 struct fsg_dev *fsg = get_gadget_data(gadget); 3694 int i; 3695 struct lun *curlun; 3696 struct usb_request *req = fsg->ep0req; 3697 3698 DBG(fsg, "unbind\n"); 3699 clear_bit(REGISTERED, &fsg->atomic_bitflags); 3700 3701 /* Unregister the sysfs attribute files and the LUNs */ 3702 for (i = 0; i < fsg->nluns; ++i) { 3703 curlun = &fsg->luns[i]; 3704 if (curlun->registered) { 3705 device_remove_file(&curlun->dev, &dev_attr_ro); 3706 device_remove_file(&curlun->dev, &dev_attr_file); 3707 device_unregister(&curlun->dev); 3708 curlun->registered = 0; 3709 } 3710 } 3711 3712 /* If the thread isn't already dead, tell it to exit now */ 3713 if (fsg->state != FSG_STATE_TERMINATED) { 3714 raise_exception(fsg, FSG_STATE_EXIT); 3715 wait_for_completion(&fsg->thread_notifier); 3716 3717 /* The cleanup routine waits for this completion also */ 3718 complete(&fsg->thread_notifier); 3719 } 3720 3721 /* Free the data buffers */ 3722 for (i = 0; i < NUM_BUFFERS; ++i) 3723 kfree(fsg->buffhds[i].buf); 3724 3725 /* Free the request and buffer for endpoint 0 */ 3726 if (req) { 3727 kfree(req->buf); 3728 usb_ep_free_request(fsg->ep0, req); 3729 } 3730 3731 set_gadget_data(gadget, NULL); 3732} 3733 3734 3735static int __init check_parameters(struct fsg_dev *fsg) 3736{ 3737 int prot; 3738 int gcnum; 3739 3740 /* Store the default values */ 3741 mod_data.transport_type = USB_PR_BULK; 3742 mod_data.transport_name = "Bulk-only"; 3743 mod_data.protocol_type = USB_SC_SCSI; 3744 mod_data.protocol_name = "Transparent SCSI"; 3745 3746 if (gadget_is_sh(fsg->gadget)) 3747 mod_data.can_stall = 0; 3748 3749 if (mod_data.release == 0xffff) { // Parameter wasn't set 3750 /* The sa1100 controller is not supported */ 3751 if (gadget_is_sa1100(fsg->gadget)) 3752 gcnum = -1; 3753 else 3754 gcnum = usb_gadget_controller_number(fsg->gadget); 3755 if (gcnum >= 0) 3756 mod_data.release = 0x0300 + gcnum; 3757 else { 3758 WARN(fsg, "controller '%s' not recognized\n", 3759 fsg->gadget->name); 3760 mod_data.release = 0x0399; 3761 } 3762 } 3763 3764 prot = simple_strtol(mod_data.protocol_parm, NULL, 0); 3765 3766#ifdef CONFIG_USB_FILE_STORAGE_TEST 3767 if (strnicmp(mod_data.transport_parm, "BBB", 10) == 0) { 3768 ; // Use default setting 3769 } else if (strnicmp(mod_data.transport_parm, "CB", 10) == 0) { 3770 mod_data.transport_type = USB_PR_CB; 3771 mod_data.transport_name = "Control-Bulk"; 3772 } else if (strnicmp(mod_data.transport_parm, "CBI", 10) == 0) { 3773 mod_data.transport_type = USB_PR_CBI; 3774 mod_data.transport_name = "Control-Bulk-Interrupt"; 3775 } else { 3776 ERROR(fsg, "invalid transport: %s\n", mod_data.transport_parm); 3777 return -EINVAL; 3778 } 3779 3780 if (strnicmp(mod_data.protocol_parm, "SCSI", 10) == 0 || 3781 prot == USB_SC_SCSI) { 3782 ; // Use default setting 3783 } else if (strnicmp(mod_data.protocol_parm, "RBC", 10) == 0 || 3784 prot == USB_SC_RBC) { 3785 mod_data.protocol_type = USB_SC_RBC; 3786 mod_data.protocol_name = "RBC"; 3787 } else if (strnicmp(mod_data.protocol_parm, "8020", 4) == 0 || 3788 strnicmp(mod_data.protocol_parm, "ATAPI", 10) == 0 || 3789 prot == USB_SC_8020) { 3790 mod_data.protocol_type = USB_SC_8020; 3791 mod_data.protocol_name = "8020i (ATAPI)"; 3792 } else if (strnicmp(mod_data.protocol_parm, "QIC", 3) == 0 || 3793 prot == USB_SC_QIC) { 3794 mod_data.protocol_type = USB_SC_QIC; 3795 mod_data.protocol_name = "QIC-157"; 3796 } else if (strnicmp(mod_data.protocol_parm, "UFI", 10) == 0 || 3797 prot == USB_SC_UFI) { 3798 mod_data.protocol_type = USB_SC_UFI; 3799 mod_data.protocol_name = "UFI"; 3800 } else if (strnicmp(mod_data.protocol_parm, "8070", 4) == 0 || 3801 prot == USB_SC_8070) { 3802 mod_data.protocol_type = USB_SC_8070; 3803 mod_data.protocol_name = "8070i"; 3804 } else { 3805 ERROR(fsg, "invalid protocol: %s\n", mod_data.protocol_parm); 3806 return -EINVAL; 3807 } 3808 3809 mod_data.buflen &= PAGE_CACHE_MASK; 3810 if (mod_data.buflen <= 0) { 3811 ERROR(fsg, "invalid buflen\n"); 3812 return -ETOOSMALL; 3813 } 3814#endif /* CONFIG_USB_FILE_STORAGE_TEST */ 3815 3816 return 0; 3817} 3818 3819 3820static int __init fsg_bind(struct usb_gadget *gadget) 3821{ 3822 struct fsg_dev *fsg = the_fsg; 3823 int rc; 3824 int i; 3825 struct lun *curlun; 3826 struct usb_ep *ep; 3827 struct usb_request *req; 3828 char *pathbuf, *p; 3829 3830 fsg->gadget = gadget; 3831 set_gadget_data(gadget, fsg); 3832 fsg->ep0 = gadget->ep0; 3833 fsg->ep0->driver_data = fsg; 3834 3835 if ((rc = check_parameters(fsg)) != 0) 3836 goto out; 3837 3838 if (mod_data.removable) { // Enable the store_xxx attributes 3839 dev_attr_ro.attr.mode = dev_attr_file.attr.mode = 0644; 3840 dev_attr_ro.store = store_ro; 3841 dev_attr_file.store = store_file; 3842 } 3843 3844 /* Find out how many LUNs there should be */ 3845 i = mod_data.nluns; 3846 if (i == 0) 3847 i = max(mod_data.num_filenames, 1u); 3848 if (i > MAX_LUNS) { 3849 ERROR(fsg, "invalid number of LUNs: %d\n", i); 3850 rc = -EINVAL; 3851 goto out; 3852 } 3853 3854 /* Create the LUNs, open their backing files, and register the 3855 * LUN devices in sysfs. */ 3856 fsg->luns = kzalloc(i * sizeof(struct lun), GFP_KERNEL); 3857 if (!fsg->luns) { 3858 rc = -ENOMEM; 3859 goto out; 3860 } 3861 fsg->nluns = i; 3862 3863 for (i = 0; i < fsg->nluns; ++i) { 3864 curlun = &fsg->luns[i]; 3865 curlun->ro = mod_data.ro[i]; 3866 curlun->dev.release = lun_release; 3867 curlun->dev.parent = &gadget->dev; 3868 curlun->dev.driver = &fsg_driver.driver; 3869 dev_set_drvdata(&curlun->dev, fsg); 3870 snprintf(curlun->dev.bus_id, BUS_ID_SIZE, 3871 "%s-lun%d", gadget->dev.bus_id, i); 3872 3873 if ((rc = device_register(&curlun->dev)) != 0) { 3874 INFO(fsg, "failed to register LUN%d: %d\n", i, rc); 3875 goto out; 3876 } 3877 if ((rc = device_create_file(&curlun->dev, 3878 &dev_attr_ro)) != 0 || 3879 (rc = device_create_file(&curlun->dev, 3880 &dev_attr_file)) != 0) { 3881 device_unregister(&curlun->dev); 3882 goto out; 3883 } 3884 curlun->registered = 1; 3885 kref_get(&fsg->ref); 3886 3887 if (mod_data.file[i] && *mod_data.file[i]) { 3888 if ((rc = open_backing_file(curlun, 3889 mod_data.file[i])) != 0) 3890 goto out; 3891 } else if (!mod_data.removable) { 3892 ERROR(fsg, "no file given for LUN%d\n", i); 3893 rc = -EINVAL; 3894 goto out; 3895 } 3896 } 3897 3898 /* Find all the endpoints we will use */ 3899 usb_ep_autoconfig_reset(gadget); 3900 ep = usb_ep_autoconfig(gadget, &fs_bulk_in_desc); 3901 if (!ep) 3902 goto autoconf_fail; 3903 ep->driver_data = fsg; // claim the endpoint 3904 fsg->bulk_in = ep; 3905 3906 ep = usb_ep_autoconfig(gadget, &fs_bulk_out_desc); 3907 if (!ep) 3908 goto autoconf_fail; 3909 ep->driver_data = fsg; // claim the endpoint 3910 fsg->bulk_out = ep; 3911 3912 if (transport_is_cbi()) { 3913 ep = usb_ep_autoconfig(gadget, &fs_intr_in_desc); 3914 if (!ep) 3915 goto autoconf_fail; 3916 ep->driver_data = fsg; // claim the endpoint 3917 fsg->intr_in = ep; 3918 } 3919 3920 /* Fix up the descriptors */ 3921 device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket; 3922 device_desc.idVendor = cpu_to_le16(mod_data.vendor); 3923 device_desc.idProduct = cpu_to_le16(mod_data.product); 3924 device_desc.bcdDevice = cpu_to_le16(mod_data.release); 3925 3926 i = (transport_is_cbi() ? 3 : 2); // Number of endpoints 3927 intf_desc.bNumEndpoints = i; 3928 intf_desc.bInterfaceSubClass = mod_data.protocol_type; 3929 intf_desc.bInterfaceProtocol = mod_data.transport_type; 3930 fs_function[i + FS_FUNCTION_PRE_EP_ENTRIES] = NULL; 3931 3932 if (gadget_is_dualspeed(gadget)) { 3933 hs_function[i + HS_FUNCTION_PRE_EP_ENTRIES] = NULL; 3934 3935 /* Assume ep0 uses the same maxpacket value for both speeds */ 3936 dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket; 3937 3938 /* Assume endpoint addresses are the same for both speeds */ 3939 hs_bulk_in_desc.bEndpointAddress = 3940 fs_bulk_in_desc.bEndpointAddress; 3941 hs_bulk_out_desc.bEndpointAddress = 3942 fs_bulk_out_desc.bEndpointAddress; 3943 hs_intr_in_desc.bEndpointAddress = 3944 fs_intr_in_desc.bEndpointAddress; 3945 } 3946 3947 if (gadget_is_otg(gadget)) 3948 otg_desc.bmAttributes |= USB_OTG_HNP; 3949 3950 rc = -ENOMEM; 3951 3952 /* Allocate the request and buffer for endpoint 0 */ 3953 fsg->ep0req = req = usb_ep_alloc_request(fsg->ep0, GFP_KERNEL); 3954 if (!req) 3955 goto out; 3956 req->buf = kmalloc(EP0_BUFSIZE, GFP_KERNEL); 3957 if (!req->buf) 3958 goto out; 3959 req->complete = ep0_complete; 3960 3961 /* Allocate the data buffers */ 3962 for (i = 0; i < NUM_BUFFERS; ++i) { 3963 struct fsg_buffhd *bh = &fsg->buffhds[i]; 3964 3965 /* Allocate for the bulk-in endpoint. We assume that 3966 * the buffer will also work with the bulk-out (and 3967 * interrupt-in) endpoint. */ 3968 bh->buf = kmalloc(mod_data.buflen, GFP_KERNEL); 3969 if (!bh->buf) 3970 goto out; 3971 bh->next = bh + 1; 3972 } 3973 fsg->buffhds[NUM_BUFFERS - 1].next = &fsg->buffhds[0]; 3974 3975 /* This should reflect the actual gadget power source */ 3976 usb_gadget_set_selfpowered(gadget); 3977 3978 snprintf(manufacturer, sizeof manufacturer, "%s %s with %s", 3979 init_utsname()->sysname, init_utsname()->release, 3980 gadget->name); 3981 3982 /* On a real device, serial[] would be loaded from permanent 3983 * storage. We just encode it from the driver version string. */ 3984 for (i = 0; i < sizeof(serial) - 2; i += 2) { 3985 unsigned char c = DRIVER_VERSION[i / 2]; 3986 3987 if (!c) 3988 break; 3989 sprintf(&serial[i], "%02X", c); 3990 } 3991 3992 fsg->thread_task = kthread_create(fsg_main_thread, fsg, 3993 "file-storage-gadget"); 3994 if (IS_ERR(fsg->thread_task)) { 3995 rc = PTR_ERR(fsg->thread_task); 3996 goto out; 3997 } 3998 3999 INFO(fsg, DRIVER_DESC ", version: " DRIVER_VERSION "\n"); 4000 INFO(fsg, "Number of LUNs=%d\n", fsg->nluns); 4001 4002 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); 4003 for (i = 0; i < fsg->nluns; ++i) { 4004 curlun = &fsg->luns[i]; 4005 if (backing_file_is_open(curlun)) { 4006 p = NULL; 4007 if (pathbuf) { 4008 p = d_path(&curlun->filp->f_path, 4009 pathbuf, PATH_MAX); 4010 if (IS_ERR(p)) 4011 p = NULL; 4012 } 4013 LINFO(curlun, "ro=%d, file: %s\n", 4014 curlun->ro, (p ? p : "(error)")); 4015 } 4016 } 4017 kfree(pathbuf); 4018 4019 DBG(fsg, "transport=%s (x%02x)\n", 4020 mod_data.transport_name, mod_data.transport_type); 4021 DBG(fsg, "protocol=%s (x%02x)\n", 4022 mod_data.protocol_name, mod_data.protocol_type); 4023 DBG(fsg, "VendorID=x%04x, ProductID=x%04x, Release=x%04x\n", 4024 mod_data.vendor, mod_data.product, mod_data.release); 4025 DBG(fsg, "removable=%d, stall=%d, buflen=%u\n", 4026 mod_data.removable, mod_data.can_stall, 4027 mod_data.buflen); 4028 DBG(fsg, "I/O thread pid: %d\n", task_pid_nr(fsg->thread_task)); 4029 4030 set_bit(REGISTERED, &fsg->atomic_bitflags); 4031 4032 /* Tell the thread to start working */ 4033 wake_up_process(fsg->thread_task); 4034 return 0; 4035 4036autoconf_fail: 4037 ERROR(fsg, "unable to autoconfigure all endpoints\n"); 4038 rc = -ENOTSUPP; 4039 4040out: 4041 fsg->state = FSG_STATE_TERMINATED; // The thread is dead 4042 fsg_unbind(gadget); 4043 close_all_backing_files(fsg); 4044 return rc; 4045} 4046 4047 4048/*-------------------------------------------------------------------------*/ 4049 4050static void fsg_suspend(struct usb_gadget *gadget) 4051{ 4052 struct fsg_dev *fsg = get_gadget_data(gadget); 4053 4054 DBG(fsg, "suspend\n"); 4055 set_bit(SUSPENDED, &fsg->atomic_bitflags); 4056} 4057 4058static void fsg_resume(struct usb_gadget *gadget) 4059{ 4060 struct fsg_dev *fsg = get_gadget_data(gadget); 4061 4062 DBG(fsg, "resume\n"); 4063 clear_bit(SUSPENDED, &fsg->atomic_bitflags); 4064} 4065 4066 4067/*-------------------------------------------------------------------------*/ 4068 4069static struct usb_gadget_driver fsg_driver = { 4070#ifdef CONFIG_USB_GADGET_DUALSPEED 4071 .speed = USB_SPEED_HIGH, 4072#else 4073 .speed = USB_SPEED_FULL, 4074#endif 4075 .function = (char *) longname, 4076 .bind = fsg_bind, 4077 .unbind = fsg_unbind, 4078 .disconnect = fsg_disconnect, 4079 .setup = fsg_setup, 4080 .suspend = fsg_suspend, 4081 .resume = fsg_resume, 4082 4083 .driver = { 4084 .name = (char *) shortname, 4085 .owner = THIS_MODULE, 4086 // .release = ... 4087 // .suspend = ... 4088 // .resume = ... 4089 }, 4090}; 4091 4092 4093static int __init fsg_alloc(void) 4094{ 4095 struct fsg_dev *fsg; 4096 4097 fsg = kzalloc(sizeof *fsg, GFP_KERNEL); 4098 if (!fsg) 4099 return -ENOMEM; 4100 spin_lock_init(&fsg->lock); 4101 init_rwsem(&fsg->filesem); 4102 kref_init(&fsg->ref); 4103 init_completion(&fsg->thread_notifier); 4104 4105 the_fsg = fsg; 4106 return 0; 4107} 4108 4109 4110static int __init fsg_init(void) 4111{ 4112 int rc; 4113 struct fsg_dev *fsg; 4114 4115 if ((rc = fsg_alloc()) != 0) 4116 return rc; 4117 fsg = the_fsg; 4118 if ((rc = usb_gadget_register_driver(&fsg_driver)) != 0) 4119 kref_put(&fsg->ref, fsg_release); 4120 return rc; 4121} 4122module_init(fsg_init); 4123 4124 4125static void __exit fsg_cleanup(void) 4126{ 4127 struct fsg_dev *fsg = the_fsg; 4128 4129 /* Unregister the driver iff the thread hasn't already done so */ 4130 if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags)) 4131 usb_gadget_unregister_driver(&fsg_driver); 4132 4133 /* Wait for the thread to finish up */ 4134 wait_for_completion(&fsg->thread_notifier); 4135 4136 close_all_backing_files(fsg); 4137 kref_put(&fsg->ref, fsg_release); 4138} 4139module_exit(fsg_cleanup); 4140