1/* 2 * f_mass_storage.c -- Mass Storage USB Composite Function 3 * 4 * Copyright (C) 2003-2008 Alan Stern 5 * Copyright (C) 2009 Samsung Electronics 6 * Author: Michal Nazarewicz <mina86@mina86.com> 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions, and the following disclaimer, 14 * without modification. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. The names of the above-listed copyright holders may not be used 19 * to endorse or promote products derived from this software without 20 * specific prior written permission. 21 * 22 * ALTERNATIVELY, this software may be distributed under the terms of the 23 * GNU General Public License ("GPL") as published by the Free Software 24 * Foundation, either version 2 of that License or (at your option) any 25 * later version. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 28 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 29 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 30 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR 31 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 32 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 33 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 34 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 35 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 36 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40/* 41 * The Mass Storage Function acts as a USB Mass Storage device, 42 * appearing to the host as a disk drive or as a CD-ROM drive. In 43 * addition to providing an example of a genuinely useful composite 44 * function for a USB device, it also illustrates a technique of 45 * double-buffering for increased throughput. 46 * 47 * Function supports multiple logical units (LUNs). Backing storage 48 * for each LUN is provided by a regular file or a block device. 49 * Access for each LUN can be limited to read-only. Moreover, the 50 * function can indicate that LUN is removable and/or CD-ROM. (The 51 * later implies read-only access.) 52 * 53 * MSF is configured by specifying a fsg_config structure. It has the 54 * following fields: 55 * 56 * nluns Number of LUNs function have (anywhere from 1 57 * to FSG_MAX_LUNS which is 8). 58 * luns An array of LUN configuration values. This 59 * should be filled for each LUN that 60 * function will include (ie. for "nluns" 61 * LUNs). Each element of the array has 62 * the following fields: 63 * ->filename The path to the backing file for the LUN. 64 * Required if LUN is not marked as 65 * removable. 66 * ->ro Flag specifying access to the LUN shall be 67 * read-only. This is implied if CD-ROM 68 * emulation is enabled as well as when 69 * it was impossible to open "filename" 70 * in R/W mode. 71 * ->removable Flag specifying that LUN shall be indicated as 72 * being removable. 
73 * ->cdrom Flag specifying that LUN shall be reported as 74 * being a CD-ROM. 75 * ->nofua Flag specifying that FUA flag in SCSI WRITE(10,12) 76 * commands for this LUN shall be ignored. 77 * 78 * lun_name_format A printf-like format for names of the LUN 79 * devices. This determines how the 80 * directory in sysfs will be named. 81 * Unless you are using several MSFs in 82 * a single gadget (as opposed to single 83 * MSF in many configurations) you may 84 * leave it as NULL (in which case 85 * "lun%d" will be used). In the format 86 * you can use "%d" to index LUNs for 87 * MSF's with more than one LUN. (Beware 88 * that there is only one integer given 89 * as an argument for the format and 90 * specifying invalid format may cause 91 * unspecified behaviour.) 92 * thread_name Name of the kernel thread process used by the 93 * MSF. You can safely set it to NULL 94 * (in which case default "file-storage" 95 * will be used). 96 * 97 * vendor_name 98 * product_name 99 * release Information used as a reply to INQUIRY 100 * request. To use default set to NULL, 101 * NULL, 0xffff respectively. The first 102 * field should be 8 and the second 16 103 * characters or less. 104 * 105 * can_stall Set to permit function to halt bulk endpoints. 106 * Disabled on some USB devices known not 107 * to work correctly. You should set it 108 * to true. 109 * 110 * If "removable" is not set for a LUN then a backing file must be 111 * specified. If it is set, then NULL filename means the LUN's medium 112 * is not loaded (an empty string as "filename" in the fsg_config 113 * structure causes error). The CD-ROM emulation includes a single 114 * data track and no audio tracks; hence there need be only one 115 * backing file per LUN. 116 * 117 * 118 * MSF includes support for module parameters. If gadget using it 119 * decides to use it, the following module parameters will be 120 * available: 121 * 122 * file=filename[,filename...] 
123 * Names of the files or block devices used for 124 * backing storage. 125 * ro=b[,b...] Default false, boolean for read-only access. 126 * removable=b[,b...] 127 * Default true, boolean for removable media. 128 * cdrom=b[,b...] Default false, boolean for whether to emulate 129 * a CD-ROM drive. 130 * nofua=b[,b...] Default false, booleans for ignore FUA flag 131 * in SCSI WRITE(10,12) commands 132 * luns=N Default N = number of filenames, number of 133 * LUNs to support. 134 * stall Default determined according to the type of 135 * USB device controller (usually true), 136 * boolean to permit the driver to halt 137 * bulk endpoints. 138 * 139 * The module parameters may be prefixed with some string. You need 140 * to consult gadget's documentation or source to verify whether it is 141 * using those module parameters and if it does what are the prefixes 142 * (look for FSG_MODULE_PARAMETERS() macro usage, what's inside it is 143 * the prefix). 144 * 145 * 146 * Requirements are modest; only a bulk-in and a bulk-out endpoint are 147 * needed. The memory requirement amounts to two 16K buffers, size 148 * configurable by a parameter. Support is included for both 149 * full-speed and high-speed operation. 150 * 151 * Note that the driver is slightly non-portable in that it assumes a 152 * single memory/DMA buffer will be useable for bulk-in, bulk-out, and 153 * interrupt-in endpoints. With most device controllers this isn't an 154 * issue, but there may be some with hardware restrictions that prevent 155 * a buffer from being used by more than one endpoint. 156 * 157 * 158 * The pathnames of the backing files and the ro settings are 159 * available in the attribute files "file" and "ro" in the lun<n> (or 160 * to be more precise in a directory which name comes from 161 * "lun_name_format" option!) subdirectory of the gadget's sysfs 162 * directory. 
If the "removable" option is set, writing to these 163 * files will simulate ejecting/loading the medium (writing an empty 164 * line means eject) and adjusting a write-enable tab. Changes to the 165 * ro setting are not allowed when the medium is loaded or if CD-ROM 166 * emulation is being used. 167 * 168 * When a LUN receive an "eject" SCSI request (Start/Stop Unit), 169 * if the LUN is removable, the backing file is released to simulate 170 * ejection. 171 * 172 * 173 * This function is heavily based on "File-backed Storage Gadget" by 174 * Alan Stern which in turn is heavily based on "Gadget Zero" by David 175 * Brownell. The driver's SCSI command interface was based on the 176 * "Information technology - Small Computer System Interface - 2" 177 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93, 178 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>. 179 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which 180 * was based on the "Universal Serial Bus Mass Storage Class UFI 181 * Command Specification" document, Revision 1.0, December 14, 1998, 182 * available at 183 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>. 184 */ 185 186/* 187 * Driver Design 188 * 189 * The MSF is fairly straightforward. There is a main kernel 190 * thread that handles most of the work. Interrupt routines field 191 * callbacks from the controller driver: bulk- and interrupt-request 192 * completion notifications, endpoint-0 events, and disconnect events. 193 * Completion events are passed to the main thread by wakeup calls. Many 194 * ep0 requests are handled at interrupt time, but SetInterface, 195 * SetConfiguration, and device reset requests are forwarded to the 196 * thread in the form of "exceptions" using SIGUSR1 signals (since they 197 * should interrupt any ongoing file I/O operations). 198 * 199 * The thread's main routine implements the standard command/data/status 200 * parts of a SCSI interaction. 
It and its subroutines are full of tests 201 * for pending signals/exceptions -- all this polling is necessary since 202 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an 203 * indication that the driver really wants to be running in userspace.) 204 * An important point is that so long as the thread is alive it keeps an 205 * open reference to the backing file. This will prevent unmounting 206 * the backing file's underlying filesystem and could cause problems 207 * during system shutdown, for example. To prevent such problems, the 208 * thread catches INT, TERM, and KILL signals and converts them into 209 * an EXIT exception. 210 * 211 * In normal operation the main thread is started during the gadget's 212 * fsg_bind() callback and stopped during fsg_unbind(). But it can 213 * also exit when it receives a signal, and there's no point leaving 214 * the gadget running when the thread is dead. At of this moment, MSF 215 * provides no way to deregister the gadget when thread dies -- maybe 216 * a callback functions is needed. 217 * 218 * To provide maximum throughput, the driver uses a circular pipeline of 219 * buffer heads (struct fsg_buffhd). In principle the pipeline can be 220 * arbitrarily long; in practice the benefits don't justify having more 221 * than 2 stages (i.e., double buffering). But it helps to think of the 222 * pipeline as being a long one. Each buffer head contains a bulk-in and 223 * a bulk-out request pointer (since the buffer can be used for both 224 * output and input -- directions always are given from the host's 225 * point of view) as well as a pointer to the buffer and various state 226 * variables. 227 * 228 * Use of the pipeline follows a simple protocol. There is a variable 229 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use. 
230 * At any time that buffer head may still be in use from an earlier 231 * request, so each buffer head has a state variable indicating whether 232 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the 233 * buffer head to be EMPTY, filling the buffer either by file I/O or by 234 * USB I/O (during which the buffer head is BUSY), and marking the buffer 235 * head FULL when the I/O is complete. Then the buffer will be emptied 236 * (again possibly by USB I/O, during which it is marked BUSY) and 237 * finally marked EMPTY again (possibly by a completion routine). 238 * 239 * A module parameter tells the driver to avoid stalling the bulk 240 * endpoints wherever the transport specification allows. This is 241 * necessary for some UDCs like the SuperH, which cannot reliably clear a 242 * halt on a bulk endpoint. However, under certain circumstances the 243 * Bulk-only specification requires a stall. In such cases the driver 244 * will halt the endpoint and set a flag indicating that it should clear 245 * the halt in software during the next device reset. Hopefully this 246 * will permit everything to work correctly. Furthermore, although the 247 * specification allows the bulk-out endpoint to halt when the host sends 248 * too much data, implementing this would cause an unavoidable race. 249 * The driver will always use the "no-stall" approach for OUT transfers. 250 * 251 * One subtle point concerns sending status-stage responses for ep0 252 * requests. Some of these requests, such as device reset, can involve 253 * interrupting an ongoing file I/O operation, which might take an 254 * arbitrarily long time. During that delay the host might give up on 255 * the original ep0 request and issue a new one. When that happens the 256 * driver should not notify the host about completion of the original 257 * request, as the host will no longer be waiting for it. 
So the driver 258 * assigns to each ep0 request a unique tag, and it keeps track of the 259 * tag value of the request associated with a long-running exception 260 * (device-reset, interface-change, or configuration-change). When the 261 * exception handler is finished, the status-stage response is submitted 262 * only if the current ep0 request tag is equal to the exception request 263 * tag. Thus only the most recently received ep0 request will get a 264 * status-stage response. 265 * 266 * Warning: This driver source file is too long. It ought to be split up 267 * into a header file plus about 3 separate .c files, to handle the details 268 * of the Gadget, USB Mass Storage, and SCSI protocols. 269 */ 270 271 272/* #define VERBOSE_DEBUG */ 273/* #define DUMP_MSGS */ 274 275#include <linux/blkdev.h> 276#include <linux/completion.h> 277#include <linux/dcache.h> 278#include <linux/delay.h> 279#include <linux/device.h> 280#include <linux/fcntl.h> 281#include <linux/file.h> 282#include <linux/fs.h> 283#include <linux/kref.h> 284#include <linux/kthread.h> 285#include <linux/limits.h> 286#include <linux/rwsem.h> 287#include <linux/slab.h> 288#include <linux/spinlock.h> 289#include <linux/string.h> 290#include <linux/freezer.h> 291#include <linux/utsname.h> 292 293#include <linux/usb/ch9.h> 294#include <linux/usb/gadget.h> 295#include <linux/usb/composite.h> 296 297#include "gadget_chips.h" 298 299 300/*------------------------------------------------------------------------*/ 301 302#define FSG_DRIVER_DESC "Mass Storage Function" 303#define FSG_DRIVER_VERSION "2009/09/11" 304 305static const char fsg_string_interface[] = "Mass Storage"; 306 307#define FSG_NO_DEVICE_STRINGS 1 308#define FSG_NO_OTG 1 309#define FSG_NO_INTR_EP 1 310 311#include "storage_common.c" 312 313 314/*-------------------------------------------------------------------------*/ 315 316struct fsg_dev; 317struct fsg_common; 318 319/* FSF callback functions */ 320struct fsg_operations { 321 /* 322 * 
Callback function to call when thread exits. If no 323 * callback is set or it returns value lower then zero MSF 324 * will force eject all LUNs it operates on (including those 325 * marked as non-removable or with prevent_medium_removal flag 326 * set). 327 */ 328 int (*thread_exits)(struct fsg_common *common); 329 330 /* 331 * Called prior to ejection. Negative return means error, 332 * zero means to continue with ejection, positive means not to 333 * eject. 334 */ 335 int (*pre_eject)(struct fsg_common *common, 336 struct fsg_lun *lun, int num); 337 /* 338 * Called after ejection. Negative return means error, zero 339 * or positive is just a success. 340 */ 341 int (*post_eject)(struct fsg_common *common, 342 struct fsg_lun *lun, int num); 343}; 344 345/* Data shared by all the FSG instances. */ 346struct fsg_common { 347 struct usb_gadget *gadget; 348 struct usb_composite_dev *cdev; 349 struct fsg_dev *fsg, *new_fsg; 350 wait_queue_head_t fsg_wait; 351 352 /* filesem protects: backing files in use */ 353 struct rw_semaphore filesem; 354 355 /* lock protects: state, all the req_busy's */ 356 spinlock_t lock; 357 358 struct usb_ep *ep0; /* Copy of gadget->ep0 */ 359 struct usb_request *ep0req; /* Copy of cdev->req */ 360 unsigned int ep0_req_tag; 361 362 struct fsg_buffhd *next_buffhd_to_fill; 363 struct fsg_buffhd *next_buffhd_to_drain; 364 struct fsg_buffhd *buffhds; 365 366 int cmnd_size; 367 u8 cmnd[MAX_COMMAND_SIZE]; 368 369 unsigned int nluns; 370 unsigned int lun; 371 struct fsg_lun *luns; 372 struct fsg_lun *curlun; 373 374 unsigned int bulk_out_maxpacket; 375 enum fsg_state state; /* For exception handling */ 376 unsigned int exception_req_tag; 377 378 enum data_direction data_dir; 379 u32 data_size; 380 u32 data_size_from_cmnd; 381 u32 tag; 382 u32 residue; 383 u32 usb_amount_left; 384 385 unsigned int can_stall:1; 386 unsigned int free_storage_on_release:1; 387 unsigned int phase_error:1; 388 unsigned int short_packet_received:1; 389 unsigned int 
bad_lun_okay:1; 390 unsigned int running:1; 391 392 int thread_wakeup_needed; 393 struct completion thread_notifier; 394 struct task_struct *thread_task; 395 396 /* Callback functions. */ 397 const struct fsg_operations *ops; 398 /* Gadget's private data. */ 399 void *private_data; 400 401 /* 402 * Vendor (8 chars), product (16 chars), release (4 403 * hexadecimal digits) and NUL byte 404 */ 405 char inquiry_string[8 + 16 + 4 + 1]; 406 407 struct kref ref; 408}; 409 410struct fsg_config { 411 unsigned nluns; 412 struct fsg_lun_config { 413 const char *filename; 414 char ro; 415 char removable; 416 char cdrom; 417 char nofua; 418 } luns[FSG_MAX_LUNS]; 419 420 const char *lun_name_format; 421 const char *thread_name; 422 423 /* Callback functions. */ 424 const struct fsg_operations *ops; 425 /* Gadget's private data. */ 426 void *private_data; 427 428 const char *vendor_name; /* 8 characters or less */ 429 const char *product_name; /* 16 characters or less */ 430 u16 release; 431 432 char can_stall; 433}; 434 435struct fsg_dev { 436 struct usb_function function; 437 struct usb_gadget *gadget; /* Copy of cdev->gadget */ 438 struct fsg_common *common; 439 440 u16 interface_number; 441 442 unsigned int bulk_in_enabled:1; 443 unsigned int bulk_out_enabled:1; 444 445 unsigned long atomic_bitflags; 446#define IGNORE_BULK_OUT 0 447 448 struct usb_ep *bulk_in; 449 struct usb_ep *bulk_out; 450}; 451 452static inline int __fsg_is_set(struct fsg_common *common, 453 const char *func, unsigned line) 454{ 455 if (common->fsg) 456 return 1; 457 ERROR(common, "common->fsg is NULL in %s at %u\n", func, line); 458 WARN_ON(1); 459 return 0; 460} 461 462#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__)) 463 464static inline struct fsg_dev *fsg_from_func(struct usb_function *f) 465{ 466 return container_of(f, struct fsg_dev, function); 467} 468 469typedef void (*fsg_routine_t)(struct fsg_dev *); 470 471static int exception_in_progress(struct fsg_common *common) 
472{ 473 return common->state > FSG_STATE_IDLE; 474} 475 476/* Make bulk-out requests be divisible by the maxpacket size */ 477static void set_bulk_out_req_length(struct fsg_common *common, 478 struct fsg_buffhd *bh, unsigned int length) 479{ 480 unsigned int rem; 481 482 bh->bulk_out_intended_length = length; 483 rem = length % common->bulk_out_maxpacket; 484 if (rem > 0) 485 length += common->bulk_out_maxpacket - rem; 486 bh->outreq->length = length; 487} 488 489 490/*-------------------------------------------------------------------------*/ 491 492static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep) 493{ 494 const char *name; 495 496 if (ep == fsg->bulk_in) 497 name = "bulk-in"; 498 else if (ep == fsg->bulk_out) 499 name = "bulk-out"; 500 else 501 name = ep->name; 502 DBG(fsg, "%s set halt\n", name); 503 return usb_ep_set_halt(ep); 504} 505 506 507/*-------------------------------------------------------------------------*/ 508 509/* These routines may be called in process context or in_irq */ 510 511/* Caller must hold fsg->lock */ 512static void wakeup_thread(struct fsg_common *common) 513{ 514 /* Tell the main thread that something has happened */ 515 common->thread_wakeup_needed = 1; 516 if (common->thread_task) 517 wake_up_process(common->thread_task); 518} 519 520static void raise_exception(struct fsg_common *common, enum fsg_state new_state) 521{ 522 unsigned long flags; 523 524 /* 525 * Do nothing if a higher-priority exception is already in progress. 526 * If a lower-or-equal priority exception is in progress, preempt it 527 * and notify the main thread by sending it a signal. 
528 */ 529 spin_lock_irqsave(&common->lock, flags); 530 if (common->state <= new_state) { 531 common->exception_req_tag = common->ep0_req_tag; 532 common->state = new_state; 533 if (common->thread_task) 534 send_sig_info(SIGUSR1, SEND_SIG_FORCED, 535 common->thread_task); 536 } 537 spin_unlock_irqrestore(&common->lock, flags); 538} 539 540 541/*-------------------------------------------------------------------------*/ 542 543static int ep0_queue(struct fsg_common *common) 544{ 545 int rc; 546 547 rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC); 548 common->ep0->driver_data = common; 549 if (rc != 0 && rc != -ESHUTDOWN) { 550 /* We can't do much more than wait for a reset */ 551 WARNING(common, "error in submission: %s --> %d\n", 552 common->ep0->name, rc); 553 } 554 return rc; 555} 556 557 558/*-------------------------------------------------------------------------*/ 559 560/* Completion handlers. These always run in_irq. */ 561 562static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req) 563{ 564 struct fsg_common *common = ep->driver_data; 565 struct fsg_buffhd *bh = req->context; 566 567 if (req->status || req->actual != req->length) 568 DBG(common, "%s --> %d, %u/%u\n", __func__, 569 req->status, req->actual, req->length); 570 if (req->status == -ECONNRESET) /* Request was cancelled */ 571 usb_ep_fifo_flush(ep); 572 573 /* Hold the lock while we update the request and buffer states */ 574 smp_wmb(); 575 spin_lock(&common->lock); 576 bh->inreq_busy = 0; 577 bh->state = BUF_STATE_EMPTY; 578 wakeup_thread(common); 579 spin_unlock(&common->lock); 580} 581 582static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req) 583{ 584 struct fsg_common *common = ep->driver_data; 585 struct fsg_buffhd *bh = req->context; 586 587 dump_msg(common, "bulk-out", req->buf, req->actual); 588 if (req->status || req->actual != bh->bulk_out_intended_length) 589 DBG(common, "%s --> %d, %u/%u\n", __func__, 590 req->status, req->actual, 
bh->bulk_out_intended_length); 591 if (req->status == -ECONNRESET) /* Request was cancelled */ 592 usb_ep_fifo_flush(ep); 593 594 /* Hold the lock while we update the request and buffer states */ 595 smp_wmb(); 596 spin_lock(&common->lock); 597 bh->outreq_busy = 0; 598 bh->state = BUF_STATE_FULL; 599 wakeup_thread(common); 600 spin_unlock(&common->lock); 601} 602 603static int fsg_setup(struct usb_function *f, 604 const struct usb_ctrlrequest *ctrl) 605{ 606 struct fsg_dev *fsg = fsg_from_func(f); 607 struct usb_request *req = fsg->common->ep0req; 608 u16 w_index = le16_to_cpu(ctrl->wIndex); 609 u16 w_value = le16_to_cpu(ctrl->wValue); 610 u16 w_length = le16_to_cpu(ctrl->wLength); 611 612 if (!fsg_is_set(fsg->common)) 613 return -EOPNOTSUPP; 614 615 ++fsg->common->ep0_req_tag; /* Record arrival of a new request */ 616 req->context = NULL; 617 req->length = 0; 618 dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl)); 619 620 switch (ctrl->bRequest) { 621 622 case US_BULK_RESET_REQUEST: 623 if (ctrl->bRequestType != 624 (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 625 break; 626 if (w_index != fsg->interface_number || w_value != 0 || 627 w_length != 0) 628 return -EDOM; 629 630 /* 631 * Raise an exception to stop the current operation 632 * and reinitialize our state. 
633 */ 634 DBG(fsg, "bulk reset request\n"); 635 raise_exception(fsg->common, FSG_STATE_RESET); 636 return DELAYED_STATUS; 637 638 case US_BULK_GET_MAX_LUN: 639 if (ctrl->bRequestType != 640 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 641 break; 642 if (w_index != fsg->interface_number || w_value != 0 || 643 w_length != 1) 644 return -EDOM; 645 VDBG(fsg, "get max LUN\n"); 646 *(u8 *)req->buf = fsg->common->nluns - 1; 647 648 /* Respond with data/status */ 649 req->length = min((u16)1, w_length); 650 return ep0_queue(fsg->common); 651 } 652 653 VDBG(fsg, 654 "unknown class-specific control req %02x.%02x v%04x i%04x l%u\n", 655 ctrl->bRequestType, ctrl->bRequest, 656 le16_to_cpu(ctrl->wValue), w_index, w_length); 657 return -EOPNOTSUPP; 658} 659 660 661/*-------------------------------------------------------------------------*/ 662 663/* All the following routines run in process context */ 664 665/* Use this for bulk or interrupt transfers, not ep0 */ 666static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep, 667 struct usb_request *req, int *pbusy, 668 enum fsg_buffer_state *state) 669{ 670 int rc; 671 672 if (ep == fsg->bulk_in) 673 dump_msg(fsg, "bulk-in", req->buf, req->length); 674 675 spin_lock_irq(&fsg->common->lock); 676 *pbusy = 1; 677 *state = BUF_STATE_BUSY; 678 spin_unlock_irq(&fsg->common->lock); 679 rc = usb_ep_queue(ep, req, GFP_KERNEL); 680 if (rc != 0) { 681 *pbusy = 0; 682 *state = BUF_STATE_EMPTY; 683 684 /* We can't do much more than wait for a reset */ 685 686 /* 687 * Note: currently the net2280 driver fails zero-length 688 * submissions if DMA is enabled. 
689 */ 690 if (rc != -ESHUTDOWN && 691 !(rc == -EOPNOTSUPP && req->length == 0)) 692 WARNING(fsg, "error in submission: %s --> %d\n", 693 ep->name, rc); 694 } 695} 696 697static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh) 698{ 699 if (!fsg_is_set(common)) 700 return false; 701 start_transfer(common->fsg, common->fsg->bulk_in, 702 bh->inreq, &bh->inreq_busy, &bh->state); 703 return true; 704} 705 706static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh) 707{ 708 if (!fsg_is_set(common)) 709 return false; 710 start_transfer(common->fsg, common->fsg->bulk_out, 711 bh->outreq, &bh->outreq_busy, &bh->state); 712 return true; 713} 714 715static int sleep_thread(struct fsg_common *common) 716{ 717 int rc = 0; 718 719 /* Wait until a signal arrives or we are woken up */ 720 for (;;) { 721 try_to_freeze(); 722 set_current_state(TASK_INTERRUPTIBLE); 723 if (signal_pending(current)) { 724 rc = -EINTR; 725 break; 726 } 727 if (common->thread_wakeup_needed) 728 break; 729 schedule(); 730 } 731 __set_current_state(TASK_RUNNING); 732 common->thread_wakeup_needed = 0; 733 return rc; 734} 735 736 737/*-------------------------------------------------------------------------*/ 738 739static int do_read(struct fsg_common *common) 740{ 741 struct fsg_lun *curlun = common->curlun; 742 u32 lba; 743 struct fsg_buffhd *bh; 744 int rc; 745 u32 amount_left; 746 loff_t file_offset, file_offset_tmp; 747 unsigned int amount; 748 ssize_t nread; 749 750 /* 751 * Get the starting Logical Block Address and check that it's 752 * not too big. 753 */ 754 if (common->cmnd[0] == READ_6) 755 lba = get_unaligned_be24(&common->cmnd[1]); 756 else { 757 lba = get_unaligned_be32(&common->cmnd[2]); 758 759 /* 760 * We allow DPO (Disable Page Out = don't save data in the 761 * cache) and FUA (Force Unit Access = don't read from the 762 * cache), but we don't implement them. 
763 */ 764 if ((common->cmnd[1] & ~0x18) != 0) { 765 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 766 return -EINVAL; 767 } 768 } 769 if (lba >= curlun->num_sectors) { 770 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 771 return -EINVAL; 772 } 773 file_offset = ((loff_t) lba) << curlun->blkbits; 774 775 /* Carry out the file reads */ 776 amount_left = common->data_size_from_cmnd; 777 if (unlikely(amount_left == 0)) 778 return -EIO; /* No default reply */ 779 780 for (;;) { 781 /* 782 * Figure out how much we need to read: 783 * Try to read the remaining amount. 784 * But don't read more than the buffer size. 785 * And don't try to read past the end of the file. 786 */ 787 amount = min(amount_left, FSG_BUFLEN); 788 amount = min((loff_t)amount, 789 curlun->file_length - file_offset); 790 791 /* Wait for the next buffer to become available */ 792 bh = common->next_buffhd_to_fill; 793 while (bh->state != BUF_STATE_EMPTY) { 794 rc = sleep_thread(common); 795 if (rc) 796 return rc; 797 } 798 799 /* 800 * If we were asked to read past the end of file, 801 * end with an empty buffer. 
802 */ 803 if (amount == 0) { 804 curlun->sense_data = 805 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 806 curlun->sense_data_info = 807 file_offset >> curlun->blkbits; 808 curlun->info_valid = 1; 809 bh->inreq->length = 0; 810 bh->state = BUF_STATE_FULL; 811 break; 812 } 813 814 /* Perform the read */ 815 file_offset_tmp = file_offset; 816 nread = vfs_read(curlun->filp, 817 (char __user *)bh->buf, 818 amount, &file_offset_tmp); 819 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, 820 (unsigned long long)file_offset, (int)nread); 821 if (signal_pending(current)) 822 return -EINTR; 823 824 if (nread < 0) { 825 LDBG(curlun, "error in file read: %d\n", (int)nread); 826 nread = 0; 827 } else if (nread < amount) { 828 LDBG(curlun, "partial file read: %d/%u\n", 829 (int)nread, amount); 830 nread = round_down(nread, curlun->blksize); 831 } 832 file_offset += nread; 833 amount_left -= nread; 834 common->residue -= nread; 835 836 /* 837 * Except at the end of the transfer, nread will be 838 * equal to the buffer size, which is divisible by the 839 * bulk-in maxpacket size. 
		 */
		bh->inreq->length = nread;
		bh->state = BUF_STATE_FULL;

		/* If an error occurred, report it and its position */
		if (nread < amount) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->sense_data_info =
					file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			break;
		}

		if (amount_left == 0)
			break;		/* No more left to read */

		/* Send this buffer and go read some more */
		bh->inreq->zero = 0;
		if (!start_in_transfer(common, bh))
			/* Don't know what to do if common->fsg is NULL */
			return -EIO;
		common->next_buffhd_to_fill = bh->next;
	}

	return -EIO;		/* No default reply */
}


/*-------------------------------------------------------------------------*/

/*
 * Handle a SCSI WRITE(6)/(10)/(12) command: queue bulk-out requests to
 * pull data from the host into the ping-pong buffers, and write each
 * filled buffer to the backing file.  On any failure the LUN's sense
 * data is set so REQUEST SENSE can report it; the final -EIO means
 * "no default reply" (the status is carried by the CSW instead).
 */
static int do_write(struct fsg_common *common)
{
	struct fsg_lun *curlun = common->curlun;
	u32 lba;
	struct fsg_buffhd *bh;
	int get_some_more;
	u32 amount_left_to_req, amount_left_to_write;
	loff_t usb_offset, file_offset, file_offset_tmp;
	unsigned int amount;
	ssize_t nwritten;
	int rc;

	if (curlun->ro) {
		curlun->sense_data = SS_WRITE_PROTECTED;
		return -EINVAL;
	}
	spin_lock(&curlun->filp->f_lock);
	curlun->filp->f_flags &= ~O_SYNC;	/* Default is not to wait */
	spin_unlock(&curlun->filp->f_lock);

	/*
	 * Get the starting Logical Block Address and check that it's
	 * not too big
	 */
	if (common->cmnd[0] == WRITE_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		lba = get_unaligned_be32(&common->cmnd[2]);

		/*
		 * We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = write directly to the
		 * medium).  We don't implement DPO; we implement FUA by
		 * performing synchronous output.
		 */
		if (common->cmnd[1] & ~0x18) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
		/* FUA honored by setting O_SYNC unless the LUN opts out */
		if (!curlun->nofua && (common->cmnd[1] & 0x08)) { /* FUA */
			spin_lock(&curlun->filp->f_lock);
			curlun->filp->f_flags |= O_SYNC;
			spin_unlock(&curlun->filp->f_lock);
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* Carry out the file writes */
	get_some_more = 1;
	file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits;
	amount_left_to_req = common->data_size_from_cmnd;
	amount_left_to_write = common->data_size_from_cmnd;

	while (amount_left_to_write > 0) {

		/* Queue a request for more data from the host */
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY && get_some_more) {

			/*
			 * Figure out how much we want to get:
			 * Try to get the remaining amount,
			 * but not more than the buffer size.
			 */
			amount = min(amount_left_to_req, FSG_BUFLEN);

			/* Beyond the end of the backing file? */
			if (usb_offset >= curlun->file_length) {
				get_some_more = 0;
				curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
				curlun->sense_data_info =
					usb_offset >> curlun->blkbits;
				curlun->info_valid = 1;
				continue;
			}

			/* Get the next buffer */
			usb_offset += amount;
			common->usb_amount_left -= amount;
			amount_left_to_req -= amount;
			if (amount_left_to_req == 0)
				get_some_more = 0;

			/*
			 * Except at the end of the transfer, amount will be
			 * equal to the buffer size, which is divisible by
			 * the bulk-out maxpacket size.
			 */
			set_bulk_out_req_length(common, bh, amount);
			if (!start_out_transfer(common, bh))
				/* Dunno what to do if common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
			continue;
		}

		/* Write the received data to the backing file */
		bh = common->next_buffhd_to_drain;
		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
			break;			/* We stopped early */
		if (bh->state == BUF_STATE_FULL) {
			/* Pairs with the barrier in the completion path */
			smp_rmb();
			common->next_buffhd_to_drain = bh->next;
			bh->state = BUF_STATE_EMPTY;

			/* Did something go wrong with the transfer? */
			if (bh->outreq->status != 0) {
				curlun->sense_data = SS_COMMUNICATION_FAILURE;
				curlun->sense_data_info =
					file_offset >> curlun->blkbits;
				curlun->info_valid = 1;
				break;
			}

			amount = bh->outreq->actual;
			if (curlun->file_length - file_offset < amount) {
				LERROR(curlun,
				       "write %u @ %llu beyond end %llu\n",
				       amount, (unsigned long long)file_offset,
				       (unsigned long long)curlun->file_length);
				amount = curlun->file_length - file_offset;
			}

			/* Don't accept excess data.  The spec doesn't say
			 * what to do in this case.  We'll ignore the error.
			 */
			amount = min(amount, bh->bulk_out_intended_length);

			/* Don't write a partial block */
			amount = round_down(amount, curlun->blksize);
			if (amount == 0)
				goto empty_write;

			/* Perform the write */
			file_offset_tmp = file_offset;
			nwritten = vfs_write(curlun->filp,
					     (char __user *)bh->buf,
					     amount, &file_offset_tmp);
			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
			      (unsigned long long)file_offset, (int)nwritten);
			if (signal_pending(current))
				return -EINTR;		/* Interrupted! */

			if (nwritten < 0) {
				LDBG(curlun, "error in file write: %d\n",
				     (int)nwritten);
				nwritten = 0;
			} else if (nwritten < amount) {
				LDBG(curlun, "partial file write: %d/%u\n",
				     (int)nwritten, amount);
				/* Only count whole blocks as written */
				nwritten = round_down(nwritten, curlun->blksize);
			}
			file_offset += nwritten;
			amount_left_to_write -= nwritten;
			common->residue -= nwritten;

			/* If an error occurred, report it and its position */
			if (nwritten < amount) {
				curlun->sense_data = SS_WRITE_ERROR;
				curlun->sense_data_info =
					file_offset >> curlun->blkbits;
				curlun->info_valid = 1;
				break;
			}

 empty_write:
			/* Did the host decide to stop early? */
			if (bh->outreq->actual < bh->bulk_out_intended_length) {
				common->short_packet_received = 1;
				break;
			}
			continue;
		}

		/* Wait for something to happen */
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	return -EIO;		/* No default reply */
}


/*-------------------------------------------------------------------------*/

static int do_synchronize_cache(struct fsg_common *common)
{
	struct fsg_lun *curlun = common->curlun;
	int rc;

	/* We ignore the requested LBA and write out all file's
	 * dirty data buffers.
	 */
	rc = fsg_lun_fsync_sub(curlun);
	if (rc)
		curlun->sense_data = SS_WRITE_ERROR;
	return 0;
}


/*-------------------------------------------------------------------------*/

/*
 * Drop all cached pages of the LUN's backing file mapping so later
 * reads fetch fresh data from the medium.
 */
static void invalidate_sub(struct fsg_lun *curlun)
{
	struct file *filp = curlun->filp;
	struct inode *inode = filp->f_path.dentry->d_inode;
	unsigned long rc;

	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
}

/*
 * Handle SCSI VERIFY: flush dirty pages, invalidate the cache, then
 * read the requested blocks purely to detect unreadable sectors.  The
 * data itself is discarded; nothing is sent back to the host.
 */
static int do_verify(struct fsg_common *common)
{
	struct fsg_lun *curlun = common->curlun;
	u32 lba;
	u32 verification_length;
	struct fsg_buffhd *bh = common->next_buffhd_to_fill;
	loff_t file_offset, file_offset_tmp;
	u32 amount_left;
	unsigned int amount;
	ssize_t nread;

	/*
	 * Get the starting Logical Block Address and check that it's
	 * not too big.
	 */
	lba = get_unaligned_be32(&common->cmnd[2]);
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/*
	 * We allow DPO (Disable Page Out = don't save data in the
	 * cache) but we don't implement it.
	 */
	if (common->cmnd[1] & ~0x10) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	verification_length = get_unaligned_be16(&common->cmnd[7]);
	if (unlikely(verification_length == 0))
		return -EIO;		/* No default reply */

	/* Prepare to carry out the file verify */
	amount_left = verification_length << curlun->blkbits;
	file_offset = ((loff_t) lba) << curlun->blkbits;

	/* Write out all the dirty buffers before invalidating them */
	fsg_lun_fsync_sub(curlun);
	if (signal_pending(current))
		return -EINTR;

	invalidate_sub(curlun);
	if (signal_pending(current))
		return -EINTR;

	/* Just try to read the requested blocks */
	while (amount_left > 0) {
		/*
		 * Figure out how much we need to read:
		 * Try to read the remaining amount, but not more than
		 * the buffer size.
		 * And don't try to read past the end of the file.
		 */
		amount = min(amount_left, FSG_BUFLEN);
		amount = min((loff_t)amount,
			     curlun->file_length - file_offset);
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->sense_data_info =
					file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			break;
		}

		/* Perform the read */
		file_offset_tmp = file_offset;
		nread = vfs_read(curlun->filp,
				 (char __user *) bh->buf,
				 amount, &file_offset_tmp);
		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
		      (unsigned long long) file_offset,
		      (int) nread);
		if (signal_pending(current))
			return -EINTR;

		if (nread < 0) {
			LDBG(curlun, "error in file verify: %d\n", (int)nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file verify: %d/%u\n",
			     (int)nread, amount);
			/* Only count whole blocks as verified */
			nread = round_down(nread, curlun->blksize);
		}
		if (nread == 0) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->sense_data_info =
					file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			break;
		}
		file_offset += nread;
		amount_left -= nread;
	}
	return 0;
}


/*-------------------------------------------------------------------------*/

/*
 * Build a 36-byte standard INQUIRY response in bh->buf.  An unsupported
 * LUN still gets a (peripheral-qualifier 0x7f) reply rather than an error.
 */
static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = common->curlun;
	u8 *buf = (u8 *) bh->buf;

	if (!curlun) {		/* Unsupported LUNs are okay */
		common->bad_lun_okay = 1;
		memset(buf, 0, 36);
		buf[0] = 0x7f;		/* Unsupported, no device-type */
		buf[4] = 31;		/* Additional length */
		return 36;
	}

	buf[0] = curlun->cdrom ? TYPE_ROM : TYPE_DISK;
	buf[1] = curlun->removable ? 0x80 : 0;
	buf[2] = 2;		/* ANSI SCSI level 2 */
	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
	buf[4] = 31;		/* Additional length */
	buf[5] = 0;		/* No special options */
	buf[6] = 0;
	buf[7] = 0;
	memcpy(buf + 8, common->inquiry_string, sizeof common->inquiry_string);
	return 36;
}

/*
 * Build an 18-byte fixed-format REQUEST SENSE response and clear the
 * LUN's pending sense data (option a) of SCSI-2 section 7.9 below).
 */
static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = common->curlun;
	u8 *buf = (u8 *) bh->buf;
	u32 sd, sdinfo;
	int valid;

	/*
	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
	 *
	 * If a REQUEST SENSE command is received from an initiator
	 * with a pending unit attention condition (before the target
	 * generates the contingent allegiance condition), then the
	 * target shall either:
	 *   a) report any pending sense data and preserve the unit
	 *	attention condition on the logical unit, or,
	 *   b) report the unit attention condition, may discard any
	 *	pending sense data, and clear the unit attention
	 *	condition on the logical unit for that initiator.
	 *
	 * FSG normally uses option a); enable this code to use option b).
	 */
#if 0
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
	}
#endif

	if (!curlun) {		/* Unsupported LUNs are okay */
		common->bad_lun_okay = 1;
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
		sdinfo = 0;
		valid = 0;
	} else {
		sd = curlun->sense_data;
		sdinfo = curlun->sense_data_info;
		valid = curlun->info_valid << 7;
		/* Reading sense data consumes it */
		curlun->sense_data = SS_NO_SENSE;
		curlun->sense_data_info = 0;
		curlun->info_valid = 0;
	}

	memset(buf, 0, 18);
	buf[0] = valid | 0x70;			/* Valid, current error */
	buf[2] = SK(sd);
	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
	buf[7] = 18 - 8;			/* Additional sense length */
	buf[12] = ASC(sd);
	buf[13] = ASCQ(sd);
	return 18;
}

/*
 * Build the 8-byte READ CAPACITY(10) response: last LBA and block length.
 */
static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = common->curlun;
	u32 lba = get_unaligned_be32(&common->cmnd[2]);
	int pmi = common->cmnd[8];
	u8 *buf = (u8 *)bh->buf;

	/* Check the PMI and LBA fields */
	if (pmi > 1 || (pmi == 0 && lba != 0)) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
						/* Max logical block */
	put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
	return 8;
}

/*
 * Build the 8-byte CD-ROM READ HEADER response for the given LBA.
 */
static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = common->curlun;
	int msf = common->cmnd[1] & 0x02;
	u32 lba = get_unaligned_be32(&common->cmnd[2]);
	u8 *buf = (u8 *)bh->buf;

	if (common->cmnd[1] & ~0x02) {		/* Mask away MSF */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	memset(buf, 0, 8);
	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
	store_cdrom_address(&buf[4], msf, lba);
	return 8;
}

/*
 * Build a 20-byte single-track READ TOC response (one data track plus
 * the lead-out) for the emulated CD-ROM.
 */
static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = common->curlun;
	int msf = common->cmnd[1] & 0x02;
	int start_track = common->cmnd[6];
	u8 *buf = (u8 *)bh->buf;

	if ((common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
	    start_track > 1) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	memset(buf, 0, 20);
	buf[1] = (20-2);		/* TOC data length */
	buf[2] = 1;			/* First track number */
	buf[3] = 1;			/* Last track number */
	buf[5] = 0x16;			/* Data track, copying allowed */
	buf[6] = 0x01;			/* Only track is number 1 */
	store_cdrom_address(&buf[8], msf, 0);

	buf[13] = 0x16;			/* Lead-out track is data */
	buf[14] = 0xAA;			/* Lead-out track number */
	store_cdrom_address(&buf[16], msf, curlun->num_sectors);
	return 20;
}

/*
 * Handle MODE SENSE(6)/(10).  Only the Caching mode page (0x08) is
 * supported; its fields are reported as fixed, non-changeable values.
 * Returns the total mode-data length placed in bh->buf.
 */
static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = common->curlun;
	int mscmnd = common->cmnd[0];
	u8 *buf = (u8 *) bh->buf;
	u8 *buf0 = buf;
	int pc, page_code;
	int changeable_values, all_pages;
	int valid_page = 0;
	int len, limit;

	if ((common->cmnd[1] & ~0x08) != 0) {	/* Mask away DBD */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	pc = common->cmnd[2] >> 6;
	page_code = common->cmnd[2] & 0x3f;
	if (pc == 3) {
		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
		return -EINVAL;
	}
	changeable_values = (pc == 1);
	all_pages = (page_code == 0x3f);

	/*
	 * Write the mode parameter header.  Fixed values are: default
	 * medium type, no cache control (DPOFUA), and no block descriptors.
	 * The only variable value is the WriteProtect bit.  We will fill in
	 * the mode data length later.
	 */
	memset(buf, 0, 8);
	if (mscmnd == MODE_SENSE) {
		buf[2] = (curlun->ro ? 0x80 : 0x00);	/* WP, DPOFUA */
		buf += 4;
		limit = 255;
	} else {			/* MODE_SENSE_10 */
		buf[3] = (curlun->ro ? 0x80 : 0x00);	/* WP, DPOFUA */
		buf += 8;
		limit = 65535;		/* Should really be FSG_BUFLEN */
	}

	/* No block descriptors */

	/*
	 * The mode pages, in numerical order.  The only page we support
	 * is the Caching page.
	 */
	if (page_code == 0x08 || all_pages) {
		valid_page = 1;
		buf[0] = 0x08;		/* Page code */
		buf[1] = 10;		/* Page length */
		memset(buf+2, 0, 10);	/* None of the fields are changeable */

		if (!changeable_values) {
			buf[2] = 0x04;	/* Write cache enable, */
					/* Read cache not disabled */
					/* No cache retention priorities */
			put_unaligned_be16(0xffff, &buf[4]);
					/* Don't disable prefetch */
					/* Minimum prefetch = 0 */
			put_unaligned_be16(0xffff, &buf[8]);
					/* Maximum prefetch */
			put_unaligned_be16(0xffff, &buf[10]);
					/* Maximum prefetch ceiling */
		}
		buf += 12;
	}

	/*
	 * Check that a valid page was requested and the mode data length
	 * isn't too long.
	 */
	len = buf - buf0;
	if (!valid_page || len > limit) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	/* Store the mode data length */
	if (mscmnd == MODE_SENSE)
		buf0[0] = len - 1;
	else
		put_unaligned_be16(len - 2, buf0);
	return len;
}

/*
 * Handle START STOP UNIT for removable LUNs.  Start just checks the
 * medium is loaded; stop with LoEj set simulates an eject via the
 * pre/post_eject callbacks and fsg_lun_close().
 */
static int do_start_stop(struct fsg_common *common)
{
	struct fsg_lun *curlun = common->curlun;
	int loej, start;

	if (!curlun) {
		return -EINVAL;
	} else if (!curlun->removable) {
		curlun->sense_data = SS_INVALID_COMMAND;
		return -EINVAL;
	} else if ((common->cmnd[1] & ~0x01) != 0 ||	/* Mask away Immed */
		   (common->cmnd[4] & ~0x03) != 0) {	/* Mask LoEj, Start */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	loej = common->cmnd[4] & 0x02;
	start = common->cmnd[4] & 0x01;

	/*
	 * Our emulation doesn't support mounting; the medium is
	 * available for use as soon as it is loaded.
	 */
	if (start) {
		if (!fsg_lun_is_open(curlun)) {
			curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
			return -EINVAL;
		}
		return 0;
	}

	/* Are we allowed to unload the media? */
	if (curlun->prevent_medium_removal) {
		LDBG(curlun, "unload attempt prevented\n");
		curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
		return -EINVAL;
	}

	if (!loej)
		return 0;

	/* Simulate an unload/eject */
	if (common->ops && common->ops->pre_eject) {
		int r = common->ops->pre_eject(common, curlun,
					       curlun - common->luns);
		if (unlikely(r < 0))
			return r;
		else if (r)
			return 0;
	}

	/* Closing the backing file needs the write lock on filesem */
	up_read(&common->filesem);
	down_write(&common->filesem);
	fsg_lun_close(curlun);
	up_write(&common->filesem);
	down_read(&common->filesem);

	return common->ops && common->ops->post_eject
		?
		  min(0, common->ops->post_eject(common, curlun,
						 curlun - common->luns))
		: 0;
}

/*
 * Handle PREVENT-ALLOW MEDIUM REMOVAL for removable LUNs; when removal
 * becomes allowed again, flush the backing file first.
 */
static int do_prevent_allow(struct fsg_common *common)
{
	struct fsg_lun *curlun = common->curlun;
	int prevent;

	if (!common->curlun) {
		return -EINVAL;
	} else if (!common->curlun->removable) {
		common->curlun->sense_data = SS_INVALID_COMMAND;
		return -EINVAL;
	}

	prevent = common->cmnd[4] & 0x01;
	if ((common->cmnd[4] & ~0x01) != 0) {	/* Mask away Prevent */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	if (curlun->prevent_medium_removal && !prevent)
		fsg_lun_fsync_sub(curlun);
	curlun->prevent_medium_removal = prevent;
	return 0;
}

/*
 * Build a 12-byte READ FORMAT CAPACITIES response containing a single
 * Current/Maximum Capacity descriptor.
 */
static int do_read_format_capacities(struct fsg_common *common,
				     struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = common->curlun;
	u8 *buf = (u8 *) bh->buf;

	buf[0] = buf[1] = buf[2] = 0;
	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
	buf += 4;

	put_unaligned_be32(curlun->num_sectors, &buf[0]);
						/* Number of blocks */
	put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
	/*
	 * NOTE(review): this deliberately overwrites the top byte of the
	 * just-stored 32-bit block length with the descriptor-type code,
	 * presumably because block lengths fit in 24 bits -- confirm.
	 */
	buf[4] = 0x02;				/* Current capacity */
	return 12;
}

/* MODE SELECT is not supported; fail with INVALID COMMAND sense. */
static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = common->curlun;

	/* We don't support MODE SELECT */
	if (curlun)
		curlun->sense_data = SS_INVALID_COMMAND;
	return -EINVAL;
}


/*-------------------------------------------------------------------------*/

/*
 * Halt the bulk-in endpoint, retrying every 100 ms while the controller
 * reports -EAGAIN.  Other errors are logged and ignored.
 */
static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int rc;

	rc = fsg_set_halt(fsg, fsg->bulk_in);
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint halt\n");
	while (rc != 0) {
		if (rc != -EAGAIN) {
			WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
			rc = 0;
			break;
		}

		/* Wait for a short time and then try again */
		if (msleep_interruptible(100) != 0)
			return -EINTR;
		rc = usb_ep_set_halt(fsg->bulk_in);
	}
	return rc;
}

/*
 * Wedge the bulk-in endpoint (a halt the host cannot clear with
 * CLEAR_FEATURE), with the same -EAGAIN retry policy as above.
 */
static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int rc;

	DBG(fsg, "bulk-in set wedge\n");
	rc = usb_ep_set_wedge(fsg->bulk_in);
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint wedge\n");
	while (rc != 0) {
		if (rc != -EAGAIN) {
			WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
			rc = 0;
			break;
		}

		/* Wait for a short time and then try again */
		if (msleep_interruptible(100) != 0)
			return -EINTR;
		rc = usb_ep_set_wedge(fsg->bulk_in);
	}
	return rc;
}

/*
 * Drain and discard any remaining bulk-out data the host is sending,
 * queuing further requests as needed until usb_amount_left reaches 0.
 */
static int throw_away_data(struct fsg_common *common)
{
	struct fsg_buffhd *bh;
	u32 amount;
	int rc;

	for (bh = common->next_buffhd_to_drain;
	     bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
	     bh = common->next_buffhd_to_drain) {

		/* Throw away the data in a filled buffer */
		if (bh->state == BUF_STATE_FULL) {
			smp_rmb();
			bh->state = BUF_STATE_EMPTY;
			common->next_buffhd_to_drain = bh->next;

			/* A short packet or an error ends everything */
			if (bh->outreq->actual < bh->bulk_out_intended_length ||
			    bh->outreq->status != 0) {
				raise_exception(common,
						FSG_STATE_ABORT_BULK_OUT);
				return -EINTR;
			}
			continue;
		}

		/* Try to submit another request if we need one */
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY
		 && common->usb_amount_left > 0) {
			amount = min(common->usb_amount_left, FSG_BUFLEN);

			/*
			 * Except at the end of the transfer, amount will be
			 * equal to the buffer size, which is divisible by
			 * the bulk-out maxpacket size.
			 */
			set_bulk_out_req_length(common, bh, amount);
			if (!start_out_transfer(common, bh))
				/* Dunno what to do if common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
			common->usb_amount_left -= amount;
			continue;
		}

		/* Otherwise wait for something to happen */
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}
	return 0;
}

/*
 * Wind up the data stage of the current command according to the data
 * direction: send the final buffer, stall, or drain leftover host data.
 */
static int finish_reply(struct fsg_common *common)
{
	struct fsg_buffhd *bh = common->next_buffhd_to_fill;
	int rc = 0;

	switch (common->data_dir) {
	case DATA_DIR_NONE:
		break;			/* Nothing to send */

	/*
	 * If we don't know whether the host wants to read or write,
	 * this must be CB or CBI with an unknown command.  We mustn't
	 * try to send or receive any data.  So stall both bulk pipes
	 * if we can and wait for a reset.
	 */
	case DATA_DIR_UNKNOWN:
		if (!common->can_stall) {
			/* Nothing */
		} else if (fsg_is_set(common)) {
			fsg_set_halt(common->fsg, common->fsg->bulk_out);
			rc = halt_bulk_in_endpoint(common->fsg);
		} else {
			/* Don't know what to do if common->fsg is NULL */
			rc = -EIO;
		}
		break;

	/* All but the last buffer of data must have already been sent */
	case DATA_DIR_TO_HOST:
		if (common->data_size == 0) {
			/* Nothing to send */

		/* Don't know what to do if common->fsg is NULL */
		} else if (!fsg_is_set(common)) {
			rc = -EIO;

		/* If there's no residue, simply send the last buffer */
		} else if (common->residue == 0) {
			bh->inreq->zero = 0;
			if (!start_in_transfer(common, bh))
				return -EIO;
			common->next_buffhd_to_fill = bh->next;

		/*
		 * For Bulk-only, mark the end of the data with a short
		 * packet.  If we are allowed to stall, halt the bulk-in
		 * endpoint.  (Note: This violates the Bulk-Only Transport
		 * specification, which requires us to pad the data if we
		 * don't halt the endpoint.  Presumably nobody will mind.)
		 */
		} else {
			bh->inreq->zero = 1;
			if (!start_in_transfer(common, bh))
				rc = -EIO;
			common->next_buffhd_to_fill = bh->next;
			if (common->can_stall)
				rc = halt_bulk_in_endpoint(common->fsg);
		}
		break;

	/*
	 * We have processed all we want from the data the host has sent.
	 * There may still be outstanding bulk-out requests.
	 */
	case DATA_DIR_FROM_HOST:
		if (common->residue == 0) {
			/* Nothing to receive */

		/* Did the host stop sending unexpectedly early? */
		} else if (common->short_packet_received) {
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;

		/*
		 * We haven't processed all the incoming data.  Even though
		 * we may be allowed to stall, doing so would cause a race.
		 * The controller may already have ACK'ed all the remaining
		 * bulk-out packets, in which case the host wouldn't see a
		 * STALL.  Not realizing the endpoint was halted, it wouldn't
		 * clear the halt -- leading to problems later on.
		 */
#if 0
		} else if (common->can_stall) {
			if (fsg_is_set(common))
				fsg_set_halt(common->fsg,
					     common->fsg->bulk_out);
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;
#endif

		/*
		 * We can't stall.  Read in the excess data and throw it
		 * all away.
		 */
		} else {
			rc = throw_away_data(common);
		}
		break;
	}
	return rc;
}

/*
 * Build and queue the Bulk-Only CSW for the completed command, choosing
 * the status byte from phase_error and the LUN's sense data.
 */
static int send_status(struct fsg_common *common)
{
	struct fsg_lun *curlun = common->curlun;
	struct fsg_buffhd *bh;
	struct bulk_cs_wrap *csw;
	int rc;
	u8 status = US_BULK_STAT_OK;
	u32 sd, sdinfo = 0;

	/* Wait for the next buffer to become available */
	bh = common->next_buffhd_to_fill;
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	if (curlun) {
		sd = curlun->sense_data;
		sdinfo = curlun->sense_data_info;
	} else if (common->bad_lun_okay)
		sd = SS_NO_SENSE;
	else
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;

	if (common->phase_error) {
		DBG(common, "sending phase-error status\n");
		status = US_BULK_STAT_PHASE;
		sd = SS_INVALID_COMMAND;
	} else if (sd != SS_NO_SENSE) {
		DBG(common, "sending command-failure status\n");
		status = US_BULK_STAT_FAIL;
		VDBG(common, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
		     "  info x%x\n",
		     SK(sd), ASC(sd), ASCQ(sd), sdinfo);
	}

	/* Store and send the Bulk-only CSW */
	csw = (void *)bh->buf;

	csw->Signature = cpu_to_le32(US_BULK_CS_SIGN);
	csw->Tag = common->tag;
	csw->Residue = cpu_to_le32(common->residue);
	csw->Status = status;

	bh->inreq->length = US_BULK_CS_WRAP_LEN;
	bh->inreq->zero = 0;
	if (!start_in_transfer(common, bh))
		/* Don't know what to do if common->fsg is NULL */
		return -EIO;

	common->next_buffhd_to_fill = bh->next;
	return 0;
}


/*-------------------------------------------------------------------------*/

/*
 * Check whether the command is properly formed and whether its data size
 * and direction agree with the values we already have.
 */
static int check_command(struct fsg_common *common, int cmnd_size,
			 enum data_direction data_dir, unsigned int mask,
			 int needs_medium, const char *name)
{
	int i;
	int lun = common->cmnd[1] >> 5;
	static const char dirletter[4] = {'u', 'o', 'i', 'n'};
	char hdlen[20];
	struct fsg_lun *curlun;

	hdlen[0] = 0;
	if (common->data_dir != DATA_DIR_UNKNOWN)
		sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
			common->data_size);
	VDBG(common, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
	     name, cmnd_size, dirletter[(int) data_dir],
	     common->data_size_from_cmnd, common->cmnd_size, hdlen);

	/*
	 * We can't reply at all until we know the correct data direction
	 * and size.
	 */
	if (common->data_size_from_cmnd == 0)
		data_dir = DATA_DIR_NONE;
	if (common->data_size < common->data_size_from_cmnd) {
		/*
		 * Host data size < Device data size is a phase error.
		 * Carry out the command, but only transfer as much as
		 * we are allowed.
		 */
		common->data_size_from_cmnd = common->data_size;
		common->phase_error = 1;
	}
	common->residue = common->data_size;
	common->usb_amount_left = common->data_size;

	/* Conflicting data directions is a phase error */
	if (common->data_dir != data_dir && common->data_size_from_cmnd > 0) {
		common->phase_error = 1;
		return -EINVAL;
	}

	/* Verify the length of the command itself */
	if (cmnd_size != common->cmnd_size) {

		/*
		 * Special case workaround: There are plenty of buggy SCSI
		 * implementations. Many have issues with cbw->Length
		 * field passing a wrong command size. For those cases we
		 * always try to work around the problem by using the length
		 * sent by the host side provided it is at least as large
		 * as the correct command length.
		 * Examples of such cases would be MS-Windows, which issues
		 * REQUEST SENSE with cbw->Length == 12 where it should
		 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
		 * REQUEST SENSE with cbw->Length == 10 where it should
		 * be 6 as well.
		 */
		if (cmnd_size <= common->cmnd_size) {
			DBG(common, "%s is buggy! Expected length %d "
			    "but we got %d\n", name,
			    cmnd_size, common->cmnd_size);
			cmnd_size = common->cmnd_size;
		} else {
			common->phase_error = 1;
			return -EINVAL;
		}
	}

	/* Check that the LUN values are consistent */
	if (common->lun != lun)
		DBG(common, "using LUN %d from CBW, not LUN %d from CDB\n",
		    common->lun, lun);

	/* Check the LUN */
	curlun = common->curlun;
	if (curlun) {
		/* Any command except REQUEST SENSE clears stale sense data */
		if (common->cmnd[0] != REQUEST_SENSE) {
			curlun->sense_data = SS_NO_SENSE;
			curlun->sense_data_info = 0;
			curlun->info_valid = 0;
		}
	} else {
		common->bad_lun_okay = 0;

		/*
		 * INQUIRY and REQUEST SENSE commands are explicitly allowed
		 * to use unsupported LUNs; all others may not.
		 */
		if (common->cmnd[0] != INQUIRY &&
		    common->cmnd[0] != REQUEST_SENSE) {
			DBG(common, "unsupported LUN %d\n", common->lun);
			return -EINVAL;
		}
	}

	/*
	 * If a unit attention condition exists, only INQUIRY and
	 * REQUEST SENSE commands are allowed; anything else must fail.
	 */
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
	    common->cmnd[0] != INQUIRY &&
	    common->cmnd[0] != REQUEST_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
		return -EINVAL;
	}

	/* Check that only command bytes listed in the mask are non-zero */
	common->cmnd[1] &= 0x1f;			/* Mask away the LUN */
	for (i = 1; i < cmnd_size; ++i) {
		if (common->cmnd[i] && !(mask & (1 << i))) {
			if (curlun)
				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}

	/* If the medium isn't mounted and the command needs to access
	 * it, return an error. */
	if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
		return -EINVAL;
	}

	return 0;
}

/* wrapper of check_command for data size in blocks handling */
static int check_command_size_in_blocks(struct fsg_common *common,
		int cmnd_size, enum data_direction data_dir,
		unsigned int mask, int needs_medium, const char *name)
{
	/* Convert a block count from the CDB into a byte count */
	if (common->curlun)
		common->data_size_from_cmnd <<= common->curlun->blkbits;
	return check_command(common, cmnd_size, data_dir,
			     mask, needs_medium, name);
}

/*
 * Top-level SCSI command dispatcher: validates each opcode with
 * check_command() and invokes the matching do_* handler.
 */
static int do_scsi_command(struct fsg_common *common)
{
	struct fsg_buffhd *bh;
	int rc;
	int reply = -EINVAL;
	int i;
	/* Scratch name for unrecognized opcodes -- used further below */
	static char unknown[16];

	dump_cdb(common);

	/* Wait for the next buffer to become available for data or status */
	bh = common->next_buffhd_to_fill;
	common->next_buffhd_to_drain = bh;
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}
	common->phase_error = 0;
	common->short_packet_received = 0;

	down_read(&common->filesem);	/* We're using the backing file */
	switch (common->cmnd[0]) {

	case INQUIRY:
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
				      (1<<4), 0,
				      "INQUIRY");
		if (reply == 0)
			reply = do_inquiry(common, bh);
		break;

	case MODE_SELECT:
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_FROM_HOST,
				      (1<<1) | (1<<4), 0,
				      "MODE SELECT(6)");
		if (reply == 0)
			reply = do_mode_select(common, bh);
		break;

	case MODE_SELECT_10:
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_FROM_HOST,
				      (1<<1) | (3<<7), 0,
				      "MODE SELECT(10)");
		if (reply == 0)
			reply = do_mode_select(common, bh);
		break;

	case MODE_SENSE:
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
				      (1<<1) | (1<<2) | (1<<4), 0,
				      "MODE SENSE(6)");
		if (reply == 0)
			reply = do_mode_sense(common, bh);
		break;

	case MODE_SENSE_10:
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (1<<1) | (1<<2) | (3<<7), 0,
				      "MODE SENSE(10)");
		if (reply == 0)
			reply = do_mode_sense(common, bh);
		break;

	case ALLOW_MEDIUM_REMOVAL:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
				      (1<<4), 0,
				      "PREVENT-ALLOW MEDIUM REMOVAL");
		if (reply == 0)
			reply = do_prevent_allow(common);
		break;

	case READ_6:
		/* In READ(6) a transfer length of 0 means 256 blocks */
		i = common->cmnd[4];
		common->data_size_from_cmnd = (i == 0) ? 256 : i;
		reply = check_command_size_in_blocks(common, 6,
				      DATA_DIR_TO_HOST,
				      (7<<1) | (1<<4), 1,
				      "READ(6)");
		if (reply == 0)
			reply = do_read(common);
		break;

	case READ_10:
		common->data_size_from_cmnd =
				get_unaligned_be16(&common->cmnd[7]);
		reply = check_command_size_in_blocks(common, 10,
				      DATA_DIR_TO_HOST,
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "READ(10)");
		if (reply == 0)
			reply = do_read(common);
		break;

	case READ_12:
		common->data_size_from_cmnd =
				get_unaligned_be32(&common->cmnd[6]);
		reply = check_command_size_in_blocks(common, 12,
				      DATA_DIR_TO_HOST,
				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
				      "READ(12)");
		if (reply == 0)
			reply = do_read(common);
		break;

	case READ_CAPACITY:
		common->data_size_from_cmnd = 8;
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (0xf<<2) | (1<<8), 1,
				      "READ CAPACITY");
		if (reply == 0)
			reply = do_read_capacity(common, bh);
		break;

	case READ_HEADER:
		/* CD-ROM only */
		if (!common->curlun || !common->curlun->cdrom)
			goto unknown_cmnd;
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (3<<7) | (0x1f<<1), 1,
				      "READ HEADER");
		if (reply == 0)
			reply = do_read_header(common, bh);
		break;

	case READ_TOC:
		/* CD-ROM only */
		if (!common->curlun || !common->curlun->cdrom)
			goto unknown_cmnd;
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (7<<6) | (1<<1), 1,
				      "READ TOC");
		if (reply == 0)
			reply = do_read_toc(common, bh);
		break;

	case READ_FORMAT_CAPACITIES:
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (3<<7), 1,
				      "READ FORMAT CAPACITIES");
		if (reply == 0)
			reply = do_read_format_capacities(common, bh);
		break;

	case REQUEST_SENSE:
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
				      (1<<4), 0,
				      "REQUEST SENSE");
		if (reply == 0)
			reply = do_request_sense(common, bh);
		break;

	case START_STOP:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
				      (1<<1) | (1<<4), 0,
				      "START-STOP UNIT");
		if (reply == 0)
			reply = do_start_stop(common);
		break;

	case SYNCHRONIZE_CACHE:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 10, DATA_DIR_NONE,
				      (0xf<<2) | (3<<7), 1,
				      "SYNCHRONIZE CACHE");
		if (reply == 0)
			reply = do_synchronize_cache(common);
		break;

	case TEST_UNIT_READY:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
				      0, 1,
				      "TEST UNIT READY");
		break;

	/*
	 * Although optional, this command is used by MS-Windows.  We
	 * support a minimal version: BytChk must be 0.
	 */
	case VERIFY:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 10, DATA_DIR_NONE,
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "VERIFY");
		if (reply == 0)
			reply = do_verify(common);
		break;

	case WRITE_6:
		/* In WRITE(6) a transfer length of 0 means 256 blocks */
		i = common->cmnd[4];
		common->data_size_from_cmnd = (i == 0) ? 256 : i;
		reply = check_command_size_in_blocks(common, 6,
				      DATA_DIR_FROM_HOST,
				      (7<<1) | (1<<4), 1,
				      "WRITE(6)");
		if (reply == 0)
			reply = do_write(common);
		break;

	case WRITE_10:
		common->data_size_from_cmnd =
				get_unaligned_be16(&common->cmnd[7]);
		reply = check_command_size_in_blocks(common, 10,
				      DATA_DIR_FROM_HOST,
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "WRITE(10)");
		if (reply == 0)
			reply = do_write(common);
		break;

	case WRITE_12:
		common->data_size_from_cmnd =
				get_unaligned_be32(&common->cmnd[6]);
		reply = check_command_size_in_blocks(common, 12,
				      DATA_DIR_FROM_HOST,
				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
				      "WRITE(12)");
		if (reply == 0)
			reply = do_write(common);
		break;

	/*
	 * Some mandatory commands that we recognize but don't implement.
	 * They don't mean much in this setting.  It's left as an exercise
	 * for anyone interested to implement RESERVE and RELEASE in terms
	 * of Posix locks.
2180 */ 2181 case FORMAT_UNIT: 2182 case RELEASE: 2183 case RESERVE: 2184 case SEND_DIAGNOSTIC: 2185 /* Fall through */ 2186 2187 default: 2188unknown_cmnd: 2189 common->data_size_from_cmnd = 0; 2190 sprintf(unknown, "Unknown x%02x", common->cmnd[0]); 2191 reply = check_command(common, common->cmnd_size, 2192 DATA_DIR_UNKNOWN, ~0, 0, unknown); 2193 if (reply == 0) { 2194 common->curlun->sense_data = SS_INVALID_COMMAND; 2195 reply = -EINVAL; 2196 } 2197 break; 2198 } 2199 up_read(&common->filesem); 2200 2201 if (reply == -EINTR || signal_pending(current)) 2202 return -EINTR; 2203 2204 /* Set up the single reply buffer for finish_reply() */ 2205 if (reply == -EINVAL) 2206 reply = 0; /* Error reply length */ 2207 if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) { 2208 reply = min((u32)reply, common->data_size_from_cmnd); 2209 bh->inreq->length = reply; 2210 bh->state = BUF_STATE_FULL; 2211 common->residue -= reply; 2212 } /* Otherwise it's already set */ 2213 2214 return 0; 2215} 2216 2217 2218/*-------------------------------------------------------------------------*/ 2219 2220static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh) 2221{ 2222 struct usb_request *req = bh->outreq; 2223 struct bulk_cb_wrap *cbw = req->buf; 2224 struct fsg_common *common = fsg->common; 2225 2226 /* Was this a real packet? Should it be ignored? */ 2227 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags)) 2228 return -EINVAL; 2229 2230 /* Is the CBW valid? */ 2231 if (req->actual != US_BULK_CB_WRAP_LEN || 2232 cbw->Signature != cpu_to_le32( 2233 US_BULK_CB_SIGN)) { 2234 DBG(fsg, "invalid CBW: len %u sig 0x%x\n", 2235 req->actual, 2236 le32_to_cpu(cbw->Signature)); 2237 2238 /* 2239 * The Bulk-only spec says we MUST stall the IN endpoint 2240 * (6.6.1), so it's unavoidable. It also says we must 2241 * retain this state until the next reset, but there's 2242 * no way to tell the controller driver it should ignore 2243 * Clear-Feature(HALT) requests. 
2244 * 2245 * We aren't required to halt the OUT endpoint; instead 2246 * we can simply accept and discard any data received 2247 * until the next reset. 2248 */ 2249 wedge_bulk_in_endpoint(fsg); 2250 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); 2251 return -EINVAL; 2252 } 2253 2254 /* Is the CBW meaningful? */ 2255 if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~US_BULK_FLAG_IN || 2256 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) { 2257 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, " 2258 "cmdlen %u\n", 2259 cbw->Lun, cbw->Flags, cbw->Length); 2260 2261 /* 2262 * We can do anything we want here, so let's stall the 2263 * bulk pipes if we are allowed to. 2264 */ 2265 if (common->can_stall) { 2266 fsg_set_halt(fsg, fsg->bulk_out); 2267 halt_bulk_in_endpoint(fsg); 2268 } 2269 return -EINVAL; 2270 } 2271 2272 /* Save the command for later */ 2273 common->cmnd_size = cbw->Length; 2274 memcpy(common->cmnd, cbw->CDB, common->cmnd_size); 2275 if (cbw->Flags & US_BULK_FLAG_IN) 2276 common->data_dir = DATA_DIR_TO_HOST; 2277 else 2278 common->data_dir = DATA_DIR_FROM_HOST; 2279 common->data_size = le32_to_cpu(cbw->DataTransferLength); 2280 if (common->data_size == 0) 2281 common->data_dir = DATA_DIR_NONE; 2282 common->lun = cbw->Lun; 2283 if (common->lun >= 0 && common->lun < common->nluns) 2284 common->curlun = &common->luns[common->lun]; 2285 else 2286 common->curlun = NULL; 2287 common->tag = cbw->Tag; 2288 return 0; 2289} 2290 2291static int get_next_command(struct fsg_common *common) 2292{ 2293 struct fsg_buffhd *bh; 2294 int rc = 0; 2295 2296 /* Wait for the next buffer to become available */ 2297 bh = common->next_buffhd_to_fill; 2298 while (bh->state != BUF_STATE_EMPTY) { 2299 rc = sleep_thread(common); 2300 if (rc) 2301 return rc; 2302 } 2303 2304 /* Queue a request to read a Bulk-only CBW */ 2305 set_bulk_out_req_length(common, bh, US_BULK_CB_WRAP_LEN); 2306 if (!start_out_transfer(common, bh)) 2307 /* Don't know what to do if common->fsg is NULL 
*/ 2308 return -EIO; 2309 2310 /* 2311 * We will drain the buffer in software, which means we 2312 * can reuse it for the next filling. No need to advance 2313 * next_buffhd_to_fill. 2314 */ 2315 2316 /* Wait for the CBW to arrive */ 2317 while (bh->state != BUF_STATE_FULL) { 2318 rc = sleep_thread(common); 2319 if (rc) 2320 return rc; 2321 } 2322 smp_rmb(); 2323 rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO; 2324 bh->state = BUF_STATE_EMPTY; 2325 2326 return rc; 2327} 2328 2329 2330/*-------------------------------------------------------------------------*/ 2331 2332static int alloc_request(struct fsg_common *common, struct usb_ep *ep, 2333 struct usb_request **preq) 2334{ 2335 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC); 2336 if (*preq) 2337 return 0; 2338 ERROR(common, "can't allocate request for %s\n", ep->name); 2339 return -ENOMEM; 2340} 2341 2342/* Reset interface setting and re-init endpoint state (toggle etc). */ 2343static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg) 2344{ 2345 struct fsg_dev *fsg; 2346 int i, rc = 0; 2347 2348 if (common->running) 2349 DBG(common, "reset interface\n"); 2350 2351reset: 2352 /* Deallocate the requests */ 2353 if (common->fsg) { 2354 fsg = common->fsg; 2355 2356 for (i = 0; i < fsg_num_buffers; ++i) { 2357 struct fsg_buffhd *bh = &common->buffhds[i]; 2358 2359 if (bh->inreq) { 2360 usb_ep_free_request(fsg->bulk_in, bh->inreq); 2361 bh->inreq = NULL; 2362 } 2363 if (bh->outreq) { 2364 usb_ep_free_request(fsg->bulk_out, bh->outreq); 2365 bh->outreq = NULL; 2366 } 2367 } 2368 2369 /* Disable the endpoints */ 2370 if (fsg->bulk_in_enabled) { 2371 usb_ep_disable(fsg->bulk_in); 2372 fsg->bulk_in_enabled = 0; 2373 } 2374 if (fsg->bulk_out_enabled) { 2375 usb_ep_disable(fsg->bulk_out); 2376 fsg->bulk_out_enabled = 0; 2377 } 2378 2379 common->fsg = NULL; 2380 wake_up(&common->fsg_wait); 2381 } 2382 2383 common->running = 0; 2384 if (!new_fsg || rc) 2385 return rc; 2386 2387 common->fsg 
= new_fsg; 2388 fsg = common->fsg; 2389 2390 /* Enable the endpoints */ 2391 rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in); 2392 if (rc) 2393 goto reset; 2394 rc = usb_ep_enable(fsg->bulk_in); 2395 if (rc) 2396 goto reset; 2397 fsg->bulk_in->driver_data = common; 2398 fsg->bulk_in_enabled = 1; 2399 2400 rc = config_ep_by_speed(common->gadget, &(fsg->function), 2401 fsg->bulk_out); 2402 if (rc) 2403 goto reset; 2404 rc = usb_ep_enable(fsg->bulk_out); 2405 if (rc) 2406 goto reset; 2407 fsg->bulk_out->driver_data = common; 2408 fsg->bulk_out_enabled = 1; 2409 common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc); 2410 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); 2411 2412 /* Allocate the requests */ 2413 for (i = 0; i < fsg_num_buffers; ++i) { 2414 struct fsg_buffhd *bh = &common->buffhds[i]; 2415 2416 rc = alloc_request(common, fsg->bulk_in, &bh->inreq); 2417 if (rc) 2418 goto reset; 2419 rc = alloc_request(common, fsg->bulk_out, &bh->outreq); 2420 if (rc) 2421 goto reset; 2422 bh->inreq->buf = bh->outreq->buf = bh->buf; 2423 bh->inreq->context = bh->outreq->context = bh; 2424 bh->inreq->complete = bulk_in_complete; 2425 bh->outreq->complete = bulk_out_complete; 2426 } 2427 2428 common->running = 1; 2429 for (i = 0; i < common->nluns; ++i) 2430 common->luns[i].unit_attention_data = SS_RESET_OCCURRED; 2431 return rc; 2432} 2433 2434 2435/****************************** ALT CONFIGS ******************************/ 2436 2437static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) 2438{ 2439 struct fsg_dev *fsg = fsg_from_func(f); 2440 fsg->common->new_fsg = fsg; 2441 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); 2442 return USB_GADGET_DELAYED_STATUS; 2443} 2444 2445static void fsg_disable(struct usb_function *f) 2446{ 2447 struct fsg_dev *fsg = fsg_from_func(f); 2448 fsg->common->new_fsg = NULL; 2449 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); 2450} 2451 2452 
2453/*-------------------------------------------------------------------------*/ 2454 2455static void handle_exception(struct fsg_common *common) 2456{ 2457 siginfo_t info; 2458 int i; 2459 struct fsg_buffhd *bh; 2460 enum fsg_state old_state; 2461 struct fsg_lun *curlun; 2462 unsigned int exception_req_tag; 2463 2464 /* 2465 * Clear the existing signals. Anything but SIGUSR1 is converted 2466 * into a high-priority EXIT exception. 2467 */ 2468 for (;;) { 2469 int sig = 2470 dequeue_signal_lock(current, ¤t->blocked, &info); 2471 if (!sig) 2472 break; 2473 if (sig != SIGUSR1) { 2474 if (common->state < FSG_STATE_EXIT) 2475 DBG(common, "Main thread exiting on signal\n"); 2476 raise_exception(common, FSG_STATE_EXIT); 2477 } 2478 } 2479 2480 /* Cancel all the pending transfers */ 2481 if (likely(common->fsg)) { 2482 for (i = 0; i < fsg_num_buffers; ++i) { 2483 bh = &common->buffhds[i]; 2484 if (bh->inreq_busy) 2485 usb_ep_dequeue(common->fsg->bulk_in, bh->inreq); 2486 if (bh->outreq_busy) 2487 usb_ep_dequeue(common->fsg->bulk_out, 2488 bh->outreq); 2489 } 2490 2491 /* Wait until everything is idle */ 2492 for (;;) { 2493 int num_active = 0; 2494 for (i = 0; i < fsg_num_buffers; ++i) { 2495 bh = &common->buffhds[i]; 2496 num_active += bh->inreq_busy + bh->outreq_busy; 2497 } 2498 if (num_active == 0) 2499 break; 2500 if (sleep_thread(common)) 2501 return; 2502 } 2503 2504 /* Clear out the controller's fifos */ 2505 if (common->fsg->bulk_in_enabled) 2506 usb_ep_fifo_flush(common->fsg->bulk_in); 2507 if (common->fsg->bulk_out_enabled) 2508 usb_ep_fifo_flush(common->fsg->bulk_out); 2509 } 2510 2511 /* 2512 * Reset the I/O buffer states and pointers, the SCSI 2513 * state, and the exception. Then invoke the handler. 
2514 */ 2515 spin_lock_irq(&common->lock); 2516 2517 for (i = 0; i < fsg_num_buffers; ++i) { 2518 bh = &common->buffhds[i]; 2519 bh->state = BUF_STATE_EMPTY; 2520 } 2521 common->next_buffhd_to_fill = &common->buffhds[0]; 2522 common->next_buffhd_to_drain = &common->buffhds[0]; 2523 exception_req_tag = common->exception_req_tag; 2524 old_state = common->state; 2525 2526 if (old_state == FSG_STATE_ABORT_BULK_OUT) 2527 common->state = FSG_STATE_STATUS_PHASE; 2528 else { 2529 for (i = 0; i < common->nluns; ++i) { 2530 curlun = &common->luns[i]; 2531 curlun->prevent_medium_removal = 0; 2532 curlun->sense_data = SS_NO_SENSE; 2533 curlun->unit_attention_data = SS_NO_SENSE; 2534 curlun->sense_data_info = 0; 2535 curlun->info_valid = 0; 2536 } 2537 common->state = FSG_STATE_IDLE; 2538 } 2539 spin_unlock_irq(&common->lock); 2540 2541 /* Carry out any extra actions required for the exception */ 2542 switch (old_state) { 2543 case FSG_STATE_ABORT_BULK_OUT: 2544 send_status(common); 2545 spin_lock_irq(&common->lock); 2546 if (common->state == FSG_STATE_STATUS_PHASE) 2547 common->state = FSG_STATE_IDLE; 2548 spin_unlock_irq(&common->lock); 2549 break; 2550 2551 case FSG_STATE_RESET: 2552 /* 2553 * In case we were forced against our will to halt a 2554 * bulk endpoint, clear the halt now. (The SuperH UDC 2555 * requires this.) 2556 */ 2557 if (!fsg_is_set(common)) 2558 break; 2559 if (test_and_clear_bit(IGNORE_BULK_OUT, 2560 &common->fsg->atomic_bitflags)) 2561 usb_ep_clear_halt(common->fsg->bulk_in); 2562 2563 if (common->ep0_req_tag == exception_req_tag) 2564 ep0_queue(common); /* Complete the status stage */ 2565 2566 /* 2567 * Technically this should go here, but it would only be 2568 * a waste of time. Ditto for the INTERFACE_CHANGE and 2569 * CONFIG_CHANGE cases. 
2570 */ 2571 /* for (i = 0; i < common->nluns; ++i) */ 2572 /* common->luns[i].unit_attention_data = */ 2573 /* SS_RESET_OCCURRED; */ 2574 break; 2575 2576 case FSG_STATE_CONFIG_CHANGE: 2577 do_set_interface(common, common->new_fsg); 2578 if (common->new_fsg) 2579 usb_composite_setup_continue(common->cdev); 2580 break; 2581 2582 case FSG_STATE_EXIT: 2583 case FSG_STATE_TERMINATED: 2584 do_set_interface(common, NULL); /* Free resources */ 2585 spin_lock_irq(&common->lock); 2586 common->state = FSG_STATE_TERMINATED; /* Stop the thread */ 2587 spin_unlock_irq(&common->lock); 2588 break; 2589 2590 case FSG_STATE_INTERFACE_CHANGE: 2591 case FSG_STATE_DISCONNECT: 2592 case FSG_STATE_COMMAND_PHASE: 2593 case FSG_STATE_DATA_PHASE: 2594 case FSG_STATE_STATUS_PHASE: 2595 case FSG_STATE_IDLE: 2596 break; 2597 } 2598} 2599 2600 2601/*-------------------------------------------------------------------------*/ 2602 2603static int fsg_main_thread(void *common_) 2604{ 2605 struct fsg_common *common = common_; 2606 2607 /* 2608 * Allow the thread to be killed by a signal, but set the signal mask 2609 * to block everything but INT, TERM, KILL, and USR1. 2610 */ 2611 allow_signal(SIGINT); 2612 allow_signal(SIGTERM); 2613 allow_signal(SIGKILL); 2614 allow_signal(SIGUSR1); 2615 2616 /* Allow the thread to be frozen */ 2617 set_freezable(); 2618 2619 /* 2620 * Arrange for userspace references to be interpreted as kernel 2621 * pointers. That way we can pass a kernel pointer to a routine 2622 * that expects a __user pointer and it will work okay. 
2623 */ 2624 set_fs(get_ds()); 2625 2626 /* The main loop */ 2627 while (common->state != FSG_STATE_TERMINATED) { 2628 if (exception_in_progress(common) || signal_pending(current)) { 2629 handle_exception(common); 2630 continue; 2631 } 2632 2633 if (!common->running) { 2634 sleep_thread(common); 2635 continue; 2636 } 2637 2638 if (get_next_command(common)) 2639 continue; 2640 2641 spin_lock_irq(&common->lock); 2642 if (!exception_in_progress(common)) 2643 common->state = FSG_STATE_DATA_PHASE; 2644 spin_unlock_irq(&common->lock); 2645 2646 if (do_scsi_command(common) || finish_reply(common)) 2647 continue; 2648 2649 spin_lock_irq(&common->lock); 2650 if (!exception_in_progress(common)) 2651 common->state = FSG_STATE_STATUS_PHASE; 2652 spin_unlock_irq(&common->lock); 2653 2654 if (send_status(common)) 2655 continue; 2656 2657 spin_lock_irq(&common->lock); 2658 if (!exception_in_progress(common)) 2659 common->state = FSG_STATE_IDLE; 2660 spin_unlock_irq(&common->lock); 2661 } 2662 2663 spin_lock_irq(&common->lock); 2664 common->thread_task = NULL; 2665 spin_unlock_irq(&common->lock); 2666 2667 if (!common->ops || !common->ops->thread_exits 2668 || common->ops->thread_exits(common) < 0) { 2669 struct fsg_lun *curlun = common->luns; 2670 unsigned i = common->nluns; 2671 2672 down_write(&common->filesem); 2673 for (; i--; ++curlun) { 2674 if (!fsg_lun_is_open(curlun)) 2675 continue; 2676 2677 fsg_lun_close(curlun); 2678 curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT; 2679 } 2680 up_write(&common->filesem); 2681 } 2682 2683 /* Let fsg_unbind() know the thread has exited */ 2684 complete_and_exit(&common->thread_notifier, 0); 2685} 2686 2687 2688/*************************** DEVICE ATTRIBUTES ***************************/ 2689 2690/* Write permission is checked per LUN in store_*() functions. 
*/ 2691static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro); 2692static DEVICE_ATTR(nofua, 0644, fsg_show_nofua, fsg_store_nofua); 2693static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file); 2694 2695 2696/****************************** FSG COMMON ******************************/ 2697 2698static void fsg_common_release(struct kref *ref); 2699 2700static void fsg_lun_release(struct device *dev) 2701{ 2702 /* Nothing needs to be done */ 2703} 2704 2705static inline void fsg_common_get(struct fsg_common *common) 2706{ 2707 kref_get(&common->ref); 2708} 2709 2710static inline void fsg_common_put(struct fsg_common *common) 2711{ 2712 kref_put(&common->ref, fsg_common_release); 2713} 2714 2715static struct fsg_common *fsg_common_init(struct fsg_common *common, 2716 struct usb_composite_dev *cdev, 2717 struct fsg_config *cfg) 2718{ 2719 struct usb_gadget *gadget = cdev->gadget; 2720 struct fsg_buffhd *bh; 2721 struct fsg_lun *curlun; 2722 struct fsg_lun_config *lcfg; 2723 int nluns, i, rc; 2724 char *pathbuf; 2725 2726 rc = fsg_num_buffers_validate(); 2727 if (rc != 0) 2728 return ERR_PTR(rc); 2729 2730 /* Find out how many LUNs there should be */ 2731 nluns = cfg->nluns; 2732 if (nluns < 1 || nluns > FSG_MAX_LUNS) { 2733 dev_err(&gadget->dev, "invalid number of LUNs: %u\n", nluns); 2734 return ERR_PTR(-EINVAL); 2735 } 2736 2737 /* Allocate? 
*/ 2738 if (!common) { 2739 common = kzalloc(sizeof *common, GFP_KERNEL); 2740 if (!common) 2741 return ERR_PTR(-ENOMEM); 2742 common->free_storage_on_release = 1; 2743 } else { 2744 memset(common, 0, sizeof *common); 2745 common->free_storage_on_release = 0; 2746 } 2747 2748 common->buffhds = kcalloc(fsg_num_buffers, 2749 sizeof *(common->buffhds), GFP_KERNEL); 2750 if (!common->buffhds) { 2751 if (common->free_storage_on_release) 2752 kfree(common); 2753 return ERR_PTR(-ENOMEM); 2754 } 2755 2756 common->ops = cfg->ops; 2757 common->private_data = cfg->private_data; 2758 2759 common->gadget = gadget; 2760 common->ep0 = gadget->ep0; 2761 common->ep0req = cdev->req; 2762 common->cdev = cdev; 2763 2764 /* Maybe allocate device-global string IDs, and patch descriptors */ 2765 if (fsg_strings[FSG_STRING_INTERFACE].id == 0) { 2766 rc = usb_string_id(cdev); 2767 if (unlikely(rc < 0)) 2768 goto error_release; 2769 fsg_strings[FSG_STRING_INTERFACE].id = rc; 2770 fsg_intf_desc.iInterface = rc; 2771 } 2772 2773 /* 2774 * Create the LUNs, open their backing files, and register the 2775 * LUN devices in sysfs. 2776 */ 2777 curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL); 2778 if (unlikely(!curlun)) { 2779 rc = -ENOMEM; 2780 goto error_release; 2781 } 2782 common->luns = curlun; 2783 2784 init_rwsem(&common->filesem); 2785 2786 for (i = 0, lcfg = cfg->luns; i < nluns; ++i, ++curlun, ++lcfg) { 2787 curlun->cdrom = !!lcfg->cdrom; 2788 curlun->ro = lcfg->cdrom || lcfg->ro; 2789 curlun->initially_ro = curlun->ro; 2790 curlun->removable = lcfg->removable; 2791 curlun->dev.release = fsg_lun_release; 2792 curlun->dev.parent = &gadget->dev; 2793 /* curlun->dev.driver = &fsg_driver.driver; XXX */ 2794 dev_set_drvdata(&curlun->dev, &common->filesem); 2795 dev_set_name(&curlun->dev, 2796 cfg->lun_name_format 2797 ? 
cfg->lun_name_format 2798 : "lun%d", 2799 i); 2800 2801 rc = device_register(&curlun->dev); 2802 if (rc) { 2803 INFO(common, "failed to register LUN%d: %d\n", i, rc); 2804 common->nluns = i; 2805 put_device(&curlun->dev); 2806 goto error_release; 2807 } 2808 2809 rc = device_create_file(&curlun->dev, &dev_attr_ro); 2810 if (rc) 2811 goto error_luns; 2812 rc = device_create_file(&curlun->dev, &dev_attr_file); 2813 if (rc) 2814 goto error_luns; 2815 rc = device_create_file(&curlun->dev, &dev_attr_nofua); 2816 if (rc) 2817 goto error_luns; 2818 2819 if (lcfg->filename) { 2820 rc = fsg_lun_open(curlun, lcfg->filename); 2821 if (rc) 2822 goto error_luns; 2823 } else if (!curlun->removable) { 2824 ERROR(common, "no file given for LUN%d\n", i); 2825 rc = -EINVAL; 2826 goto error_luns; 2827 } 2828 } 2829 common->nluns = nluns; 2830 2831 /* Data buffers cyclic list */ 2832 bh = common->buffhds; 2833 i = fsg_num_buffers; 2834 goto buffhds_first_it; 2835 do { 2836 bh->next = bh + 1; 2837 ++bh; 2838buffhds_first_it: 2839 bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL); 2840 if (unlikely(!bh->buf)) { 2841 rc = -ENOMEM; 2842 goto error_release; 2843 } 2844 } while (--i); 2845 bh->next = common->buffhds; 2846 2847 /* Prepare inquiryString */ 2848 if (cfg->release != 0xffff) { 2849 i = cfg->release; 2850 } else { 2851 i = usb_gadget_controller_number(gadget); 2852 if (i >= 0) { 2853 i = 0x0300 + i; 2854 } else { 2855 WARNING(common, "controller '%s' not recognized\n", 2856 gadget->name); 2857 i = 0x0399; 2858 } 2859 } 2860 snprintf(common->inquiry_string, sizeof common->inquiry_string, 2861 "%-8s%-16s%04x", cfg->vendor_name ?: "Linux", 2862 /* Assume product name dependent on the first LUN */ 2863 cfg->product_name ?: (common->luns->cdrom 2864 ? "File-Stor Gadget" 2865 : "File-CD Gadget"), 2866 i); 2867 2868 /* 2869 * Some peripheral controllers are known not to be able to 2870 * halt bulk endpoints correctly. If one of them is present, 2871 * disable stalls. 
2872 */ 2873 common->can_stall = cfg->can_stall && 2874 !(gadget_is_at91(common->gadget)); 2875 2876 spin_lock_init(&common->lock); 2877 kref_init(&common->ref); 2878 2879 /* Tell the thread to start working */ 2880 common->thread_task = 2881 kthread_create(fsg_main_thread, common, 2882 cfg->thread_name ?: "file-storage"); 2883 if (IS_ERR(common->thread_task)) { 2884 rc = PTR_ERR(common->thread_task); 2885 goto error_release; 2886 } 2887 init_completion(&common->thread_notifier); 2888 init_waitqueue_head(&common->fsg_wait); 2889 2890 /* Information */ 2891 INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n"); 2892 INFO(common, "Number of LUNs=%d\n", common->nluns); 2893 2894 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); 2895 for (i = 0, nluns = common->nluns, curlun = common->luns; 2896 i < nluns; 2897 ++curlun, ++i) { 2898 char *p = "(no medium)"; 2899 if (fsg_lun_is_open(curlun)) { 2900 p = "(error)"; 2901 if (pathbuf) { 2902 p = d_path(&curlun->filp->f_path, 2903 pathbuf, PATH_MAX); 2904 if (IS_ERR(p)) 2905 p = "(error)"; 2906 } 2907 } 2908 LINFO(curlun, "LUN: %s%s%sfile: %s\n", 2909 curlun->removable ? "removable " : "", 2910 curlun->ro ? "read only " : "", 2911 curlun->cdrom ? "CD-ROM " : "", 2912 p); 2913 } 2914 kfree(pathbuf); 2915 2916 DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task)); 2917 2918 wake_up_process(common->thread_task); 2919 2920 return common; 2921 2922error_luns: 2923 common->nluns = i + 1; 2924error_release: 2925 common->state = FSG_STATE_TERMINATED; /* The thread is dead */ 2926 /* Call fsg_common_release() directly, ref might be not initialised. 
*/ 2927 fsg_common_release(&common->ref); 2928 return ERR_PTR(rc); 2929} 2930 2931static void fsg_common_release(struct kref *ref) 2932{ 2933 struct fsg_common *common = container_of(ref, struct fsg_common, ref); 2934 2935 /* If the thread isn't already dead, tell it to exit now */ 2936 if (common->state != FSG_STATE_TERMINATED) { 2937 raise_exception(common, FSG_STATE_EXIT); 2938 wait_for_completion(&common->thread_notifier); 2939 } 2940 2941 if (likely(common->luns)) { 2942 struct fsg_lun *lun = common->luns; 2943 unsigned i = common->nluns; 2944 2945 /* In error recovery common->nluns may be zero. */ 2946 for (; i; --i, ++lun) { 2947 device_remove_file(&lun->dev, &dev_attr_nofua); 2948 device_remove_file(&lun->dev, &dev_attr_ro); 2949 device_remove_file(&lun->dev, &dev_attr_file); 2950 fsg_lun_close(lun); 2951 device_unregister(&lun->dev); 2952 } 2953 2954 kfree(common->luns); 2955 } 2956 2957 { 2958 struct fsg_buffhd *bh = common->buffhds; 2959 unsigned i = fsg_num_buffers; 2960 do { 2961 kfree(bh->buf); 2962 } while (++bh, --i); 2963 } 2964 2965 kfree(common->buffhds); 2966 if (common->free_storage_on_release) 2967 kfree(common); 2968} 2969 2970 2971/*-------------------------------------------------------------------------*/ 2972 2973static void fsg_unbind(struct usb_configuration *c, struct usb_function *f) 2974{ 2975 struct fsg_dev *fsg = fsg_from_func(f); 2976 struct fsg_common *common = fsg->common; 2977 2978 DBG(fsg, "unbind\n"); 2979 if (fsg->common->fsg == fsg) { 2980 fsg->common->new_fsg = NULL; 2981 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); 2982 /* FIXME: make interruptible or killable somehow? 
*/ 2983 wait_event(common->fsg_wait, common->fsg != fsg); 2984 } 2985 2986 fsg_common_put(common); 2987 usb_free_descriptors(fsg->function.descriptors); 2988 usb_free_descriptors(fsg->function.hs_descriptors); 2989 usb_free_descriptors(fsg->function.ss_descriptors); 2990 kfree(fsg); 2991} 2992 2993static int fsg_bind(struct usb_configuration *c, struct usb_function *f) 2994{ 2995 struct fsg_dev *fsg = fsg_from_func(f); 2996 struct usb_gadget *gadget = c->cdev->gadget; 2997 int i; 2998 struct usb_ep *ep; 2999 3000 fsg->gadget = gadget; 3001 3002 /* New interface */ 3003 i = usb_interface_id(c, f); 3004 if (i < 0) 3005 return i; 3006 fsg_intf_desc.bInterfaceNumber = i; 3007 fsg->interface_number = i; 3008 3009 /* Find all the endpoints we will use */ 3010 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc); 3011 if (!ep) 3012 goto autoconf_fail; 3013 ep->driver_data = fsg->common; /* claim the endpoint */ 3014 fsg->bulk_in = ep; 3015 3016 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc); 3017 if (!ep) 3018 goto autoconf_fail; 3019 ep->driver_data = fsg->common; /* claim the endpoint */ 3020 fsg->bulk_out = ep; 3021 3022 /* Copy descriptors */ 3023 f->descriptors = usb_copy_descriptors(fsg_fs_function); 3024 if (unlikely(!f->descriptors)) 3025 return -ENOMEM; 3026 3027 if (gadget_is_dualspeed(gadget)) { 3028 /* Assume endpoint addresses are the same for both speeds */ 3029 fsg_hs_bulk_in_desc.bEndpointAddress = 3030 fsg_fs_bulk_in_desc.bEndpointAddress; 3031 fsg_hs_bulk_out_desc.bEndpointAddress = 3032 fsg_fs_bulk_out_desc.bEndpointAddress; 3033 f->hs_descriptors = usb_copy_descriptors(fsg_hs_function); 3034 if (unlikely(!f->hs_descriptors)) { 3035 usb_free_descriptors(f->descriptors); 3036 return -ENOMEM; 3037 } 3038 } 3039 3040 if (gadget_is_superspeed(gadget)) { 3041 unsigned max_burst; 3042 3043 /* Calculate bMaxBurst, we know packet size is 1024 */ 3044 max_burst = min_t(unsigned, FSG_BUFLEN / 1024, 15); 3045 3046 fsg_ss_bulk_in_desc.bEndpointAddress = 3047 
fsg_fs_bulk_in_desc.bEndpointAddress; 3048 fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst; 3049 3050 fsg_ss_bulk_out_desc.bEndpointAddress = 3051 fsg_fs_bulk_out_desc.bEndpointAddress; 3052 fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst; 3053 3054 f->ss_descriptors = usb_copy_descriptors(fsg_ss_function); 3055 if (unlikely(!f->ss_descriptors)) { 3056 usb_free_descriptors(f->hs_descriptors); 3057 usb_free_descriptors(f->descriptors); 3058 return -ENOMEM; 3059 } 3060 } 3061 3062 return 0; 3063 3064autoconf_fail: 3065 ERROR(fsg, "unable to autoconfigure all endpoints\n"); 3066 return -ENOTSUPP; 3067} 3068 3069 3070/****************************** ADD FUNCTION ******************************/ 3071 3072static struct usb_gadget_strings *fsg_strings_array[] = { 3073 &fsg_stringtab, 3074 NULL, 3075}; 3076 3077static int fsg_bind_config(struct usb_composite_dev *cdev, 3078 struct usb_configuration *c, 3079 struct fsg_common *common) 3080{ 3081 struct fsg_dev *fsg; 3082 int rc; 3083 3084 fsg = kzalloc(sizeof *fsg, GFP_KERNEL); 3085 if (unlikely(!fsg)) 3086 return -ENOMEM; 3087 3088 fsg->function.name = FSG_DRIVER_DESC; 3089 fsg->function.strings = fsg_strings_array; 3090 fsg->function.bind = fsg_bind; 3091 fsg->function.unbind = fsg_unbind; 3092 fsg->function.setup = fsg_setup; 3093 fsg->function.set_alt = fsg_set_alt; 3094 fsg->function.disable = fsg_disable; 3095 3096 fsg->common = common; 3097 /* 3098 * Our caller holds a reference to common structure so we 3099 * don't have to be worry about it being freed until we return 3100 * from this function. So instead of incrementing counter now 3101 * and decrement in error recovery we increment it only when 3102 * call to usb_add_function() was successful. 
3103 */ 3104 3105 rc = usb_add_function(c, &fsg->function); 3106 if (unlikely(rc)) 3107 kfree(fsg); 3108 else 3109 fsg_common_get(fsg->common); 3110 return rc; 3111} 3112 3113static inline int __deprecated __maybe_unused 3114fsg_add(struct usb_composite_dev *cdev, struct usb_configuration *c, 3115 struct fsg_common *common) 3116{ 3117 return fsg_bind_config(cdev, c, common); 3118} 3119 3120 3121/************************* Module parameters *************************/ 3122 3123struct fsg_module_parameters { 3124 char *file[FSG_MAX_LUNS]; 3125 bool ro[FSG_MAX_LUNS]; 3126 bool removable[FSG_MAX_LUNS]; 3127 bool cdrom[FSG_MAX_LUNS]; 3128 bool nofua[FSG_MAX_LUNS]; 3129 3130 unsigned int file_count, ro_count, removable_count, cdrom_count; 3131 unsigned int nofua_count; 3132 unsigned int luns; /* nluns */ 3133 bool stall; /* can_stall */ 3134}; 3135 3136#define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc) \ 3137 module_param_array_named(prefix ## name, params.name, type, \ 3138 &prefix ## params.name ## _count, \ 3139 S_IRUGO); \ 3140 MODULE_PARM_DESC(prefix ## name, desc) 3141 3142#define _FSG_MODULE_PARAM(prefix, params, name, type, desc) \ 3143 module_param_named(prefix ## name, params.name, type, \ 3144 S_IRUGO); \ 3145 MODULE_PARM_DESC(prefix ## name, desc) 3146 3147#define FSG_MODULE_PARAMETERS(prefix, params) \ 3148 _FSG_MODULE_PARAM_ARRAY(prefix, params, file, charp, \ 3149 "names of backing files or devices"); \ 3150 _FSG_MODULE_PARAM_ARRAY(prefix, params, ro, bool, \ 3151 "true to force read-only"); \ 3152 _FSG_MODULE_PARAM_ARRAY(prefix, params, removable, bool, \ 3153 "true to simulate removable media"); \ 3154 _FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool, \ 3155 "true to simulate CD-ROM instead of disk"); \ 3156 _FSG_MODULE_PARAM_ARRAY(prefix, params, nofua, bool, \ 3157 "true to ignore SCSI WRITE(10,12) FUA bit"); \ 3158 _FSG_MODULE_PARAM(prefix, params, luns, uint, \ 3159 "number of LUNs"); \ 3160 _FSG_MODULE_PARAM(prefix, params, stall, 
bool, \ 3161 "false to prevent bulk stalls") 3162 3163static void 3164fsg_config_from_params(struct fsg_config *cfg, 3165 const struct fsg_module_parameters *params) 3166{ 3167 struct fsg_lun_config *lun; 3168 unsigned i; 3169 3170 /* Configure LUNs */ 3171 cfg->nluns = 3172 min(params->luns ?: (params->file_count ?: 1u), 3173 (unsigned)FSG_MAX_LUNS); 3174 for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) { 3175 lun->ro = !!params->ro[i]; 3176 lun->cdrom = !!params->cdrom[i]; 3177 lun->removable = /* Removable by default */ 3178 params->removable_count <= i || params->removable[i]; 3179 lun->filename = 3180 params->file_count > i && params->file[i][0] 3181 ? params->file[i] 3182 : 0; 3183 } 3184 3185 /* Let MSF use defaults */ 3186 cfg->lun_name_format = 0; 3187 cfg->thread_name = 0; 3188 cfg->vendor_name = 0; 3189 cfg->product_name = 0; 3190 cfg->release = 0xffff; 3191 3192 cfg->ops = NULL; 3193 cfg->private_data = NULL; 3194 3195 /* Finalise */ 3196 cfg->can_stall = params->stall; 3197} 3198 3199static inline struct fsg_common * 3200fsg_common_from_params(struct fsg_common *common, 3201 struct usb_composite_dev *cdev, 3202 const struct fsg_module_parameters *params) 3203 __attribute__((unused)); 3204static inline struct fsg_common * 3205fsg_common_from_params(struct fsg_common *common, 3206 struct usb_composite_dev *cdev, 3207 const struct fsg_module_parameters *params) 3208{ 3209 struct fsg_config cfg; 3210 fsg_config_from_params(&cfg, params); 3211 return fsg_common_init(common, cdev, &cfg); 3212} 3213 3214