/*
 * file_storage.c -- File-backed USB Storage Gadget, for USB development
 *
 * Copyright (C) 2003-2008 Alan Stern
7 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The names of the above-listed copyright holders may not be used
17 * to endorse or promote products derived from this software without
18 * specific prior written permission.
20 * ALTERNATIVELY, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
26 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
27 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
29 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 * The File-backed Storage Gadget acts as a USB Mass Storage device,
41 * appearing to the host as a disk drive or as a CD-ROM drive. In addition
42 * to providing an example of a genuinely useful gadget driver for a USB
43 * device, it also illustrates a technique of double-buffering for increased
44 * throughput. Last but not least, it gives an easy way to probe the
45 * behavior of the Mass Storage drivers in a USB host.
47 * Backing storage is provided by a regular file or a block device, specified
48 * by the "file" module parameter. Access can be limited to read-only by
49 * setting the optional "ro" module parameter. (For CD-ROM emulation,
50 * access is always read-only.) The gadget will indicate that it has
51 * removable media if the optional "removable" module parameter is set.
53 * There is support for multiple logical units (LUNs), each of which has
54 * its own backing file. The number of LUNs can be set using the optional
55 * "luns" module parameter (anywhere from 1 to 8), and the corresponding
56 * files are specified using comma-separated lists for "file" and "ro".
57 * The default number of LUNs is taken from the number of "file" elements;
58 * it is 1 if "file" is not given. If "removable" is not set then a backing
59 * file must be specified for each LUN. If it is set, then an unspecified
60 * or empty backing filename means the LUN's medium is not loaded. Ideally
61 * each LUN would be settable independently as a disk drive or a CD-ROM
62 * drive, but currently all LUNs have to be the same type. The CD-ROM
63 * emulation includes a single data track and no audio tracks; hence there
64 * need be only one backing file per LUN. Note also that the CD-ROM block
65 * length is set to 512 rather than the more common value 2048.
67 * Requirements are modest; only a bulk-in and a bulk-out endpoint are
 * needed (an interrupt-in endpoint is also needed for CBI).  The memory
69 * requirement amounts to two 16K buffers, size configurable by a parameter.
70 * Support is included for both full-speed and high-speed operation.
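 *
 * (Two 16K buffers means roughly 32 KB of buffer memory in the default
 * configuration, FSG_NUM_BUFFERS = 2 and FSG_BUFLEN = 16384 as defined in
 * storage_common.c.)
 *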
72 * Note that the driver is slightly non-portable in that it assumes a
73 * single memory/DMA buffer will be useable for bulk-in, bulk-out, and
74 * interrupt-in endpoints. With most device controllers this isn't an
75 * issue, but there may be some with hardware restrictions that prevent
76 * a buffer from being used by more than one endpoint.
80 * file=filename[,filename...]
81 * Required if "removable" is not set, names of
 *					the files or block devices used for
 *					backing storage
84 * ro=b[,b...] Default false, booleans for read-only access
85 * removable Default false, boolean for removable media
 *	luns=N			Default N = number of filenames, number of
 *					LUNs to support
88 * stall Default determined according to the type of
89 * USB device controller (usually true),
 *					boolean to permit the driver to halt
 *					bulk endpoints
 *	cdrom			Default false, boolean for whether to emulate
 *					a CD-ROM drive
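 *
 * As a hypothetical example (the backing-file names below are made up),
 * a removable three-LUN setup with the last LUN read-only could be
 * loaded with:
 *
 *	modprobe g_file_storage removable=1 ro=0,0,1 \
 *		file=/root/disk1.img,/root/disk2.img,/root/image3.img
 *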
95 * The pathnames of the backing files and the ro settings are available in
96 * the attribute files "file" and "ro" in the lun<n> subdirectory of the
97 * gadget's sysfs directory. If the "removable" option is set, writing to
98 * these files will simulate ejecting/loading the medium (writing an empty
99 * line means eject) and adjusting a write-enable tab. Changes to the ro
 * setting are not allowed when the medium is loaded or if CD-ROM emulation
 * is being used.
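 *
 * For instance (illustrative only; the exact sysfs path depends on the
 * UDC driver and platform):
 *
 *	GADGET=/sys/devices/platform/dummy_udc/gadget
 *	echo                  > $GADGET/lun0/file	# eject LUN 0's medium
 *	echo 1                > $GADGET/lun0/ro		# write-protect it while unloaded
 *	echo /root/image3.img > $GADGET/lun0/file	# load a new medium
 *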
103 * This gadget driver is heavily based on "Gadget Zero" by David Brownell.
104 * The driver's SCSI command interface was based on the "Information
105 * technology - Small Computer System Interface - 2" document from
106 * X3T9.2 Project 375D, Revision 10L, 7-SEP-93, available at
107 * <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>. The single exception
108 * is opcode 0x23 (READ FORMAT CAPACITIES), which was based on the
109 * "Universal Serial Bus Mass Storage Class UFI Command Specification"
110 * document, Revision 1.0, December 14, 1998, available at
111 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
118 * The FSG driver is fairly straightforward. There is a main kernel
119 * thread that handles most of the work. Interrupt routines field
120 * callbacks from the controller driver: bulk- and interrupt-request
121 * completion notifications, endpoint-0 events, and disconnect events.
122 * Completion events are passed to the main thread by wakeup calls. Many
123 * ep0 requests are handled at interrupt time, but SetInterface,
124 * SetConfiguration, and device reset requests are forwarded to the
125 * thread in the form of "exceptions" using SIGUSR1 signals (since they
126 * should interrupt any ongoing file I/O operations).
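 *
 * In outline, raising such an exception amounts to the following (see
 * raise_exception() below for the real code):
 *
 *	spin_lock_irqsave(&fsg->lock, flags);
 *	if (fsg->state <= new_state) {
 *		fsg->exception_req_tag = fsg->ep0_req_tag;
 *		fsg->state = new_state;
 *		if (fsg->thread_task)
 *			send_sig_info(SIGUSR1, SEND_SIG_FORCED,
 *					fsg->thread_task);
 *	}
 *	spin_unlock_irqrestore(&fsg->lock, flags);
 *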
128 * The thread's main routine implements the standard command/data/status
129 * parts of a SCSI interaction. It and its subroutines are full of tests
130 * for pending signals/exceptions -- all this polling is necessary since
131 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
132 * indication that the driver really wants to be running in userspace.)
133 * An important point is that so long as the thread is alive it keeps an
134 * open reference to the backing file. This will prevent unmounting
135 * the backing file's underlying filesystem and could cause problems
136 * during system shutdown, for example. To prevent such problems, the
 * thread catches INT, TERM, and KILL signals and converts them into
 * an EXIT exception.
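 *
 * The pending-signal tests mentioned above are just checks like this one,
 * repeated throughout the I/O loops and in sleep_thread() below:
 *
 *	if (signal_pending(current))
 *		return -EINTR;		// Interrupted!
 *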
140 * In normal operation the main thread is started during the gadget's
141 * fsg_bind() callback and stopped during fsg_unbind(). But it can also
142 * exit when it receives a signal, and there's no point leaving the
143 * gadget running when the thread is dead. So just before the thread
144 * exits, it deregisters the gadget driver. This makes things a little
145 * tricky: The driver is deregistered at two places, and the exiting
146 * thread can indirectly call fsg_unbind() which in turn can tell the
147 * thread to exit. The first problem is resolved through the use of the
148 * REGISTERED atomic bitflag; the driver will only be deregistered once.
149 * The second problem is resolved by having fsg_unbind() check
150 * fsg->state; it won't try to stop the thread if the state is already
151 * FSG_STATE_TERMINATED.
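 *
 * In code, the "deregister only once" check is roughly:
 *
 *	if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
 *		usb_gadget_unregister_driver(&fsg_driver);
 *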
153 * To provide maximum throughput, the driver uses a circular pipeline of
154 * buffer heads (struct fsg_buffhd). In principle the pipeline can be
155 * arbitrarily long; in practice the benefits don't justify having more
156 * than 2 stages (i.e., double buffering). But it helps to think of the
157 * pipeline as being a long one. Each buffer head contains a bulk-in and
158 * a bulk-out request pointer (since the buffer can be used for both
159 * output and input -- directions always are given from the host's
 * point of view) as well as a pointer to the buffer and various state
 * variables.
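 *
 * The buffer-head structure itself is defined in storage_common.c; in
 * outline it looks something like this:
 *
 *	struct fsg_buffhd {
 *		void			*buf;
 *		enum fsg_buffer_state	state;
 *		struct fsg_buffhd	*next;
 *		unsigned int		bulk_out_intended_length;
 *		struct usb_request	*inreq;
 *		int			inreq_busy;
 *		struct usb_request	*outreq;
 *		int			outreq_busy;
 *	};
 *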
163 * Use of the pipeline follows a simple protocol. There is a variable
164 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
165 * At any time that buffer head may still be in use from an earlier
166 * request, so each buffer head has a state variable indicating whether
167 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
168 * buffer head to be EMPTY, filling the buffer either by file I/O or by
169 * USB I/O (during which the buffer head is BUSY), and marking the buffer
170 * head FULL when the I/O is complete. Then the buffer will be emptied
171 * (again possibly by USB I/O, during which it is marked BUSY) and
172 * finally marked EMPTY again (possibly by a completion routine).
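 *
 * A condensed sketch of the producer side, the way do_read() below uses
 * the pipeline:
 *
 *	bh = fsg->next_buffhd_to_fill;
 *	while (bh->state != BUF_STATE_EMPTY) {	// wait for a free buffer
 *		rc = sleep_thread(fsg);
 *		if (rc)
 *			return rc;
 *	}
 *	nread = vfs_read(curlun->filp, bh->buf, amount, &offset);
 *	bh->inreq->length = nread;	// the buffer now holds file data
 *	bh->state = BUF_STATE_FULL;
 *	start_transfer(fsg, fsg->bulk_in, bh->inreq,	// ship it to the host
 *			&bh->inreq_busy, &bh->state);
 *	fsg->next_buffhd_to_fill = bh->next;
 *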
174 * A module parameter tells the driver to avoid stalling the bulk
175 * endpoints wherever the transport specification allows. This is
176 * necessary for some UDCs like the SuperH, which cannot reliably clear a
177 * halt on a bulk endpoint. However, under certain circumstances the
178 * Bulk-only specification requires a stall. In such cases the driver
179 * will halt the endpoint and set a flag indicating that it should clear
180 * the halt in software during the next device reset. Hopefully this
181 * will permit everything to work correctly. Furthermore, although the
182 * specification allows the bulk-out endpoint to halt when the host sends
183 * too much data, implementing this would cause an unavoidable race.
184 * The driver will always use the "no-stall" approach for OUT transfers.
186 * One subtle point concerns sending status-stage responses for ep0
187 * requests. Some of these requests, such as device reset, can involve
188 * interrupting an ongoing file I/O operation, which might take an
189 * arbitrarily long time. During that delay the host might give up on
190 * the original ep0 request and issue a new one. When that happens the
191 * driver should not notify the host about completion of the original
192 * request, as the host will no longer be waiting for it. So the driver
193 * assigns to each ep0 request a unique tag, and it keeps track of the
194 * tag value of the request associated with a long-running exception
195 * (device-reset, interface-change, or configuration-change). When the
196 * exception handler is finished, the status-stage response is submitted
197 * only if the current ep0 request tag is equal to the exception request
198 * tag. Thus only the most recently received ep0 request will get a
199 * status-stage response.
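 *
 * When the exception handler finally wants to send the deferred status
 * stage, the check boils down to:
 *
 *	if (fsg->ep0_req_tag == exception_req_tag)
 *		ep0_queue(fsg);		// Complete the status stage
 *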
201 * Warning: This driver source file is too long. It ought to be split up
202 * into a header file plus about 3 separate .c files, to handle the details
203 * of the Gadget, USB Mass Storage, and SCSI protocols.
207 /* #define VERBOSE_DEBUG */
208 /* #define DUMP_MSGS */
211 #include <linux/blkdev.h>
212 #include <linux/completion.h>
213 #include <linux/dcache.h>
214 #include <linux/delay.h>
215 #include <linux/device.h>
216 #include <linux/fcntl.h>
217 #include <linux/file.h>
218 #include <linux/fs.h>
219 #include <linux/kref.h>
220 #include <linux/kthread.h>
221 #include <linux/limits.h>
222 #include <linux/rwsem.h>
223 #include <linux/slab.h>
224 #include <linux/spinlock.h>
225 #include <linux/string.h>
226 #include <linux/freezer.h>
227 #include <linux/utsname.h>
229 #include <linux/usb/ch9.h>
230 #include <linux/usb/gadget.h>
232 #include "gadget_chips.h"
237 * Kbuild is not very cooperative with respect to linking separately
238 * compiled library objects into one module. So for now we won't use
239 * separate compilation ... ensuring init/exit sections work to shrink
240 * the runtime footprint, and giving us at least some parts of what
241 * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
243 #include "usbstring.c"
245 #include "epautoconf.c"
247 /*-------------------------------------------------------------------------*/
249 #define DRIVER_DESC "File-backed Storage Gadget"
250 #define DRIVER_NAME "g_file_storage"
251 #define DRIVER_VERSION "20 November 2008"
253 static char fsg_string_manufacturer[64];
254 static const char fsg_string_product[] = DRIVER_DESC;
255 static char fsg_string_serial[13];
256 static const char fsg_string_config[] = "Self-powered";
257 static const char fsg_string_interface[] = "Mass Storage";
260 #define FSG_NO_INTR_EP 1
262 #include "storage_common.c"
265 MODULE_DESCRIPTION(DRIVER_DESC);
266 MODULE_AUTHOR("Alan Stern");
267 MODULE_LICENSE("Dual BSD/GPL");
270 * This driver assumes self-powered hardware and has no way for users to
271 * trigger remote wakeup. It uses autoconfiguration to select endpoints
272 * and endpoint addresses.
276 /*-------------------------------------------------------------------------*/
279 /* Encapsulate the module parameter settings */
282 char *file[FSG_MAX_LUNS];
283 int ro[FSG_MAX_LUNS];
284 unsigned int num_filenames;
285 unsigned int num_ros;
292 unsigned short release;
293 } mod_data = { // Default values
300 module_param_array_named(file, mod_data.file, charp, &mod_data.num_filenames,
302 MODULE_PARM_DESC(file, "names of backing files or devices");
304 module_param_array_named(ro, mod_data.ro, bool, &mod_data.num_ros, S_IRUGO);
305 MODULE_PARM_DESC(ro, "true to force read-only");
307 module_param_named(luns, mod_data.nluns, uint, S_IRUGO);
308 MODULE_PARM_DESC(luns, "number of LUNs");
310 module_param_named(removable, mod_data.removable, bool, S_IRUGO);
311 MODULE_PARM_DESC(removable, "true to simulate removable media");
313 module_param_named(stall, mod_data.can_stall, bool, S_IRUGO);
314 MODULE_PARM_DESC(stall, "false to prevent bulk stalls");
316 module_param_named(cdrom, mod_data.cdrom, bool, S_IRUGO);
317 MODULE_PARM_DESC(cdrom, "true to emulate cdrom instead of disk");
320 /*-------------------------------------------------------------------------*/
324 /* lock protects: state, all the req_busy's, and cbbuf_cmnd */
326 struct usb_gadget *gadget;
328 /* filesem protects: backing files in use */
329 struct rw_semaphore filesem;
331 /* reference counting: wait until all LUNs are released */
334 struct usb_ep *ep0; // Handy copy of gadget->ep0
335 struct usb_request *ep0req; // For control responses
336 unsigned int ep0_req_tag;
337 const char *ep0req_name;
339 unsigned int bulk_out_maxpacket;
340 enum fsg_state state; // For exception handling
341 unsigned int exception_req_tag;
343 u8 config, new_config;
345 unsigned int running : 1;
346 unsigned int bulk_in_enabled : 1;
347 unsigned int bulk_out_enabled : 1;
348 unsigned int phase_error : 1;
349 unsigned int short_packet_received : 1;
350 unsigned int bad_lun_okay : 1;
352 unsigned long atomic_bitflags;
354 #define IGNORE_BULK_OUT 1
356 struct usb_ep *bulk_in;
357 struct usb_ep *bulk_out;
359 struct fsg_buffhd *next_buffhd_to_fill;
360 struct fsg_buffhd *next_buffhd_to_drain;
361 struct fsg_buffhd buffhds[FSG_NUM_BUFFERS];
363 int thread_wakeup_needed;
364 struct completion thread_notifier;
365 struct task_struct *thread_task;
368 u8 cmnd[MAX_COMMAND_SIZE];
369 enum data_direction data_dir;
371 u32 data_size_from_cmnd;
378 struct fsg_lun *luns;
379 struct fsg_lun *curlun;
382 typedef void (*fsg_routine_t)(struct fsg_dev *);
static int exception_in_progress(struct fsg_dev *fsg)
{
	return (fsg->state > FSG_STATE_IDLE);
}
389 /* Make bulk-out requests be divisible by the maxpacket size */
static void set_bulk_out_req_length(struct fsg_dev *fsg,
		struct fsg_buffhd *bh, unsigned int length)
{
	unsigned int	rem;

	bh->bulk_out_intended_length = length;
	rem = length % fsg->bulk_out_maxpacket;
	if (rem > 0)
		length += fsg->bulk_out_maxpacket - rem;
	bh->outreq->length = length;
}
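
/* Example: with a 512-byte bulk-out maxpacket, a request for 1000 bytes is
 * rounded up to 1024; bulk_out_intended_length keeps the amount we really
 * wanted, so the padding can be recognized and ignored later. */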
402 static struct fsg_dev *the_fsg;
403 static struct usb_gadget_driver fsg_driver;
406 /*-------------------------------------------------------------------------*/
static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
{
	const char	*name;

	if (ep == fsg->bulk_in)
		name = "bulk-in";
	else if (ep == fsg->bulk_out)
		name = "bulk-out";
	else
		name = ep->name;
	DBG(fsg, "%s set halt\n", name);
	return usb_ep_set_halt(ep);
}
423 /*-------------------------------------------------------------------------*/
426 * DESCRIPTORS ... most are static, but strings and (full) configuration
427 * descriptors are built on demand. Also the (static) config and interface
428 * descriptors are adjusted during fsg_bind().
431 /* There is only one configuration. */
432 #define CONFIG_VALUE 1
434 static struct usb_device_descriptor
436 .bLength = sizeof device_desc,
437 .bDescriptorType = USB_DT_DEVICE,
439 .bcdUSB = cpu_to_le16(0x0200),
440 .bDeviceClass = USB_CLASS_PER_INTERFACE,
442 /* The next three values can be overridden by module parameters */
443 .idVendor = cpu_to_le16(FSG_VENDOR_ID),
444 .idProduct = cpu_to_le16(FSG_PRODUCT_ID),
445 .bcdDevice = cpu_to_le16(0xffff),
447 .iManufacturer = FSG_STRING_MANUFACTURER,
448 .iProduct = FSG_STRING_PRODUCT,
449 .iSerialNumber = FSG_STRING_SERIAL,
450 .bNumConfigurations = 1,
453 static struct usb_config_descriptor
455 .bLength = sizeof config_desc,
456 .bDescriptorType = USB_DT_CONFIG,
458 /* wTotalLength computed by usb_gadget_config_buf() */
460 .bConfigurationValue = CONFIG_VALUE,
461 .iConfiguration = FSG_STRING_CONFIG,
462 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
463 .bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2,
467 static struct usb_qualifier_descriptor
469 .bLength = sizeof dev_qualifier,
470 .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
472 .bcdUSB = cpu_to_le16(0x0200),
473 .bDeviceClass = USB_CLASS_PER_INTERFACE,
475 .bNumConfigurations = 1,
481 * Config descriptors must agree with the code that sets configurations
482 * and with code managing interfaces and their altsettings. They must
483 * also handle different speeds and other-speed requests.
485 static int populate_config_buf(struct usb_gadget *gadget,
486 u8 *buf, u8 type, unsigned index)
488 enum usb_device_speed speed = gadget->speed;
490 const struct usb_descriptor_header **function;
495 if (gadget_is_dualspeed(gadget) && type == USB_DT_OTHER_SPEED_CONFIG)
496 speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed;
497 if (gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH)
498 function = fsg_hs_function;
500 function = fsg_fs_function;
502 /* for now, don't advertise srp-only devices */
503 if (!gadget_is_otg(gadget))
506 len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function);
507 ((struct usb_config_descriptor *) buf)->bDescriptorType = type;
512 /*-------------------------------------------------------------------------*/
514 /* These routines may be called in process context or in_irq */
516 /* Caller must hold fsg->lock */
517 static void wakeup_thread(struct fsg_dev *fsg)
519 /* Tell the main thread that something has happened */
520 fsg->thread_wakeup_needed = 1;
521 if (fsg->thread_task)
522 wake_up_process(fsg->thread_task);
526 static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
530 /* Do nothing if a higher-priority exception is already in progress.
531 * If a lower-or-equal priority exception is in progress, preempt it
532 * and notify the main thread by sending it a signal. */
533 spin_lock_irqsave(&fsg->lock, flags);
534 if (fsg->state <= new_state) {
535 fsg->exception_req_tag = fsg->ep0_req_tag;
536 fsg->state = new_state;
537 if (fsg->thread_task)
538 send_sig_info(SIGUSR1, SEND_SIG_FORCED,
541 spin_unlock_irqrestore(&fsg->lock, flags);
545 /*-------------------------------------------------------------------------*/
547 /* The disconnect callback and ep0 routines. These always run in_irq,
548 * except that ep0_queue() is called in the main thread to acknowledge
549 * completion of various requests: set config, set interface, and
550 * Bulk-only device reset. */
552 static void fsg_disconnect(struct usb_gadget *gadget)
554 struct fsg_dev *fsg = get_gadget_data(gadget);
556 DBG(fsg, "disconnect or port reset\n");
557 raise_exception(fsg, FSG_STATE_DISCONNECT);
561 static int ep0_queue(struct fsg_dev *fsg)
565 rc = usb_ep_queue(fsg->ep0, fsg->ep0req, GFP_ATOMIC);
566 if (rc != 0 && rc != -ESHUTDOWN) {
568 /* We can't do much more than wait for a reset */
569 WARNING(fsg, "error in submission: %s --> %d\n",
575 static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
577 struct fsg_dev *fsg = ep->driver_data;
580 dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual);
581 if (req->status || req->actual != req->length)
582 DBG(fsg, "%s --> %d, %u/%u\n", __func__,
583 req->status, req->actual, req->length);
584 if (req->status == -ECONNRESET) // Request was cancelled
585 usb_ep_fifo_flush(ep);
587 if (req->status == 0 && req->context)
588 ((fsg_routine_t) (req->context))(fsg);
592 /*-------------------------------------------------------------------------*/
594 /* Bulk and interrupt endpoint completion handlers.
595 * These always run in_irq. */
597 static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
599 struct fsg_dev *fsg = ep->driver_data;
600 struct fsg_buffhd *bh = req->context;
602 if (req->status || req->actual != req->length)
603 DBG(fsg, "%s --> %d, %u/%u\n", __func__,
604 req->status, req->actual, req->length);
605 if (req->status == -ECONNRESET) // Request was cancelled
606 usb_ep_fifo_flush(ep);
608 /* Hold the lock while we update the request and buffer states */
610 spin_lock(&fsg->lock);
612 bh->state = BUF_STATE_EMPTY;
614 spin_unlock(&fsg->lock);
617 static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
619 struct fsg_dev *fsg = ep->driver_data;
620 struct fsg_buffhd *bh = req->context;
622 dump_msg(fsg, "bulk-out", req->buf, req->actual);
623 if (req->status || req->actual != bh->bulk_out_intended_length)
624 DBG(fsg, "%s --> %d, %u/%u\n", __func__,
625 req->status, req->actual,
626 bh->bulk_out_intended_length);
627 if (req->status == -ECONNRESET) // Request was cancelled
628 usb_ep_fifo_flush(ep);
630 /* Hold the lock while we update the request and buffer states */
632 spin_lock(&fsg->lock);
634 bh->state = BUF_STATE_FULL;
636 spin_unlock(&fsg->lock);
640 /*-------------------------------------------------------------------------*/
642 /* Ep0 class-specific handlers. These always run in_irq. */
644 static int class_setup_req(struct fsg_dev *fsg,
645 const struct usb_ctrlrequest *ctrl)
647 struct usb_request *req = fsg->ep0req;
648 u16 w_index = le16_to_cpu(ctrl->wIndex);
649 u16 w_value = le16_to_cpu(ctrl->wValue);
650 u16 w_length = le16_to_cpu(ctrl->wLength);
655 switch (ctrl->bRequest) {
657 case USB_BULK_RESET_REQUEST:
658 if (ctrl->bRequestType !=
659 (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
661 if (w_index != 0 || w_value != 0)
664 /* Raise an exception to stop the current operation
665 * and reinitialize our state. */
666 DBG(fsg, "bulk reset request\n");
667 raise_exception(fsg, FSG_STATE_RESET);
668 return DELAYED_STATUS;
670 case USB_BULK_GET_MAX_LUN_REQUEST:
671 if (ctrl->bRequestType !=
672 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
674 if (w_index != 0 || w_value != 0)
676 VDBG(fsg, "get max LUN\n");
677 *(u8 *) req->buf = fsg->nluns - 1;
682 "unknown class-specific control req "
683 "%02x.%02x v%04x i%04x l%u\n",
684 ctrl->bRequestType, ctrl->bRequest,
685 le16_to_cpu(ctrl->wValue), w_index, w_length);
690 /*-------------------------------------------------------------------------*/
692 /* Ep0 standard request handlers. These always run in_irq. */
694 static int standard_setup_req(struct fsg_dev *fsg,
695 const struct usb_ctrlrequest *ctrl)
697 struct usb_request *req = fsg->ep0req;
698 int value = -EOPNOTSUPP;
699 u16 w_index = le16_to_cpu(ctrl->wIndex);
700 u16 w_value = le16_to_cpu(ctrl->wValue);
702 /* Usually this just stores reply data in the pre-allocated ep0 buffer,
703 * but config change events will also reconfigure hardware. */
704 switch (ctrl->bRequest) {
706 case USB_REQ_GET_DESCRIPTOR:
707 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
710 switch (w_value >> 8) {
713 VDBG(fsg, "get device descriptor\n");
714 value = sizeof device_desc;
715 memcpy(req->buf, &device_desc, value);
717 case USB_DT_DEVICE_QUALIFIER:
718 VDBG(fsg, "get device qualifier\n");
719 if (!gadget_is_dualspeed(fsg->gadget))
721 value = sizeof dev_qualifier;
722 memcpy(req->buf, &dev_qualifier, value);
725 case USB_DT_OTHER_SPEED_CONFIG:
726 VDBG(fsg, "get other-speed config descriptor\n");
727 if (!gadget_is_dualspeed(fsg->gadget))
731 VDBG(fsg, "get configuration descriptor\n");
733 value = populate_config_buf(fsg->gadget,
740 VDBG(fsg, "get string descriptor\n");
742 /* wIndex == language code */
743 value = usb_gadget_get_string(&fsg_stringtab,
744 w_value & 0xff, req->buf);
749 /* One config, two speeds */
750 case USB_REQ_SET_CONFIGURATION:
751 if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
754 VDBG(fsg, "set configuration\n");
755 if (w_value == CONFIG_VALUE || w_value == 0) {
756 fsg->new_config = w_value;
758 /* Raise an exception to wipe out previous transaction
759 * state (queued bufs, etc) and set the new config. */
760 raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
761 value = DELAYED_STATUS;
764 case USB_REQ_GET_CONFIGURATION:
765 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
768 VDBG(fsg, "get configuration\n");
769 *(u8 *) req->buf = fsg->config;
773 case USB_REQ_SET_INTERFACE:
774 if (ctrl->bRequestType != (USB_DIR_OUT| USB_TYPE_STANDARD |
775 USB_RECIP_INTERFACE))
777 if (fsg->config && w_index == 0) {
779 /* Raise an exception to wipe out previous transaction
780 * state (queued bufs, etc) and install the new
781 * interface altsetting. */
782 raise_exception(fsg, FSG_STATE_INTERFACE_CHANGE);
783 value = DELAYED_STATUS;
786 case USB_REQ_GET_INTERFACE:
787 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
788 USB_RECIP_INTERFACE))
796 VDBG(fsg, "get interface\n");
797 *(u8 *) req->buf = 0;
803 "unknown control req %02x.%02x v%04x i%04x l%u\n",
804 ctrl->bRequestType, ctrl->bRequest,
805 w_value, w_index, le16_to_cpu(ctrl->wLength));
812 static int fsg_setup(struct usb_gadget *gadget,
813 const struct usb_ctrlrequest *ctrl)
815 struct fsg_dev *fsg = get_gadget_data(gadget);
817 int w_length = le16_to_cpu(ctrl->wLength);
819 ++fsg->ep0_req_tag; // Record arrival of a new request
820 fsg->ep0req->context = NULL;
821 fsg->ep0req->length = 0;
822 dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));
824 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
825 rc = class_setup_req(fsg, ctrl);
827 rc = standard_setup_req(fsg, ctrl);
829 /* Respond with data/status or defer until later? */
830 if (rc >= 0 && rc != DELAYED_STATUS) {
831 rc = min(rc, w_length);
832 fsg->ep0req->length = rc;
833 fsg->ep0req->zero = rc < w_length;
834 fsg->ep0req_name = (ctrl->bRequestType & USB_DIR_IN ?
835 "ep0-in" : "ep0-out");
839 /* Device either stalls (rc < 0) or reports success */
844 /*-------------------------------------------------------------------------*/
846 /* All the following routines run in process context */
849 /* Use this for bulk or interrupt transfers, not ep0 */
850 static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
851 struct usb_request *req, int *pbusy,
852 enum fsg_buffer_state *state)
856 if (ep == fsg->bulk_in)
857 dump_msg(fsg, "bulk-in", req->buf, req->length);
859 spin_lock_irq(&fsg->lock);
861 *state = BUF_STATE_BUSY;
862 spin_unlock_irq(&fsg->lock);
863 rc = usb_ep_queue(ep, req, GFP_KERNEL);
866 *state = BUF_STATE_EMPTY;
868 /* We can't do much more than wait for a reset */
870 /* Note: currently the net2280 driver fails zero-length
871 * submissions if DMA is enabled. */
872 if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
874 WARNING(fsg, "error in submission: %s --> %d\n",
880 static int sleep_thread(struct fsg_dev *fsg)
884 /* Wait until a signal arrives or we are woken up */
887 set_current_state(TASK_INTERRUPTIBLE);
888 if (signal_pending(current)) {
892 if (fsg->thread_wakeup_needed)
896 __set_current_state(TASK_RUNNING);
897 fsg->thread_wakeup_needed = 0;
902 /*-------------------------------------------------------------------------*/
904 static int do_read(struct fsg_dev *fsg)
906 struct fsg_lun *curlun = fsg->curlun;
908 struct fsg_buffhd *bh;
911 loff_t file_offset, file_offset_tmp;
913 unsigned int partial_page;
916 /* Get the starting Logical Block Address and check that it's
918 if (fsg->cmnd[0] == SC_READ_6)
919 lba = get_unaligned_be24(&fsg->cmnd[1]);
921 lba = get_unaligned_be32(&fsg->cmnd[2]);
923 /* We allow DPO (Disable Page Out = don't save data in the
924 * cache) and FUA (Force Unit Access = don't read from the
925 * cache), but we don't implement them. */
926 if ((fsg->cmnd[1] & ~0x18) != 0) {
927 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
931 if (lba >= curlun->num_sectors) {
932 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
935 file_offset = ((loff_t) lba) << 9;
937 /* Carry out the file reads */
938 amount_left = fsg->data_size_from_cmnd;
939 if (unlikely(amount_left == 0))
940 return -EIO; // No default reply
944 /* Figure out how much we need to read:
945 * Try to read the remaining amount.
946 * But don't read more than the buffer size.
947 * And don't try to read past the end of the file.
		 * Finally, if we're not at a page boundary, don't read past
		 * the next page.
950 * If this means reading 0 then we were asked to read past
951 * the end of file. */
952 amount = min(amount_left, FSG_BUFLEN);
953 amount = min((loff_t) amount,
954 curlun->file_length - file_offset);
955 partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
956 if (partial_page > 0)
957 amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
960 /* Wait for the next buffer to become available */
961 bh = fsg->next_buffhd_to_fill;
962 while (bh->state != BUF_STATE_EMPTY) {
963 rc = sleep_thread(fsg);
968 /* If we were asked to read past the end of file,
969 * end with an empty buffer. */
972 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
973 curlun->sense_data_info = file_offset >> 9;
974 curlun->info_valid = 1;
975 bh->inreq->length = 0;
976 bh->state = BUF_STATE_FULL;
980 /* Perform the read */
981 file_offset_tmp = file_offset;
982 nread = vfs_read(curlun->filp,
983 (char __user *) bh->buf,
984 amount, &file_offset_tmp);
985 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
986 (unsigned long long) file_offset,
988 if (signal_pending(current))
992 LDBG(curlun, "error in file read: %d\n",
995 } else if (nread < amount) {
996 LDBG(curlun, "partial file read: %d/%u\n",
997 (int) nread, amount);
998 nread -= (nread & 511); // Round down to a block
1000 file_offset += nread;
1001 amount_left -= nread;
1002 fsg->residue -= nread;
1003 bh->inreq->length = nread;
1004 bh->state = BUF_STATE_FULL;
1006 /* If an error occurred, report it and its position */
1007 if (nread < amount) {
1008 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1009 curlun->sense_data_info = file_offset >> 9;
1010 curlun->info_valid = 1;
1014 if (amount_left == 0)
1015 break; // No more left to read
1017 /* Send this buffer and go read some more */
1018 bh->inreq->zero = 0;
1019 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1020 &bh->inreq_busy, &bh->state);
1021 fsg->next_buffhd_to_fill = bh->next;
1024 return -EIO; // No default reply
1028 /*-------------------------------------------------------------------------*/
1030 static int do_write(struct fsg_dev *fsg)
1032 struct fsg_lun *curlun = fsg->curlun;
1034 struct fsg_buffhd *bh;
1036 u32 amount_left_to_req, amount_left_to_write;
1037 loff_t usb_offset, file_offset, file_offset_tmp;
1038 unsigned int amount;
1039 unsigned int partial_page;
1044 curlun->sense_data = SS_WRITE_PROTECTED;
1047 spin_lock(&curlun->filp->f_lock);
1048 curlun->filp->f_flags &= ~O_SYNC; // Default is not to wait
1049 spin_unlock(&curlun->filp->f_lock);
1051 /* Get the starting Logical Block Address and check that it's
1053 if (fsg->cmnd[0] == SC_WRITE_6)
1054 lba = get_unaligned_be24(&fsg->cmnd[1]);
1056 lba = get_unaligned_be32(&fsg->cmnd[2]);
1058 /* We allow DPO (Disable Page Out = don't save data in the
1059 * cache) and FUA (Force Unit Access = write directly to the
1060 * medium). We don't implement DPO; we implement FUA by
1061 * performing synchronous output. */
1062 if ((fsg->cmnd[1] & ~0x18) != 0) {
1063 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1066 if (fsg->cmnd[1] & 0x08) { // FUA
1067 spin_lock(&curlun->filp->f_lock);
1068 curlun->filp->f_flags |= O_SYNC;
1069 spin_unlock(&curlun->filp->f_lock);
1072 if (lba >= curlun->num_sectors) {
1073 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1077 /* Carry out the file writes */
1079 file_offset = usb_offset = ((loff_t) lba) << 9;
1080 amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
1082 while (amount_left_to_write > 0) {
1084 /* Queue a request for more data from the host */
1085 bh = fsg->next_buffhd_to_fill;
1086 if (bh->state == BUF_STATE_EMPTY && get_some_more) {
1088 /* Figure out how much we want to get:
1089 * Try to get the remaining amount.
1090 * But don't get more than the buffer size.
1091 * And don't try to go past the end of the file.
1092 * If we're not at a page boundary,
1093 * don't go past the next page.
1094 * If this means getting 0, then we were asked
1095 * to write past the end of file.
1096 * Finally, round down to a block boundary. */
1097 amount = min(amount_left_to_req, FSG_BUFLEN);
1098 amount = min((loff_t) amount, curlun->file_length -
1100 partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
1101 if (partial_page > 0)
1102 amount = min(amount,
1103 (unsigned int) PAGE_CACHE_SIZE - partial_page);
1107 curlun->sense_data =
1108 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1109 curlun->sense_data_info = usb_offset >> 9;
1110 curlun->info_valid = 1;
1113 amount -= (amount & 511);
				/* Why were we asked to transfer a
				 * partial amount? */
1122 /* Get the next buffer */
1123 usb_offset += amount;
1124 fsg->usb_amount_left -= amount;
1125 amount_left_to_req -= amount;
1126 if (amount_left_to_req == 0)
1129 /* amount is always divisible by 512, hence by
1130 * the bulk-out maxpacket size */
1131 bh->outreq->length = bh->bulk_out_intended_length =
1133 bh->outreq->short_not_ok = 1;
1134 start_transfer(fsg, fsg->bulk_out, bh->outreq,
1135 &bh->outreq_busy, &bh->state);
1136 fsg->next_buffhd_to_fill = bh->next;
1140 /* Write the received data to the backing file */
1141 bh = fsg->next_buffhd_to_drain;
1142 if (bh->state == BUF_STATE_EMPTY && !get_some_more)
1143 break; // We stopped early
1144 if (bh->state == BUF_STATE_FULL) {
1146 fsg->next_buffhd_to_drain = bh->next;
1147 bh->state = BUF_STATE_EMPTY;
1149 /* Did something go wrong with the transfer? */
1150 if (bh->outreq->status != 0) {
1151 curlun->sense_data = SS_COMMUNICATION_FAILURE;
1152 curlun->sense_data_info = file_offset >> 9;
1153 curlun->info_valid = 1;
1157 amount = bh->outreq->actual;
1158 if (curlun->file_length - file_offset < amount) {
1160 "write %u @ %llu beyond end %llu\n",
1161 amount, (unsigned long long) file_offset,
1162 (unsigned long long) curlun->file_length);
1163 amount = curlun->file_length - file_offset;
1166 /* Perform the write */
1167 file_offset_tmp = file_offset;
1168 nwritten = vfs_write(curlun->filp,
1169 (char __user *) bh->buf,
1170 amount, &file_offset_tmp);
1171 VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
1172 (unsigned long long) file_offset,
1174 if (signal_pending(current))
1175 return -EINTR; // Interrupted!
1178 LDBG(curlun, "error in file write: %d\n",
1181 } else if (nwritten < amount) {
1182 LDBG(curlun, "partial file write: %d/%u\n",
1183 (int) nwritten, amount);
1184 nwritten -= (nwritten & 511);
1185 // Round down to a block
1187 file_offset += nwritten;
1188 amount_left_to_write -= nwritten;
1189 fsg->residue -= nwritten;
1191 /* If an error occurred, report it and its position */
1192 if (nwritten < amount) {
1193 curlun->sense_data = SS_WRITE_ERROR;
1194 curlun->sense_data_info = file_offset >> 9;
1195 curlun->info_valid = 1;
1199 /* Did the host decide to stop early? */
1200 if (bh->outreq->actual != bh->outreq->length) {
1201 fsg->short_packet_received = 1;
1207 /* Wait for something to happen */
1208 rc = sleep_thread(fsg);
1213 return -EIO; // No default reply
1217 /*-------------------------------------------------------------------------*/
1219 static int do_synchronize_cache(struct fsg_dev *fsg)
1221 struct fsg_lun *curlun = fsg->curlun;
	/* We ignore the requested LBA and write out all of the file's
1225 * dirty data buffers. */
1226 rc = fsg_lun_fsync_sub(curlun);
1228 curlun->sense_data = SS_WRITE_ERROR;
1233 /*-------------------------------------------------------------------------*/
1235 static void invalidate_sub(struct fsg_lun *curlun)
1237 struct file *filp = curlun->filp;
1238 struct inode *inode = filp->f_path.dentry->d_inode;
1241 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
1242 VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
1245 static int do_verify(struct fsg_dev *fsg)
1247 struct fsg_lun *curlun = fsg->curlun;
1249 u32 verification_length;
1250 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
1251 loff_t file_offset, file_offset_tmp;
1253 unsigned int amount;
1256 /* Get the starting Logical Block Address and check that it's
1258 lba = get_unaligned_be32(&fsg->cmnd[2]);
1259 if (lba >= curlun->num_sectors) {
1260 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1264 /* We allow DPO (Disable Page Out = don't save data in the
1265 * cache) but we don't implement it. */
1266 if ((fsg->cmnd[1] & ~0x10) != 0) {
1267 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1271 verification_length = get_unaligned_be16(&fsg->cmnd[7]);
1272 if (unlikely(verification_length == 0))
1273 return -EIO; // No default reply
1275 /* Prepare to carry out the file verify */
1276 amount_left = verification_length << 9;
1277 file_offset = ((loff_t) lba) << 9;
1279 /* Write out all the dirty buffers before invalidating them */
1280 fsg_lun_fsync_sub(curlun);
1281 if (signal_pending(current))
1284 invalidate_sub(curlun);
1285 if (signal_pending(current))
1288 /* Just try to read the requested blocks */
1289 while (amount_left > 0) {
1291 /* Figure out how much we need to read:
		 * Try to read the remaining amount, but not more than
		 * the buffer size.
1294 * And don't try to read past the end of the file.
1295 * If this means reading 0 then we were asked to read
1296 * past the end of file. */
1297 amount = min(amount_left, FSG_BUFLEN);
1298 amount = min((loff_t) amount,
1299 curlun->file_length - file_offset);
1301 curlun->sense_data =
1302 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1303 curlun->sense_data_info = file_offset >> 9;
1304 curlun->info_valid = 1;
1308 /* Perform the read */
1309 file_offset_tmp = file_offset;
1310 nread = vfs_read(curlun->filp,
1311 (char __user *) bh->buf,
1312 amount, &file_offset_tmp);
1313 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
1314 (unsigned long long) file_offset,
1316 if (signal_pending(current))
1320 LDBG(curlun, "error in file verify: %d\n",
1323 } else if (nread < amount) {
1324 LDBG(curlun, "partial file verify: %d/%u\n",
1325 (int) nread, amount);
1326 nread -= (nread & 511); // Round down to a sector
1329 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1330 curlun->sense_data_info = file_offset >> 9;
1331 curlun->info_valid = 1;
1334 file_offset += nread;
1335 amount_left -= nread;
1341 /*-------------------------------------------------------------------------*/
1343 static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1345 u8 *buf = (u8 *) bh->buf;
1347 static char vendor_id[] = "Linux ";
1348 static char product_disk_id[] = "File-Stor Gadget";
1349 static char product_cdrom_id[] = "File-CD Gadget ";
1351 if (!fsg->curlun) { // Unsupported LUNs are okay
1352 fsg->bad_lun_okay = 1;
1354 buf[0] = 0x7f; // Unsupported, no device-type
1355 buf[4] = 31; // Additional length
1360 buf[0] = (mod_data.cdrom ? TYPE_CDROM : TYPE_DISK);
1361 if (mod_data.removable)
1363 buf[2] = 2; // ANSI SCSI level 2
1364 buf[3] = 2; // SCSI-2 INQUIRY data format
1365 buf[4] = 31; // Additional length
1366 // No special options
1367 sprintf(buf + 8, "%-8s%-16s%04x", vendor_id,
1368 (mod_data.cdrom ? product_cdrom_id :
1375 static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1377 struct fsg_lun *curlun = fsg->curlun;
1378 u8 *buf = (u8 *) bh->buf;
1383 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
1385 * If a REQUEST SENSE command is received from an initiator
1386 * with a pending unit attention condition (before the target
1387 * generates the contingent allegiance condition), then the
1388 * target shall either:
1389 * a) report any pending sense data and preserve the unit
1390 * attention condition on the logical unit, or,
1391 * b) report the unit attention condition, may discard any
1392 * pending sense data, and clear the unit attention
1393 * condition on the logical unit for that initiator.
1395 * FSG normally uses option a); enable this code to use option b).
1398 if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
1399 curlun->sense_data = curlun->unit_attention_data;
1400 curlun->unit_attention_data = SS_NO_SENSE;
1404 if (!curlun) { // Unsupported LUNs are okay
1405 fsg->bad_lun_okay = 1;
1406 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1410 sd = curlun->sense_data;
1411 sdinfo = curlun->sense_data_info;
1412 valid = curlun->info_valid << 7;
1413 curlun->sense_data = SS_NO_SENSE;
1414 curlun->sense_data_info = 0;
1415 curlun->info_valid = 0;
1419 buf[0] = valid | 0x70; // Valid, current error
1421 put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */
1422 buf[7] = 18 - 8; // Additional sense length
1429 static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1431 struct fsg_lun *curlun = fsg->curlun;
1432 u32 lba = get_unaligned_be32(&fsg->cmnd[2]);
1433 int pmi = fsg->cmnd[8];
1434 u8 *buf = (u8 *) bh->buf;
1436 /* Check the PMI and LBA fields */
1437 if (pmi > 1 || (pmi == 0 && lba != 0)) {
1438 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1442 put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
1443 /* Max logical block */
1444 put_unaligned_be32(512, &buf[4]); /* Block length */
1449 static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1451 struct fsg_lun *curlun = fsg->curlun;
1452 int msf = fsg->cmnd[1] & 0x02;
1453 u32 lba = get_unaligned_be32(&fsg->cmnd[2]);
1454 u8 *buf = (u8 *) bh->buf;
1456 if ((fsg->cmnd[1] & ~0x02) != 0) { /* Mask away MSF */
1457 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1460 if (lba >= curlun->num_sectors) {
1461 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1466 buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */
1467 store_cdrom_address(&buf[4], msf, lba);
1472 static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1474 struct fsg_lun *curlun = fsg->curlun;
1475 int msf = fsg->cmnd[1] & 0x02;
1476 int start_track = fsg->cmnd[6];
1477 u8 *buf = (u8 *) bh->buf;
1479 if ((fsg->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */
1481 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1486 buf[1] = (20-2); /* TOC data length */
1487 buf[2] = 1; /* First track number */
1488 buf[3] = 1; /* Last track number */
1489 buf[5] = 0x16; /* Data track, copying allowed */
1490 buf[6] = 0x01; /* Only track is number 1 */
1491 store_cdrom_address(&buf[8], msf, 0);
1493 buf[13] = 0x16; /* Lead-out track is data */
1494 buf[14] = 0xAA; /* Lead-out track number */
1495 store_cdrom_address(&buf[16], msf, curlun->num_sectors);
1500 static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1502 struct fsg_lun *curlun = fsg->curlun;
1503 int mscmnd = fsg->cmnd[0];
1504 u8 *buf = (u8 *) bh->buf;
1507 int changeable_values, all_pages;
1511 if ((fsg->cmnd[1] & ~0x08) != 0) { // Mask away DBD
1512 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1515 pc = fsg->cmnd[2] >> 6;
1516 page_code = fsg->cmnd[2] & 0x3f;
1518 curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
1521 changeable_values = (pc == 1);
1522 all_pages = (page_code == 0x3f);
1524 /* Write the mode parameter header. Fixed values are: default
1525 * medium type, no cache control (DPOFUA), and no block descriptors.
1526 * The only variable value is the WriteProtect bit. We will fill in
1527 * the mode data length later. */
1529 if (mscmnd == SC_MODE_SENSE_6) {
1530 buf[2] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
1533 } else { // SC_MODE_SENSE_10
1534 buf[3] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
1536 limit = 65535; // Should really be FSG_BUFLEN
1539 /* No block descriptors */
1541 /* The mode pages, in numerical order. The only page we support
1542 * is the Caching page. */
1543 if (page_code == 0x08 || all_pages) {
1545 buf[0] = 0x08; // Page code
1546 buf[1] = 10; // Page length
1547 memset(buf+2, 0, 10); // None of the fields are changeable
1549 if (!changeable_values) {
1550 buf[2] = 0x04; // Write cache enable,
1551 // Read cache not disabled
1552 // No cache retention priorities
1553 put_unaligned_be16(0xffff, &buf[4]);
1554 /* Don't disable prefetch */
1555 /* Minimum prefetch = 0 */
1556 put_unaligned_be16(0xffff, &buf[8]);
1557 /* Maximum prefetch */
1558 put_unaligned_be16(0xffff, &buf[10]);
1559 /* Maximum prefetch ceiling */
1564 /* Check that a valid page was requested and the mode data length
1565 * isn't too long. */
1567 if (!valid_page || len > limit) {
1568 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1572 /* Store the mode data length */
1573 if (mscmnd == SC_MODE_SENSE_6)
1576 put_unaligned_be16(len - 2, buf0);
1581 static int do_start_stop(struct fsg_dev *fsg)
1583 if (!mod_data.removable) {
1584 fsg->curlun->sense_data = SS_INVALID_COMMAND;
1591 static int do_prevent_allow(struct fsg_dev *fsg)
1593 struct fsg_lun *curlun = fsg->curlun;
1596 if (!mod_data.removable) {
1597 curlun->sense_data = SS_INVALID_COMMAND;
1601 prevent = fsg->cmnd[4] & 0x01;
1602 if ((fsg->cmnd[4] & ~0x01) != 0) { // Mask away Prevent
1603 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1607 if (curlun->prevent_medium_removal && !prevent)
1608 fsg_lun_fsync_sub(curlun);
1609 curlun->prevent_medium_removal = prevent;
1614 static int do_read_format_capacities(struct fsg_dev *fsg,
1615 struct fsg_buffhd *bh)
1617 struct fsg_lun *curlun = fsg->curlun;
1618 u8 *buf = (u8 *) bh->buf;
1620 buf[0] = buf[1] = buf[2] = 0;
1621 buf[3] = 8; // Only the Current/Maximum Capacity Descriptor
1624 put_unaligned_be32(curlun->num_sectors, &buf[0]);
1625 /* Number of blocks */
1626 put_unaligned_be32(512, &buf[4]); /* Block length */
1627 buf[4] = 0x02; /* Current capacity */
1632 static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1634 struct fsg_lun *curlun = fsg->curlun;
1636 /* We don't support MODE SELECT */
1637 curlun->sense_data = SS_INVALID_COMMAND;
1642 /*-------------------------------------------------------------------------*/
1644 static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
1648 rc = fsg_set_halt(fsg, fsg->bulk_in);
1650 VDBG(fsg, "delayed bulk-in endpoint halt\n");
1652 if (rc != -EAGAIN) {
1653 WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
1658 /* Wait for a short time and then try again */
1659 if (msleep_interruptible(100) != 0)
1661 rc = usb_ep_set_halt(fsg->bulk_in);
1666 static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
1670 DBG(fsg, "bulk-in set wedge\n");
1671 rc = usb_ep_set_wedge(fsg->bulk_in);
1673 VDBG(fsg, "delayed bulk-in endpoint wedge\n");
1675 if (rc != -EAGAIN) {
1676 WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
1681 /* Wait for a short time and then try again */
1682 if (msleep_interruptible(100) != 0)
1684 rc = usb_ep_set_wedge(fsg->bulk_in);
1689 static int pad_with_zeros(struct fsg_dev *fsg)
1691 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
1692 u32 nkeep = bh->inreq->length;
1696 bh->state = BUF_STATE_EMPTY; // For the first iteration
1697 fsg->usb_amount_left = nkeep + fsg->residue;
1698 while (fsg->usb_amount_left > 0) {
1700 /* Wait for the next buffer to be free */
1701 while (bh->state != BUF_STATE_EMPTY) {
1702 rc = sleep_thread(fsg);
1707 nsend = min(fsg->usb_amount_left, FSG_BUFLEN);
1708 memset(bh->buf + nkeep, 0, nsend - nkeep);
1709 bh->inreq->length = nsend;
1710 bh->inreq->zero = 0;
1711 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1712 &bh->inreq_busy, &bh->state);
1713 bh = fsg->next_buffhd_to_fill = bh->next;
1714 fsg->usb_amount_left -= nsend;
1720 static int throw_away_data(struct fsg_dev *fsg)
1722 struct fsg_buffhd *bh;
1726 while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
1727 fsg->usb_amount_left > 0) {
1729 /* Throw away the data in a filled buffer */
1730 if (bh->state == BUF_STATE_FULL) {
1732 bh->state = BUF_STATE_EMPTY;
1733 fsg->next_buffhd_to_drain = bh->next;
1735 /* A short packet or an error ends everything */
1736 if (bh->outreq->actual != bh->outreq->length ||
1737 bh->outreq->status != 0) {
1738 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1744 /* Try to submit another request if we need one */
1745 bh = fsg->next_buffhd_to_fill;
1746 if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
1747 amount = min(fsg->usb_amount_left, FSG_BUFLEN);
1749 /* amount is always divisible by 512, hence by
1750 * the bulk-out maxpacket size */
1751 bh->outreq->length = bh->bulk_out_intended_length =
1753 bh->outreq->short_not_ok = 1;
1754 start_transfer(fsg, fsg->bulk_out, bh->outreq,
1755 &bh->outreq_busy, &bh->state);
1756 fsg->next_buffhd_to_fill = bh->next;
1757 fsg->usb_amount_left -= amount;
1761 /* Otherwise wait for something to happen */
1762 rc = sleep_thread(fsg);
1770 static int finish_reply(struct fsg_dev *fsg)
1772 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
1775 switch (fsg->data_dir) {
1777 break; // Nothing to send
1779 /* If we don't know whether the host wants to read or write,
1780 * this must be CB or CBI with an unknown command. We mustn't
1781 * try to send or receive any data. So stall both bulk pipes
1782 * if we can and wait for a reset. */
1783 case DATA_DIR_UNKNOWN:
1784 if (mod_data.can_stall) {
1785 fsg_set_halt(fsg, fsg->bulk_out);
1786 rc = halt_bulk_in_endpoint(fsg);
1790 /* All but the last buffer of data must have already been sent */
1791 case DATA_DIR_TO_HOST:
1792 if (fsg->data_size == 0) {
1793 /* Nothing to send */
1795 /* If there's no residue, simply send the last buffer */
1796 } else if (fsg->residue == 0) {
1797 bh->inreq->zero = 0;
1798 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1799 &bh->inreq_busy, &bh->state);
1800 fsg->next_buffhd_to_fill = bh->next;
1802 /* For Bulk-only, if we're allowed to stall then send the
1803 * short packet and halt the bulk-in endpoint. If we can't
1804 * stall, pad out the remaining data with 0's. */
1805 } else if (mod_data.can_stall) {
1806 bh->inreq->zero = 1;
1807 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1808 &bh->inreq_busy, &bh->state);
1809 fsg->next_buffhd_to_fill = bh->next;
1810 rc = halt_bulk_in_endpoint(fsg);
1812 rc = pad_with_zeros(fsg);
1816 /* We have processed all we want from the data the host has sent.
1817 * There may still be outstanding bulk-out requests. */
1818 case DATA_DIR_FROM_HOST:
1819 if (fsg->residue == 0)
1820 ; // Nothing to receive
1822 /* Did the host stop sending unexpectedly early? */
1823 else if (fsg->short_packet_received) {
1824 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1828 /* We haven't processed all the incoming data. Even though
1829 * we may be allowed to stall, doing so would cause a race.
1830 * The controller may already have ACK'ed all the remaining
1831 * bulk-out packets, in which case the host wouldn't see a
1832 * STALL. Not realizing the endpoint was halted, it wouldn't
1833 * clear the halt -- leading to problems later on. */
1835 else if (mod_data.can_stall) {
1836 fsg_set_halt(fsg, fsg->bulk_out);
1837 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
		/* We can't stall.  Read in the excess data and throw it
		 * all away. */
1845 rc = throw_away_data(fsg);
1852 static int send_status(struct fsg_dev *fsg)
1854 struct fsg_lun *curlun = fsg->curlun;
1855 struct fsg_buffhd *bh;
1856 struct bulk_cs_wrap *csw;
1858 u8 status = USB_STATUS_PASS;
1861 /* Wait for the next buffer to become available */
1862 bh = fsg->next_buffhd_to_fill;
1863 while (bh->state != BUF_STATE_EMPTY) {
1864 rc = sleep_thread(fsg);
1870 sd = curlun->sense_data;
1871 sdinfo = curlun->sense_data_info;
1872 } else if (fsg->bad_lun_okay)
1875 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1877 if (fsg->phase_error) {
1878 DBG(fsg, "sending phase-error status\n");
1879 status = USB_STATUS_PHASE_ERROR;
1880 sd = SS_INVALID_COMMAND;
1881 } else if (sd != SS_NO_SENSE) {
1882 DBG(fsg, "sending command-failure status\n");
1883 status = USB_STATUS_FAIL;
1884 VDBG(fsg, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
1886 SK(sd), ASC(sd), ASCQ(sd), sdinfo);
1890 /* Store and send the Bulk-only CSW */
1893 csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
1894 csw->Tag = fsg->tag;
1895 csw->Residue = cpu_to_le32(fsg->residue);
1896 csw->Status = status;
1898 bh->inreq->length = USB_BULK_CS_WRAP_LEN;
1899 bh->inreq->zero = 0;
1900 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1901 &bh->inreq_busy, &bh->state);
1903 fsg->next_buffhd_to_fill = bh->next;
1908 /*-------------------------------------------------------------------------*/
1910 /* Check whether the command is properly formed and whether its data size
1911 * and direction agree with the values we already have. */
1912 static int check_command(struct fsg_dev *fsg, int cmnd_size,
1913 enum data_direction data_dir, unsigned int mask,
1914 int needs_medium, const char *name)
1917 int lun = fsg->cmnd[1] >> 5;
1918 static const char dirletter[4] = {'u', 'o', 'i', 'n'};
1920 struct fsg_lun *curlun;
1923 if (fsg->data_dir != DATA_DIR_UNKNOWN)
1924 sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
1926 VDBG(fsg, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
1927 name, cmnd_size, dirletter[(int) data_dir],
1928 fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);
	/* We can't reply at all until we know the correct data direction
	 * and size. */
1932 if (fsg->data_size_from_cmnd == 0)
1933 data_dir = DATA_DIR_NONE;
1934 if (fsg->data_dir == DATA_DIR_UNKNOWN) { // CB or CBI
1935 fsg->data_dir = data_dir;
1936 fsg->data_size = fsg->data_size_from_cmnd;
1938 } else { // Bulk-only
1939 if (fsg->data_size < fsg->data_size_from_cmnd) {
1941 /* Host data size < Device data size is a phase error.
1942 * Carry out the command, but only transfer as much
1943 * as we are allowed. */
1944 fsg->data_size_from_cmnd = fsg->data_size;
1945 fsg->phase_error = 1;
1948 fsg->residue = fsg->usb_amount_left = fsg->data_size;
1950 /* Conflicting data directions is a phase error */
1951 if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
1952 fsg->phase_error = 1;
1956 /* Verify the length of the command itself */
1957 if (cmnd_size != fsg->cmnd_size) {
1959 /* Special case workaround: There are plenty of buggy SCSI
1960 * implementations. Many have issues with cbw->Length
1961 * field passing a wrong command size. For those cases we
1962 * always try to work around the problem by using the length
1963 * sent by the host side provided it is at least as large
1964 * as the correct command length.
1965 * Examples of such cases would be MS-Windows, which issues
1966 * REQUEST SENSE with cbw->Length == 12 where it should
1967 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
	 * REQUEST SENSE with cbw->Length == 10 where it should
	 * be 6 as well.
	 */
1971 if (cmnd_size <= fsg->cmnd_size) {
1972 DBG(fsg, "%s is buggy! Expected length %d "
1973 "but we got %d\n", name,
1974 cmnd_size, fsg->cmnd_size);
1975 cmnd_size = fsg->cmnd_size;
1977 fsg->phase_error = 1;
1982 /* Check that the LUN values are consistent */
1983 if (fsg->lun != lun)
1984 DBG(fsg, "using LUN %d from CBW, not LUN %d from CDB\n",
1988 if (fsg->lun >= 0 && fsg->lun < fsg->nluns) {
1989 fsg->curlun = curlun = &fsg->luns[fsg->lun];
1990 if (fsg->cmnd[0] != SC_REQUEST_SENSE) {
1991 curlun->sense_data = SS_NO_SENSE;
1992 curlun->sense_data_info = 0;
1993 curlun->info_valid = 0;
1996 fsg->curlun = curlun = NULL;
1997 fsg->bad_lun_okay = 0;
1999 /* INQUIRY and REQUEST SENSE commands are explicitly allowed
2000 * to use unsupported LUNs; all others may not. */
2001 if (fsg->cmnd[0] != SC_INQUIRY &&
2002 fsg->cmnd[0] != SC_REQUEST_SENSE) {
2003 DBG(fsg, "unsupported LUN %d\n", fsg->lun);
2008 /* If a unit attention condition exists, only INQUIRY and
2009 * REQUEST SENSE commands are allowed; anything else must fail. */
2010 if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
2011 fsg->cmnd[0] != SC_INQUIRY &&
2012 fsg->cmnd[0] != SC_REQUEST_SENSE) {
2013 curlun->sense_data = curlun->unit_attention_data;
2014 curlun->unit_attention_data = SS_NO_SENSE;
2018 /* Check that only command bytes listed in the mask are non-zero */
2019 fsg->cmnd[1] &= 0x1f; // Mask away the LUN
2020 for (i = 1; i < cmnd_size; ++i) {
2021 if (fsg->cmnd[i] && !(mask & (1 << i))) {
2022 if (curlun)
2023 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
2024 return -EINVAL;
2028 /* If the medium isn't mounted and the command needs to access
2029 * it, return an error. */
2030 if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
2031 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
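/*
 * Editorial worked example (illustrative only): the "mask" argument
 * marks which CDB bytes besides byte 0 may legitimately be non-zero.
 * For READ(10) the caller below passes
 *
 *	mask = (1<<1) | (0xf<<2) | (3<<7);
 *
 * which permits byte 1 (flag bits), bytes 2-5 (the big-endian logical
 * block address), and bytes 7-8 (the transfer length).  Any other
 * non-zero byte trips the loop above and the command fails with
 * SS_INVALID_FIELD_IN_CDB.
 */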
2039 static int do_scsi_command(struct fsg_dev *fsg)
2041 struct fsg_buffhd *bh;
2043 int reply = -EINVAL;
2045 static char unknown[16];
2049 /* Wait for the next buffer to become available for data or status */
2050 bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
2051 while (bh->state != BUF_STATE_EMPTY) {
2052 rc = sleep_thread(fsg);
2056 fsg->phase_error = 0;
2057 fsg->short_packet_received = 0;
2059 down_read(&fsg->filesem); // We're using the backing file
2060 switch (fsg->cmnd[0]) {
2063 fsg->data_size_from_cmnd = fsg->cmnd[4];
2064 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2067 reply = do_inquiry(fsg, bh);
2070 case SC_MODE_SELECT_6:
2071 fsg->data_size_from_cmnd = fsg->cmnd[4];
2072 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
2074 "MODE SELECT(6)")) == 0)
2075 reply = do_mode_select(fsg, bh);
2078 case SC_MODE_SELECT_10:
2079 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2080 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
2082 "MODE SELECT(10)")) == 0)
2083 reply = do_mode_select(fsg, bh);
2086 case SC_MODE_SENSE_6:
2087 fsg->data_size_from_cmnd = fsg->cmnd[4];
2088 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2089 (1<<1) | (1<<2) | (1<<4), 0,
2090 "MODE SENSE(6)")) == 0)
2091 reply = do_mode_sense(fsg, bh);
2094 case SC_MODE_SENSE_10:
2095 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2096 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2097 (1<<1) | (1<<2) | (3<<7), 0,
2098 "MODE SENSE(10)")) == 0)
2099 reply = do_mode_sense(fsg, bh);
2102 case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
2103 fsg->data_size_from_cmnd = 0;
2104 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
2106 "PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
2107 reply = do_prevent_allow(fsg);
2110 case SC_READ_6:
2111 i = fsg->cmnd[4];
2112 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2113 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2116 reply = do_read(fsg);
2120 fsg->data_size_from_cmnd =
2121 get_unaligned_be16(&fsg->cmnd[7]) << 9;
2122 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2123 (1<<1) | (0xf<<2) | (3<<7), 1,
2125 reply = do_read(fsg);
2129 fsg->data_size_from_cmnd =
2130 get_unaligned_be32(&fsg->cmnd[6]) << 9;
2131 if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
2132 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2134 reply = do_read(fsg);
2137 case SC_READ_CAPACITY:
2138 fsg->data_size_from_cmnd = 8;
2139 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2140 (0xf<<2) | (1<<8), 1,
2141 "READ CAPACITY")) == 0)
2142 reply = do_read_capacity(fsg, bh);
2145 case SC_READ_HEADER:
2146 if (!mod_data.cdrom)
2148 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2149 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2150 (3<<7) | (0x1f<<1), 1,
2151 "READ HEADER")) == 0)
2152 reply = do_read_header(fsg, bh);
2156 if (!mod_data.cdrom)
2158 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2159 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2162 reply = do_read_toc(fsg, bh);
2165 case SC_READ_FORMAT_CAPACITIES:
2166 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2167 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2169 "READ FORMAT CAPACITIES")) == 0)
2170 reply = do_read_format_capacities(fsg, bh);
2173 case SC_REQUEST_SENSE:
2174 fsg->data_size_from_cmnd = fsg->cmnd[4];
2175 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2177 "REQUEST SENSE")) == 0)
2178 reply = do_request_sense(fsg, bh);
2181 case SC_START_STOP_UNIT:
2182 fsg->data_size_from_cmnd = 0;
2183 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
2185 "START-STOP UNIT")) == 0)
2186 reply = do_start_stop(fsg);
2189 case SC_SYNCHRONIZE_CACHE:
2190 fsg->data_size_from_cmnd = 0;
2191 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
2192 (0xf<<2) | (3<<7), 1,
2193 "SYNCHRONIZE CACHE")) == 0)
2194 reply = do_synchronize_cache(fsg);
2197 case SC_TEST_UNIT_READY:
2198 fsg->data_size_from_cmnd = 0;
2199 reply = check_command(fsg, 6, DATA_DIR_NONE,
2204 /* Although optional, this command is used by MS-Windows. We
2205 * support a minimal version: BytChk must be 0. */
2207 fsg->data_size_from_cmnd = 0;
2208 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
2209 (1<<1) | (0xf<<2) | (3<<7), 1,
2211 reply = do_verify(fsg);
2214 case SC_WRITE_6:
2215 i = fsg->cmnd[4];
2216 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2217 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
2220 reply = do_write(fsg);
2224 fsg->data_size_from_cmnd =
2225 get_unaligned_be16(&fsg->cmnd[7]) << 9;
2226 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
2227 (1<<1) | (0xf<<2) | (3<<7), 1,
2229 reply = do_write(fsg);
2233 fsg->data_size_from_cmnd =
2234 get_unaligned_be32(&fsg->cmnd[6]) << 9;
2235 if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
2236 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2238 reply = do_write(fsg);
2241 /* Some mandatory commands that we recognize but don't implement.
2242 * They don't mean much in this setting. It's left as an exercise
2243 * for anyone interested to implement RESERVE and RELEASE in terms
2244 * of Posix locks. */
2245 case SC_FORMAT_UNIT:
2248 case SC_SEND_DIAGNOSTIC:
2253 fsg->data_size_from_cmnd = 0;
2254 sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
2255 if ((reply = check_command(fsg, fsg->cmnd_size,
2256 DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
2257 fsg->curlun->sense_data = SS_INVALID_COMMAND;
2262 up_read(&fsg->filesem);
2264 if (reply == -EINTR || signal_pending(current))
2265 return -EINTR;
2267 /* Set up the single reply buffer for finish_reply() */
2268 if (reply == -EINVAL)
2269 reply = 0; // Error reply length
2270 if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
2271 reply = min((u32) reply, fsg->data_size_from_cmnd);
2272 bh->inreq->length = reply;
2273 bh->state = BUF_STATE_FULL;
2274 fsg->residue -= reply;
2275 } // Otherwise it's already set
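/*
 * Editorial worked example (illustrative only): suppose INQUIRY
 * produced 36 bytes of data but the host asked for 255 and the CBW's
 * DataTransferLength was also 255.  Then reply = min(36, 255) = 36,
 * the buffer is marked full with inreq->length = 36, and residue
 * drops by 36; finish_reply() will later report the 219 bytes that
 * were never sent in the CSW's Residue field.
 */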
2281 /*-------------------------------------------------------------------------*/
2283 static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2285 struct usb_request *req = bh->outreq;
2286 struct fsg_bulk_cb_wrap *cbw = req->buf;
2288 /* Was this a real packet? Should it be ignored? */
2289 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2292 /* Is the CBW valid? */
2293 if (req->actual != USB_BULK_CB_WRAP_LEN ||
2294 cbw->Signature != cpu_to_le32(
2295 USB_BULK_CB_SIG)) {
2296 DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2297 req->actual,
2298 le32_to_cpu(cbw->Signature));
2300 /* The Bulk-only spec says we MUST stall the IN endpoint
2301 * (6.6.1), so it's unavoidable. It also says we must
2302 * retain this state until the next reset, but there's
2303 * no way to tell the controller driver it should ignore
2304 * Clear-Feature(HALT) requests.
2306 * We aren't required to halt the OUT endpoint; instead
2307 * we can simply accept and discard any data received
2308 * until the next reset. */
2309 wedge_bulk_in_endpoint(fsg);
2310 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2314 /* Is the CBW meaningful? */
2315 if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
2316 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
2317 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2318 "cmdlen %u\n",
2319 cbw->Lun, cbw->Flags, cbw->Length);
2321 /* We can do anything we want here, so let's stall the
2322 * bulk pipes if we are allowed to. */
2323 if (mod_data.can_stall) {
2324 fsg_set_halt(fsg, fsg->bulk_out);
2325 halt_bulk_in_endpoint(fsg);
2330 /* Save the command for later */
2331 fsg->cmnd_size = cbw->Length;
2332 memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
2333 if (cbw->Flags & USB_BULK_IN_FLAG)
2334 fsg->data_dir = DATA_DIR_TO_HOST;
2335 else
2336 fsg->data_dir = DATA_DIR_FROM_HOST;
2337 fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
2338 if (fsg->data_size == 0)
2339 fsg->data_dir = DATA_DIR_NONE;
2340 fsg->lun = cbw->Lun;
2341 fsg->tag = cbw->Tag;
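/*
 * Editorial sketch (not part of the original source): the 31-byte
 * Command Block Wrapper parsed above has this layout, per the
 * Bulk-Only Transport spec; the real struct fsg_bulk_cb_wrap is
 * defined elsewhere in the driver, so the types here are illustrative:
 *
 *	struct fsg_bulk_cb_wrap {
 *		__le32	Signature;		// 'USBC' (USB_BULK_CB_SIG)
 *		__le32	Tag;			// echoed back in the CSW
 *		__le32	DataTransferLength;	// length of the data stage
 *		u8	Flags;			// bit 7 set = device-to-host
 *		u8	Lun;			// target logical unit
 *		u8	Length;			// valid bytes in CDB[] (1-16)
 *		u8	CDB[16];		// the SCSI command block
 *	};
 */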
2346 static int get_next_command(struct fsg_dev *fsg)
2348 struct fsg_buffhd *bh;
2351 /* Wait for the next buffer to become available */
2352 bh = fsg->next_buffhd_to_fill;
2353 while (bh->state != BUF_STATE_EMPTY) {
2354 rc = sleep_thread(fsg);
2359 /* Queue a request to read a Bulk-only CBW */
2360 set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
2361 bh->outreq->short_not_ok = 1;
2362 start_transfer(fsg, fsg->bulk_out, bh->outreq,
2363 &bh->outreq_busy, &bh->state);
2365 /* We will drain the buffer in software, which means we
2366 * can reuse it for the next filling. No need to advance
2367 * next_buffhd_to_fill. */
2369 /* Wait for the CBW to arrive */
2370 while (bh->state != BUF_STATE_FULL) {
2371 rc = sleep_thread(fsg);
2376 rc = received_cbw(fsg, bh);
2377 bh->state = BUF_STATE_EMPTY;
2383 /*-------------------------------------------------------------------------*/
2385 static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
2386 const struct usb_endpoint_descriptor *d)
2390 ep->driver_data = fsg;
2391 rc = usb_ep_enable(ep, d);
2392 if (rc)
2393 ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
2394 return rc;
2397 static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
2398 struct usb_request **preq)
2400 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2401 if (*preq)
2402 return 0;
2403 ERROR(fsg, "can't allocate request for %s\n", ep->name);
2404 return -ENOMEM;
2408 * Reset interface setting and re-init endpoint state (toggle etc).
2409 * Call with altsetting < 0 to disable the interface. The only other
2410 * available altsetting is 0, which enables the interface.
2412 static int do_set_interface(struct fsg_dev *fsg, int altsetting)
2416 const struct usb_endpoint_descriptor *d;
2419 DBG(fsg, "reset interface\n");
2422 /* Deallocate the requests */
2423 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2424 struct fsg_buffhd *bh = &fsg->buffhds[i];
2427 usb_ep_free_request(fsg->bulk_in, bh->inreq);
2431 usb_ep_free_request(fsg->bulk_out, bh->outreq);
2436 /* Disable the endpoints */
2437 if (fsg->bulk_in_enabled) {
2438 usb_ep_disable(fsg->bulk_in);
2439 fsg->bulk_in_enabled = 0;
2441 if (fsg->bulk_out_enabled) {
2442 usb_ep_disable(fsg->bulk_out);
2443 fsg->bulk_out_enabled = 0;
2447 if (altsetting < 0 || rc != 0)
2448 return rc;
2450 DBG(fsg, "set interface %d\n", altsetting);
2452 /* Enable the endpoints */
2453 d = fsg_ep_desc(fsg->gadget,
2454 &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
2455 if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
2457 fsg->bulk_in_enabled = 1;
2459 d = fsg_ep_desc(fsg->gadget,
2460 &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
2461 if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
2463 fsg->bulk_out_enabled = 1;
2464 fsg->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
2465 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2467 /* Allocate the requests */
2468 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2469 struct fsg_buffhd *bh = &fsg->buffhds[i];
2471 if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
2473 if ((rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq)) != 0)
2475 bh->inreq->buf = bh->outreq->buf = bh->buf;
2476 bh->inreq->context = bh->outreq->context = bh;
2477 bh->inreq->complete = bulk_in_complete;
2478 bh->outreq->complete = bulk_out_complete;
2482 for (i = 0; i < fsg->nluns; ++i)
2483 fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
2489 * Change our operational configuration. This code must agree with the code
2490 * that returns config descriptors, and with interface altsetting code.
2492 * It's also responsible for power management interactions. Some
2493 * configurations might not work with our current power sources.
2494 * For now we just assume the gadget is always self-powered.
2496 static int do_set_config(struct fsg_dev *fsg, u8 new_config)
2500 /* Disable the single interface */
2501 if (fsg->config != 0) {
2502 DBG(fsg, "reset config\n");
2504 rc = do_set_interface(fsg, -1);
2507 /* Enable the interface */
2508 if (new_config != 0) {
2509 fsg->config = new_config;
2510 if ((rc = do_set_interface(fsg, 0)) != 0)
2511 fsg->config = 0; // Reset on errors
2515 switch (fsg->gadget->speed) {
2516 case USB_SPEED_LOW: speed = "low"; break;
2517 case USB_SPEED_FULL: speed = "full"; break;
2518 case USB_SPEED_HIGH: speed = "high"; break;
2519 default: speed = "?"; break;
2521 INFO(fsg, "%s speed config #%d\n", speed, fsg->config);
2528 /*-------------------------------------------------------------------------*/
2530 static void handle_exception(struct fsg_dev *fsg)
2536 struct fsg_buffhd *bh;
2537 enum fsg_state old_state;
2539 struct fsg_lun *curlun;
2540 unsigned int exception_req_tag;
2543 /* Clear the existing signals. Anything but SIGUSR1 is converted
2544 * into a high-priority EXIT exception. */
2546 sig = dequeue_signal_lock(current, &current->blocked, &info);
2549 if (sig != SIGUSR1) {
2550 if (fsg->state < FSG_STATE_EXIT)
2551 DBG(fsg, "Main thread exiting on signal\n");
2552 raise_exception(fsg, FSG_STATE_EXIT);
2556 /* Cancel all the pending transfers */
2557 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2558 bh = &fsg->buffhds[i];
2559 if (bh->inreq_busy)
2560 usb_ep_dequeue(fsg->bulk_in, bh->inreq);
2561 if (bh->outreq_busy)
2562 usb_ep_dequeue(fsg->bulk_out, bh->outreq);
2565 /* Wait until everything is idle */
2568 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2569 bh = &fsg->buffhds[i];
2570 num_active += bh->inreq_busy + bh->outreq_busy;
2572 if (num_active == 0)
2573 break;
2574 if (sleep_thread(fsg))
2575 return;
2578 /* Clear out the controller's fifos */
2579 if (fsg->bulk_in_enabled)
2580 usb_ep_fifo_flush(fsg->bulk_in);
2581 if (fsg->bulk_out_enabled)
2582 usb_ep_fifo_flush(fsg->bulk_out);
2584 /* Reset the I/O buffer states and pointers, the SCSI
2585 * state, and the exception. Then invoke the handler. */
2586 spin_lock_irq(&fsg->lock);
2588 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2589 bh = &fsg->buffhds[i];
2590 bh->state = BUF_STATE_EMPTY;
2592 fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
2593 &fsg->buffhds[0];
2595 exception_req_tag = fsg->exception_req_tag;
2596 new_config = fsg->new_config;
2597 old_state = fsg->state;
2599 if (old_state == FSG_STATE_ABORT_BULK_OUT)
2600 fsg->state = FSG_STATE_STATUS_PHASE;
2602 for (i = 0; i < fsg->nluns; ++i) {
2603 curlun = &fsg->luns[i];
2604 curlun->prevent_medium_removal = 0;
2605 curlun->sense_data = curlun->unit_attention_data =
2606 SS_NO_SENSE;
2607 curlun->sense_data_info = 0;
2608 curlun->info_valid = 0;
2610 fsg->state = FSG_STATE_IDLE;
2612 spin_unlock_irq(&fsg->lock);
2614 /* Carry out any extra actions required for the exception */
2615 switch (old_state) {
2619 case FSG_STATE_ABORT_BULK_OUT:
2620 send_status(fsg);
2621 spin_lock_irq(&fsg->lock);
2622 if (fsg->state == FSG_STATE_STATUS_PHASE)
2623 fsg->state = FSG_STATE_IDLE;
2624 spin_unlock_irq(&fsg->lock);
2627 case FSG_STATE_RESET:
2628 /* In case we were forced against our will to halt a
2629 * bulk endpoint, clear the halt now. (The SuperH UDC
2630 * requires this.) */
2631 if (test_and_clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2632 usb_ep_clear_halt(fsg->bulk_in);
2634 if (fsg->ep0_req_tag == exception_req_tag)
2635 ep0_queue(fsg); // Complete the status stage
2637 /* Technically this should go here, but it would only be
2638 * a waste of time. Ditto for the INTERFACE_CHANGE and
2639 * CONFIG_CHANGE cases. */
2640 // for (i = 0; i < fsg->nluns; ++i)
2641 // fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
2644 case FSG_STATE_INTERFACE_CHANGE:
2645 rc = do_set_interface(fsg, 0);
2646 if (fsg->ep0_req_tag != exception_req_tag)
2647 break;
2648 if (rc != 0) // STALL on errors
2649 fsg_set_halt(fsg, fsg->ep0);
2650 else // Complete the status stage
2651 ep0_queue(fsg);
2652 break;
2654 case FSG_STATE_CONFIG_CHANGE:
2655 rc = do_set_config(fsg, new_config);
2656 if (fsg->ep0_req_tag != exception_req_tag)
2657 break;
2658 if (rc != 0) // STALL on errors
2659 fsg_set_halt(fsg, fsg->ep0);
2660 else // Complete the status stage
2661 ep0_queue(fsg);
2662 break;
2664 case FSG_STATE_DISCONNECT:
2665 for (i = 0; i < fsg->nluns; ++i)
2666 fsg_lun_fsync_sub(fsg->luns + i);
2667 do_set_config(fsg, 0); // Unconfigured state
2670 case FSG_STATE_EXIT:
2671 case FSG_STATE_TERMINATED:
2672 do_set_config(fsg, 0); // Free resources
2673 spin_lock_irq(&fsg->lock);
2674 fsg->state = FSG_STATE_TERMINATED; // Stop the thread
2675 spin_unlock_irq(&fsg->lock);
2681 /*-------------------------------------------------------------------------*/
2683 static int fsg_main_thread(void *fsg_)
2685 struct fsg_dev *fsg = fsg_;
2687 /* Allow the thread to be killed by a signal, but set the signal mask
2688 * to block everything but INT, TERM, KILL, and USR1. */
2689 allow_signal(SIGINT);
2690 allow_signal(SIGTERM);
2691 allow_signal(SIGKILL);
2692 allow_signal(SIGUSR1);
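/*
 * Editorial note: SIGUSR1 is the signal raise_exception() uses to kick
 * this thread out of a blocking wait when an exception must be handled.
 * A minimal sketch of that idea (illustrative, not the driver's actual
 * code) would be:
 *
 *	if (fsg->thread_task)
 *		send_sig_info(SIGUSR1, SEND_SIG_FORCED, fsg->thread_task);
 */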
2694 /* Allow the thread to be frozen */
2697 /* Arrange for userspace references to be interpreted as kernel
2698 * pointers. That way we can pass a kernel pointer to a routine
2699 * that expects a __user pointer and it will work okay. */
2700 set_fs(get_ds());
2703 while (fsg->state != FSG_STATE_TERMINATED) {
2704 if (exception_in_progress(fsg) || signal_pending(current)) {
2705 handle_exception(fsg);
2709 if (!fsg->running) {
2710 sleep_thread(fsg);
2711 continue;
2712 }
2714 if (get_next_command(fsg))
2715 continue;
2717 spin_lock_irq(&fsg->lock);
2718 if (!exception_in_progress(fsg))
2719 fsg->state = FSG_STATE_DATA_PHASE;
2720 spin_unlock_irq(&fsg->lock);
2722 if (do_scsi_command(fsg) || finish_reply(fsg))
2723 continue;
2725 spin_lock_irq(&fsg->lock);
2726 if (!exception_in_progress(fsg))
2727 fsg->state = FSG_STATE_STATUS_PHASE;
2728 spin_unlock_irq(&fsg->lock);
2730 if (send_status(fsg))
2731 continue;
2733 spin_lock_irq(&fsg->lock);
2734 if (!exception_in_progress(fsg))
2735 fsg->state = FSG_STATE_IDLE;
2736 spin_unlock_irq(&fsg->lock);
2739 spin_lock_irq(&fsg->lock);
2740 fsg->thread_task = NULL;
2741 spin_unlock_irq(&fsg->lock);
2743 /* If we are exiting because of a signal, unregister the
2744 * gadget driver. */
2745 if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
2746 usb_gadget_unregister_driver(&fsg_driver);
2748 /* Let the unbind and cleanup routines know the thread has exited */
2749 complete_and_exit(&fsg->thread_notifier, 0);
2753 /*-------------------------------------------------------------------------*/
2756 /* The write permissions and store_xxx pointers are set in fsg_bind() */
2757 static DEVICE_ATTR(ro, 0444, fsg_show_ro, NULL);
2758 static DEVICE_ATTR(file, 0444, fsg_show_file, NULL);
2761 /*-------------------------------------------------------------------------*/
2763 static void fsg_release(struct kref *ref)
2765 struct fsg_dev *fsg = container_of(ref, struct fsg_dev, ref);
2767 kfree(fsg->luns);
2768 kfree(fsg);
2771 static void lun_release(struct device *dev)
2773 struct rw_semaphore *filesem = dev_get_drvdata(dev);
2774 struct fsg_dev *fsg =
2775 container_of(filesem, struct fsg_dev, filesem);
2777 kref_put(&fsg->ref, fsg_release);
2780 static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget)
2782 struct fsg_dev *fsg = get_gadget_data(gadget);
2784 struct fsg_lun *curlun;
2785 struct usb_request *req = fsg->ep0req;
2787 DBG(fsg, "unbind\n");
2788 clear_bit(REGISTERED, &fsg->atomic_bitflags);
2790 /* Unregister the sysfs attribute files and the LUNs */
2791 for (i = 0; i < fsg->nluns; ++i) {
2792 curlun = &fsg->luns[i];
2793 if (curlun->registered) {
2794 device_remove_file(&curlun->dev, &dev_attr_ro);
2795 device_remove_file(&curlun->dev, &dev_attr_file);
2796 fsg_lun_close(curlun);
2797 device_unregister(&curlun->dev);
2798 curlun->registered = 0;
2802 /* If the thread isn't already dead, tell it to exit now */
2803 if (fsg->state != FSG_STATE_TERMINATED) {
2804 raise_exception(fsg, FSG_STATE_EXIT);
2805 wait_for_completion(&fsg->thread_notifier);
2807 /* The cleanup routine waits for this completion also */
2808 complete(&fsg->thread_notifier);
2811 /* Free the data buffers */
2812 for (i = 0; i < FSG_NUM_BUFFERS; ++i)
2813 kfree(fsg->buffhds[i].buf);
2815 /* Free the request and buffer for endpoint 0 */
2818 usb_ep_free_request(fsg->ep0, req);
2821 set_gadget_data(gadget, NULL);
2825 static int __init check_parameters(struct fsg_dev *fsg)
2829 /* Some peripheral controllers are known not to be able to
2830 * halt bulk endpoints correctly. If one of them is present,
2831 * disable stalls.
2832 */
2833 if (gadget_is_sh(fsg->gadget) || gadget_is_at91(fsg->gadget))
2834 mod_data.can_stall = 0;
2836 if (mod_data.release == 0xffff) { // Parameter wasn't set
2837 /* The sa1100 controller is not supported */
2838 if (gadget_is_sa1100(fsg->gadget))
2841 gcnum = usb_gadget_controller_number(fsg->gadget);
2842 if (gcnum >= 0)
2843 mod_data.release = 0x0300 + gcnum;
2844 else {
2845 WARNING(fsg, "controller '%s' not recognized\n",
2846 fsg->gadget->name);
2847 mod_data.release = 0x0399;
2848 }
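/*
 * Editorial example (illustrative only): if
 * usb_gadget_controller_number() reported, say, 0x02 for the controller
 * in use, the default bcdDevice value would become 0x0302; an
 * unrecognized controller falls back to 0x0399.
 */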
2855 static int __init fsg_bind(struct usb_gadget *gadget)
2857 struct fsg_dev *fsg = the_fsg;
2860 struct fsg_lun *curlun;
2862 struct usb_request *req;
2865 fsg->gadget = gadget;
2866 set_gadget_data(gadget, fsg);
2867 fsg->ep0 = gadget->ep0;
2868 fsg->ep0->driver_data = fsg;
2870 if ((rc = check_parameters(fsg)) != 0)
2871 goto out;
2873 if (mod_data.removable) { // Enable the store_xxx attributes
2874 dev_attr_file.attr.mode = 0644;
2875 dev_attr_file.store = fsg_store_file;
2876 if (!mod_data.cdrom) {
2877 dev_attr_ro.attr.mode = 0644;
2878 dev_attr_ro.store = fsg_store_ro;
2882 /* Find out how many LUNs there should be */
2885 i = max(mod_data.num_filenames, 1u);
2886 if (i > FSG_MAX_LUNS) {
2887 ERROR(fsg, "invalid number of LUNs: %d\n", i);
2892 /* Create the LUNs, open their backing files, and register the
2893 * LUN devices in sysfs. */
2894 fsg->luns = kzalloc(i * sizeof(struct fsg_lun), GFP_KERNEL);
2895 if (!fsg->luns) {
2896 rc = -ENOMEM;
2897 goto out;
2898 }
2899 fsg->nluns = i;
2901 for (i = 0; i < fsg->nluns; ++i) {
2902 curlun = &fsg->luns[i];
2903 curlun->cdrom = !!mod_data.cdrom;
2904 curlun->ro = mod_data.cdrom || mod_data.ro[i];
2905 curlun->initially_ro = curlun->ro;
2906 curlun->removable = mod_data.removable;
2907 curlun->dev.release = lun_release;
2908 curlun->dev.parent = &gadget->dev;
2909 curlun->dev.driver = &fsg_driver.driver;
2910 dev_set_drvdata(&curlun->dev, &fsg->filesem);
2911 dev_set_name(&curlun->dev,"%s-lun%d",
2912 dev_name(&gadget->dev), i);
2914 if ((rc = device_register(&curlun->dev)) != 0) {
2915 INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
2918 if ((rc = device_create_file(&curlun->dev,
2919 &dev_attr_ro)) != 0 ||
2920 (rc = device_create_file(&curlun->dev,
2921 &dev_attr_file)) != 0) {
2922 device_unregister(&curlun->dev);
2925 curlun->registered = 1;
2926 kref_get(&fsg->ref);
2928 if (mod_data.file[i] && *mod_data.file[i]) {
2929 if ((rc = fsg_lun_open(curlun,
2930 mod_data.file[i])) != 0)
2932 } else if (!mod_data.removable) {
2933 ERROR(fsg, "no file given for LUN%d\n", i);
2939 /* Find all the endpoints we will use */
2940 usb_ep_autoconfig_reset(gadget);
2941 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
2944 ep->driver_data = fsg; // claim the endpoint
2945 fsg->bulk_in = ep;
2947 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
2950 ep->driver_data = fsg; // claim the endpoint
2951 fsg->bulk_out = ep;
2953 /* Fix up the descriptors */
2954 device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket;
2955 device_desc.bcdDevice = cpu_to_le16(mod_data.release);
2957 if (gadget_is_dualspeed(gadget)) {
2958 /* Assume ep0 uses the same maxpacket value for both speeds */
2959 dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;
2961 /* Assume endpoint addresses are the same for both speeds */
2962 fsg_hs_bulk_in_desc.bEndpointAddress =
2963 fsg_fs_bulk_in_desc.bEndpointAddress;
2964 fsg_hs_bulk_out_desc.bEndpointAddress =
2965 fsg_fs_bulk_out_desc.bEndpointAddress;
2968 if (gadget_is_otg(gadget))
2969 fsg_otg_desc.bmAttributes |= USB_OTG_HNP;
2973 /* Allocate the request and buffer for endpoint 0 */
2974 fsg->ep0req = req = usb_ep_alloc_request(fsg->ep0, GFP_KERNEL);
2977 req->buf = kmalloc(EP0_BUFSIZE, GFP_KERNEL);
2980 req->complete = ep0_complete;
2982 /* Allocate the data buffers */
2983 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2984 struct fsg_buffhd *bh = &fsg->buffhds[i];
2986 /* Allocate for the bulk-in endpoint. We assume that
2987 * the buffer will also work with the bulk-out (and
2988 * interrupt-in) endpoint. */
2989 bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
2990 if (!bh->buf)
2991 goto out;
2992 bh->next = bh + 1;
2994 fsg->buffhds[FSG_NUM_BUFFERS - 1].next = &fsg->buffhds[0];
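/*
 * Editorial note (illustrative, assuming the usual two buffers): with
 * the per-buffer "bh->next = bh + 1" assignments above, the statement
 * just above closes the buffer heads into a ring:
 *
 *	buffhds[0].next == &buffhds[1];
 *	buffhds[1].next == &buffhds[0];
 *
 * The main thread double-buffers simply by walking this ring, e.g.
 * "fsg->next_buffhd_to_fill = bh->next" after queueing each transfer.
 */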
2996 /* This should reflect the actual gadget power source */
2997 usb_gadget_set_selfpowered(gadget);
2999 snprintf(fsg_string_manufacturer, sizeof fsg_string_manufacturer,
3000 "%s %s with %s",
3001 init_utsname()->sysname, init_utsname()->release,
3002 gadget->name);
3004 /* On a real device, serial[] would be loaded from permanent
3005 * storage. We just encode it from the driver version string. */
3006 for (i = 0; i < sizeof fsg_string_serial - 2; i += 2) {
3007 unsigned char c = DRIVER_VERSION[i / 2];
3011 sprintf(&fsg_string_serial[i], "%02X", c);
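/*
 * Editorial example (illustrative only): a version string beginning
 * "2.0" would yield a serial number beginning "322E30", since each
 * character is emitted as two uppercase hex digits.
 */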
3014 fsg->thread_task = kthread_create(fsg_main_thread, fsg,
3015 "file-storage-gadget");
3016 if (IS_ERR(fsg->thread_task)) {