f_mtp.c

/*
 * Gadget Function Driver for MTP
 *
 * Copyright (C) 2010 Google, Inc.
 * Author: Mike Lockwood <lockwood@android.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* #define DEBUG */
/* #define VERBOSE_DEBUG */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/interrupt.h>

#include <linux/types.h>
#include <linux/file.h>
#include <linux/device.h>
#include <linux/miscdevice.h>

#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/usb/ch9.h>
#include <linux/usb/f_mtp.h>

#define MTP_BULK_BUFFER_SIZE	16384
#define INTR_BUFFER_SIZE	28

/* String IDs */
#define INTERFACE_STRING_INDEX	0

/* values for mtp_dev.state */
#define STATE_OFFLINE	0	/* initial state, disconnected */
#define STATE_READY	1	/* ready for userspace calls */
#define STATE_BUSY	2	/* processing userspace calls */
#define STATE_CANCELED	3	/* transaction canceled by host */
#define STATE_ERROR	4	/* error from completion routine */

/* number of tx and rx requests to allocate */
#define TX_REQ_MAX	4
#define RX_REQ_MAX	2
#define INTR_REQ_MAX	5

/* ID for Microsoft MTP OS String */
#define MTP_OS_STRING_ID	0xEE
/* MTP class requests */
#define MTP_REQ_CANCEL			0x64
#define MTP_REQ_GET_EXT_EVENT_DATA	0x65
#define MTP_REQ_RESET			0x66
#define MTP_REQ_GET_DEVICE_STATUS	0x67

/* constants for device status */
#define MTP_RESPONSE_OK			0x2001
#define MTP_RESPONSE_DEVICE_BUSY	0x2019

static const char mtp_shortname[] = "mtp_usb";

struct mtp_dev {
	struct usb_function function;
	struct usb_composite_dev *cdev;
	spinlock_t lock;

	struct usb_ep *ep_in;
	struct usb_ep *ep_out;
	struct usb_ep *ep_intr;

	int state;

	/* synchronize access to our device file */
	atomic_t open_excl;
	/* to enforce only one ioctl at a time */
	atomic_t ioctl_excl;

	struct list_head tx_idle;
	struct list_head intr_idle;

	wait_queue_head_t read_wq;
	wait_queue_head_t write_wq;
	wait_queue_head_t intr_wq;
	struct usb_request *rx_req[RX_REQ_MAX];
	int rx_done;

	/* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
	 * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
	 */
	struct workqueue_struct *wq;
	struct work_struct send_file_work;
	struct work_struct receive_file_work;
	struct file *xfer_file;
	loff_t xfer_file_offset;
	int64_t xfer_file_length;
	unsigned xfer_send_header;
	uint16_t xfer_command;
	uint32_t xfer_transaction_id;
	int xfer_result;
};

static struct usb_interface_descriptor mtp_interface_desc = {
	.bLength		= USB_DT_INTERFACE_SIZE,
	.bDescriptorType	= USB_DT_INTERFACE,
	.bInterfaceNumber	= 0,
	.bNumEndpoints		= 3,
	.bInterfaceClass	= USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass	= USB_SUBCLASS_VENDOR_SPEC,
	.bInterfaceProtocol	= 0,
};

static struct usb_interface_descriptor ptp_interface_desc = {
	.bLength		= USB_DT_INTERFACE_SIZE,
	.bDescriptorType	= USB_DT_INTERFACE,
	.bInterfaceNumber	= 0,
	.bNumEndpoints		= 3,
	.bInterfaceClass	= USB_CLASS_STILL_IMAGE,
	.bInterfaceSubClass	= 1,
	.bInterfaceProtocol	= 1,
};

static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize		= __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_OUT,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize		= __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_OUT,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor mtp_intr_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN,
	.bmAttributes		= USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize		= __constant_cpu_to_le16(INTR_BUFFER_SIZE),
	.bInterval		= 6,
};

static struct usb_descriptor_header *fs_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *hs_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *fs_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *hs_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_string mtp_string_defs[] = {
	/* Naming interface "MTP" so libmtp will recognize us */
	[INTERFACE_STRING_INDEX].s	= "MTP",
	{  },	/* end of list */
};

static struct usb_gadget_strings mtp_string_table = {
	.language	= 0x0409,	/* en-US */
	.strings	= mtp_string_defs,
};

static struct usb_gadget_strings *mtp_strings[] = {
	&mtp_string_table,
	NULL,
};

/* Microsoft MTP OS String */
static u8 mtp_os_string[] = {
	18, /* sizeof(mtp_os_string) */
	USB_DT_STRING,
	/* Signature field: "MSFT100" */
	'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
	/* vendor code */
	1,
	/* padding */
	0
};
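
/* How the host discovers us: a Windows host sends a standard
 * GET_DESCRIPTOR(string) request for the reserved index 0xEE. The
 * "MSFT100" signature above tells it that we support Microsoft OS
 * descriptors, and the vendor code (1) is the bRequest value it will
 * use to fetch the extended configuration descriptor below. A rough
 * host-side sketch of that probe (libusb-style, illustrative only and
 * not part of this driver):
 *
 *	unsigned char buf[18];
 *	// GET_DESCRIPTOR, type = string (0x03), index = 0xEE
 *	libusb_control_transfer(handle,
 *		LIBUSB_ENDPOINT_IN | LIBUSB_REQUEST_TYPE_STANDARD,
 *		LIBUSB_REQUEST_GET_DESCRIPTOR,
 *		(LIBUSB_DT_STRING << 8) | 0xEE, 0, buf, sizeof(buf), 1000);
 *	// buf[16] now holds the vendor code (1 here)
 */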
/* Microsoft Extended Configuration Descriptor Header Section */
struct mtp_ext_config_desc_header {
	__le32	dwLength;
	__u16	bcdVersion;
	__le16	wIndex;
	__u8	bCount;
	__u8	reserved[7];
};

/* Microsoft Extended Configuration Descriptor Function Section */
struct mtp_ext_config_desc_function {
	__u8	bFirstInterfaceNumber;
	__u8	bInterfaceCount;
	__u8	compatibleID[8];
	__u8	subCompatibleID[8];
	__u8	reserved[6];
};

/* MTP Extended Configuration Descriptor */
struct {
	struct mtp_ext_config_desc_header	header;
	struct mtp_ext_config_desc_function	function;
} mtp_ext_config_desc = {
	.header = {
		.dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
		.bcdVersion = __constant_cpu_to_le16(0x0100),
		.wIndex = __constant_cpu_to_le16(4),
		.bCount = 1,
	},
	.function = {
		.bFirstInterfaceNumber = 0,
		.bInterfaceCount = 1,
		.compatibleID = { 'M', 'T', 'P' },
	},
};
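
/* With compatibleID "MTP", Windows binds its in-box MTP class driver to
 * this interface even though the interface class is vendor-specific. On
 * the wire the whole blob is 16 + 24 = 40 little-endian bytes, fetched
 * with a vendor request (bRequest == 1, wIndex == 4) that is handled in
 * mtp_ctrlrequest() below.
 */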
struct mtp_device_status {
	__le16	wLength;
	__le16	wCode;
};

/* temporary variable used between mtp_open() and mtp_function_bind() */
static struct mtp_dev *_mtp_dev;

static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
{
	return container_of(f, struct mtp_dev, function);
}

static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
{
	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
	if (!req)
		return NULL;

	/* now allocate buffers for the requests */
	req->buf = kmalloc(buffer_size, GFP_KERNEL);
	if (!req->buf) {
		usb_ep_free_request(ep, req);
		return NULL;
	}

	return req;
}

static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
{
	if (req) {
		kfree(req->buf);
		usb_ep_free_request(ep, req);
	}
}

static inline int mtp_lock(atomic_t *excl)
{
	if (atomic_inc_return(excl) == 1) {
		return 0;
	} else {
		atomic_dec(excl);
		return -1;
	}
}

static inline void mtp_unlock(atomic_t *excl)
{
	atomic_dec(excl);
}
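
/* mtp_lock()/mtp_unlock() implement a simple atomic trylock: the first
 * caller to bump the counter from 0 to 1 wins; everyone else backs the
 * increment out and fails immediately rather than blocking. Typical
 * usage, as in mtp_open() and mtp_ioctl() below:
 *
 *	if (mtp_lock(&dev->ioctl_excl))
 *		return -EBUSY;
 *	... critical section ...
 *	mtp_unlock(&dev->ioctl_excl);
 */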
/* add a request to the tail of a list */
static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
		struct usb_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	list_add_tail(&req->list, head);
	spin_unlock_irqrestore(&dev->lock, flags);
}

/* remove a request from the head of a list */
static struct usb_request
*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
{
	unsigned long flags;
	struct usb_request *req;

	spin_lock_irqsave(&dev->lock, flags);
	if (list_empty(head)) {
		req = NULL;
	} else {
		req = list_first_entry(head, struct usb_request, list);
		list_del(&req->list);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
	return req;
}
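
/* Together these form the request pool: writers take idle requests off
 * tx_idle/intr_idle with mtp_req_get(), queue them to the hardware, and
 * the completion callbacks below return them with mtp_req_put() and wake
 * any waiter. An empty list means all TX_REQ_MAX (or INTR_REQ_MAX)
 * requests are in flight, so callers sleep on the matching wait queue.
 */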
static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	if (req->status != 0)
		dev->state = STATE_ERROR;

	mtp_req_put(dev, &dev->tx_idle, req);

	wake_up(&dev->write_wq);
}

static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	dev->rx_done = 1;
	if (req->status != 0)
		dev->state = STATE_ERROR;

	wake_up(&dev->read_wq);
}

static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	if (req->status != 0)
		dev->state = STATE_ERROR;

	mtp_req_put(dev, &dev->intr_idle, req);

	wake_up(&dev->intr_wq);
}

static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
				struct usb_endpoint_descriptor *in_desc,
				struct usb_endpoint_descriptor *out_desc,
				struct usb_endpoint_descriptor *intr_desc)
{
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	struct usb_ep *ep;
	int i;

	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);

	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_in = ep;
	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_out = ep;
	ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_intr = ep;

	/* now allocate requests for our endpoints */
	for (i = 0; i < TX_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
		if (!req)
			goto fail;
		req->complete = mtp_complete_in;
		mtp_req_put(dev, &dev->tx_idle, req);
	}
	for (i = 0; i < RX_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
		if (!req)
			goto fail;
		req->complete = mtp_complete_out;
		dev->rx_req[i] = req;
	}
	for (i = 0; i < INTR_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
		if (!req)
			goto fail;
		req->complete = mtp_complete_intr;
		mtp_req_put(dev, &dev->intr_idle, req);
	}

	return 0;
fail:
	printk(KERN_ERR "mtp_bind() could not allocate requests\n");
	return -ENOMEM;
}
static ssize_t mtp_read(struct file *fp, char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	int r = count, xfer;
	int ret = 0;

	DBG(cdev, "mtp_read(%zu)\n", count);

	if (count > MTP_BULK_BUFFER_SIZE)
		return -EINVAL;

	/* we will block until we're online */
	DBG(cdev, "mtp_read: waiting for online state\n");
	ret = wait_event_interruptible(dev->read_wq,
		dev->state != STATE_OFFLINE);
	if (ret < 0) {
		r = ret;
		goto done;
	}
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

requeue_req:
	/* queue a request */
	req = dev->rx_req[0];
	req->length = count;
	dev->rx_done = 0;
	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
	if (ret < 0) {
		r = -EIO;
		goto done;
	} else {
		DBG(cdev, "rx %p queue\n", req);
	}

	/* wait for a request to complete */
	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
	if (ret < 0) {
		r = ret;
		usb_ep_dequeue(dev->ep_out, req);
		goto done;
	}
	if (dev->state == STATE_BUSY) {
		/* If we got a 0-len packet, throw it back and try again. */
		if (req->actual == 0)
			goto requeue_req;

		DBG(cdev, "rx %p %d\n", req, req->actual);
		xfer = (req->actual < count) ? req->actual : count;
		r = xfer;
		if (copy_to_user(buf, req->buf, xfer))
			r = -EFAULT;
	} else
		r = -EIO;

done:
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_read returning %d\n", r);
	return r;
}
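
/* Note that mtp_read() deliberately uses only rx_req[0] and runs one
 * request at a time: the read()/write() file interface carries short
 * MTP command and response containers, where latency matters more than
 * throughput. Bulk data payloads go through the MTP_RECEIVE_FILE ioctl
 * path instead, where receive_file_work() rotates through all
 * RX_REQ_MAX buffers to overlap USB and filesystem I/O.
 */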
static ssize_t mtp_write(struct file *fp, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = NULL;
	int r = count, xfer;
	int sendZLP = 0;
	int ret;

	DBG(cdev, "mtp_write(%zu)\n", count);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	if (dev->state == STATE_OFFLINE) {
		spin_unlock_irq(&dev->lock);
		return -ENODEV;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		if (dev->state != STATE_BUSY) {
			DBG(cdev, "mtp_write dev->error\n");
			r = -EIO;
			break;
		}

		/* get an idle tx request to use */
		req = NULL;
		ret = wait_event_interruptible(dev->write_wq,
			((req = mtp_req_get(dev, &dev->tx_idle))
				|| dev->state != STATE_BUSY));
		if (!req) {
			r = ret;
			break;
		}

		if (count > MTP_BULK_BUFFER_SIZE)
			xfer = MTP_BULK_BUFFER_SIZE;
		else
			xfer = count;
		if (xfer && copy_from_user(req->buf, buf, xfer)) {
			r = -EFAULT;
			break;
		}

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "mtp_write: xfer error %d\n", ret);
			r = -EIO;
			break;
		}

		buf += xfer;
		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = NULL;
	}

	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_write returning %d\n", r);
	return r;
}
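
/* The ZLP test above relies on wMaxPacketSize being a power of two
 * (64 for full-speed bulk, 512 for high-speed), so
 * "count & (maxpacket - 1)" is a cheap "count % maxpacket". For
 * example, a 1024-byte transfer on a 512-byte bulk endpoint fills
 * exactly two packets; without the trailing zero-length packet the
 * host could not tell that the transfer is finished rather than
 * merely paused mid-packet.
 */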
/* read from a local file and write to USB */
static void send_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						send_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = NULL;
	struct mtp_data_header *header;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int xfer, ret, hdr_size;
	int r = 0;
	int sendZLP = 0;

	/* read our parameters */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);

	if (dev->xfer_send_header) {
		hdr_size = sizeof(struct mtp_data_header);
		count += hdr_size;
	} else {
		hdr_size = 0;
	}

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		/* get an idle tx request to use */
		req = NULL;
		ret = wait_event_interruptible(dev->write_wq,
			(req = mtp_req_get(dev, &dev->tx_idle))
			|| dev->state != STATE_BUSY);
		if (dev->state == STATE_CANCELED) {
			r = -ECANCELED;
			break;
		}
		if (!req) {
			r = ret;
			break;
		}

		if (count > MTP_BULK_BUFFER_SIZE)
			xfer = MTP_BULK_BUFFER_SIZE;
		else
			xfer = count;

		if (hdr_size) {
			/* prepend MTP data header */
			header = (struct mtp_data_header *)req->buf;
			header->length = __cpu_to_le32(count);
			header->type = __cpu_to_le16(2); /* data packet */
			header->command = __cpu_to_le16(dev->xfer_command);
			header->transaction_id =
					__cpu_to_le32(dev->xfer_transaction_id);
		}

		ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
								&offset);
		if (ret < 0) {
			r = ret;
			break;
		}
		xfer = ret + hdr_size;
		hdr_size = 0;

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "send_file_work: xfer error %d\n", ret);
			dev->state = STATE_ERROR;
			r = -EIO;
			break;
		}

		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = NULL;
	}

	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	DBG(cdev, "send_file_work returning %d\n", r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}
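
/* The header prepended above is the standard 12-byte MTP/PTP data
 * container: a little-endian 32-bit total length, a 16-bit block type
 * (2 == data), the 16-bit operation code and the 32-bit transaction ID,
 * matching the layout of struct mtp_data_header from
 * <linux/usb/f_mtp.h>. Sizes that do not fit in 32 bits are conveyed
 * as 0xFFFFFFFF and the receiver falls back to short-packet/ZLP
 * framing, which is exactly what receive_file_work() below does on the
 * inbound side.
 */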
/* read from USB and write to a local file */
static void receive_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						receive_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *read_req = NULL, *write_req = NULL;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int ret, cur_buf = 0;
	int r = 0;

	/* read our parameters */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "receive_file_work(%lld)\n", count);

	while (count > 0 || write_req) {
		if (count > 0) {
			/* queue a request */
			read_req = dev->rx_req[cur_buf];
			cur_buf = (cur_buf + 1) % RX_REQ_MAX;

			read_req->length = (count > MTP_BULK_BUFFER_SIZE
					? MTP_BULK_BUFFER_SIZE : count);
			dev->rx_done = 0;
			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
			if (ret < 0) {
				r = -EIO;
				dev->state = STATE_ERROR;
				break;
			}
		}

		if (write_req) {
			DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
			ret = vfs_write(filp, write_req->buf, write_req->actual,
				&offset);
			DBG(cdev, "vfs_write %d\n", ret);
			if (ret != write_req->actual) {
				r = -EIO;
				dev->state = STATE_ERROR;
				break;
			}
			write_req = NULL;
		}

		if (read_req) {
			/* wait for our last read to complete */
			ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
			if (dev->state == STATE_CANCELED) {
				r = -ECANCELED;
				if (!dev->rx_done)
					usb_ep_dequeue(dev->ep_out, read_req);
				break;
			}
			/* if xfer_file_length is 0xFFFFFFFF, then we read until
			 * we get a zero length packet
			 */
			if (count != 0xFFFFFFFF)
				count -= read_req->actual;
			if (read_req->actual < read_req->length) {
				/* short packet is used to signal EOF
				 * for sizes > 4 gig
				 */
				DBG(cdev, "got short packet\n");
				count = 0;
			}

			write_req = read_req;
			read_req = NULL;
		}
	}

	DBG(cdev, "receive_file_work returning %d\n", r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}
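
/* Each pass through the loop above does three things: queue the next
 * USB read into one of the RX_REQ_MAX rotating buffers, flush the
 * previously completed buffer to the file, then wait for the in-flight
 * read. The vfs_write() of buffer N therefore overlaps the USB transfer
 * of buffer N+1, so storage and bus bandwidth are consumed in parallel
 * instead of strictly back to back.
 */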
static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
{
	struct usb_request *req = NULL;
	int ret;
	int length = event->length;

	DBG(dev->cdev, "mtp_send_event(%d)\n", event->length);

	if (length < 0 || length > INTR_BUFFER_SIZE)
		return -EINVAL;
	if (dev->state == STATE_OFFLINE)
		return -ENODEV;

	ret = wait_event_interruptible_timeout(dev->intr_wq,
			(req = mtp_req_get(dev, &dev->intr_idle)),
			msecs_to_jiffies(1000));
	if (!req)
		return -ETIME;

	if (copy_from_user(req->buf, (void __user *)event->data, length)) {
		mtp_req_put(dev, &dev->intr_idle, req);
		return -EFAULT;
	}
	req->length = length;
	ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
	if (ret)
		mtp_req_put(dev, &dev->intr_idle, req);

	return ret;
}
static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
{
	struct mtp_dev *dev = fp->private_data;
	struct file *filp = NULL;
	int ret = -EINVAL;

	if (mtp_lock(&dev->ioctl_excl))
		return -EBUSY;

	switch (code) {
	case MTP_SEND_FILE:
	case MTP_RECEIVE_FILE:
	case MTP_SEND_FILE_WITH_HEADER:
	{
		struct mtp_file_range	mfr;
		struct work_struct *work;

		spin_lock_irq(&dev->lock);
		if (dev->state == STATE_CANCELED) {
			/* report cancelation to userspace */
			dev->state = STATE_READY;
			spin_unlock_irq(&dev->lock);
			ret = -ECANCELED;
			goto out;
		}
		if (dev->state == STATE_OFFLINE) {
			spin_unlock_irq(&dev->lock);
			ret = -ENODEV;
			goto out;
		}
		dev->state = STATE_BUSY;
		spin_unlock_irq(&dev->lock);

		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
			ret = -EFAULT;
			goto fail;
		}
		/* hold a reference to the file while we are working with it */
		filp = fget(mfr.fd);
		if (!filp) {
			ret = -EBADF;
			goto fail;
		}

		/* write the parameters */
		dev->xfer_file = filp;
		dev->xfer_file_offset = mfr.offset;
		dev->xfer_file_length = mfr.length;
		smp_wmb();

		if (code == MTP_SEND_FILE_WITH_HEADER) {
			work = &dev->send_file_work;
			dev->xfer_send_header = 1;
			dev->xfer_command = mfr.command;
			dev->xfer_transaction_id = mfr.transaction_id;
		} else if (code == MTP_SEND_FILE) {
			work = &dev->send_file_work;
			dev->xfer_send_header = 0;
		} else {
			work = &dev->receive_file_work;
		}

		/* We do the file transfer on a work queue so it will run
		 * in kernel context, which is necessary for vfs_read and
		 * vfs_write to use our buffers in the kernel address space.
		 */
		queue_work(dev->wq, work);
		/* wait for operation to complete */
		flush_workqueue(dev->wq);
		fput(filp);

		/* read the result */
		smp_rmb();
		ret = dev->xfer_result;
		break;
	}
	case MTP_SEND_EVENT:
	{
		struct mtp_event	event;
		/* return here so we don't change dev->state below,
		 * which would interfere with bulk transfer state.
		 */
		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
			ret = -EFAULT;
		else
			ret = mtp_send_event(dev, &event);
		goto out;
	}
	}

fail:
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		ret = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);
out:
	mtp_unlock(&dev->ioctl_excl);
	DBG(dev->cdev, "ioctl returning %d\n", ret);
	return ret;
}
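
/* A rough sketch of how userspace drives this ioctl (illustrative only;
 * the real caller is the platform MTP stack, and struct mtp_file_range
 * plus the ioctl codes come from <linux/usb/f_mtp.h>; the file path and
 * the file_size/txn_id variables here are hypothetical):
 *
 *	int fd = open("/dev/mtp_usb", O_RDWR);
 *	struct mtp_file_range mfr = {
 *		.fd = open("/sdcard/photo.jpg", O_RDONLY),
 *		.offset = 0,
 *		.length = file_size,
 *		.command = 0x1009,	// MTP GetObject opcode
 *		.transaction_id = txn_id,
 *	};
 *	ioctl(fd, MTP_SEND_FILE_WITH_HEADER, &mfr);
 *
 * The call blocks in flush_workqueue() until send_file_work() finishes,
 * then returns the transfer result.
 */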
static int mtp_open(struct inode *ip, struct file *fp)
{
	printk(KERN_INFO "mtp_open\n");
	if (mtp_lock(&_mtp_dev->open_excl))
		return -EBUSY;

	/* clear any error condition */
	if (_mtp_dev->state != STATE_OFFLINE)
		_mtp_dev->state = STATE_READY;

	fp->private_data = _mtp_dev;
	return 0;
}

static int mtp_release(struct inode *ip, struct file *fp)
{
	printk(KERN_INFO "mtp_release\n");

	mtp_unlock(&_mtp_dev->open_excl);
	return 0;
}

/* file operations for /dev/mtp_usb */
static const struct file_operations mtp_fops = {
	.owner = THIS_MODULE,
	.read = mtp_read,
	.write = mtp_write,
	.unlocked_ioctl = mtp_ioctl,
	.open = mtp_open,
	.release = mtp_release,
};

static struct miscdevice mtp_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = mtp_shortname,
	.fops = &mtp_fops,
};
static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
				const struct usb_ctrlrequest *ctrl)
{
	struct mtp_dev *dev = _mtp_dev;
	int	value = -EOPNOTSUPP;
	u16	w_index = le16_to_cpu(ctrl->wIndex);
	u16	w_value = le16_to_cpu(ctrl->wValue);
	u16	w_length = le16_to_cpu(ctrl->wLength);
	unsigned long	flags;

	VDBG(cdev, "mtp_ctrlrequest "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);

	/* Handle MTP OS string */
	if (ctrl->bRequestType ==
			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
			&& (w_value >> 8) == USB_DT_STRING
			&& (w_value & 0xFF) == MTP_OS_STRING_ID) {
		value = (w_length < sizeof(mtp_os_string)
				? w_length : sizeof(mtp_os_string));
		memcpy(cdev->req->buf, mtp_os_string, value);
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
		/* Handle MTP OS descriptor */
		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == 1
				&& (ctrl->bRequestType & USB_DIR_IN)
				&& (w_index == 4 || w_index == 5)) {
			value = (w_length < sizeof(mtp_ext_config_desc) ?
					w_length : sizeof(mtp_ext_config_desc));
			memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
		}
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
				&& w_value == 0) {
			DBG(cdev, "MTP_REQ_CANCEL\n");

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->state == STATE_BUSY) {
				dev->state = STATE_CANCELED;
				wake_up(&dev->read_wq);
				wake_up(&dev->write_wq);
			}
			spin_unlock_irqrestore(&dev->lock, flags);

			/* We need to queue a request to read the remaining
			 * bytes, but we don't actually need to look at
			 * the contents.
			 */
			value = w_length;
		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
				&& w_index == 0 && w_value == 0) {
			struct mtp_device_status *status = cdev->req->buf;

			status->wLength =
				__constant_cpu_to_le16(sizeof(*status));

			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
			spin_lock_irqsave(&dev->lock, flags);
			/* device status is "busy" until we report
			 * the cancelation to userspace
			 */
			if (dev->state == STATE_CANCELED)
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
			else
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_OK);
			spin_unlock_irqrestore(&dev->lock, flags);
			value = sizeof(*status);
		}
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		int rc;
		cdev->req->zero = value < w_length;
		cdev->req->length = value;
		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
		if (rc < 0)
			ERROR(cdev, "%s setup response queue error\n",
							__func__);
	}
	return value;
}
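
/* Cancellation flow in brief: the host issues MTP_REQ_CANCEL on ep0,
 * which flips a BUSY device to STATE_CANCELED and wakes both wait
 * queues. Whatever read(), write() or file-transfer worker was blocked
 * then unwinds and reports -ECANCELED to userspace; until userspace has
 * taken that result, MTP_REQ_GET_DEVICE_STATUS keeps answering
 * MTP_RESPONSE_DEVICE_BUSY so the host knows the device is still
 * cleaning up.
 */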
static int
mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct mtp_dev	*dev = func_to_mtp(f);
	int			id;
	int			ret;

	dev->cdev = cdev;
	DBG(cdev, "mtp_function_bind dev: %p\n", dev);

	/* allocate interface ID(s) */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	mtp_interface_desc.bInterfaceNumber = id;

	/* allocate endpoints */
	ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
			&mtp_fullspeed_out_desc, &mtp_intr_desc);
	if (ret)
		return ret;

	/* support high speed hardware */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		mtp_highspeed_in_desc.bEndpointAddress =
			mtp_fullspeed_in_desc.bEndpointAddress;
		mtp_highspeed_out_desc.bEndpointAddress =
			mtp_fullspeed_out_desc.bEndpointAddress;
	}

	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
			f->name, dev->ep_in->name, dev->ep_out->name);
	return 0;
}

static void
mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct mtp_dev	*dev = func_to_mtp(f);
	struct usb_request *req;
	int i;

	while ((req = mtp_req_get(dev, &dev->tx_idle)))
		mtp_request_free(req, dev->ep_in);
	for (i = 0; i < RX_REQ_MAX; i++)
		mtp_request_free(dev->rx_req[i], dev->ep_out);
	while ((req = mtp_req_get(dev, &dev->intr_idle)))
		mtp_request_free(req, dev->ep_intr);
	dev->state = STATE_OFFLINE;
}

static int mtp_function_set_alt(struct usb_function *f,
		unsigned intf, unsigned alt)
{
	struct mtp_dev	*dev = func_to_mtp(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	int ret;

	DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
	ret = usb_ep_enable(dev->ep_in,
			ep_choose(cdev->gadget,
				&mtp_highspeed_in_desc,
				&mtp_fullspeed_in_desc));
	if (ret)
		return ret;
	ret = usb_ep_enable(dev->ep_out,
			ep_choose(cdev->gadget,
				&mtp_highspeed_out_desc,
				&mtp_fullspeed_out_desc));
	if (ret) {
		usb_ep_disable(dev->ep_in);
		return ret;
	}
	ret = usb_ep_enable(dev->ep_intr, &mtp_intr_desc);
	if (ret) {
		usb_ep_disable(dev->ep_out);
		usb_ep_disable(dev->ep_in);
		return ret;
	}
	dev->state = STATE_READY;

	/* readers may be blocked waiting for us to go online */
	wake_up(&dev->read_wq);
	return 0;
}
static void mtp_function_disable(struct usb_function *f)
{
	struct mtp_dev	*dev = func_to_mtp(f);
	struct usb_composite_dev	*cdev = dev->cdev;

	DBG(cdev, "mtp_function_disable\n");
	dev->state = STATE_OFFLINE;
	usb_ep_disable(dev->ep_in);
	usb_ep_disable(dev->ep_out);
	usb_ep_disable(dev->ep_intr);

	/* readers may be blocked waiting for us to go online */
	wake_up(&dev->read_wq);

	VDBG(cdev, "%s disabled\n", dev->function.name);
}

static int mtp_bind_config(struct usb_configuration *c, bool ptp_config)
{
	struct mtp_dev *dev = _mtp_dev;
	int ret = 0;

	printk(KERN_INFO "mtp_bind_config\n");

	/* allocate a string ID for our interface */
	if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
		ret = usb_string_id(c->cdev);
		if (ret < 0)
			return ret;
		mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
		mtp_interface_desc.iInterface = ret;
	}

	dev->cdev = c->cdev;
	dev->function.name = "mtp";
	dev->function.strings = mtp_strings;
	if (ptp_config) {
		dev->function.descriptors = fs_ptp_descs;
		dev->function.hs_descriptors = hs_ptp_descs;
	} else {
		dev->function.descriptors = fs_mtp_descs;
		dev->function.hs_descriptors = hs_mtp_descs;
	}
	dev->function.bind = mtp_function_bind;
	dev->function.unbind = mtp_function_unbind;
	dev->function.set_alt = mtp_function_set_alt;
	dev->function.disable = mtp_function_disable;

	return usb_add_function(c, &dev->function);
}
static int mtp_setup(void)
{
	struct mtp_dev *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	spin_lock_init(&dev->lock);
	init_waitqueue_head(&dev->read_wq);
	init_waitqueue_head(&dev->write_wq);
	init_waitqueue_head(&dev->intr_wq);
	atomic_set(&dev->open_excl, 0);
	atomic_set(&dev->ioctl_excl, 0);
	INIT_LIST_HEAD(&dev->tx_idle);
	INIT_LIST_HEAD(&dev->intr_idle);

	dev->wq = create_singlethread_workqueue("f_mtp");
	if (!dev->wq) {
		ret = -ENOMEM;
		goto err1;
	}
	INIT_WORK(&dev->send_file_work, send_file_work);
	INIT_WORK(&dev->receive_file_work, receive_file_work);

	_mtp_dev = dev;

	ret = misc_register(&mtp_device);
	if (ret)
		goto err2;

	return 0;

err2:
	destroy_workqueue(dev->wq);
err1:
	_mtp_dev = NULL;
	kfree(dev);
	printk(KERN_ERR "mtp gadget driver failed to initialize\n");
	return ret;
}

static void mtp_cleanup(void)
{
	struct mtp_dev *dev = _mtp_dev;

	if (!dev)
		return;

	misc_deregister(&mtp_device);
	destroy_workqueue(dev->wq);
	_mtp_dev = NULL;
	kfree(dev);
}
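
/* Lifecycle, for reference: a composite gadget driver (historically the
 * Android gadget, android.c) is expected to call mtp_setup() once at
 * init to create /dev/mtp_usb and the transfer workqueue, call
 * mtp_bind_config() while building each configuration (ptp_config
 * selects the PTP still-image descriptors for camera-only mode), route
 * class and vendor ep0 traffic through mtp_ctrlrequest() from its setup
 * handler, and call mtp_cleanup() on exit. None of these are exported
 * here, so the caller is assumed to be built into the same gadget.
 */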