f_mtp.c

/*
 * Gadget Function Driver for MTP
 *
 * Copyright (C) 2010 Google, Inc.
 * Author: Mike Lockwood <lockwood@android.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* #define DEBUG */
/* #define VERBOSE_DEBUG */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/usb/ch9.h>
#include <linux/usb/f_mtp.h>

#define MTP_BULK_BUFFER_SIZE	16384
#define INTR_BUFFER_SIZE	28

/* String IDs */
#define INTERFACE_STRING_INDEX	0

/* values for mtp_dev.state */
#define STATE_OFFLINE	0	/* initial state, disconnected */
#define STATE_READY	1	/* ready for userspace calls */
#define STATE_BUSY	2	/* processing userspace calls */
#define STATE_CANCELED	3	/* transaction canceled by host */
#define STATE_ERROR	4	/* error from completion routine */
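
/*
 * State transitions, as implemented in this file:
 *
 *	OFFLINE --(set_alt)--> READY --(read/write/ioctl)--> BUSY
 *	BUSY --(MTP_REQ_CANCEL from host)--> CANCELED --(reported)--> READY
 *	any --(completion error)--> ERROR
 *	any --(disable/unbind)--> OFFLINE
 */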

/* number of tx and rx requests to allocate */
#define MTP_TX_REQ_MAX	8
#define RX_REQ_MAX	2
#define INTR_REQ_MAX	5

/* ID for Microsoft MTP OS String */
#define MTP_OS_STRING_ID	0xEE

/* MTP class requests */
#define MTP_REQ_CANCEL			0x64
#define MTP_REQ_GET_EXT_EVENT_DATA	0x65
#define MTP_REQ_RESET			0x66
#define MTP_REQ_GET_DEVICE_STATUS	0x67

/* constants for device status */
#define MTP_RESPONSE_OK			0x2001
#define MTP_RESPONSE_DEVICE_BUSY	0x2019

unsigned int mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
module_param(mtp_rx_req_len, uint, S_IRUGO | S_IWUSR);

unsigned int mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
module_param(mtp_tx_req_len, uint, S_IRUGO | S_IWUSR);

unsigned int mtp_tx_reqs = MTP_TX_REQ_MAX;
module_param(mtp_tx_reqs, uint, S_IRUGO | S_IWUSR);
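
/*
 * With S_IRUGO | S_IWUSR these parameters are also writable at runtime.
 * A hypothetical example, assuming the function is built as module f_mtp
 * (the exact sysfs path depends on how the gadget is built):
 *
 *	echo 131072 > /sys/module/f_mtp/parameters/mtp_tx_req_len
 *
 * Larger buffers reduce per-request overhead on fast links; the allocation
 * code below falls back to MTP_BULK_BUFFER_SIZE if memory is tight.
 */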

static const char mtp_shortname[] = "mtp_usb";

struct mtp_dev {
	struct usb_function function;
	struct usb_composite_dev *cdev;
	spinlock_t lock;

	struct usb_ep *ep_in;
	struct usb_ep *ep_out;
	struct usb_ep *ep_intr;

	int state;

	/* synchronize access to our device file */
	atomic_t open_excl;
	/* to enforce only one ioctl at a time */
	atomic_t ioctl_excl;

	struct list_head tx_idle;
	struct list_head intr_idle;

	wait_queue_head_t read_wq;
	wait_queue_head_t write_wq;
	wait_queue_head_t intr_wq;
	struct usb_request *rx_req[RX_REQ_MAX];
	int rx_done;

	/* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
	 * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
	 */
	struct workqueue_struct *wq;
	struct work_struct send_file_work;
	struct work_struct receive_file_work;
	struct file *xfer_file;
	loff_t xfer_file_offset;
	int64_t xfer_file_length;
	unsigned xfer_send_header;
	uint16_t xfer_command;
	uint32_t xfer_transaction_id;
	int xfer_result;
};

static struct usb_interface_descriptor mtp_interface_desc = {
	.bLength		= USB_DT_INTERFACE_SIZE,
	.bDescriptorType	= USB_DT_INTERFACE,
	.bInterfaceNumber	= 0,
	.bNumEndpoints		= 3,
	.bInterfaceClass	= USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass	= USB_SUBCLASS_VENDOR_SPEC,
	.bInterfaceProtocol	= 0,
};

static struct usb_interface_descriptor ptp_interface_desc = {
	.bLength		= USB_DT_INTERFACE_SIZE,
	.bDescriptorType	= USB_DT_INTERFACE,
	.bInterfaceNumber	= 0,
	.bNumEndpoints		= 3,
	.bInterfaceClass	= USB_CLASS_STILL_IMAGE,
	.bInterfaceSubClass	= 1,
	.bInterfaceProtocol	= 1,
};

static struct usb_endpoint_descriptor mtp_superspeed_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize		= __constant_cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor mtp_superspeed_in_comp_desc = {
	.bLength		= sizeof(mtp_superspeed_in_comp_desc),
	.bDescriptorType	= USB_DT_SS_ENDPOINT_COMP,
	/* the following 2 values can be tweaked if necessary */
	.bMaxBurst		= 2,
	/* .bmAttributes	= 0, */
};

static struct usb_endpoint_descriptor mtp_superspeed_out_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_OUT,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize		= __constant_cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor mtp_superspeed_out_comp_desc = {
	.bLength		= sizeof(mtp_superspeed_out_comp_desc),
	.bDescriptorType	= USB_DT_SS_ENDPOINT_COMP,
	/* the following 2 values can be tweaked if necessary */
	.bMaxBurst		= 2,
	/* .bmAttributes	= 0, */
};

static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize		= __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_OUT,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize		= __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_OUT,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor mtp_intr_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN,
	.bmAttributes		= USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize		= __constant_cpu_to_le16(INTR_BUFFER_SIZE),
	.bInterval		= 6,
};

static struct usb_ss_ep_comp_descriptor mtp_superspeed_intr_comp_desc = {
	.bLength		= sizeof(mtp_superspeed_intr_comp_desc),
	.bDescriptorType	= USB_DT_SS_ENDPOINT_COMP,
	/* the following 3 values can be tweaked if necessary */
	/* .bMaxBurst		= 0, */
	/* .bmAttributes	= 0, */
	.wBytesPerInterval	= cpu_to_le16(INTR_BUFFER_SIZE),
};

static struct usb_descriptor_header *fs_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *hs_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *ss_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_in_comp_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_out_comp_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_intr_comp_desc,
	NULL,
};

static struct usb_descriptor_header *fs_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *hs_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *ss_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_in_comp_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_out_comp_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_intr_comp_desc,
	NULL,
};

static struct usb_string mtp_string_defs[] = {
	/* Naming interface "MTP" so libmtp will recognize us */
	[INTERFACE_STRING_INDEX].s	= "MTP",
	{  },	/* end of list */
};

static struct usb_gadget_strings mtp_string_table = {
	.language	= 0x0409,	/* en-US */
	.strings	= mtp_string_defs,
};

static struct usb_gadget_strings *mtp_strings[] = {
	&mtp_string_table,
	NULL,
};

/* Microsoft MTP OS String */
static u8 mtp_os_string[] = {
	18, /* sizeof(mtp_os_string) */
	USB_DT_STRING,
	/* Signature field: "MSFT100" */
	'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
	/* vendor code */
	1,
	/* padding */
	0
};
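
/*
 * Background note: Windows probes string descriptor index 0xEE for the
 * "MSFT100" signature above.  If present, it issues a vendor-specific
 * GET_DESCRIPTOR using the vendor code byte (1 here) as bRequest; that
 * request is answered with mtp_ext_config_desc in mtp_ctrlrequest() below.
 */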

/* Microsoft Extended Configuration Descriptor Header Section */
struct mtp_ext_config_desc_header {
	__le32	dwLength;
	__le16	bcdVersion;
	__le16	wIndex;
	__u8	bCount;
	__u8	reserved[7];
};

/* Microsoft Extended Configuration Descriptor Function Section */
struct mtp_ext_config_desc_function {
	__u8	bFirstInterfaceNumber;
	__u8	bInterfaceCount;
	__u8	compatibleID[8];
	__u8	subCompatibleID[8];
	__u8	reserved[6];
};

/* MTP Extended Configuration Descriptor */
struct {
	struct mtp_ext_config_desc_header	header;
	struct mtp_ext_config_desc_function	function;
} mtp_ext_config_desc = {
	.header = {
		.dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
		.bcdVersion = __constant_cpu_to_le16(0x0100),
		.wIndex = __constant_cpu_to_le16(4),
		/* bCount is a single byte; a 16-bit cpu_to_le16() here was a bug */
		.bCount = 1,
	},
	.function = {
		.bFirstInterfaceNumber = 0,
		.bInterfaceCount = 1,
		.compatibleID = { 'M', 'T', 'P' },
	},
};
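
/*
 * The compatibleID "MTP" above is what lets Windows bind its in-box MTP
 * driver to this vendor-specific interface without a custom INF file.
 */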

struct mtp_device_status {
	__le16	wLength;
	__le16	wCode;
};

/* temporary variable used between mtp_open() and mtp_gadget_bind() */
static struct mtp_dev *_mtp_dev;

static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
{
	return container_of(f, struct mtp_dev, function);
}

static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
{
	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);

	if (!req)
		return NULL;

	/* now allocate buffers for the requests */
	req->buf = kmalloc(buffer_size, GFP_KERNEL);
	if (!req->buf) {
		usb_ep_free_request(ep, req);
		return NULL;
	}

	return req;
}

static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
{
	if (req) {
		kfree(req->buf);
		usb_ep_free_request(ep, req);
	}
}

static inline int mtp_lock(atomic_t *excl)
{
	if (atomic_inc_return(excl) == 1) {
		return 0;
	} else {
		atomic_dec(excl);
		return -1;
	}
}

static inline void mtp_unlock(atomic_t *excl)
{
	atomic_dec(excl);
}
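
/*
 * mtp_lock() is a non-blocking try-lock: the first caller takes the count
 * from 0 to 1 and wins; any concurrent caller sees a value greater than 1,
 * undoes its increment, and fails.  open() and ioctl() use it to enforce
 * exclusive access rather than sleeping on a mutex.
 */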

/* add a request to the tail of a list */
static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
		struct usb_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	list_add_tail(&req->list, head);
	spin_unlock_irqrestore(&dev->lock, flags);
}

/* remove a request from the head of a list */
static struct usb_request
*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
{
	unsigned long flags;
	struct usb_request *req;

	spin_lock_irqsave(&dev->lock, flags);
	if (list_empty(head)) {
		req = NULL;
	} else {
		req = list_first_entry(head, struct usb_request, list);
		list_del(&req->list);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
	return req;
}
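
/*
 * These two helpers implement the request-recycling scheme used throughout:
 * a writer takes an idle request from tx_idle, queues it on the endpoint,
 * and the completion callback (which can run in interrupt context, hence
 * the irqsave locking) puts it back and wakes the waiting writer.
 */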

static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	if (req->status != 0)
		dev->state = STATE_ERROR;

	mtp_req_put(dev, &dev->tx_idle, req);

	wake_up(&dev->write_wq);
}

static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	dev->rx_done = 1;
	if (req->status != 0)
		dev->state = STATE_ERROR;

	wake_up(&dev->read_wq);
}

static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	if (req->status != 0)
		dev->state = STATE_ERROR;

	mtp_req_put(dev, &dev->intr_idle, req);

	wake_up(&dev->intr_wq);
}

static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
				struct usb_endpoint_descriptor *in_desc,
				struct usb_endpoint_descriptor *out_desc,
				struct usb_endpoint_descriptor *intr_desc)
{
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	struct usb_ep *ep;
	int i;

	DBG(cdev, "create_bulk_endpoints dev: %pK\n", dev);

	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_in = ep;

	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_out = ep;

	ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_intr = ep;

retry_tx_alloc:
	if (mtp_tx_req_len > MTP_BULK_BUFFER_SIZE)
		mtp_tx_reqs = 4;

	/* now allocate requests for our endpoints */
	for (i = 0; i < mtp_tx_reqs; i++) {
		req = mtp_request_new(dev->ep_in, mtp_tx_req_len);
		if (!req) {
			if (mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)
				goto fail;
			while ((req = mtp_req_get(dev, &dev->tx_idle)))
				mtp_request_free(req, dev->ep_in);
			mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
			mtp_tx_reqs = MTP_TX_REQ_MAX;
			goto retry_tx_alloc;
		}
		req->complete = mtp_complete_in;
		mtp_req_put(dev, &dev->tx_idle, req);
	}

	/*
	 * The RX buffer should be aligned to EP max packet for
	 * some controllers.  At bind time, we don't know the
	 * operational speed.  Hence assuming super speed max
	 * packet size.
	 */
	if (mtp_rx_req_len % 1024)
		mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;

retry_rx_alloc:
	for (i = 0; i < RX_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_out, mtp_rx_req_len);
		if (!req) {
			if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
				goto fail;
			/* free only the requests allocated so far; the
			 * failed index was never stored in rx_req[]
			 */
			for (--i; i >= 0; i--)
				mtp_request_free(dev->rx_req[i], dev->ep_out);
			mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
			goto retry_rx_alloc;
		}
		req->complete = mtp_complete_out;
		dev->rx_req[i] = req;
	}

	for (i = 0; i < INTR_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
		if (!req)
			goto fail;
		req->complete = mtp_complete_intr;
		mtp_req_put(dev, &dev->intr_idle, req);
	}

	return 0;

fail:
	printk(KERN_ERR "mtp_bind() could not allocate requests\n");
	return -1;
}
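
/*
 * Allocation strategy for the loops above: if the tunable request size
 * exceeds the 16 KB default and allocation fails, everything allocated so
 * far is freed and the loop retries once at MTP_BULK_BUFFER_SIZE (the tx
 * path also restores MTP_TX_REQ_MAX), so an oversized module parameter
 * degrades to the defaults instead of failing the bind outright.
 */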

static ssize_t mtp_read(struct file *fp, char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	int r = count, xfer, len;
	int ret = 0;

	DBG(cdev, "mtp_read(%zu)\n", count);

	/* we will block until we're online */
	DBG(cdev, "mtp_read: waiting for online state\n");
	ret = wait_event_interruptible(dev->read_wq,
		dev->state != STATE_OFFLINE);
	if (ret < 0) {
		r = ret;
		goto done;
	}

	len = ALIGN(count, dev->ep_out->maxpacket);
	if (len > mtp_rx_req_len)
		return -EINVAL;

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

requeue_req:
	/* queue a request */
	req = dev->rx_req[0];
	req->length = len;
	dev->rx_done = 0;
	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
	if (ret < 0) {
		r = -EIO;
		goto done;
	} else {
		DBG(cdev, "rx %pK queue\n", req);
	}

	/* wait for a request to complete */
	ret = wait_event_interruptible(dev->read_wq,
		dev->rx_done || dev->state != STATE_BUSY);
	if (dev->state == STATE_CANCELED) {
		r = -ECANCELED;
		if (!dev->rx_done)
			usb_ep_dequeue(dev->ep_out, req);
		spin_lock_irq(&dev->lock);
		dev->state = STATE_CANCELED;
		spin_unlock_irq(&dev->lock);
		goto done;
	}
	if (ret < 0) {
		r = ret;
		usb_ep_dequeue(dev->ep_out, req);
		goto done;
	}
	if (dev->state == STATE_BUSY) {
		/* If we got a 0-len packet, throw it back and try again. */
		if (req->actual == 0)
			goto requeue_req;

		DBG(cdev, "rx %pK %d\n", req, req->actual);
		xfer = (req->actual < count) ? req->actual : count;
		r = xfer;
		if (copy_to_user(buf, req->buf, xfer))
			r = -EFAULT;
	} else
		r = -EIO;

done:
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_read returning %d\n", r);
	return r;
}

static ssize_t mtp_write(struct file *fp, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = NULL;
	int r = count, xfer;
	int sendZLP = 0;
	int ret;

	DBG(cdev, "mtp_write(%zu)\n", count);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	if (dev->state == STATE_OFFLINE) {
		spin_unlock_irq(&dev->lock);
		return -ENODEV;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		if (dev->state != STATE_BUSY) {
			DBG(cdev, "mtp_write dev->error\n");
			r = -EIO;
			break;
		}

		/* get an idle tx request to use */
		req = NULL;
		ret = wait_event_interruptible(dev->write_wq,
			((req = mtp_req_get(dev, &dev->tx_idle))
				|| dev->state != STATE_BUSY));
		if (!req) {
			r = ret;
			break;
		}

		if (count > mtp_tx_req_len)
			xfer = mtp_tx_req_len;
		else
			xfer = count;
		if (xfer && copy_from_user(req->buf, buf, xfer)) {
			r = -EFAULT;
			break;
		}

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "mtp_write: xfer error %d\n", ret);
			r = -EIO;
			break;
		}

		buf += xfer;
		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = NULL;
	}

	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_write returning %d\n", r);
	return r;
}
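
/*
 * Worked example of the ZLP rule above: with a 512-byte high-speed bulk
 * maxpacket, a 1024-byte write ends exactly on a packet boundary, so the
 * host cannot tell from a short packet that the transfer is over; an
 * explicit zero-length packet marks the end.  A 1000-byte write ends with
 * a 488-byte short packet, which already terminates the transfer.
 */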

/* read from a local file and write to USB */
static void send_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						send_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = NULL;
	struct mtp_data_header *header;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int xfer, ret, hdr_size;
	int r = 0;
	int sendZLP = 0;

	/* read our parameters */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	if (count < 0) {
		dev->xfer_result = -EINVAL;
		return;
	}

	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);

	if (dev->xfer_send_header) {
		hdr_size = sizeof(struct mtp_data_header);
		count += hdr_size;
	} else {
		hdr_size = 0;
	}

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		/* get an idle tx request to use */
		req = NULL;
		ret = wait_event_interruptible(dev->write_wq,
			(req = mtp_req_get(dev, &dev->tx_idle))
			|| dev->state != STATE_BUSY);
		if (dev->state == STATE_CANCELED) {
			r = -ECANCELED;
			break;
		}
		if (!req) {
			r = ret;
			break;
		}

		if (count > mtp_tx_req_len)
			xfer = mtp_tx_req_len;
		else
			xfer = count;

		if (hdr_size) {
			/* prepend MTP data header */
			header = (struct mtp_data_header *)req->buf;
			header->length = __cpu_to_le32(count);
			header->type = __cpu_to_le16(2); /* data packet */
			header->command = __cpu_to_le16(dev->xfer_command);
			header->transaction_id =
					__cpu_to_le32(dev->xfer_transaction_id);
		}

		ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
								&offset);
		if (ret < 0) {
			r = ret;
			break;
		}
		xfer = ret + hdr_size;
		hdr_size = 0;

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "send_file_work: xfer error %d\n", ret);
			if (dev->state != STATE_OFFLINE)
				dev->state = STATE_ERROR;
			r = -EIO;
			break;
		}

		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = NULL;
	}

	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	DBG(cdev, "send_file_work returning %d\n", r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}

/* read from USB and write to a local file */
static void receive_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						receive_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *read_req = NULL, *write_req = NULL;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int ret, cur_buf = 0;
	int r = 0;

	/* read our parameters */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	if (count < 0) {
		dev->xfer_result = -EINVAL;
		return;
	}

	DBG(cdev, "receive_file_work(%lld)\n", count);
	if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
		DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
						count, dev->ep_out->maxpacket);

	while (count > 0 || write_req) {
		if (count > 0) {
			/* queue a request */
			read_req = dev->rx_req[cur_buf];
			cur_buf = (cur_buf + 1) % RX_REQ_MAX;

			/* some h/w expects size to be aligned to ep's MTU */
			read_req->length = mtp_rx_req_len;

			dev->rx_done = 0;
			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
			if (ret < 0) {
				r = -EIO;
				if (dev->state != STATE_OFFLINE)
					dev->state = STATE_ERROR;
				break;
			}
		}

		if (write_req) {
			DBG(cdev, "rx %pK %d\n", write_req, write_req->actual);
			ret = vfs_write(filp, write_req->buf, write_req->actual,
				&offset);
			DBG(cdev, "vfs_write %d\n", ret);
			if (ret != write_req->actual) {
				r = -EIO;
				if (dev->state != STATE_OFFLINE)
					dev->state = STATE_ERROR;
				break;
			}
			write_req = NULL;
		}

		if (read_req) {
			/* wait for our last read to complete */
			ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
			if (dev->state == STATE_CANCELED
					|| dev->state == STATE_OFFLINE) {
				if (dev->state == STATE_OFFLINE)
					r = -EIO;
				else
					r = -ECANCELED;
				if (!dev->rx_done)
					usb_ep_dequeue(dev->ep_out, read_req);
				break;
			}
			/* Check if we aligned the size due to MTU constraint */
			if (count < read_req->length)
				read_req->actual = (read_req->actual > count ?
						count : read_req->actual);
			/* if xfer_file_length is 0xFFFFFFFF, then we read until
			 * we get a zero length packet
			 */
			if (count != 0xFFFFFFFF)
				count -= read_req->actual;
			if (read_req->actual < read_req->length) {
				/*
				 * short packet is used to signal EOF for
				 * sizes > 4 gig
				 */
				DBG(cdev, "got short packet\n");
				count = 0;
			}

			write_req = read_req;
			read_req = NULL;
		}
	}

	DBG(cdev, "receive_file_work returning %d\n", r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}
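
/*
 * receive_file_work() double-buffers with the two rx requests: while one
 * request is in flight on the OUT endpoint, the previously completed
 * buffer is being written to the file, so USB reads and file writes
 * overlap instead of running strictly back to back.
 */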

static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
{
	struct usb_request *req = NULL;
	int ret;
	int length = event->length;

	DBG(dev->cdev, "mtp_send_event(%d)\n", event->length);

	if (length < 0 || length > INTR_BUFFER_SIZE)
		return -EINVAL;
	if (dev->state == STATE_OFFLINE)
		return -ENODEV;

	ret = wait_event_interruptible_timeout(dev->intr_wq,
			(req = mtp_req_get(dev, &dev->intr_idle)),
			msecs_to_jiffies(1000));
	if (!req)
		return -ETIME;

	if (copy_from_user(req->buf, (void __user *)event->data, length)) {
		mtp_req_put(dev, &dev->intr_idle, req);
		return -EFAULT;
	}
	req->length = length;
	ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
	if (ret)
		mtp_req_put(dev, &dev->intr_idle, req);

	return ret;
}
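
/*
 * Events (for example ObjectAdded notifications from the userspace MTP
 * stack) arrive as pre-marshalled packets: struct mtp_event carries a
 * pointer and a length, and the bytes are sent verbatim on the interrupt
 * endpoint.  Waiting at most one second for a free request keeps a wedged
 * host from blocking the caller indefinitely.
 */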

static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
{
	struct mtp_dev *dev = fp->private_data;
	struct file *filp = NULL;
	int ret = -EINVAL;

	if (mtp_lock(&dev->ioctl_excl))
		return -EBUSY;

	switch (code) {
	case MTP_SEND_FILE:
	case MTP_RECEIVE_FILE:
	case MTP_SEND_FILE_WITH_HEADER:
	{
		struct mtp_file_range	mfr;
		struct work_struct *work;

		spin_lock_irq(&dev->lock);
		if (dev->state == STATE_CANCELED) {
			/* report cancelation to userspace */
			dev->state = STATE_READY;
			spin_unlock_irq(&dev->lock);
			ret = -ECANCELED;
			goto out;
		}
		if (dev->state == STATE_OFFLINE) {
			spin_unlock_irq(&dev->lock);
			ret = -ENODEV;
			goto out;
		}
		dev->state = STATE_BUSY;
		spin_unlock_irq(&dev->lock);

		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
			ret = -EFAULT;
			goto fail;
		}
		/* hold a reference to the file while we are working with it */
		filp = fget(mfr.fd);
		if (!filp) {
			ret = -EBADF;
			goto fail;
		}

		/* write the parameters */
		dev->xfer_file = filp;
		dev->xfer_file_offset = mfr.offset;
		dev->xfer_file_length = mfr.length;
		smp_wmb();

		if (code == MTP_SEND_FILE_WITH_HEADER) {
			work = &dev->send_file_work;
			dev->xfer_send_header = 1;
			dev->xfer_command = mfr.command;
			dev->xfer_transaction_id = mfr.transaction_id;
		} else if (code == MTP_SEND_FILE) {
			work = &dev->send_file_work;
			dev->xfer_send_header = 0;
		} else {
			work = &dev->receive_file_work;
		}

		/* We do the file transfer on a work queue so it will run
		 * in kernel context, which is necessary for vfs_read and
		 * vfs_write to use our buffers in the kernel address space.
		 */
		queue_work(dev->wq, work);
		/* wait for operation to complete */
		flush_workqueue(dev->wq);
		fput(filp);

		/* read the result */
		smp_rmb();
		ret = dev->xfer_result;
		break;
	}
	case MTP_SEND_EVENT:
	{
		struct mtp_event	event;
		/* return here so we don't change dev->state below,
		 * which would interfere with bulk transfer state.
		 */
		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
			ret = -EFAULT;
		else
			ret = mtp_send_event(dev, &event);
		goto out;
	}
	}

fail:
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		ret = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);
out:
	mtp_unlock(&dev->ioctl_excl);
	DBG(dev->cdev, "ioctl returning %d\n", ret);
	return ret;
}
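
/*
 * Userspace usage sketch for the file-transfer ioctls (hypothetical paths
 * and size; struct mtp_file_range and the MTP_* codes come from
 * <linux/usb/f_mtp.h>; error handling omitted).  The call blocks until the
 * work item finishes and returns the transfer result:
 *
 *	int usb_fd = open("/dev/mtp_usb", O_RDWR);
 *	int data_fd = open("/data/example.bin", O_RDONLY);
 *	struct mtp_file_range mfr = {
 *		.fd = data_fd,
 *		.offset = 0,
 *		.length = 1000000,
 *	};
 *	ioctl(usb_fd, MTP_SEND_FILE, &mfr);
 */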

static int mtp_open(struct inode *ip, struct file *fp)
{
	printk(KERN_INFO "mtp_open\n");
	if (mtp_lock(&_mtp_dev->open_excl))
		return -EBUSY;

	/* clear any error condition */
	if (_mtp_dev->state != STATE_OFFLINE)
		_mtp_dev->state = STATE_READY;

	fp->private_data = _mtp_dev;
	return 0;
}

static int mtp_release(struct inode *ip, struct file *fp)
{
	printk(KERN_INFO "mtp_release\n");

	mtp_unlock(&_mtp_dev->open_excl);
	return 0;
}

/* file operations for /dev/mtp_usb */
static const struct file_operations mtp_fops = {
	.owner = THIS_MODULE,
	.read = mtp_read,
	.write = mtp_write,
	.unlocked_ioctl = mtp_ioctl,
	.open = mtp_open,
	.release = mtp_release,
};

static struct miscdevice mtp_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = mtp_shortname,
	.fops = &mtp_fops,
};

static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
				const struct usb_ctrlrequest *ctrl)
{
	struct mtp_dev *dev = _mtp_dev;
	int	value = -EOPNOTSUPP;
	u16	w_index = le16_to_cpu(ctrl->wIndex);
	u16	w_value = le16_to_cpu(ctrl->wValue);
	u16	w_length = le16_to_cpu(ctrl->wLength);
	unsigned long	flags;

	VDBG(cdev, "mtp_ctrlrequest "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);

	/* Handle MTP OS string */
	if (ctrl->bRequestType ==
			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
			&& (w_value >> 8) == USB_DT_STRING
			&& (w_value & 0xFF) == MTP_OS_STRING_ID) {
		value = (w_length < sizeof(mtp_os_string)
				? w_length : sizeof(mtp_os_string));
		memcpy(cdev->req->buf, mtp_os_string, value);
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
		/* Handle MTP OS descriptor */
		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == 1
				&& (ctrl->bRequestType & USB_DIR_IN)
				&& (w_index == 4 || w_index == 5)) {
			value = (w_length < sizeof(mtp_ext_config_desc) ?
					w_length : sizeof(mtp_ext_config_desc));
			memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
		}
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
				&& w_value == 0) {
			DBG(cdev, "MTP_REQ_CANCEL\n");

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->state == STATE_BUSY) {
				dev->state = STATE_CANCELED;
				wake_up(&dev->read_wq);
				wake_up(&dev->write_wq);
			}
			spin_unlock_irqrestore(&dev->lock, flags);

			/* We need to queue a request to read the remaining
			 * bytes, but we don't actually need to look at
			 * the contents.
			 */
			value = w_length;
		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
				&& w_index == 0 && w_value == 0) {
			struct mtp_device_status *status = cdev->req->buf;

			status->wLength =
				__constant_cpu_to_le16(sizeof(*status));

			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
			spin_lock_irqsave(&dev->lock, flags);
			/* device status is "busy" until we report
			 * the cancelation to userspace
			 */
			if (dev->state == STATE_CANCELED)
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
			else
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_OK);
			spin_unlock_irqrestore(&dev->lock, flags);
			value = sizeof(*status);
		}
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		int rc;

		cdev->req->zero = value < w_length;
		cdev->req->length = value;
		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
		if (rc < 0)
			ERROR(cdev, "%s: response queue error\n", __func__);
	}
	return value;
}
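
/*
 * Return-value convention for mtp_ctrlrequest(): a non-negative value
 * means the request was handled and that many bytes were queued on ep0;
 * -EOPNOTSUPP hands the request back to the composite core, which will
 * typically stall ep0 if no other function claims it.
 */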

static int
mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct mtp_dev	*dev = func_to_mtp(f);
	int	id;
	int	ret;

	dev->cdev = cdev;
	DBG(cdev, "mtp_function_bind dev: %pK\n", dev);

	/* allocate interface ID(s) */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	mtp_interface_desc.bInterfaceNumber = id;

	/* allocate endpoints */
	ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
			&mtp_fullspeed_out_desc, &mtp_intr_desc);
	if (ret)
		return ret;

	/* support high speed hardware */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		mtp_highspeed_in_desc.bEndpointAddress =
			mtp_fullspeed_in_desc.bEndpointAddress;
		mtp_highspeed_out_desc.bEndpointAddress =
			mtp_fullspeed_out_desc.bEndpointAddress;
	}
	/* support super speed hardware */
	if (gadget_is_superspeed(c->cdev->gadget)) {
		mtp_superspeed_in_desc.bEndpointAddress =
			mtp_fullspeed_in_desc.bEndpointAddress;
		mtp_superspeed_out_desc.bEndpointAddress =
			mtp_fullspeed_out_desc.bEndpointAddress;
	}

	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
			f->name, dev->ep_in->name, dev->ep_out->name);
	return 0;
}

static void
mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct mtp_dev	*dev = func_to_mtp(f);
	struct usb_request *req;
	int i;

	while ((req = mtp_req_get(dev, &dev->tx_idle)))
		mtp_request_free(req, dev->ep_in);
	for (i = 0; i < RX_REQ_MAX; i++)
		mtp_request_free(dev->rx_req[i], dev->ep_out);
	while ((req = mtp_req_get(dev, &dev->intr_idle)))
		mtp_request_free(req, dev->ep_intr);
	dev->state = STATE_OFFLINE;
}

static int mtp_function_set_alt(struct usb_function *f,
		unsigned intf, unsigned alt)
{
	struct mtp_dev	*dev = func_to_mtp(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	int ret;

	DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);

	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
	if (ret) {
		dev->ep_in->desc = NULL;
		ERROR(cdev, "config_ep_by_speed fails for ep %s, result %d\n",
				dev->ep_in->name, ret);
		return ret;
	}
	ret = usb_ep_enable(dev->ep_in);
	if (ret) {
		ERROR(cdev, "failed to enable ep %s, result %d\n",
			dev->ep_in->name, ret);
		return ret;
	}

	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
	if (ret) {
		dev->ep_out->desc = NULL;
		ERROR(cdev, "config_ep_by_speed fails for ep %s, result %d\n",
			dev->ep_out->name, ret);
		usb_ep_disable(dev->ep_in);
		return ret;
	}
	ret = usb_ep_enable(dev->ep_out);
	if (ret) {
		ERROR(cdev, "failed to enable ep %s, result %d\n",
			dev->ep_out->name, ret);
		usb_ep_disable(dev->ep_in);
		return ret;
	}
	dev->ep_intr->desc = &mtp_intr_desc;
	ret = usb_ep_enable(dev->ep_intr);
	if (ret) {
		usb_ep_disable(dev->ep_out);
		usb_ep_disable(dev->ep_in);
		return ret;
	}
	dev->state = STATE_READY;

	/* readers may be blocked waiting for us to go online */
	wake_up(&dev->read_wq);
	return 0;
}

static void mtp_function_disable(struct usb_function *f)
{
	struct mtp_dev	*dev = func_to_mtp(f);
	struct usb_composite_dev	*cdev = dev->cdev;

	DBG(cdev, "mtp_function_disable\n");
	dev->state = STATE_OFFLINE;
	usb_ep_disable(dev->ep_in);
	usb_ep_disable(dev->ep_out);
	usb_ep_disable(dev->ep_intr);

	/* readers may be blocked waiting for us to go online */
	wake_up(&dev->read_wq);

	VDBG(cdev, "%s disabled\n", dev->function.name);
}

static int mtp_bind_config(struct usb_configuration *c, bool ptp_config)
{
	struct mtp_dev *dev = _mtp_dev;
	int ret = 0;

	printk(KERN_INFO "mtp_bind_config\n");

	/* allocate a string ID for our interface */
	if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
		ret = usb_string_id(c->cdev);
		if (ret < 0)
			return ret;
		mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
		mtp_interface_desc.iInterface = ret;
	}

	dev->cdev = c->cdev;
	dev->function.name = "mtp";
	dev->function.strings = mtp_strings;
	if (ptp_config) {
		dev->function.fs_descriptors = fs_ptp_descs;
		dev->function.hs_descriptors = hs_ptp_descs;
		if (gadget_is_superspeed(c->cdev->gadget))
			dev->function.ss_descriptors = ss_ptp_descs;
	} else {
		dev->function.fs_descriptors = fs_mtp_descs;
		dev->function.hs_descriptors = hs_mtp_descs;
		if (gadget_is_superspeed(c->cdev->gadget))
			dev->function.ss_descriptors = ss_mtp_descs;
	}
	dev->function.bind = mtp_function_bind;
	dev->function.unbind = mtp_function_unbind;
	dev->function.set_alt = mtp_function_set_alt;
	dev->function.disable = mtp_function_disable;

	return usb_add_function(c, &dev->function);
}

static int mtp_setup(void)
{
	struct mtp_dev *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	spin_lock_init(&dev->lock);
	init_waitqueue_head(&dev->read_wq);
	init_waitqueue_head(&dev->write_wq);
	init_waitqueue_head(&dev->intr_wq);
	atomic_set(&dev->open_excl, 0);
	atomic_set(&dev->ioctl_excl, 0);
	INIT_LIST_HEAD(&dev->tx_idle);
	INIT_LIST_HEAD(&dev->intr_idle);

	dev->wq = create_singlethread_workqueue("f_mtp");
	if (!dev->wq) {
		ret = -ENOMEM;
		goto err1;
	}
	INIT_WORK(&dev->send_file_work, send_file_work);
	INIT_WORK(&dev->receive_file_work, receive_file_work);

	_mtp_dev = dev;

	ret = misc_register(&mtp_device);
	if (ret)
		goto err2;

	return 0;

err2:
	destroy_workqueue(dev->wq);
err1:
	_mtp_dev = NULL;
	kfree(dev);
	printk(KERN_ERR "mtp gadget driver failed to initialize\n");
	return ret;
}

static void mtp_cleanup(void)
{
	struct mtp_dev *dev = _mtp_dev;

	if (!dev)
		return;

	misc_deregister(&mtp_device);
	destroy_workqueue(dev->wq);
	_mtp_dev = NULL;
	kfree(dev);
}