f_rmnet_smd.c

/*
 * f_rmnet.c -- RmNet function driver
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
 * Copyright (C) 2008 Nokia Corporation
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/termios.h>
#include <linux/debugfs.h>

#include <mach/msm_smd.h>
#include <linux/usb/cdc.h>
#include <linux/usb/composite.h>
#include <linux/usb/ch9.h>

#include "gadget_chips.h"

#ifndef CONFIG_MSM_SMD
#define CONFIG_RMNET_SMD_CTL_CHANNEL	""
#define CONFIG_RMNET_SMD_DATA_CHANNEL	""
#endif
static char *rmnet_ctl_ch = CONFIG_RMNET_SMD_CTL_CHANNEL;
module_param(rmnet_ctl_ch, charp, S_IRUGO);
MODULE_PARM_DESC(rmnet_ctl_ch, "RmNet control SMD channel");

static char *rmnet_data_ch = CONFIG_RMNET_SMD_DATA_CHANNEL;
module_param(rmnet_data_ch, charp, S_IRUGO);
MODULE_PARM_DESC(rmnet_data_ch, "RmNet data SMD channel");

#define RMNET_SMD_ACM_CTRL_DTR		(1 << 0)

#define RMNET_SMD_NOTIFY_INTERVAL	5
#define RMNET_SMD_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)

#define QMI_REQ_MAX			4
#define QMI_REQ_SIZE			2048
#define QMI_RESP_MAX			8
#define QMI_RESP_SIZE			2048

#define RMNET_RX_REQ_MAX		8
#define RMNET_RX_REQ_SIZE		2048
#define RMNET_TX_REQ_MAX		8
#define RMNET_TX_REQ_SIZE		2048

#define RMNET_TXN_MAX			2048
/* QMI requests & responses buffer */
struct qmi_buf {
	void *buf;
	int len;
	struct list_head list;
};

/* Control & data SMD channel private data */
struct rmnet_smd_ch_info {
	struct smd_channel *ch;
	struct tasklet_struct tx_tlet;
	struct tasklet_struct rx_tlet;
#define CH_OPENED	0
	unsigned long flags;
	/* pending rx packet length */
	atomic_t rx_pkt;
	/* wait for smd open event */
	wait_queue_head_t wait;
};
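
/*
 * Note on naming: for both channels the "tx" tasklet moves traffic from
 * the SMD channel towards the USB host (modem -> host), while the "rx"
 * tasklet drains packets that arrived from the host into SMD
 * (host -> modem) once the channel has enough write room.
 */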
struct rmnet_smd_dev {
	struct usb_function function;
	struct usb_composite_dev *cdev;

	struct usb_ep *epout;
	struct usb_ep *epin;
	struct usb_ep *epnotify;
	struct usb_request *notify_req;

	u8 ifc_id;
	/* QMI lists */
	struct list_head qmi_req_pool;
	struct list_head qmi_resp_pool;
	struct list_head qmi_req_q;
	struct list_head qmi_resp_q;
	/* Tx/Rx lists */
	struct list_head tx_idle;
	struct list_head rx_idle;
	struct list_head rx_queue;

	spinlock_t lock;
	atomic_t online;
	atomic_t notify_count;

	struct platform_driver pdrv;
	u8 is_pdrv_used;
	struct rmnet_smd_ch_info smd_ctl;
	struct rmnet_smd_ch_info smd_data;

	struct workqueue_struct *wq;
	struct work_struct connect_work;
	struct work_struct disconnect_work;

	unsigned long dpkts_to_host;
	unsigned long dpkts_from_modem;
	unsigned long dpkts_from_host;
	unsigned long dpkts_to_modem;
	unsigned long cpkts_to_host;
	unsigned long cpkts_from_modem;
	unsigned long cpkts_from_host;
	unsigned long cpkts_to_modem;
};

static struct rmnet_smd_dev *rmnet_smd;
static struct usb_interface_descriptor rmnet_smd_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	/* .bInterfaceNumber = DYNAMIC */
	.bNumEndpoints =	3,
	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};

/* Full speed support */
static struct usb_endpoint_descriptor rmnet_smd_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(
					RMNET_SMD_MAX_NOTIFY_SIZE),
	.bInterval =		1 << RMNET_SMD_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_smd_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_smd_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_descriptor_header *rmnet_smd_fs_function[] = {
	(struct usb_descriptor_header *) &rmnet_smd_interface_desc,
	(struct usb_descriptor_header *) &rmnet_smd_fs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_smd_fs_in_desc,
	(struct usb_descriptor_header *) &rmnet_smd_fs_out_desc,
	NULL,
};
/* High speed support */
static struct usb_endpoint_descriptor rmnet_smd_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(
					RMNET_SMD_MAX_NOTIFY_SIZE),
	.bInterval =		RMNET_SMD_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_smd_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_smd_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_descriptor_header *rmnet_smd_hs_function[] = {
	(struct usb_descriptor_header *) &rmnet_smd_interface_desc,
	(struct usb_descriptor_header *) &rmnet_smd_hs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_smd_hs_in_desc,
	(struct usb_descriptor_header *) &rmnet_smd_hs_out_desc,
	NULL,
};

/* String descriptors */
static struct usb_string rmnet_smd_string_defs[] = {
	[0].s = "QMI RmNet",
	{  } /* end of list */
};

static struct usb_gadget_strings rmnet_smd_string_table = {
	.language =	0x0409,	/* en-us */
	.strings =	rmnet_smd_string_defs,
};

static struct usb_gadget_strings *rmnet_smd_strings[] = {
	&rmnet_smd_string_table,
	NULL,
};
static struct qmi_buf *
rmnet_smd_alloc_qmi(unsigned len, gfp_t kmalloc_flags)
{
	struct qmi_buf *qmi;

	qmi = kmalloc(sizeof(struct qmi_buf), kmalloc_flags);
	if (qmi != NULL) {
		qmi->buf = kmalloc(len, kmalloc_flags);
		if (qmi->buf == NULL) {
			kfree(qmi);
			qmi = NULL;
		}
	}

	return qmi ? qmi : ERR_PTR(-ENOMEM);
}

static void rmnet_smd_free_qmi(struct qmi_buf *qmi)
{
	kfree(qmi->buf);
	kfree(qmi);
}

/*
 * Allocate a usb_request and its buffer.  Returns a pointer to the
 * usb_request or an error code if there is an error.
 */
static struct usb_request *
rmnet_smd_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, kmalloc_flags);
	if (req != NULL) {
		req->length = len;
		req->buf = kmalloc(len, kmalloc_flags);
		if (req->buf == NULL) {
			usb_ep_free_request(ep, req);
			req = NULL;
		}
	}

	return req ? req : ERR_PTR(-ENOMEM);
}

/*
 * Free a usb_request and its buffer.
 */
static void rmnet_smd_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
static void rmnet_smd_notify_complete(struct usb_ep *ep,
					struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	int status = req->status;

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		ERROR(cdev, "rmnet notify ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		if (ep != dev->epnotify)
			break;

		/* handle multiple pending QMI_RESPONSE_AVAILABLE
		 * notifications by resending until we're done
		 */
		if (atomic_dec_and_test(&dev->notify_count))
			break;

		status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC);
		if (status) {
			atomic_dec(&dev->notify_count);
			ERROR(cdev, "rmnet notify ep enqueue error %d\n",
					status);
		}
		break;
	}
}

static void qmi_smd_response_available(struct rmnet_smd_dev *dev)
{
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = dev->notify_req;
	struct usb_cdc_notification *event = req->buf;
	int status;

	/* Response will be sent later */
	if (atomic_inc_return(&dev->notify_count) != 1)
		return;

	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);

	status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC);
	if (status < 0) {
		atomic_dec(&dev->notify_count);
		ERROR(cdev, "rmnet notify ep enqueue error %d\n", status);
	}
}
/* TODO
 * handle modem restart events
 */
static void rmnet_smd_event_notify(void *priv, unsigned event)
{
	struct rmnet_smd_ch_info *smd_info = priv;
	int len = atomic_read(&smd_info->rx_pkt);
	struct rmnet_smd_dev *dev =
		(struct rmnet_smd_dev *) smd_info->tx_tlet.data;

	switch (event) {
	case SMD_EVENT_DATA: {
		if (!atomic_read(&dev->online))
			break;
		if (len && (smd_write_avail(smd_info->ch) >= len))
			tasklet_schedule(&smd_info->rx_tlet);

		if (smd_read_avail(smd_info->ch))
			tasklet_schedule(&smd_info->tx_tlet);

		break;
	}
	case SMD_EVENT_OPEN:
		/* usb endpoints are not enabled until smd channels
		 * are opened. wake up worker thread to continue
		 * connection processing
		 */
		set_bit(CH_OPENED, &smd_info->flags);
		wake_up(&smd_info->wait);
		break;
	case SMD_EVENT_CLOSE:
		/* We will never come here.
		 * reset flags after closing smd channel
		 */
		clear_bit(CH_OPENED, &smd_info->flags);
		break;
	}
}
static void rmnet_control_tx_tlet(unsigned long arg)
{
	struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg;
	struct usb_composite_dev *cdev = dev->cdev;
	struct qmi_buf *qmi_resp;
	int sz;
	unsigned long flags;

	while (1) {
		sz = smd_cur_packet_size(dev->smd_ctl.ch);
		if (sz == 0)
			break;
		if (smd_read_avail(dev->smd_ctl.ch) < sz)
			break;

		spin_lock_irqsave(&dev->lock, flags);
		if (list_empty(&dev->qmi_resp_pool)) {
			ERROR(cdev, "rmnet QMI Tx buffers full\n");
			spin_unlock_irqrestore(&dev->lock, flags);
			break;
		}
		qmi_resp = list_first_entry(&dev->qmi_resp_pool,
				struct qmi_buf, list);
		list_del(&qmi_resp->list);
		spin_unlock_irqrestore(&dev->lock, flags);

		qmi_resp->len = smd_read(dev->smd_ctl.ch, qmi_resp->buf, sz);

		spin_lock_irqsave(&dev->lock, flags);
		dev->cpkts_from_modem++;
		list_add_tail(&qmi_resp->list, &dev->qmi_resp_q);
		spin_unlock_irqrestore(&dev->lock, flags);

		qmi_smd_response_available(dev);
	}
}

static void rmnet_control_rx_tlet(unsigned long arg)
{
	struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg;
	struct usb_composite_dev *cdev = dev->cdev;
	struct qmi_buf *qmi_req;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	while (1) {
		if (list_empty(&dev->qmi_req_q)) {
			atomic_set(&dev->smd_ctl.rx_pkt, 0);
			break;
		}
		qmi_req = list_first_entry(&dev->qmi_req_q,
				struct qmi_buf, list);
		if (smd_write_avail(dev->smd_ctl.ch) < qmi_req->len) {
			atomic_set(&dev->smd_ctl.rx_pkt, qmi_req->len);
			DBG(cdev, "rmnet control smd channel full\n");
			break;
		}

		list_del(&qmi_req->list);
		dev->cpkts_from_host++;
		spin_unlock_irqrestore(&dev->lock, flags);
		ret = smd_write(dev->smd_ctl.ch, qmi_req->buf, qmi_req->len);
		spin_lock_irqsave(&dev->lock, flags);
		if (ret != qmi_req->len) {
			ERROR(cdev, "rmnet control smd write failed\n");
			break;
		}
		dev->cpkts_to_modem++;
		list_add_tail(&qmi_req->list, &dev->qmi_req_pool);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}
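
/*
 * Completion for the data stage of SEND_ENCAPSULATED_COMMAND: write the
 * QMI command straight into the control SMD channel when it has room,
 * otherwise park it on qmi_req_q for rmnet_control_rx_tlet to flush once
 * the channel drains.
 */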
static void rmnet_smd_command_complete(struct usb_ep *ep,
					struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	struct qmi_buf *qmi_req;
	int ret;

	if (req->status < 0) {
		ERROR(cdev, "rmnet command error %d\n", req->status);
		return;
	}

	spin_lock(&dev->lock);
	dev->cpkts_from_host++;
	/* no pending control rx packet */
	if (!atomic_read(&dev->smd_ctl.rx_pkt)) {
		if (smd_write_avail(dev->smd_ctl.ch) < req->actual) {
			atomic_set(&dev->smd_ctl.rx_pkt, req->actual);
			goto queue_req;
		}
		spin_unlock(&dev->lock);
		ret = smd_write(dev->smd_ctl.ch, req->buf, req->actual);
		/* This should never happen */
		if (ret != req->actual)
			ERROR(cdev, "rmnet control smd write failed\n");
		spin_lock(&dev->lock);
		dev->cpkts_to_modem++;
		spin_unlock(&dev->lock);
		return;
	}
queue_req:
	if (list_empty(&dev->qmi_req_pool)) {
		spin_unlock(&dev->lock);
		ERROR(cdev, "rmnet QMI pool is empty\n");
		return;
	}

	qmi_req = list_first_entry(&dev->qmi_req_pool, struct qmi_buf, list);
	list_del(&qmi_req->list);
	spin_unlock(&dev->lock);
	memcpy(qmi_req->buf, req->buf, req->actual);
	qmi_req->len = req->actual;
	spin_lock(&dev->lock);
	list_add_tail(&qmi_req->list, &dev->qmi_req_q);
	spin_unlock(&dev->lock);
}

static void rmnet_txcommand_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;

	spin_lock(&dev->lock);
	dev->cpkts_to_host++;
	spin_unlock(&dev->lock);
}
static int
rmnet_smd_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
								function);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request *req = cdev->req;
	int ret = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	struct qmi_buf *resp;
	int schedule = 0;

	if (!atomic_read(&dev->online))
		return -ENOTCONN;

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		ret = w_length;
		req->complete = rmnet_smd_command_complete;
		req->context = dev;
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value)
			goto invalid;
		else {
			spin_lock(&dev->lock);
			if (list_empty(&dev->qmi_resp_q)) {
				INFO(cdev, "qmi resp empty "
					" req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}
			resp = list_first_entry(&dev->qmi_resp_q,
					struct qmi_buf, list);
			list_del(&resp->list);
			spin_unlock(&dev->lock);

			memcpy(req->buf, resp->buf, resp->len);
			ret = resp->len;

			spin_lock(&dev->lock);
			if (list_empty(&dev->qmi_resp_pool))
				schedule = 1;
			list_add_tail(&resp->list, &dev->qmi_resp_pool);

			if (schedule)
				tasklet_schedule(&dev->smd_ctl.tx_tlet);
			spin_unlock(&dev->lock);

			req->complete = rmnet_txcommand_complete;
			req->context = dev;
		}
		break;
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		/* This is a workaround for RmNet and is borrowed from the
		 * CDC/ACM standard. The host driver will issue the above ACM
		 * standard request to the RmNet interface in the following
		 * scenario: Once the network adapter is disabled from device
		 * manager, the above request will be sent from the qcusbnet
		 * host driver, with DTR being '0'. Once network adapter is
		 * enabled from device manager (or during enumeration), the
		 * request will be sent with DTR being '1'.
		 */
		if (w_value & RMNET_SMD_ACM_CTRL_DTR)
			ret = smd_tiocmset(dev->smd_ctl.ch, TIOCM_DTR, 0);
		else
			ret = smd_tiocmset(dev->smd_ctl.ch, 0, TIOCM_DTR);

		break;
	default:

invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = 0;
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}
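
/* Submit every request sitting on rx_idle to the bulk OUT endpoint so the
 * host always has buffers to send data into; stop on the first enqueue
 * failure and put that request back on the pool.
 */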
static void rmnet_smd_start_rx(struct rmnet_smd_dev *dev)
{
	struct usb_composite_dev *cdev = dev->cdev;
	int status;
	struct usb_request *req;
	struct list_head *pool = &dev->rx_idle;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(pool)) {
		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);

		spin_unlock_irqrestore(&dev->lock, flags);
		status = usb_ep_queue(dev->epout, req, GFP_ATOMIC);
		spin_lock_irqsave(&dev->lock, flags);

		if (status) {
			ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
			list_add_tail(&req->list, pool);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}
static void rmnet_data_tx_tlet(unsigned long arg)
{
	struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	int status;
	int sz;
	unsigned long flags;

	while (1) {
		sz = smd_cur_packet_size(dev->smd_data.ch);
		if (sz == 0)
			break;
		if (smd_read_avail(dev->smd_data.ch) < sz)
			break;

		spin_lock_irqsave(&dev->lock, flags);
		if (list_empty(&dev->tx_idle)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			DBG(cdev, "rmnet data Tx buffers full\n");
			break;
		}
		req = list_first_entry(&dev->tx_idle, struct usb_request, list);
		list_del(&req->list);
		spin_unlock_irqrestore(&dev->lock, flags);

		req->length = smd_read(dev->smd_data.ch, req->buf, sz);
		status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
		if (status) {
			ERROR(cdev, "rmnet tx data enqueue err %d\n", status);
			spin_lock_irqsave(&dev->lock, flags);
			list_add_tail(&req->list, &dev->tx_idle);
			spin_unlock_irqrestore(&dev->lock, flags);
			break;
		}
		spin_lock_irqsave(&dev->lock, flags);
		dev->dpkts_from_modem++;
		spin_unlock_irqrestore(&dev->lock, flags);
	}
}

static void rmnet_data_rx_tlet(unsigned long arg)
{
	struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	while (1) {
		if (list_empty(&dev->rx_queue)) {
			atomic_set(&dev->smd_data.rx_pkt, 0);
			break;
		}
		req = list_first_entry(&dev->rx_queue,
				struct usb_request, list);
		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
			atomic_set(&dev->smd_data.rx_pkt, req->actual);
			DBG(cdev, "rmnet SMD data channel full\n");
			break;
		}

		list_del(&req->list);
		spin_unlock_irqrestore(&dev->lock, flags);
		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
		spin_lock_irqsave(&dev->lock, flags);
		if (ret != req->actual) {
			ERROR(cdev, "rmnet SMD data write failed\n");
			break;
		}
		dev->dpkts_to_modem++;
		list_add_tail(&req->list, &dev->rx_idle);
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	/* We have free rx data requests. */
	rmnet_smd_start_rx(dev);
}
/* If SMD has enough room to accommodate a data rx packet,
 * write into SMD directly. Otherwise enqueue to rx_queue.
 * We will not write into SMD directly until rx_queue is
 * empty, to strictly preserve the ordering of requests.
 */
static void rmnet_smd_complete_epout(struct usb_ep *ep, struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	int status = req->status;
	int ret;

	switch (status) {
	case 0:
		/* normal completion */
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		return;
	default:
		/* unexpected failure */
		ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
			ep->name, status,
			req->actual, req->length);
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		return;
	}

	spin_lock(&dev->lock);
	dev->dpkts_from_host++;
	if (!atomic_read(&dev->smd_data.rx_pkt)) {
		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
			atomic_set(&dev->smd_data.rx_pkt, req->actual);
			goto queue_req;
		}
		spin_unlock(&dev->lock);
		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
		/* This should never happen */
		if (ret != req->actual)
			ERROR(cdev, "rmnet data smd write failed\n");
		/* Restart Rx */
		spin_lock(&dev->lock);
		dev->dpkts_to_modem++;
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		rmnet_smd_start_rx(dev);
		return;
	}
queue_req:
	list_add_tail(&req->list, &dev->rx_queue);
	spin_unlock(&dev->lock);
}
static void rmnet_smd_complete_epin(struct usb_ep *ep, struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	int status = req->status;
	int schedule = 0;

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->tx_idle);
		spin_unlock(&dev->lock);
		break;
	default:
		ERROR(cdev, "rmnet data tx ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		spin_lock(&dev->lock);
		if (list_empty(&dev->tx_idle))
			schedule = 1;
		list_add_tail(&req->list, &dev->tx_idle);
		dev->dpkts_to_host++;
		if (schedule)
			tasklet_schedule(&dev->smd_data.tx_tlet);
		spin_unlock(&dev->lock);
		break;
	}
}
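
/*
 * Runs from the workqueue after rmnet_smd_disable(): kill the tasklets,
 * close both SMD channels (smd_close() may sleep), return any queued
 * buffers and requests to their idle pools, and drop the platform driver
 * that was registered while waiting for the channels to appear.
 */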
static void rmnet_smd_disconnect_work(struct work_struct *w)
{
	struct qmi_buf *qmi;
	struct usb_request *req;
	struct list_head *act, *tmp;
	struct rmnet_smd_dev *dev = container_of(w, struct rmnet_smd_dev,
					disconnect_work);

	tasklet_kill(&dev->smd_ctl.rx_tlet);
	tasklet_kill(&dev->smd_ctl.tx_tlet);
	tasklet_kill(&dev->smd_data.rx_tlet);
	tasklet_kill(&dev->smd_data.tx_tlet);

	smd_close(dev->smd_ctl.ch);
	dev->smd_ctl.flags = 0;

	smd_close(dev->smd_data.ch);
	dev->smd_data.flags = 0;

	atomic_set(&dev->notify_count, 0);

	list_for_each_safe(act, tmp, &dev->rx_queue) {
		req = list_entry(act, struct usb_request, list);
		list_del(&req->list);
		list_add_tail(&req->list, &dev->rx_idle);
	}

	list_for_each_safe(act, tmp, &dev->qmi_req_q) {
		qmi = list_entry(act, struct qmi_buf, list);
		list_del(&qmi->list);
		list_add_tail(&qmi->list, &dev->qmi_req_pool);
	}

	list_for_each_safe(act, tmp, &dev->qmi_resp_q) {
		qmi = list_entry(act, struct qmi_buf, list);
		list_del(&qmi->list);
		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
	}

	if (dev->is_pdrv_used) {
		platform_driver_unregister(&dev->pdrv);
		dev->is_pdrv_used = 0;
	}
}
/* SMD close may sleep
 * schedule a work to close smd channels
 */
static void rmnet_smd_disable(struct usb_function *f)
{
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
								function);

	atomic_set(&dev->online, 0);

	usb_ep_fifo_flush(dev->epnotify);
	usb_ep_disable(dev->epnotify);
	usb_ep_fifo_flush(dev->epout);
	usb_ep_disable(dev->epout);

	usb_ep_fifo_flush(dev->epin);
	usb_ep_disable(dev->epin);

	/* cleanup work */
	queue_work(dev->wq, &dev->disconnect_work);
}
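
/*
 * Open the control and data SMD channels named by rmnet_ctl_ch and
 * rmnet_data_ch, wait for both to signal SMD_EVENT_OPEN, mark the
 * function online and prime the bulk OUT queue. If the control channel
 * is not yet available, register a platform driver so that
 * rmnet_smd_ch_probe() can retry once the channel comes up.
 */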
static void rmnet_smd_connect_work(struct work_struct *w)
{
	struct rmnet_smd_dev *dev = container_of(w, struct rmnet_smd_dev,
			connect_work);
	struct usb_composite_dev *cdev = dev->cdev;
	int ret = 0;

	/* Control channel for QMI messages */
	ret = smd_open(rmnet_ctl_ch, &dev->smd_ctl.ch,
			&dev->smd_ctl, rmnet_smd_event_notify);
	if (ret) {
		ERROR(cdev, "Unable to open control smd channel: %d\n", ret);
		/*
		 * Register platform driver to be notified in case the SMD
		 * channels later become ready to be opened.
		 */
		if (!dev->is_pdrv_used) {
			ret = platform_driver_register(&dev->pdrv);
			if (ret)
				ERROR(cdev, "pdrv %s register failed %d\n",
						dev->pdrv.driver.name, ret);
			else
				dev->is_pdrv_used = 1;
		}
		return;
	}
	wait_event(dev->smd_ctl.wait, test_bit(CH_OPENED,
				&dev->smd_ctl.flags));

	/* Data channel for network packets */
	ret = smd_open(rmnet_data_ch, &dev->smd_data.ch,
			&dev->smd_data, rmnet_smd_event_notify);
	if (ret) {
		ERROR(cdev, "Unable to open data smd channel\n");
		smd_close(dev->smd_ctl.ch);
		return;
	}
	wait_event(dev->smd_data.wait, test_bit(CH_OPENED,
				&dev->smd_data.flags));

	atomic_set(&dev->online, 1);
	/* Queue Rx data requests */
	rmnet_smd_start_rx(dev);
}

static int rmnet_smd_ch_probe(struct platform_device *pdev)
{
	DBG(rmnet_smd->cdev, "Probe called for device: %s\n", pdev->name);

	queue_work(rmnet_smd->wq, &rmnet_smd->connect_work);

	return 0;
}
/* SMD open may sleep.
 * Schedule a work to open smd channels and enable
 * endpoints if smd channels are opened successfully.
 */
static int rmnet_smd_set_alt(struct usb_function *f,
		unsigned intf, unsigned alt)
{
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
								function);
	struct usb_composite_dev *cdev = dev->cdev;
	int ret = 0;

	/* Enable epin endpoint */
	ret = config_ep_by_speed(cdev->gadget, f, dev->epin);
	if (ret) {
		dev->epin->desc = NULL;
		ERROR(cdev, "config_ep_by_speed failed for ep %s, result %d\n",
			dev->epin->name, ret);
		return ret;
	}
	ret = usb_ep_enable(dev->epin);
	if (ret) {
		ERROR(cdev, "can't enable %s, result %d\n",
			dev->epin->name, ret);
		return ret;
	}

	/* Enable epout endpoint */
	ret = config_ep_by_speed(cdev->gadget, f, dev->epout);
	if (ret) {
		dev->epout->desc = NULL;
		ERROR(cdev, "config_ep_by_speed failed for ep %s, result %d\n",
			dev->epout->name, ret);
		usb_ep_disable(dev->epin);
		return ret;
	}
	ret = usb_ep_enable(dev->epout);
	if (ret) {
		ERROR(cdev, "can't enable %s, result %d\n",
			dev->epout->name, ret);
		usb_ep_disable(dev->epin);
		return ret;
	}

	/* Enable epnotify endpoint */
	ret = config_ep_by_speed(cdev->gadget, f, dev->epnotify);
	if (ret) {
		dev->epnotify->desc = NULL;
		ERROR(cdev, "config_ep_by_speed failed for ep %s, result %d\n",
			dev->epnotify->name, ret);
		usb_ep_disable(dev->epin);
		usb_ep_disable(dev->epout);
		return ret;
	}
	ret = usb_ep_enable(dev->epnotify);
	if (ret) {
		ERROR(cdev, "can't enable %s, result %d\n",
			dev->epnotify->name, ret);
		usb_ep_disable(dev->epin);
		usb_ep_disable(dev->epout);
		return ret;
	}

	queue_work(dev->wq, &dev->connect_work);
	return 0;
}
static void rmnet_smd_free_buf(struct rmnet_smd_dev *dev)
{
	struct qmi_buf *qmi;
	struct usb_request *req;
	struct list_head *act, *tmp;

	dev->dpkts_to_host = 0;
	dev->dpkts_from_modem = 0;
	dev->dpkts_from_host = 0;
	dev->dpkts_to_modem = 0;

	dev->cpkts_to_host = 0;
	dev->cpkts_from_modem = 0;
	dev->cpkts_from_host = 0;
	dev->cpkts_to_modem = 0;

	/* free all usb requests in tx pool (allocated on epin) */
	list_for_each_safe(act, tmp, &dev->tx_idle) {
		req = list_entry(act, struct usb_request, list);
		list_del(&req->list);
		rmnet_smd_free_req(dev->epin, req);
	}

	/* free all usb requests in rx pool (allocated on epout) */
	list_for_each_safe(act, tmp, &dev->rx_idle) {
		req = list_entry(act, struct usb_request, list);
		list_del(&req->list);
		rmnet_smd_free_req(dev->epout, req);
	}

	/* free all buffers in qmi request pool */
	list_for_each_safe(act, tmp, &dev->qmi_req_pool) {
		qmi = list_entry(act, struct qmi_buf, list);
		list_del(&qmi->list);
		rmnet_smd_free_qmi(qmi);
	}

	/* free all buffers in qmi response pool */
	list_for_each_safe(act, tmp, &dev->qmi_resp_pool) {
		qmi = list_entry(act, struct qmi_buf, list);
		list_del(&qmi->list);
		rmnet_smd_free_qmi(qmi);
	}

	rmnet_smd_free_req(dev->epnotify, dev->notify_req);
}
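
/*
 * Bind the function to the configuration: claim an interface number and
 * the three endpoints, mirror the chosen addresses into the high-speed
 * descriptors, and pre-allocate the notify request, the QMI buffer pools
 * and the bulk IN/OUT request pools.
 */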
static int rmnet_smd_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
								function);
	int i, id, ret;
	struct qmi_buf *qmi;
	struct usb_request *req;
	struct usb_ep *ep;

	dev->cdev = cdev;

	/* allocate interface ID */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	dev->ifc_id = id;
	rmnet_smd_interface_desc.bInterfaceNumber = id;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_in_desc);
	if (!ep)
		return -ENODEV;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epin = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_out_desc);
	if (!ep)
		return -ENODEV;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epout = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_notify_desc);
	if (!ep)
		return -ENODEV;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epnotify = ep;

	/* support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		rmnet_smd_hs_in_desc.bEndpointAddress =
				rmnet_smd_fs_in_desc.bEndpointAddress;
		rmnet_smd_hs_out_desc.bEndpointAddress =
				rmnet_smd_fs_out_desc.bEndpointAddress;
		rmnet_smd_hs_notify_desc.bEndpointAddress =
				rmnet_smd_fs_notify_desc.bEndpointAddress;
	}

	/* allocate notification */
	dev->notify_req = rmnet_smd_alloc_req(dev->epnotify,
				RMNET_SMD_MAX_NOTIFY_SIZE, GFP_KERNEL);
	if (IS_ERR(dev->notify_req))
		return PTR_ERR(dev->notify_req);

	dev->notify_req->complete = rmnet_smd_notify_complete;
	dev->notify_req->context = dev;
	dev->notify_req->length = RMNET_SMD_MAX_NOTIFY_SIZE;

	/* Allocate the qmi request and response buffers */
	for (i = 0; i < QMI_REQ_MAX; i++) {
		qmi = rmnet_smd_alloc_qmi(QMI_REQ_SIZE, GFP_KERNEL);
		if (IS_ERR(qmi)) {
			ret = PTR_ERR(qmi);
			goto free_buf;
		}
		list_add_tail(&qmi->list, &dev->qmi_req_pool);
	}

	for (i = 0; i < QMI_RESP_MAX; i++) {
		qmi = rmnet_smd_alloc_qmi(QMI_RESP_SIZE, GFP_KERNEL);
		if (IS_ERR(qmi)) {
			ret = PTR_ERR(qmi);
			goto free_buf;
		}
		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
	}

	/* Allocate bulk in/out requests for data transfer */
	for (i = 0; i < RMNET_RX_REQ_MAX; i++) {
		req = rmnet_smd_alloc_req(dev->epout, RMNET_RX_REQ_SIZE,
				GFP_KERNEL);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			goto free_buf;
		}
		req->length = RMNET_TXN_MAX;
		req->context = dev;
		req->complete = rmnet_smd_complete_epout;
		list_add_tail(&req->list, &dev->rx_idle);
	}

	for (i = 0; i < RMNET_TX_REQ_MAX; i++) {
		req = rmnet_smd_alloc_req(dev->epin, RMNET_TX_REQ_SIZE,
				GFP_KERNEL);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			goto free_buf;
		}
		req->context = dev;
		req->complete = rmnet_smd_complete_epin;
		list_add_tail(&req->list, &dev->tx_idle);
	}

	return 0;

free_buf:
	rmnet_smd_free_buf(dev);
	dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
	return ret;
}
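
/*
 * debugfs interface: usb_rmnet_smd/status dumps the channel state and the
 * packet counters above; writing anything to the file clears the counters.
 */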
#if defined(CONFIG_DEBUG_FS)
static ssize_t rmnet_smd_debug_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct rmnet_smd_dev *dev = file->private_data;
	struct rmnet_smd_ch_info smd_ctl_info = dev->smd_ctl;
	struct rmnet_smd_ch_info smd_data_info = dev->smd_data;
	char *buf;
	unsigned long flags;
	int ret;

	buf = kzalloc(sizeof(char) * 512, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock_irqsave(&dev->lock, flags);
	ret = scnprintf(buf, 512,
			"smd_control_ch_opened: %lu\n"
			"smd_data_ch_opened: %lu\n"
			"usb online : %d\n"
			"dpkts_from_modem: %lu\n"
			"dpkts_to_host: %lu\n"
			"pending_dpkts_to_host: %lu\n"
			"dpkts_from_host: %lu\n"
			"dpkts_to_modem: %lu\n"
			"pending_dpkts_to_modem: %lu\n"
			"cpkts_from_modem: %lu\n"
			"cpkts_to_host: %lu\n"
			"pending_cpkts_to_host: %lu\n"
			"cpkts_from_host: %lu\n"
			"cpkts_to_modem: %lu\n"
			"pending_cpkts_to_modem: %lu\n"
			"smd_read_avail_ctrl: %d\n"
			"smd_write_avail_ctrl: %d\n"
			"smd_read_avail_data: %d\n"
			"smd_write_avail_data: %d\n",
			smd_ctl_info.flags, smd_data_info.flags,
			atomic_read(&dev->online),
			dev->dpkts_from_modem, dev->dpkts_to_host,
			(dev->dpkts_from_modem - dev->dpkts_to_host),
			dev->dpkts_from_host, dev->dpkts_to_modem,
			(dev->dpkts_from_host - dev->dpkts_to_modem),
			dev->cpkts_from_modem, dev->cpkts_to_host,
			(dev->cpkts_from_modem - dev->cpkts_to_host),
			dev->cpkts_from_host, dev->cpkts_to_modem,
			(dev->cpkts_from_host - dev->cpkts_to_modem),
			smd_read_avail(dev->smd_ctl.ch),
			smd_write_avail(dev->smd_ctl.ch),
			smd_read_avail(dev->smd_data.ch),
			smd_write_avail(dev->smd_data.ch));
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);

	kfree(buf);

	return ret;
}

static ssize_t rmnet_smd_debug_reset_stats(struct file *file,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct rmnet_smd_dev *dev = file->private_data;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	dev->dpkts_to_host = 0;
	dev->dpkts_from_modem = 0;
	dev->dpkts_from_host = 0;
	dev->dpkts_to_modem = 0;

	dev->cpkts_to_host = 0;
	dev->cpkts_from_modem = 0;
	dev->cpkts_from_host = 0;
	dev->cpkts_to_modem = 0;

	spin_unlock_irqrestore(&dev->lock, flags);

	return count;
}

static int rmnet_smd_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;

	return 0;
}

const struct file_operations rmnet_smd_debug_stats_ops = {
	.open = rmnet_smd_debug_open,
	.read = rmnet_smd_debug_read_stats,
	.write = rmnet_smd_debug_reset_stats,
};
struct dentry *dent_smd;

static void rmnet_smd_debugfs_init(struct rmnet_smd_dev *dev)
{
	struct dentry *dent_smd_status;

	dent_smd = debugfs_create_dir("usb_rmnet_smd", 0);
	if (!dent_smd || IS_ERR(dent_smd))
		return;

	dent_smd_status = debugfs_create_file("status", 0444, dent_smd, dev,
			&rmnet_smd_debug_stats_ops);

	if (!dent_smd_status || IS_ERR(dent_smd_status)) {
		debugfs_remove(dent_smd);
		dent_smd = NULL;
		return;
	}
}

static void rmnet_smd_debugfs_remove(void)
{
	debugfs_remove_recursive(dent_smd);
}
#else
static inline void rmnet_smd_debugfs_init(struct rmnet_smd_dev *dev) {}
static inline void rmnet_smd_debugfs_remove(void) {}
#endif
static void
rmnet_smd_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
								function);

	tasklet_kill(&dev->smd_ctl.rx_tlet);
	tasklet_kill(&dev->smd_ctl.tx_tlet);
	tasklet_kill(&dev->smd_data.rx_tlet);
	tasklet_kill(&dev->smd_data.tx_tlet);

	flush_workqueue(dev->wq);
	rmnet_smd_free_buf(dev);
	dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */

	destroy_workqueue(dev->wq);
	rmnet_smd_debugfs_remove();
	kfree(dev);
}
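
/*
 * External entry point: allocate the device, set up the workqueue,
 * tasklets, wait queues and buffer lists, fill in the usb_function
 * (descriptors, strings, callbacks) and register it with the given
 * configuration.
 */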
int rmnet_smd_bind_config(struct usb_configuration *c)
{
	struct rmnet_smd_dev *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	rmnet_smd = dev;

	dev->wq = create_singlethread_workqueue("k_rmnet_work");
	if (!dev->wq) {
		ret = -ENOMEM;
		goto free_dev;
	}

	spin_lock_init(&dev->lock);
	atomic_set(&dev->notify_count, 0);
	atomic_set(&dev->online, 0);
	atomic_set(&dev->smd_ctl.rx_pkt, 0);
	atomic_set(&dev->smd_data.rx_pkt, 0);

	INIT_WORK(&dev->connect_work, rmnet_smd_connect_work);
	INIT_WORK(&dev->disconnect_work, rmnet_smd_disconnect_work);

	tasklet_init(&dev->smd_ctl.rx_tlet, rmnet_control_rx_tlet,
					(unsigned long) dev);
	tasklet_init(&dev->smd_ctl.tx_tlet, rmnet_control_tx_tlet,
					(unsigned long) dev);
	tasklet_init(&dev->smd_data.rx_tlet, rmnet_data_rx_tlet,
					(unsigned long) dev);
	tasklet_init(&dev->smd_data.tx_tlet, rmnet_data_tx_tlet,
					(unsigned long) dev);

	init_waitqueue_head(&dev->smd_ctl.wait);
	init_waitqueue_head(&dev->smd_data.wait);

	dev->pdrv.probe = rmnet_smd_ch_probe;
	dev->pdrv.driver.name = CONFIG_RMNET_SMD_CTL_CHANNEL;
	dev->pdrv.driver.owner = THIS_MODULE;

	INIT_LIST_HEAD(&dev->qmi_req_pool);
	INIT_LIST_HEAD(&dev->qmi_req_q);
	INIT_LIST_HEAD(&dev->qmi_resp_pool);
	INIT_LIST_HEAD(&dev->qmi_resp_q);
	INIT_LIST_HEAD(&dev->rx_idle);
	INIT_LIST_HEAD(&dev->rx_queue);
	INIT_LIST_HEAD(&dev->tx_idle);

	dev->function.name = "rmnet";
	dev->function.strings = rmnet_smd_strings;
	dev->function.fs_descriptors = rmnet_smd_fs_function;
	dev->function.hs_descriptors = rmnet_smd_hs_function;
	dev->function.bind = rmnet_smd_bind;
	dev->function.unbind = rmnet_smd_unbind;
	dev->function.setup = rmnet_smd_setup;
	dev->function.set_alt = rmnet_smd_set_alt;
	dev->function.disable = rmnet_smd_disable;

	ret = usb_add_function(c, &dev->function);
	if (ret)
		goto free_wq;

	rmnet_smd_debugfs_init(dev);

	return 0;

free_wq:
	destroy_workqueue(dev->wq);
free_dev:
	kfree(dev);

	return ret;
}