rmnet_usb_ctrl.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276
  1. /* Copyright (c) 2011-2014, 2017, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #include <linux/slab.h>
  13. #include <linux/kernel.h>
  14. #include <linux/module.h>
  15. #include <linux/device.h>
  16. #include <linux/uaccess.h>
  17. #include <linux/termios.h>
  18. #include <linux/poll.h>
  19. #include <linux/ratelimit.h>
  20. #include <linux/debugfs.h>
  21. #include "rmnet_usb.h"
  22. static char *rmnet_dev_names[MAX_RMNET_DEVS] = {"hsicctl"};
  23. module_param_array(rmnet_dev_names, charp, NULL, S_IRUGO | S_IWUSR);
  24. #define DEFAULT_READ_URB_LENGTH 0x1000
  25. #define UNLINK_TIMEOUT_MS 500 /*random value*/
  26. /*Output control lines.*/
  27. #define ACM_CTRL_DTR BIT(0)
  28. #define ACM_CTRL_RTS BIT(1)
  29. /*Input control lines.*/
  30. #define ACM_CTRL_DSR BIT(0)
  31. #define ACM_CTRL_CTS BIT(1)
  32. #define ACM_CTRL_RI BIT(2)
  33. #define ACM_CTRL_CD BIT(3)
  34. /*echo modem_wait > /sys/class/hsicctl/hsicctlx/modem_wait*/
  35. static ssize_t modem_wait_store(struct device *d, struct device_attribute *attr,
  36. const char *buf, size_t n)
  37. {
  38. unsigned int mdm_wait;
  39. struct rmnet_ctrl_dev *dev = dev_get_drvdata(d);
  40. if (!dev)
  41. return -ENODEV;
  42. sscanf(buf, "%u", &mdm_wait);
  43. dev->mdm_wait_timeout = mdm_wait;
  44. return n;
  45. }
  46. static ssize_t modem_wait_show(struct device *d, struct device_attribute *attr,
  47. char *buf)
  48. {
  49. struct rmnet_ctrl_dev *dev = dev_get_drvdata(d);
  50. if (!dev)
  51. return -ENODEV;
  52. return snprintf(buf, PAGE_SIZE, "%u\n", dev->mdm_wait_timeout);
  53. }
  54. static DEVICE_ATTR(modem_wait, 0664, modem_wait_show, modem_wait_store);
  55. static int ctl_msg_dbg_mask;
  56. module_param_named(dump_ctrl_msg, ctl_msg_dbg_mask, int,
  57. S_IRUGO | S_IWUSR | S_IWGRP);
  58. enum {
  59. MSM_USB_CTL_DEBUG = 1U << 0,
  60. MSM_USB_CTL_DUMP_BUFFER = 1U << 1,
  61. };
  62. #define DUMP_BUFFER(prestr, cnt, buf) \
  63. do { \
  64. if (ctl_msg_dbg_mask & MSM_USB_CTL_DUMP_BUFFER) \
  65. print_hex_dump(KERN_INFO, prestr, DUMP_PREFIX_NONE, \
  66. 16, 1, buf, cnt, false); \
  67. } while (0)
  68. #define DBG(x...) \
  69. do { \
  70. if (ctl_msg_dbg_mask & MSM_USB_CTL_DEBUG) \
  71. pr_info(x); \
  72. } while (0)
  73. /* passed in rmnet_usb_ctrl_init */
  74. static int num_devs;
  75. static int insts_per_dev;
  76. /* dynamically allocated 2-D array of num_devs*insts_per_dev ctrl_devs */
  77. static struct rmnet_ctrl_dev **ctrl_devs;
  78. static struct class *ctrldev_classp[MAX_RMNET_DEVS];
  79. static dev_t ctrldev_num[MAX_RMNET_DEVS];
/*
 * One control message in flight.
 * @data_size: number of valid bytes at @data (including any MUX
 *             header/padding when muxing is enabled)
 * @data:      payload buffer; freed by whoever consumes the packet
 * @ctxt:      back-pointer to the owning rmnet_ctrl_dev, used by the
 *             write completion handler
 */
struct ctrl_pkt {
	size_t data_size;
	void *data;
	void *ctxt;
};

/* List node queueing one received ctrl_pkt on a device's rx_list. */
struct ctrl_pkt_list_elem {
	struct list_head list;
	struct ctrl_pkt cpkt;
};
  89. static void resp_avail_cb(struct urb *);
  90. static int rmnet_usb_ctrl_dmux(struct ctrl_pkt_list_elem *clist)
  91. {
  92. struct mux_hdr *hdr;
  93. size_t pad_len;
  94. size_t total_len;
  95. unsigned int mux_id;
  96. hdr = (struct mux_hdr *)clist->cpkt.data;
  97. pad_len = hdr->padding_info >> MUX_PAD_SHIFT;
  98. if (pad_len > MAX_PAD_BYTES(4)) {
  99. pr_err_ratelimited("%s: Invalid pad len %d\n", __func__,
  100. pad_len);
  101. return -EINVAL;
  102. }
  103. mux_id = hdr->mux_id;
  104. if (!mux_id || mux_id > insts_per_dev) {
  105. pr_err_ratelimited("%s: Invalid mux id %d\n", __func__, mux_id);
  106. return -EINVAL;
  107. }
  108. total_len = le16_to_cpu(hdr->pkt_len_w_padding);
  109. if (!total_len || !(total_len - pad_len)) {
  110. pr_err_ratelimited("%s: Invalid pkt length %d\n", __func__,
  111. total_len);
  112. return -EINVAL;
  113. }
  114. clist->cpkt.data_size = total_len - pad_len;
  115. return mux_id - 1;
  116. }
  117. static void rmnet_usb_ctrl_mux(unsigned int id, struct ctrl_pkt *cpkt)
  118. {
  119. struct mux_hdr *hdr;
  120. size_t len;
  121. size_t pad_len = 0;
  122. hdr = (struct mux_hdr *)cpkt->data;
  123. hdr->mux_id = id + 1;
  124. len = cpkt->data_size - sizeof(struct mux_hdr) - MAX_PAD_BYTES(4);
  125. /*add padding if len is not 4 byte aligned*/
  126. pad_len = ALIGN(len, 4) - len;
  127. hdr->pkt_len_w_padding = cpu_to_le16(len + pad_len);
  128. hdr->padding_info = (pad_len << MUX_PAD_SHIFT) | MUX_CTRL_MASK;
  129. cpkt->data_size = sizeof(struct mux_hdr) + hdr->pkt_len_w_padding;
  130. }
/*
 * get_encap_work() - worker that fetches an encapsulated response
 * @w: work_struct embedded in the owning rmnet_ctrl_dev
 *
 * Scheduled from notification_available_cb() when the modem signals
 * RESPONSE_AVAILABLE.  Takes a PM reference, then submits the control
 * read URB (completed by resp_avail_cb(), which drops the PM ref).
 * If the read submission fails, the interrupt URB is resubmitted so
 * notifications keep flowing — unless resume already resubmitted it.
 */
static void get_encap_work(struct work_struct *w)
{
	struct usb_device *udev;
	struct rmnet_ctrl_dev *dev =
		container_of(w, struct rmnet_ctrl_dev, get_encap_work);
	int status;

	/* device disconnected since the work was queued: nothing to do */
	if (!test_bit(RMNET_CTRL_DEV_READY, &dev->status))
		return;

	udev = interface_to_usbdev(dev->intf);

	status = usb_autopm_get_interface(dev->intf);
	if (status < 0 && status != -EAGAIN && status != -EACCES) {
		/* hard resume failure; count it and give up this round */
		dev->get_encap_failure_cnt++;
		return;
	}

	usb_fill_control_urb(dev->rcvurb, udev,
			usb_rcvctrlpipe(udev, 0),
			(unsigned char *)dev->in_ctlreq,
			dev->rcvbuf,
			DEFAULT_READ_URB_LENGTH,
			resp_avail_cb, dev);

	usb_anchor_urb(dev->rcvurb, &dev->rx_submitted);
	status = usb_submit_urb(dev->rcvurb, GFP_KERNEL);
	if (status) {
		dev->get_encap_failure_cnt++;
		usb_unanchor_urb(dev->rcvurb);
		/* read URB won't complete, so drop the PM ref here */
		usb_autopm_put_interface(dev->intf);
		if (status != -ENODEV)
			dev_err(dev->devicep,
			"%s: Error submitting Read URB %d\n",
			__func__, status);
		goto resubmit_int_urb;
	}

	return;

resubmit_int_urb:
	/*check if it is already submitted in resume*/
	if (!dev->inturb->anchor) {
		usb_anchor_urb(dev->inturb, &dev->rx_submitted);
		status = usb_submit_urb(dev->inturb, GFP_KERNEL);
		if (status) {
			usb_unanchor_urb(dev->inturb);
			if (status != -ENODEV)
				dev_err(dev->devicep,
				"%s: Error re-submitting Int URB %d\n",
				__func__, status);
		}
	}
}
/*
 * notification_available_cb() - interrupt-in URB completion handler
 *
 * Dispatches CDC notifications from the modem.  RESPONSE_AVAILABLE
 * marks the device ready (waking a blocked open()) and schedules
 * get_encap_work() to fetch the response; transient URB errors fall
 * through to resubmitting this same interrupt URB.
 */
static void notification_available_cb(struct urb *urb)
{
	int status;
	struct usb_cdc_notification *ctrl;
	struct usb_device *udev;
	struct rmnet_ctrl_dev *dev = urb->context;

	udev = interface_to_usbdev(dev->intf);

	switch (urb->status) {
	case 0:
	/*if non zero lenght of data received while unlink*/
	case -ENOENT:
		/*success*/
		break;

	/*do not resubmit*/
	case -ESHUTDOWN:
	case -ECONNRESET:
	case -EPROTO:
		return;
	case -EPIPE:
		pr_err_ratelimited("%s: Stall on int endpoint\n", __func__);
		/* TBD : halt to be cleared in work */
		return;

	/*resubmit*/
	case -EOVERFLOW:
		pr_err_ratelimited("%s: Babble error happened\n", __func__);
		/* fallthrough */
	default:
		pr_debug_ratelimited("%s: Non zero urb status = %d\n",
			__func__, urb->status);
		goto resubmit_int_urb;
	}

	/* unlinked with no data (the -ENOENT case): nothing to process */
	if (!urb->actual_length)
		return;

	ctrl = urb->transfer_buffer;

	switch (ctrl->bNotificationType) {
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		dev->resp_avail_cnt++;

		/* If MUX is not enabled, wakeup up the open process
		 * upon first notify response available.
		 */
		if (!test_bit(RMNET_CTRL_DEV_READY, &dev->status)) {
			set_bit(RMNET_CTRL_DEV_READY, &dev->status);
			wake_up(&dev->open_wait_queue);
		}

		usb_mark_last_busy(udev);
		queue_work(dev->wq, &dev->get_encap_work);
		return;
	default:
		dev_err(dev->devicep,
			"%s:Command not implemented\n", __func__);
	}

resubmit_int_urb:
	usb_anchor_urb(urb, &dev->rx_submitted);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		usb_unanchor_urb(urb);
		if (status != -ENODEV)
			dev_err(dev->devicep,
			"%s: Error re-submitting Int URB %d\n",
			__func__, status);
	}

	return;
}
/*
 * resp_avail_cb() - completion handler for the encapsulated-response read
 *
 * Copies the response into a freshly allocated list element, demuxes it
 * to the destination channel when MUX is enabled, queues it on that
 * channel's rx_list and wakes readers.  Finally resubmits the interrupt
 * URB unless resume already did.  Always drops the PM reference taken
 * in get_encap_work().
 */
static void resp_avail_cb(struct urb *urb)
{
	struct usb_device *udev;
	struct ctrl_pkt_list_elem *list_elem = NULL;
	struct rmnet_ctrl_dev *rx_dev, *dev = urb->context;
	void *cpkt;
	int ch_id, status = 0;
	size_t cpkt_size = 0;

	udev = interface_to_usbdev(dev->intf);
	/* balance the usb_autopm_get_interface() from get_encap_work() */
	usb_autopm_put_interface_async(dev->intf);

	switch (urb->status) {
	case 0:
		/*success*/
		break;

	/*do not resubmit*/
	case -ESHUTDOWN:
	case -ENOENT:
	case -ECONNRESET:
	case -EPROTO:
		return;

	/*resubmit*/
	case -EOVERFLOW:
		pr_err_ratelimited("%s: Babble error happened\n", __func__);
		/* fallthrough */
	default:
		pr_debug_ratelimited("%s: Non zero urb status = %d\n",
			__func__, urb->status);
		goto resubmit_int_urb;
	}

	dev_dbg(dev->devicep, "Read %d bytes for %s\n",
		urb->actual_length, dev->name);

	cpkt = urb->transfer_buffer;
	cpkt_size = urb->actual_length;
	if (!cpkt_size) {
		dev->zlp_cnt++;
		dev_dbg(dev->devicep, "%s: zero length pkt received\n",
				__func__);
		goto resubmit_int_urb;
	}

	list_elem = kmalloc(sizeof(struct ctrl_pkt_list_elem), GFP_ATOMIC);
	if (!list_elem) {
		/*
		 * NOTE(review): returning here drops the packet and does
		 * NOT resubmit the interrupt URB, so notifications stall
		 * until something else restarts rx — confirm intended.
		 */
		dev_err(dev->devicep, "%s: list_elem alloc failed\n",
			__func__);
		return;
	}
	list_elem->cpkt.data = kmalloc(cpkt_size, GFP_ATOMIC);
	if (!list_elem->cpkt.data) {
		dev_err(dev->devicep, "%s: list_elem->data alloc failed\n",
			__func__);
		kfree(list_elem);
		return;
	}
	memcpy(list_elem->cpkt.data, cpkt, cpkt_size);
	list_elem->cpkt.data_size = cpkt_size;

	rx_dev = dev;

	if (test_bit(RMNET_CTRL_DEV_MUX_EN, &dev->status)) {
		/* zero-based channel index from the MUX header */
		ch_id = rmnet_usb_ctrl_dmux(list_elem);
		if (ch_id < 0) {
			kfree(list_elem->cpkt.data);
			kfree(list_elem);
			goto resubmit_int_urb;
		}

		rx_dev = &ctrl_devs[dev->id][ch_id];
	}

	rx_dev->get_encap_resp_cnt++;

	spin_lock(&rx_dev->rx_lock);
	list_add_tail(&list_elem->list, &rx_dev->rx_list);
	spin_unlock(&rx_dev->rx_lock);

	wake_up(&rx_dev->read_wait_queue);

resubmit_int_urb:
	/*check if it is already submitted in resume*/
	if (!dev->inturb->anchor) {
		usb_mark_last_busy(udev);
		usb_anchor_urb(dev->inturb, &dev->rx_submitted);
		status = usb_submit_urb(dev->inturb, GFP_ATOMIC);
		if (status) {
			usb_unanchor_urb(dev->inturb);
			if (status != -ENODEV)
				dev_err(dev->devicep,
				"%s: Error re-submitting Int URB %d\n",
				__func__, status);
		}
	}
}
  322. int rmnet_usb_ctrl_start_rx(struct rmnet_ctrl_dev *dev)
  323. {
  324. int retval = 0;
  325. usb_anchor_urb(dev->inturb, &dev->rx_submitted);
  326. retval = usb_submit_urb(dev->inturb, GFP_KERNEL);
  327. if (retval < 0) {
  328. usb_unanchor_urb(dev->inturb);
  329. if (retval != -ENODEV)
  330. dev_err(dev->devicep,
  331. "%s Intr submit %d\n", __func__, retval);
  332. }
  333. return retval;
  334. }
/*
 * rmnet_usb_ctrl_alloc_rx() - allocate the receive-side resources
 *
 * Allocates the read URB, its data buffer and the control setup packet
 * used to fetch encapsulated responses.  On any failure everything
 * allocated so far is released (usb_free_urb() and kfree() both accept
 * NULL, so the shared cleanup label is safe at every stage).
 */
static int rmnet_usb_ctrl_alloc_rx(struct rmnet_ctrl_dev *dev)
{
	dev->rcvurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->rcvurb) {
		pr_err("%s: Error allocating read urb\n", __func__);
		goto nomem;
	}

	dev->rcvbuf = kmalloc(DEFAULT_READ_URB_LENGTH, GFP_KERNEL);
	if (!dev->rcvbuf) {
		pr_err("%s: Error allocating read buffer\n", __func__);
		goto nomem;
	}

	dev->in_ctlreq = kmalloc(sizeof(*dev->in_ctlreq), GFP_KERNEL);
	if (!dev->in_ctlreq) {
		pr_err("%s: Error allocating setup packet buffer\n", __func__);
		goto nomem;
	}

	return 0;

nomem:
	usb_free_urb(dev->rcvurb);
	kfree(dev->rcvbuf);
	kfree(dev->in_ctlreq);

	return -ENOMEM;
}
  359. static int rmnet_usb_ctrl_write_cmd(struct rmnet_ctrl_dev *dev)
  360. {
  361. struct usb_device *udev;
  362. if (!test_bit(RMNET_CTRL_DEV_READY, &dev->status))
  363. return -ENODEV;
  364. udev = interface_to_usbdev(dev->intf);
  365. dev->set_ctrl_line_state_cnt++;
  366. return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
  367. USB_CDC_REQ_SET_CONTROL_LINE_STATE,
  368. (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE),
  369. dev->cbits_tomdm,
  370. dev->intf->cur_altsetting->desc.bInterfaceNumber,
  371. NULL, 0, USB_CTRL_SET_TIMEOUT);
  372. }
/*
 * ctrl_write_callback() - completion handler for an encapsulated write
 *
 * Accounts errors, then releases everything the write path allocated
 * (setup packet, data buffer, URB, the ctrl_pkt wrapper) and drops the
 * PM reference taken in rmnet_usb_ctrl_write().
 */
static void ctrl_write_callback(struct urb *urb)
{
	struct ctrl_pkt *cpkt = urb->context;
	struct rmnet_ctrl_dev *dev = cpkt->ctxt;

	if (urb->status) {
		dev->tx_ctrl_err_cnt++;
		pr_debug_ratelimited("Write status/size %d/%d\n",
				urb->status, urb->actual_length);
	}

	kfree(urb->setup_packet);
	kfree(urb->transfer_buffer);
	usb_free_urb(urb);
	kfree(cpkt);

	usb_autopm_put_interface_async(dev->intf);
}
/*
 * rmnet_usb_ctrl_write() - submit a SEND_ENCAPSULATED_COMMAND transfer
 * @dev:  control device
 * @cpkt: packet to transmit; ON SUCCESS ownership passes to the URB and
 *        ctrl_write_callback() frees cpkt, cpkt->data, the setup packet
 *        and the URB, and drops the PM reference
 * @size: value returned to the caller on success
 *
 * Returns @size on success or a negative errno.  On failure the URB and
 * setup packet allocated here are freed, but @cpkt and @cpkt->data stay
 * owned by the caller.
 */
static int rmnet_usb_ctrl_write(struct rmnet_ctrl_dev *dev,
		struct ctrl_pkt *cpkt, size_t size)
{
	int result;
	struct urb *sndurb;
	struct usb_ctrlrequest *out_ctlreq;
	struct usb_device *udev;

	if (!test_bit(RMNET_CTRL_DEV_READY, &dev->status))
		return -ENETRESET;

	udev = interface_to_usbdev(dev->intf);

	sndurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!sndurb) {
		dev_err(dev->devicep, "Error allocating read urb\n");
		return -ENOMEM;
	}

	out_ctlreq = kmalloc(sizeof(*out_ctlreq), GFP_KERNEL);
	if (!out_ctlreq) {
		usb_free_urb(sndurb);
		dev_err(dev->devicep, "Error allocating setup packet buffer\n");
		return -ENOMEM;
	}

	/* CDC Send Encapsulated Request packet */
	out_ctlreq->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS |
			     USB_RECIP_INTERFACE);
	out_ctlreq->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
	out_ctlreq->wValue = 0;
	out_ctlreq->wIndex = dev->intf->cur_altsetting->desc.bInterfaceNumber;
	out_ctlreq->wLength = cpu_to_le16(cpkt->data_size);

	usb_fill_control_urb(sndurb, udev,
			     usb_sndctrlpipe(udev, 0),
			     (unsigned char *)out_ctlreq, (void *)cpkt->data,
			     cpkt->data_size, ctrl_write_callback, cpkt);

	/* PM ref is released by the completion handler on success */
	result = usb_autopm_get_interface(dev->intf);
	if (result < 0) {
		dev_dbg(dev->devicep, "%s: Unable to resume interface: %d\n",
			__func__, result);
		/*
		 * Revisit: if (result == -EPERM)
		 * rmnet_usb_suspend(dev->intf, PMSG_SUSPEND);
		 */
		usb_free_urb(sndurb);
		kfree(out_ctlreq);
		return result;
	}

	usb_anchor_urb(sndurb, &dev->tx_submitted);
	dev->snd_encap_cmd_cnt++;
	result = usb_submit_urb(sndurb, GFP_KERNEL);
	if (result < 0) {
		if (result != -ENODEV)
			dev_err(dev->devicep,
			"%s: Submit URB error %d\n",
			__func__, result);
		dev->snd_encap_cmd_cnt--;
		usb_autopm_put_interface(dev->intf);
		usb_unanchor_urb(sndurb);
		usb_free_urb(sndurb);
		kfree(out_ctlreq);
		return result;
	}

	return size;
}
/*
 * rmnet_ctl_open() - char-device open for a control channel
 *
 * Marks the device open, then (when modem_wait is configured) blocks up
 * to mdm_wait_timeout seconds for the READY bit, which is set either by
 * the first RESPONSE_AVAILABLE notification or at probe time when MUX
 * is enabled.  A second open of an already-open device succeeds without
 * waiting; note that in that path file->private_data is not re-set here.
 */
static int rmnet_ctl_open(struct inode *inode, struct file *file)
{
	int retval = 0;
	struct rmnet_ctrl_dev *dev =
		container_of(inode->i_cdev, struct rmnet_ctrl_dev, cdev);

	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->dev_lock);
	if (test_bit(RMNET_CTRL_DEV_OPEN, &dev->status)) {
		mutex_unlock(&dev->dev_lock);
		goto already_opened;
	}
	set_bit(RMNET_CTRL_DEV_OPEN, &dev->status);
	mutex_unlock(&dev->dev_lock);

	if (dev->mdm_wait_timeout &&
			!test_bit(RMNET_CTRL_DEV_READY, &dev->status)) {
		retval = wait_event_interruptible_timeout(
				dev->open_wait_queue,
				test_bit(RMNET_CTRL_DEV_READY, &dev->status),
				msecs_to_jiffies(dev->mdm_wait_timeout * 1000));
		if (retval == 0) {
			dev_err(dev->devicep, "%s: Timeout opening %s\n",
						__func__, dev->name);
			retval = -ETIMEDOUT;
		} else if (retval < 0)
			dev_err(dev->devicep, "%s: Error waiting for %s\n",
						__func__, dev->name);

		/* timed out or interrupted: undo the OPEN bit and bail */
		if (retval < 0) {
			mutex_lock(&dev->dev_lock);
			clear_bit(RMNET_CTRL_DEV_OPEN, &dev->status);
			mutex_unlock(&dev->dev_lock);
			return retval;
		}
	}

	/* covers the no-timeout-configured case as well */
	if (!test_bit(RMNET_CTRL_DEV_READY, &dev->status)) {
		dev_dbg(dev->devicep, "%s: Connection timedout opening %s\n",
					__func__, dev->name);
		mutex_lock(&dev->dev_lock);
		clear_bit(RMNET_CTRL_DEV_OPEN, &dev->status);
		mutex_unlock(&dev->dev_lock);
		return -ETIMEDOUT;
	}

	file->private_data = dev;

already_opened:
	DBG("%s: Open called for %s\n", __func__, dev->name);

	return 0;
}
/*
 * rmnet_ctl_release() - char-device close for a control channel
 *
 * Drains and frees any queued-but-unread rx packets, clears the OPEN
 * bit, then waits up to UNLINK_TIMEOUT_MS for in-flight writes to
 * complete before forcibly killing them.
 */
static int rmnet_ctl_release(struct inode *inode, struct file *file)
{
	struct ctrl_pkt_list_elem *list_elem = NULL;
	struct rmnet_ctrl_dev *dev;
	unsigned long flag;
	int time;

	dev = file->private_data;
	if (!dev)
		return -ENODEV;

	DBG("%s Called on %s device\n", __func__, dev->name);

	spin_lock_irqsave(&dev->rx_lock, flag);
	while (!list_empty(&dev->rx_list)) {
		list_elem = list_first_entry(
				&dev->rx_list,
				struct ctrl_pkt_list_elem,
				list);
		list_del(&list_elem->list);
		kfree(list_elem->cpkt.data);
		kfree(list_elem);
	}
	spin_unlock_irqrestore(&dev->rx_lock, flag);

	mutex_lock(&dev->dev_lock);
	clear_bit(RMNET_CTRL_DEV_OPEN, &dev->status);
	mutex_unlock(&dev->dev_lock);

	/* give pending tx URBs a chance to finish, then kill stragglers */
	time = usb_wait_anchor_empty_timeout(&dev->tx_submitted,
			UNLINK_TIMEOUT_MS);
	if (!time)
		usb_kill_anchored_urbs(&dev->tx_submitted);

	file->private_data = NULL;

	return 0;
}
  527. static unsigned int rmnet_ctl_poll(struct file *file, poll_table *wait)
  528. {
  529. unsigned int mask = 0;
  530. struct rmnet_ctrl_dev *dev;
  531. dev = file->private_data;
  532. if (!dev)
  533. return POLLERR;
  534. poll_wait(file, &dev->read_wait_queue, wait);
  535. if (!test_bit(RMNET_CTRL_DEV_READY, &dev->status)) {
  536. dev_dbg(dev->devicep, "%s: Device not connected\n",
  537. __func__);
  538. return POLLERR;
  539. }
  540. if (!list_empty(&dev->rx_list))
  541. mask |= POLLIN | POLLRDNORM;
  542. return mask;
  543. }
  544. static ssize_t rmnet_ctl_read(struct file *file, char __user *buf, size_t count,
  545. loff_t *ppos)
  546. {
  547. int retval = 0;
  548. int bytes_to_read;
  549. unsigned int hdr_len = 0;
  550. struct rmnet_ctrl_dev *dev;
  551. struct ctrl_pkt_list_elem *list_elem = NULL;
  552. unsigned long flags;
  553. dev = file->private_data;
  554. if (!dev)
  555. return -ENODEV;
  556. DBG("%s: Read from %s\n", __func__, dev->name);
  557. ctrl_read:
  558. if (!test_bit(RMNET_CTRL_DEV_READY, &dev->status)) {
  559. dev_dbg(dev->devicep, "%s: Device not connected\n",
  560. __func__);
  561. return -ENETRESET;
  562. }
  563. spin_lock_irqsave(&dev->rx_lock, flags);
  564. if (list_empty(&dev->rx_list)) {
  565. spin_unlock_irqrestore(&dev->rx_lock, flags);
  566. retval = wait_event_interruptible(dev->read_wait_queue,
  567. !list_empty(&dev->rx_list) ||
  568. !test_bit(RMNET_CTRL_DEV_READY, &dev->status));
  569. if (retval < 0)
  570. return retval;
  571. goto ctrl_read;
  572. }
  573. list_elem = list_first_entry(&dev->rx_list,
  574. struct ctrl_pkt_list_elem, list);
  575. list_del(&list_elem->list);
  576. bytes_to_read = (uint32_t)(list_elem->cpkt.data_size);
  577. if (bytes_to_read > count) {
  578. spin_unlock_irqrestore(&dev->rx_lock, flags);
  579. dev_err(dev->devicep, "%s: Packet size %d > buf size %d\n",
  580. __func__, bytes_to_read, count);
  581. return -ENOMEM;
  582. }
  583. spin_unlock_irqrestore(&dev->rx_lock, flags);
  584. if (test_bit(RMNET_CTRL_DEV_MUX_EN, &dev->status))
  585. hdr_len = sizeof(struct mux_hdr);
  586. if (copy_to_user(buf, list_elem->cpkt.data + hdr_len, bytes_to_read)) {
  587. dev_err(dev->devicep,
  588. "%s: copy_to_user failed for %s\n",
  589. __func__, dev->name);
  590. spin_lock_irqsave(&dev->rx_lock, flags);
  591. list_add(&list_elem->list, &dev->rx_list);
  592. spin_unlock_irqrestore(&dev->rx_lock, flags);
  593. return -EFAULT;
  594. }
  595. kfree(list_elem->cpkt.data);
  596. kfree(list_elem);
  597. DBG("%s: Returning %d bytes to %s\n", __func__, bytes_to_read,
  598. dev->name);
  599. DUMP_BUFFER("Read: ", bytes_to_read, buf);
  600. return bytes_to_read;
  601. }
  602. static ssize_t rmnet_ctl_write(struct file *file, const char __user * buf,
  603. size_t size, loff_t *pos)
  604. {
  605. int status;
  606. size_t total_len;
  607. void *wbuf;
  608. void *actual_data;
  609. struct ctrl_pkt *cpkt;
  610. struct rmnet_ctrl_dev *dev = file->private_data;
  611. if (!dev)
  612. return -ENODEV;
  613. if (size <= 0)
  614. return -EINVAL;
  615. if (!test_bit(RMNET_CTRL_DEV_READY, &dev->status))
  616. return -ENETRESET;
  617. DBG("%s: Writing %i bytes on %s\n", __func__, size, dev->name);
  618. total_len = size;
  619. if (test_bit(RMNET_CTRL_DEV_MUX_EN, &dev->status))
  620. total_len += sizeof(struct mux_hdr) + MAX_PAD_BYTES(4);
  621. wbuf = kmalloc(total_len , GFP_KERNEL);
  622. if (!wbuf)
  623. return -ENOMEM;
  624. cpkt = kmalloc(sizeof(struct ctrl_pkt), GFP_KERNEL);
  625. if (!cpkt) {
  626. kfree(wbuf);
  627. return -ENOMEM;
  628. }
  629. actual_data = cpkt->data = wbuf;
  630. cpkt->data_size = total_len;
  631. cpkt->ctxt = dev;
  632. if (test_bit(RMNET_CTRL_DEV_MUX_EN, &dev->status)) {
  633. actual_data = wbuf + sizeof(struct mux_hdr);
  634. rmnet_usb_ctrl_mux(dev->ch_id, cpkt);
  635. }
  636. status = copy_from_user(actual_data, buf, size);
  637. if (status) {
  638. dev_err(dev->devicep,
  639. "%s: Unable to copy data from userspace %d\n",
  640. __func__, status);
  641. kfree(wbuf);
  642. kfree(cpkt);
  643. return status;
  644. }
  645. DUMP_BUFFER("Write: ", size, buf);
  646. status = rmnet_usb_ctrl_write(dev, cpkt, size);
  647. if (status == size)
  648. return size;
  649. return status;
  650. }
  651. static int rmnet_ctrl_tiocmset(struct rmnet_ctrl_dev *dev, unsigned int set,
  652. unsigned int clear)
  653. {
  654. int retval;
  655. mutex_lock(&dev->dev_lock);
  656. if (set & TIOCM_DTR)
  657. dev->cbits_tomdm |= ACM_CTRL_DTR;
  658. /*
  659. * TBD if (set & TIOCM_RTS)
  660. * dev->cbits_tomdm |= ACM_CTRL_RTS;
  661. */
  662. if (clear & TIOCM_DTR)
  663. dev->cbits_tomdm &= ~ACM_CTRL_DTR;
  664. /*
  665. * (clear & TIOCM_RTS)
  666. * dev->cbits_tomdm &= ~ACM_CTRL_RTS;
  667. */
  668. mutex_unlock(&dev->dev_lock);
  669. retval = usb_autopm_get_interface(dev->intf);
  670. if (retval < 0) {
  671. dev_dbg(dev->devicep, "%s: Unable to resume interface: %d\n",
  672. __func__, retval);
  673. return retval;
  674. }
  675. retval = rmnet_usb_ctrl_write_cmd(dev);
  676. usb_autopm_put_interface(dev->intf);
  677. return retval;
  678. }
  679. static int rmnet_ctrl_tiocmget(struct rmnet_ctrl_dev *dev)
  680. {
  681. int ret;
  682. mutex_lock(&dev->dev_lock);
  683. ret =
  684. /*
  685. * TBD(dev->cbits_tolocal & ACM_CTRL_DSR ? TIOCM_DSR : 0) |
  686. * (dev->cbits_tolocal & ACM_CTRL_CTS ? TIOCM_CTS : 0) |
  687. */
  688. (dev->cbits_tolocal & ACM_CTRL_CD ? TIOCM_CD : 0) |
  689. /*
  690. * TBD (dev->cbits_tolocal & ACM_CTRL_RI ? TIOCM_RI : 0) |
  691. *(dev->cbits_tomdm & ACM_CTRL_RTS ? TIOCM_RTS : 0) |
  692. */
  693. (dev->cbits_tomdm & ACM_CTRL_DTR ? TIOCM_DTR : 0);
  694. mutex_unlock(&dev->dev_lock);
  695. return ret;
  696. }
  697. static long rmnet_ctrl_ioctl(struct file *file, unsigned int cmd,
  698. unsigned long arg)
  699. {
  700. int ret;
  701. struct rmnet_ctrl_dev *dev;
  702. dev = file->private_data;
  703. if (!dev)
  704. return -ENODEV;
  705. switch (cmd) {
  706. case TIOCMGET:
  707. ret = rmnet_ctrl_tiocmget(dev);
  708. break;
  709. case TIOCMSET:
  710. ret = rmnet_ctrl_tiocmset(dev, arg, ~arg);
  711. break;
  712. default:
  713. ret = -EINVAL;
  714. }
  715. return ret;
  716. }
/* Character-device operations exposed for each rmnet control node. */
static const struct file_operations ctrldev_fops = {
	.owner = THIS_MODULE,
	.read = rmnet_ctl_read,
	.write = rmnet_ctl_write,
	.unlocked_ioctl = rmnet_ctrl_ioctl,
	.open = rmnet_ctl_open,
	.release = rmnet_ctl_release,
	.poll = rmnet_ctl_poll,
};
/*
 * rmnet_usb_ctrl_probe() - bind a USB interface to a free ctrl_dev slot
 * @intf:         claimed USB interface
 * @int_in:       interrupt-in endpoint used for CDC notifications
 * @rmnet_devnum: row index into the ctrl_devs table
 * @data:         in: non-zero enables MUX; out: pointer to the claimed dev
 *
 * Picks the first unclaimed instance for this device number, sets up
 * the interrupt URB and the GET_ENCAPSULATED_RESPONSE setup packet,
 * and starts reception.  When MUX is enabled the device is marked
 * READY immediately (there may be no per-channel notification).
 */
int rmnet_usb_ctrl_probe(struct usb_interface *intf,
		struct usb_host_endpoint *int_in,
		unsigned long rmnet_devnum,
		unsigned long *data)
{
	struct rmnet_ctrl_dev *dev = NULL;
	u16 wMaxPacketSize;
	struct usb_endpoint_descriptor *ep;
	struct usb_device *udev = interface_to_usbdev(intf);
	int interval;
	int ret = 0, n;

	/* Find next available ctrl_dev */
	for (n = 0; n < insts_per_dev; n++) {
		dev = &ctrl_devs[rmnet_devnum][n];
		if (!dev->claimed)
			break;
	}

	if (!dev || n == insts_per_dev) {
		pr_err("%s: No available ctrl devices for %lu\n", __func__,
			rmnet_devnum);
		return -ENODEV;
	}

	dev->int_pipe = usb_rcvintpipe(udev,
		int_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	dev->intf = intf;

	dev->id = rmnet_devnum;

	dev->snd_encap_cmd_cnt = 0;
	dev->get_encap_resp_cnt = 0;
	dev->resp_avail_cnt = 0;
	dev->tx_ctrl_err_cnt = 0;
	dev->set_ctrl_line_state_cnt = 0;

	dev->inturb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->inturb) {
		dev_err(dev->devicep, "Error allocating int urb\n");
		return -ENOMEM;
	}

	/*use max pkt size from ep desc*/
	ep = &dev->intf->cur_altsetting->endpoint[0].desc;
	wMaxPacketSize = le16_to_cpu(ep->wMaxPacketSize);

	dev->intbuf = kmalloc(wMaxPacketSize, GFP_KERNEL);
	if (!dev->intbuf) {
		usb_free_urb(dev->inturb);
		dev_err(dev->devicep, "Error allocating int buffer\n");
		return -ENOMEM;
	}

	/*
	 * NOTE(review): in_ctlreq is only populated here; it is presumably
	 * allocated earlier via rmnet_usb_ctrl_alloc_rx() — confirm the
	 * init path guarantees that before probe runs.
	 */
	dev->in_ctlreq->bRequestType =
		(USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
	dev->in_ctlreq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
	dev->in_ctlreq->wValue = 0;
	dev->in_ctlreq->wIndex =
		dev->intf->cur_altsetting->desc.bInterfaceNumber;
	dev->in_ctlreq->wLength = cpu_to_le16(DEFAULT_READ_URB_LENGTH);

	interval = int_in->desc.bInterval;

	usb_fill_int_urb(dev->inturb, udev,
			 dev->int_pipe,
			 dev->intbuf, wMaxPacketSize,
			 notification_available_cb, dev, interval);

	usb_mark_last_busy(udev);
	ret = rmnet_usb_ctrl_start_rx(dev);
	if (ret) {
		usb_free_urb(dev->inturb);
		kfree(dev->intbuf);
		return ret;
	}

	dev->claimed = true;

	/*mux info is passed to data parameter*/
	if (*data)
		set_bit(RMNET_CTRL_DEV_MUX_EN, &dev->status);

	*data = (unsigned long)dev;

	/* If MUX is enabled, wakeup the open process here */
	if (test_bit(RMNET_CTRL_DEV_MUX_EN, &dev->status)) {
		set_bit(RMNET_CTRL_DEV_READY, &dev->status);
		wake_up(&dev->open_wait_queue);
	}

	return 0;
}
/*
 * rmnet_usb_ctrl_disconnect() - tear down a control channel on unplug
 *
 * Clears READY so blocked readers/writers bail out, updates the modem
 * control bits, wakes readers, cancels the encap worker and kills all
 * in-flight URBs before freeing the interrupt URB and its buffer.
 */
void rmnet_usb_ctrl_disconnect(struct rmnet_ctrl_dev *dev)
{
	dev->claimed = false;

	clear_bit(RMNET_CTRL_DEV_READY, &dev->status);

	mutex_lock(&dev->dev_lock);
	/*TBD: for now just update CD status*/
	dev->cbits_tolocal = ~ACM_CTRL_CD;
	dev->cbits_tomdm = ~ACM_CTRL_DTR;
	mutex_unlock(&dev->dev_lock);

	/* let any sleeping reader see !READY and return -ENETRESET */
	wake_up(&dev->read_wait_queue);

	cancel_work_sync(&dev->get_encap_work);

	usb_kill_anchored_urbs(&dev->tx_submitted);
	usb_kill_anchored_urbs(&dev->rx_submitted);

	usb_free_urb(dev->inturb);
	dev->inturb = NULL;

	kfree(dev->intbuf);
	dev->intbuf = NULL;
}
  820. #if defined(CONFIG_DEBUG_FS)
  821. #define DEBUG_BUF_SIZE 4096
  822. static ssize_t rmnet_usb_ctrl_read_stats(struct file *file, char __user *ubuf,
  823. size_t count, loff_t *ppos)
  824. {
  825. struct rmnet_ctrl_dev *dev;
  826. char *buf;
  827. int ret;
  828. int i, n;
  829. int temp = 0;
  830. buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
  831. if (!buf)
  832. return -ENOMEM;
  833. for (i = 0; i < num_devs; i++) {
  834. for (n = 0; n < insts_per_dev; n++) {
  835. dev = &ctrl_devs[i][n];
  836. temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
  837. "\n#ctrl_dev: %pK Name: %s#\n"
  838. "snd encap cmd cnt %u\n"
  839. "resp avail cnt: %u\n"
  840. "get encap resp cnt: %u\n"
  841. "set ctrl line state cnt: %u\n"
  842. "tx_err_cnt: %u\n"
  843. "cbits_tolocal: %d\n"
  844. "cbits_tomdm: %d\n"
  845. "mdm_wait_timeout: %u\n"
  846. "zlp_cnt: %u\n"
  847. "get_encap_failure_cnt %u\n"
  848. "RMNET_CTRL_DEV_MUX_EN: %d\n"
  849. "RMNET_CTRL_DEV_OPEN: %d\n"
  850. "RMNET_CTRL_DEV_READY: %d\n",
  851. dev, dev->name,
  852. dev->snd_encap_cmd_cnt,
  853. dev->resp_avail_cnt,
  854. dev->get_encap_resp_cnt,
  855. dev->set_ctrl_line_state_cnt,
  856. dev->tx_ctrl_err_cnt,
  857. dev->cbits_tolocal,
  858. dev->cbits_tomdm,
  859. dev->mdm_wait_timeout,
  860. dev->zlp_cnt,
  861. dev->get_encap_failure_cnt,
  862. test_bit(RMNET_CTRL_DEV_MUX_EN,
  863. &dev->status),
  864. test_bit(RMNET_CTRL_DEV_OPEN,
  865. &dev->status),
  866. test_bit(RMNET_CTRL_DEV_READY,
  867. &dev->status));
  868. }
  869. }
  870. ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
  871. kfree(buf);
  872. return ret;
  873. }
  874. static ssize_t rmnet_usb_ctrl_reset_stats(struct file *file, const char __user *
  875. buf, size_t count, loff_t *ppos)
  876. {
  877. struct rmnet_ctrl_dev *dev;
  878. int i, n;
  879. for (i = 0; i < num_devs; i++) {
  880. for (n = 0; n < insts_per_dev; n++) {
  881. dev = &ctrl_devs[i][n];
  882. dev->snd_encap_cmd_cnt = 0;
  883. dev->resp_avail_cnt = 0;
  884. dev->get_encap_resp_cnt = 0;
  885. dev->set_ctrl_line_state_cnt = 0;
  886. dev->tx_ctrl_err_cnt = 0;
  887. dev->zlp_cnt = 0;
  888. }
  889. }
  890. return count;
  891. }
  892. const struct file_operations rmnet_usb_ctrl_stats_ops = {
  893. .read = rmnet_usb_ctrl_read_stats,
  894. .write = rmnet_usb_ctrl_reset_stats,
  895. };
/* debugfs directory "rmnet_usb_ctrl" and its "status" file; created in
 * rmnet_usb_ctrl_debugfs_init(), removed in rmnet_usb_ctrl_debugfs_exit() */
struct dentry *usb_ctrl_dent;
struct dentry *usb_ctrl_dfile;
  898. static void rmnet_usb_ctrl_debugfs_init(void)
  899. {
  900. usb_ctrl_dent = debugfs_create_dir("rmnet_usb_ctrl", 0);
  901. if (IS_ERR(usb_ctrl_dent))
  902. return;
  903. usb_ctrl_dfile = debugfs_create_file("status", 0644, usb_ctrl_dent, 0,
  904. &rmnet_usb_ctrl_stats_ops);
  905. if (!usb_ctrl_dfile || IS_ERR(usb_ctrl_dfile))
  906. debugfs_remove(usb_ctrl_dent);
  907. }
/* Remove the debugfs entries; the file must go before its parent dir.
 * debugfs_remove(NULL) is a no-op, so partial init is handled safely. */
static void rmnet_usb_ctrl_debugfs_exit(void)
{
debugfs_remove(usb_ctrl_dfile);
debugfs_remove(usb_ctrl_dent);
}
  913. #else
/* No-op stubs when debugfs support is compiled out */
static void rmnet_usb_ctrl_debugfs_init(void) { }
static void rmnet_usb_ctrl_debugfs_exit(void) { }
  916. #endif
  917. int rmnet_usb_ctrl_init(int no_rmnet_devs, int no_rmnet_insts_per_dev)
  918. {
  919. struct rmnet_ctrl_dev *dev;
  920. int i, n;
  921. int status;
  922. num_devs = no_rmnet_devs;
  923. insts_per_dev = no_rmnet_insts_per_dev;
  924. if (no_rmnet_devs >= MAX_RMNET_DEVS) {
  925. pr_err("Invalid device number.\n");
  926. return -EINVAL;
  927. }
  928. ctrl_devs = kzalloc(num_devs * sizeof(*ctrl_devs), GFP_KERNEL);
  929. if (!ctrl_devs)
  930. return -ENOMEM;
  931. for (i = 0; i < num_devs; i++) {
  932. ctrl_devs[i] = kzalloc(insts_per_dev * sizeof(*ctrl_devs[i]),
  933. GFP_KERNEL);
  934. if (!ctrl_devs[i])
  935. return -ENOMEM;
  936. status = alloc_chrdev_region(&ctrldev_num[i], 0, insts_per_dev,
  937. rmnet_dev_names[i]);
  938. if (IS_ERR_VALUE(status)) {
  939. pr_err("ERROR:%s: alloc_chrdev_region() ret %i.\n",
  940. __func__, status);
  941. return status;
  942. }
  943. ctrldev_classp[i] = class_create(THIS_MODULE,
  944. rmnet_dev_names[i]);
  945. if (IS_ERR(ctrldev_classp[i])) {
  946. pr_err("ERROR:%s: class_create() ENOMEM\n", __func__);
  947. status = PTR_ERR(ctrldev_classp[i]);
  948. return status;
  949. }
  950. for (n = 0; n < insts_per_dev; n++) {
  951. dev = &ctrl_devs[i][n];
  952. /*for debug purpose*/
  953. snprintf(dev->name, CTRL_DEV_MAX_LEN, "%s%d",
  954. rmnet_dev_names[i], n);
  955. dev->wq = create_singlethread_workqueue(dev->name);
  956. if (!dev->wq) {
  957. pr_err("unable to allocate workqueue");
  958. kfree(dev);
  959. return -ENOMEM;
  960. }
  961. dev->ch_id = n;
  962. mutex_init(&dev->dev_lock);
  963. spin_lock_init(&dev->rx_lock);
  964. init_waitqueue_head(&dev->read_wait_queue);
  965. init_waitqueue_head(&dev->open_wait_queue);
  966. INIT_LIST_HEAD(&dev->rx_list);
  967. init_usb_anchor(&dev->tx_submitted);
  968. init_usb_anchor(&dev->rx_submitted);
  969. INIT_WORK(&dev->get_encap_work, get_encap_work);
  970. cdev_init(&dev->cdev, &ctrldev_fops);
  971. dev->cdev.owner = THIS_MODULE;
  972. status = cdev_add(&dev->cdev, (ctrldev_num[i] + n), 1);
  973. if (status) {
  974. pr_err("%s: cdev_add() ret %i\n", __func__,
  975. status);
  976. destroy_workqueue(dev->wq);
  977. kfree(dev);
  978. return status;
  979. }
  980. dev->devicep = device_create(ctrldev_classp[i], NULL,
  981. (ctrldev_num[i] + n), NULL,
  982. "%s%d", rmnet_dev_names[i],
  983. n);
  984. if (IS_ERR(dev->devicep)) {
  985. long status = PTR_ERR(dev->devicep);
  986. pr_err("%s: device_create() returned %ld\n",
  987. __func__, status);
  988. cdev_del(&dev->cdev);
  989. destroy_workqueue(dev->wq);
  990. kfree(dev);
  991. return status;
  992. }
  993. /*create /sys/class/hsicctl/hsicctlx/modem_wait*/
  994. status = device_create_file(dev->devicep,
  995. &dev_attr_modem_wait);
  996. if (status) {
  997. device_destroy(dev->devicep->class,
  998. dev->devicep->devt);
  999. cdev_del(&dev->cdev);
  1000. destroy_workqueue(dev->wq);
  1001. kfree(dev);
  1002. return status;
  1003. }
  1004. dev_set_drvdata(dev->devicep, dev);
  1005. status = rmnet_usb_ctrl_alloc_rx(dev);
  1006. if (status) {
  1007. device_remove_file(dev->devicep,
  1008. &dev_attr_modem_wait);
  1009. device_destroy(dev->devicep->class,
  1010. dev->devicep->devt);
  1011. cdev_del(&dev->cdev);
  1012. destroy_workqueue(dev->wq);
  1013. kfree(dev);
  1014. return status;
  1015. }
  1016. }
  1017. }
  1018. rmnet_usb_ctrl_debugfs_init();
  1019. pr_info("rmnet usb ctrl Initialized.\n");
  1020. return 0;
  1021. }
  1022. static void free_rmnet_ctrl_dev(struct rmnet_ctrl_dev *dev)
  1023. {
  1024. kfree(dev->in_ctlreq);
  1025. kfree(dev->rcvbuf);
  1026. kfree(dev->intbuf);
  1027. usb_free_urb(dev->rcvurb);
  1028. usb_free_urb(dev->inturb);
  1029. device_remove_file(dev->devicep, &dev_attr_modem_wait);
  1030. cdev_del(&dev->cdev);
  1031. destroy_workqueue(dev->wq);
  1032. device_destroy(dev->devicep->class,
  1033. dev->devicep->devt);
  1034. }
  1035. void rmnet_usb_ctrl_exit(int no_rmnet_devs, int no_rmnet_insts_per_dev)
  1036. {
  1037. int i, n;
  1038. for (i = 0; i < no_rmnet_devs; i++) {
  1039. for (n = 0; n < no_rmnet_insts_per_dev; n++)
  1040. free_rmnet_ctrl_dev(&ctrl_devs[i][n]);
  1041. kfree(ctrl_devs[i]);
  1042. class_destroy(ctrldev_classp[i]);
  1043. if (ctrldev_num[i])
  1044. unregister_chrdev_region(ctrldev_num[i], insts_per_dev);
  1045. }
  1046. kfree(ctrl_devs);
  1047. rmnet_usb_ctrl_debugfs_exit();
  1048. }