mdm_ctrl_bridge.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866
  1. /* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #include <linux/kernel.h>
  13. #include <linux/errno.h>
  14. #include <linux/init.h>
  15. #include <linux/slab.h>
  16. #include <linux/module.h>
  17. #include <linux/kref.h>
  18. #include <linux/debugfs.h>
  19. #include <linux/platform_device.h>
  20. #include <linux/uaccess.h>
  21. #include <linux/ratelimit.h>
  22. #include <linux/usb/ch9.h>
  23. #include <linux/usb/cdc.h>
  24. #include <linux/termios.h>
  25. #include <asm/unaligned.h>
  26. #include <mach/usb_bridge.h>
  27. #define ACM_CTRL_DTR (1 << 0)
  28. #define DEFAULT_READ_URB_LENGTH 4096
  29. #define SUSPENDED BIT(0)
/* State of the interrupt-in ("notification") URB. */
enum ctrl_bridge_rx_state {
	RX_IDLE, /* inturb is not queued */
	RX_WAIT, /* inturb is queued and waiting for data */
	RX_BUSY, /* inturb is completed. processing RX */
};

/* Per-channel state for one modem control bridge instance. */
struct ctrl_bridge {
	struct usb_device *udev;
	struct usb_interface *intf;
	char *name;			/* transport name; "none" when unbound */
	unsigned int int_pipe;		/* interrupt-in pipe for notifications */
	struct urb *inturb;		/* notification URB */
	void *intbuf;			/* buffer for inturb */
	struct urb *readurb;		/* GET_ENCAPSULATED_RESPONSE control URB */
	void *readbuf;			/* buffer for readurb */
	struct usb_anchor tx_submitted;	/* in-flight write URBs */
	struct usb_anchor tx_deferred;	/* writes parked while suspended */
	struct usb_ctrlrequest *in_ctlreq; /* prebuilt setup packet for readurb */
	struct bridge *brdg;		/* bound client; NULL when channel closed */
	struct platform_device *pdev;
	unsigned long flags;		/* SUSPENDED bit */
	/* input control lines (DSR, CTS, CD, RI) */
	unsigned int cbits_tohost;
	/* output control lines (DTR, RTS) */
	unsigned int cbits_tomdm;
	spinlock_t lock;		/* protects rx_state (and flags updates) */
	enum ctrl_bridge_rx_state rx_state;
	/* counters */
	unsigned int snd_encap_cmd;
	unsigned int get_encap_res;
	unsigned int resp_avail;
	unsigned int set_ctrl_line_sts;
	unsigned int notify_ser_state;
};
  63. static struct ctrl_bridge *__dev[MAX_BRIDGE_DEVICES];
  64. static int get_ctrl_bridge_chid(char *xport_name)
  65. {
  66. struct ctrl_bridge *dev;
  67. int i;
  68. for (i = 0; i < MAX_BRIDGE_DEVICES; i++) {
  69. dev = __dev[i];
  70. if (!strncmp(dev->name, xport_name, BRIDGE_NAME_MAX_LEN))
  71. return i;
  72. }
  73. return -ENODEV;
  74. }
  75. unsigned int ctrl_bridge_get_cbits_tohost(unsigned int id)
  76. {
  77. struct ctrl_bridge *dev;
  78. if (id >= MAX_BRIDGE_DEVICES)
  79. return -EINVAL;
  80. dev = __dev[id];
  81. if (!dev)
  82. return -ENODEV;
  83. return dev->cbits_tohost;
  84. }
  85. EXPORT_SYMBOL(ctrl_bridge_get_cbits_tohost);
  86. int ctrl_bridge_set_cbits(unsigned int id, unsigned int cbits)
  87. {
  88. struct ctrl_bridge *dev;
  89. struct bridge *brdg;
  90. int retval;
  91. if (id >= MAX_BRIDGE_DEVICES)
  92. return -EINVAL;
  93. dev = __dev[id];
  94. if (!dev)
  95. return -ENODEV;
  96. pr_debug("%s: dev[id] =%u cbits : %u\n", __func__, id, cbits);
  97. brdg = dev->brdg;
  98. if (!brdg)
  99. return -ENODEV;
  100. dev->cbits_tomdm = cbits;
  101. retval = ctrl_bridge_write(id, NULL, 0);
  102. /* if DTR is high, update latest modem info to host */
  103. if (brdg && (cbits & ACM_CTRL_DTR) && brdg->ops.send_cbits)
  104. brdg->ops.send_cbits(brdg->ctx, dev->cbits_tohost);
  105. return retval;
  106. }
  107. EXPORT_SYMBOL(ctrl_bridge_set_cbits);
  108. static int ctrl_bridge_start_read(struct ctrl_bridge *dev, gfp_t gfp_flags)
  109. {
  110. int retval = 0;
  111. unsigned long flags;
  112. if (!dev->inturb) {
  113. dev_err(&dev->intf->dev, "%s: inturb is NULL\n", __func__);
  114. return -ENODEV;
  115. }
  116. retval = usb_submit_urb(dev->inturb, gfp_flags);
  117. if (retval < 0 && retval != -EPERM) {
  118. dev_err(&dev->intf->dev,
  119. "%s error submitting int urb %d\n",
  120. __func__, retval);
  121. }
  122. spin_lock_irqsave(&dev->lock, flags);
  123. if (retval)
  124. dev->rx_state = RX_IDLE;
  125. else
  126. dev->rx_state = RX_WAIT;
  127. spin_unlock_irqrestore(&dev->lock, flags);
  128. return retval;
  129. }
/*
 * Completion handler for the GET_ENCAPSULATED_RESPONSE control URB
 * (interrupt context).  On success the payload is handed to the client
 * via brdg->ops.send_pkt().  The interrupt URB is then resubmitted to
 * poll for the next notification, unless the status indicates
 * unlink/unplug/babble, in which case polling stops (rx_state back to
 * RX_IDLE).
 */
static void resp_avail_cb(struct urb *urb)
{
	struct ctrl_bridge *dev = urb->context;
	int resubmit_urb = 1;
	struct bridge *brdg = dev->brdg;
	unsigned long flags;

	/*usb device disconnect*/
	if (urb->dev->state == USB_STATE_NOTATTACHED)
		return;

	switch (urb->status) {
	case 0:
		/*success*/
		dev->get_encap_res++;
		if (brdg && brdg->ops.send_pkt)
			brdg->ops.send_pkt(brdg->ctx, urb->transfer_buffer,
				urb->actual_length);
		break;

	/*do not resubmit*/
	case -ESHUTDOWN:
	case -ENOENT:
	case -ECONNRESET:
		/* unplug */
	case -EPROTO:
		/*babble error*/
		resubmit_urb = 0;
		/* fallthrough - fatal statuses still get logged below */

	/*resubmit*/
	case -EOVERFLOW:
	default:
		dev_dbg(&dev->intf->dev, "%s: non zero urb status = %d\n",
			__func__, urb->status);
	}

	if (resubmit_urb) {
		/*re- submit int urb to check response available*/
		ctrl_bridge_start_read(dev, GFP_ATOMIC);
	} else {
		/* polling stops; record that the int urb is not queued */
		spin_lock_irqsave(&dev->lock, flags);
		dev->rx_state = RX_IDLE;
		spin_unlock_irqrestore(&dev->lock, flags);
	}

	/* balances usb_autopm_get_interface_no_resume() taken at submit */
	usb_autopm_put_interface_async(dev->intf);
}
/*
 * Completion handler for the interrupt-in notification URB (interrupt
 * context).  Decodes the CDC notification: RESPONSE_AVAILABLE kicks off
 * a GET_ENCAPSULATED_RESPONSE control read; SERIAL_STATE forwards the
 * modem control lines to the client; anything else is logged.  The
 * interrupt URB is resubmitted for every path except fatal
 * unlink/unplug statuses and a successfully started control read (whose
 * completion handler resubmits it instead).
 */
static void notification_available_cb(struct urb *urb)
{
	int status;
	struct usb_cdc_notification *ctrl;
	struct ctrl_bridge *dev = urb->context;
	struct bridge *brdg = dev->brdg;
	unsigned int ctrl_bits;
	unsigned char *data;
	unsigned long flags;

	/*usb device disconnect*/
	if (urb->dev->state == USB_STATE_NOTATTACHED)
		return;

	/* the interrupt URB has completed, so it is no longer queued */
	spin_lock_irqsave(&dev->lock, flags);
	dev->rx_state = RX_IDLE;
	spin_unlock_irqrestore(&dev->lock, flags);

	switch (urb->status) {
	case 0:
		/*success*/
		break;
	case -ESHUTDOWN:
	case -ENOENT:
	case -ECONNRESET:
	case -EPROTO:
		/* unplug */
		return;
	case -EPIPE:
		dev_err(&dev->intf->dev,
			"%s: stall on int endpoint\n", __func__);
		/* TBD : halt to be cleared in work */
		/* fallthrough */
	case -EOVERFLOW:
	default:
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
			__func__, urb->status);
		goto resubmit_int_urb;
	}

	ctrl = (struct usb_cdc_notification *)urb->transfer_buffer;
	/* notification-specific payload follows the CDC header */
	data = (unsigned char *)(ctrl + 1);

	switch (ctrl->bNotificationType) {
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		spin_lock_irqsave(&dev->lock, flags);
		dev->rx_state = RX_BUSY;
		spin_unlock_irqrestore(&dev->lock, flags);
		dev->resp_avail++;
		/* hold the interface awake until resp_avail_cb() releases it */
		usb_autopm_get_interface_no_resume(dev->intf);
		usb_fill_control_urb(dev->readurb, dev->udev,
			usb_rcvctrlpipe(dev->udev, 0),
			(unsigned char *)dev->in_ctlreq,
			dev->readbuf,
			DEFAULT_READ_URB_LENGTH,
			resp_avail_cb, dev);
		status = usb_submit_urb(dev->readurb, GFP_ATOMIC);
		if (status) {
			dev_err(&dev->intf->dev,
				"%s: Error submitting Read URB %d\n",
				__func__, status);
			usb_autopm_put_interface_async(dev->intf);
			goto resubmit_int_urb;
		}
		/* resp_avail_cb() will resubmit the interrupt URB */
		return;
	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
		dev_dbg(&dev->intf->dev, "%s network\n", ctrl->wValue ?
			"connected to" : "disconnected from");
		break;
	case USB_CDC_NOTIFY_SERIAL_STATE:
		dev->notify_ser_state++;
		ctrl_bits = get_unaligned_le16(data);
		dev_dbg(&dev->intf->dev, "serial state: %d\n", ctrl_bits);
		dev->cbits_tohost = ctrl_bits;
		if (brdg && brdg->ops.send_cbits)
			brdg->ops.send_cbits(brdg->ctx, ctrl_bits);
		break;
	default:
		dev_err(&dev->intf->dev, "%s: unknown notification %d received:"
			"index %d len %d data0 %d data1 %d",
			__func__, ctrl->bNotificationType, ctrl->wIndex,
			ctrl->wLength, data[0], data[1]);
	}

resubmit_int_urb:
	ctrl_bridge_start_read(dev, GFP_ATOMIC);
}
  251. int ctrl_bridge_open(struct bridge *brdg)
  252. {
  253. struct ctrl_bridge *dev;
  254. int ch_id;
  255. if (!brdg) {
  256. err("bridge is null\n");
  257. return -EINVAL;
  258. }
  259. ch_id = get_ctrl_bridge_chid(brdg->name);
  260. if (ch_id < 0 || ch_id >= MAX_BRIDGE_DEVICES) {
  261. err("%s: %s dev not found\n", __func__, brdg->name);
  262. return ch_id;
  263. }
  264. brdg->ch_id = ch_id;
  265. dev = __dev[ch_id];
  266. dev->brdg = brdg;
  267. dev->snd_encap_cmd = 0;
  268. dev->get_encap_res = 0;
  269. dev->resp_avail = 0;
  270. dev->set_ctrl_line_sts = 0;
  271. dev->notify_ser_state = 0;
  272. if (brdg->ops.send_cbits)
  273. brdg->ops.send_cbits(brdg->ctx, dev->cbits_tohost);
  274. return 0;
  275. }
  276. EXPORT_SYMBOL(ctrl_bridge_open);
  277. void ctrl_bridge_close(unsigned int id)
  278. {
  279. struct ctrl_bridge *dev;
  280. if (id >= MAX_BRIDGE_DEVICES)
  281. return;
  282. dev = __dev[id];
  283. if (!dev || !dev->brdg)
  284. return;
  285. dev_dbg(&dev->intf->dev, "%s:\n", __func__);
  286. ctrl_bridge_set_cbits(dev->brdg->ch_id, 0);
  287. dev->brdg = NULL;
  288. }
  289. EXPORT_SYMBOL(ctrl_bridge_close);
  290. static void ctrl_write_callback(struct urb *urb)
  291. {
  292. struct ctrl_bridge *dev = urb->context;
  293. if (urb->status) {
  294. pr_debug("Write status/size %d/%d\n",
  295. urb->status, urb->actual_length);
  296. }
  297. kfree(urb->transfer_buffer);
  298. kfree(urb->setup_packet);
  299. usb_free_urb(urb);
  300. /* if we are here after device disconnect
  301. * usb_unbind_interface() takes care of
  302. * residual pm_autopm_get_interface_* calls
  303. */
  304. if (urb->dev->state != USB_STATE_NOTATTACHED)
  305. usb_autopm_put_interface_async(dev->intf);
  306. }
  307. int ctrl_bridge_write(unsigned int id, char *data, size_t size)
  308. {
  309. int result;
  310. struct urb *writeurb;
  311. struct usb_ctrlrequest *out_ctlreq;
  312. struct ctrl_bridge *dev;
  313. unsigned long flags;
  314. if (id >= MAX_BRIDGE_DEVICES) {
  315. result = -EINVAL;
  316. goto free_data;
  317. }
  318. dev = __dev[id];
  319. if (!dev) {
  320. result = -ENODEV;
  321. goto free_data;
  322. }
  323. dev_dbg(&dev->intf->dev, "%s:[id]:%u: write (%d bytes)\n",
  324. __func__, id, size);
  325. writeurb = usb_alloc_urb(0, GFP_ATOMIC);
  326. if (!writeurb) {
  327. dev_err(&dev->intf->dev, "%s: error allocating read urb\n",
  328. __func__);
  329. result = -ENOMEM;
  330. goto free_data;
  331. }
  332. out_ctlreq = kmalloc(sizeof(*out_ctlreq), GFP_ATOMIC);
  333. if (!out_ctlreq) {
  334. dev_err(&dev->intf->dev,
  335. "%s: error allocating setup packet buffer\n",
  336. __func__);
  337. result = -ENOMEM;
  338. goto free_urb;
  339. }
  340. /* CDC Send Encapsulated Request packet */
  341. out_ctlreq->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS |
  342. USB_RECIP_INTERFACE);
  343. if (!data && !size) {
  344. out_ctlreq->bRequest = USB_CDC_REQ_SET_CONTROL_LINE_STATE;
  345. out_ctlreq->wValue = dev->cbits_tomdm;
  346. dev->set_ctrl_line_sts++;
  347. } else {
  348. out_ctlreq->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
  349. out_ctlreq->wValue = 0;
  350. dev->snd_encap_cmd++;
  351. }
  352. out_ctlreq->wIndex =
  353. dev->intf->cur_altsetting->desc.bInterfaceNumber;
  354. out_ctlreq->wLength = cpu_to_le16(size);
  355. usb_fill_control_urb(writeurb, dev->udev,
  356. usb_sndctrlpipe(dev->udev, 0),
  357. (unsigned char *)out_ctlreq,
  358. (void *)data, size,
  359. ctrl_write_callback, dev);
  360. result = usb_autopm_get_interface_async(dev->intf);
  361. if (result < 0) {
  362. dev_dbg(&dev->intf->dev, "%s: unable to resume interface: %d\n",
  363. __func__, result);
  364. /*
  365. * Revisit: if (result == -EPERM)
  366. * bridge_suspend(dev->intf, PMSG_SUSPEND);
  367. */
  368. goto free_ctrlreq;
  369. }
  370. spin_lock_irqsave(&dev->lock, flags);
  371. if (test_bit(SUSPENDED, &dev->flags)) {
  372. usb_anchor_urb(writeurb, &dev->tx_deferred);
  373. spin_unlock_irqrestore(&dev->lock, flags);
  374. goto deferred;
  375. }
  376. usb_anchor_urb(writeurb, &dev->tx_submitted);
  377. spin_unlock_irqrestore(&dev->lock, flags);
  378. result = usb_submit_urb(writeurb, GFP_ATOMIC);
  379. if (result < 0) {
  380. dev_err(&dev->intf->dev, "%s: submit URB error %d\n",
  381. __func__, result);
  382. usb_autopm_put_interface_async(dev->intf);
  383. goto unanchor_urb;
  384. }
  385. deferred:
  386. return size;
  387. unanchor_urb:
  388. usb_unanchor_urb(writeurb);
  389. free_ctrlreq:
  390. kfree(out_ctlreq);
  391. free_urb:
  392. usb_free_urb(writeurb);
  393. free_data:
  394. kfree(data);
  395. return result;
  396. }
  397. EXPORT_SYMBOL(ctrl_bridge_write);
/*
 * Prepare channel @id for interface suspend.
 *
 * Returns -EBUSY if a TX URB is in flight or an encapsulated-response
 * read is in progress.  Otherwise the interrupt URB is killed and both
 * checks are repeated: a write or RX completion may have slipped in
 * while the lock was dropped for usb_kill_urb().  On success the
 * SUSPENDED flag is set so subsequent writes are parked on tx_deferred
 * until ctrl_bridge_resume().
 */
int ctrl_bridge_suspend(unsigned int id)
{
	struct ctrl_bridge *dev;
	unsigned long flags;

	if (id >= MAX_BRIDGE_DEVICES)
		return -EINVAL;

	dev = __dev[id];
	if (!dev)
		return -ENODEV;

	spin_lock_irqsave(&dev->lock, flags);
	if (!usb_anchor_empty(&dev->tx_submitted) || dev->rx_state == RX_BUSY) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	/* usb_kill_urb() may sleep; must not hold the spinlock here */
	usb_kill_urb(dev->inturb);

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->rx_state != RX_IDLE) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return -EBUSY;
	}
	if (!usb_anchor_empty(&dev->tx_submitted)) {
		spin_unlock_irqrestore(&dev->lock, flags);
		/* a write slipped in; restart polling and refuse suspend */
		ctrl_bridge_start_read(dev, GFP_KERNEL);
		return -EBUSY;
	}
	set_bit(SUSPENDED, &dev->flags);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/*
 * Resume channel @id: submit every write deferred while suspended,
 * clear the SUSPENDED flag and restart interrupt-in polling.
 * Returns 0, or the error from restarting the interrupt URB.
 */
int ctrl_bridge_resume(unsigned int id)
{
	struct ctrl_bridge *dev;
	struct urb *urb;
	unsigned long flags;
	int ret;

	if (id >= MAX_BRIDGE_DEVICES)
		return -EINVAL;

	dev = __dev[id];
	if (!dev)
		return -ENODEV;

	if (!test_bit(SUSPENDED, &dev->flags))
		return 0;

	spin_lock_irqsave(&dev->lock, flags);
	/* submit pending write requests */
	while ((urb = usb_get_from_anchor(&dev->tx_deferred))) {
		/* drop the lock: submission must not run under it */
		spin_unlock_irqrestore(&dev->lock, flags);
		/*
		 * usb_get_from_anchor() does not drop the
		 * ref count incremented by the usb_anchor_urb()
		 * called in Tx submission path. Let us do it.
		 */
		usb_put_urb(urb);
		usb_anchor_urb(urb, &dev->tx_submitted);
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		if (ret < 0) {
			/* mirror ctrl_write_callback()'s cleanup on failure */
			usb_unanchor_urb(urb);
			kfree(urb->setup_packet);
			kfree(urb->transfer_buffer);
			usb_free_urb(urb);
			usb_autopm_put_interface_async(dev->intf);
		}
		spin_lock_irqsave(&dev->lock, flags);
	}
	clear_bit(SUSPENDED, &dev->flags);
	spin_unlock_irqrestore(&dev->lock, flags);

	return ctrl_bridge_start_read(dev, GFP_KERNEL);
}
  466. #if defined(CONFIG_DEBUG_FS)
  467. #define DEBUG_BUF_SIZE 1024
  468. static ssize_t ctrl_bridge_read_stats(struct file *file, char __user *ubuf,
  469. size_t count, loff_t *ppos)
  470. {
  471. struct ctrl_bridge *dev;
  472. char *buf;
  473. int ret;
  474. int i;
  475. int temp = 0;
  476. buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
  477. if (!buf)
  478. return -ENOMEM;
  479. for (i = 0; i < MAX_BRIDGE_DEVICES; i++) {
  480. dev = __dev[i];
  481. if (!dev)
  482. continue;
  483. temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
  484. "\nName#%s dev %pK\n"
  485. "snd encap cmd cnt: %u\n"
  486. "get encap res cnt: %u\n"
  487. "res available cnt: %u\n"
  488. "set ctrlline sts cnt: %u\n"
  489. "notify ser state cnt: %u\n"
  490. "cbits_tomdm: %d\n"
  491. "cbits_tohost: %d\n"
  492. "suspended: %d\n",
  493. dev->name, dev,
  494. dev->snd_encap_cmd,
  495. dev->get_encap_res,
  496. dev->resp_avail,
  497. dev->set_ctrl_line_sts,
  498. dev->notify_ser_state,
  499. dev->cbits_tomdm,
  500. dev->cbits_tohost,
  501. test_bit(SUSPENDED, &dev->flags));
  502. }
  503. ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
  504. kfree(buf);
  505. return ret;
  506. }
  507. static ssize_t ctrl_bridge_reset_stats(struct file *file,
  508. const char __user *buf, size_t count, loff_t *ppos)
  509. {
  510. struct ctrl_bridge *dev;
  511. int i;
  512. for (i = 0; i < MAX_BRIDGE_DEVICES; i++) {
  513. dev = __dev[i];
  514. if (!dev)
  515. continue;
  516. dev->snd_encap_cmd = 0;
  517. dev->get_encap_res = 0;
  518. dev->resp_avail = 0;
  519. dev->set_ctrl_line_sts = 0;
  520. dev->notify_ser_state = 0;
  521. }
  522. return count;
  523. }
  524. const struct file_operations ctrl_stats_ops = {
  525. .read = ctrl_bridge_read_stats,
  526. .write = ctrl_bridge_reset_stats,
  527. };
  528. struct dentry *ctrl_dent;
  529. struct dentry *ctrl_dfile;
  530. static void ctrl_bridge_debugfs_init(void)
  531. {
  532. ctrl_dent = debugfs_create_dir("ctrl_hsic_bridge", 0);
  533. if (IS_ERR(ctrl_dent))
  534. return;
  535. ctrl_dfile =
  536. debugfs_create_file("status", 0644, ctrl_dent, 0,
  537. &ctrl_stats_ops);
  538. if (!ctrl_dfile || IS_ERR(ctrl_dfile))
  539. debugfs_remove(ctrl_dent);
  540. }
/* Remove the debugfs file and directory (debugfs_remove is NULL-safe). */
static void ctrl_bridge_debugfs_exit(void)
{
	debugfs_remove(ctrl_dfile);
	debugfs_remove(ctrl_dent);
}
  546. #else
  547. static void ctrl_bridge_debugfs_init(void) { }
  548. static void ctrl_bridge_debugfs_exit(void) { }
  549. #endif
/*
 * Bind channel @id to USB interface @ifc.
 *
 * Allocates the interrupt-in notification URB (on endpoint @int_in)
 * and its buffer, the control-read URB and buffer, pre-builds the
 * GET_ENCAPSULATED_RESPONSE setup packet, registers a platform device
 * named @name, and starts notification polling.
 *
 * Returns 0 on success; on failure all resources are unwound via the
 * goto chain and dev->name is reset to "none" so the slot cannot be
 * matched by get_ctrl_bridge_chid().
 */
int
ctrl_bridge_probe(struct usb_interface *ifc, struct usb_host_endpoint *int_in,
		char *name, int id)
{
	struct ctrl_bridge *dev;
	struct usb_device *udev;
	struct usb_endpoint_descriptor *ep;
	u16 wMaxPacketSize;
	int retval = 0;
	int interval;

	udev = interface_to_usbdev(ifc);

	dev = __dev[id];
	if (!dev) {
		pr_err("%s:device not found\n", __func__);
		return -ENODEV;
	}

	dev->name = name;

	dev->pdev = platform_device_alloc(name, -1);
	if (!dev->pdev) {
		retval = -ENOMEM;
		dev_err(&ifc->dev, "%s: unable to allocate platform device\n",
			__func__);
		goto free_name;
	}

	dev->flags = 0;
	dev->udev = udev;
	dev->int_pipe = usb_rcvintpipe(udev,
		int_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->intf = ifc;

	/*use max pkt size from ep desc*/
	ep = &dev->intf->cur_altsetting->endpoint[0].desc;

	dev->inturb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->inturb) {
		dev_err(&ifc->dev, "%s: error allocating int urb\n", __func__);
		retval = -ENOMEM;
		goto pdev_put;
	}

	wMaxPacketSize = le16_to_cpu(ep->wMaxPacketSize);

	dev->intbuf = kmalloc(wMaxPacketSize, GFP_KERNEL);
	if (!dev->intbuf) {
		dev_err(&ifc->dev, "%s: error allocating int buffer\n",
			__func__);
		retval = -ENOMEM;
		goto free_inturb;
	}

	interval = int_in->desc.bInterval;
	usb_fill_int_urb(dev->inturb, udev, dev->int_pipe,
			dev->intbuf, wMaxPacketSize,
			notification_available_cb, dev, interval);

	dev->readurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->readurb) {
		dev_err(&ifc->dev, "%s: error allocating read urb\n",
			__func__);
		retval = -ENOMEM;
		goto free_intbuf;
	}

	dev->readbuf = kmalloc(DEFAULT_READ_URB_LENGTH, GFP_KERNEL);
	if (!dev->readbuf) {
		dev_err(&ifc->dev, "%s: error allocating read buffer\n",
			__func__);
		retval = -ENOMEM;
		goto free_rurb;
	}

	dev->in_ctlreq = kmalloc(sizeof(*dev->in_ctlreq), GFP_KERNEL);
	if (!dev->in_ctlreq) {
		dev_err(&ifc->dev, "%s:error allocating setup packet buffer\n",
			__func__);
		retval = -ENOMEM;
		goto free_rbuf;
	}

	/* reusable GET_ENCAPSULATED_RESPONSE setup packet for readurb */
	dev->in_ctlreq->bRequestType =
		(USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
	dev->in_ctlreq->bRequest  = USB_CDC_GET_ENCAPSULATED_RESPONSE;
	dev->in_ctlreq->wValue = 0;
	dev->in_ctlreq->wIndex =
		dev->intf->cur_altsetting->desc.bInterfaceNumber;
	dev->in_ctlreq->wLength = cpu_to_le16(DEFAULT_READ_URB_LENGTH);

	retval = platform_device_add(dev->pdev);
	if (retval) {
		dev_err(&ifc->dev, "%s:fail to add pdev\n", __func__);
		goto free_ctrlreq;
	}

	retval = ctrl_bridge_start_read(dev, GFP_KERNEL);
	if (retval) {
		dev_err(&ifc->dev, "%s:fail to start reading\n", __func__);
		goto pdev_del;
	}

	return 0;

pdev_del:
	platform_device_del(dev->pdev);
free_ctrlreq:
	kfree(dev->in_ctlreq);
free_rbuf:
	kfree(dev->readbuf);
free_rurb:
	usb_free_urb(dev->readurb);
free_intbuf:
	kfree(dev->intbuf);
free_inturb:
	usb_free_urb(dev->inturb);
pdev_put:
	platform_device_put(dev->pdev);
free_name:
	dev->name = "none";

	return retval;
}
  656. void ctrl_bridge_disconnect(unsigned int id)
  657. {
  658. struct ctrl_bridge *dev = __dev[id];
  659. dev_dbg(&dev->intf->dev, "%s:\n", __func__);
  660. /*set device name to none to get correct channel id
  661. * at the time of bridge open
  662. */
  663. dev->name = "none";
  664. platform_device_unregister(dev->pdev);
  665. usb_scuttle_anchored_urbs(&dev->tx_deferred);
  666. usb_kill_anchored_urbs(&dev->tx_submitted);
  667. usb_kill_urb(dev->inturb);
  668. usb_kill_urb(dev->readurb);
  669. kfree(dev->in_ctlreq);
  670. kfree(dev->readbuf);
  671. kfree(dev->intbuf);
  672. usb_free_urb(dev->readurb);
  673. usb_free_urb(dev->inturb);
  674. }
  675. int ctrl_bridge_init(void)
  676. {
  677. struct ctrl_bridge *dev;
  678. int i;
  679. int retval = 0;
  680. for (i = 0; i < MAX_BRIDGE_DEVICES; i++) {
  681. dev = kzalloc(sizeof(*dev), GFP_KERNEL);
  682. if (!dev) {
  683. pr_err("%s: unable to allocate dev\n", __func__);
  684. retval = -ENOMEM;
  685. goto error;
  686. }
  687. /*transport name will be set during probe*/
  688. dev->name = "none";
  689. spin_lock_init(&dev->lock);
  690. init_usb_anchor(&dev->tx_submitted);
  691. init_usb_anchor(&dev->tx_deferred);
  692. __dev[i] = dev;
  693. }
  694. ctrl_bridge_debugfs_init();
  695. return 0;
  696. error:
  697. while (--i >= 0) {
  698. kfree(__dev[i]);
  699. __dev[i] = NULL;
  700. }
  701. return retval;
  702. }
  703. void ctrl_bridge_exit(void)
  704. {
  705. int i;
  706. ctrl_bridge_debugfs_exit();
  707. for (i = 0; i < MAX_BRIDGE_DEVICES; i++) {
  708. kfree(__dev[i]);
  709. __dev[i] = NULL;
  710. }
  711. }