mdm_data_bridge.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188
  1. /* Copyright (c) 2011-2013, Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #include <linux/kernel.h>
  13. #include <linux/errno.h>
  14. #include <linux/init.h>
  15. #include <linux/slab.h>
  16. #include <linux/module.h>
  17. #include <linux/debugfs.h>
  18. #include <linux/platform_device.h>
  19. #include <linux/uaccess.h>
  20. #include <linux/ratelimit.h>
  21. #include <mach/usb_bridge.h>
/* Tuning constants; several are runtime-overridable via module params below. */
  22. #define MAX_RX_URBS 100
  23. #define RMNET_RX_BUFSIZE 2048
/* rx_done backlog above which we stop submitting new rx urbs */
  24. #define STOP_SUBMIT_URB_LIMIT 500
/* pending tx urb count that enables / disables client flow control */
  25. #define FLOW_CTRL_EN_THRESHOLD 500
  26. #define FLOW_CTRL_DISABLE 300
  27. #define FLOW_CTRL_SUPPORT 1
/* index into the per-xport name table (driver_info) */
  28. #define BRIDGE_DATA_IDX 0
  29. #define BRIDGE_CTRL_IDX 1
  30. /*for xport : HSIC*/
  31. static const char * const serial_hsic_bridge_names[] = {
  32. "serial_hsic_data",
  33. "serial_hsic_ctrl",
  34. };
  35. static const char * const rmnet_hsic_bridge_names[] = {
  36. "rmnet_hsic_data",
  37. "rmnet_hsic_ctrl",
  38. };
  39. /*for xport : HSUSB*/
  40. static const char * const serial_hsusb_bridge_names[] = {
  41. "serial_hsusb_data",
  42. "serial_hsusb_ctrl",
  43. };
  44. static const char * const rmnet_hsusb_bridge_names[] = {
  45. "rmnet_hsusb_data",
  46. "rmnet_hsusb_ctrl",
  47. };
  48. /* since driver supports multiple instances, on smp systems
  49. * probe might get called from multiple cores, hence use lock
  50. * to identify unclaimed bridge device instance
  51. */
  52. static DEFINE_MUTEX(brdg_claim_lock);
  53. static struct workqueue_struct *bridge_wq;
/* flow-control knobs, writable at runtime through /sys/module parameters */
  54. static unsigned int fctrl_support = FLOW_CTRL_SUPPORT;
  55. module_param(fctrl_support, uint, S_IRUGO | S_IWUSR);
  56. static unsigned int fctrl_en_thld = FLOW_CTRL_EN_THRESHOLD;
  57. module_param(fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
  58. static unsigned int fctrl_dis_thld = FLOW_CTRL_DISABLE;
  59. module_param(fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
/* non-static: presumably shared with a sibling file — TODO confirm */
  60. unsigned int max_rx_urbs = MAX_RX_URBS;
  61. module_param(max_rx_urbs, uint, S_IRUGO | S_IWUSR);
  62. unsigned int stop_submit_urb_limit = STOP_SUBMIT_URB_LIMIT;
  63. module_param(stop_submit_urb_limit, uint, S_IRUGO | S_IWUSR);
/* only every tx_urb_mult-th tx urb requests a completion interrupt */
  64. static unsigned tx_urb_mult = 20;
  65. module_param(tx_urb_mult, uint, S_IRUGO|S_IWUSR);
/* bit numbers used in data_bridge.flags */
  66. #define TX_HALT 0
  67. #define RX_HALT 1
  68. #define SUSPENDED 2
  69. #define CLAIMED 3
/* Per-channel state for one data bridge instance (one USB interface). */
  70. struct data_bridge {
  71. struct usb_interface *intf;
  72. struct usb_device *udev;
  73. int id;
  74. char *name;
/* bulk pipe handles built with usb_{rcv,snd}bulkpipe() at probe time */
  75. unsigned int bulk_in;
  76. unsigned int bulk_out;
  77. int err;
  78. /* keep track of in-flight URBs */
  79. struct usb_anchor tx_active;
  80. struct usb_anchor rx_active;
/* idle rx urbs; list is protected by rx_done.lock */
  81. struct list_head rx_idle;
/* completed rx skbs awaiting delivery to the client */
  82. struct sk_buff_head rx_done;
  83. struct workqueue_struct *wq;
  84. struct work_struct process_rx_w;
/* client registration; NULL while the channel is closed */
  85. struct bridge *brdg;
  86. /* work queue function for handling halt conditions */
  87. struct work_struct kevent;
/* TX_HALT / RX_HALT / SUSPENDED / CLAIMED bits */
  88. unsigned long flags;
  89. struct platform_device *pdev;
  90. /* counters */
  91. atomic_t pending_txurbs;
  92. unsigned int txurb_drp_cnt;
  93. unsigned long to_host;
  94. unsigned long to_modem;
  95. unsigned int tx_throttled_cnt;
  96. unsigned int tx_unthrottled_cnt;
  97. unsigned int rx_throttled_cnt;
  98. unsigned int rx_unthrottled_cnt;
  99. };
/* one slot per channel; entries are presumably allocated at module init —
 * the lookup helpers below dereference them without a NULL check */
  100. static struct data_bridge *__dev[MAX_BRIDGE_DEVICES];
  101. static unsigned int get_timestamp(void);
  102. static void dbg_timestamp(char *, struct sk_buff *);
  103. static int submit_rx_urb(struct data_bridge *dev, struct urb *urb,
  104. gfp_t flags);
  105. /* Find an unclaimed bridge device instance */
  106. static int get_bridge_dev_idx(void)
  107. {
  108. struct data_bridge *dev;
  109. int i;
  110. mutex_lock(&brdg_claim_lock);
  111. for (i = 0; i < MAX_BRIDGE_DEVICES; i++) {
  112. dev = __dev[i];
  113. if (!test_bit(CLAIMED, &dev->flags)) {
  114. set_bit(CLAIMED, &dev->flags);
  115. mutex_unlock(&brdg_claim_lock);
  116. return i;
  117. }
  118. }
  119. mutex_unlock(&brdg_claim_lock);
  120. return -ENODEV;
  121. }
  122. static int get_data_bridge_chid(char *xport_name)
  123. {
  124. struct data_bridge *dev;
  125. int i;
  126. for (i = 0; i < MAX_BRIDGE_DEVICES; i++) {
  127. dev = __dev[i];
  128. if (!strncmp(dev->name, xport_name, BRIDGE_NAME_MAX_LEN))
  129. return i;
  130. }
  131. return -ENODEV;
  132. }
/* true when the bulk-in endpoint is halted and awaiting kevent recovery */
  133. static inline bool rx_halted(struct data_bridge *dev)
  134. {
  135. return test_bit(RX_HALT, &dev->flags);
  136. }
/* true when the client has asked us to stop delivering rx packets */
  137. static inline bool rx_throttled(struct bridge *brdg)
  138. {
  139. return test_bit(RX_THROTTLED, &brdg->flags);
  140. }
  141. static void free_rx_urbs(struct data_bridge *dev)
  142. {
  143. struct list_head *head;
  144. struct urb *rx_urb;
  145. unsigned long flags;
  146. head = &dev->rx_idle;
  147. spin_lock_irqsave(&dev->rx_done.lock, flags);
  148. while (!list_empty(head)) {
  149. rx_urb = list_entry(head->next, struct urb, urb_list);
  150. list_del(&rx_urb->urb_list);
  151. usb_free_urb(rx_urb);
  152. }
  153. spin_unlock_irqrestore(&dev->rx_done.lock, flags);
  154. }
/*
 * Client callback: rx throttling has been lifted for channel @id.
 * Kicks the rx worker so queued packets are drained and urbs resubmitted.
 * Returns 0, -EINVAL for a bad id, or -ENODEV when the channel is closed.
 */
  155. int data_bridge_unthrottle_rx(unsigned int id)
  156. {
  157. struct data_bridge *dev;
  158. if (id >= MAX_BRIDGE_DEVICES)
  159. return -EINVAL;
  160. dev = __dev[id];
  161. if (!dev || !dev->brdg)
  162. return -ENODEV;
  163. dev->rx_unthrottled_cnt++;
  164. queue_work(dev->wq, &dev->process_rx_w);
  165. return 0;
  166. }
  167. EXPORT_SYMBOL(data_bridge_unthrottle_rx);
/*
 * Worker: deliver completed rx packets to the client, then refill the
 * rx pipeline from the idle urb pool. Runs on dev->wq.
 */
  168. static void data_bridge_process_rx(struct work_struct *work)
  169. {
  170. int retval;
  171. unsigned long flags;
  172. struct urb *rx_idle;
  173. struct sk_buff *skb;
  174. struct timestamp_info *info;
  175. struct data_bridge *dev =
  176. container_of(work, struct data_bridge, process_rx_w);
  177. struct bridge *brdg = dev->brdg;
/* nothing to do while the channel is closed or the IN endpoint is halted */
  178. if (!brdg || !brdg->ops.send_pkt || rx_halted(dev))
  179. return;
/* phase 1: push queued skbs up until throttled or the queue drains */
  180. while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
  181. dev->to_host++;
  182. info = (struct timestamp_info *)skb->cb;
  183. info->rx_done_sent = get_timestamp();
  184. /* hand off sk_buff to client,they'll need to free it */
  185. retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
/* client gone or rejected the packet outright: abandon this pass */
  186. if (retval == -ENOTCONN || retval == -EINVAL) {
  187. return;
  188. } else if (retval == -EBUSY) {
  189. dev->rx_throttled_cnt++;
  190. break;
  191. }
  192. }
/* phase 2: resubmit idle urbs; lock is dropped around the (possibly
 * sleeping) submission and retaken to continue walking the list */
  193. spin_lock_irqsave(&dev->rx_done.lock, flags);
  194. while (!list_empty(&dev->rx_idle)) {
/* stop refilling once the done-queue backlog is too deep */
  195. if (dev->rx_done.qlen > stop_submit_urb_limit)
  196. break;
  197. rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
  198. list_del(&rx_idle->urb_list);
  199. spin_unlock_irqrestore(&dev->rx_done.lock, flags);
  200. retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
  201. spin_lock_irqsave(&dev->rx_done.lock, flags);
/* submission failed: park the urb again and give up for now */
  202. if (retval) {
  203. list_add_tail(&rx_idle->urb_list, &dev->rx_idle);
  204. break;
  205. }
  206. }
  207. spin_unlock_irqrestore(&dev->rx_done.lock, flags);
  208. }
  209. static void data_bridge_read_cb(struct urb *urb)
  210. {
  211. struct bridge *brdg;
  212. struct sk_buff *skb = urb->context;
  213. struct timestamp_info *info = (struct timestamp_info *)skb->cb;
  214. struct data_bridge *dev = info->dev;
  215. bool queue = 0;
  216. /*usb device disconnect*/
  217. if (urb->dev->state == USB_STATE_NOTATTACHED)
  218. urb->status = -ECONNRESET;
  219. brdg = dev->brdg;
  220. skb_put(skb, urb->actual_length);
  221. switch (urb->status) {
  222. case 0: /* success */
  223. queue = 1;
  224. info->rx_done = get_timestamp();
  225. spin_lock(&dev->rx_done.lock);
  226. __skb_queue_tail(&dev->rx_done, skb);
  227. spin_unlock(&dev->rx_done.lock);
  228. break;
  229. /*do not resubmit*/
  230. case -EPIPE:
  231. set_bit(RX_HALT, &dev->flags);
  232. dev_err(&dev->intf->dev, "%s: epout halted\n", __func__);
  233. schedule_work(&dev->kevent);
  234. /* FALLTHROUGH */
  235. case -ESHUTDOWN:
  236. case -ENOENT: /* suspended */
  237. case -ECONNRESET: /* unplug */
  238. case -EPROTO:
  239. dev_kfree_skb_any(skb);
  240. break;
  241. /*resubmit */
  242. case -EOVERFLOW: /*babble error*/
  243. default:
  244. queue = 1;
  245. dev_kfree_skb_any(skb);
  246. pr_debug_ratelimited("%s: non zero urb status = %d\n",
  247. __func__, urb->status);
  248. break;
  249. }
  250. spin_lock(&dev->rx_done.lock);
  251. list_add_tail(&urb->urb_list, &dev->rx_idle);
  252. spin_unlock(&dev->rx_done.lock);
  253. if (queue)
  254. queue_work(dev->wq, &dev->process_rx_w);
  255. }
/*
 * Allocate an skb and submit @rx_urb on the bulk IN pipe.
 * Returns 0 on success; on any failure the skb is freed and the caller
 * keeps ownership of the (unanchored) urb.
 */
  256. static int submit_rx_urb(struct data_bridge *dev, struct urb *rx_urb,
  257. gfp_t flags)
  258. {
  259. struct sk_buff *skb;
  260. struct timestamp_info *info;
  261. int retval = -EINVAL;
  262. unsigned int created;
  263. created = get_timestamp();
  264. skb = alloc_skb(RMNET_RX_BUFSIZE, flags);
  265. if (!skb)
  266. return -ENOMEM;
  267. info = (struct timestamp_info *)skb->cb;
  268. info->dev = dev;
  269. info->created = created;
  270. usb_fill_bulk_urb(rx_urb, dev->udev, dev->bulk_in,
  271. skb->data, RMNET_RX_BUFSIZE,
  272. data_bridge_read_cb, skb);
/* don't touch the bus while suspended; fall out with retval == -EINVAL */
  273. if (test_bit(SUSPENDED, &dev->flags))
  274. goto suspended;
/* anchor before submit so suspend/close can kill it; unanchor on failure */
  275. usb_anchor_urb(rx_urb, &dev->rx_active);
  276. info->rx_queued = get_timestamp();
  277. retval = usb_submit_urb(rx_urb, flags);
  278. if (retval)
  279. goto fail;
  280. usb_mark_last_busy(dev->udev);
  281. return 0;
  282. fail:
  283. usb_unanchor_urb(rx_urb);
  284. suspended:
  285. dev_kfree_skb_any(skb);
  286. return retval;
  287. }
  288. static int data_bridge_prepare_rx(struct data_bridge *dev)
  289. {
  290. int i;
  291. struct urb *rx_urb;
  292. int retval = 0;
  293. for (i = 0; i < max_rx_urbs; i++) {
  294. rx_urb = usb_alloc_urb(0, GFP_KERNEL);
  295. if (!rx_urb) {
  296. retval = -ENOMEM;
  297. goto free_urbs;
  298. }
  299. list_add_tail(&rx_urb->urb_list, &dev->rx_idle);
  300. }
  301. return 0;
  302. free_urbs:
  303. free_rx_urbs(dev);
  304. return retval;
  305. }
  306. int data_bridge_open(struct bridge *brdg)
  307. {
  308. struct data_bridge *dev;
  309. int ch_id;
  310. if (!brdg) {
  311. err("bridge is null\n");
  312. return -EINVAL;
  313. }
  314. ch_id = get_data_bridge_chid(brdg->name);
  315. if (ch_id < 0 || ch_id >= MAX_BRIDGE_DEVICES) {
  316. err("%s: %s dev not found\n", __func__, brdg->name);
  317. return ch_id;
  318. }
  319. brdg->ch_id = ch_id;
  320. dev = __dev[ch_id];
  321. dev_dbg(&dev->intf->dev, "%s: dev:%pK\n", __func__, dev);
  322. dev->brdg = brdg;
  323. dev->err = 0;
  324. atomic_set(&dev->pending_txurbs, 0);
  325. dev->to_host = 0;
  326. dev->to_modem = 0;
  327. dev->txurb_drp_cnt = 0;
  328. dev->tx_throttled_cnt = 0;
  329. dev->tx_unthrottled_cnt = 0;
  330. dev->rx_throttled_cnt = 0;
  331. dev->rx_unthrottled_cnt = 0;
  332. queue_work(dev->wq, &dev->process_rx_w);
  333. return 0;
  334. }
  335. EXPORT_SYMBOL(data_bridge_open);
  336. void data_bridge_close(unsigned int id)
  337. {
  338. struct data_bridge *dev;
  339. struct sk_buff *skb;
  340. unsigned long flags;
  341. if (id >= MAX_BRIDGE_DEVICES)
  342. return;
  343. dev = __dev[id];
  344. if (!dev || !dev->brdg)
  345. return;
  346. dev_dbg(&dev->intf->dev, "%s:\n", __func__);
  347. cancel_work_sync(&dev->kevent);
  348. cancel_work_sync(&dev->process_rx_w);
  349. usb_kill_anchored_urbs(&dev->tx_active);
  350. usb_kill_anchored_urbs(&dev->rx_active);
  351. spin_lock_irqsave(&dev->rx_done.lock, flags);
  352. while ((skb = __skb_dequeue(&dev->rx_done)))
  353. dev_kfree_skb_any(skb);
  354. spin_unlock_irqrestore(&dev->rx_done.lock, flags);
  355. dev->brdg = NULL;
  356. }
  357. EXPORT_SYMBOL(data_bridge_close);
/*
 * Deferred halt recovery, scheduled from the tx/rx completion handlers.
 * For each halted direction: unlink in-flight urbs, wake the interface,
 * clear the endpoint halt, and (for rx) restart the pipeline.
 */
  358. static void defer_kevent(struct work_struct *work)
  359. {
  360. int status;
  361. struct data_bridge *dev =
  362. container_of(work, struct data_bridge, kevent);
  363. if (!dev)
  364. return;
  365. if (test_bit(TX_HALT, &dev->flags)) {
  366. usb_unlink_anchored_urbs(&dev->tx_active);
  367. status = usb_autopm_get_interface(dev->intf);
  368. if (status < 0) {
  369. dev_dbg(&dev->intf->dev,
  370. "can't acquire interface, status %d\n", status);
/* bail out entirely; the bit stays set so the next kevent retries */
  371. return;
  372. }
  373. status = usb_clear_halt(dev->udev, dev->bulk_out);
  374. usb_autopm_put_interface(dev->intf);
/* -EPIPE / -ESHUTDOWN here mean the halt is moot; treat as cleared */
  375. if (status < 0 && status != -EPIPE && status != -ESHUTDOWN)
  376. dev_err(&dev->intf->dev,
  377. "can't clear tx halt, status %d\n", status);
  378. else
  379. clear_bit(TX_HALT, &dev->flags);
  380. }
  381. if (test_bit(RX_HALT, &dev->flags)) {
  382. usb_unlink_anchored_urbs(&dev->rx_active);
  383. status = usb_autopm_get_interface(dev->intf);
  384. if (status < 0) {
  385. dev_dbg(&dev->intf->dev,
  386. "can't acquire interface, status %d\n", status);
  387. return;
  388. }
  389. status = usb_clear_halt(dev->udev, dev->bulk_in);
  390. usb_autopm_put_interface(dev->intf);
  391. if (status < 0 && status != -EPIPE && status != -ESHUTDOWN)
  392. dev_err(&dev->intf->dev,
  393. "can't clear rx halt, status %d\n", status);
  394. else {
  395. clear_bit(RX_HALT, &dev->flags);
/* rx endpoint recovered: refill the rx urb pipeline */
  396. if (dev->brdg)
  397. queue_work(dev->wq, &dev->process_rx_w);
  398. }
  399. }
  400. }
/*
 * Bulk OUT completion handler (interrupt context).
 * Frees the urb and skb, drops the pending-tx count, lifts client flow
 * control when the backlog falls below fctrl_dis_thld, and releases the
 * autopm reference taken in data_bridge_write().
 */
  401. static void data_bridge_write_cb(struct urb *urb)
  402. {
  403. struct sk_buff *skb = urb->context;
  404. struct timestamp_info *info = (struct timestamp_info *)skb->cb;
  405. struct data_bridge *dev = info->dev;
  406. struct bridge *brdg = dev->brdg;
  407. int pending;
  408. pr_debug("%s: dev:%pK\n", __func__, dev);
  409. switch (urb->status) {
  410. case 0: /*success*/
  411. dbg_timestamp("UL", skb);
  412. break;
/* protocol error: latch dev->err so further writes are refused */
  413. case -EPROTO:
  414. dev->err = -EPROTO;
  415. break;
  416. case -EPIPE:
  417. set_bit(TX_HALT, &dev->flags);
  418. dev_err(&dev->intf->dev, "%s: epout halted\n", __func__);
  419. schedule_work(&dev->kevent);
  420. /* FALLTHROUGH */
  421. case -ESHUTDOWN:
  422. case -ENOENT: /* suspended */
  423. case -ECONNRESET: /* unplug */
  424. case -EOVERFLOW: /*babble error*/
  425. /* FALLTHROUGH */
  426. default:
  427. pr_debug_ratelimited("%s: non zero urb status = %d\n",
  428. __func__, urb->status);
  429. }
  430. usb_free_urb(urb);
  431. dev_kfree_skb_any(skb);
  432. pending = atomic_dec_return(&dev->pending_txurbs);
  433. /*flow ctrl*/
/* test_and_clear guarantees exactly one unthrottle per throttle cycle */
  434. if (brdg && fctrl_support && pending <= fctrl_dis_thld &&
  435. test_and_clear_bit(TX_THROTTLED, &brdg->flags)) {
  436. pr_debug_ratelimited("%s: disable flow ctrl: pend urbs:%u\n",
  437. __func__, pending);
  438. dev->tx_unthrottled_cnt++;
  439. if (brdg->ops.unthrottle_tx)
  440. brdg->ops.unthrottle_tx(brdg->ctx);
  441. }
  442. /* if we are here after device disconnect
  443. * usb_unbind_interface() takes care of
  444. * residual pm_autopm_get_interface_* calls
  445. */
  446. if (urb->dev->state != USB_STATE_NOTATTACHED)
  447. usb_autopm_put_interface_async(dev->intf);
  448. }
  449. int data_bridge_write(unsigned int id, struct sk_buff *skb)
  450. {
  451. int result;
  452. int size = skb->len;
  453. int pending;
  454. struct urb *txurb;
  455. struct timestamp_info *info = (struct timestamp_info *)skb->cb;
  456. struct data_bridge *dev = __dev[id];
  457. struct bridge *brdg;
  458. if (!dev || !dev->brdg || dev->err || !usb_get_intfdata(dev->intf))
  459. return -ENODEV;
  460. brdg = dev->brdg;
  461. if (!brdg)
  462. return -ENODEV;
  463. dev_dbg(&dev->intf->dev, "%s: write (%d bytes)\n", __func__, skb->len);
  464. result = usb_autopm_get_interface(dev->intf);
  465. if (result < 0) {
  466. dev_dbg(&dev->intf->dev, "%s: resume failure\n", __func__);
  467. goto pm_error;
  468. }
  469. txurb = usb_alloc_urb(0, GFP_KERNEL);
  470. if (!txurb) {
  471. dev_err(&dev->intf->dev, "%s: error allocating read urb\n",
  472. __func__);
  473. result = -ENOMEM;
  474. goto error;
  475. }
  476. /* store dev pointer in skb */
  477. info->dev = dev;
  478. info->tx_queued = get_timestamp();
  479. usb_fill_bulk_urb(txurb, dev->udev, dev->bulk_out,
  480. skb->data, skb->len, data_bridge_write_cb, skb);
  481. txurb->transfer_flags |= URB_ZERO_PACKET;
  482. pending = atomic_inc_return(&dev->pending_txurbs);
  483. usb_anchor_urb(txurb, &dev->tx_active);
  484. if (atomic_read(&dev->pending_txurbs) % tx_urb_mult)
  485. txurb->transfer_flags |= URB_NO_INTERRUPT;
  486. result = usb_submit_urb(txurb, GFP_KERNEL);
  487. if (result < 0) {
  488. usb_unanchor_urb(txurb);
  489. atomic_dec(&dev->pending_txurbs);
  490. dev_err(&dev->intf->dev, "%s: submit URB error %d\n",
  491. __func__, result);
  492. goto free_urb;
  493. }
  494. dev->to_modem++;
  495. dev_dbg(&dev->intf->dev, "%s: pending_txurbs: %u\n", __func__, pending);
  496. /* flow control: last urb submitted but return -EBUSY */
  497. if (fctrl_support && pending > fctrl_en_thld) {
  498. set_bit(TX_THROTTLED, &brdg->flags);
  499. dev->tx_throttled_cnt++;
  500. pr_debug_ratelimited("%s: enable flow ctrl pend txurbs:%u\n",
  501. __func__, pending);
  502. return -EBUSY;
  503. }
  504. return size;
  505. free_urb:
  506. usb_free_urb(txurb);
  507. error:
  508. dev->txurb_drp_cnt++;
  509. usb_autopm_put_interface(dev->intf);
  510. pm_error:
  511. return result;
  512. }
  513. EXPORT_SYMBOL(data_bridge_write);
  514. static int bridge_resume(struct usb_interface *iface)
  515. {
  516. int retval = 0;
  517. struct data_bridge *dev = usb_get_intfdata(iface);
  518. clear_bit(SUSPENDED, &dev->flags);
  519. if (dev->brdg)
  520. queue_work(dev->wq, &dev->process_rx_w);
  521. retval = ctrl_bridge_resume(dev->id);
  522. return retval;
  523. }
  524. static int bridge_suspend(struct usb_interface *intf, pm_message_t message)
  525. {
  526. int retval;
  527. struct data_bridge *dev = usb_get_intfdata(intf);
  528. if (atomic_read(&dev->pending_txurbs))
  529. return -EBUSY;
  530. retval = ctrl_bridge_suspend(dev->id);
  531. if (retval)
  532. return retval;
  533. set_bit(SUSPENDED, &dev->flags);
  534. usb_kill_anchored_urbs(&dev->rx_active);
  535. return 0;
  536. }
  537. static int data_bridge_probe(struct usb_interface *iface,
  538. struct usb_host_endpoint *bulk_in,
  539. struct usb_host_endpoint *bulk_out, char *name, int id)
  540. {
  541. struct data_bridge *dev;
  542. int retval;
  543. dev = __dev[id];
  544. if (!dev) {
  545. err("%s: device not found\n", __func__);
  546. return -ENODEV;
  547. }
  548. dev->pdev = platform_device_alloc(name, -1);
  549. if (!dev->pdev) {
  550. err("%s: unable to allocate platform device\n", __func__);
  551. kfree(dev);
  552. return -ENOMEM;
  553. }
  554. /*clear all bits except claimed bit*/
  555. clear_bit(RX_HALT, &dev->flags);
  556. clear_bit(TX_HALT, &dev->flags);
  557. clear_bit(SUSPENDED, &dev->flags);
  558. dev->id = id;
  559. dev->name = name;
  560. dev->udev = interface_to_usbdev(iface);
  561. dev->intf = iface;
  562. dev->bulk_in = usb_rcvbulkpipe(dev->udev,
  563. bulk_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
  564. dev->bulk_out = usb_sndbulkpipe(dev->udev,
  565. bulk_out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
  566. usb_set_intfdata(iface, dev);
  567. /*allocate list of rx urbs*/
  568. retval = data_bridge_prepare_rx(dev);
  569. if (retval) {
  570. platform_device_put(dev->pdev);
  571. return retval;
  572. }
  573. platform_device_add(dev->pdev);
  574. return 0;
  575. }
  576. #if defined(CONFIG_DEBUG_FS)
  577. #define DEBUG_BUF_SIZE 4096
/* 0 = timestamp logging disabled (default); toggled via module param */
  578. static unsigned int record_timestamp;
  579. module_param(record_timestamp, uint, S_IRUGO | S_IWUSR);
/* circular log of per-skb timestamps, guarded by an rwlock */
  580. static struct timestamp_buf dbg_data = {
  581. .idx = 0,
  582. .lck = __RW_LOCK_UNLOCKED(lck)
  583. };
  584. /*get_timestamp - returns time of day in us */
  585. static unsigned int get_timestamp(void)
  586. {
  587. struct timeval tval;
  588. unsigned int stamp;
  589. if (!record_timestamp)
  590. return 0;
  591. do_gettimeofday(&tval);
  592. /* 2^32 = 4294967296. Limit to 4096s. */
  593. stamp = tval.tv_sec & 0xFFF;
  594. stamp = stamp * 1000000 + tval.tv_usec;
  595. return stamp;
  596. }
/* advance a ring-buffer index, wrapping at DBG_DATA_MAX (power of two) */
  597. static void dbg_inc(unsigned *idx)
  598. {
  599. *idx = (*idx + 1) & (DBG_DATA_MAX-1);
  600. }
  601. /**
  602. * dbg_timestamp - Stores timestamp values of a SKB life cycle
  603. * to debug buffer
  604. * @event: "UL": Uplink Data
  605. * @skb: SKB used to store timestamp values to debug buffer
  606. */
  607. static void dbg_timestamp(char *event, struct sk_buff * skb)
  608. {
  609. unsigned long flags;
  610. struct timestamp_info *info = (struct timestamp_info *)skb->cb;
  611. if (!record_timestamp)
  612. return;
/* write side of dbg_data.lck: readers are the debugfs dump path */
  613. write_lock_irqsave(&dbg_data.lck, flags);
  614. scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG,
  615. "%pK %u[%s] %u %u %u %u %u %u\n",
  616. skb, skb->len, event, info->created, info->rx_queued,
  617. info->rx_done, info->rx_done_sent, info->tx_queued,
  618. get_timestamp());
  619. dbg_inc(&dbg_data.idx);
  620. write_unlock_irqrestore(&dbg_data.lck, flags);
  621. }
  622. /* show_timestamp: displays the timestamp buffer */
  623. static ssize_t show_timestamp(struct file *file, char __user *ubuf,
  624. size_t count, loff_t *ppos)
  625. {
  626. unsigned long flags;
  627. unsigned i;
  628. unsigned j = 0;
  629. char *buf;
  630. int ret = 0;
  631. if (!record_timestamp)
  632. return 0;
  633. buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
  634. if (!buf)
  635. return -ENOMEM;
  636. read_lock_irqsave(&dbg_data.lck, flags);
  637. i = dbg_data.idx;
  638. for (dbg_inc(&i); i != dbg_data.idx; dbg_inc(&i)) {
  639. if (!strnlen(dbg_data.buf[i], DBG_DATA_MSG))
  640. continue;
  641. j += scnprintf(buf + j, DEBUG_BUF_SIZE - j,
  642. "%s\n", dbg_data.buf[i]);
  643. }
  644. read_unlock_irqrestore(&dbg_data.lck, flags);
  645. ret = simple_read_from_buffer(ubuf, count, ppos, buf, j);
  646. kfree(buf);
  647. return ret;
  648. }
  649. const struct file_operations data_timestamp_ops = {
  650. .read = show_timestamp,
  651. };
/* debugfs read handler: format per-channel counters and state flags */
  652. static ssize_t data_bridge_read_stats(struct file *file, char __user *ubuf,
  653. size_t count, loff_t *ppos)
  654. {
  655. struct data_bridge *dev;
  656. char *buf;
  657. int ret;
  658. int i;
  659. int temp = 0;
  660. buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
  661. if (!buf)
  662. return -ENOMEM;
  663. for (i = 0; i < MAX_BRIDGE_DEVICES; i++) {
  664. dev = __dev[i];
  665. if (!dev)
  666. continue;
/* scnprintf never overruns: temp tracks the used portion of buf */
  667. temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
  668. "\nName#%s dev %pK\n"
  669. "pending tx urbs: %u\n"
  670. "tx urb drp cnt: %u\n"
  671. "to host: %lu\n"
  672. "to mdm: %lu\n"
  673. "tx throttled cnt: %u\n"
  674. "tx unthrottled cnt: %u\n"
  675. "rx throttled cnt: %u\n"
  676. "rx unthrottled cnt: %u\n"
  677. "rx done skb qlen: %u\n"
  678. "dev err: %d\n"
  679. "suspended: %d\n"
  680. "TX_HALT: %d\n"
  681. "RX_HALT: %d\n",
  682. dev->name, dev,
  683. atomic_read(&dev->pending_txurbs),
  684. dev->txurb_drp_cnt,
  685. dev->to_host,
  686. dev->to_modem,
  687. dev->tx_throttled_cnt,
  688. dev->tx_unthrottled_cnt,
  689. dev->rx_throttled_cnt,
  690. dev->rx_unthrottled_cnt,
  691. dev->rx_done.qlen,
  692. dev->err,
  693. test_bit(SUSPENDED, &dev->flags),
  694. test_bit(TX_HALT, &dev->flags),
  695. test_bit(RX_HALT, &dev->flags));
  696. }
  697. ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
  698. kfree(buf);
  699. return ret;
  700. }
  701. static ssize_t data_bridge_reset_stats(struct file *file,
  702. const char __user *buf, size_t count, loff_t *ppos)
  703. {
  704. struct data_bridge *dev;
  705. int i;
  706. for (i = 0; i < MAX_BRIDGE_DEVICES; i++) {
  707. dev = __dev[i];
  708. if (!dev)
  709. continue;
  710. dev->to_host = 0;
  711. dev->to_modem = 0;
  712. dev->txurb_drp_cnt = 0;
  713. dev->tx_throttled_cnt = 0;
  714. dev->tx_unthrottled_cnt = 0;
  715. dev->rx_throttled_cnt = 0;
  716. dev->rx_unthrottled_cnt = 0;
  717. }
  718. return count;
  719. }
  720. const struct file_operations data_stats_ops = {
  721. .read = data_bridge_read_stats,
  722. .write = data_bridge_reset_stats,
  723. };
  724. static struct dentry *data_dent;
  725. static struct dentry *data_dfile_stats;
  726. static struct dentry *data_dfile_tstamp;
  727. static void data_bridge_debugfs_init(void)
  728. {
  729. data_dent = debugfs_create_dir("data_hsic_bridge", 0);
  730. if (IS_ERR(data_dent))
  731. return;
  732. data_dfile_stats = debugfs_create_file("status", 0644, data_dent, 0,
  733. &data_stats_ops);
  734. if (!data_dfile_stats || IS_ERR(data_dfile_stats)) {
  735. debugfs_remove(data_dent);
  736. return;
  737. }
  738. data_dfile_tstamp = debugfs_create_file("timestamp", 0644, data_dent,
  739. 0, &data_timestamp_ops);
  740. if (!data_dfile_tstamp || IS_ERR(data_dfile_tstamp))
  741. debugfs_remove(data_dent);
  742. }
  743. static void data_bridge_debugfs_exit(void)
  744. {
  745. debugfs_remove(data_dfile_stats);
  746. debugfs_remove(data_dfile_tstamp);
  747. debugfs_remove(data_dent);
  748. }
  749. #else
/* no-op stubs used when CONFIG_DEBUG_FS is disabled */
  750. static void data_bridge_debugfs_init(void) { }
  751. static void data_bridge_debugfs_exit(void) { }
  752. static void dbg_timestamp(char *event, struct sk_buff * skb)
  753. {
  754. return;
  755. }
  756. static unsigned int get_timestamp(void)
  757. {
  758. return 0;
  759. }
  760. #endif
  761. static int __devinit
  762. bridge_probe(struct usb_interface *iface, const struct usb_device_id *id)
  763. {
  764. struct usb_host_endpoint *endpoint = NULL;
  765. struct usb_host_endpoint *bulk_in = NULL;
  766. struct usb_host_endpoint *bulk_out = NULL;
  767. struct usb_host_endpoint *int_in = NULL;
  768. struct usb_device *udev;
  769. int i;
  770. int status = 0;
  771. int numends;
  772. int ch_id;
  773. char **bname = (char **)id->driver_info;
  774. if (iface->num_altsetting != 1) {
  775. err("%s invalid num_altsetting %u\n",
  776. __func__, iface->num_altsetting);
  777. return -EINVAL;
  778. }
  779. udev = interface_to_usbdev(iface);
  780. usb_get_dev(udev);
  781. numends = iface->cur_altsetting->desc.bNumEndpoints;
  782. for (i = 0; i < numends; i++) {
  783. endpoint = iface->cur_altsetting->endpoint + i;
  784. if (!endpoint) {
  785. dev_err(&iface->dev, "%s: invalid endpoint %u\n",
  786. __func__, i);
  787. status = -EINVAL;
  788. goto out;
  789. }
  790. if (usb_endpoint_is_bulk_in(&endpoint->desc))
  791. bulk_in = endpoint;
  792. else if (usb_endpoint_is_bulk_out(&endpoint->desc))
  793. bulk_out = endpoint;
  794. else if (usb_endpoint_is_int_in(&endpoint->desc))
  795. int_in = endpoint;
  796. }
  797. if (!bulk_in || !bulk_out || !int_in) {
  798. dev_err(&iface->dev, "%s: invalid endpoints\n", __func__);
  799. status = -EINVAL;
  800. goto out;
  801. }
  802. ch_id = get_bridge_dev_idx();
  803. if (ch_id < 0) {
  804. err("%s all bridge channels claimed. Probe failed\n", __func__);
  805. return -ENODEV;
  806. }
  807. status = data_bridge_probe(iface, bulk_in, bulk_out,
  808. bname[BRIDGE_DATA_IDX], ch_id);
  809. if (status < 0) {
  810. dev_err(&iface->dev, "data_bridge_probe failed %d\n", status);
  811. goto out;
  812. }
  813. status = ctrl_bridge_probe(iface, int_in, bname[BRIDGE_CTRL_IDX],
  814. ch_id);
  815. if (status < 0) {
  816. dev_err(&iface->dev, "ctrl_bridge_probe failed %d\n", status);
  817. goto error;
  818. }
  819. return 0;
  820. error:
  821. platform_device_unregister(__dev[ch_id]->pdev);
  822. free_rx_urbs(__dev[ch_id]);
  823. usb_set_intfdata(iface, NULL);
  824. out:
  825. usb_put_dev(udev);
  826. return status;
  827. }
/*
 * bridge_disconnect() - USB disconnect callback for the mdm_bridge driver.
 *
 * Tears down the per-channel bridge state for the disconnected interface.
 * Teardown order is deliberate: the control bridge and platform device are
 * removed before the rx urbs are freed, and the usb_device reference taken
 * in bridge_probe() is dropped last before the channel slot is released.
 */
static void bridge_disconnect(struct usb_interface *intf)
{
	struct data_bridge *dev = usb_get_intfdata(intf);

	if (!dev) {
		err("%s: data device not found\n", __func__);
		return;
	}

	/*
	 * Set device name to none to get correct channel id
	 * at the time of bridge open.
	 */
	dev->name = "none";
	ctrl_bridge_disconnect(dev->id);
	platform_device_unregister(dev->pdev);
	usb_set_intfdata(intf, NULL);
	free_rx_urbs(dev);
	usb_put_dev(dev->udev);	/* balances usb_get_dev() in bridge_probe() */
	/*
	 * Release the channel slot; presumably get_bridge_dev_idx() uses this
	 * bit to hand the slot to a later probe — TODO confirm.
	 */
	clear_bit(CLAIMED, &dev->flags);
}
/*
 * driver_info stores the data/ctrl bridge name pair used to match the
 * bridge transport (xport) name.  Each supported product id appears twice:
 * one interface number bound to the serial bridge names and one to the
 * rmnet bridge names.
 */
static const struct usb_device_id bridge_ids[] = {
	{ USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9001, 2),
	.driver_info = (unsigned long)serial_hsic_bridge_names,
	},
	{ USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9001, 3),
	.driver_info = (unsigned long)rmnet_hsic_bridge_names,
	},
	{ USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9034, 2),
	.driver_info = (unsigned long)serial_hsic_bridge_names,
	},
	{ USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9034, 3),
	.driver_info = (unsigned long)rmnet_hsic_bridge_names,
	},
	{ USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9048, 3),
	.driver_info = (unsigned long)serial_hsic_bridge_names,
	},
	{ USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9048, 4),
	.driver_info = (unsigned long)rmnet_hsic_bridge_names,
	},
	{ USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x904c, 3),
	.driver_info = (unsigned long)serial_hsic_bridge_names,
	},
	{ USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x904c, 5),
	.driver_info = (unsigned long)rmnet_hsic_bridge_names,
	},
	{ USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9075, 3),
	.driver_info = (unsigned long)serial_hsic_bridge_names,
	},
	{ USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9075, 5),
	.driver_info = (unsigned long)rmnet_hsic_bridge_names,
	},
	{ USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9079, 3),
	.driver_info = (unsigned long)serial_hsusb_bridge_names,
	},
	{ USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9079, 4),
	.driver_info = (unsigned long)rmnet_hsusb_bridge_names,
	},
	{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, bridge_ids);
/*
 * USB driver glue.  Autosuspend is enabled; suspend/resume are handled by
 * bridge_suspend()/bridge_resume().
 */
static struct usb_driver bridge_driver = {
	.name = "mdm_bridge",
	.probe = bridge_probe,
	.disconnect = bridge_disconnect,
	.id_table = bridge_ids,
	.suspend = bridge_suspend,
	.resume = bridge_resume,
	.supports_autosuspend = 1,
};
  896. static int __init bridge_init(void)
  897. {
  898. struct data_bridge *dev;
  899. int ret;
  900. int i = 0;
  901. ret = ctrl_bridge_init();
  902. if (ret)
  903. return ret;
  904. bridge_wq = create_singlethread_workqueue("mdm_bridge");
  905. if (!bridge_wq) {
  906. pr_err("%s: Unable to create workqueue:bridge\n", __func__);
  907. ret = -ENOMEM;
  908. goto free_ctrl;
  909. }
  910. for (i = 0; i < MAX_BRIDGE_DEVICES; i++) {
  911. dev = kzalloc(sizeof(*dev), GFP_KERNEL);
  912. if (!dev) {
  913. err("%s: unable to allocate dev\n", __func__);
  914. ret = -ENOMEM;
  915. goto error;
  916. }
  917. dev->wq = bridge_wq;
  918. /*transport name will be set during probe*/
  919. dev->name = "none";
  920. init_usb_anchor(&dev->tx_active);
  921. init_usb_anchor(&dev->rx_active);
  922. INIT_LIST_HEAD(&dev->rx_idle);
  923. skb_queue_head_init(&dev->rx_done);
  924. INIT_WORK(&dev->kevent, defer_kevent);
  925. INIT_WORK(&dev->process_rx_w, data_bridge_process_rx);
  926. __dev[i] = dev;
  927. }
  928. ret = usb_register(&bridge_driver);
  929. if (ret) {
  930. err("%s: unable to register mdm_bridge driver", __func__);
  931. goto error;
  932. }
  933. data_bridge_debugfs_init();
  934. return 0;
  935. error:
  936. while (--i >= 0) {
  937. kfree(__dev[i]);
  938. __dev[i] = NULL;
  939. }
  940. destroy_workqueue(bridge_wq);
  941. free_ctrl:
  942. ctrl_bridge_exit();
  943. return ret;
  944. }
  945. static void __exit bridge_exit(void)
  946. {
  947. int i;
  948. usb_deregister(&bridge_driver);
  949. data_bridge_debugfs_exit();
  950. destroy_workqueue(bridge_wq);
  951. for (i = 0; i < MAX_BRIDGE_DEVICES; i++) {
  952. kfree(__dev[i]);
  953. __dev[i] = NULL;
  954. }
  955. ctrl_bridge_exit();
  956. }
/* Module entry/exit points and metadata. */
module_init(bridge_init);
module_exit(bridge_exit);

MODULE_DESCRIPTION("Qualcomm modem data bridge driver");
MODULE_LICENSE("GPL v2");