u_data_hsic.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197
  1. /* Copyright (c) 2011-2013, Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #include <linux/kernel.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/device.h>
  15. #include <linux/delay.h>
  16. #include <linux/slab.h>
  17. #include <linux/termios.h>
  18. #include <linux/netdevice.h>
  19. #include <linux/debugfs.h>
  20. #include <linux/bitops.h>
  21. #include <linux/termios.h>
  22. #include <mach/usb_bridge.h>
  23. #include <mach/usb_gadget_xport.h>
/* number of data ports actually allocated; set by the setup path */
static unsigned int no_data_ports;

/* Default queue depths and request sizing; all runtime-tunable below. */
#define GHSIC_DATA_RMNET_RX_Q_SIZE 50
#define GHSIC_DATA_RMNET_TX_Q_SIZE 300
#define GHSIC_DATA_SERIAL_RX_Q_SIZE 10
#define GHSIC_DATA_SERIAL_TX_Q_SIZE 20
#define GHSIC_DATA_RX_REQ_SIZE 2048
#define GHSIC_DATA_TX_INTR_THRESHOLD 20

/* per-transport queue depths (rmnet vs serial), exposed as module params */
static unsigned int ghsic_data_rmnet_tx_q_size = GHSIC_DATA_RMNET_TX_Q_SIZE;
module_param(ghsic_data_rmnet_tx_q_size, uint, S_IRUGO | S_IWUSR);

static unsigned int ghsic_data_rmnet_rx_q_size = GHSIC_DATA_RMNET_RX_Q_SIZE;
module_param(ghsic_data_rmnet_rx_q_size, uint, S_IRUGO | S_IWUSR);

static unsigned int ghsic_data_serial_tx_q_size = GHSIC_DATA_SERIAL_TX_Q_SIZE;
module_param(ghsic_data_serial_tx_q_size, uint, S_IRUGO | S_IWUSR);

static unsigned int ghsic_data_serial_rx_q_size = GHSIC_DATA_SERIAL_RX_Q_SIZE;
module_param(ghsic_data_serial_rx_q_size, uint, S_IRUGO | S_IWUSR);

/* size of each OUT (rx) usb_request buffer, in bytes */
static unsigned int ghsic_data_rx_req_size = GHSIC_DATA_RX_REQ_SIZE;
module_param(ghsic_data_rx_req_size, uint, S_IRUGO | S_IWUSR);

/* request a tx completion IRQ only every Nth request (IRQ mitigation) */
unsigned int ghsic_data_tx_intr_thld = GHSIC_DATA_TX_INTR_THRESHOLD;
module_param(ghsic_data_tx_intr_thld, uint, S_IRUGO | S_IWUSR);

/*flow ctrl*/
#define GHSIC_DATA_FLOW_CTRL_EN_THRESHOLD 500
#define GHSIC_DATA_FLOW_CTRL_DISABLE 300
#define GHSIC_DATA_FLOW_CTRL_SUPPORT 1
#define GHSIC_DATA_PENDLIMIT_WITH_BRIDGE 500

/* 0 disables downlink flow control entirely */
static unsigned int ghsic_data_fctrl_support = GHSIC_DATA_FLOW_CTRL_SUPPORT;
module_param(ghsic_data_fctrl_support, uint, S_IRUGO | S_IWUSR);

/* throttle bridge rx when tx_skb_q backlog reaches this length */
static unsigned int ghsic_data_fctrl_en_thld =
		GHSIC_DATA_FLOW_CTRL_EN_THRESHOLD;
module_param(ghsic_data_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

/* unthrottle bridge rx once backlog drains to this length */
static unsigned int ghsic_data_fctrl_dis_thld = GHSIC_DATA_FLOW_CTRL_DISABLE;
module_param(ghsic_data_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

/* stop queuing new OUT requests once rx_skb_q exceeds this length */
static unsigned int ghsic_data_pend_limit_with_bridge =
		GHSIC_DATA_PENDLIMIT_WITH_BRIDGE;
module_param(ghsic_data_pend_limit_with_bridge, uint, S_IRUGO | S_IWUSR);
/* bit positions in gdata_port.bridge_sts */
#define CH_OPENED 0
#define CH_READY 1

/*
 * Per-port state tying one USB gadget function (serial or rmnet) to one
 * HSIC data bridge channel. "tx" is the downlink path toward the USB
 * host (guarded by tx_lock); "rx" is the uplink path toward the modem
 * (guarded by rx_lock).
 */
struct gdata_port {
	/* port */
	unsigned port_num;

	/* gadget */
	atomic_t connected;	/* set by ghsic_data_connect/disconnect */
	struct usb_ep *in;	/* IN ep: device -> host (downlink) */
	struct usb_ep *out;	/* OUT ep: host -> device (uplink) */
	enum gadget_type gtype;

	/* data transfer queues */
	unsigned int tx_q_size;		/* number of IN requests */
	struct list_head tx_idle;	/* unused IN requests */
	struct sk_buff_head tx_skb_q;	/* skbs waiting for the host */
	spinlock_t tx_lock;

	unsigned int rx_q_size;		/* number of OUT requests */
	struct list_head rx_idle;	/* unused OUT requests */
	struct sk_buff_head rx_skb_q;	/* skbs waiting for the modem */
	spinlock_t rx_lock;

	/* work */
	struct workqueue_struct *wq;
	struct work_struct connect_w;
	struct work_struct disconnect_w;
	struct work_struct write_tomdm_w;
	struct work_struct write_tohost_w;

	struct bridge brdg;

	/*bridge status*/
	unsigned long bridge_sts;	/* CH_OPENED / CH_READY bits */

	unsigned int n_tx_req_queued;	/* counts toward tx intr threshold */

	/*counters*/
	unsigned long to_modem;
	unsigned long to_host;
	unsigned int rx_throttled_cnt;
	unsigned int rx_unthrottled_cnt;
	unsigned int tx_throttled_cnt;
	unsigned int tx_unthrottled_cnt;
	unsigned int tomodem_drp_cnt;
	unsigned int unthrottled_pnd_skbs;
};
/* one slot per bridge data channel: port state + its platform driver */
static struct {
	struct gdata_port *port;
	struct platform_driver pdrv;
	char port_name[BRIDGE_NAME_MAX_LEN];
} gdata_ports[NUM_PORTS];

/* forward declarations (defined in the CONFIG_DEBUG_FS section below) */
static unsigned int get_timestamp(void);
static void dbg_timestamp(char *, struct sk_buff *);
static void ghsic_data_start_rx(struct gdata_port *port);
  105. static void ghsic_data_free_requests(struct usb_ep *ep, struct list_head *head)
  106. {
  107. struct usb_request *req;
  108. while (!list_empty(head)) {
  109. req = list_entry(head->next, struct usb_request, list);
  110. list_del(&req->list);
  111. usb_ep_free_request(ep, req);
  112. }
  113. }
  114. static int ghsic_data_alloc_requests(struct usb_ep *ep, struct list_head *head,
  115. int num,
  116. void (*cb)(struct usb_ep *ep, struct usb_request *),
  117. spinlock_t *lock)
  118. {
  119. int i;
  120. struct usb_request *req;
  121. unsigned long flags;
  122. pr_debug("%s: ep:%s head:%pK num:%d cb:%pK", __func__,
  123. ep->name, head, num, cb);
  124. for (i = 0; i < num; i++) {
  125. req = usb_ep_alloc_request(ep, GFP_KERNEL);
  126. if (!req) {
  127. pr_debug("%s: req allocated:%d\n", __func__, i);
  128. return list_empty(head) ? -ENOMEM : 0;
  129. }
  130. req->complete = cb;
  131. spin_lock_irqsave(lock, flags);
  132. list_add(&req->list, head);
  133. spin_unlock_irqrestore(lock, flags);
  134. }
  135. return 0;
  136. }
  137. static void ghsic_data_unthrottle_tx(void *ctx)
  138. {
  139. struct gdata_port *port = ctx;
  140. unsigned long flags;
  141. if (!port || !atomic_read(&port->connected))
  142. return;
  143. spin_lock_irqsave(&port->rx_lock, flags);
  144. port->tx_unthrottled_cnt++;
  145. spin_unlock_irqrestore(&port->rx_lock, flags);
  146. queue_work(port->wq, &port->write_tomdm_w);
  147. pr_debug("%s: port num =%d unthrottled\n", __func__,
  148. port->port_num);
  149. }
/*
 * ghsic_data_write_tohost - downlink writer (write_tohost_w work item).
 *
 * Pairs each skb queued on tx_skb_q with an idle IN request and queues
 * it on the IN endpoint. Once the backlog drains to the disable
 * threshold, clears RX_THROTTLED and unthrottles the bridge's receive
 * side. tx_lock is dropped around usb_ep_queue() since the completion
 * may run (and retake the lock) from there.
 */
static void ghsic_data_write_tohost(struct work_struct *w)
{
	unsigned long flags;
	struct sk_buff *skb;
	int ret;
	struct usb_request *req;
	struct usb_ep *ep;
	struct gdata_port *port;
	struct timestamp_info *info;

	port = container_of(w, struct gdata_port, write_tohost_w);
	if (!port)
		return;

	spin_lock_irqsave(&port->tx_lock, flags);
	ep = port->in;
	if (!ep) {
		/* endpoint already torn down by disconnect */
		spin_unlock_irqrestore(&port->tx_lock, flags);
		return;
	}

	while (!list_empty(&port->tx_idle)) {
		skb = __skb_dequeue(&port->tx_skb_q);
		if (!skb)
			break;
		req = list_first_entry(&port->tx_idle, struct usb_request,
				list);
		req->context = skb;
		req->buf = skb->data;
		req->length = skb->len;
		req->zero = 1;

		port->n_tx_req_queued++;
		/* IRQ mitigation: only every ghsic_data_tx_intr_thld'th
		 * request asks for a completion interrupt. */
		if (port->n_tx_req_queued == ghsic_data_tx_intr_thld) {
			req->no_interrupt = 0;
			port->n_tx_req_queued = 0;
		} else {
			req->no_interrupt = 1;
		}
		list_del(&req->list);

		info = (struct timestamp_info *)skb->cb;
		info->tx_queued = get_timestamp();
		/* drop the lock across usb_ep_queue(); it may complete
		 * inline and re-enter our completion handler */
		spin_unlock_irqrestore(&port->tx_lock, flags);
		ret = usb_ep_queue(ep, req, GFP_KERNEL);
		spin_lock_irqsave(&port->tx_lock, flags);
		if (ret) {
			pr_err("%s: usb epIn failed\n", __func__);
			/* return the request, drop the packet */
			list_add(&req->list, &port->tx_idle);
			dev_kfree_skb_any(skb);
			break;
		}
		port->to_host++;
		/* backlog drained: lift downlink flow control if we set it */
		if (ghsic_data_fctrl_support &&
			port->tx_skb_q.qlen <= ghsic_data_fctrl_dis_thld &&
			test_and_clear_bit(RX_THROTTLED, &port->brdg.flags)) {
			port->rx_unthrottled_cnt++;
			port->unthrottled_pnd_skbs = port->tx_skb_q.qlen;
			pr_debug_ratelimited("%s: disable flow ctrl:"
					" tx skbq len: %u\n",
					__func__, port->tx_skb_q.qlen);
			data_bridge_unthrottle_rx(port->brdg.ch_id);
		}
	}
	spin_unlock_irqrestore(&port->tx_lock, flags);
}
  211. static int ghsic_data_receive(void *p, void *data, size_t len)
  212. {
  213. struct gdata_port *port = p;
  214. unsigned long flags;
  215. struct sk_buff *skb = data;
  216. if (!port || !atomic_read(&port->connected)) {
  217. dev_kfree_skb_any(skb);
  218. return -ENOTCONN;
  219. }
  220. pr_debug("%s: p:%pK#%d skb_len:%d\n", __func__,
  221. port, port->port_num, skb->len);
  222. spin_lock_irqsave(&port->tx_lock, flags);
  223. __skb_queue_tail(&port->tx_skb_q, skb);
  224. if (ghsic_data_fctrl_support &&
  225. port->tx_skb_q.qlen >= ghsic_data_fctrl_en_thld) {
  226. set_bit(RX_THROTTLED, &port->brdg.flags);
  227. port->rx_throttled_cnt++;
  228. pr_debug_ratelimited("%s: flow ctrl enabled: tx skbq len: %u\n",
  229. __func__, port->tx_skb_q.qlen);
  230. spin_unlock_irqrestore(&port->tx_lock, flags);
  231. queue_work(port->wq, &port->write_tohost_w);
  232. return -EBUSY;
  233. }
  234. spin_unlock_irqrestore(&port->tx_lock, flags);
  235. queue_work(port->wq, &port->write_tohost_w);
  236. return 0;
  237. }
  238. static void ghsic_data_write_tomdm(struct work_struct *w)
  239. {
  240. struct gdata_port *port;
  241. struct sk_buff *skb;
  242. struct timestamp_info *info;
  243. unsigned long flags;
  244. int ret;
  245. port = container_of(w, struct gdata_port, write_tomdm_w);
  246. if (!port || !atomic_read(&port->connected))
  247. return;
  248. spin_lock_irqsave(&port->rx_lock, flags);
  249. if (test_bit(TX_THROTTLED, &port->brdg.flags)) {
  250. spin_unlock_irqrestore(&port->rx_lock, flags);
  251. goto start_rx;
  252. }
  253. while ((skb = __skb_dequeue(&port->rx_skb_q))) {
  254. pr_debug("%s: port:%pK tom:%lu pno:%d\n", __func__,
  255. port, port->to_modem, port->port_num);
  256. info = (struct timestamp_info *)skb->cb;
  257. info->rx_done_sent = get_timestamp();
  258. spin_unlock_irqrestore(&port->rx_lock, flags);
  259. ret = data_bridge_write(port->brdg.ch_id, skb);
  260. spin_lock_irqsave(&port->rx_lock, flags);
  261. if (ret < 0) {
  262. if (ret == -EBUSY) {
  263. /*flow control*/
  264. port->tx_throttled_cnt++;
  265. break;
  266. }
  267. pr_err_ratelimited("%s: write error:%d\n",
  268. __func__, ret);
  269. port->tomodem_drp_cnt++;
  270. dev_kfree_skb_any(skb);
  271. break;
  272. }
  273. port->to_modem++;
  274. }
  275. spin_unlock_irqrestore(&port->rx_lock, flags);
  276. start_rx:
  277. ghsic_data_start_rx(port);
  278. }
  279. static void ghsic_data_epin_complete(struct usb_ep *ep, struct usb_request *req)
  280. {
  281. struct gdata_port *port = ep->driver_data;
  282. struct sk_buff *skb = req->context;
  283. int status = req->status;
  284. switch (status) {
  285. case 0:
  286. /* successful completion */
  287. dbg_timestamp("DL", skb);
  288. break;
  289. case -ECONNRESET:
  290. case -ESHUTDOWN:
  291. /* connection gone */
  292. dev_kfree_skb_any(skb);
  293. req->buf = 0;
  294. usb_ep_free_request(ep, req);
  295. return;
  296. default:
  297. pr_err("%s: data tx ep error %d\n", __func__, status);
  298. break;
  299. }
  300. dev_kfree_skb_any(skb);
  301. spin_lock(&port->tx_lock);
  302. list_add_tail(&req->list, &port->tx_idle);
  303. spin_unlock(&port->tx_lock);
  304. queue_work(port->wq, &port->write_tohost_w);
  305. }
  306. static void
  307. ghsic_data_epout_complete(struct usb_ep *ep, struct usb_request *req)
  308. {
  309. struct gdata_port *port = ep->driver_data;
  310. struct sk_buff *skb = req->context;
  311. struct timestamp_info *info = (struct timestamp_info *)skb->cb;
  312. int status = req->status;
  313. int queue = 0;
  314. switch (status) {
  315. case 0:
  316. skb_put(skb, req->actual);
  317. queue = 1;
  318. break;
  319. case -ECONNRESET:
  320. case -ESHUTDOWN:
  321. /* cable disconnection */
  322. dev_kfree_skb_any(skb);
  323. req->buf = 0;
  324. usb_ep_free_request(ep, req);
  325. return;
  326. default:
  327. pr_err_ratelimited("%s: %s response error %d, %d/%d\n",
  328. __func__, ep->name, status,
  329. req->actual, req->length);
  330. dev_kfree_skb_any(skb);
  331. break;
  332. }
  333. spin_lock(&port->rx_lock);
  334. if (queue) {
  335. info->rx_done = get_timestamp();
  336. __skb_queue_tail(&port->rx_skb_q, skb);
  337. list_add_tail(&req->list, &port->rx_idle);
  338. queue_work(port->wq, &port->write_tomdm_w);
  339. }
  340. spin_unlock(&port->rx_lock);
  341. }
/*
 * ghsic_data_start_rx - (re)arm the OUT endpoint with idle requests.
 *
 * For each request in rx_idle, allocate a fresh skb of
 * ghsic_data_rx_req_size bytes and queue the request on the OUT ep.
 * Stops early when the uplink backlog (rx_skb_q) exceeds
 * ghsic_data_pend_limit_with_bridge. rx_lock is dropped around the
 * sleeping allocations and usb_ep_queue().
 */
static void ghsic_data_start_rx(struct gdata_port *port)
{
	struct usb_request *req;
	struct usb_ep *ep;
	unsigned long flags;
	int ret;
	struct sk_buff *skb;
	struct timestamp_info *info;
	unsigned int created;

	pr_debug("%s: port:%pK\n", __func__, port);
	if (!port)
		return;

	spin_lock_irqsave(&port->rx_lock, flags);
	ep = port->out;
	if (!ep) {
		/* endpoint gone: disconnect in progress */
		spin_unlock_irqrestore(&port->rx_lock, flags);
		return;
	}

	while (atomic_read(&port->connected) && !list_empty(&port->rx_idle)) {
		/* don't build an unbounded backlog toward the modem */
		if (port->rx_skb_q.qlen > ghsic_data_pend_limit_with_bridge)
			break;

		req = list_first_entry(&port->rx_idle,
					struct usb_request, list);
		list_del(&req->list);
		spin_unlock_irqrestore(&port->rx_lock, flags);

		created = get_timestamp();
		skb = alloc_skb(ghsic_data_rx_req_size, GFP_KERNEL);
		if (!skb) {
			/* out of memory: park the request and retry later */
			spin_lock_irqsave(&port->rx_lock, flags);
			list_add(&req->list, &port->rx_idle);
			break;
		}
		info = (struct timestamp_info *)skb->cb;
		info->created = created;
		req->buf = skb->data;
		req->length = ghsic_data_rx_req_size;
		req->context = skb;

		info->rx_queued = get_timestamp();
		ret = usb_ep_queue(ep, req, GFP_KERNEL);

		spin_lock_irqsave(&port->rx_lock, flags);
		if (ret) {
			dev_kfree_skb_any(skb);

			pr_err_ratelimited("%s: rx queue failed\n", __func__);

			/* recycle the request only while still connected */
			if (atomic_read(&port->connected))
				list_add(&req->list, &port->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->rx_lock, flags);
}
  394. static void ghsic_data_start_io(struct gdata_port *port)
  395. {
  396. unsigned long flags;
  397. struct usb_ep *ep_out, *ep_in;
  398. int ret;
  399. pr_debug("%s: port:%pK\n", __func__, port);
  400. if (!port)
  401. return;
  402. spin_lock_irqsave(&port->rx_lock, flags);
  403. ep_out = port->out;
  404. spin_unlock_irqrestore(&port->rx_lock, flags);
  405. if (!ep_out)
  406. return;
  407. ret = ghsic_data_alloc_requests(ep_out, &port->rx_idle,
  408. port->rx_q_size, ghsic_data_epout_complete, &port->rx_lock);
  409. if (ret) {
  410. pr_err("%s: rx req allocation failed\n", __func__);
  411. return;
  412. }
  413. spin_lock_irqsave(&port->tx_lock, flags);
  414. ep_in = port->in;
  415. spin_unlock_irqrestore(&port->tx_lock, flags);
  416. if (!ep_in) {
  417. spin_lock_irqsave(&port->rx_lock, flags);
  418. ghsic_data_free_requests(ep_out, &port->rx_idle);
  419. spin_unlock_irqrestore(&port->rx_lock, flags);
  420. return;
  421. }
  422. ret = ghsic_data_alloc_requests(ep_in, &port->tx_idle,
  423. port->tx_q_size, ghsic_data_epin_complete, &port->tx_lock);
  424. if (ret) {
  425. pr_err("%s: tx req allocation failed\n", __func__);
  426. spin_lock_irqsave(&port->rx_lock, flags);
  427. ghsic_data_free_requests(ep_out, &port->rx_idle);
  428. spin_unlock_irqrestore(&port->rx_lock, flags);
  429. return;
  430. }
  431. /* queue out requests */
  432. ghsic_data_start_rx(port);
  433. }
  434. static void ghsic_data_connect_w(struct work_struct *w)
  435. {
  436. struct gdata_port *port =
  437. container_of(w, struct gdata_port, connect_w);
  438. int ret;
  439. if (!port || !atomic_read(&port->connected) ||
  440. !test_bit(CH_READY, &port->bridge_sts))
  441. return;
  442. pr_debug("%s: port:%pK\n", __func__, port);
  443. ret = data_bridge_open(&port->brdg);
  444. if (ret) {
  445. pr_err("%s: unable open bridge ch:%d err:%d\n",
  446. __func__, port->brdg.ch_id, ret);
  447. return;
  448. }
  449. set_bit(CH_OPENED, &port->bridge_sts);
  450. ghsic_data_start_io(port);
  451. }
  452. static void ghsic_data_disconnect_w(struct work_struct *w)
  453. {
  454. struct gdata_port *port =
  455. container_of(w, struct gdata_port, disconnect_w);
  456. if (!test_bit(CH_OPENED, &port->bridge_sts))
  457. return;
  458. data_bridge_close(port->brdg.ch_id);
  459. clear_bit(CH_OPENED, &port->bridge_sts);
  460. }
  461. static void ghsic_data_free_buffers(struct gdata_port *port)
  462. {
  463. struct sk_buff *skb;
  464. unsigned long flags;
  465. if (!port)
  466. return;
  467. spin_lock_irqsave(&port->tx_lock, flags);
  468. if (!port->in) {
  469. spin_unlock_irqrestore(&port->tx_lock, flags);
  470. return;
  471. }
  472. ghsic_data_free_requests(port->in, &port->tx_idle);
  473. while ((skb = __skb_dequeue(&port->tx_skb_q)))
  474. dev_kfree_skb_any(skb);
  475. spin_unlock_irqrestore(&port->tx_lock, flags);
  476. spin_lock_irqsave(&port->rx_lock, flags);
  477. if (!port->out) {
  478. spin_unlock_irqrestore(&port->rx_lock, flags);
  479. return;
  480. }
  481. ghsic_data_free_requests(port->out, &port->rx_idle);
  482. while ((skb = __skb_dequeue(&port->rx_skb_q)))
  483. dev_kfree_skb_any(skb);
  484. spin_unlock_irqrestore(&port->rx_lock, flags);
  485. }
  486. static int ghsic_data_get_port_id(const char *pdev_name)
  487. {
  488. struct gdata_port *port;
  489. int i;
  490. for (i = 0; i < no_data_ports; i++) {
  491. port = gdata_ports[i].port;
  492. if (!strncmp(port->brdg.name, pdev_name, BRIDGE_NAME_MAX_LEN))
  493. return i;
  494. }
  495. return -EINVAL;
  496. }
  497. static int ghsic_data_probe(struct platform_device *pdev)
  498. {
  499. struct gdata_port *port;
  500. int id;
  501. pr_debug("%s: name:%s no_data_ports= %d\n", __func__, pdev->name,
  502. no_data_ports);
  503. id = ghsic_data_get_port_id(pdev->name);
  504. if (id < 0 || id >= no_data_ports) {
  505. pr_err("%s: invalid port: %d\n", __func__, id);
  506. return -EINVAL;
  507. }
  508. port = gdata_ports[id].port;
  509. set_bit(CH_READY, &port->bridge_sts);
  510. /* if usb is online, try opening bridge */
  511. if (atomic_read(&port->connected))
  512. queue_work(port->wq, &port->connect_w);
  513. return 0;
  514. }
/* mdm disconnect */
/*
 * Platform remove: the modem-side channel went away. Flush both
 * endpoint FIFOs, stop uplink work, release buffers, close the bridge
 * if open and clear readiness. Ordering matters: write_tomdm_w is
 * cancelled before buffers are freed so it cannot touch freed skbs.
 */
static int ghsic_data_remove(struct platform_device *pdev)
{
	struct gdata_port *port;
	struct usb_ep *ep_in;
	struct usb_ep *ep_out;
	int id;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	id = ghsic_data_get_port_id(pdev->name);
	if (id < 0 || id >= no_data_ports) {
		pr_err("%s: invalid port: %d\n", __func__, id);
		return -EINVAL;
	}

	port = gdata_ports[id].port;

	ep_in = port->in;
	if (ep_in)
		usb_ep_fifo_flush(ep_in);

	ep_out = port->out;
	if (ep_out)
		usb_ep_fifo_flush(ep_out);

	/* cancel pending writes to MDM */
	cancel_work_sync(&port->write_tomdm_w);

	ghsic_data_free_buffers(port);

	cancel_work_sync(&port->connect_w);
	if (test_and_clear_bit(CH_OPENED, &port->bridge_sts))
		data_bridge_close(port->brdg.ch_id);
	clear_bit(CH_READY, &port->bridge_sts);

	return 0;
}
  544. static void ghsic_data_port_free(int portno)
  545. {
  546. struct gdata_port *port = gdata_ports[portno].port;
  547. struct platform_driver *pdrv = &gdata_ports[portno].pdrv;
  548. destroy_workqueue(port->wq);
  549. kfree(port);
  550. if (pdrv)
  551. platform_driver_unregister(pdrv);
  552. }
  553. static int ghsic_data_port_alloc(unsigned port_num, enum gadget_type gtype)
  554. {
  555. struct gdata_port *port;
  556. struct platform_driver *pdrv;
  557. char *name;
  558. port = kzalloc(sizeof(struct gdata_port), GFP_KERNEL);
  559. if (!port)
  560. return -ENOMEM;
  561. name = gdata_ports[port_num].port_name;
  562. port->wq = create_singlethread_workqueue(name);
  563. if (!port->wq) {
  564. pr_err("%s: Unable to create workqueue:%s\n", __func__, name);
  565. kfree(port);
  566. return -ENOMEM;
  567. }
  568. port->port_num = port_num;
  569. /* port initialization */
  570. spin_lock_init(&port->rx_lock);
  571. spin_lock_init(&port->tx_lock);
  572. INIT_WORK(&port->connect_w, ghsic_data_connect_w);
  573. INIT_WORK(&port->disconnect_w, ghsic_data_disconnect_w);
  574. INIT_WORK(&port->write_tohost_w, ghsic_data_write_tohost);
  575. INIT_WORK(&port->write_tomdm_w, ghsic_data_write_tomdm);
  576. INIT_LIST_HEAD(&port->tx_idle);
  577. INIT_LIST_HEAD(&port->rx_idle);
  578. skb_queue_head_init(&port->tx_skb_q);
  579. skb_queue_head_init(&port->rx_skb_q);
  580. port->gtype = gtype;
  581. port->brdg.name = name;
  582. port->brdg.ctx = port;
  583. port->brdg.ops.send_pkt = ghsic_data_receive;
  584. port->brdg.ops.unthrottle_tx = ghsic_data_unthrottle_tx;
  585. gdata_ports[port_num].port = port;
  586. pdrv = &gdata_ports[port_num].pdrv;
  587. pdrv->probe = ghsic_data_probe;
  588. pdrv->remove = ghsic_data_remove;
  589. pdrv->driver.name = name;
  590. pdrv->driver.owner = THIS_MODULE;
  591. platform_driver_register(pdrv);
  592. pr_debug("%s: portno:%d\n", __func__, port_num);
  593. return 0;
  594. }
  595. void ghsic_data_disconnect(void *gptr, int port_num)
  596. {
  597. struct gdata_port *port;
  598. unsigned long flags;
  599. pr_debug("%s: port#%d\n", __func__, port_num);
  600. port = gdata_ports[port_num].port;
  601. if (port_num > no_data_ports) {
  602. pr_err("%s: invalid portno#%d\n", __func__, port_num);
  603. return;
  604. }
  605. if (!gptr || !port) {
  606. pr_err("%s: port is null\n", __func__);
  607. return;
  608. }
  609. ghsic_data_free_buffers(port);
  610. /* disable endpoints */
  611. if (port->in) {
  612. usb_ep_disable(port->in);
  613. port->in->driver_data = NULL;
  614. }
  615. if (port->out) {
  616. usb_ep_disable(port->out);
  617. port->out->driver_data = NULL;
  618. }
  619. atomic_set(&port->connected, 0);
  620. spin_lock_irqsave(&port->tx_lock, flags);
  621. port->in = NULL;
  622. port->n_tx_req_queued = 0;
  623. clear_bit(RX_THROTTLED, &port->brdg.flags);
  624. spin_unlock_irqrestore(&port->tx_lock, flags);
  625. spin_lock_irqsave(&port->rx_lock, flags);
  626. port->out = NULL;
  627. clear_bit(TX_THROTTLED, &port->brdg.flags);
  628. spin_unlock_irqrestore(&port->rx_lock, flags);
  629. queue_work(port->wq, &port->disconnect_w);
  630. }
  631. int ghsic_data_connect(void *gptr, int port_num)
  632. {
  633. struct gdata_port *port;
  634. struct gserial *gser;
  635. struct grmnet *gr;
  636. unsigned long flags;
  637. int ret = 0;
  638. pr_debug("%s: port#%d\n", __func__, port_num);
  639. port = gdata_ports[port_num].port;
  640. if (port_num > no_data_ports) {
  641. pr_err("%s: invalid portno#%d\n", __func__, port_num);
  642. return -ENODEV;
  643. }
  644. if (!gptr || !port) {
  645. pr_err("%s: port is null\n", __func__);
  646. return -ENODEV;
  647. }
  648. if (port->gtype == USB_GADGET_SERIAL) {
  649. gser = gptr;
  650. spin_lock_irqsave(&port->tx_lock, flags);
  651. port->in = gser->in;
  652. spin_unlock_irqrestore(&port->tx_lock, flags);
  653. spin_lock_irqsave(&port->rx_lock, flags);
  654. port->out = gser->out;
  655. spin_unlock_irqrestore(&port->rx_lock, flags);
  656. port->tx_q_size = ghsic_data_serial_tx_q_size;
  657. port->rx_q_size = ghsic_data_serial_rx_q_size;
  658. gser->in->driver_data = port;
  659. gser->out->driver_data = port;
  660. } else {
  661. gr = gptr;
  662. spin_lock_irqsave(&port->tx_lock, flags);
  663. port->in = gr->in;
  664. spin_unlock_irqrestore(&port->tx_lock, flags);
  665. spin_lock_irqsave(&port->rx_lock, flags);
  666. port->out = gr->out;
  667. spin_unlock_irqrestore(&port->rx_lock, flags);
  668. port->tx_q_size = ghsic_data_rmnet_tx_q_size;
  669. port->rx_q_size = ghsic_data_rmnet_rx_q_size;
  670. gr->in->driver_data = port;
  671. gr->out->driver_data = port;
  672. }
  673. ret = usb_ep_enable(port->in);
  674. if (ret) {
  675. pr_err("%s: usb_ep_enable failed eptype:IN ep:%pK",
  676. __func__, port->in);
  677. goto fail;
  678. }
  679. ret = usb_ep_enable(port->out);
  680. if (ret) {
  681. pr_err("%s: usb_ep_enable failed eptype:OUT ep:%pK",
  682. __func__, port->out);
  683. usb_ep_disable(port->in);
  684. goto fail;
  685. }
  686. atomic_set(&port->connected, 1);
  687. spin_lock_irqsave(&port->tx_lock, flags);
  688. port->to_host = 0;
  689. port->rx_throttled_cnt = 0;
  690. port->rx_unthrottled_cnt = 0;
  691. port->unthrottled_pnd_skbs = 0;
  692. spin_unlock_irqrestore(&port->tx_lock, flags);
  693. spin_lock_irqsave(&port->rx_lock, flags);
  694. port->to_modem = 0;
  695. port->tomodem_drp_cnt = 0;
  696. port->tx_throttled_cnt = 0;
  697. port->tx_unthrottled_cnt = 0;
  698. spin_unlock_irqrestore(&port->rx_lock, flags);
  699. queue_work(port->wq, &port->connect_w);
  700. fail:
  701. return ret;
  702. }
#if defined(CONFIG_DEBUG_FS)
/* size of the kernel buffer used to render debugfs output */
#define DEBUG_DATA_BUF_SIZE 4096

/* 0 disables timestamp recording entirely (get_timestamp returns 0) */
static unsigned int record_timestamp;
module_param(record_timestamp, uint, S_IRUGO | S_IWUSR);

/* ring buffer of per-skb lifecycle timestamps, rwlock-protected */
static struct timestamp_buf dbg_data = {
	.idx = 0,
	.lck = __RW_LOCK_UNLOCKED(lck)
};
  711. /*get_timestamp - returns time of day in us */
  712. static unsigned int get_timestamp(void)
  713. {
  714. struct timeval tval;
  715. unsigned int stamp;
  716. if (!record_timestamp)
  717. return 0;
  718. do_gettimeofday(&tval);
  719. /* 2^32 = 4294967296. Limit to 4096s. */
  720. stamp = tval.tv_sec & 0xFFF;
  721. stamp = stamp * 1000000 + tval.tv_usec;
  722. return stamp;
  723. }
  724. static void dbg_inc(unsigned *idx)
  725. {
  726. *idx = (*idx + 1) & (DBG_DATA_MAX-1);
  727. }
  728. /**
  729. * dbg_timestamp - Stores timestamp values of a SKB life cycle
  730. * to debug buffer
  731. * @event: "DL": Downlink Data
  732. * @skb: SKB used to store timestamp values to debug buffer
  733. */
  734. static void dbg_timestamp(char *event, struct sk_buff * skb)
  735. {
  736. unsigned long flags;
  737. struct timestamp_info *info = (struct timestamp_info *)skb->cb;
  738. if (!record_timestamp)
  739. return;
  740. write_lock_irqsave(&dbg_data.lck, flags);
  741. scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG,
  742. "%pK %u[%s] %u %u %u %u %u %u\n",
  743. skb, skb->len, event, info->created, info->rx_queued,
  744. info->rx_done, info->rx_done_sent, info->tx_queued,
  745. get_timestamp());
  746. dbg_inc(&dbg_data.idx);
  747. write_unlock_irqrestore(&dbg_data.lck, flags);
  748. }
  749. /* show_timestamp: displays the timestamp buffer */
  750. static ssize_t show_timestamp(struct file *file, char __user *ubuf,
  751. size_t count, loff_t *ppos)
  752. {
  753. unsigned long flags;
  754. unsigned i;
  755. unsigned j = 0;
  756. char *buf;
  757. int ret = 0;
  758. if (!record_timestamp)
  759. return 0;
  760. buf = kzalloc(sizeof(char) * DEBUG_DATA_BUF_SIZE, GFP_KERNEL);
  761. if (!buf)
  762. return -ENOMEM;
  763. read_lock_irqsave(&dbg_data.lck, flags);
  764. i = dbg_data.idx;
  765. for (dbg_inc(&i); i != dbg_data.idx; dbg_inc(&i)) {
  766. if (!strnlen(dbg_data.buf[i], DBG_DATA_MSG))
  767. continue;
  768. j += scnprintf(buf + j, DEBUG_DATA_BUF_SIZE - j,
  769. "%s\n", dbg_data.buf[i]);
  770. }
  771. read_unlock_irqrestore(&dbg_data.lck, flags);
  772. ret = simple_read_from_buffer(ubuf, count, ppos, buf, j);
  773. kfree(buf);
  774. return ret;
  775. }
  776. const struct file_operations gdata_timestamp_ops = {
  777. .read = show_timestamp,
  778. };
/*
 * debugfs read: dump per-port uplink (under rx_lock) and downlink
 * (under tx_lock) statistics for every allocated port.
 */
static ssize_t ghsic_data_read_stats(struct file *file,
	char __user *ubuf, size_t count, loff_t *ppos)
{
	struct gdata_port *port;
	struct platform_driver *pdrv;
	char *buf;
	unsigned long flags;
	int ret;
	int i;
	int temp = 0;

	buf = kzalloc(sizeof(char) * DEBUG_DATA_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < no_data_ports; i++) {
		port = gdata_ports[i].port;
		if (!port)
			continue;
		pdrv = &gdata_ports[i].pdrv;

		/* uplink (to-modem) counters live under rx_lock */
		spin_lock_irqsave(&port->rx_lock, flags);
		temp += scnprintf(buf + temp, DEBUG_DATA_BUF_SIZE - temp,
				"\nName: %s\n"
				"#PORT:%d port#: %pK\n"
				"data_ch_open: %d\n"
				"data_ch_ready: %d\n"
				"\n******UL INFO*****\n\n"
				"dpkts_to_modem: %lu\n"
				"tomodem_drp_cnt: %u\n"
				"rx_buf_len: %u\n"
				"tx thld cnt %u\n"
				"tx unthld cnt %u\n"
				"TX_THROTTLED %d\n",
				pdrv->driver.name,
				i, port,
				test_bit(CH_OPENED, &port->bridge_sts),
				test_bit(CH_READY, &port->bridge_sts),
				port->to_modem,
				port->tomodem_drp_cnt,
				port->rx_skb_q.qlen,
				port->tx_throttled_cnt,
				port->tx_unthrottled_cnt,
				test_bit(TX_THROTTLED, &port->brdg.flags));
		spin_unlock_irqrestore(&port->rx_lock, flags);

		/* downlink (to-host) counters live under tx_lock */
		spin_lock_irqsave(&port->tx_lock, flags);
		temp += scnprintf(buf + temp, DEBUG_DATA_BUF_SIZE - temp,
				"\n******DL INFO******\n\n"
				"dpkts_to_usbhost: %lu\n"
				"tx_buf_len: %u\n"
				"rx thld cnt %u\n"
				"rx unthld cnt %u\n"
				"uthld pnd skbs %u\n"
				"RX_THROTTLED %d\n",
				port->to_host,
				port->tx_skb_q.qlen,
				port->rx_throttled_cnt,
				port->rx_unthrottled_cnt,
				port->unthrottled_pnd_skbs,
				test_bit(RX_THROTTLED, &port->brdg.flags));
		spin_unlock_irqrestore(&port->tx_lock, flags);
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}
  842. static ssize_t ghsic_data_reset_stats(struct file *file,
  843. const char __user *buf, size_t count, loff_t *ppos)
  844. {
  845. struct gdata_port *port;
  846. int i;
  847. unsigned long flags;
  848. for (i = 0; i < no_data_ports; i++) {
  849. port = gdata_ports[i].port;
  850. if (!port)
  851. continue;
  852. spin_lock_irqsave(&port->rx_lock, flags);
  853. port->to_modem = 0;
  854. port->tomodem_drp_cnt = 0;
  855. port->tx_throttled_cnt = 0;
  856. port->tx_unthrottled_cnt = 0;
  857. spin_unlock_irqrestore(&port->rx_lock, flags);
  858. spin_lock_irqsave(&port->tx_lock, flags);
  859. port->to_host = 0;
  860. port->rx_throttled_cnt = 0;
  861. port->rx_unthrottled_cnt = 0;
  862. port->unthrottled_pnd_skbs = 0;
  863. spin_unlock_irqrestore(&port->tx_lock, flags);
  864. }
  865. return count;
  866. }
  867. const struct file_operations ghsic_stats_ops = {
  868. .read = ghsic_data_read_stats,
  869. .write = ghsic_data_reset_stats,
  870. };
/* debugfs handles: the xport directory and its "status"/"timestamp" files */
static struct dentry *gdata_dent;
static struct dentry *gdata_dfile_stats;
static struct dentry *gdata_dfile_tstamp;
  874. static void ghsic_data_debugfs_init(void)
  875. {
  876. gdata_dent = debugfs_create_dir("ghsic_data_xport", 0);
  877. if (IS_ERR(gdata_dent))
  878. return;
  879. gdata_dfile_stats = debugfs_create_file("status", 0444, gdata_dent, 0,
  880. &ghsic_stats_ops);
  881. if (!gdata_dfile_stats || IS_ERR(gdata_dfile_stats)) {
  882. debugfs_remove(gdata_dent);
  883. return;
  884. }
  885. gdata_dfile_tstamp = debugfs_create_file("timestamp", 0644, gdata_dent,
  886. 0, &gdata_timestamp_ops);
  887. if (!gdata_dfile_tstamp || IS_ERR(gdata_dfile_tstamp))
  888. debugfs_remove(gdata_dent);
  889. }
  890. static void ghsic_data_debugfs_exit(void)
  891. {
  892. debugfs_remove(gdata_dfile_stats);
  893. debugfs_remove(gdata_dfile_tstamp);
  894. debugfs_remove(gdata_dent);
  895. }
  896. #else
  897. static void ghsic_data_debugfs_init(void) { }
  898. static void ghsic_data_debugfs_exit(void) { }
  899. static void dbg_timestamp(char *event, struct sk_buff * skb)
  900. {
  901. return;
  902. }
  903. static unsigned int get_timestamp(void)
  904. {
  905. return 0;
  906. }
  907. #endif
  908. /*portname will be used to find the bridge channel index*/
  909. void ghsic_data_set_port_name(const char *name, const char *xport_type)
  910. {
  911. static unsigned int port_num;
  912. if (port_num >= NUM_PORTS) {
  913. pr_err("%s: setting xport name for invalid port num %d\n",
  914. __func__, port_num);
  915. return;
  916. }
  917. /*if no xport name is passed set it to xport type e.g. hsic*/
  918. if (!name)
  919. strlcpy(gdata_ports[port_num].port_name, xport_type,
  920. BRIDGE_NAME_MAX_LEN);
  921. else
  922. strlcpy(gdata_ports[port_num].port_name, name,
  923. BRIDGE_NAME_MAX_LEN);
  924. /*append _data to get data bridge name: e.g. serial_hsic_data*/
  925. strlcat(gdata_ports[port_num].port_name, "_data", BRIDGE_NAME_MAX_LEN);
  926. port_num++;
  927. }
  928. int ghsic_data_setup(unsigned num_ports, enum gadget_type gtype)
  929. {
  930. int first_port_id = no_data_ports;
  931. int total_num_ports = num_ports + no_data_ports;
  932. int ret = 0;
  933. int i;
  934. if (!num_ports || total_num_ports > NUM_PORTS) {
  935. pr_err("%s: Invalid num of ports count:%d\n",
  936. __func__, num_ports);
  937. return -EINVAL;
  938. }
  939. pr_debug("%s: count: %d\n", __func__, num_ports);
  940. for (i = first_port_id; i < (num_ports + first_port_id); i++) {
  941. /*probe can be called while port_alloc,so update no_data_ports*/
  942. no_data_ports++;
  943. ret = ghsic_data_port_alloc(i, gtype);
  944. if (ret) {
  945. no_data_ports--;
  946. pr_err("%s: Unable to alloc port:%d\n", __func__, i);
  947. goto free_ports;
  948. }
  949. }
  950. /*return the starting index*/
  951. return first_port_id;
  952. free_ports:
  953. for (i = first_port_id; i < no_data_ports; i++)
  954. ghsic_data_port_free(i);
  955. no_data_ports = first_port_id;
  956. return ret;
  957. }
/* module init: only sets up the debugfs interface */
static int __init ghsic_data_init(void)
{
	ghsic_data_debugfs_init();
	return 0;
}
module_init(ghsic_data_init);

/* module exit: tears down the debugfs interface */
static void __exit ghsic_data_exit(void)
{
	ghsic_data_debugfs_exit();
}
module_exit(ghsic_data_exit);

MODULE_DESCRIPTION("hsic data xport driver");
MODULE_LICENSE("GPL v2");