/* linux/drivers/net/msm_rmnet.c
 *
 * Virtual Ethernet Interface for MSM7K Networking
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2010-2012, 2014, The Linux Foundation. All rights reserved.
 * Author: Brian Swetland <swetland@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/platform_device.h>
#include <linux/if_arp.h>
#include <linux/msm_rmnet.h>
#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif
#include <mach/msm_smd.h>
#include <mach/subsystem_restart.h>

/* Debug message support */
static int msm_rmnet_debug_mask;
module_param_named(debug_enable, msm_rmnet_debug_mask,
                   int, S_IRUGO | S_IWUSR | S_IWGRP);
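
/*
 * Each DEBUG_MASK_LVL* bit below enables one verbosity level; the mask
 * can be changed at runtime through the writable "debug_enable" module
 * parameter in sysfs.
 */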
#define DEBUG_MASK_LVL0 (1U << 0)
#define DEBUG_MASK_LVL1 (1U << 1)
#define DEBUG_MASK_LVL2 (1U << 2)

#define DBG(m, x...) do {             \
    if (msm_rmnet_debug_mask & m)     \
        pr_info(x);                   \
} while (0)

#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)

/* Configure device instances */
#define RMNET_DEVICE_COUNT (8)
static const char *ch_name[RMNET_DEVICE_COUNT] = {
    "DATA5",
    "DATA6",
    "DATA7",
    "DATA8",
    "DATA9",
    "DATA12",
    "DATA13",
    "DATA14",
};

/* XXX should come from smd headers */
#define SMD_PORT_ETHER0 11

/* allow larger frames */
#define RMNET_DATA_LEN 2000
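
/*
 * Reserved as needed_headroom on every device so that, in QoS mode,
 * _rmnet_xmit() can push the QMI QoS header without reallocating the skb.
 */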
#define HEADROOM_FOR_QOS 8

static struct completion *port_complete[RMNET_DEVICE_COUNT];

struct rmnet_private {
    smd_channel_t *ch;
    struct net_device_stats stats;
    const char *chname;
    struct wake_lock wake_lock;
#ifdef CONFIG_MSM_RMNET_DEBUG
    ktime_t last_packet;
    unsigned long wakeups_xmit;
    unsigned long wakeups_rcv;
    unsigned long timeout_us;
#endif
    struct sk_buff *skb;
    spinlock_t lock;
    struct tasklet_struct tsklt;
    struct tasklet_struct rx_tasklet;
    u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
    struct platform_driver pdrv;
    struct completion complete;
    void *pil;
    struct mutex pil_lock;
};

static uint msm_rmnet_modem_wait;
module_param_named(modem_wait, msm_rmnet_modem_wait,
                   uint, S_IRUGO | S_IWUSR | S_IWGRP);

/* Forward declaration */
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
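
/* ARP frames are not counted in the interface statistics. */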
static int count_this_packet(void *_hdr, int len)
{
    struct ethhdr *hdr = _hdr;

    if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
        return 0;

    return 1;
}

#ifdef CONFIG_MSM_RMNET_DEBUG
static unsigned long timeout_us;

#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * If early suspend is enabled then we specify two timeout values: one
 * while the screen is on (the default), and one while the screen is off.
 */
static unsigned long timeout_suspend_us;
static struct device *rmnet0;

/* Set timeout in us when the screen is off. */
static ssize_t timeout_suspend_store(struct device *d,
                                     struct device_attribute *attr,
                                     const char *buf, size_t n)
{
    timeout_suspend_us = simple_strtoul(buf, NULL, 10);
    return n;
}

static ssize_t timeout_suspend_show(struct device *d,
                                    struct device_attribute *attr,
                                    char *buf)
{
    return snprintf(buf, PAGE_SIZE, "%lu\n",
                    (unsigned long) timeout_suspend_us);
}
static DEVICE_ATTR(timeout_suspend, 0664,
                   timeout_suspend_show, timeout_suspend_store);

static void rmnet_early_suspend(struct early_suspend *handler)
{
    if (rmnet0) {
        struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));

        p->timeout_us = timeout_suspend_us;
    }
}

static void rmnet_late_resume(struct early_suspend *handler)
{
    if (rmnet0) {
        struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));

        p->timeout_us = timeout_us;
    }
}

static struct early_suspend rmnet_power_suspend = {
    .suspend = rmnet_early_suspend,
    .resume = rmnet_late_resume,
};

static int __init rmnet_late_init(void)
{
    register_early_suspend(&rmnet_power_suspend);
    return 0;
}
late_initcall(rmnet_late_init);
#endif

/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
static int rmnet_cause_wakeup(struct rmnet_private *p)
{
    int ret = 0;
    ktime_t now;

    if (p->timeout_us == 0) /* Check if disabled */
        return 0;

    /* Use real (wall) time. */
    now = ktime_get_real();
    if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
        ret = 1;

    p->last_packet = now;
    return ret;
}

static ssize_t wakeups_xmit_show(struct device *d,
                                 struct device_attribute *attr,
                                 char *buf)
{
    struct rmnet_private *p = netdev_priv(to_net_dev(d));

    return snprintf(buf, PAGE_SIZE, "%lu\n", p->wakeups_xmit);
}
DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);

static ssize_t wakeups_rcv_show(struct device *d,
                                struct device_attribute *attr,
                                char *buf)
{
    struct rmnet_private *p = netdev_priv(to_net_dev(d));

    return snprintf(buf, PAGE_SIZE, "%lu\n", p->wakeups_rcv);
}
DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);

/* Set timeout in us. */
static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
                             const char *buf, size_t n)
{
#ifndef CONFIG_HAS_EARLYSUSPEND
    struct rmnet_private *p = netdev_priv(to_net_dev(d));

    p->timeout_us = timeout_us = simple_strtoul(buf, NULL, 10);
#else
    /* If using early suspend/resume hooks do not write the value on store. */
    timeout_us = simple_strtoul(buf, NULL, 10);
#endif
    return n;
}

static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
                            char *buf)
{
    return snprintf(buf, PAGE_SIZE, "%lu\n", timeout_us);
}
DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
#endif
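
/*
 * In raw-IP mode there is no link-layer header, so the L3 protocol is
 * inferred from the IP version nibble of the first payload byte.
 */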
static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
    __be16 protocol = 0;

    skb->dev = dev;

    /* Determine L3 protocol */
    switch (skb->data[0] & 0xf0) {
    case 0x40:
        protocol = htons(ETH_P_IP);
        break;
    case 0x60:
        protocol = htons(ETH_P_IPV6);
        break;
    default:
        pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
               dev->name, skb->data[0] & 0xf0);
        /* skb will be dropped in upper layer for unknown protocol */
    }
    return protocol;
}
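
/*
 * Rx tasklet, runs in soft-irq context: drains every complete packet
 * buffered in the SMD channel, wraps each one in an skb and hands it to
 * the network stack via netif_rx().
 */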
static void smd_net_data_handler(unsigned long arg)
{
    struct net_device *dev = (struct net_device *) arg;
    struct rmnet_private *p = netdev_priv(dev);
    struct sk_buff *skb;
    void *ptr = 0;
    int sz;
    u32 opmode = p->operation_mode;
    unsigned long flags;

    for (;;) {
        sz = smd_cur_packet_size(p->ch);
        if (sz == 0)
            break;
        if (smd_read_avail(p->ch) < sz)
            break;

        skb = dev_alloc_skb(sz + NET_IP_ALIGN);
        if (skb == NULL) {
            pr_err("[%s] rmnet_recv() cannot allocate skb\n",
                   dev->name);
            /* out of memory, reschedule a later attempt */
            p->rx_tasklet.data = (unsigned long)dev;
            tasklet_schedule(&p->rx_tasklet);
            break;
        }

        skb->dev = dev;
        skb_reserve(skb, NET_IP_ALIGN);
        ptr = skb_put(skb, sz);
        wake_lock_timeout(&p->wake_lock, HZ / 2);
        if (smd_read(p->ch, ptr, sz) != sz) {
            pr_err("[%s] rmnet_recv() smd lied about avail?!",
                   dev->name);
            ptr = 0;
            dev_kfree_skb_irq(skb);
            continue;
        }

        /* Handle Rx frame format */
        spin_lock_irqsave(&p->lock, flags);
        opmode = p->operation_mode;
        spin_unlock_irqrestore(&p->lock, flags);

        if (RMNET_IS_MODE_IP(opmode)) {
            /* Driver in IP mode */
            skb->protocol = rmnet_ip_type_trans(skb, dev);
        } else {
            /* Driver in Ethernet mode */
            skb->protocol = eth_type_trans(skb, dev);
        }
        if (RMNET_IS_MODE_IP(opmode) ||
            count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
            p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
            p->stats.rx_packets++;
            p->stats.rx_bytes += skb->len;
        }
        DBG1("[%s] Rx packet #%lu len=%d\n",
             dev->name, p->stats.rx_packets, skb->len);

        /* Deliver to network stack */
        netif_rx(skb);
    }
}

static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rmnet_private *p = netdev_priv(dev);
    smd_channel_t *ch = p->ch;
    int smd_ret;
    struct QMI_QOS_HDR_S *qmih;
    u32 opmode;
    unsigned long flags;

    /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
    spin_lock_irqsave(&p->lock, flags);
    opmode = p->operation_mode;
    spin_unlock_irqrestore(&p->lock, flags);

    if (RMNET_IS_MODE_QOS(opmode)) {
        qmih = (struct QMI_QOS_HDR_S *)
            skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
        qmih->version = 1;
        qmih->flags = 0;
        qmih->flow_id = skb->mark;
    }

    dev->trans_start = jiffies;
    smd_ret = smd_write(ch, skb->data, skb->len);
    if (smd_ret != skb->len) {
        pr_err("[%s] %s: smd_write returned error %d",
               dev->name, __func__, smd_ret);
        p->stats.tx_errors++;
        goto xmit_out;
    }

    if (RMNET_IS_MODE_IP(opmode) ||
        count_this_packet(skb->data, skb->len)) {
        p->stats.tx_packets++;
        p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
        p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
    }
    DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
         dev->name, p->stats.tx_packets, skb->len, skb->mark);

xmit_out:
    /* data transmitted, safe to release skb */
    dev_kfree_skb_irq(skb);
    return 0;
}
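
/*
 * Tx-resume tasklet: once smd_net_notify() sees enough write space for
 * the deferred skb, this retries the transmit and wakes the queue.
 */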
static void _rmnet_resume_flow(unsigned long param)
{
    struct net_device *dev = (struct net_device *)param;
    struct rmnet_private *p = netdev_priv(dev);
    struct sk_buff *skb = NULL;
    unsigned long flags;

    /* xmit and enable the flow only once even if
       multiple tasklets were scheduled by smd_net_notify */
    spin_lock_irqsave(&p->lock, flags);
    if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
        skb = p->skb;
        p->skb = NULL;
        spin_unlock_irqrestore(&p->lock, flags);
        _rmnet_xmit(skb, dev);
        netif_wake_queue(dev);
    } else {
        spin_unlock_irqrestore(&p->lock, flags);
    }
}

static void msm_rmnet_unload_modem(void *pil)
{
    if (pil)
        subsystem_put(pil);
}
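
/*
 * Bring the modem up through the subsystem-restart (PIL) API and, if
 * modem_wait is set, block until the matching SMD platform device probes
 * (signalled via p->complete in msm_rmnet_smd_probe()).
 */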
static void *msm_rmnet_load_modem(struct net_device *dev)
{
    void *pil;
    int rc;
    struct rmnet_private *p = netdev_priv(dev);

    pil = subsystem_get("modem");
    if (IS_ERR(pil))
        pr_err("[%s] %s: modem load failed\n",
               dev->name, __func__);
    else if (msm_rmnet_modem_wait) {
        rc = wait_for_completion_interruptible_timeout(
            &p->complete,
            msecs_to_jiffies(msm_rmnet_modem_wait * 1000));
        if (!rc)
            rc = -ETIMEDOUT;
        if (rc < 0) {
            pr_err("[%s] %s: wait for rmnet port failed %d\n",
                   dev->name, __func__, rc);
            msm_rmnet_unload_modem(pil);
            pil = ERR_PTR(rc);
        }
    }

    return pil;
}
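
/*
 * SMD channel event callback: DATA events kick the Tx-resume and Rx
 * tasklets as needed, while OPEN/CLOSE toggle the carrier state.
 */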
static void smd_net_notify(void *_dev, unsigned event)
{
    struct rmnet_private *p = netdev_priv((struct net_device *)_dev);

    switch (event) {
    case SMD_EVENT_DATA:
        spin_lock(&p->lock);
        if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
            smd_disable_read_intr(p->ch);
            tasklet_hi_schedule(&p->tsklt);
        }
        spin_unlock(&p->lock);

        if (smd_read_avail(p->ch) &&
            (smd_read_avail(p->ch) >= smd_cur_packet_size(p->ch))) {
            p->rx_tasklet.data = (unsigned long) _dev;
            tasklet_schedule(&p->rx_tasklet);
        }
        break;

    case SMD_EVENT_OPEN:
        DBG0("%s: opening SMD port\n", __func__);
        netif_carrier_on(_dev);
        if (netif_queue_stopped(_dev)) {
            DBG0("%s: re-starting if queue\n", __func__);
            netif_wake_queue(_dev);
        }
        break;

    case SMD_EVENT_CLOSE:
        DBG0("%s: closing SMD port\n", __func__);
        netif_carrier_off(_dev);
        break;
    }
}
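
/*
 * Load the modem on first open, then open the SMD data channel for this
 * device.  The SMD read interrupt is left disabled here; rmnet_xmit()
 * re-enables it only while a packet is parked waiting for write space.
 */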
static int __rmnet_open(struct net_device *dev)
{
    int r;
    void *pil;
    struct rmnet_private *p = netdev_priv(dev);

    mutex_lock(&p->pil_lock);
    if (!p->pil) {
        pil = msm_rmnet_load_modem(dev);
        if (IS_ERR(pil)) {
            mutex_unlock(&p->pil_lock);
            return PTR_ERR(pil);
        }
        p->pil = pil;
    }
    mutex_unlock(&p->pil_lock);

    if (!p->ch) {
        r = smd_open(p->chname, &p->ch, dev, smd_net_notify);
        if (r < 0)
            return -ENODEV;
    }

    smd_disable_read_intr(p->ch);
    return 0;
}

static int __rmnet_close(struct net_device *dev)
{
    struct rmnet_private *p = netdev_priv(dev);

    if (p->ch)
        return 0;
    else
        return -EBADF;
}

static int rmnet_open(struct net_device *dev)
{
    int rc = 0;

    DBG0("[%s] rmnet_open()\n", dev->name);

    rc = __rmnet_open(dev);
    if (rc == 0)
        netif_start_queue(dev);

    return rc;
}

static int rmnet_stop(struct net_device *dev)
{
    DBG0("[%s] rmnet_stop()\n", dev->name);

    netif_stop_queue(dev);

    /* TODO: unload the modem safely; as written this causes
     * unnecessary unloads.
     *
     * mutex_lock(&p->pil_lock);
     * msm_rmnet_unload_modem(p->pil);
     * p->pil = NULL;
     * mutex_unlock(&p->pil_lock);
     */

    return 0;
}

static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
{
    if (new_mtu < 0 || new_mtu > RMNET_DATA_LEN)
        return -EINVAL;

    DBG0("[%s] MTU change: old=%d new=%d\n",
         dev->name, dev->mtu, new_mtu);
    dev->mtu = new_mtu;

    return 0;
}
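
/*
 * ndo_start_xmit: if the SMD channel lacks write room, park the skb in
 * p->skb and stop the queue; smd_net_notify() later schedules
 * _rmnet_resume_flow() to retry once space frees up.
 */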
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rmnet_private *p = netdev_priv(dev);
    smd_channel_t *ch = p->ch;
    unsigned long flags;

    if (netif_queue_stopped(dev)) {
        pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped",
               dev->name);
        return 0;
    }

    spin_lock_irqsave(&p->lock, flags);
    smd_enable_read_intr(ch);
    if (smd_write_avail(ch) < skb->len) {
        netif_stop_queue(dev);
        p->skb = skb;
        spin_unlock_irqrestore(&p->lock, flags);
        return 0;
    }
    smd_disable_read_intr(ch);
    spin_unlock_irqrestore(&p->lock, flags);

    _rmnet_xmit(skb, dev);

    return 0;
}

static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
{
    struct rmnet_private *p = netdev_priv(dev);
    return &p->stats;
}

static void rmnet_set_multicast_list(struct net_device *dev)
{
}

static void rmnet_tx_timeout(struct net_device *dev)
{
    pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
}

static const struct net_device_ops rmnet_ops_ether = {
    .ndo_open = rmnet_open,
    .ndo_stop = rmnet_stop,
    .ndo_start_xmit = rmnet_xmit,
    .ndo_get_stats = rmnet_get_stats,
    .ndo_set_rx_mode = rmnet_set_multicast_list,
    .ndo_tx_timeout = rmnet_tx_timeout,
    .ndo_do_ioctl = rmnet_ioctl,
    .ndo_change_mtu = rmnet_change_mtu,
    .ndo_set_mac_address = eth_mac_addr,
    .ndo_validate_addr = eth_validate_addr,
};

static const struct net_device_ops rmnet_ops_ip = {
    .ndo_open = rmnet_open,
    .ndo_stop = rmnet_stop,
    .ndo_start_xmit = rmnet_xmit,
    .ndo_get_stats = rmnet_get_stats,
    .ndo_set_rx_mode = rmnet_set_multicast_list,
    .ndo_tx_timeout = rmnet_tx_timeout,
    .ndo_do_ioctl = rmnet_ioctl,
    .ndo_change_mtu = rmnet_change_mtu,
    .ndo_set_mac_address = NULL,
    .ndo_validate_addr = NULL,
};
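
/*
 * Handle RMNET_IOCTL_* commands: switch the link protocol between
 * Ethernet and raw IP, toggle the QMI QoS header, report the current
 * operating mode, and open/close the transport port.
 */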
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    struct rmnet_private *p = netdev_priv(dev);
    u32 old_opmode = p->operation_mode;
    unsigned long flags;
    int prev_mtu = dev->mtu;
    int rc = 0;

    /* Process IOCTL command */
    switch (cmd) {
    case RMNET_IOCTL_SET_LLP_ETHERNET:  /* Set Ethernet protocol */
        /* Perform Ethernet config only if in IP mode currently */
        if (p->operation_mode & RMNET_MODE_LLP_IP) {
            ether_setup(dev);
            random_ether_addr(dev->dev_addr);
            dev->mtu = prev_mtu;
            dev->netdev_ops = &rmnet_ops_ether;

            spin_lock_irqsave(&p->lock, flags);
            p->operation_mode &= ~RMNET_MODE_LLP_IP;
            p->operation_mode |= RMNET_MODE_LLP_ETH;
            spin_unlock_irqrestore(&p->lock, flags);
            DBG0("[%s] rmnet_ioctl(): set Ethernet protocol mode\n",
                 dev->name);
        }
        break;

    case RMNET_IOCTL_SET_LLP_IP:        /* Set RAWIP protocol */
        /* Perform IP config only if in Ethernet mode currently */
        if (p->operation_mode & RMNET_MODE_LLP_ETH) {
            /* Undo config done in ether_setup() */
            dev->header_ops = NULL;  /* No header */
            dev->type = ARPHRD_RAWIP;
            dev->hard_header_len = 0;
            dev->mtu = prev_mtu;
            dev->addr_len = 0;
            dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
            dev->netdev_ops = &rmnet_ops_ip;

            spin_lock_irqsave(&p->lock, flags);
            p->operation_mode &= ~RMNET_MODE_LLP_ETH;
            p->operation_mode |= RMNET_MODE_LLP_IP;
            spin_unlock_irqrestore(&p->lock, flags);
            DBG0("[%s] rmnet_ioctl(): set IP protocol mode\n",
                 dev->name);
        }
        break;

    case RMNET_IOCTL_GET_LLP:           /* Get link protocol state */
        ifr->ifr_ifru.ifru_data =
            (void *)(p->operation_mode &
                     (RMNET_MODE_LLP_ETH | RMNET_MODE_LLP_IP));
        break;

    case RMNET_IOCTL_SET_QOS_ENABLE:    /* Set QoS header enabled */
        spin_lock_irqsave(&p->lock, flags);
        p->operation_mode |= RMNET_MODE_QOS;
        spin_unlock_irqrestore(&p->lock, flags);
        DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
             dev->name);
        break;

    case RMNET_IOCTL_SET_QOS_DISABLE:   /* Set QoS header disabled */
        spin_lock_irqsave(&p->lock, flags);
        p->operation_mode &= ~RMNET_MODE_QOS;
        spin_unlock_irqrestore(&p->lock, flags);
        DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
             dev->name);
        break;

    case RMNET_IOCTL_GET_QOS:           /* Get QoS header state */
        ifr->ifr_ifru.ifru_data =
            (void *)(p->operation_mode & RMNET_MODE_QOS);
        break;

    case RMNET_IOCTL_GET_OPMODE:        /* Get operation mode */
        ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
        break;

    case RMNET_IOCTL_OPEN:              /* Open transport port */
        rc = __rmnet_open(dev);
        DBG0("[%s] rmnet_ioctl(): open transport port\n",
             dev->name);
        break;

    case RMNET_IOCTL_CLOSE:             /* Close transport port */
        rc = __rmnet_close(dev);
        DBG0("[%s] rmnet_ioctl(): close transport port\n",
             dev->name);
        break;

    default:
        DBG0("[%s] error: rmnet_ioctl called for unsupported cmd[0x%x]\n",
             dev->name, cmd);
        return -EINVAL;
    }

    DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
         dev->name, __func__, cmd, old_opmode, p->operation_mode);

    return rc;
}

static void __init rmnet_setup(struct net_device *dev)
{
    /* Using Ethernet mode by default */
    dev->netdev_ops = &rmnet_ops_ether;
    ether_setup(dev);

    /* set this after calling ether_setup */
    dev->mtu = RMNET_DATA_LEN;
    dev->needed_headroom = HEADROOM_FOR_QOS;

    random_ether_addr(dev->dev_addr);

    dev->watchdog_timeo = 1000; /* 1000 jiffies: 10 seconds at HZ=100 */
}

static int msm_rmnet_smd_probe(struct platform_device *pdev)
{
    int i;

    for (i = 0; i < RMNET_DEVICE_COUNT; i++)
        if (!strcmp(pdev->name, ch_name[i])) {
            complete_all(port_complete[i]);
            break;
        }

    return 0;
}
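
/*
 * Create one rmnet%d net_device per SMD data channel.  A small platform
 * driver is registered per channel; its probe completes port_complete[n]
 * so msm_rmnet_load_modem() can wait for the port to come up.  Debug
 * sysfs attributes are created when CONFIG_MSM_RMNET_DEBUG is set.
 */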
static int __init rmnet_init(void)
{
    int ret;
    struct device *d;
    struct net_device *dev;
    struct rmnet_private *p;
    unsigned n;

    pr_info("%s: SMD devices[%d]\n", __func__, RMNET_DEVICE_COUNT);

#ifdef CONFIG_MSM_RMNET_DEBUG
    timeout_us = 0;
#ifdef CONFIG_HAS_EARLYSUSPEND
    timeout_suspend_us = 0;
#endif
#endif

    for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
        dev = alloc_netdev(sizeof(struct rmnet_private),
                           "rmnet%d", rmnet_setup);
        if (!dev)
            return -ENOMEM;

        d = &(dev->dev);
        p = netdev_priv(dev);
        p->chname = ch_name[n];
        /* Initial config uses Ethernet */
        p->operation_mode = RMNET_MODE_LLP_ETH;
        p->skb = NULL;
        spin_lock_init(&p->lock);
        tasklet_init(&p->tsklt, _rmnet_resume_flow,
                     (unsigned long)dev);
        tasklet_init(&p->rx_tasklet, smd_net_data_handler,
                     (unsigned long)dev);
        wake_lock_init(&p->wake_lock, WAKE_LOCK_SUSPEND, ch_name[n]);
#ifdef CONFIG_MSM_RMNET_DEBUG
        p->timeout_us = timeout_us;
        p->wakeups_xmit = p->wakeups_rcv = 0;
#endif

        init_completion(&p->complete);
        port_complete[n] = &p->complete;
        mutex_init(&p->pil_lock);
        p->pdrv.probe = msm_rmnet_smd_probe;
        p->pdrv.driver.name = ch_name[n];
        p->pdrv.driver.owner = THIS_MODULE;
        ret = platform_driver_register(&p->pdrv);
        if (ret) {
            free_netdev(dev);
            return ret;
        }

        ret = register_netdev(dev);
        if (ret) {
            platform_driver_unregister(&p->pdrv);
            free_netdev(dev);
            return ret;
        }

#ifdef CONFIG_MSM_RMNET_DEBUG
        if (device_create_file(d, &dev_attr_timeout))
            continue;
        if (device_create_file(d, &dev_attr_wakeups_xmit))
            continue;
        if (device_create_file(d, &dev_attr_wakeups_rcv))
            continue;
#ifdef CONFIG_HAS_EARLYSUSPEND
        if (device_create_file(d, &dev_attr_timeout_suspend))
            continue;

        /* Only care about rmnet0 for suspend/resume timeout hooks. */
        if (n == 0)
            rmnet0 = d;
#endif
#endif
    }
    return 0;
}

module_init(rmnet_init);