caif_shmcore.c

/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
 *          Daniel Martensson / daniel.martensson@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>

#include <net/caif/caif_device.h>
#include <net/caif/caif_shm.h>

#define NR_TX_BUF 6
#define NR_RX_BUF 6

#define TX_BUF_SZ 0x2000
#define RX_BUF_SZ 0x2000

#define CAIF_NEEDED_HEADROOM 32

#define CAIF_FLOW_ON 1
#define CAIF_FLOW_OFF 0

#define LOW_WATERMARK 3
#define HIGH_WATERMARK 4

/* Maximum number of CAIF buffers per shared memory buffer. */
#define SHM_MAX_FRMS_PER_BUF 10

/*
 * Size in bytes of the descriptor area
 * (With end of descriptor signalling)
 */
#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \
				sizeof(struct shm_pck_desc))

/*
 * Offset to the first CAIF frame within a shared memory buffer.
 * Aligned on 32 bytes.
 */
#define SHM_CAIF_FRM_OFS (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))

/* Number of bytes for CAIF shared memory header. */
#define SHM_HDR_LEN 1

/* Number of padding bytes for the complete CAIF frame. */
#define SHM_FRM_PAD_LEN 4

#define CAIF_MAX_MTU 4096

#define SHM_SET_FULL(x)	(((x+1) & 0x0F) << 0)
#define SHM_GET_FULL(x)	(((x >> 0) & 0x0F) - 1)

#define SHM_SET_EMPTY(x)	(((x+1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x)	(((x >> 4) & 0x0F) - 1)

#define SHM_FULL_MASK		(0x0F << 0)
#define SHM_EMPTY_MASK		(0x0F << 4)

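/*
 * Mailbox message layout, as encoded/decoded by the macros above:
 * bits 0-3 carry (index + 1) of a buffer that has been filled with
 * CAIF frames, bits 4-7 carry (index + 1) of a buffer that has been
 * emptied. A zero nibble means "no buffer signalled", which is why
 * the index is biased by one.
 */
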
struct shm_pck_desc {
	/*
	 * Offset from start of shared memory area to start of
	 * shared memory CAIF frame.
	 */
	u32 frm_ofs;
	u32 frm_len;
};

struct buf_list {
	unsigned char *desc_vptr;
	u32 phy_addr;
	u32 index;
	u32 len;
	u32 frames;
	u32 frm_ofs;
	struct list_head list;
};

struct shm_caif_frm {
	/* Number of bytes of padding before the CAIF frame. */
	u8 hdr_ofs;
};

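/*
 * Layout of one frame inside a shared memory buffer: a single
 * SHM_HDR_LEN byte holding hdr_ofs, then hdr_ofs padding bytes, then
 * the CAIF frame itself, followed by tail padding up to a multiple of
 * SHM_FRM_PAD_LEN. The descriptor area at the start of the buffer
 * records the offset and length of each such frame.
 */
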
struct shmdrv_layer {
	/* caif_dev_common must always be first in the structure. */
	struct caif_dev_common cfdev;

	u32 shm_tx_addr;
	u32 shm_rx_addr;
	u32 shm_base_addr;
	u32 tx_empty_available;
	spinlock_t lock;

	struct list_head tx_empty_list;
	struct list_head tx_pend_list;
	struct list_head tx_full_list;
	struct list_head rx_empty_list;
	struct list_head rx_pend_list;
	struct list_head rx_full_list;

	struct workqueue_struct *pshm_tx_workqueue;
	struct workqueue_struct *pshm_rx_workqueue;

	struct work_struct shm_tx_work;
	struct work_struct shm_rx_work;

	struct sk_buff_head sk_qhead;

	struct shmdev_layer *pshm_dev;
};

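/*
 * Buffer life cycle: TX buffers move from tx_empty_list to
 * tx_full_list when handed over to the modem, and back to
 * tx_empty_list when the modem reports them emptied through the
 * mailbox. RX buffers move from rx_empty_list to rx_full_list when
 * the modem reports them filled, to rx_pend_list once their frames
 * have been passed up the stack, and back to rx_empty_list when the
 * modem has been told the buffer may be reused.
 */
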
static int shm_netdev_open(struct net_device *shm_netdev)
{
	netif_wake_queue(shm_netdev);
	return 0;
}

static int shm_netdev_close(struct net_device *shm_netdev)
{
	netif_stop_queue(shm_netdev);
	return 0;
}

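/*
 * Mailbox callback, registered with the shared memory device in
 * caif_shmcore_probe(). The modem signals through the mailbox that it
 * has filled an RX buffer and/or emptied a TX buffer; this handler
 * only moves the affected buffers between lists, handles flow-on and
 * defers the actual frame processing to the work queues. The locking
 * and comments below assume it may be called in IRQ context.
 */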
int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
{
	struct buf_list *pbuf;
	struct shmdrv_layer *pshm_drv;
	struct list_head *pos;
	u32 avail_emptybuff = 0;
	unsigned long flags = 0;

	pshm_drv = priv;

	/* Check for received buffers. */
	if (mbx_msg & SHM_FULL_MASK) {
		int idx;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check whether we have any outstanding buffers. */
		if (list_empty(&pshm_drv->rx_empty_list)) {

			/* Release spin lock. */
			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* We print even in IRQ context... */
			pr_warn("No empty Rx buffers to fill: "
					"mbx_msg:%x\n", mbx_msg);

			/* Bail out. */
			goto err_sync;
		}

		pbuf =
			list_entry(pshm_drv->rx_empty_list.next,
					struct buf_list, list);
		idx = pbuf->index;

		/* Check buffer synchronization. */
		if (idx != SHM_GET_FULL(mbx_msg)) {

			/* We print even in IRQ context... */
			pr_warn("phyif_shm_mbx_msg_cb: RX full out of sync:"
					" idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
					idx, mbx_msg, SHM_GET_FULL(mbx_msg));

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* Bail out. */
			goto err_sync;
		}

		list_del_init(&pbuf->list);
		list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);

		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		/* Schedule RX work queue. */
		if (!work_pending(&pshm_drv->shm_rx_work))
			queue_work(pshm_drv->pshm_rx_workqueue,
					&pshm_drv->shm_rx_work);
	}

	/* Check for emptied buffers. */
	if (mbx_msg & SHM_EMPTY_MASK) {
		int idx;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check whether we have any outstanding buffers. */
		if (list_empty(&pshm_drv->tx_full_list)) {

			/* We print even in IRQ context... */
			pr_warn("No TX to empty: msg:%x\n", mbx_msg);

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* Bail out. */
			goto err_sync;
		}

		pbuf =
			list_entry(pshm_drv->tx_full_list.next,
					struct buf_list, list);
		idx = pbuf->index;

		/* Check buffer synchronization. */
		if (idx != SHM_GET_EMPTY(mbx_msg)) {

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* We print even in IRQ context... */
			pr_warn("TX empty "
					"out of sync:idx:%d, msg:%x\n", idx, mbx_msg);

			/* Bail out. */
			goto err_sync;
		}

		list_del_init(&pbuf->list);

		/* Reset buffer parameters. */
		pbuf->frames = 0;
		pbuf->frm_ofs = SHM_CAIF_FRM_OFS;

		list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);

		/* Check the available no. of buffers in the empty list. */
		list_for_each(pos, &pshm_drv->tx_empty_list)
			avail_emptybuff++;

		/* Check whether we have to wake up the transmitter. */
		if ((avail_emptybuff > HIGH_WATERMARK) &&
				(!pshm_drv->tx_empty_available)) {
			pshm_drv->tx_empty_available = 1;
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			pshm_drv->cfdev.flowctrl
					(pshm_drv->pshm_dev->pshm_netdev,
					CAIF_FLOW_ON);

			/* Schedule the work queue, if required. */
			if (!work_pending(&pshm_drv->shm_tx_work))
				queue_work(pshm_drv->pshm_tx_workqueue,
						&pshm_drv->shm_tx_work);

		} else
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
	}

	return 0;

err_sync:
	return -EIO;
}

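/*
 * RX work function: drains rx_full_list, walks the packet descriptor
 * area of each buffer, copies every CAIF frame into a freshly
 * allocated skb and pushes it up the network stack, then parks the
 * buffer on rx_pend_list so the TX work function can acknowledge it
 * back to the modem.
 */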
static void shm_rx_work_func(struct work_struct *rx_work)
{
	struct shmdrv_layer *pshm_drv;
	struct buf_list *pbuf;
	unsigned long flags = 0;
	struct sk_buff *skb;
	char *p;
	int ret;

	pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);

	while (1) {

		struct shm_pck_desc *pck_desc;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check for received buffers. */
		if (list_empty(&pshm_drv->rx_full_list)) {
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			break;
		}

		pbuf =
			list_entry(pshm_drv->rx_full_list.next, struct buf_list,
					list);
		list_del_init(&pbuf->list);
		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		/* Retrieve pointer to start of the packet descriptor area. */
		pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;

		/*
		 * Check whether descriptor contains a CAIF shared memory
		 * frame.
		 */
		while (pck_desc->frm_ofs) {
			unsigned int frm_buf_ofs;
			unsigned int frm_pck_ofs;
			unsigned int frm_pck_len;

			/*
			 * Check whether offset is within buffer limits
			 * (lower).
			 */
			if (pck_desc->frm_ofs <
					(pbuf->phy_addr - pshm_drv->shm_base_addr))
				break;
			/*
			 * Check whether offset is within buffer limits
			 * (higher).
			 */
			if (pck_desc->frm_ofs >
					((pbuf->phy_addr - pshm_drv->shm_base_addr) +
					pbuf->len))
				break;

			/* Calculate offset from start of buffer. */
			frm_buf_ofs =
				pck_desc->frm_ofs - (pbuf->phy_addr -
						pshm_drv->shm_base_addr);

			/*
			 * Calculate offset and length of CAIF packet while
			 * taking care of the shared memory header.
			 */
			frm_pck_ofs =
				frm_buf_ofs + SHM_HDR_LEN +
				(*(pbuf->desc_vptr + frm_buf_ofs));
			frm_pck_len =
				(pck_desc->frm_len - SHM_HDR_LEN -
				(*(pbuf->desc_vptr + frm_buf_ofs)));

			/* Check whether CAIF packet is within buffer limits. */
			if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
				break;

			/* Get a suitable CAIF packet and copy in data. */
			skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
							frm_pck_len + 1);
			if (skb == NULL) {
				pr_info("OOM: Try next frame in descriptor\n");
				break;
			}

			p = skb_put(skb, frm_pck_len);
			memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);

			skb->protocol = htons(ETH_P_CAIF);
			skb_reset_mac_header(skb);
			skb->dev = pshm_drv->pshm_dev->pshm_netdev;

			/* Push received packet up the stack. */
			ret = netif_rx_ni(skb);

			if (!ret) {
				pshm_drv->pshm_dev->pshm_netdev->stats.
						rx_packets++;
				pshm_drv->pshm_dev->pshm_netdev->stats.
						rx_bytes += pck_desc->frm_len;
			} else
				++pshm_drv->pshm_dev->pshm_netdev->stats.
						rx_dropped;

			/* Move to next packet descriptor. */
			pck_desc++;
		}

		spin_lock_irqsave(&pshm_drv->lock, flags);
		list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);

		spin_unlock_irqrestore(&pshm_drv->lock, flags);
	}

	/* Schedule the work queue, if required. */
	if (!work_pending(&pshm_drv->shm_tx_work))
		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
}

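/*
 * TX work function: acknowledges processed RX buffers, packs as many
 * queued skbs as will fit into the next free TX buffer (at most
 * SHM_MAX_FRMS_PER_BUF frames per buffer), fills in the packet
 * descriptor area and signals the modem through the mailbox. Turns
 * CAIF flow control off when the number of free TX buffers drops
 * below LOW_WATERMARK.
 */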
static void shm_tx_work_func(struct work_struct *tx_work)
{
	u32 mbox_msg;
	unsigned int frmlen, avail_emptybuff, append = 0;
	unsigned long flags = 0;
	struct buf_list *pbuf = NULL;
	struct shmdrv_layer *pshm_drv;
	struct shm_caif_frm *frm;
	struct sk_buff *skb;
	struct shm_pck_desc *pck_desc;
	struct list_head *pos;

	pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);

	do {
		/* Initialize mailbox message. */
		mbox_msg = 0x00;
		avail_emptybuff = 0;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check for pending receive buffers. */
		if (!list_empty(&pshm_drv->rx_pend_list)) {

			pbuf = list_entry(pshm_drv->rx_pend_list.next,
						struct buf_list, list);

			list_del_init(&pbuf->list);
			list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
			/*
			 * Value index is never changed,
			 * so read access should be safe.
			 */
			mbox_msg |= SHM_SET_EMPTY(pbuf->index);
		}

		skb = skb_peek(&pshm_drv->sk_qhead);

		if (skb == NULL)
			goto send_msg;

		/* Check the available no. of buffers in the empty list. */
		list_for_each(pos, &pshm_drv->tx_empty_list)
			avail_emptybuff++;

		if ((avail_emptybuff < LOW_WATERMARK) &&
					pshm_drv->tx_empty_available) {
			/* Update blocking condition. */
			pshm_drv->tx_empty_available = 0;
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			pshm_drv->cfdev.flowctrl
					(pshm_drv->pshm_dev->pshm_netdev,
					CAIF_FLOW_OFF);
			spin_lock_irqsave(&pshm_drv->lock, flags);
		}

		/*
		 * We simply return to the caller if we do not have space
		 * either in the Tx pending list or the Tx empty list. In this
		 * case, we hold the received skb in the skb list, waiting to
		 * be transmitted once Tx buffers become available.
		 */
		if (list_empty(&pshm_drv->tx_empty_list))
			goto send_msg;

		/* Get the first free Tx buffer. */
		pbuf = list_entry(pshm_drv->tx_empty_list.next,
						struct buf_list, list);
		do {
			if (append) {
				skb = skb_peek(&pshm_drv->sk_qhead);
				if (skb == NULL)
					break;
			}

			frm = (struct shm_caif_frm *)
					(pbuf->desc_vptr + pbuf->frm_ofs);

			frm->hdr_ofs = 0;
			frmlen = 0;
			frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;

			/* Add tail padding if needed. */
			if (frmlen % SHM_FRM_PAD_LEN)
				frmlen += SHM_FRM_PAD_LEN -
						(frmlen % SHM_FRM_PAD_LEN);

			/*
			 * Verify that packet, header and additional padding
			 * can fit within the buffer frame area.
			 */
			if (frmlen >= (pbuf->len - pbuf->frm_ofs))
				break;

			if (!append) {
				list_del_init(&pbuf->list);
				append = 1;
			}

			skb = skb_dequeue(&pshm_drv->sk_qhead);
			if (skb == NULL)
				break;

			/* Copy in CAIF frame. */
			skb_copy_bits(skb, 0, pbuf->desc_vptr +
					pbuf->frm_ofs + SHM_HDR_LEN +
					frm->hdr_ofs, skb->len);

			pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
			pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
					frmlen;
			dev_kfree_skb_irq(skb);

			/* Fill in the shared memory packet descriptor area. */
			pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
			/* Forward to current frame. */
			pck_desc += pbuf->frames;
			pck_desc->frm_ofs = (pbuf->phy_addr -
						pshm_drv->shm_base_addr) +
						pbuf->frm_ofs;
			pck_desc->frm_len = frmlen;
			/* Terminate packet descriptor area. */
			pck_desc++;
			pck_desc->frm_ofs = 0;
			/* Update buffer parameters. */
			pbuf->frames++;
			pbuf->frm_ofs += frmlen + (frmlen % 32);

		} while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);

		/* Assign buffer as full. */
		list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
		append = 0;
		mbox_msg |= SHM_SET_FULL(pbuf->index);
send_msg:
		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		if (mbox_msg)
			pshm_drv->pshm_dev->pshmdev_mbxsend
					(pshm_drv->pshm_dev->shm_id, mbox_msg);
	} while (mbox_msg);
}

static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
{
	struct shmdrv_layer *pshm_drv;

	pshm_drv = netdev_priv(shm_netdev);

	skb_queue_tail(&pshm_drv->sk_qhead, skb);

	/* Schedule the Tx work queue for deferred processing of skbs. */
	if (!work_pending(&pshm_drv->shm_tx_work))
		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);

	return 0;
}

static const struct net_device_ops netdev_ops = {
	.ndo_open = shm_netdev_open,
	.ndo_stop = shm_netdev_close,
	.ndo_start_xmit = shm_netdev_tx,
};

static void shm_netdev_setup(struct net_device *pshm_netdev)
{
	struct shmdrv_layer *pshm_drv;

	pshm_netdev->netdev_ops = &netdev_ops;
	pshm_netdev->mtu = CAIF_MAX_MTU;
	pshm_netdev->type = ARPHRD_CAIF;
	pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
	pshm_netdev->tx_queue_len = 0;
	pshm_netdev->destructor = free_netdev;

	pshm_drv = netdev_priv(pshm_netdev);

	/* Initialize structures in a clean state. */
	memset(pshm_drv, 0, sizeof(struct shmdrv_layer));

	pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
}

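/*
 * Probe a shared memory device: allocate the CAIF network device,
 * register the mailbox callback, carve the shared memory area into
 * NR_TX_BUF TX and NR_RX_BUF RX buffers (mapped with ioremap unless
 * the device runs in loopback) and register the network device.
 */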
int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
{
	int result, j;
	struct shmdrv_layer *pshm_drv = NULL;

	pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
						"cfshm%d", shm_netdev_setup);
	if (!pshm_dev->pshm_netdev)
		return -ENOMEM;

	pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
	pshm_drv->pshm_dev = pshm_dev;

	/*
	 * Initialization starts with verifying the availability of the
	 * MBX driver by calling its setup function. The MBX driver must
	 * be available at this point for the SHM driver to work properly.
	 */
	if ((pshm_dev->pshmdev_mbxsetup
			(caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
		pr_warn("Could not config. SHM Mailbox,"
				" Bailing out.....\n");
		free_netdev(pshm_dev->pshm_netdev);
		return -ENODEV;
	}

	skb_queue_head_init(&pshm_drv->sk_qhead);

	pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
			" INSTANCE AT pshm_drv =0x%p\n",
			pshm_drv->pshm_dev->shm_id, pshm_drv);

	if (pshm_dev->shm_total_sz <
			(NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {

		pr_warn("ERROR, Amount of available"
				" Phys. SHM cannot accommodate current SHM "
				"driver configuration, Bailing out ...\n");
		free_netdev(pshm_dev->pshm_netdev);
		return -ENOMEM;
	}

	pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
	pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;

	if (pshm_dev->shm_loopback)
		pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
	else
		pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
						(NR_TX_BUF * TX_BUF_SZ);

	spin_lock_init(&pshm_drv->lock);
	INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
	INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
	INIT_LIST_HEAD(&pshm_drv->tx_full_list);

	INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
	INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
	INIT_LIST_HEAD(&pshm_drv->rx_full_list);

	INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
	INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);

	pshm_drv->pshm_tx_workqueue =
			create_singlethread_workqueue("shm_tx_work");
	pshm_drv->pshm_rx_workqueue =
			create_singlethread_workqueue("shm_rx_work");

	for (j = 0; j < NR_TX_BUF; j++) {
		struct buf_list *tx_buf =
				kmalloc(sizeof(struct buf_list), GFP_KERNEL);

		if (tx_buf == NULL) {
			pr_warn("ERROR, Could not"
					" allocate dynamic mem. for tx_buf,"
					" Bailing out ...\n");
			free_netdev(pshm_dev->pshm_netdev);
			return -ENOMEM;
		}
		tx_buf->index = j;
		tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
		tx_buf->len = TX_BUF_SZ;
		tx_buf->frames = 0;
		tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;

		if (pshm_dev->shm_loopback)
			tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
		else
			tx_buf->desc_vptr =
					ioremap(tx_buf->phy_addr, TX_BUF_SZ);

		list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
	}

	for (j = 0; j < NR_RX_BUF; j++) {
		struct buf_list *rx_buf =
				kmalloc(sizeof(struct buf_list), GFP_KERNEL);

		if (rx_buf == NULL) {
			pr_warn("ERROR, Could not"
					" allocate dynamic mem.for rx_buf,"
					" Bailing out ...\n");
			free_netdev(pshm_dev->pshm_netdev);
			return -ENOMEM;
		}
		rx_buf->index = j;
		rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
		rx_buf->len = RX_BUF_SZ;

		if (pshm_dev->shm_loopback)
			rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
		else
			rx_buf->desc_vptr =
					ioremap(rx_buf->phy_addr, RX_BUF_SZ);

		list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
	}

	pshm_drv->tx_empty_available = 1;
	result = register_netdev(pshm_dev->pshm_netdev);
	if (result)
		pr_warn("ERROR[%d], SHM could not, "
			"register with NW FRMWK Bailing out ...\n", result);

	return result;
}

void caif_shmcore_remove(struct net_device *pshm_netdev)
{
	struct buf_list *pbuf;
	struct shmdrv_layer *pshm_drv = NULL;

	pshm_drv = netdev_priv(pshm_netdev);

	while (!(list_empty(&pshm_drv->tx_pend_list))) {
		pbuf =
			list_entry(pshm_drv->tx_pend_list.next,
					struct buf_list, list);

		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->tx_full_list))) {
		pbuf =
			list_entry(pshm_drv->tx_full_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->tx_empty_list))) {
		pbuf =
			list_entry(pshm_drv->tx_empty_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_full_list))) {
		pbuf =
			list_entry(pshm_drv->rx_full_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_pend_list))) {
		pbuf =
			list_entry(pshm_drv->rx_pend_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_empty_list))) {
		pbuf =
			list_entry(pshm_drv->rx_empty_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	/* Destroy work queues. */
	destroy_workqueue(pshm_drv->pshm_tx_workqueue);
	destroy_workqueue(pshm_drv->pshm_rx_workqueue);

	unregister_netdev(pshm_netdev);
}