sh_irda.c
/*
 * SuperH IrDA Driver
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 *
 * Based on sh_sir.c
 * Copyright (C) 2009 Renesas Solutions Corp.
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * CAUTION
 *
 * This driver is very simple; it does not yet support:
 * - MIR/FIR
 * - DMA transfer
 * - FIFO mode
 */

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#define DRIVER_NAME "sh_irda"

#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
#define __IRDARAM_LEN 0x13FF
#else
#define __IRDARAM_LEN 0x1039
#endif

#define IRTMR     0x1F00 /* Transfer mode */
#define IRCFR     0x1F02 /* Configuration */
#define IRCTR     0x1F04 /* IR control */
#define IRTFLR    0x1F20 /* Transmit frame length */
#define IRTCTR    0x1F22 /* Transmit control */
#define IRRFLR    0x1F40 /* Receive frame length */
#define IRRCTR    0x1F42 /* Receive control */
#define SIRISR    0x1F60 /* SIR-UART mode interrupt source */
#define SIRIMR    0x1F62 /* SIR-UART mode interrupt mask */
#define SIRICR    0x1F64 /* SIR-UART mode interrupt clear */
#define SIRBCR    0x1F68 /* SIR-UART mode baud rate count */
#define MFIRISR   0x1F70 /* MIR/FIR mode interrupt source */
#define MFIRIMR   0x1F72 /* MIR/FIR mode interrupt mask */
#define MFIRICR   0x1F74 /* MIR/FIR mode interrupt clear */
#define CRCCTR    0x1F80 /* CRC engine control */
#define CRCIR     0x1F86 /* CRC engine input data */
#define CRCCR     0x1F8A /* CRC engine calculation */
#define CRCOR     0x1F8E /* CRC engine output data */
#define FIFOCP    0x1FC0 /* FIFO current pointer */
#define FIFOFP    0x1FC2 /* FIFO follow pointer */
#define FIFORSMSK 0x1FC4 /* FIFO receive status mask */
#define FIFORSOR  0x1FC6 /* FIFO receive status OR */
#define FIFOSEL   0x1FC8 /* FIFO select */
#define FIFORS    0x1FCA /* FIFO receive status */
#define FIFORFL   0x1FCC /* FIFO receive frame length */
#define FIFORAMCP 0x1FCE /* FIFO RAM current pointer */
#define FIFORAMFP 0x1FD0 /* FIFO RAM follow pointer */
#define BIFCTL    0x1FD2 /* BUS interface control */
#define IRDARAM   0x0000 /* IrDA buffer RAM */
#define IRDARAM_LEN __IRDARAM_LEN /* - 8/16/32 (read-only for 32) */

/* IRTMR */
#define TMD_MASK (0x3 << 14) /* Transfer Mode */
#define TMD_SIR  (0x0 << 14)
#define TMD_MIR  (0x3 << 14)
#define TMD_FIR  (0x2 << 14)
#define FIFORIM  (1 << 8) /* FIFO receive interrupt mask */
#define MIM      (1 << 4) /* MIR/FIR Interrupt Mask */
#define SIM      (1 << 0) /* SIR Interrupt Mask */
#define xIM_MASK (FIFORIM | MIM | SIM)

/* IRCFR */
#define RTO_SHIFT 8 /* shift for Receive Timeout */
#define RTO (0x3 << RTO_SHIFT)

/* IRTCTR */
#define ARMOD (1 << 15) /* Auto-Receive Mode */
#define TE    (1 <<  0) /* Transmit Enable */

/* IRRFLR */
#define RFL_MASK (0x1FFF) /* mask for Receive Frame Length */

/* IRRCTR */
#define RE (1 << 0) /* Receive Enable */

/*
 * SIRISR, SIRIMR, SIRICR,
 * MFIRISR, MFIRIMR, MFIRICR
 */
#define FRE   (1 << 15) /* Frame Receive End */
#define TROV  (1 << 11) /* Transfer Area Overflow */
#define xIR_9 (1 <<  9)
#define TOT   xIR_9     /* for SIR Timeout */
#define ABTD  xIR_9     /* for MIR/FIR Abort Detection */
#define xIR_8 (1 <<  8)
#define FER   xIR_8     /* for SIR Framing Error */
#define CRCER xIR_8     /* for MIR/FIR CRC error */
#define FTE   (1 <<  7) /* Frame Transmit End */
#define xIR_MASK (FRE | TROV | xIR_9 | xIR_8 | FTE)

/* SIRBCR */
#define BRC_MASK (0x3F) /* mask for Baud Rate Count */

/* CRCCTR */
#define CRC_RST     (1 << 15) /* CRC Engine Reset */
#define CRC_CT_MASK 0x0FFF    /* mask for CRC Engine Input Data Count */

/* CRCIR */
#define CRC_IN_MASK 0x0FFF /* mask for CRC Engine Input Data */

/************************************************************************
        enum / structure
************************************************************************/
enum sh_irda_mode {
        SH_IRDA_NONE = 0,
        SH_IRDA_SIR,
        SH_IRDA_MIR,
        SH_IRDA_FIR,
};

struct sh_irda_self;
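
/*
 * Each transfer mode supplies its own table of interrupt handlers;
 * sh_irda_set_mode() installs the table in sh_irda_self->xir_func and
 * sh_irda_irq() dispatches through it, so unsupported modes fall back
 * to the error-logging stubs defined for NONE mode below.
 */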
struct sh_irda_xir_func {
        int (*xir_fre)(struct sh_irda_self *self);
        int (*xir_trov)(struct sh_irda_self *self);
        int (*xir_9)(struct sh_irda_self *self);
        int (*xir_8)(struct sh_irda_self *self);
        int (*xir_fte)(struct sh_irda_self *self);
};

struct sh_irda_self {
        void __iomem *membase;
        unsigned int irq;
        struct platform_device *pdev;

        struct net_device *ndev;

        struct irlap_cb *irlap;
        struct qos_info qos;

        iobuff_t tx_buff;
        iobuff_t rx_buff;

        enum sh_irda_mode mode;
        spinlock_t lock;

        struct sh_irda_xir_func *xir_func;
};

/************************************************************************
        common function
************************************************************************/
static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
{
        unsigned long flags;

        spin_lock_irqsave(&self->lock, flags);
        iowrite16(data, self->membase + offset);
        spin_unlock_irqrestore(&self->lock, flags);
}

static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
{
        unsigned long flags;
        u16 ret;

        spin_lock_irqsave(&self->lock, flags);
        ret = ioread16(self->membase + offset);
        spin_unlock_irqrestore(&self->lock, flags);

        return ret;
}
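
/*
 * Locked read-modify-write helper: only the bits selected by @mask are
 * replaced with @data, and the register write is skipped entirely when
 * the merged value would leave the register unchanged.
 */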
static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
                                u16 mask, u16 data)
{
        unsigned long flags;
        u16 old, new;

        spin_lock_irqsave(&self->lock, flags);
        old = ioread16(self->membase + offset);
        new = (old & ~mask) | data;
        if (old != new)
                iowrite16(new, self->membase + offset); /* write the merged value, not just @data */
        spin_unlock_irqrestore(&self->lock, flags);
}

/************************************************************************
        mode function
************************************************************************/
/*=====================================
 *
 * common
 *
 *=====================================*/
static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
{
        struct device *dev = &self->ndev->dev;

        sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
        dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
}

static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
{
        struct device *dev = &self->ndev->dev;

        if (SH_IRDA_SIR != self->mode)
                interval = 0;

        if (interval < 0 || interval > 2) {
                dev_err(dev, "unsupported timeout interval\n");
                return -EINVAL;
        }

        sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
        return 0;
}

static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
{
        struct device *dev = &self->ndev->dev;
        u16 val;

        if (baudrate < 0)
                return 0;

        if (SH_IRDA_SIR != self->mode) {
                dev_err(dev, "not in SIR mode\n");
                return -EINVAL;
        }

        /*
         * Baud rate (bits/s) =
         *      (48 MHz / 26) / ((baud rate counter value + 1) * 16)
         */
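        /*
         * Worked example (integer arithmetic, as below): for 9600 baud,
         * 48000000 / 26 / 16 / 9600 - 1 = 11 (0x0B), which gives about
         * 115384 / (11 + 1) = 9615 bit/s on the wire.
         */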
        val = (48000000 / 26 / 16 / baudrate) - 1;
        dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);
        sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);

        return 0;
}

static int sh_irda_get_rcv_length(struct sh_irda_self *self)
{
        return RFL_MASK & sh_irda_read(self, IRRFLR);
}

/*=====================================
 *
 * NONE MODE
 *
 *=====================================*/
static int sh_irda_xir_fre(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;

        dev_err(dev, "none mode: frame recv\n");
        return 0;
}

static int sh_irda_xir_trov(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;

        dev_err(dev, "none mode: buffer ram over\n");
        return 0;
}

static int sh_irda_xir_9(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;

        dev_err(dev, "none mode: time over\n");
        return 0;
}

static int sh_irda_xir_8(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;

        dev_err(dev, "none mode: framing error\n");
        return 0;
}

static int sh_irda_xir_fte(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;

        dev_err(dev, "none mode: frame transmit end\n");
        return 0;
}

static struct sh_irda_xir_func sh_irda_xir_func = {
        .xir_fre  = sh_irda_xir_fre,
        .xir_trov = sh_irda_xir_trov,
        .xir_9    = sh_irda_xir_9,
        .xir_8    = sh_irda_xir_8,
        .xir_fte  = sh_irda_xir_fte,
};

/*=====================================
 *
 * MIR/FIR MODE
 *
 * MIR/FIR are not supported yet
 *=====================================*/
static struct sh_irda_xir_func sh_irda_mfir_func = {
        .xir_fre  = sh_irda_xir_fre,
        .xir_trov = sh_irda_xir_trov,
        .xir_9    = sh_irda_xir_9,
        .xir_8    = sh_irda_xir_8,
        .xir_fte  = sh_irda_xir_fte,
};

/*=====================================
 *
 * SIR MODE
 *
 *=====================================*/
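/*
 * SIR receive: on Frame Receive End the whole frame is sitting in
 * IRDARAM, so it is read back one 16-bit word at a time and fed
 * byte-by-byte into the IrDA stack's SIR unwrapper before the
 * receiver is re-enabled.
 */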
static int sh_irda_sir_fre(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;
        u16 data16;
        u8 *data = (u8 *)&data16;
        int len = sh_irda_get_rcv_length(self);
        int i, j;

        if (len > IRDARAM_LEN)
                len = IRDARAM_LEN;

        dev_dbg(dev, "frame recv length = %d\n", len);

        for (i = 0; i < len; i++) {
                j = i % 2;
                if (!j)
                        data16 = sh_irda_read(self, IRDARAM + i);

                async_unwrap_char(self->ndev, &self->ndev->stats,
                                  &self->rx_buff, data[j]);
        }
        self->ndev->last_rx = jiffies;

        sh_irda_rcv_ctrl(self, 1);

        return 0;
}

static int sh_irda_sir_trov(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;

        dev_err(dev, "buffer ram over\n");
        sh_irda_rcv_ctrl(self, 1);
        return 0;
}

static int sh_irda_sir_tot(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;

        dev_err(dev, "time over\n");
        sh_irda_set_baudrate(self, 9600);
        sh_irda_rcv_ctrl(self, 1);
        return 0;
}

static int sh_irda_sir_fer(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;

        dev_err(dev, "framing error\n");
        sh_irda_rcv_ctrl(self, 1);
        return 0;
}

static int sh_irda_sir_fte(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;

        dev_dbg(dev, "frame transmit end\n");
        netif_wake_queue(self->ndev);
        return 0;
}

static struct sh_irda_xir_func sh_irda_sir_func = {
        .xir_fre  = sh_irda_sir_fre,
        .xir_trov = sh_irda_sir_trov,
        .xir_9    = sh_irda_sir_tot,
        .xir_8    = sh_irda_sir_fer,
        .xir_fte  = sh_irda_sir_fte,
};
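
/*
 * Mode selection: program the TMD bits of IRTMR and install the
 * matching interrupt-handler table. Only SIR is functional here;
 * MIR/FIR share the error-logging stubs via sh_irda_mfir_func.
 */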
static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
{
        struct device *dev = &self->ndev->dev;
        struct sh_irda_xir_func *func;
        const char *name;
        u16 data;

        switch (mode) {
        case SH_IRDA_SIR:
                name = "SIR";
                data = TMD_SIR;
                func = &sh_irda_sir_func;
                break;
        case SH_IRDA_MIR:
                name = "MIR";
                data = TMD_MIR;
                func = &sh_irda_mfir_func;
                break;
        case SH_IRDA_FIR:
                name = "FIR";
                data = TMD_FIR;
                func = &sh_irda_mfir_func;
                break;
        default:
                name = "NONE";
                data = 0;
                func = &sh_irda_xir_func;
                break;
        }

        self->mode = mode;
        self->xir_func = func;
        sh_irda_update_bits(self, IRTMR, TMD_MASK, data);

        dev_dbg(dev, "switch to %s mode\n", name);
}

/************************************************************************
        irq function
************************************************************************/
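/*
 * Masking strategy: mask and clear every interrupt source in both the
 * SIR and MIR/FIR banks first, then unmask only the bank that belongs
 * to the current transfer mode.
 */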
static void sh_irda_set_irq_mask(struct sh_irda_self *self)
{
        u16 tmr_hole;
        u16 xir_reg;

        /* set all masks */
        sh_irda_update_bits(self, IRTMR, xIM_MASK, xIM_MASK);
        sh_irda_update_bits(self, SIRIMR, xIR_MASK, xIR_MASK);
        sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);

        /* clear irq */
        sh_irda_update_bits(self, SIRICR, xIR_MASK, xIR_MASK);
        sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);

        switch (self->mode) {
        case SH_IRDA_SIR:
                tmr_hole = SIM;
                xir_reg = SIRIMR;
                break;
        case SH_IRDA_MIR:
        case SH_IRDA_FIR:
                tmr_hole = MIM;
                xir_reg = MFIRIMR;
                break;
        default:
                tmr_hole = 0;
                xir_reg = 0;
                break;
        }

        /* open the mask for the current mode only */
        if (xir_reg) {
                sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
                sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
        }
}

static irqreturn_t sh_irda_irq(int irq, void *dev_id)
{
        struct sh_irda_self *self = dev_id;
        struct sh_irda_xir_func *func = self->xir_func;
        u16 isr = sh_irda_read(self, SIRISR);

        /* clear irq */
        sh_irda_write(self, SIRICR, isr);

        if (isr & FRE)
                func->xir_fre(self);
        if (isr & TROV)
                func->xir_trov(self);
        if (isr & xIR_9)
                func->xir_9(self);
        if (isr & xIR_8)
                func->xir_8(self);
        if (isr & FTE)
                func->xir_fte(self);

        return IRQ_HANDLED;
}

/************************************************************************
        CRC function
************************************************************************/
static void sh_irda_crc_reset(struct sh_irda_self *self)
{
        sh_irda_write(self, CRCCTR, CRC_RST);
}

static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
{
        sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
}

static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
{
        return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
}

static u16 sh_irda_crc_out(struct sh_irda_self *self)
{
        return sh_irda_read(self, CRCOR);
}
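
/*
 * Known-answer self-test of the CRC engine: feeding the bytes
 * CC F5 F1 A7 should leave an input count of 4 and a result of
 * 0x51DF (reference values presumably taken from the hardware
 * manual).
 */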
static int sh_irda_crc_init(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;
        int ret = -EIO;
        u16 val;

        sh_irda_crc_reset(self);

        sh_irda_crc_add(self, 0xCC);
        sh_irda_crc_add(self, 0xF5);
        sh_irda_crc_add(self, 0xF1);
        sh_irda_crc_add(self, 0xA7);

        val = sh_irda_crc_cnt(self);
        if (4 != val) {
                dev_err(dev, "CRC count error %x\n", val);
                goto crc_init_out;
        }

        val = sh_irda_crc_out(self);
        if (0x51DF != val) {
                dev_err(dev, "CRC result error %x\n", val);
                goto crc_init_out;
        }

        ret = 0;

crc_init_out:
        sh_irda_crc_reset(self);
        return ret;
}

/************************************************************************
        iobuf function
************************************************************************/
static void sh_irda_remove_iobuf(struct sh_irda_self *self)
{
        kfree(self->rx_buff.head);

        self->tx_buff.head = NULL;
        self->tx_buff.data = NULL;
        self->rx_buff.head = NULL;
        self->rx_buff.data = NULL;
}
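
/*
 * Buffer setup: the rx buffer is ordinary kernel memory that the
 * interrupt handler fills from IRDARAM, while tx_buff.head points
 * straight at the device's IRDARAM so that async_wrap_skb() writes
 * into the chip's buffer directly; this is why txsize goes unused.
 */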
static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
{
        if (self->rx_buff.head ||
            self->tx_buff.head) {
                dev_err(&self->ndev->dev, "iobuf already exists\n");
                return -EINVAL;
        }

        /* rx_buff */
        self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
        if (!self->rx_buff.head)
                return -ENOMEM;

        self->rx_buff.truesize = rxsize;
        self->rx_buff.in_frame = FALSE;
        self->rx_buff.state = OUTSIDE_FRAME;
        self->rx_buff.data = self->rx_buff.head;

        /* tx_buff */
        self->tx_buff.head = self->membase + IRDARAM;
        self->tx_buff.truesize = IRDARAM_LEN;

        return 0;
}

/************************************************************************
        net_device_ops function
************************************************************************/
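/*
 * Transmit path: stop the queue and the receiver, honour any pending
 * speed change, wrap the skb into IRDARAM and kick the transmitter
 * with ARMOD | TE. The queue is woken again from the Frame Transmit
 * End interrupt (sh_irda_sir_fte()).
 */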
static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct sh_irda_self *self = netdev_priv(ndev);
        struct device *dev = &self->ndev->dev;
        int speed = irda_get_next_speed(skb);
        int ret;

        dev_dbg(dev, "hard xmit\n");

        netif_stop_queue(ndev);
        sh_irda_rcv_ctrl(self, 0);

        ret = sh_irda_set_baudrate(self, speed);
        if (ret < 0)
                goto sh_irda_hard_xmit_end;

        self->tx_buff.len = 0;
        if (skb->len) {
                unsigned long flags;

                spin_lock_irqsave(&self->lock, flags);
                self->tx_buff.len = async_wrap_skb(skb,
                                                   self->tx_buff.head,
                                                   self->tx_buff.truesize);
                spin_unlock_irqrestore(&self->lock, flags);

                if (self->tx_buff.len > self->tx_buff.truesize)
                        self->tx_buff.len = self->tx_buff.truesize;

                sh_irda_write(self, IRTFLR, self->tx_buff.len);
                sh_irda_write(self, IRTCTR, ARMOD | TE);
        } else
                goto sh_irda_hard_xmit_end;

        dev_kfree_skb(skb);

        return 0;

sh_irda_hard_xmit_end:
        sh_irda_set_baudrate(self, 9600);
        netif_wake_queue(self->ndev);
        sh_irda_rcv_ctrl(self, 1);
        dev_kfree_skb(skb);

        return ret;
}

static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
{
        /*
         * FIXME
         *
         * This function is required by the IrDA framework,
         * but there is nothing to do here yet.
         */
        return 0;
}

static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
{
        struct sh_irda_self *self = netdev_priv(ndev);

        return &self->ndev->stats;
}

static int sh_irda_open(struct net_device *ndev)
{
        struct sh_irda_self *self = netdev_priv(ndev);
        int err;

        pm_runtime_get_sync(&self->pdev->dev);
        err = sh_irda_crc_init(self);
        if (err)
                goto open_err;

        sh_irda_set_mode(self, SH_IRDA_SIR);
        sh_irda_set_timeout(self, 2);
        sh_irda_set_baudrate(self, 9600);

        self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
        if (!self->irlap) {
                err = -ENODEV;
                goto open_err;
        }

        netif_start_queue(ndev);
        sh_irda_rcv_ctrl(self, 1);
        sh_irda_set_irq_mask(self);

        dev_info(&ndev->dev, "opened\n");

        return 0;

open_err:
        pm_runtime_put_sync(&self->pdev->dev);

        return err;
}

static int sh_irda_stop(struct net_device *ndev)
{
        struct sh_irda_self *self = netdev_priv(ndev);

        /* Stop IrLAP */
        if (self->irlap) {
                irlap_close(self->irlap);
                self->irlap = NULL;
        }

        netif_stop_queue(ndev);
        pm_runtime_put_sync(&self->pdev->dev);

        dev_info(&ndev->dev, "stopped\n");

        return 0;
}

static const struct net_device_ops sh_irda_ndo = {
        .ndo_open       = sh_irda_open,
        .ndo_stop       = sh_irda_stop,
        .ndo_start_xmit = sh_irda_hard_xmit,
        .ndo_do_ioctl   = sh_irda_ioctl,
        .ndo_get_stats  = sh_irda_stats,
};

/************************************************************************
        platform_driver function
************************************************************************/
static int __devinit sh_irda_probe(struct platform_device *pdev)
{
        struct net_device *ndev;
        struct sh_irda_self *self;
        struct resource *res;
        int irq;
        int err = -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
        if (!res || irq < 0) {
                dev_err(&pdev->dev, "Not enough platform resources.\n");
                goto exit;
        }

        ndev = alloc_irdadev(sizeof(*self));
        if (!ndev)
                goto exit;

        self = netdev_priv(ndev);
        self->membase = ioremap_nocache(res->start, resource_size(res));
        if (!self->membase) {
                err = -ENXIO;
                dev_err(&pdev->dev, "Unable to ioremap.\n");
                goto err_mem_1;
        }

        err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
        if (err)
                goto err_mem_2;

        self->pdev = pdev;
        pm_runtime_enable(&pdev->dev);

        irda_init_max_qos_capabilies(&self->qos);

        ndev->netdev_ops = &sh_irda_ndo;
        ndev->irq = irq;

        self->ndev = ndev;
        self->qos.baud_rate.bits &= IR_9600; /* FIXME */
        self->qos.min_turn_time.bits = 1; /* 10 ms or more */
        spin_lock_init(&self->lock);

        irda_qos_bits_to_value(&self->qos);

        err = register_netdev(ndev);
        if (err)
                goto err_mem_4;

        platform_set_drvdata(pdev, ndev);

        /*
         * Propagate a request_irq() failure and unwind the netdev
         * registration instead of silently returning success.
         */
        err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self);
        if (err) {
                dev_err(&pdev->dev, "Unable to attach sh_irda interrupt\n");
                goto err_irq;
        }

        dev_info(&pdev->dev, "SuperH IrDA probed\n");

        goto exit;

err_irq:
        unregister_netdev(ndev);
err_mem_4:
        pm_runtime_disable(&pdev->dev);
        sh_irda_remove_iobuf(self);
err_mem_2:
        iounmap(self->membase);
err_mem_1:
        free_netdev(ndev);
exit:
        return err;
}

static int __devexit sh_irda_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct sh_irda_self *self = netdev_priv(ndev);

        if (!self)
                return 0;

        unregister_netdev(ndev);
        free_irq(ndev->irq, self); /* release the irq taken in probe */
        pm_runtime_disable(&pdev->dev);
        sh_irda_remove_iobuf(self);
        iounmap(self->membase);
        free_netdev(ndev);
        platform_set_drvdata(pdev, NULL);

        return 0;
}

static int sh_irda_runtime_nop(struct device *dev)
{
        /*
         * Runtime PM callback shared between ->runtime_suspend()
         * and ->runtime_resume(). Simply returns success.
         *
         * This driver re-initializes all registers after
         * pm_runtime_get_sync() anyway so there is no need
         * to save and restore registers here.
         */
        return 0;
}

static const struct dev_pm_ops sh_irda_pm_ops = {
        .runtime_suspend = sh_irda_runtime_nop,
        .runtime_resume  = sh_irda_runtime_nop,
};

static struct platform_driver sh_irda_driver = {
        .probe  = sh_irda_probe,
        .remove = __devexit_p(sh_irda_remove),
        .driver = {
                .name = DRIVER_NAME,
                .pm   = &sh_irda_pm_ops,
        },
};

module_platform_driver(sh_irda_driver);

MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
MODULE_LICENSE("GPL");