/* nuvoton-cir.c */
  1. /*
  2. * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
  3. *
  4. * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
  5. * Copyright (C) 2009 Nuvoton PS Team
  6. *
  7. * Special thanks to Nuvoton for providing hardware, spec sheets and
  8. * sample code upon which portions of this driver are based. Indirect
  9. * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
  10. * modeled after.
  11. *
  12. * This program is free software; you can redistribute it and/or
  13. * modify it under the terms of the GNU General Public License as
  14. * published by the Free Software Foundation; either version 2 of the
  15. * License, or (at your option) any later version.
  16. *
  17. * This program is distributed in the hope that it will be useful, but
  18. * WITHOUT ANY WARRANTY; without even the implied warranty of
  19. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  20. * General Public License for more details.
  21. *
  22. * You should have received a copy of the GNU General Public License
  23. * along with this program; if not, write to the Free Software
  24. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  25. * USA
  26. */
  27. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  28. #include <linux/kernel.h>
  29. #include <linux/module.h>
  30. #include <linux/pnp.h>
  31. #include <linux/io.h>
  32. #include <linux/interrupt.h>
  33. #include <linux/sched.h>
  34. #include <linux/slab.h>
  35. #include <media/rc-core.h>
  36. #include <linux/pci_ids.h>
  37. #include "nuvoton-cir.h"
/* forward declaration: wakeup_data_store() uses this before its definition */
static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt);

/* supported chips: name plus the (masked) SIO chip id it matches */
static const struct nvt_chip nvt_chips[] = {
	{ "w83667hg", NVT_W83667HG },
	{ "NCT6775F", NVT_6775F },
	{ "NCT6776F", NVT_6776F },
	{ "NCT6779D", NVT_6779D },
};
/* true if this is the original w83667hg part (vs. the newer NCT67xx chips) */
static inline bool is_w83667hg(struct nvt_dev *nvt)
{
	return nvt->chip_ver == NVT_W83667HG;
}
/* write val to config reg */
static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg)
{
	/* EFIR selects the register index, EFDR carries its data */
	outb(reg, nvt->cr_efir);
	outb(val, nvt->cr_efdr);
}
/* read val from config reg */
static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg)
{
	/* select the register index via EFIR, then read its data via EFDR */
	outb(reg, nvt->cr_efir);
	return inb(nvt->cr_efdr);
}
  61. /* update config register bit without changing other bits */
  62. static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
  63. {
  64. u8 tmp = nvt_cr_read(nvt, reg) | val;
  65. nvt_cr_write(nvt, tmp, reg);
  66. }
  67. /* clear config register bit without changing other bits */
  68. static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
  69. {
  70. u8 tmp = nvt_cr_read(nvt, reg) & ~val;
  71. nvt_cr_write(nvt, tmp, reg);
  72. }
/*
 * enter extended function mode
 *
 * Returns 0 on success or -EBUSY if the 2-byte EFM io region could not
 * be claimed. Must be paired with nvt_efm_disable(), which releases it.
 */
static inline int nvt_efm_enable(struct nvt_dev *nvt)
{
	if (!request_muxed_region(nvt->cr_efir, 2, NVT_DRIVER_NAME))
		return -EBUSY;

	/* Enabling Extended Function Mode explicitly requires writing 2x */
	outb(EFER_EFM_ENABLE, nvt->cr_efir);
	outb(EFER_EFM_ENABLE, nvt->cr_efir);

	return 0;
}
/* exit extended function mode and release the io region taken on enable */
static inline void nvt_efm_disable(struct nvt_dev *nvt)
{
	outb(EFER_EFM_DISABLE, nvt->cr_efir);
	release_region(nvt->cr_efir, 2);
}
/*
 * When you want to address a specific logical device, write its logical
 * device number to CR_LOGICAL_DEV_SEL, then enable/disable by writing
 * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN.
 *
 * Caller must already be in extended function mode (nvt_efm_enable()).
 */
static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_cr_write(nvt, ldev, CR_LOGICAL_DEV_SEL);
}
/* select and enable logical device, handling EFM entry/exit itself */
static inline void nvt_enable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	/* NOTE(review): nvt_efm_enable() can fail with -EBUSY but the
	 * return value is ignored here - confirm this is acceptable */
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, ldev);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
	nvt_efm_disable(nvt);
}
/* select and disable logical device, handling EFM entry/exit itself */
static inline void nvt_disable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	/* NOTE(review): nvt_efm_enable() can fail with -EBUSY but the
	 * return value is ignored here - confirm this is acceptable */
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, ldev);
	nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
	nvt_efm_disable(nvt);
}
/* write val to a CIR runtime register (offset from the CIR io base) */
static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
{
	outb(val, nvt->cir_addr + offset);
}
/* read val from a CIR runtime register (offset from the CIR io base) */
static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
{
	return inb(nvt->cir_addr + offset);
}
/* write val to a CIR WAKE register (offset from the CIR WAKE io base) */
static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
					  u8 val, u8 offset)
{
	outb(val, nvt->cir_wake_addr + offset);
}
/* read val from a CIR WAKE register (offset from the CIR WAKE io base) */
static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
{
	return inb(nvt->cir_wake_addr + offset);
}
  135. /* don't override io address if one is set already */
  136. static void nvt_set_ioaddr(struct nvt_dev *nvt, unsigned long *ioaddr)
  137. {
  138. unsigned long old_addr;
  139. old_addr = nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8;
  140. old_addr |= nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO);
  141. if (old_addr)
  142. *ioaddr = old_addr;
  143. else {
  144. nvt_cr_write(nvt, *ioaddr >> 8, CR_CIR_BASE_ADDR_HI);
  145. nvt_cr_write(nvt, *ioaddr & 0xff, CR_CIR_BASE_ADDR_LO);
  146. }
  147. }
/*
 * sysfs read of the CIR wake comparison FIFO: prints one duration per
 * FIFO entry, space separated. SAMPLE_PERIOD is in microseconds (the
 * regs-init comments note a 50us sample period), so durations are in us.
 */
static ssize_t wakeup_data_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rc_dev *rc_dev = to_rc_dev(dev);
	struct nvt_dev *nvt = rc_dev->priv;
	int fifo_len, duration;
	unsigned long flags;
	ssize_t buf_len = 0;
	int i;

	spin_lock_irqsave(&nvt->nvt_lock, flags);

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	fifo_len = min(fifo_len, WAKEUP_MAX_SIZE);

	/* go to first element to be read */
	while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX))
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);

	for (i = 0; i < fifo_len; i++) {
		duration = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
		/* low 7 bits hold the sample count; scale to a duration */
		duration = (duration & BUF_LEN_MASK) * SAMPLE_PERIOD;
		buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len,
				    "%d ", duration);
	}
	buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n");

	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	return buf_len;
}
/*
 * sysfs write: parse a whitespace-separated list of pulse/space
 * durations, convert each to hardware sample counts and program the
 * CIR wake comparison FIFO. Returns len on success or a negative errno.
 */
static ssize_t wakeup_data_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	struct rc_dev *rc_dev = to_rc_dev(dev);
	struct nvt_dev *nvt = rc_dev->priv;
	unsigned long flags;
	u8 tolerance, config, wake_buf[WAKEUP_MAX_SIZE];
	char **argv;
	int i, count;
	unsigned int val;
	ssize_t ret;

	argv = argv_split(GFP_KERNEL, buf, &count);
	if (!argv)
		return -ENOMEM;
	if (!count || count > WAKEUP_MAX_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < count; i++) {
		ret = kstrtouint(argv[i], 10, &val);
		if (ret)
			goto out;
		val = DIV_ROUND_CLOSEST(val, SAMPLE_PERIOD);
		/* each FIFO entry is 7 bits; zero-length entries are invalid */
		if (!val || val > 0x7f) {
			ret = -EINVAL;
			goto out;
		}
		wake_buf[i] = val;
		/* sequence must start with a pulse */
		if (i % 2 == 0)
			wake_buf[i] |= BUF_PULSE_BIT;
	}

	/* hardcode the tolerance to 10% */
	tolerance = DIV_ROUND_UP(count, 10);

	spin_lock_irqsave(&nvt->nvt_lock, flags);

	nvt_clear_cir_wake_fifo(nvt);
	nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
	nvt_cir_wake_reg_write(nvt, tolerance, CIR_WAKE_FIFO_CMP_TOL);

	/* save current mode so it can be restored after loading the fifo */
	config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);

	/* enable writes to wake fifo */
	nvt_cir_wake_reg_write(nvt, config | CIR_WAKE_IRCON_MODE1,
			       CIR_WAKE_IRCON);

	for (i = 0; i < count; i++)
		nvt_cir_wake_reg_write(nvt, wake_buf[i], CIR_WAKE_WR_FIFO_DATA);

	nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);

	ret = len;
out:
	argv_free(argv);
	return ret;
}
static DEVICE_ATTR_RW(wakeup_data);
/* dump current cir register contents (debug aid, printed at info level) */
static void cir_dump_regs(struct nvt_dev *nvt)
{
	/* config-register dump needs extended function mode */
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	pr_info("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
	pr_info(" * CR CIR ACTIVE : 0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR IRQ NUM: 0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	/* runtime registers are reachable without EFM */
	pr_info("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
	pr_info(" * IRSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
	pr_info(" * IREN: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
	pr_info(" * RXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
	pr_info(" * CP: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
	pr_info(" * CC: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
	pr_info(" * SLCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
	pr_info(" * SLCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
	pr_info(" * FIFOCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
	pr_info(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
	pr_info(" * SRXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
	pr_info(" * TXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
	pr_info(" * STXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
	pr_info(" * FCCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
	pr_info(" * FCCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
	pr_info(" * IRFSM: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
}
/*
 * dump current cir wake register contents (debug aid)
 *
 * NOTE(review): the FIFO contents dump at the bottom reads via
 * CIR_WAKE_RD_FIFO_ONLY, which elsewhere (wakeup_data_show) appears to
 * advance the read index - confirm whether this dump is destructive.
 */
static void cir_wake_dump_regs(struct nvt_dev *nvt)
{
	u8 i, fifo_len;

	/* config-register dump needs extended function mode */
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	pr_info("%s: Dump CIR WAKE logical device registers:\n",
		NVT_DRIVER_NAME);
	pr_info(" * CR CIR WAKE ACTIVE : 0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR WAKE BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR WAKE IRQ NUM: 0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	pr_info("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
	pr_info(" * IRSTS: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
	pr_info(" * IREN: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
	pr_info(" * FIFO CMP DEEP: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
	pr_info(" * FIFO CMP TOL: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
	pr_info(" * FIFO COUNT: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
	pr_info(" * SLCH: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
	pr_info(" * SLCL: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
	pr_info(" * FIFOCON: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
	pr_info(" * SRXFSTS: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
	pr_info(" * SAMPLE RX FIFO: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
	pr_info(" * WR FIFO DATA: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
	pr_info(" * RD FIFO ONLY: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_info(" * RD FIFO ONLY IDX: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
	pr_info(" * FIFO IGNORE: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
	pr_info(" * IRFSM: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	pr_info("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
	pr_info("* Contents =");
	for (i = 0; i < fifo_len; i++)
		pr_cont(" %02x",
			nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_cont("\n");
}
  316. static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id)
  317. {
  318. int i;
  319. for (i = 0; i < ARRAY_SIZE(nvt_chips); i++)
  320. if ((id & SIO_ID_MASK) == nvt_chips[i].chip_ver) {
  321. nvt->chip_ver = nvt_chips[i].chip_ver;
  322. return nvt_chips[i].name;
  323. }
  324. return NULL;
  325. }
/*
 * detect hardware features
 *
 * Probes the chip id on the primary EFIR/EFDR port pair, falling back
 * to the secondary pair if the primary reads back 0xff. Returns 0 on
 * success (even for unknown-but-present chips) or -ENODEV when no
 * device answers on either port.
 */
static int nvt_hw_detect(struct nvt_dev *nvt)
{
	const char *chip_name;
	int chip_id;

	nvt_efm_enable(nvt);

	/* Check if we're wired for the alternate EFER setup */
	nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	if (nvt->chip_major == 0xff) {
		/* retry the probe on the secondary port pair */
		nvt_efm_disable(nvt);
		nvt->cr_efir = CR_EFIR2;
		nvt->cr_efdr = CR_EFDR2;
		nvt_efm_enable(nvt);
		nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	}
	nvt->chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);

	nvt_efm_disable(nvt);

	chip_id = nvt->chip_major << 8 | nvt->chip_minor;
	if (chip_id == NVT_INVALID) {
		dev_err(&nvt->pdev->dev,
			"No device found on either EFM port\n");
		return -ENODEV;
	}

	chip_name = nvt_find_chip(nvt, chip_id);

	/* warn, but still let the driver load, if we don't know this chip */
	if (!chip_name)
		dev_warn(&nvt->pdev->dev,
			 "unknown chip, id: 0x%02x 0x%02x, it may not work...",
			 nvt->chip_major, nvt->chip_minor);
	else
		dev_info(&nvt->pdev->dev,
			 "found %s or compatible: chip id: 0x%02x 0x%02x",
			 chip_name, nvt->chip_major, nvt->chip_minor);

	return 0;
}
/* set up the CIR logical device: pin muxing, io base address and irq */
static void nvt_cir_ldev_init(struct nvt_dev *nvt)
{
	u8 val, psreg, psmask, psval;

	/* the pin-select register and bit values differ per chip family */
	if (is_w83667hg(nvt)) {
		psreg = CR_MULTIFUNC_PIN_SEL;
		psmask = MULTIFUNC_PIN_SEL_MASK;
		psval = MULTIFUNC_ENABLE_CIR | MULTIFUNC_ENABLE_CIRWB;
	} else {
		psreg = CR_OUTPUT_PIN_SEL;
		psmask = OUTPUT_PIN_SEL_MASK;
		psval = OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB;
	}

	/* output pin selection: enable CIR, with WB sensor enabled */
	val = nvt_cr_read(nvt, psreg);
	/* NOTE(review): mask applied with &=, which assumes *_SEL_MASK
	 * keeps the bits to preserve - confirm against the datasheet */
	val &= psmask;
	val |= psval;
	nvt_cr_write(nvt, val, psreg);

	/* Select CIR logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	nvt_set_ioaddr(nvt, &nvt->cir_addr);

	nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);

	nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d",
		nvt->cir_addr, nvt->cir_irq);
}
/* set up the CIR WAKE logical device: ACPI wake routing and io base */
static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
{
	/* Select ACPI logical device and enable it */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	/* Enable CIR Wake via PSOUT# (Pin60) */
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);

	/* enable pme interrupt of cir wakeup event */
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	/* Select CIR Wake logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	nvt_set_ioaddr(nvt, &nvt->cir_wake_addr);

	nvt_dbg("CIR Wake initialized, base io port address: 0x%lx",
		nvt->cir_wake_addr);
}
  400. /* clear out the hardware's cir rx fifo */
  401. static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
  402. {
  403. u8 val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
  404. nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
  405. }
/* clear out the hardware's cir wake rx fifo */
static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
{
	u8 val, config;

	/* remember the current mode so it can be restored afterwards */
	config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);

	/* clearing wake fifo works in learning mode only */
	nvt_cir_wake_reg_write(nvt, config & ~CIR_WAKE_IRCON_MODE0,
			       CIR_WAKE_IRCON);

	val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
	nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
			       CIR_WAKE_FIFOCON);

	/* restore the previous mode */
	nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
}
  419. /* clear out the hardware's cir tx fifo */
  420. static void nvt_clear_tx_fifo(struct nvt_dev *nvt)
  421. {
  422. u8 val;
  423. val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
  424. nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON);
  425. }
  426. /* enable RX Trigger Level Reach and Packet End interrupts */
  427. static void nvt_set_cir_iren(struct nvt_dev *nvt)
  428. {
  429. u8 iren;
  430. iren = CIR_IREN_RTR | CIR_IREN_PE | CIR_IREN_RFO;
  431. nvt_cir_reg_write(nvt, iren, CIR_IREN);
  432. }
/* program the CIR runtime registers into their operational state */
static void nvt_cir_regs_init(struct nvt_dev *nvt)
{
	/* set sample limit count (PE interrupt raised when reached) */
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH);
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL);

	/* set fifo irq trigger levels */
	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV |
			  CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON);

	/*
	 * Enable TX and RX, specify carrier on = low, off = high, and set
	 * sample period (currently 50us)
	 */
	nvt_cir_reg_write(nvt,
			  CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	/* clear any and all stray interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* and finally, enable interrupts */
	nvt_set_cir_iren(nvt);

	/* enable the CIR logical device */
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);
}
/* program the CIR WAKE runtime registers into their operational state */
static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
{
	/*
	 * Disable RX, set specific carrier on = low, off = high,
	 * and sample period (currently 50us)
	 */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);

	/* clear any and all stray interrupts */
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);

	/* enable the CIR WAKE logical device */
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
}
/*
 * Arm CIR wakeup: route the wake event through the ACPI logical device,
 * enable RX on the wake block, and leave its interrupt enables cleared
 * (IREN written 0) so wake is signalled via PME rather than an irq.
 */
static void nvt_enable_wake(struct nvt_dev *nvt)
{
	unsigned long flags;

	nvt_efm_enable(nvt);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	spin_lock_irqsave(&nvt->nvt_lock, flags);

	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
	nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);

	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
}
#if 0 /* Currently unused */
/* rx carrier detect only works in learning mode, must be called w/nvt_lock */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
	u32 count, carrier, duration = 0;
	int i;

	/* hardware carrier cycle count accumulated over the sample window */
	count = nvt_cir_reg_read(nvt, CIR_FCCL) |
		nvt_cir_reg_read(nvt, CIR_FCCH) << 8;

	/* total pulse time, in sample periods, across the buffered packets */
	for (i = 0; i < nvt->pkts; i++) {
		if (nvt->buf[i] & BUF_PULSE_BIT)
			duration += nvt->buf[i] & BUF_LEN_MASK;
	}

	duration *= SAMPLE_PERIOD;

	if (!count || !duration) {
		dev_notice(&nvt->pdev->dev,
			   "Unable to determine carrier! (c:%u, d:%u)",
			   count, duration);
		return 0;
	}

	/* carrier = cycles / time; NOTE(review): uses MS_TO_NS on the cycle
	 * count - confirm the intended unit conversion before re-enabling */
	carrier = MS_TO_NS(count) / duration;

	if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
		nvt_dbg("WTF? Carrier frequency out of range!");

	nvt_dbg("Carrier frequency: %u (count %u, duration %u)",
		carrier, count, duration);

	return carrier;
}
#endif
  520. /*
  521. * set carrier frequency
  522. *
  523. * set carrier on 2 registers: CP & CC
  524. * always set CP as 0x81
  525. * set CC by SPEC, CC = 3MHz/carrier - 1
  526. */
  527. static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
  528. {
  529. struct nvt_dev *nvt = dev->priv;
  530. u16 val;
  531. if (carrier == 0)
  532. return -EINVAL;
  533. nvt_cir_reg_write(nvt, 1, CIR_CP);
  534. val = 3000000 / (carrier) - 1;
  535. nvt_cir_reg_write(nvt, val & 0xff, CIR_CC);
  536. nvt_dbg("cp: 0x%x cc: 0x%x\n",
  537. nvt_cir_reg_read(nvt, CIR_CP), nvt_cir_reg_read(nvt, CIR_CC));
  538. return 0;
  539. }
/*
 * nvt_tx_ir
 *
 * 1) clean TX fifo first (handled by AP)
 * 2) copy data from user space
 * 3) disable RX interrupts, enable TX interrupts: TTR & TFU
 * 4) send 9 packets to TX FIFO to open TTR
 * in interrupt_handler:
 * 5) send all data out
 * go back to write():
 * 6) disable TX interrupts, re-enable RX interrupts
 *
 * The key problem of this function is that user space data may be larger
 * than the driver's data buf length. So nvt_tx_ir() will only copy
 * TX_BUF_LEN data to buf, and keep the current copied data buf num in
 * cur_buf_num. But the driver's buf number may be larger than TXFCONT
 * (0xff). So in interrupt_handler, it has to set TXFCONT as 0xff, until
 * buf_count is less than 0xff.
 */
static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n)
{
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;
	unsigned int i;
	u8 iren;
	int ret;

	spin_lock_irqsave(&nvt->tx.lock, flags);

	/* clamp to the driver buffer; ret is the sample count we accepted */
	ret = min((unsigned)(TX_BUF_LEN / sizeof(unsigned)), n);
	nvt->tx.buf_count = (ret * sizeof(unsigned));

	memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count);

	nvt->tx.cur_buf_num = 0;

	/* save currently enabled interrupts */
	iren = nvt_cir_reg_read(nvt, CIR_IREN);

	/* now disable all interrupts, save TFU & TTR */
	nvt_cir_reg_write(nvt, CIR_IREN_TFU | CIR_IREN_TTR, CIR_IREN);

	nvt->tx.tx_state = ST_TX_REPLY;

	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV_8 |
			  CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);

	/* trigger TTR interrupt by writing out ones, (yes, it's ugly) */
	for (i = 0; i < 9; i++)
		nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO);

	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	/* block until the isr has drained the buffer (ST_TX_REQUEST) */
	wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST);

	spin_lock_irqsave(&nvt->tx.lock, flags);
	nvt->tx.tx_state = ST_TX_NONE;
	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	/* restore enabled interrupts to prior state */
	nvt_cir_reg_write(nvt, iren, CIR_IREN);

	return ret;
}
  589. /* dump contents of the last rx buffer we got from the hw rx fifo */
  590. static void nvt_dump_rx_buf(struct nvt_dev *nvt)
  591. {
  592. int i;
  593. printk(KERN_DEBUG "%s (len %d): ", __func__, nvt->pkts);
  594. for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++)
  595. printk(KERN_CONT "0x%02x ", nvt->buf[i]);
  596. printk(KERN_CONT "\n");
  597. }
/*
 * Process raw data in rx driver buffer, store it in raw IR event kfifo,
 * trigger decode when appropriate.
 *
 * We get IR data samples one byte at a time. If the msb is set, its a pulse,
 * otherwise its a space. The lower 7 bits are the count of SAMPLE_PERIOD
 * (default 50us) intervals for that pulse/space. A discrete signal is
 * followed by a series of 0x7f packets, then either 0x7<something> or 0x80
 * to signal more IR coming (repeats) or end of IR, respectively. We store
 * sample data in the raw event kfifo until we see 0x7<something> (except f)
 * or 0x80, at which time, we trigger a decode operation.
 */
static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
	DEFINE_IR_RAW_EVENT(rawir);
	u8 sample;
	int i;

	nvt_dbg_verbose("%s firing", __func__);

	if (debug)
		nvt_dump_rx_buf(nvt);

	nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);

	for (i = 0; i < nvt->pkts; i++) {
		sample = nvt->buf[i];

		/* msb = pulse/space flag, low 7 bits = sample count */
		rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
		rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
				 * SAMPLE_PERIOD);

		nvt_dbg("Storing %s with duration %d",
			rawir.pulse ? "pulse" : "space", rawir.duration);

		ir_raw_event_store_with_filter(nvt->rdev, &rawir);
	}

	/* buffer fully consumed */
	nvt->pkts = 0;

	nvt_dbg("Calling ir_raw_event_handle\n");
	ir_raw_event_handle(nvt->rdev);

	nvt_dbg_verbose("%s done", __func__);
}
/* on RX overrun, drop everything buffered so far and resync the decoder */
static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
{
	dev_warn(&nvt->pdev->dev, "RX FIFO overrun detected, flushing data!");

	nvt->pkts = 0;
	nvt_clear_cir_fifo(nvt);
	ir_raw_event_reset(nvt->rdev);
}
  640. /* copy data from hardware rx fifo into driver buffer */
  641. static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
  642. {
  643. u8 fifocount;
  644. int i;
  645. /* Get count of how many bytes to read from RX FIFO */
  646. fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);
  647. nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);
  648. /* Read fifocount bytes from CIR Sample RX FIFO register */
  649. for (i = 0; i < fifocount; i++)
  650. nvt->buf[i] = nvt_cir_reg_read(nvt, CIR_SRXFIFO);
  651. nvt->pkts = fifocount;
  652. nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);
  653. nvt_process_rx_ir_data(nvt);
  654. }
/* decode the IRSTS/IREN bit pairs into a human-readable debug line */
static void nvt_cir_log_irqs(u8 status, u8 iren)
{
	nvt_dbg("IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s",
		status, iren,
		status & CIR_IRSTS_RDR ? " RDR" : "",
		status & CIR_IRSTS_RTR ? " RTR" : "",
		status & CIR_IRSTS_PE ? " PE" : "",
		status & CIR_IRSTS_RFO ? " RFO" : "",
		status & CIR_IRSTS_TE ? " TE" : "",
		status & CIR_IRSTS_TTR ? " TTR" : "",
		status & CIR_IRSTS_TFU ? " TFU" : "",
		status & CIR_IRSTS_GH ? " GH" : "",
		/* any bit outside the known set prints " ?" */
		status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE |
			   CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR |
			   CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
}
  671. static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
  672. {
  673. unsigned long flags;
  674. u8 tx_state;
  675. spin_lock_irqsave(&nvt->tx.lock, flags);
  676. tx_state = nvt->tx.tx_state;
  677. spin_unlock_irqrestore(&nvt->tx.lock, flags);
  678. return tx_state == ST_TX_NONE;
  679. }
/* interrupt service routine for incoming and outgoing CIR data */
static irqreturn_t nvt_cir_isr(int irq, void *data)
{
	struct nvt_dev *nvt = data;
	u8 status, iren;
	unsigned long flags;

	nvt_dbg_verbose("%s firing", __func__);

	spin_lock_irqsave(&nvt->nvt_lock, flags);

	/*
	 * Get IR Status register contents. Write 1 to ack/clear
	 *
	 * bit: reg name - description
	 *   7: CIR_IRSTS_RDR - RX Data Ready
	 *   6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach
	 *   5: CIR_IRSTS_PE  - Packet End
	 *   4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set)
	 *   3: CIR_IRSTS_TE  - TX FIFO Empty
	 *   2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach
	 *   1: CIR_IRSTS_TFU - TX FIFO Underrun
	 *   0: CIR_IRSTS_GH  - Min Length Detected
	 */
	status = nvt_cir_reg_read(nvt, CIR_IRSTS);
	iren = nvt_cir_reg_read(nvt, CIR_IREN);

	/* At least NCT6779D creates a spurious interrupt when the
	 * logical device is being disabled; both registers then read
	 * as all-ones, which no valid state produces.
	 */
	if (status == 0xff && iren == 0xff) {
		spin_unlock_irqrestore(&nvt->nvt_lock, flags);
		nvt_dbg_verbose("Spurious interrupt detected");
		return IRQ_HANDLED;
	}

	/* IRQ may be shared with CIR WAKE, therefore check for each
	 * status bit whether the related interrupt source is enabled
	 */
	if (!(status & iren)) {
		spin_unlock_irqrestore(&nvt->nvt_lock, flags);
		nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
		return IRQ_NONE;	/* not ours; let the other handler run */
	}

	/* ack/clear all irq flags we've got (write-1-to-clear, then zero) */
	nvt_cir_reg_write(nvt, status, CIR_IRSTS);
	nvt_cir_reg_write(nvt, 0, CIR_IRSTS);

	nvt_cir_log_irqs(status, iren);

	/* RX overrun takes precedence over normal sample fetching */
	if (status & CIR_IRSTS_RFO)
		nvt_handle_rx_fifo_overrun(nvt);
	else if (status & (CIR_IRSTS_RTR | CIR_IRSTS_PE)) {
		/* We only do rx if not tx'ing */
		if (nvt_cir_tx_inactive(nvt))
			nvt_get_rx_ir_data(nvt);
	}

	/* TX work below uses tx.lock only; drop nvt_lock first */
	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	if (status & CIR_IRSTS_TE)
		nvt_clear_tx_fifo(nvt);

	if (status & CIR_IRSTS_TTR) {
		unsigned int pos, count;
		u8 tmp;

		spin_lock_irqsave(&nvt->tx.lock, flags);

		pos = nvt->tx.cur_buf_num;
		count = nvt->tx.buf_count;

		/* Write data into the hardware tx fifo while pos < count */
		if (pos < count) {
			/* one byte per TTR interrupt */
			nvt_cir_reg_write(nvt, nvt->tx.buf[pos], CIR_STXFIFO);
			nvt->tx.cur_buf_num++;
		/* Disable TX FIFO Trigger Level Reach (TTR) interrupt */
		} else {
			tmp = nvt_cir_reg_read(nvt, CIR_IREN);
			nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN);
		}

		spin_unlock_irqrestore(&nvt->tx.lock, flags);
	}

	if (status & CIR_IRSTS_TFU) {
		spin_lock_irqsave(&nvt->tx.lock, flags);
		if (nvt->tx.tx_state == ST_TX_REPLY) {
			/* underrun ends the reply phase; wake the tx waiter */
			nvt->tx.tx_state = ST_TX_REQUEST;
			wake_up(&nvt->tx.queue);
		}
		spin_unlock_irqrestore(&nvt->tx.lock, flags);
	}

	nvt_dbg_verbose("%s done", __func__);
	return IRQ_HANDLED;
}
/* Quiesce the CIR function: mask and ack interrupts, disable all CIR
 * functions, flush both hardware FIFOs, then power down the logical device.
 */
static void nvt_disable_cir(struct nvt_dev *nvt)
{
	unsigned long flags;

	spin_lock_irqsave(&nvt->nvt_lock, flags);

	/* disable CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	/* clear any and all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* clear all function enable flags */
	nvt_cir_reg_write(nvt, 0, CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	/* disable the CIR logical device (must be outside the spinlock;
	 * presumably this touches the EFM config space — confirm)
	 */
	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
}
/* rc_dev open callback: enable the CIR function blocks and interrupts,
 * then power up the logical device. Always succeeds (returns 0).
 */
static int nvt_open(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nvt->nvt_lock, flags);

	/* set function enable flags: tx, rx, inverted rx, sample period */
	nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	/* clear all pending interrupts before unmasking */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* enable interrupts */
	nvt_set_cir_iren(nvt);

	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	/* enable the CIR logical device */
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);

	return 0;
}
  796. static void nvt_close(struct rc_dev *dev)
  797. {
  798. struct nvt_dev *nvt = dev->priv;
  799. nvt_disable_cir(nvt);
  800. }
/* Allocate memory, probe hardware, and initialize everything */
static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
	struct nvt_dev *nvt;
	struct rc_dev *rdev;
	int ret = -ENOMEM;

	/* device-managed allocation: freed automatically on probe
	 * failure or device removal */
	nvt = devm_kzalloc(&pdev->dev, sizeof(struct nvt_dev), GFP_KERNEL);
	if (!nvt)
		return ret;

	/* input device for IR remote (and tx) */
	rdev = rc_allocate_device();
	if (!rdev)
		goto exit_free_dev_rdev;

	ret = -ENODEV;
	/* activate pnp device */
	if (pnp_activate_dev(pdev) < 0) {
		dev_err(&pdev->dev, "Could not activate PNP device!\n");
		goto exit_free_dev_rdev;
	}

	/* validate pnp resources: port 0 = CIR, port 1 = CIR WAKE,
	 * irq 0 serves both (requested IRQF_SHARED below) */
	if (!pnp_port_valid(pdev, 0) ||
	    pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "IR PNP Port not valid!\n");
		goto exit_free_dev_rdev;
	}

	if (!pnp_irq_valid(pdev, 0)) {
		dev_err(&pdev->dev, "PNP IRQ not valid!\n");
		goto exit_free_dev_rdev;
	}

	if (!pnp_port_valid(pdev, 1) ||
	    pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
		goto exit_free_dev_rdev;
	}

	nvt->cir_addr = pnp_port_start(pdev, 0);
	nvt->cir_irq = pnp_irq(pdev, 0);

	nvt->cir_wake_addr = pnp_port_start(pdev, 1);

	/* Extended Function Mode index/data register pair */
	nvt->cr_efir = CR_EFIR;
	nvt->cr_efdr = CR_EFDR;

	spin_lock_init(&nvt->nvt_lock);
	spin_lock_init(&nvt->tx.lock);

	pnp_set_drvdata(pdev, nvt);
	nvt->pdev = pdev;

	init_waitqueue_head(&nvt->tx.queue);

	/* identify the chip; bails out on unsupported hardware */
	ret = nvt_hw_detect(nvt);
	if (ret)
		goto exit_free_dev_rdev;

	/* Initialize CIR & CIR Wake Logical Devices */
	nvt_efm_enable(nvt);
	nvt_cir_ldev_init(nvt);
	nvt_cir_wake_ldev_init(nvt);
	nvt_efm_disable(nvt);

	/*
	 * Initialize CIR & CIR Wake Config Registers
	 * and enable logical devices
	 */
	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	/* Set up the rc device */
	rdev->priv = nvt;
	rdev->driver_type = RC_DRIVER_IR_RAW;
	rdev->allowed_protocols = RC_BIT_ALL;
	rdev->open = nvt_open;
	rdev->close = nvt_close;
	rdev->tx_ir = nvt_tx_ir;
	rdev->s_tx_carrier = nvt_set_tx_carrier;
	rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
	rdev->input_phys = "nuvoton/cir0";
	rdev->input_id.bustype = BUS_HOST;
	rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2;
	rdev->input_id.product = nvt->chip_major;
	rdev->input_id.version = nvt->chip_minor;
	rdev->dev.parent = &pdev->dev;
	rdev->driver_name = NVT_DRIVER_NAME;
	rdev->map_name = RC_MAP_RC6_MCE;
	rdev->timeout = MS_TO_NS(100);
	/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
	rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
#if 0
	rdev->min_timeout = XYZ;
	rdev->max_timeout = XYZ;
	/* tx bits */
	rdev->tx_resolution = XYZ;
#endif
	nvt->rdev = rdev;

	ret = rc_register_device(rdev);
	if (ret)
		goto exit_free_dev_rdev;

	ret = -EBUSY;
	/* now claim resources (devm-managed, released on detach) */
	if (!devm_request_region(&pdev->dev, nvt->cir_addr,
				 CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
		goto exit_unregister_device;

	if (devm_request_irq(&pdev->dev, nvt->cir_irq, nvt_cir_isr,
			     IRQF_SHARED, NVT_DRIVER_NAME, (void *)nvt))
		goto exit_unregister_device;

	if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr,
				 CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake"))
		goto exit_unregister_device;

	ret = device_create_file(&rdev->dev, &dev_attr_wakeup_data);
	if (ret)
		goto exit_unregister_device;

	device_init_wakeup(&pdev->dev, true);

	dev_notice(&pdev->dev, "driver has been successfully loaded\n");
	if (debug) {
		cir_dump_regs(nvt);
		cir_wake_dump_regs(nvt);
	}

	return 0;

exit_unregister_device:
	rc_unregister_device(rdev);
	/* NULLed so the fall-through rc_free_device() below is a no-op;
	 * presumably rc_unregister_device() already released rdev — confirm
	 * against the rc-core version this tree uses */
	rdev = NULL;
exit_free_dev_rdev:
	rc_free_device(rdev);

	return ret;
}
/* PNP remove callback: tear down sysfs attribute, quiesce the CIR
 * hardware, arm wake-on-IR, then unregister the rc device.
 */
static void nvt_remove(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	/* remove the wakeup_data attribute created in probe */
	device_remove_file(&nvt->rdev->dev, &dev_attr_wakeup_data);

	nvt_disable_cir(nvt);

	/* enable CIR Wake (for IR power-on) */
	nvt_enable_wake(nvt);

	rc_unregister_device(nvt->rdev);
}
/* PNP suspend callback: abort any in-flight tx, mask CIR interrupts,
 * power down the CIR logical device and arm wake-on-IR.
 */
static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);
	unsigned long flags;

	nvt_dbg("%s called", __func__);

	/* reset tx state so the isr won't feed the tx fifo after resume */
	spin_lock_irqsave(&nvt->tx.lock, flags);
	nvt->tx.tx_state = ST_TX_NONE;
	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	spin_lock_irqsave(&nvt->nvt_lock, flags);

	/* disable all CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	/* disable cir logical dev */
	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);

	/* make sure wake is enabled */
	nvt_enable_wake(nvt);

	return 0;
}
/* PNP resume callback: re-run the CIR and CIR WAKE register init done
 * in probe (hardware state may have been lost across suspend).
 */
static int nvt_resume(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_dbg("%s called", __func__);

	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	return 0;
}
  952. static void nvt_shutdown(struct pnp_dev *pdev)
  953. {
  954. struct nvt_dev *nvt = pnp_get_drvdata(pdev);
  955. nvt_enable_wake(nvt);
  956. }
/* PNP IDs this driver binds to; the empty entry terminates the table */
static const struct pnp_device_id nvt_ids[] = {
	{ "WEC0530", 0 },   /* CIR */
	{ "NTN0530", 0 },   /* CIR for new chip's pnp id*/
	{ "", 0 },
};
/* PNP driver ops; RES_DO_NOT_CHANGE keeps firmware-assigned resources */
static struct pnp_driver nvt_driver = {
	.name		= NVT_DRIVER_NAME,
	.id_table	= nvt_ids,
	.flags		= PNP_DRIVER_RES_DO_NOT_CHANGE,
	.probe		= nvt_probe,
	.remove		= nvt_remove,
	.suspend	= nvt_suspend,
	.resume		= nvt_resume,
	.shutdown	= nvt_shutdown,
};
/* "debug" is readable by all, writable by root (runtime-tunable) */
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging output");

MODULE_DEVICE_TABLE(pnp, nvt_ids);
MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");

MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
MODULE_LICENSE("GPL");

/* registers nvt_driver on load, unregisters on unload */
module_pnp_driver(nvt_driver);