/* (web-extraction artifacts — page title and line-number runs — removed) */
  1. /*
  2. * drivers/w1/masters/omap_hdq.c
  3. *
  4. * Copyright (C) 2007 Texas Instruments, Inc.
  5. *
  6. * This file is licensed under the terms of the GNU General Public License
  7. * version 2. This program is licensed "as is" without any warranty of any
  8. * kind, whether express or implied.
  9. *
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/module.h>
  13. #include <linux/platform_device.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/slab.h>
  16. #include <linux/err.h>
  17. #include <linux/clk.h>
  18. #include <linux/io.h>
  19. #include <linux/sched.h>
  20. #include <asm/irq.h>
  21. #include <mach/hardware.h>
  22. #include "../w1.h"
  23. #include "../w1_int.h"
#define MOD_NAME "OMAP_HDQ:"

/* HDQ controller register offsets */
#define OMAP_HDQ_REVISION			0x00
#define OMAP_HDQ_TX_DATA			0x04
#define OMAP_HDQ_RX_DATA			0x08
#define OMAP_HDQ_CTRL_STATUS			0x0c
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	(1<<6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	(1<<5)
#define OMAP_HDQ_CTRL_STATUS_GO			(1<<4)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	(1<<2)
#define OMAP_HDQ_CTRL_STATUS_DIR		(1<<1)
#define OMAP_HDQ_CTRL_STATUS_MODE		(1<<0)
#define OMAP_HDQ_INT_STATUS			0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		(1<<2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		(1<<1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT		(1<<0)
#define OMAP_HDQ_SYSCONFIG			0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET		(1<<1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE		(1<<0)
#define OMAP_HDQ_SYSSTATUS			0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE		(1<<0)

/* flag_set arguments for hdq_wait_for_flag(): wait-for-clear vs. wait-for-set */
#define OMAP_HDQ_FLAG_CLEAR			0
#define OMAP_HDQ_FLAG_SET			1

/* Wait budget used for both polling and IRQ waits: HZ/5 jiffies = 200 ms */
#define OMAP_HDQ_TIMEOUT			(HZ/5)

/* Upper bound for the use count managed by omap_hdq_get()/omap_hdq_put() */
#define OMAP_HDQ_MAX_USER			4
/* Waiters in hdq_write_byte()/omap_hdq_break() sleep here; hdq_isr() wakes them */
static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
/* Module parameter: slave id reported by omap_w1_search_bus(); 0 selects 0x1 */
static int w1_id;

struct hdq_data {
	struct device	*dev;
	void __iomem	*hdq_base;	/* ioremap()ed register base */
	/* lock status update */
	struct mutex	hdq_mutex;
	int		hdq_usecount;	/* active users, capped at OMAP_HDQ_MAX_USER */
	struct clk	*hdq_ick;	/* interface clock */
	struct clk	*hdq_fck;	/* functional clock */
	u8		hdq_irqstatus;	/* last INT_STATUS value latched by hdq_isr() */
	/* device lock */
	spinlock_t	hdq_spinlock;
	/*
	 * Used to control the call to omap_hdq_get and omap_hdq_put.
	 * HDQ Protocol: Write the CMD|REG_address first, followed by
	 * the data write or read.
	 */
	int		init_trans;
};
static int __devinit omap_hdq_probe(struct platform_device *pdev);
static int omap_hdq_remove(struct platform_device *pdev);

static struct platform_driver omap_hdq_driver = {
	.probe	= omap_hdq_probe,
	.remove	= omap_hdq_remove,
	.driver	= {
		.name = "omap_hdq",
	},
};

/* 1-wire transport operations implemented on top of the HDQ controller */
static u8 omap_w1_read_byte(void *_hdq);
static void omap_w1_write_byte(void *_hdq, u8 byte);
static u8 omap_w1_reset_bus(void *_hdq);
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
	u8 search_type, w1_slave_found_callback slave_found);

static struct w1_bus_master omap_w1_master = {
	.read_byte	= omap_w1_read_byte,
	.write_byte	= omap_w1_write_byte,
	.reset_bus	= omap_w1_reset_bus,
	.search		= omap_w1_search_bus,
};
/* HDQ register I/O routines */

/* Read the byte-wide HDQ register at @offset */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
	return __raw_readb(hdq_data->hdq_base + offset);
}

/* Write @val to the byte-wide HDQ register at @offset */
static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
	__raw_writeb(val, hdq_data->hdq_base + offset);
}

/*
 * Read-modify-write: replace the @mask bits of the register at @offset
 * with the corresponding bits of @val; returns the value written back.
 */
static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
		u8 val, u8 mask)
{
	u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask)
			| (val & mask);
	__raw_writeb(new_val, hdq_data->hdq_base + offset);
	return new_val;
}
  105. /*
  106. * Wait for one or more bits in flag change.
  107. * HDQ_FLAG_SET: wait until any bit in the flag is set.
  108. * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
  109. * return 0 on success and -ETIMEDOUT in the case of timeout.
  110. */
  111. static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
  112. u8 flag, u8 flag_set, u8 *status)
  113. {
  114. int ret = 0;
  115. unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
  116. if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
  117. /* wait for the flag clear */
  118. while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
  119. && time_before(jiffies, timeout)) {
  120. schedule_timeout_uninterruptible(1);
  121. }
  122. if (*status & flag)
  123. ret = -ETIMEDOUT;
  124. } else if (flag_set == OMAP_HDQ_FLAG_SET) {
  125. /* wait for the flag set */
  126. while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
  127. && time_before(jiffies, timeout)) {
  128. schedule_timeout_uninterruptible(1);
  129. }
  130. if (!(*status & flag))
  131. ret = -ETIMEDOUT;
  132. } else
  133. return -EINVAL;
  134. return ret;
  135. }
/*
 * Write out a byte and fill *status with HDQ_INT_STATUS.
 * Arms the transmitter, waits for the ISR to flag completion, then waits
 * for the GO bit to drop.  Returns 0 on success, -ETIMEDOUT if TXCOMPLETE
 * never appeared in the latched status.
 * NOTE(review): when wait_event_timeout() itself expires, ret is 0 (the
 * wait's return value) and that 0 is returned — indistinguishable from
 * success.  Looks like it should be -ETIMEDOUT; confirm against callers
 * before changing, since omap_w1_write_byte() keys off this value.
 */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;
	unsigned long irqflags;

	*status = 0;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit (DIR cleared in the same write selects transmit) */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		goto out;
	}

	*status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev, "timeout waiting for"
			"TXCOMPLETE/RXCOMPLETE, %x", *status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev, "timeout waiting GO bit"
			"return to zero, %x", tmp_status);
	}

out:
	return ret;
}
  179. /* HDQ Interrupt service routine */
  180. static irqreturn_t hdq_isr(int irq, void *_hdq)
  181. {
  182. struct hdq_data *hdq_data = _hdq;
  183. unsigned long irqflags;
  184. spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
  185. hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
  186. spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
  187. dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);
  188. if (hdq_data->hdq_irqstatus &
  189. (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
  190. | OMAP_HDQ_INT_STATUS_TIMEOUT)) {
  191. /* wake up sleeping process */
  192. wake_up(&hdq_wait_queue);
  193. }
  194. return IRQ_HANDLED;
  195. }
/*
 * HDQ Mode: always return success.
 * w1 core reset_bus callback; the real initialization pulse is issued by
 * omap_hdq_break(), so there is nothing to do here.
 */
static u8 omap_w1_reset_bus(void *_hdq)
{
	return 0;
}
  201. /* W1 search callback function */
  202. static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
  203. u8 search_type, w1_slave_found_callback slave_found)
  204. {
  205. u64 module_id, rn_le, cs, id;
  206. if (w1_id)
  207. module_id = w1_id;
  208. else
  209. module_id = 0x1;
  210. rn_le = cpu_to_le64(module_id);
  211. /*
  212. * HDQ might not obey truly the 1-wire spec.
  213. * So calculate CRC based on module parameter.
  214. */
  215. cs = w1_calc_crc8((u8 *)&rn_le, 7);
  216. id = (cs << 56) | module_id;
  217. slave_found(master_dev, id);
  218. }
/*
 * Soft-reset the HDQ block and wait for SYSSTATUS.RESETDONE.  On success,
 * re-select HDQ mode with the interrupt mask enabled and turn on autoidle.
 * Returns 0 on success, -ETIMEDOUT if reset never completes.
 * Called from omap_hdq_get() with hdq_mutex held.
 */
static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
	int ret;
	u8 tmp_status;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
	/*
	 * Select HDQ mode & enable clocks.
	 * It is observed that INT flags can't be cleared via a read and GO/INIT
	 * won't return to zero if interrupt is disabled. So we always enable
	 * interrupt.
	 */
	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* wait for reset to complete */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
				tmp_status);
	else {
		/* reset done: re-program mode/interrupt and enable autoidle */
		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
			OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
			OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	}

	return ret;
}
/*
 * Issue break pulse to the device: set INIT|GO, wait for the ISR to latch
 * the TIMEOUT status bit (which signals break completion on this IP), then
 * wait for INIT and GO to return to zero.
 * Returns 0 on success or a negative errno.
 * NOTE(review): an expired wait_event_timeout() is reported as -EINTR
 * rather than -ETIMEDOUT — confirm callers don't distinguish the two.
 */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
	int ret = 0;
	u8 tmp_status;
	unsigned long irqflags;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		ret = -EINTR;
		goto rtn;
	}

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	/* set the INIT and GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO);

	/* wait for the TIMEOUT bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "break wait elapsed\n");
		ret = -EINTR;
		goto out;
	}

	tmp_status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
				tmp_status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * wait for both INIT and GO bits return to zero.
	 * zero wait time expected for interrupt mode.
	 */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
		&tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
			"return to zero, %x", tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}
  303. static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
  304. {
  305. int ret = 0;
  306. u8 status;
  307. unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
  308. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  309. if (ret < 0) {
  310. ret = -EINTR;
  311. goto rtn;
  312. }
  313. if (!hdq_data->hdq_usecount) {
  314. ret = -EINVAL;
  315. goto out;
  316. }
  317. if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
  318. hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
  319. OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
  320. OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
  321. /*
  322. * The RX comes immediately after TX. It
  323. * triggers another interrupt before we
  324. * sleep. So we have to wait for RXCOMPLETE bit.
  325. */
  326. while (!(hdq_data->hdq_irqstatus
  327. & OMAP_HDQ_INT_STATUS_RXCOMPLETE)
  328. && time_before(jiffies, timeout)) {
  329. schedule_timeout_uninterruptible(1);
  330. }
  331. hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
  332. OMAP_HDQ_CTRL_STATUS_DIR);
  333. status = hdq_data->hdq_irqstatus;
  334. /* check irqstatus */
  335. if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
  336. dev_dbg(hdq_data->dev, "timeout waiting for"
  337. "RXCOMPLETE, %x", status);
  338. ret = -ETIMEDOUT;
  339. goto out;
  340. }
  341. }
  342. /* the data is ready. Read it in! */
  343. *val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
  344. out:
  345. mutex_unlock(&hdq_data->hdq_mutex);
  346. rtn:
  347. return 0;
  348. }
  349. /* Enable clocks and set the controller to HDQ mode */
  350. static int omap_hdq_get(struct hdq_data *hdq_data)
  351. {
  352. int ret = 0;
  353. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  354. if (ret < 0) {
  355. ret = -EINTR;
  356. goto rtn;
  357. }
  358. if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
  359. dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
  360. ret = -EINVAL;
  361. goto out;
  362. } else {
  363. hdq_data->hdq_usecount++;
  364. try_module_get(THIS_MODULE);
  365. if (1 == hdq_data->hdq_usecount) {
  366. if (clk_enable(hdq_data->hdq_ick)) {
  367. dev_dbg(hdq_data->dev, "Can not enable ick\n");
  368. ret = -ENODEV;
  369. goto clk_err;
  370. }
  371. if (clk_enable(hdq_data->hdq_fck)) {
  372. dev_dbg(hdq_data->dev, "Can not enable fck\n");
  373. clk_disable(hdq_data->hdq_ick);
  374. ret = -ENODEV;
  375. goto clk_err;
  376. }
  377. /* make sure HDQ is out of reset */
  378. if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
  379. OMAP_HDQ_SYSSTATUS_RESETDONE)) {
  380. ret = _omap_hdq_reset(hdq_data);
  381. if (ret)
  382. /* back up the count */
  383. hdq_data->hdq_usecount--;
  384. } else {
  385. /* select HDQ mode & enable clocks */
  386. hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
  387. OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
  388. OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
  389. hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
  390. OMAP_HDQ_SYSCONFIG_AUTOIDLE);
  391. hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
  392. }
  393. }
  394. }
  395. clk_err:
  396. clk_put(hdq_data->hdq_ick);
  397. clk_put(hdq_data->hdq_fck);
  398. out:
  399. mutex_unlock(&hdq_data->hdq_mutex);
  400. rtn:
  401. return ret;
  402. }
  403. /* Disable clocks to the module */
  404. static int omap_hdq_put(struct hdq_data *hdq_data)
  405. {
  406. int ret = 0;
  407. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  408. if (ret < 0)
  409. return -EINTR;
  410. if (0 == hdq_data->hdq_usecount) {
  411. dev_dbg(hdq_data->dev, "attempt to decrement use count"
  412. "when it is zero");
  413. ret = -EINVAL;
  414. } else {
  415. hdq_data->hdq_usecount--;
  416. module_put(THIS_MODULE);
  417. if (0 == hdq_data->hdq_usecount) {
  418. clk_disable(hdq_data->hdq_ick);
  419. clk_disable(hdq_data->hdq_fck);
  420. }
  421. }
  422. mutex_unlock(&hdq_data->hdq_mutex);
  423. return ret;
  424. }
  425. /* Read a byte of data from the device */
  426. static u8 omap_w1_read_byte(void *_hdq)
  427. {
  428. struct hdq_data *hdq_data = _hdq;
  429. u8 val = 0;
  430. int ret;
  431. ret = hdq_read_byte(hdq_data, &val);
  432. if (ret) {
  433. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  434. if (ret < 0) {
  435. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  436. return -EINTR;
  437. }
  438. hdq_data->init_trans = 0;
  439. mutex_unlock(&hdq_data->hdq_mutex);
  440. omap_hdq_put(hdq_data);
  441. return -1;
  442. }
  443. /* Write followed by a read, release the module */
  444. if (hdq_data->init_trans) {
  445. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  446. if (ret < 0) {
  447. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  448. return -EINTR;
  449. }
  450. hdq_data->init_trans = 0;
  451. mutex_unlock(&hdq_data->hdq_mutex);
  452. omap_hdq_put(hdq_data);
  453. }
  454. return val;
  455. }
  456. /* Write a byte of data to the device */
  457. static void omap_w1_write_byte(void *_hdq, u8 byte)
  458. {
  459. struct hdq_data *hdq_data = _hdq;
  460. int ret;
  461. u8 status;
  462. /* First write to initialize the transfer */
  463. if (hdq_data->init_trans == 0)
  464. omap_hdq_get(hdq_data);
  465. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  466. if (ret < 0) {
  467. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  468. return;
  469. }
  470. hdq_data->init_trans++;
  471. mutex_unlock(&hdq_data->hdq_mutex);
  472. ret = hdq_write_byte(hdq_data, byte, &status);
  473. if (ret == 0) {
  474. dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
  475. return;
  476. }
  477. /* Second write, data transferred. Release the module */
  478. if (hdq_data->init_trans > 1) {
  479. omap_hdq_put(hdq_data);
  480. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  481. if (ret < 0) {
  482. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  483. return;
  484. }
  485. hdq_data->init_trans = 0;
  486. mutex_unlock(&hdq_data->hdq_mutex);
  487. }
  488. return;
  489. }
  490. static int __devinit omap_hdq_probe(struct platform_device *pdev)
  491. {
  492. struct hdq_data *hdq_data;
  493. struct resource *res;
  494. int ret, irq;
  495. u8 rev;
  496. hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
  497. if (!hdq_data) {
  498. dev_dbg(&pdev->dev, "unable to allocate memory\n");
  499. ret = -ENOMEM;
  500. goto err_kmalloc;
  501. }
  502. hdq_data->dev = &pdev->dev;
  503. platform_set_drvdata(pdev, hdq_data);
  504. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  505. if (!res) {
  506. dev_dbg(&pdev->dev, "unable to get resource\n");
  507. ret = -ENXIO;
  508. goto err_resource;
  509. }
  510. hdq_data->hdq_base = ioremap(res->start, SZ_4K);
  511. if (!hdq_data->hdq_base) {
  512. dev_dbg(&pdev->dev, "ioremap failed\n");
  513. ret = -EINVAL;
  514. goto err_ioremap;
  515. }
  516. /* get interface & functional clock objects */
  517. hdq_data->hdq_ick = clk_get(&pdev->dev, "ick");
  518. if (IS_ERR(hdq_data->hdq_ick)) {
  519. dev_dbg(&pdev->dev, "Can't get HDQ ick clock object\n");
  520. ret = PTR_ERR(hdq_data->hdq_ick);
  521. goto err_ick;
  522. }
  523. hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
  524. if (IS_ERR(hdq_data->hdq_fck)) {
  525. dev_dbg(&pdev->dev, "Can't get HDQ fck clock object\n");
  526. ret = PTR_ERR(hdq_data->hdq_fck);
  527. goto err_fck;
  528. }
  529. hdq_data->hdq_usecount = 0;
  530. mutex_init(&hdq_data->hdq_mutex);
  531. if (clk_enable(hdq_data->hdq_ick)) {
  532. dev_dbg(&pdev->dev, "Can not enable ick\n");
  533. ret = -ENODEV;
  534. goto err_intfclk;
  535. }
  536. if (clk_enable(hdq_data->hdq_fck)) {
  537. dev_dbg(&pdev->dev, "Can not enable fck\n");
  538. ret = -ENODEV;
  539. goto err_fnclk;
  540. }
  541. rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
  542. dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
  543. (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");
  544. spin_lock_init(&hdq_data->hdq_spinlock);
  545. irq = platform_get_irq(pdev, 0);
  546. if (irq < 0) {
  547. ret = -ENXIO;
  548. goto err_irq;
  549. }
  550. ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
  551. if (ret < 0) {
  552. dev_dbg(&pdev->dev, "could not request irq\n");
  553. goto err_irq;
  554. }
  555. omap_hdq_break(hdq_data);
  556. /* don't clock the HDQ until it is needed */
  557. clk_disable(hdq_data->hdq_ick);
  558. clk_disable(hdq_data->hdq_fck);
  559. omap_w1_master.data = hdq_data;
  560. ret = w1_add_master_device(&omap_w1_master);
  561. if (ret) {
  562. dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
  563. goto err_w1;
  564. }
  565. return 0;
  566. err_w1:
  567. err_irq:
  568. clk_disable(hdq_data->hdq_fck);
  569. err_fnclk:
  570. clk_disable(hdq_data->hdq_ick);
  571. err_intfclk:
  572. clk_put(hdq_data->hdq_fck);
  573. err_fck:
  574. clk_put(hdq_data->hdq_ick);
  575. err_ick:
  576. iounmap(hdq_data->hdq_base);
  577. err_ioremap:
  578. err_resource:
  579. platform_set_drvdata(pdev, NULL);
  580. kfree(hdq_data);
  581. err_kmalloc:
  582. return ret;
  583. }
  584. static int omap_hdq_remove(struct platform_device *pdev)
  585. {
  586. struct hdq_data *hdq_data = platform_get_drvdata(pdev);
  587. mutex_lock(&hdq_data->hdq_mutex);
  588. if (hdq_data->hdq_usecount) {
  589. dev_dbg(&pdev->dev, "removed when use count is not zero\n");
  590. mutex_unlock(&hdq_data->hdq_mutex);
  591. return -EBUSY;
  592. }
  593. mutex_unlock(&hdq_data->hdq_mutex);
  594. /* remove module dependency */
  595. clk_put(hdq_data->hdq_ick);
  596. clk_put(hdq_data->hdq_fck);
  597. free_irq(INT_24XX_HDQ_IRQ, hdq_data);
  598. platform_set_drvdata(pdev, NULL);
  599. iounmap(hdq_data->hdq_base);
  600. kfree(hdq_data);
  601. return 0;
  602. }
  603. static int __init
  604. omap_hdq_init(void)
  605. {
  606. return platform_driver_register(&omap_hdq_driver);
  607. }
  608. module_init(omap_hdq_init);
  609. static void __exit
  610. omap_hdq_exit(void)
  611. {
  612. platform_driver_unregister(&omap_hdq_driver);
  613. }
  614. module_exit(omap_hdq_exit);
/* 1-wire slave id used by omap_w1_search_bus(); 0 falls back to 0x1 */
module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection");

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ driver Library");
MODULE_LICENSE("GPL");