/* sh_cmt.c */
/*
 * SuperH Timer Support - CMT
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct sh_cmt_device;
/*
 * The CMT comes in 5 different identified flavours, depending not only on the
 * SoC but also on the particular instance. The following table lists the main
 * characteristics of those flavours.
 *
 *			16B	32B	32B-F	48B	48B-2
 * -----------------------------------------------------------------------------
 * Channels		2	1/4	1	6	2/8
 * Control Width	16	16	16	16	32
 * Counter Width	16	32	32	32/48	32/48
 * Shared Start/Stop	Y	Y	Y	Y	N
 *
 * The 48-bit gen2 version has a per-channel start/stop register located in the
 * channel registers block. All other versions have a shared start/stop register
 * located in the global space.
 *
 * Channels are indexed from 0 to N-1 in the documentation. The channel index
 * infers the start/stop bit position in the control register and the channel
 * registers block address. Some CMT instances have a subset of channels
 * available, in which case the index in the documentation doesn't match the
 * "real" index as implemented in hardware. This is for instance the case with
 * CMT0 on r8a7740, which is a 32-bit variant with a single channel numbered 0
 * in the documentation but using start/stop bit 5 and having its registers
 * block at 0x60.
 *
 * Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit
 * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable.
 */
  62. enum sh_cmt_model {
  63. SH_CMT_16BIT,
  64. SH_CMT_32BIT,
  65. SH_CMT_32BIT_FAST,
  66. SH_CMT_48BIT,
  67. SH_CMT_48BIT_GEN2,
  68. };
  69. struct sh_cmt_info {
  70. enum sh_cmt_model model;
  71. unsigned long width; /* 16 or 32 bit version of hardware block */
  72. u32 overflow_bit;
  73. u32 clear_bits;
  74. /* callbacks for CMSTR and CMCSR access */
  75. u32 (*read_control)(void __iomem *base, unsigned long offs);
  76. void (*write_control)(void __iomem *base, unsigned long offs,
  77. u32 value);
  78. /* callbacks for CMCNT and CMCOR access */
  79. u32 (*read_count)(void __iomem *base, unsigned long offs);
  80. void (*write_count)(void __iomem *base, unsigned long offs, u32 value);
  81. };
  82. struct sh_cmt_channel {
  83. struct sh_cmt_device *cmt;
  84. unsigned int index; /* Index in the documentation */
  85. unsigned int hwidx; /* Real hardware index */
  86. void __iomem *iostart;
  87. void __iomem *ioctrl;
  88. unsigned int timer_bit;
  89. unsigned long flags;
  90. u32 match_value;
  91. u32 next_match_value;
  92. u32 max_match_value;
  93. raw_spinlock_t lock;
  94. struct clock_event_device ced;
  95. struct clocksource cs;
  96. u64 total_cycles;
  97. bool cs_enabled;
  98. };
  99. struct sh_cmt_device {
  100. struct platform_device *pdev;
  101. const struct sh_cmt_info *info;
  102. void __iomem *mapbase;
  103. struct clk *clk;
  104. unsigned long rate;
  105. raw_spinlock_t lock; /* Protect the shared start/stop register */
  106. struct sh_cmt_channel *channels;
  107. unsigned int num_channels;
  108. unsigned int hw_channels;
  109. bool has_clockevent;
  110. bool has_clocksource;
  111. };
  112. #define SH_CMT16_CMCSR_CMF (1 << 7)
  113. #define SH_CMT16_CMCSR_CMIE (1 << 6)
  114. #define SH_CMT16_CMCSR_CKS8 (0 << 0)
  115. #define SH_CMT16_CMCSR_CKS32 (1 << 0)
  116. #define SH_CMT16_CMCSR_CKS128 (2 << 0)
  117. #define SH_CMT16_CMCSR_CKS512 (3 << 0)
  118. #define SH_CMT16_CMCSR_CKS_MASK (3 << 0)
  119. #define SH_CMT32_CMCSR_CMF (1 << 15)
  120. #define SH_CMT32_CMCSR_OVF (1 << 14)
  121. #define SH_CMT32_CMCSR_WRFLG (1 << 13)
  122. #define SH_CMT32_CMCSR_STTF (1 << 12)
  123. #define SH_CMT32_CMCSR_STPF (1 << 11)
  124. #define SH_CMT32_CMCSR_SSIE (1 << 10)
  125. #define SH_CMT32_CMCSR_CMS (1 << 9)
  126. #define SH_CMT32_CMCSR_CMM (1 << 8)
  127. #define SH_CMT32_CMCSR_CMTOUT_IE (1 << 7)
  128. #define SH_CMT32_CMCSR_CMR_NONE (0 << 4)
  129. #define SH_CMT32_CMCSR_CMR_DMA (1 << 4)
  130. #define SH_CMT32_CMCSR_CMR_IRQ (2 << 4)
  131. #define SH_CMT32_CMCSR_CMR_MASK (3 << 4)
  132. #define SH_CMT32_CMCSR_DBGIVD (1 << 3)
  133. #define SH_CMT32_CMCSR_CKS_RCLK8 (4 << 0)
  134. #define SH_CMT32_CMCSR_CKS_RCLK32 (5 << 0)
  135. #define SH_CMT32_CMCSR_CKS_RCLK128 (6 << 0)
  136. #define SH_CMT32_CMCSR_CKS_RCLK1 (7 << 0)
  137. #define SH_CMT32_CMCSR_CKS_MASK (7 << 0)
  138. static u32 sh_cmt_read16(void __iomem *base, unsigned long offs)
  139. {
  140. return ioread16(base + (offs << 1));
  141. }
  142. static u32 sh_cmt_read32(void __iomem *base, unsigned long offs)
  143. {
  144. return ioread32(base + (offs << 2));
  145. }
  146. static void sh_cmt_write16(void __iomem *base, unsigned long offs, u32 value)
  147. {
  148. iowrite16(value, base + (offs << 1));
  149. }
  150. static void sh_cmt_write32(void __iomem *base, unsigned long offs, u32 value)
  151. {
  152. iowrite32(value, base + (offs << 2));
  153. }
  154. static const struct sh_cmt_info sh_cmt_info[] = {
  155. [SH_CMT_16BIT] = {
  156. .model = SH_CMT_16BIT,
  157. .width = 16,
  158. .overflow_bit = SH_CMT16_CMCSR_CMF,
  159. .clear_bits = ~SH_CMT16_CMCSR_CMF,
  160. .read_control = sh_cmt_read16,
  161. .write_control = sh_cmt_write16,
  162. .read_count = sh_cmt_read16,
  163. .write_count = sh_cmt_write16,
  164. },
  165. [SH_CMT_32BIT] = {
  166. .model = SH_CMT_32BIT,
  167. .width = 32,
  168. .overflow_bit = SH_CMT32_CMCSR_CMF,
  169. .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
  170. .read_control = sh_cmt_read16,
  171. .write_control = sh_cmt_write16,
  172. .read_count = sh_cmt_read32,
  173. .write_count = sh_cmt_write32,
  174. },
  175. [SH_CMT_32BIT_FAST] = {
  176. .model = SH_CMT_32BIT_FAST,
  177. .width = 32,
  178. .overflow_bit = SH_CMT32_CMCSR_CMF,
  179. .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
  180. .read_control = sh_cmt_read16,
  181. .write_control = sh_cmt_write16,
  182. .read_count = sh_cmt_read32,
  183. .write_count = sh_cmt_write32,
  184. },
  185. [SH_CMT_48BIT] = {
  186. .model = SH_CMT_48BIT,
  187. .width = 32,
  188. .overflow_bit = SH_CMT32_CMCSR_CMF,
  189. .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
  190. .read_control = sh_cmt_read32,
  191. .write_control = sh_cmt_write32,
  192. .read_count = sh_cmt_read32,
  193. .write_count = sh_cmt_write32,
  194. },
  195. [SH_CMT_48BIT_GEN2] = {
  196. .model = SH_CMT_48BIT_GEN2,
  197. .width = 32,
  198. .overflow_bit = SH_CMT32_CMCSR_CMF,
  199. .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
  200. .read_control = sh_cmt_read32,
  201. .write_control = sh_cmt_write32,
  202. .read_count = sh_cmt_read32,
  203. .write_count = sh_cmt_write32,
  204. },
  205. };
  206. #define CMCSR 0 /* channel register */
  207. #define CMCNT 1 /* channel register */
  208. #define CMCOR 2 /* channel register */
  209. static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
  210. {
  211. if (ch->iostart)
  212. return ch->cmt->info->read_control(ch->iostart, 0);
  213. else
  214. return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
  215. }
  216. static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
  217. {
  218. if (ch->iostart)
  219. ch->cmt->info->write_control(ch->iostart, 0, value);
  220. else
  221. ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
  222. }
  223. static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
  224. {
  225. return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
  226. }
  227. static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
  228. {
  229. ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
  230. }
  231. static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
  232. {
  233. return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
  234. }
  235. static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
  236. {
  237. ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
  238. }
  239. static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
  240. {
  241. ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
  242. }
  243. static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
  244. {
  245. u32 v1, v2, v3;
  246. u32 o1, o2;
  247. o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
  248. /* Make sure the timer value is stable. Stolen from acpi_pm.c */
  249. do {
  250. o2 = o1;
  251. v1 = sh_cmt_read_cmcnt(ch);
  252. v2 = sh_cmt_read_cmcnt(ch);
  253. v3 = sh_cmt_read_cmcnt(ch);
  254. o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
  255. } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
  256. || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
  257. *has_wrapped = o1;
  258. return v2;
  259. }
  260. static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
  261. {
  262. unsigned long flags;
  263. u32 value;
  264. /* start stop register shared by multiple timer channels */
  265. raw_spin_lock_irqsave(&ch->cmt->lock, flags);
  266. value = sh_cmt_read_cmstr(ch);
  267. if (start)
  268. value |= 1 << ch->timer_bit;
  269. else
  270. value &= ~(1 << ch->timer_bit);
  271. sh_cmt_write_cmstr(ch, value);
  272. raw_spin_unlock_irqrestore(&ch->cmt->lock, flags);
  273. }
  274. static int sh_cmt_enable(struct sh_cmt_channel *ch)
  275. {
  276. int k, ret;
  277. pm_runtime_get_sync(&ch->cmt->pdev->dev);
  278. dev_pm_syscore_device(&ch->cmt->pdev->dev, true);
  279. /* enable clock */
  280. ret = clk_enable(ch->cmt->clk);
  281. if (ret) {
  282. dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
  283. ch->index);
  284. goto err0;
  285. }
  286. /* make sure channel is disabled */
  287. sh_cmt_start_stop_ch(ch, 0);
  288. /* configure channel, periodic mode and maximum timeout */
  289. if (ch->cmt->info->width == 16) {
  290. sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
  291. SH_CMT16_CMCSR_CKS512);
  292. } else {
  293. sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM |
  294. SH_CMT32_CMCSR_CMTOUT_IE |
  295. SH_CMT32_CMCSR_CMR_IRQ |
  296. SH_CMT32_CMCSR_CKS_RCLK8);
  297. }
  298. sh_cmt_write_cmcor(ch, 0xffffffff);
  299. sh_cmt_write_cmcnt(ch, 0);
  300. /*
  301. * According to the sh73a0 user's manual, as CMCNT can be operated
  302. * only by the RCLK (Pseudo 32 KHz), there's one restriction on
  303. * modifying CMCNT register; two RCLK cycles are necessary before
  304. * this register is either read or any modification of the value
  305. * it holds is reflected in the LSI's actual operation.
  306. *
  307. * While at it, we're supposed to clear out the CMCNT as of this
  308. * moment, so make sure it's processed properly here. This will
  309. * take RCLKx2 at maximum.
  310. */
  311. for (k = 0; k < 100; k++) {
  312. if (!sh_cmt_read_cmcnt(ch))
  313. break;
  314. udelay(1);
  315. }
  316. if (sh_cmt_read_cmcnt(ch)) {
  317. dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
  318. ch->index);
  319. ret = -ETIMEDOUT;
  320. goto err1;
  321. }
  322. /* enable channel */
  323. sh_cmt_start_stop_ch(ch, 1);
  324. return 0;
  325. err1:
  326. /* stop clock */
  327. clk_disable(ch->cmt->clk);
  328. err0:
  329. return ret;
  330. }
  331. static void sh_cmt_disable(struct sh_cmt_channel *ch)
  332. {
  333. /* disable channel */
  334. sh_cmt_start_stop_ch(ch, 0);
  335. /* disable interrupts in CMT block */
  336. sh_cmt_write_cmcsr(ch, 0);
  337. /* stop clock */
  338. clk_disable(ch->cmt->clk);
  339. dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
  340. pm_runtime_put(&ch->cmt->pdev->dev);
  341. }
  342. /* private flags */
  343. #define FLAG_CLOCKEVENT (1 << 0)
  344. #define FLAG_CLOCKSOURCE (1 << 1)
  345. #define FLAG_REPROGRAM (1 << 2)
  346. #define FLAG_SKIPEVENT (1 << 3)
  347. #define FLAG_IRQCONTEXT (1 << 4)
  348. static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
  349. int absolute)
  350. {
  351. u32 value = ch->next_match_value;
  352. u32 new_match;
  353. u32 delay = 0;
  354. u32 now = 0;
  355. u32 has_wrapped;
  356. now = sh_cmt_get_counter(ch, &has_wrapped);
  357. ch->flags |= FLAG_REPROGRAM; /* force reprogram */
  358. if (has_wrapped) {
  359. /* we're competing with the interrupt handler.
  360. * -> let the interrupt handler reprogram the timer.
  361. * -> interrupt number two handles the event.
  362. */
  363. ch->flags |= FLAG_SKIPEVENT;
  364. return;
  365. }
  366. if (absolute)
  367. now = 0;
  368. do {
  369. /* reprogram the timer hardware,
  370. * but don't save the new match value yet.
  371. */
  372. new_match = now + value + delay;
  373. if (new_match > ch->max_match_value)
  374. new_match = ch->max_match_value;
  375. sh_cmt_write_cmcor(ch, new_match);
  376. now = sh_cmt_get_counter(ch, &has_wrapped);
  377. if (has_wrapped && (new_match > ch->match_value)) {
  378. /* we are changing to a greater match value,
  379. * so this wrap must be caused by the counter
  380. * matching the old value.
  381. * -> first interrupt reprograms the timer.
  382. * -> interrupt number two handles the event.
  383. */
  384. ch->flags |= FLAG_SKIPEVENT;
  385. break;
  386. }
  387. if (has_wrapped) {
  388. /* we are changing to a smaller match value,
  389. * so the wrap must be caused by the counter
  390. * matching the new value.
  391. * -> save programmed match value.
  392. * -> let isr handle the event.
  393. */
  394. ch->match_value = new_match;
  395. break;
  396. }
  397. /* be safe: verify hardware settings */
  398. if (now < new_match) {
  399. /* timer value is below match value, all good.
  400. * this makes sure we won't miss any match events.
  401. * -> save programmed match value.
  402. * -> let isr handle the event.
  403. */
  404. ch->match_value = new_match;
  405. break;
  406. }
  407. /* the counter has reached a value greater
  408. * than our new match value. and since the
  409. * has_wrapped flag isn't set we must have
  410. * programmed a too close event.
  411. * -> increase delay and retry.
  412. */
  413. if (delay)
  414. delay <<= 1;
  415. else
  416. delay = 1;
  417. if (!delay)
  418. dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n",
  419. ch->index);
  420. } while (delay);
  421. }
  422. static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
  423. {
  424. if (delta > ch->max_match_value)
  425. dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n",
  426. ch->index);
  427. ch->next_match_value = delta;
  428. sh_cmt_clock_event_program_verify(ch, 0);
  429. }
  430. static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
  431. {
  432. unsigned long flags;
  433. raw_spin_lock_irqsave(&ch->lock, flags);
  434. __sh_cmt_set_next(ch, delta);
  435. raw_spin_unlock_irqrestore(&ch->lock, flags);
  436. }
  437. static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
  438. {
  439. struct sh_cmt_channel *ch = dev_id;
  440. /* clear flags */
  441. sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
  442. ch->cmt->info->clear_bits);
  443. /* update clock source counter to begin with if enabled
  444. * the wrap flag should be cleared by the timer specific
  445. * isr before we end up here.
  446. */
  447. if (ch->flags & FLAG_CLOCKSOURCE)
  448. ch->total_cycles += ch->match_value + 1;
  449. if (!(ch->flags & FLAG_REPROGRAM))
  450. ch->next_match_value = ch->max_match_value;
  451. ch->flags |= FLAG_IRQCONTEXT;
  452. if (ch->flags & FLAG_CLOCKEVENT) {
  453. if (!(ch->flags & FLAG_SKIPEVENT)) {
  454. if (clockevent_state_oneshot(&ch->ced)) {
  455. ch->next_match_value = ch->max_match_value;
  456. ch->flags |= FLAG_REPROGRAM;
  457. }
  458. ch->ced.event_handler(&ch->ced);
  459. }
  460. }
  461. ch->flags &= ~FLAG_SKIPEVENT;
  462. if (ch->flags & FLAG_REPROGRAM) {
  463. ch->flags &= ~FLAG_REPROGRAM;
  464. sh_cmt_clock_event_program_verify(ch, 1);
  465. if (ch->flags & FLAG_CLOCKEVENT)
  466. if ((clockevent_state_shutdown(&ch->ced))
  467. || (ch->match_value == ch->next_match_value))
  468. ch->flags &= ~FLAG_REPROGRAM;
  469. }
  470. ch->flags &= ~FLAG_IRQCONTEXT;
  471. return IRQ_HANDLED;
  472. }
  473. static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
  474. {
  475. int ret = 0;
  476. unsigned long flags;
  477. raw_spin_lock_irqsave(&ch->lock, flags);
  478. if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
  479. ret = sh_cmt_enable(ch);
  480. if (ret)
  481. goto out;
  482. ch->flags |= flag;
  483. /* setup timeout if no clockevent */
  484. if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
  485. __sh_cmt_set_next(ch, ch->max_match_value);
  486. out:
  487. raw_spin_unlock_irqrestore(&ch->lock, flags);
  488. return ret;
  489. }
  490. static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
  491. {
  492. unsigned long flags;
  493. unsigned long f;
  494. raw_spin_lock_irqsave(&ch->lock, flags);
  495. f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
  496. ch->flags &= ~flag;
  497. if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
  498. sh_cmt_disable(ch);
  499. /* adjust the timeout to maximum if only clocksource left */
  500. if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
  501. __sh_cmt_set_next(ch, ch->max_match_value);
  502. raw_spin_unlock_irqrestore(&ch->lock, flags);
  503. }
  504. static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
  505. {
  506. return container_of(cs, struct sh_cmt_channel, cs);
  507. }
  508. static u64 sh_cmt_clocksource_read(struct clocksource *cs)
  509. {
  510. struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
  511. unsigned long flags;
  512. u32 has_wrapped;
  513. u64 value;
  514. u32 raw;
  515. raw_spin_lock_irqsave(&ch->lock, flags);
  516. value = ch->total_cycles;
  517. raw = sh_cmt_get_counter(ch, &has_wrapped);
  518. if (unlikely(has_wrapped))
  519. raw += ch->match_value + 1;
  520. raw_spin_unlock_irqrestore(&ch->lock, flags);
  521. return value + raw;
  522. }
  523. static int sh_cmt_clocksource_enable(struct clocksource *cs)
  524. {
  525. int ret;
  526. struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
  527. WARN_ON(ch->cs_enabled);
  528. ch->total_cycles = 0;
  529. ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
  530. if (!ret)
  531. ch->cs_enabled = true;
  532. return ret;
  533. }
  534. static void sh_cmt_clocksource_disable(struct clocksource *cs)
  535. {
  536. struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
  537. WARN_ON(!ch->cs_enabled);
  538. sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
  539. ch->cs_enabled = false;
  540. }
  541. static void sh_cmt_clocksource_suspend(struct clocksource *cs)
  542. {
  543. struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
  544. if (!ch->cs_enabled)
  545. return;
  546. sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
  547. pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
  548. }
  549. static void sh_cmt_clocksource_resume(struct clocksource *cs)
  550. {
  551. struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
  552. if (!ch->cs_enabled)
  553. return;
  554. pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
  555. sh_cmt_start(ch, FLAG_CLOCKSOURCE);
  556. }
  557. static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
  558. const char *name)
  559. {
  560. struct clocksource *cs = &ch->cs;
  561. cs->name = name;
  562. cs->rating = 125;
  563. cs->read = sh_cmt_clocksource_read;
  564. cs->enable = sh_cmt_clocksource_enable;
  565. cs->disable = sh_cmt_clocksource_disable;
  566. cs->suspend = sh_cmt_clocksource_suspend;
  567. cs->resume = sh_cmt_clocksource_resume;
  568. cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
  569. cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
  570. dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
  571. ch->index);
  572. clocksource_register_hz(cs, ch->cmt->rate);
  573. return 0;
  574. }
  575. static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
  576. {
  577. return container_of(ced, struct sh_cmt_channel, ced);
  578. }
  579. static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
  580. {
  581. sh_cmt_start(ch, FLAG_CLOCKEVENT);
  582. if (periodic)
  583. sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1);
  584. else
  585. sh_cmt_set_next(ch, ch->max_match_value);
  586. }
  587. static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
  588. {
  589. struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
  590. sh_cmt_stop(ch, FLAG_CLOCKEVENT);
  591. return 0;
  592. }
  593. static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
  594. int periodic)
  595. {
  596. struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
  597. /* deal with old setting first */
  598. if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
  599. sh_cmt_stop(ch, FLAG_CLOCKEVENT);
  600. dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
  601. ch->index, periodic ? "periodic" : "oneshot");
  602. sh_cmt_clock_event_start(ch, periodic);
  603. return 0;
  604. }
  605. static int sh_cmt_clock_event_set_oneshot(struct clock_event_device *ced)
  606. {
  607. return sh_cmt_clock_event_set_state(ced, 0);
  608. }
  609. static int sh_cmt_clock_event_set_periodic(struct clock_event_device *ced)
  610. {
  611. return sh_cmt_clock_event_set_state(ced, 1);
  612. }
  613. static int sh_cmt_clock_event_next(unsigned long delta,
  614. struct clock_event_device *ced)
  615. {
  616. struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
  617. BUG_ON(!clockevent_state_oneshot(ced));
  618. if (likely(ch->flags & FLAG_IRQCONTEXT))
  619. ch->next_match_value = delta - 1;
  620. else
  621. sh_cmt_set_next(ch, delta - 1);
  622. return 0;
  623. }
  624. static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
  625. {
  626. struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
  627. pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
  628. clk_unprepare(ch->cmt->clk);
  629. }
  630. static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
  631. {
  632. struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
  633. clk_prepare(ch->cmt->clk);
  634. pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
  635. }
  636. static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
  637. const char *name)
  638. {
  639. struct clock_event_device *ced = &ch->ced;
  640. int irq;
  641. int ret;
  642. irq = platform_get_irq(ch->cmt->pdev, ch->index);
  643. if (irq < 0) {
  644. dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n",
  645. ch->index);
  646. return irq;
  647. }
  648. ret = request_irq(irq, sh_cmt_interrupt,
  649. IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
  650. dev_name(&ch->cmt->pdev->dev), ch);
  651. if (ret) {
  652. dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n",
  653. ch->index, irq);
  654. return ret;
  655. }
  656. ced->name = name;
  657. ced->features = CLOCK_EVT_FEAT_PERIODIC;
  658. ced->features |= CLOCK_EVT_FEAT_ONESHOT;
  659. ced->rating = 125;
  660. ced->cpumask = cpu_possible_mask;
  661. ced->set_next_event = sh_cmt_clock_event_next;
  662. ced->set_state_shutdown = sh_cmt_clock_event_shutdown;
  663. ced->set_state_periodic = sh_cmt_clock_event_set_periodic;
  664. ced->set_state_oneshot = sh_cmt_clock_event_set_oneshot;
  665. ced->suspend = sh_cmt_clock_event_suspend;
  666. ced->resume = sh_cmt_clock_event_resume;
  667. /* TODO: calculate good shift from rate and counter bit width */
  668. ced->shift = 32;
  669. ced->mult = div_sc(ch->cmt->rate, NSEC_PER_SEC, ced->shift);
  670. ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);
  671. ced->max_delta_ticks = ch->max_match_value;
  672. ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
  673. ced->min_delta_ticks = 0x1f;
  674. dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n",
  675. ch->index);
  676. clockevents_register_device(ced);
  677. return 0;
  678. }
  679. static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name,
  680. bool clockevent, bool clocksource)
  681. {
  682. int ret;
  683. if (clockevent) {
  684. ch->cmt->has_clockevent = true;
  685. ret = sh_cmt_register_clockevent(ch, name);
  686. if (ret < 0)
  687. return ret;
  688. }
  689. if (clocksource) {
  690. ch->cmt->has_clocksource = true;
  691. sh_cmt_register_clocksource(ch, name);
  692. }
  693. return 0;
  694. }
  695. static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
  696. unsigned int hwidx, bool clockevent,
  697. bool clocksource, struct sh_cmt_device *cmt)
  698. {
  699. int ret;
  700. /* Skip unused channels. */
  701. if (!clockevent && !clocksource)
  702. return 0;
  703. ch->cmt = cmt;
  704. ch->index = index;
  705. ch->hwidx = hwidx;
  706. /*
  707. * Compute the address of the channel control register block. For the
  708. * timers with a per-channel start/stop register, compute its address
  709. * as well.
  710. */
  711. switch (cmt->info->model) {
  712. case SH_CMT_16BIT:
  713. ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
  714. break;
  715. case SH_CMT_32BIT:
  716. case SH_CMT_48BIT:
  717. ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
  718. break;
  719. case SH_CMT_32BIT_FAST:
  720. /*
  721. * The 32-bit "fast" timer has a single channel at hwidx 5 but
  722. * is located at offset 0x40 instead of 0x60 for some reason.
  723. */
  724. ch->ioctrl = cmt->mapbase + 0x40;
  725. break;
  726. case SH_CMT_48BIT_GEN2:
  727. ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
  728. ch->ioctrl = ch->iostart + 0x10;
  729. break;
  730. }
  731. if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
  732. ch->max_match_value = ~0;
  733. else
  734. ch->max_match_value = (1 << cmt->info->width) - 1;
  735. ch->match_value = ch->max_match_value;
  736. raw_spin_lock_init(&ch->lock);
  737. ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 ? 0 : ch->hwidx;
  738. ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
  739. clockevent, clocksource);
  740. if (ret) {
  741. dev_err(&cmt->pdev->dev, "ch%u: registration failed\n",
  742. ch->index);
  743. return ret;
  744. }
  745. ch->cs_enabled = false;
  746. return 0;
  747. }
  748. static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
  749. {
  750. struct resource *mem;
  751. mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
  752. if (!mem) {
  753. dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
  754. return -ENXIO;
  755. }
  756. cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem));
  757. if (cmt->mapbase == NULL) {
  758. dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
  759. return -ENXIO;
  760. }
  761. return 0;
  762. }
  763. static const struct platform_device_id sh_cmt_id_table[] = {
  764. { "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
  765. { "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
  766. { }
  767. };
  768. MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
/*
 * Device-tree bindings: each compatible string maps to the
 * sh_cmt_info entry describing that CMT hardware generation.
 * __maybe_unused because the table is compiled out when CONFIG_OF=n.
 */
static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
	{ .compatible = "renesas,cmt-32", .data = &sh_cmt_info[SH_CMT_32BIT] },
	{ .compatible = "renesas,cmt-32-fast", .data = &sh_cmt_info[SH_CMT_32BIT_FAST] },
	{ .compatible = "renesas,cmt-48", .data = &sh_cmt_info[SH_CMT_48BIT] },
	{ .compatible = "renesas,cmt-48-gen2", .data = &sh_cmt_info[SH_CMT_48BIT_GEN2] },
	{ } /* sentinel */
};
MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
  777. static int sh_cmt_parse_dt(struct sh_cmt_device *cmt)
  778. {
  779. struct device_node *np = cmt->pdev->dev.of_node;
  780. return of_property_read_u32(np, "renesas,channels-mask",
  781. &cmt->hw_channels);
  782. }
  783. static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
  784. {
  785. unsigned int mask;
  786. unsigned int i;
  787. int ret;
  788. cmt->pdev = pdev;
  789. raw_spin_lock_init(&cmt->lock);
  790. if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
  791. const struct of_device_id *id;
  792. id = of_match_node(sh_cmt_of_table, pdev->dev.of_node);
  793. cmt->info = id->data;
  794. ret = sh_cmt_parse_dt(cmt);
  795. if (ret < 0)
  796. return ret;
  797. } else if (pdev->dev.platform_data) {
  798. struct sh_timer_config *cfg = pdev->dev.platform_data;
  799. const struct platform_device_id *id = pdev->id_entry;
  800. cmt->info = (const struct sh_cmt_info *)id->driver_data;
  801. cmt->hw_channels = cfg->channels_mask;
  802. } else {
  803. dev_err(&cmt->pdev->dev, "missing platform data\n");
  804. return -ENXIO;
  805. }
  806. /* Get hold of clock. */
  807. cmt->clk = clk_get(&cmt->pdev->dev, "fck");
  808. if (IS_ERR(cmt->clk)) {
  809. dev_err(&cmt->pdev->dev, "cannot get clock\n");
  810. return PTR_ERR(cmt->clk);
  811. }
  812. ret = clk_prepare(cmt->clk);
  813. if (ret < 0)
  814. goto err_clk_put;
  815. /* Determine clock rate. */
  816. ret = clk_enable(cmt->clk);
  817. if (ret < 0)
  818. goto err_clk_unprepare;
  819. if (cmt->info->width == 16)
  820. cmt->rate = clk_get_rate(cmt->clk) / 512;
  821. else
  822. cmt->rate = clk_get_rate(cmt->clk) / 8;
  823. clk_disable(cmt->clk);
  824. /* Map the memory resource(s). */
  825. ret = sh_cmt_map_memory(cmt);
  826. if (ret < 0)
  827. goto err_clk_unprepare;
  828. /* Allocate and setup the channels. */
  829. cmt->num_channels = hweight8(cmt->hw_channels);
  830. cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels),
  831. GFP_KERNEL);
  832. if (cmt->channels == NULL) {
  833. ret = -ENOMEM;
  834. goto err_unmap;
  835. }
  836. /*
  837. * Use the first channel as a clock event device and the second channel
  838. * as a clock source. If only one channel is available use it for both.
  839. */
  840. for (i = 0, mask = cmt->hw_channels; i < cmt->num_channels; ++i) {
  841. unsigned int hwidx = ffs(mask) - 1;
  842. bool clocksource = i == 1 || cmt->num_channels == 1;
  843. bool clockevent = i == 0;
  844. ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
  845. clockevent, clocksource, cmt);
  846. if (ret < 0)
  847. goto err_unmap;
  848. mask &= ~(1 << hwidx);
  849. }
  850. platform_set_drvdata(pdev, cmt);
  851. return 0;
  852. err_unmap:
  853. kfree(cmt->channels);
  854. iounmap(cmt->mapbase);
  855. err_clk_unprepare:
  856. clk_unprepare(cmt->clk);
  857. err_clk_put:
  858. clk_put(cmt->clk);
  859. return ret;
  860. }
  861. static int sh_cmt_probe(struct platform_device *pdev)
  862. {
  863. struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
  864. int ret;
  865. if (!is_early_platform_device(pdev)) {
  866. pm_runtime_set_active(&pdev->dev);
  867. pm_runtime_enable(&pdev->dev);
  868. }
  869. if (cmt) {
  870. dev_info(&pdev->dev, "kept as earlytimer\n");
  871. goto out;
  872. }
  873. cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
  874. if (cmt == NULL)
  875. return -ENOMEM;
  876. ret = sh_cmt_setup(cmt, pdev);
  877. if (ret) {
  878. kfree(cmt);
  879. pm_runtime_idle(&pdev->dev);
  880. return ret;
  881. }
  882. if (is_early_platform_device(pdev))
  883. return 0;
  884. out:
  885. if (cmt->has_clockevent || cmt->has_clocksource)
  886. pm_runtime_irq_safe(&pdev->dev);
  887. else
  888. pm_runtime_idle(&pdev->dev);
  889. return 0;
  890. }
/*
 * Removal is refused: registered clockevent/clocksource devices cannot
 * be torn down once the system timekeeping code may be using them.
 */
static int sh_cmt_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}
/*
 * Platform driver glue.  of_match_ptr() compiles the DT table away on
 * CONFIG_OF=n kernels; the id_table serves legacy board-file bindings.
 */
static struct platform_driver sh_cmt_device_driver = {
	.probe		= sh_cmt_probe,
	.remove		= sh_cmt_remove,
	.driver		= {
		.name	= "sh_cmt",
		.of_match_table = of_match_ptr(sh_cmt_of_table),
	},
	.id_table	= sh_cmt_id_table,
};
/* Module init: register the platform driver with the driver core. */
static int __init sh_cmt_init(void)
{
	return platform_driver_register(&sh_cmt_device_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit sh_cmt_exit(void)
{
	platform_driver_unregister(&sh_cmt_device_driver);
}
/*
 * Register for early platform probing so the timer can be used as
 * "earlytimer" before the driver core is fully up; the normal driver
 * registration then happens at subsys_initcall time.
 */
early_platform_init("earlytimer", &sh_cmt_device_driver);
subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");