/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

enum sh_tmu_model {
	SH_TMU,
	SH_TMU_SH3,
};

struct sh_tmu_device;

struct sh_tmu_channel {
	struct sh_tmu_device *tmu;
	unsigned int index;

	void __iomem *base;
	int irq;

	unsigned long rate;
	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};

struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;

	enum sh_tmu_model model;

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};

#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT  1 /* channel register */
#define TCR   2 /* channel register */

#define TCR_UNF			(1 << 8)
#define TCR_UNIE		(1 << 5)
#define TCR_TPSC_CLK4		(0 << 0)
#define TCR_TPSC_CLK16		(1 << 0)
#define TCR_TPSC_CLK64		(2 << 0)
#define TCR_TPSC_CLK256		(3 << 0)
#define TCR_TPSC_CLK1024	(4 << 0)
#define TCR_TPSC_MASK		(7 << 0)
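
/*
 * Register layout, as encoded by the accessors below: TSTR is a shared
 * 8-bit start/stop register in the device's common area (offset 2 on
 * SH-3 style devices, offset 4 otherwise), while TCOR, TCNT and TCR live
 * in each channel's own area at a 4-byte stride. TCR is the only 16-bit
 * channel register; TCOR and TCNT are 32-bit.
 */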
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return ioread8(ch->tmu->mapbase + 2);
		case SH_TMU:
			return ioread8(ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return iowrite8(value, ch->tmu->mapbase + 2);
		case SH_TMU:
			return iowrite8(value, ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}
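
/*
 * Each channel owns one bit in the shared TSTR register; setting it
 * starts that channel's counter, clearing it stops it. The
 * read/modify/write sequence must be serialized across channels, hence
 * the raw spinlock in struct sh_tmu_device.
 */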
static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&ch->tmu->lock, flags);
	value = sh_tmu_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
}

static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	ch->rate = clk_get_rate(ch->tmu->clk) / 4;
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}

static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
	if (ch->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&ch->tmu->pdev->dev);
	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

	return __sh_tmu_enable(ch);
}

static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}

static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}
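
/*
 * The TMU counts down: TCNT is loaded with the initial delta and
 * underflow raises the interrupt. TCOR is the value reloaded into TCNT
 * on underflow, so a periodic timer reloads the delta, while a one-shot
 * timer reloads the maximum value and is disarmed from the interrupt
 * handler instead.
 */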
static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}
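
/*
 * In one-shot mode the event is consumed by rewriting TCR without
 * TCR_UNIE, disabling further interrupts; in periodic mode TCR_UNIE is
 * kept set. Either write leaves the underflow flag (TCR_UNF) at zero,
 * which acknowledges the pending interrupt.
 */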
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

	/* disable or acknowledge interrupt */
	if (clockevent_state_oneshot(&ch->ced))
		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
	else
		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}

static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}
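
/*
 * The hardware counter counts down from 0xffffffff, so XOR-ing the raw
 * TCNT value with 0xffffffff converts it into the monotonically
 * increasing count that the clocksource core expects.
 */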
static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(ch->cs_enabled))
		return 0;

	ret = sh_tmu_enable(ch);
	if (!ret) {
		__clocksource_update_freq_hz(cs, ch->rate);
		ch->cs_enabled = true;
	}

	return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}
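
/*
 * Suspend/resume bypass sh_tmu_enable()/sh_tmu_disable() and their
 * runtime PM calls: the device is handled as a generic power domain
 * syscore device here, and enable_count is adjusted directly so that a
 * later sh_tmu_disable() still balances correctly.
 */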
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
	}
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}

static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}

static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}
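
/*
 * For periodic mode the reload value is one tick's worth of counter
 * cycles: the input clock rate divided by HZ, rounded to nearest by
 * adding HZ/2 before the division.
 */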
static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	struct clock_event_device *ced = &ch->ced;

	sh_tmu_enable(ch);

	clockevents_config(ced, ch->rate);

	if (periodic) {
		ch->periodic = (ch->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}

static int sh_tmu_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);
	return 0;
}

static int sh_tmu_clock_event_set_state(struct clock_event_device *ced,
					int periodic)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	/* deal with old setting first */
	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
		 ch->index, periodic ? "periodic" : "oneshot");
	sh_tmu_clock_event_start(ch, periodic);
	return 0;
}

static int sh_tmu_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 0);
}

static int sh_tmu_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 1);
}

static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(!clockevent_state_oneshot(ced));

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}
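
/*
 * The clock event device is registered with a dummy 1 Hz frequency and
 * a minimum delta of 0x300 counter ticks; the real frequency is only
 * known once the clock has been enabled, and is filled in via
 * clockevents_config() in sh_tmu_clock_event_start().
 */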
static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_state_shutdown = sh_tmu_clock_event_shutdown;
	ced->set_state_periodic = sh_tmu_clock_event_set_periodic;
	ced->set_state_oneshot = sh_tmu_clock_event_set_oneshot;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}

static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	if (clockevent) {
		ch->tmu->has_clockevent = true;
		sh_tmu_register_clockevent(ch, name);
	} else if (clocksource) {
		ch->tmu->has_clocksource = true;
		sh_tmu_register_clocksource(ch, name);
	}

	return 0;
}
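
/*
 * Channel register blocks follow the shared area at a 12-byte stride:
 * starting at offset 4 on SH-3 style devices, right after the 8-bit
 * TSTR, and at offset 8 on other TMUs with their wider shared area.
 */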
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;
	ch->index = index;

	if (tmu->model == SH_TMU_SH3)
		ch->base = tmu->mapbase + 4 + ch->index * 12;
	else
		ch->base = tmu->mapbase + 8 + ch->index * 12;

	ch->irq = platform_get_irq(tmu->pdev, index);
	if (ch->irq < 0) {
		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return ch->irq;
	}

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}

static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
	struct resource *res;

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (tmu->mapbase == NULL)
		return -ENXIO;

	return 0;
}

static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
{
	struct device_node *np = tmu->pdev->dev.of_node;

	tmu->model = SH_TMU;
	tmu->num_channels = 3;

	of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);

	if (tmu->num_channels != 2 && tmu->num_channels != 3) {
		dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
			tmu->num_channels);
		return -EINVAL;
	}

	return 0;
}

static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	tmu->pdev = pdev;

	raw_spin_lock_init(&tmu->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		ret = sh_tmu_parse_dt(tmu);
		if (ret < 0)
			return ret;
	} else if (pdev->dev.platform_data) {
		const struct platform_device_id *id = pdev->id_entry;
		struct sh_timer_config *cfg = pdev->dev.platform_data;

		tmu->model = id->driver_data;
		tmu->num_channels = hweight8(cfg->channels_mask);
	} else {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev, "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source.
	 */
	for (i = 0; i < tmu->num_channels; ++i) {
		ret = sh_tmu_channel_setup(&tmu->channels[i], i,
					   i == 0, i == 1, tmu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	iounmap(tmu->mapbase);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}
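
/*
 * The driver can be probed twice: once very early through the
 * "earlytimer" early platform mechanism, before the regular device
 * model is up, and again as a normal platform device. The second probe
 * finds the drvdata left behind by the early one and only finishes the
 * runtime PM setup.
 */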
static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);

static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
	{ .compatible = "renesas,tmu" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_tmu_of_table);

static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
		.of_match_table = of_match_ptr(sh_tmu_of_table),
	},
	.id_table	= sh_tmu_id_table,
};

static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");