time_64.c

/* time.c: UltraSparc timer and TOD clock support.
 *
 * Copyright (C) 1997, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 *
 * Based largely on code which is:
 *
 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/delay.h>
#include <linux/profile.h>
#include <linux/bcd.h>
#include <linux/jiffies.h>
#include <linux/cpufreq.h>
#include <linux/percpu.h>
#include <linux/miscdevice.h>
#include <linux/rtc.h>
#include <linux/rtc/m48t59.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ftrace.h>

#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/starfire.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <asm/irq_regs.h>

#include "entry.h"

DEFINE_SPINLOCK(rtc_lock);

#define TICK_PRIV_BIT		(1UL << 63)
#define TICKCMP_IRQ_BIT		(1UL << 63)
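
/* Both constants name bit 63: in %tick it is the non-privileged-trap (NPT)
 * bit that blocks user-mode reads of the counter, and in the compare
 * registers it is the interrupt-disable bit.
 */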

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->u_regs[UREG_RETPC];
	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

static void tick_disable_protection(void)
{
	/* Set things up so user can access tick register for profiling
	 * purposes.  Also workaround BB_ERRATA_1 by doing a dummy
	 * read back of %tick after writing it.
	 */
	__asm__ __volatile__(
	"	ba,pt	%%xcc, 1f\n"
	"	 nop\n"
	"	.align	64\n"
	"1:	rd	%%tick, %%g2\n"
	"	add	%%g2, 6, %%g2\n"
	"	andn	%%g2, %0, %%g2\n"
	"	wrpr	%%g2, 0, %%tick\n"
	"	rdpr	%%tick, %%g0"
	: /* no outputs */
	: "r" (TICK_PRIV_BIT)
	: "g2");
}

static void tick_disable_irq(void)
{
	__asm__ __volatile__(
	"	ba,pt	%%xcc, 1f\n"
	"	 nop\n"
	"	.align	64\n"
	"1:	wr	%0, 0x0, %%tick_cmpr\n"
	"	rd	%%tick_cmpr, %%g0"
	: /* no outputs */
	: "r" (TICKCMP_IRQ_BIT));
}

static void tick_init_tick(void)
{
	tick_disable_protection();
	tick_disable_irq();
}

static unsigned long long tick_get_tick(void)
{
	unsigned long ret;

	__asm__ __volatile__("rd	%%tick, %0\n\t"
			     "mov	%0, %0"
			     : "=r" (ret));

	return ret & ~TICK_PRIV_BIT;
}
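
/* All of the ->add_compare() implementations below program the compare
 * register to "current counter + adj" and then re-read the counter.  A
 * nonzero return means the counter had already run past the programmed
 * value, i.e. the event was requested too close in; sparc64_next_event()
 * turns that into -ETIME.
 */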
static int tick_add_compare(unsigned long adj)
{
	unsigned long orig_tick, new_tick, new_compare;

	__asm__ __volatile__("rd	%%tick, %0"
			     : "=r" (orig_tick));

	orig_tick &= ~TICKCMP_IRQ_BIT;

	/* Workaround for Spitfire Errata (#54 I think??), I discovered
	 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
	 * number 103640.
	 *
	 * On Blackbird writes to %tick_cmpr can fail, the
	 * workaround seems to be to execute the wr instruction
	 * at the start of an I-cache line, and perform a dummy
	 * read back from %tick_cmpr right after writing to it. -DaveM
	 */
	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t"
			     " add	%1, %2, %0\n\t"
			     ".align	64\n"
			     "1:\n\t"
			     "wr	%0, 0, %%tick_cmpr\n\t"
			     "rd	%%tick_cmpr, %%g0\n\t"
			     : "=r" (new_compare)
			     : "r" (orig_tick), "r" (adj));

	__asm__ __volatile__("rd	%%tick, %0"
			     : "=r" (new_tick));

	new_tick &= ~TICKCMP_IRQ_BIT;

	return ((long)(new_tick - (orig_tick+adj))) > 0L;
}

static unsigned long tick_add_tick(unsigned long adj)
{
	unsigned long new_tick;

	/* Also need to handle Blackbird bug here too. */
	__asm__ __volatile__("rd	%%tick, %0\n\t"
			     "add	%0, %1, %0\n\t"
			     "wrpr	%0, 0, %%tick\n\t"
			     : "=&r" (new_tick)
			     : "r" (adj));

	return new_tick;
}

static struct sparc64_tick_ops tick_operations __read_mostly = {
	.name		=	"tick",
	.init_tick	=	tick_init_tick,
	.disable_irq	=	tick_disable_irq,
	.get_tick	=	tick_get_tick,
	.add_tick	=	tick_add_tick,
	.add_compare	=	tick_add_compare,
	.softint_mask	=	1UL << 0,
};

struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations;
EXPORT_SYMBOL(tick_ops);

static void stick_disable_irq(void)
{
	__asm__ __volatile__(
	"wr	%0, 0x0, %%asr25"
	: /* no outputs */
	: "r" (TICKCMP_IRQ_BIT));
}

static void stick_init_tick(void)
{
	/* Writes to the %tick and %stick register are not
	 * allowed on sun4v.  The Hypervisor controls that
	 * bit, per-strand.
	 */
	if (tlb_type != hypervisor) {
		tick_disable_protection();
		tick_disable_irq();

		/* Let the user get at STICK too. */
		__asm__ __volatile__(
		"	rd	%%asr24, %%g2\n"
		"	andn	%%g2, %0, %%g2\n"
		"	wr	%%g2, 0, %%asr24"
		: /* no outputs */
		: "r" (TICK_PRIV_BIT)
		: "g1", "g2");
	}

	stick_disable_irq();
}

static unsigned long long stick_get_tick(void)
{
	unsigned long ret;

	__asm__ __volatile__("rd	%%asr24, %0"
			     : "=r" (ret));

	return ret & ~TICK_PRIV_BIT;
}

static unsigned long stick_add_tick(unsigned long adj)
{
	unsigned long new_tick;

	__asm__ __volatile__("rd	%%asr24, %0\n\t"
			     "add	%0, %1, %0\n\t"
			     "wr	%0, 0, %%asr24\n\t"
			     : "=&r" (new_tick)
			     : "r" (adj));

	return new_tick;
}

static int stick_add_compare(unsigned long adj)
{
	unsigned long orig_tick, new_tick;

	__asm__ __volatile__("rd	%%asr24, %0"
			     : "=r" (orig_tick));
	orig_tick &= ~TICKCMP_IRQ_BIT;

	__asm__ __volatile__("wr	%0, 0, %%asr25"
			     : /* no outputs */
			     : "r" (orig_tick + adj));

	__asm__ __volatile__("rd	%%asr24, %0"
			     : "=r" (new_tick));
	new_tick &= ~TICKCMP_IRQ_BIT;

	return ((long)(new_tick - (orig_tick+adj))) > 0L;
}

static struct sparc64_tick_ops stick_operations __read_mostly = {
	.name		=	"stick",
	.init_tick	=	stick_init_tick,
	.disable_irq	=	stick_disable_irq,
	.get_tick	=	stick_get_tick,
	.add_tick	=	stick_add_tick,
	.add_compare	=	stick_add_compare,
	.softint_mask	=	1UL << 16,
};

/* On Hummingbird the STICK/STICK_CMPR register is implemented
 * in I/O space.  There are two 64-bit registers each, the
 * first holds the low 32-bits of the value and the second holds
 * the high 32-bits.
 *
 * Since STICK is constantly updating, we have to access it carefully.
 *
 * The sequence we use to read is:
 * 1) read high
 * 2) read low
 * 3) read high again, if it rolled re-read both low and high again.
 *
 * Writing STICK safely is also tricky:
 * 1) write low to zero
 * 2) write high
 * 3) write low
 */
#define HBIRD_STICKCMP_ADDR	0x1fe0000f060UL
#define HBIRD_STICK_ADDR	0x1fe0000f070UL

static unsigned long __hbird_read_stick(void)
{
	unsigned long ret, tmp1, tmp2, tmp3;
	unsigned long addr = HBIRD_STICK_ADDR+8;

	__asm__ __volatile__("ldxa	[%1] %5, %2\n"
			     "1:\n\t"
			     "sub	%1, 0x8, %1\n\t"
			     "ldxa	[%1] %5, %3\n\t"
			     "add	%1, 0x8, %1\n\t"
			     "ldxa	[%1] %5, %4\n\t"
			     "cmp	%4, %2\n\t"
			     "bne,a,pn	%%xcc, 1b\n\t"
			     " mov	%4, %2\n\t"
			     "sllx	%4, 32, %4\n\t"
			     "or	%3, %4, %0\n\t"
			     : "=&r" (ret), "=&r" (addr),
			       "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)
			     : "i" (ASI_PHYS_BYPASS_EC_E), "1" (addr));

	return ret;
}
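
/* For reference, the retry loop above is roughly the following C.  This is
 * only an illustrative sketch: the real accesses are ldxa loads through
 * ASI_PHYS_BYPASS_EC_E, and read64() stands in for such a 64-bit physical
 * read.  If the high word changed between the two reads it rolled over,
 * so we go around again with the new high word:
 *
 *	high = read64(HBIRD_STICK_ADDR + 8);
 *	for (;;) {
 *		low   = read64(HBIRD_STICK_ADDR);
 *		high2 = read64(HBIRD_STICK_ADDR + 8);
 *		if (high2 == high)
 *			break;
 *		high = high2;
 *	}
 *	return (high2 << 32) | low;
 */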

static void __hbird_write_stick(unsigned long val)
{
	unsigned long low = (val & 0xffffffffUL);
	unsigned long high = (val >> 32UL);
	unsigned long addr = HBIRD_STICK_ADDR;

	__asm__ __volatile__("stxa	%%g0, [%0] %4\n\t"
			     "add	%0, 0x8, %0\n\t"
			     "stxa	%3, [%0] %4\n\t"
			     "sub	%0, 0x8, %0\n\t"
			     "stxa	%2, [%0] %4"
			     : "=&r" (addr)
			     : "0" (addr), "r" (low), "r" (high),
			       "i" (ASI_PHYS_BYPASS_EC_E));
}

static void __hbird_write_compare(unsigned long val)
{
	unsigned long low = (val & 0xffffffffUL);
	unsigned long high = (val >> 32UL);
	unsigned long addr = HBIRD_STICKCMP_ADDR + 0x8UL;

	__asm__ __volatile__("stxa	%3, [%0] %4\n\t"
			     "sub	%0, 0x8, %0\n\t"
			     "stxa	%2, [%0] %4"
			     : "=&r" (addr)
			     : "0" (addr), "r" (low), "r" (high),
			       "i" (ASI_PHYS_BYPASS_EC_E));
}

static void hbtick_disable_irq(void)
{
	__hbird_write_compare(TICKCMP_IRQ_BIT);
}

static void hbtick_init_tick(void)
{
	tick_disable_protection();

	/* XXX This seems to be necessary to 'jumpstart' Hummingbird
	 * XXX into actually sending STICK interrupts.  I think because
	 * XXX of how we store %tick_cmpr in head.S this somehow resets the
	 * XXX {TICK + STICK} interrupt mux.  -DaveM
	 */
	__hbird_write_stick(__hbird_read_stick());

	hbtick_disable_irq();
}

static unsigned long long hbtick_get_tick(void)
{
	return __hbird_read_stick() & ~TICK_PRIV_BIT;
}

static unsigned long hbtick_add_tick(unsigned long adj)
{
	unsigned long val;

	val = __hbird_read_stick() + adj;
	__hbird_write_stick(val);

	return val;
}

static int hbtick_add_compare(unsigned long adj)
{
	unsigned long val = __hbird_read_stick();
	unsigned long val2;

	val &= ~TICKCMP_IRQ_BIT;
	val += adj;
	__hbird_write_compare(val);

	val2 = __hbird_read_stick() & ~TICKCMP_IRQ_BIT;

	return ((long)(val2 - val)) > 0L;
}

static struct sparc64_tick_ops hbtick_operations __read_mostly = {
	.name		=	"hbtick",
	.init_tick	=	hbtick_init_tick,
	.disable_irq	=	hbtick_disable_irq,
	.get_tick	=	hbtick_get_tick,
	.add_tick	=	hbtick_add_tick,
	.add_compare	=	hbtick_add_compare,
	.softint_mask	=	1UL << 0,
};
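
/* Three tick_ops flavours are defined above: "tick" drives the on-chip
 * %tick/%tick_cmpr registers, "stick" drives the system tick via
 * %asr24/%asr25, and "hbtick" drives the Hummingbird (UltraSPARC-IIe)
 * STICK registers that live in I/O space.  sparc64_init_timers() selects
 * one at boot and the rest of this file only works through the tick_ops
 * pointer.
 */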

static unsigned long timer_ticks_per_nsec_quotient __read_mostly;

int update_persistent_clock(struct timespec now)
{
	struct rtc_device *rtc = rtc_class_open("rtc0");
	int err = -1;

	if (rtc) {
		err = rtc_set_mmss(rtc, now.tv_sec);
		rtc_class_close(rtc);
	}

	return err;
}

unsigned long cmos_regs;
EXPORT_SYMBOL(cmos_regs);

static struct resource rtc_cmos_resource;

static struct platform_device rtc_cmos_device = {
	.name		= "rtc_cmos",
	.id		= -1,
	.resource	= &rtc_cmos_resource,
	.num_resources	= 1,
};

static int __devinit rtc_probe(struct platform_device *op)
{
	struct resource *r;

	printk(KERN_INFO "%s: RTC regs at 0x%llx\n",
	       op->dev.of_node->full_name, op->resource[0].start);

	/* The CMOS RTC driver only accepts IORESOURCE_IO, so cons
	 * up a fake resource so that the probe works for all cases.
	 * When the RTC is behind an ISA bus it will have IORESOURCE_IO
	 * already, whereas when it's behind EBUS it will be IORESOURCE_MEM.
	 */
	r = &rtc_cmos_resource;
	r->flags = IORESOURCE_IO;
	r->name = op->resource[0].name;
	r->start = op->resource[0].start;
	r->end = op->resource[0].end;
	cmos_regs = op->resource[0].start;
	return platform_device_register(&rtc_cmos_device);
}

static const struct of_device_id rtc_match[] = {
	{
		.name = "rtc",
		.compatible = "m5819",
	},
	{
		.name = "rtc",
		.compatible = "isa-m5819p",
	},
	{
		.name = "rtc",
		.compatible = "isa-m5823p",
	},
	{
		.name = "rtc",
		.compatible = "ds1287",
	},
	{},
};

static struct platform_driver rtc_driver = {
	.probe		= rtc_probe,
	.driver = {
		.name = "rtc",
		.owner = THIS_MODULE,
		.of_match_table = rtc_match,
	},
};

static struct platform_device rtc_bq4802_device = {
	.name		= "rtc-bq4802",
	.id		= -1,
	.num_resources	= 1,
};

static int __devinit bq4802_probe(struct platform_device *op)
{
	printk(KERN_INFO "%s: BQ4802 regs at 0x%llx\n",
	       op->dev.of_node->full_name, op->resource[0].start);

	rtc_bq4802_device.resource = &op->resource[0];
	return platform_device_register(&rtc_bq4802_device);
}

static const struct of_device_id bq4802_match[] = {
	{
		.name = "rtc",
		.compatible = "bq4802",
	},
	{},
};

static struct platform_driver bq4802_driver = {
	.probe		= bq4802_probe,
	.driver = {
		.name = "bq4802",
		.owner = THIS_MODULE,
		.of_match_table = bq4802_match,
	},
};

static unsigned char mostek_read_byte(struct device *dev, u32 ofs)
{
	struct platform_device *pdev = to_platform_device(dev);
	void __iomem *regs = (void __iomem *) pdev->resource[0].start;

	return readb(regs + ofs);
}

static void mostek_write_byte(struct device *dev, u32 ofs, u8 val)
{
	struct platform_device *pdev = to_platform_device(dev);
	void __iomem *regs = (void __iomem *) pdev->resource[0].start;

	writeb(val, regs + ofs);
}

static struct m48t59_plat_data m48t59_data = {
	.read_byte	= mostek_read_byte,
	.write_byte	= mostek_write_byte,
};

static struct platform_device m48t59_rtc = {
	.name		= "rtc-m48t59",
	.id		= 0,
	.num_resources	= 1,
	.dev	= {
		.platform_data = &m48t59_data,
	},
};

static int __devinit mostek_probe(struct platform_device *op)
{
	struct device_node *dp = op->dev.of_node;

	/* On an Enterprise system there can be multiple mostek clocks.
	 * We should only match the one that is on the central FHC bus.
	 */
	if (!strcmp(dp->parent->name, "fhc") &&
	    strcmp(dp->parent->parent->name, "central") != 0)
		return -ENODEV;

	printk(KERN_INFO "%s: Mostek regs at 0x%llx\n",
	       dp->full_name, op->resource[0].start);

	m48t59_rtc.resource = &op->resource[0];
	return platform_device_register(&m48t59_rtc);
}

static const struct of_device_id mostek_match[] = {
	{
		.name = "eeprom",
	},
	{},
};

static struct platform_driver mostek_driver = {
	.probe		= mostek_probe,
	.driver = {
		.name = "mostek",
		.owner = THIS_MODULE,
		.of_match_table = mostek_match,
	},
};

static struct platform_device rtc_sun4v_device = {
	.name		= "rtc-sun4v",
	.id		= -1,
};

static struct platform_device rtc_starfire_device = {
	.name		= "rtc-starfire",
	.id		= -1,
};

static int __init clock_init(void)
{
	if (this_is_starfire)
		return platform_device_register(&rtc_starfire_device);

	if (tlb_type == hypervisor)
		return platform_device_register(&rtc_sun4v_device);

	(void) platform_driver_register(&rtc_driver);
	(void) platform_driver_register(&mostek_driver);
	(void) platform_driver_register(&bq4802_driver);

	return 0;
}

/* Must be after subsys_initcall() so that busses are probed.  Must
 * be before device_initcall() because things like the RTC driver
 * need to see the clock registers.
 */
fs_initcall(clock_init);

/* This gets the master TICK_INT timer going. */
static unsigned long sparc64_init_timers(void)
{
	struct device_node *dp;
	unsigned long freq;

	dp = of_find_node_by_path("/");
	if (tlb_type == spitfire) {
		unsigned long ver, manuf, impl;

		__asm__ __volatile__ ("rdpr %%ver, %0"
				      : "=&r" (ver));
		manuf = ((ver >> 48) & 0xffff);
		impl = ((ver >> 32) & 0xffff);
		if (manuf == 0x17 && impl == 0x13) {
			/* Hummingbird, aka Ultra-IIe */
			tick_ops = &hbtick_operations;
			freq = of_getintprop_default(dp, "stick-frequency", 0);
		} else {
			tick_ops = &tick_operations;
			freq = local_cpu_data().clock_tick;
		}
	} else {
		tick_ops = &stick_operations;
		freq = of_getintprop_default(dp, "stick-frequency", 0);
	}

	return freq;
}

struct freq_table {
	unsigned long clock_tick_ref;
	unsigned int ref_freq;
};
static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0 };

unsigned long sparc64_get_clock_tick(unsigned int cpu)
{
	struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);

	if (ft->clock_tick_ref)
		return ft->clock_tick_ref;
	return cpu_data(cpu).clock_tick;
}
EXPORT_SYMBOL(sparc64_get_clock_tick);

#ifdef CONFIG_CPU_FREQ

static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				    void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned int cpu = freq->cpu;
	struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);

	if (!ft->ref_freq) {
		ft->ref_freq = freq->old;
		ft->clock_tick_ref = cpu_data(cpu).clock_tick;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		cpu_data(cpu).clock_tick =
			cpufreq_scale(ft->clock_tick_ref,
				      ft->ref_freq,
				      freq->new);
	}

	return 0;
}

static struct notifier_block sparc64_cpufreq_notifier_block = {
	.notifier_call	= sparc64_cpufreq_notifier
};

static int __init register_sparc64_cpufreq_notifier(void)
{
	cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(register_sparc64_cpufreq_notifier);

#endif /* CONFIG_CPU_FREQ */

static int sparc64_next_event(unsigned long delta,
			      struct clock_event_device *evt)
{
	return tick_ops->add_compare(delta) ? -ETIME : 0;
}

static void sparc64_timer_setup(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_RESUME:
		break;

	case CLOCK_EVT_MODE_SHUTDOWN:
		tick_ops->disable_irq();
		break;

	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_UNUSED:
		WARN_ON(1);
		break;
	}
}

static struct clock_event_device sparc64_clockevent = {
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= sparc64_timer_setup,
	.set_next_event	= sparc64_next_event,
	.rating		= 100,
	.shift		= 30,
	.irq		= -1,
};
static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
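
/* Timer softint handler.  Each tick_ops flavour raises a different softint
 * bit (the ->softint_mask values above); it is cleared here and then the
 * per-cpu clock_event_device's handler is invoked.
 */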
void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long tick_mask = tick_ops->softint_mask;
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);

	clear_softint(tick_mask);

	irq_enter();

	local_cpu_data().irq0_irqs++;
	kstat_incr_irqs_this_cpu(0, irq_to_desc(0));

	if (unlikely(!evt->event_handler)) {
		printk(KERN_WARNING
		       "Spurious SPARC64 timer interrupt on cpu %d\n", cpu);
	} else
		evt->event_handler(evt);

	irq_exit();

	set_irq_regs(old_regs);
}

void __devinit setup_sparc64_timer(void)
{
	struct clock_event_device *sevt;
	unsigned long pstate;

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	tick_ops->init_tick();

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));

	sevt = &__get_cpu_var(sparc64_events);

	memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
	sevt->cpumask = cpumask_of(smp_processor_id());

	clockevents_register_device(sevt);
}

#define SPARC64_NSEC_PER_CYC_SHIFT	10UL

static struct clocksource clocksource_tick = {
	.rating		= 100,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static unsigned long tb_ticks_per_usec __read_mostly;

void __delay(unsigned long loops)
{
	unsigned long bclock, now;

	bclock = tick_ops->get_tick();
	do {
		now = tick_ops->get_tick();
	} while ((now-bclock) < loops);
}
EXPORT_SYMBOL(__delay);
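
/* tb_ticks_per_usec is derived from the timer frequency in time_init(), so
 * udelay() simply scales microseconds into a tick count for __delay().  For
 * example (hypothetical numbers), with a 12 MHz timer tb_ticks_per_usec is
 * 12 and udelay(100) busy-waits for roughly 1200 ticks.
 */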
void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

static cycle_t clocksource_tick_read(struct clocksource *cs)
{
	return tick_ops->get_tick();
}

void __init time_init(void)
{
	unsigned long freq = sparc64_init_timers();

	tb_ticks_per_usec = freq / USEC_PER_SEC;

	timer_ticks_per_nsec_quotient =
		clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);

	clocksource_tick.name = tick_ops->name;
	clocksource_tick.read = clocksource_tick_read;

	clocksource_register_hz(&clocksource_tick, freq);
	printk("clocksource: mult[%x] shift[%d]\n",
	       clocksource_tick.mult, clocksource_tick.shift);

	sparc64_clockevent.name = tick_ops->name;
	clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);

	sparc64_clockevent.max_delta_ns =
		clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent);
	sparc64_clockevent.min_delta_ns =
		clockevent_delta2ns(0xF, &sparc64_clockevent);

	printk("clockevent: mult[%x] shift[%d]\n",
	       sparc64_clockevent.mult, sparc64_clockevent.shift);

	setup_sparc64_timer();
}
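
/* Convert ticks to nanoseconds using the fixed-point factor set up in
 * time_init().  timer_ticks_per_nsec_quotient is approximately
 * (NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) / freq, so the expression
 * below approximates ticks * 1e9 / freq.  Worked example: with a 1 GHz
 * tick the quotient is 1024 and (ticks * 1024) >> 10 == ticks, i.e. one
 * nanosecond per tick.
 */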
unsigned long long sched_clock(void)
{
	unsigned long ticks = tick_ops->get_tick();

	return (ticks * timer_ticks_per_nsec_quotient)
		>> SPARC64_NSEC_PER_CYC_SHIFT;
}

int __devinit read_current_timer(unsigned long *timer_val)
{
	*timer_val = tick_ops->get_tick();
	return 0;
}