#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i8253.h>
#include <linux/slab.h>
#include <linux/hpet.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/pm.h>
#include <linux/io.h>

#include <asm/cpufeature.h>
#include <asm/irqdomain.h>
#include <asm/fixmap.h>
#include <asm/hpet.h>
#include <asm/time.h>

#define HPET_MASK			CLOCKSOURCE_MASK(32)

/* FSEC = 10^-15
   NSEC = 10^-9 */
#define FSEC_PER_NSEC			1000000L

#define HPET_DEV_USED_BIT		2
#define HPET_DEV_USED			(1 << HPET_DEV_USED_BIT)
#define HPET_DEV_VALID			0x8
#define HPET_DEV_FSB_CAP		0x1000
#define HPET_DEV_PERI_CAP		0x2000

#define HPET_MIN_CYCLES			128
#define HPET_MIN_PROG_DELTA		(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))

/*
 * HPET address is set in acpi/boot.c, when an ACPI entry exists
 */
unsigned long hpet_address;
u8 hpet_blockid; /* OS timer block num */
bool hpet_msi_disable;

#ifdef CONFIG_PCI_MSI
static unsigned int hpet_num_timers;
#endif
static void __iomem *hpet_virt_address;

struct hpet_dev {
        struct clock_event_device evt;
        unsigned int num;
        int cpu;
        unsigned int irq;
        unsigned int flags;
        char name[10];
};

static inline struct hpet_dev *EVT_TO_HPET_DEV(struct clock_event_device *evtdev)
{
        return container_of(evtdev, struct hpet_dev, evt);
}

inline unsigned int hpet_readl(unsigned int a)
{
        return readl(hpet_virt_address + a);
}

static inline void hpet_writel(unsigned int d, unsigned int a)
{
        writel(d, hpet_virt_address + a);
}

#ifdef CONFIG_X86_64
#include <asm/pgtable.h>
#endif

static inline void hpet_set_mapping(void)
{
        hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
}

static inline void hpet_clear_mapping(void)
{
        iounmap(hpet_virt_address);
        hpet_virt_address = NULL;
}

/*
 * HPET command line enable / disable
 */
bool boot_hpet_disable;
bool hpet_force_user;
static bool hpet_verbose;

static int __init hpet_setup(char *str)
{
        while (str) {
                char *next = strchr(str, ',');

                if (next)
                        *next++ = 0;
                if (!strncmp("disable", str, 7))
                        boot_hpet_disable = true;
                if (!strncmp("force", str, 5))
                        hpet_force_user = true;
                if (!strncmp("verbose", str, 7))
                        hpet_verbose = true;
                str = next;
        }
        return 1;
}
__setup("hpet=", hpet_setup);

static int __init disable_hpet(char *str)
{
        boot_hpet_disable = true;
        return 1;
}
__setup("nohpet", disable_hpet);
static inline int is_hpet_capable(void)
{
        return !boot_hpet_disable && hpet_address;
}

/*
 * HPET timer interrupt enable / disable
 */
static bool hpet_legacy_int_enabled;

/**
 * is_hpet_enabled - check whether the hpet timer interrupt is enabled
 */
int is_hpet_enabled(void)
{
        return is_hpet_capable() && hpet_legacy_int_enabled;
}
EXPORT_SYMBOL_GPL(is_hpet_enabled);

static void _hpet_print_config(const char *function, int line)
{
        u32 i, timers, l, h;

        printk(KERN_INFO "hpet: %s(%d):\n", function, line);
        l = hpet_readl(HPET_ID);
        h = hpet_readl(HPET_PERIOD);
        timers = ((l & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
        printk(KERN_INFO "hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h);
        l = hpet_readl(HPET_CFG);
        h = hpet_readl(HPET_STATUS);
        printk(KERN_INFO "hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h);
        l = hpet_readl(HPET_COUNTER);
        h = hpet_readl(HPET_COUNTER+4);
        printk(KERN_INFO "hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);

        for (i = 0; i < timers; i++) {
                l = hpet_readl(HPET_Tn_CFG(i));
                h = hpet_readl(HPET_Tn_CFG(i)+4);
                printk(KERN_INFO "hpet: T%d: CFG_l: 0x%x, CFG_h: 0x%x\n",
                       i, l, h);
                l = hpet_readl(HPET_Tn_CMP(i));
                h = hpet_readl(HPET_Tn_CMP(i)+4);
                printk(KERN_INFO "hpet: T%d: CMP_l: 0x%x, CMP_h: 0x%x\n",
                       i, l, h);
                l = hpet_readl(HPET_Tn_ROUTE(i));
                h = hpet_readl(HPET_Tn_ROUTE(i)+4);
                printk(KERN_INFO "hpet: T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n",
                       i, l, h);
        }
}

#define hpet_print_config()                                     \
do {                                                            \
        if (hpet_verbose)                                       \
                _hpet_print_config(__func__, __LINE__);         \
} while (0)

/*
 * When the hpet driver (/dev/hpet) is enabled, we need to reserve
 * timer 0 and timer 1 in case of RTC emulation.
 */
#ifdef CONFIG_HPET

static void hpet_reserve_msi_timers(struct hpet_data *hd);

static void hpet_reserve_platform_timers(unsigned int id)
{
        struct hpet __iomem *hpet = hpet_virt_address;
        struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
        unsigned int nrtimers, i;
        struct hpet_data hd;

        nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

        memset(&hd, 0, sizeof(hd));
        hd.hd_phys_address = hpet_address;
        hd.hd_address = hpet;
        hd.hd_nirqs = nrtimers;
        hpet_reserve_timer(&hd, 0);

#ifdef CONFIG_HPET_EMULATE_RTC
        hpet_reserve_timer(&hd, 1);
#endif

        /*
         * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
         * is wrong for i8259!) not the output IRQ.  Many BIOS writers
         * don't bother configuring *any* comparator interrupts.
         */
        hd.hd_irq[0] = HPET_LEGACY_8254;
        hd.hd_irq[1] = HPET_LEGACY_RTC;

        for (i = 2; i < nrtimers; timer++, i++) {
                hd.hd_irq[i] = (readl(&timer->hpet_config) &
                        Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
        }

        hpet_reserve_msi_timers(&hd);

        hpet_alloc(&hd);
}
#else
static void hpet_reserve_platform_timers(unsigned int id) { }
#endif

/*
 * Common hpet info
 */
static unsigned long hpet_freq;

static struct clock_event_device hpet_clockevent;

static void hpet_stop_counter(void)
{
        u32 cfg = hpet_readl(HPET_CFG);

        cfg &= ~HPET_CFG_ENABLE;
        hpet_writel(cfg, HPET_CFG);
}

static void hpet_reset_counter(void)
{
        hpet_writel(0, HPET_COUNTER);
        hpet_writel(0, HPET_COUNTER + 4);
}

static void hpet_start_counter(void)
{
        unsigned int cfg = hpet_readl(HPET_CFG);

        cfg |= HPET_CFG_ENABLE;
        hpet_writel(cfg, HPET_CFG);
}

static void hpet_restart_counter(void)
{
        hpet_stop_counter();
        hpet_reset_counter();
        hpet_start_counter();
}

static void hpet_resume_device(void)
{
        force_hpet_resume();
}

static void hpet_resume_counter(struct clocksource *cs)
{
        hpet_resume_device();
        hpet_restart_counter();
}

static void hpet_enable_legacy_int(void)
{
        unsigned int cfg = hpet_readl(HPET_CFG);

        cfg |= HPET_CFG_LEGACY;
        hpet_writel(cfg, HPET_CFG);
        hpet_legacy_int_enabled = true;
}

static void hpet_legacy_clockevent_register(void)
{
        /* Start HPET legacy interrupts */
        hpet_enable_legacy_int();

        /*
         * Start hpet with the boot cpu mask and make it
         * global after the IO_APIC has been initialized.
         */
        hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
        clockevents_config_and_register(&hpet_clockevent, hpet_freq,
                                        HPET_MIN_PROG_DELTA, 0x7FFFFFFF);
        global_clock_event = &hpet_clockevent;
        printk(KERN_DEBUG "hpet clockevent registered\n");
}

static int hpet_set_periodic(struct clock_event_device *evt, int timer)
{
        unsigned int cfg, cmp, now;
        uint64_t delta;

        hpet_stop_counter();
        delta = ((uint64_t)(NSEC_PER_SEC / HZ)) * evt->mult;
        delta >>= evt->shift;
        now = hpet_readl(HPET_COUNTER);
        cmp = now + (unsigned int)delta;
        cfg = hpet_readl(HPET_Tn_CFG(timer));
        cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
               HPET_TN_32BIT;
        hpet_writel(cfg, HPET_Tn_CFG(timer));
        hpet_writel(cmp, HPET_Tn_CMP(timer));
        udelay(1);
        /*
         * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
         * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
         * bit is automatically cleared after the first write.
         * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
         * Publication # 24674)
         */
        hpet_writel((unsigned int)delta, HPET_Tn_CMP(timer));
        hpet_start_counter();
        hpet_print_config();

        return 0;
}

static int hpet_set_oneshot(struct clock_event_device *evt, int timer)
{
        unsigned int cfg;

        cfg = hpet_readl(HPET_Tn_CFG(timer));
        cfg &= ~HPET_TN_PERIODIC;
        cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
        hpet_writel(cfg, HPET_Tn_CFG(timer));

        return 0;
}

static int hpet_shutdown(struct clock_event_device *evt, int timer)
{
        unsigned int cfg;

        cfg = hpet_readl(HPET_Tn_CFG(timer));
        cfg &= ~HPET_TN_ENABLE;
        hpet_writel(cfg, HPET_Tn_CFG(timer));

        return 0;
}

static int hpet_resume(struct clock_event_device *evt, int timer)
{
        if (!timer) {
                hpet_enable_legacy_int();
        } else {
                struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

                irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
                irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
                disable_hardirq(hdev->irq);
                irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
                enable_irq(hdev->irq);
        }
        hpet_print_config();

        return 0;
}
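
/*
 * Program the comparator 'delta' HPET cycles ahead of the current count.
 * Returns -ETIME if the deadline may already have been missed, so that the
 * caller can retry with a larger delta; the comment below explains why the
 * check is done this way.
 */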
static int hpet_next_event(unsigned long delta,
                           struct clock_event_device *evt, int timer)
{
        u32 cnt;
        s32 res;

        cnt = hpet_readl(HPET_COUNTER);
        cnt += (u32) delta;
        hpet_writel(cnt, HPET_Tn_CMP(timer));

        /*
         * HPETs are a complete disaster. The compare register is
         * based on an equal comparison and neither provides a less
         * than or equal functionality (which would require to take
         * the wraparound into account) nor a simple count down event
         * mode. Further the write to the comparator register is
         * delayed internally up to two HPET clock cycles in certain
         * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
         * longer delays. We worked around that by reading back the
         * compare register, but that required another workaround for
         * ICH9,10 chips where the first readout after write can
         * return the old stale value. We already had a minimum
         * programming delta of 5us enforced, but an NMI or SMI hitting
         * between the counter readout and the comparator write can
         * move us behind that point easily. Now instead of reading
         * the compare register back several times, we make the ETIME
         * decision based on the following: Return ETIME if the
         * counter value after the write is less than HPET_MIN_CYCLES
         * away from the event or if the counter is already ahead of
         * the event. The minimum programming delta for the generic
         * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
         */
        res = (s32)(cnt - hpet_readl(HPET_COUNTER));

        return res < HPET_MIN_CYCLES ? -ETIME : 0;
}

static int hpet_legacy_shutdown(struct clock_event_device *evt)
{
        return hpet_shutdown(evt, 0);
}

static int hpet_legacy_set_oneshot(struct clock_event_device *evt)
{
        return hpet_set_oneshot(evt, 0);
}

static int hpet_legacy_set_periodic(struct clock_event_device *evt)
{
        return hpet_set_periodic(evt, 0);
}

static int hpet_legacy_resume(struct clock_event_device *evt)
{
        return hpet_resume(evt, 0);
}

static int hpet_legacy_next_event(unsigned long delta,
                                  struct clock_event_device *evt)
{
        return hpet_next_event(delta, evt, 0);
}

/*
 * The hpet clock event device
 */
static struct clock_event_device hpet_clockevent = {
        .name               = "hpet",
        .features           = CLOCK_EVT_FEAT_PERIODIC |
                              CLOCK_EVT_FEAT_ONESHOT,
        .set_state_periodic = hpet_legacy_set_periodic,
        .set_state_oneshot  = hpet_legacy_set_oneshot,
        .set_state_shutdown = hpet_legacy_shutdown,
        .tick_resume        = hpet_legacy_resume,
        .set_next_event     = hpet_legacy_next_event,
        .irq                = 0,
        .rating             = 50,
};

/*
 * HPET MSI Support
 */
#ifdef CONFIG_PCI_MSI

static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
static struct hpet_dev *hpet_devs;
static struct irq_domain *hpet_domain;

void hpet_msi_unmask(struct irq_data *data)
{
        struct hpet_dev *hdev = irq_data_get_irq_handler_data(data);
        unsigned int cfg;

        /* unmask it */
        cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
        cfg |= HPET_TN_ENABLE | HPET_TN_FSB;
        hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}

void hpet_msi_mask(struct irq_data *data)
{
        struct hpet_dev *hdev = irq_data_get_irq_handler_data(data);
        unsigned int cfg;

        /* mask it */
        cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
        cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB);
        hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}

void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg)
{
        hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
        hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
}

void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg)
{
        msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
        msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
        msg->address_hi = 0;
}

static int hpet_msi_shutdown(struct clock_event_device *evt)
{
        struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

        return hpet_shutdown(evt, hdev->num);
}

static int hpet_msi_set_oneshot(struct clock_event_device *evt)
{
        struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

        return hpet_set_oneshot(evt, hdev->num);
}

static int hpet_msi_set_periodic(struct clock_event_device *evt)
{
        struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

        return hpet_set_periodic(evt, hdev->num);
}

static int hpet_msi_resume(struct clock_event_device *evt)
{
        struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

        return hpet_resume(evt, hdev->num);
}

static int hpet_msi_next_event(unsigned long delta,
                               struct clock_event_device *evt)
{
        struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

        return hpet_next_event(delta, evt, hdev->num);
}

static irqreturn_t hpet_interrupt_handler(int irq, void *data)
{
        struct hpet_dev *dev = (struct hpet_dev *)data;
        struct clock_event_device *hevt = &dev->evt;

        if (!hevt->event_handler) {
                printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n",
                       dev->num);
                return IRQ_HANDLED;
        }

        hevt->event_handler(hevt);
        return IRQ_HANDLED;
}

static int hpet_setup_irq(struct hpet_dev *dev)
{
        if (request_irq(dev->irq, hpet_interrupt_handler,
                        IRQF_TIMER | IRQF_NOBALANCING,
                        dev->name, dev))
                return -1;

        disable_irq(dev->irq);
        irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
        enable_irq(dev->irq);

        printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
               dev->name, dev->irq);

        return 0;
}

/* This should be called on the specified @cpu */
static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
{
        struct clock_event_device *evt = &hdev->evt;

        WARN_ON(cpu != smp_processor_id());
        if (!(hdev->flags & HPET_DEV_VALID))
                return;

        hdev->cpu = cpu;
        per_cpu(cpu_hpet_dev, cpu) = hdev;
        evt->name = hdev->name;
        hpet_setup_irq(hdev);
        evt->irq = hdev->irq;

        evt->rating = 110;
        evt->features = CLOCK_EVT_FEAT_ONESHOT;
        if (hdev->flags & HPET_DEV_PERI_CAP) {
                evt->features |= CLOCK_EVT_FEAT_PERIODIC;
                evt->set_state_periodic = hpet_msi_set_periodic;
        }

        evt->set_state_shutdown = hpet_msi_shutdown;
        evt->set_state_oneshot = hpet_msi_set_oneshot;
        evt->tick_resume = hpet_msi_resume;
        evt->set_next_event = hpet_msi_next_event;
        evt->cpumask = cpumask_of(hdev->cpu);

        clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA,
                                        0x7FFFFFFF);
}

#ifdef CONFIG_HPET
/* Reserve at least one timer for userspace (/dev/hpet) */
#define RESERVE_TIMERS 1
#else
#define RESERVE_TIMERS 0
#endif
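
/*
 * Scan the HPET for comparators that support FSB (MSI) interrupt delivery,
 * starting at @start_timer, and allocate an interrupt for each usable one.
 * These channels later back the per-CPU MSI clockevent devices.
 */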
static void hpet_msi_capability_lookup(unsigned int start_timer)
{
        unsigned int id;
        unsigned int num_timers;
        unsigned int num_timers_used = 0;
        int i, irq;

        if (hpet_msi_disable)
                return;

        if (boot_cpu_has(X86_FEATURE_ARAT))
                return;
        id = hpet_readl(HPET_ID);

        num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
        num_timers++; /* Value read out starts from 0 */
        hpet_print_config();

        hpet_domain = hpet_create_irq_domain(hpet_blockid);
        if (!hpet_domain)
                return;

        hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
        if (!hpet_devs)
                return;

        hpet_num_timers = num_timers;

        for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
                struct hpet_dev *hdev = &hpet_devs[num_timers_used];
                unsigned int cfg = hpet_readl(HPET_Tn_CFG(i));

                /* Only consider HPET timer with MSI support */
                if (!(cfg & HPET_TN_FSB_CAP))
                        continue;

                hdev->flags = 0;
                if (cfg & HPET_TN_PERIODIC_CAP)
                        hdev->flags |= HPET_DEV_PERI_CAP;
                sprintf(hdev->name, "hpet%d", i);
                hdev->num = i;

                irq = hpet_assign_irq(hpet_domain, hdev, hdev->num);
                if (irq <= 0)
                        continue;

                hdev->irq = irq;
                hdev->flags |= HPET_DEV_FSB_CAP;
                hdev->flags |= HPET_DEV_VALID;
                num_timers_used++;
                if (num_timers_used == num_possible_cpus())
                        break;
        }

        printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n",
               num_timers, num_timers_used);
}

#ifdef CONFIG_HPET
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
        int i;

        if (!hpet_devs)
                return;

        for (i = 0; i < hpet_num_timers; i++) {
                struct hpet_dev *hdev = &hpet_devs[i];

                if (!(hdev->flags & HPET_DEV_VALID))
                        continue;

                hd->hd_irq[hdev->num] = hdev->irq;
                hpet_reserve_timer(hd, hdev->num);
        }
}
#endif

static struct hpet_dev *hpet_get_unused_timer(void)
{
        int i;

        if (!hpet_devs)
                return NULL;

        for (i = 0; i < hpet_num_timers; i++) {
                struct hpet_dev *hdev = &hpet_devs[i];

                if (!(hdev->flags & HPET_DEV_VALID))
                        continue;
                if (test_and_set_bit(HPET_DEV_USED_BIT,
                                     (unsigned long *)&hdev->flags))
                        continue;
                return hdev;
        }
        return NULL;
}

struct hpet_work_struct {
        struct delayed_work work;
        struct completion complete;
};

static void hpet_work(struct work_struct *w)
{
        struct hpet_dev *hdev;
        int cpu = smp_processor_id();
        struct hpet_work_struct *hpet_work;

        hpet_work = container_of(w, struct hpet_work_struct, work.work);

        hdev = hpet_get_unused_timer();
        if (hdev)
                init_one_hpet_msi_clockevent(hdev, cpu);

        complete(&hpet_work->complete);
}
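
/*
 * CPU hotplug online callback: hand an unused MSI HPET channel (if any)
 * to the incoming CPU. The work item is scheduled on that CPU so the
 * clockevent is registered in the right context.
 */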
static int hpet_cpuhp_online(unsigned int cpu)
{
        struct hpet_work_struct work;

        INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
        init_completion(&work.complete);
        /* FIXME: add schedule_work_on() */
        schedule_delayed_work_on(cpu, &work.work, 0);
        wait_for_completion(&work.complete);
        destroy_delayed_work_on_stack(&work.work);
        return 0;
}

static int hpet_cpuhp_dead(unsigned int cpu)
{
        struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);

        if (!hdev)
                return 0;
        free_irq(hdev->irq, hdev);
        hdev->flags &= ~HPET_DEV_USED;
        per_cpu(cpu_hpet_dev, cpu) = NULL;
        return 0;
}
#else

static void hpet_msi_capability_lookup(unsigned int start_timer)
{
        return;
}

#ifdef CONFIG_HPET
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
        return;
}
#endif

#define hpet_cpuhp_online	NULL
#define hpet_cpuhp_dead		NULL

#endif

/*
 * Clock source related code
 */
#if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
/*
 * Reading the HPET counter is a very slow operation. If a large number of
 * CPUs are trying to access the HPET counter simultaneously, it can cause
 * massive delay and slow down system performance dramatically. This may
 * happen when HPET is the default clock source instead of TSC. For a
 * really large system with hundreds of CPUs, the slowdown may be so
 * severe that it may actually crash the system because of an NMI watchdog
 * soft lockup, for example.
 *
 * If multiple CPUs are trying to access the HPET counter at the same time,
 * we don't actually need to read the counter multiple times. Instead, the
 * other CPUs can use the counter value read by the first CPU in the group.
 *
 * This special feature is only enabled on x86-64 systems. It is unlikely
 * that 32-bit x86 systems will have enough CPUs to require this feature
 * with its associated locking overhead. And we also need 64-bit atomic
 * read.
 *
 * The lock and the hpet value are stored together and can be read in a
 * single atomic 64-bit read. It is explicitly assumed that arch_spinlock_t
 * is 32 bits in size.
 */
union hpet_lock {
        struct {
                arch_spinlock_t lock;
                u32 value;
        };
        u64 lockval;
};

static union hpet_lock hpet __cacheline_aligned = {
        { .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
};

static cycle_t read_hpet(struct clocksource *cs)
{
        unsigned long flags;
        union hpet_lock old, new;

        BUILD_BUG_ON(sizeof(union hpet_lock) != 8);

        /*
         * Read HPET directly if in NMI.
         */
        if (in_nmi())
                return (cycle_t)hpet_readl(HPET_COUNTER);

        /*
         * Read the current state of the lock and HPET value atomically.
         */
        old.lockval = READ_ONCE(hpet.lockval);

        if (arch_spin_is_locked(&old.lock))
                goto contended;

        local_irq_save(flags);
        if (arch_spin_trylock(&hpet.lock)) {
                new.value = hpet_readl(HPET_COUNTER);
                /*
                 * Use WRITE_ONCE() to prevent store tearing.
                 */
                WRITE_ONCE(hpet.value, new.value);
                arch_spin_unlock(&hpet.lock);
                local_irq_restore(flags);
                return (cycle_t)new.value;
        }
        local_irq_restore(flags);

contended:
        /*
         * Contended case
         * --------------
         * Wait until the HPET value changes or the lock is free to indicate
         * its value is up-to-date.
         *
         * It is possible that old.value already contains the latest HPET
         * value while the lock holder was in the process of releasing
         * the lock. Checking for lock state change will enable us to return
         * the value immediately instead of waiting for the next HPET reader
         * to come along.
         */
        do {
                cpu_relax();
                new.lockval = READ_ONCE(hpet.lockval);
        } while ((new.value == old.value) && arch_spin_is_locked(&new.lock));

        return (cycle_t)new.value;
}
#else
/*
 * For UP or 32-bit.
 */
static cycle_t read_hpet(struct clocksource *cs)
{
        return (cycle_t)hpet_readl(HPET_COUNTER);
}
#endif

static struct clocksource clocksource_hpet = {
        .name   = "hpet",
        .rating = 250,
        .read   = read_hpet,
        .mask   = HPET_MASK,
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
        .resume = hpet_resume_counter,
};

static int hpet_clocksource_register(void)
{
        u64 start, now;
        cycle_t t1;

        /* Start the counter */
        hpet_restart_counter();

        /* Verify whether hpet counter works */
        t1 = hpet_readl(HPET_COUNTER);
        start = rdtsc();

        /*
         * We don't know the TSC frequency yet, but waiting for
         * 200000 TSC cycles is safe:
         * 4 GHz == 50us
         * 1 GHz == 200us
         */
        do {
                rep_nop();
                now = rdtsc();
        } while ((now - start) < 200000UL);

        if (t1 == hpet_readl(HPET_COUNTER)) {
                printk(KERN_WARNING
                       "HPET counter not counting. HPET disabled\n");
                return -ENODEV;
        }

        clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);
        return 0;
}

static u32 *hpet_boot_cfg;

/**
 * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
 */
int __init hpet_enable(void)
{
        u32 hpet_period, cfg, id;
        u64 freq;
        unsigned int i, last;

        if (!is_hpet_capable())
                return 0;

        hpet_set_mapping();

        /*
         * Read the period and check for a sane value:
         */
        hpet_period = hpet_readl(HPET_PERIOD);

        /*
         * AMD SB700 based systems with spread spectrum enabled use a
         * SMM based HPET emulation to provide proper frequency
         * setting. The SMM code is initialized with the first HPET
         * register access and takes some time to complete. During
         * this time the config register reads 0xffffffff. We check
         * for max. 1000 loops whether the config register reads a non
         * 0xffffffff value to make sure that HPET is up and running
         * before we go further. A counting loop is safe, as the HPET
         * access takes thousands of CPU cycles. On non SB700 based
         * machines this check is only done once and has no side
         * effects.
         */
        for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) {
                if (i == 1000) {
                        printk(KERN_WARNING
                               "HPET config register value = 0xFFFFFFFF. "
                               "Disabling HPET\n");
                        goto out_nohpet;
                }
        }

        if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
                goto out_nohpet;

        /*
         * The period is a femto seconds value. Convert it to a
         * frequency.
         */
        freq = FSEC_PER_SEC;
        do_div(freq, hpet_period);
        hpet_freq = freq;

        /*
         * Read the HPET ID register to retrieve the IRQ routing
         * information and the number of channels
         */
        id = hpet_readl(HPET_ID);
        hpet_print_config();

        last = (id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;

#ifdef CONFIG_HPET_EMULATE_RTC
        /*
         * The legacy routing mode needs at least two channels, tick timer
         * and the rtc emulation channel.
         */
        if (!last)
                goto out_nohpet;
#endif

        cfg = hpet_readl(HPET_CFG);
        hpet_boot_cfg = kmalloc((last + 2) * sizeof(*hpet_boot_cfg),
                                GFP_KERNEL);
        if (hpet_boot_cfg)
                *hpet_boot_cfg = cfg;
        else
                pr_warn("HPET initial state will not be saved\n");
        cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
        hpet_writel(cfg, HPET_CFG);
        if (cfg)
                pr_warn("HPET: Unrecognized bits %#x set in global cfg\n",
                        cfg);

        for (i = 0; i <= last; ++i) {
                cfg = hpet_readl(HPET_Tn_CFG(i));
                if (hpet_boot_cfg)
                        hpet_boot_cfg[i + 1] = cfg;
                cfg &= ~(HPET_TN_ENABLE | HPET_TN_LEVEL | HPET_TN_FSB);
                hpet_writel(cfg, HPET_Tn_CFG(i));
                cfg &= ~(HPET_TN_PERIODIC | HPET_TN_PERIODIC_CAP
                         | HPET_TN_64BIT_CAP | HPET_TN_32BIT | HPET_TN_ROUTE
                         | HPET_TN_FSB | HPET_TN_FSB_CAP);
                if (cfg)
                        pr_warn("HPET: Unrecognized bits %#x set in cfg#%u\n",
                                cfg, i);
        }
        hpet_print_config();

        if (hpet_clocksource_register())
                goto out_nohpet;

        if (id & HPET_ID_LEGSUP) {
                hpet_legacy_clockevent_register();
                return 1;
        }
        return 0;

out_nohpet:
        hpet_clear_mapping();
        hpet_address = 0;
        return 0;
}

/*
 * Needs to be late, as the reserve_timer code calls kmalloc!
 *
 * Not a problem on i386 as hpet_enable is called from late_time_init,
 * but on x86_64 it is necessary!
 */
static __init int hpet_late_init(void)
{
        int ret;

        if (boot_hpet_disable)
                return -ENODEV;

        if (!hpet_address) {
                if (!force_hpet_address)
                        return -ENODEV;

                hpet_address = force_hpet_address;
                hpet_enable();
        }

        if (!hpet_virt_address)
                return -ENODEV;

        if (hpet_readl(HPET_ID) & HPET_ID_LEGSUP)
                hpet_msi_capability_lookup(2);
        else
                hpet_msi_capability_lookup(0);

        hpet_reserve_platform_timers(hpet_readl(HPET_ID));
        hpet_print_config();

        if (hpet_msi_disable)
                return 0;

        if (boot_cpu_has(X86_FEATURE_ARAT))
                return 0;

        /* This notifier should be called after workqueue is ready */
        ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "AP_X86_HPET_ONLINE",
                                hpet_cpuhp_online, NULL);
        if (ret)
                return ret;
        ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "X86_HPET_DEAD", NULL,
                                hpet_cpuhp_dead);
        if (ret)
                goto err_cpuhp;
        return 0;

err_cpuhp:
        cpuhp_remove_state(CPUHP_AP_X86_HPET_ONLINE);
        return ret;
}
fs_initcall(hpet_late_init);
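
/*
 * Stop the HPET and, if the boot configuration was saved in hpet_enable(),
 * restore the global and per-comparator config registers to their
 * original state.
 */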
void hpet_disable(void)
{
        if (is_hpet_capable() && hpet_virt_address) {
                unsigned int cfg = hpet_readl(HPET_CFG), id, last;

                if (hpet_boot_cfg)
                        cfg = *hpet_boot_cfg;
                else if (hpet_legacy_int_enabled) {
                        cfg &= ~HPET_CFG_LEGACY;
                        hpet_legacy_int_enabled = false;
                }
                cfg &= ~HPET_CFG_ENABLE;
                hpet_writel(cfg, HPET_CFG);

                if (!hpet_boot_cfg)
                        return;

                id = hpet_readl(HPET_ID);
                last = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);

                for (id = 0; id <= last; ++id)
                        hpet_writel(hpet_boot_cfg[id + 1], HPET_Tn_CFG(id));

                if (*hpet_boot_cfg & HPET_CFG_ENABLE)
                        hpet_writel(*hpet_boot_cfg, HPET_CFG);
        }
}

#ifdef CONFIG_HPET_EMULATE_RTC

/*
 * HPET in LegacyReplacement Mode eats up the RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 * RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt, every second, when the RTC
 *    clock is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupts, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64 Hz or the user-specified periodic
 * frequency, whichever is higher.
 */
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>

#define DEFAULT_RTC_INT_FREQ	64
#define DEFAULT_RTC_SHIFT	6
#define RTC_NUM_INTS		1

static unsigned long hpet_rtc_flags;
static int hpet_prev_update_sec;
static struct rtc_time hpet_alarm_time;
static unsigned long hpet_pie_count;
static u32 hpet_t1_cmp;
static u32 hpet_default_delta;
static u32 hpet_pie_delta;
static unsigned long hpet_pie_limit;

static rtc_irq_handler irq_handler;

/*
 * Check that the hpet counter c1 is ahead of c2
 */
static inline int hpet_cnt_ahead(u32 c1, u32 c2)
{
        return (s32)(c2 - c1) < 0;
}

/*
 * Registers an IRQ handler.
 */
int hpet_register_irq_handler(rtc_irq_handler handler)
{
        if (!is_hpet_enabled())
                return -ENODEV;
        if (irq_handler)
                return -EBUSY;

        irq_handler = handler;

        return 0;
}
EXPORT_SYMBOL_GPL(hpet_register_irq_handler);

/*
 * Deregisters the IRQ handler registered with hpet_register_irq_handler()
 * and does cleanup.
 */
void hpet_unregister_irq_handler(rtc_irq_handler handler)
{
        if (!is_hpet_enabled())
                return;

        irq_handler = NULL;
        hpet_rtc_flags = 0;
}
EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);

/*
 * Timer 1 for RTC emulation. We use one shot mode, as periodic mode
 * is not supported by all HPET implementations for timer 1.
 *
 * hpet_rtc_timer_init() is called when the rtc is initialized.
 */
int hpet_rtc_timer_init(void)
{
        unsigned int cfg, cnt, delta;
        unsigned long flags;

        if (!is_hpet_enabled())
                return 0;

        if (!hpet_default_delta) {
                uint64_t clc;

                clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
                clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
                hpet_default_delta = clc;
        }

        if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
                delta = hpet_default_delta;
        else
                delta = hpet_pie_delta;

        local_irq_save(flags);

        cnt = delta + hpet_readl(HPET_COUNTER);
        hpet_writel(cnt, HPET_T1_CMP);
        hpet_t1_cmp = cnt;

        cfg = hpet_readl(HPET_T1_CFG);
        cfg &= ~HPET_TN_PERIODIC;
        cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
        hpet_writel(cfg, HPET_T1_CFG);

        local_irq_restore(flags);

        return 1;
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);

static void hpet_disable_rtc_channel(void)
{
        u32 cfg = hpet_readl(HPET_T1_CFG);

        cfg &= ~HPET_TN_ENABLE;
        hpet_writel(cfg, HPET_T1_CFG);
}

/*
 * The functions below are called from the rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
        if (!is_hpet_enabled())
                return 0;

        hpet_rtc_flags &= ~bit_mask;
        if (unlikely(!hpet_rtc_flags))
                hpet_disable_rtc_channel();

        return 1;
}
EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);

int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
        unsigned long oldbits = hpet_rtc_flags;

        if (!is_hpet_enabled())
                return 0;

        hpet_rtc_flags |= bit_mask;

        if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE))
                hpet_prev_update_sec = -1;

        if (!oldbits)
                hpet_rtc_timer_init();

        return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit);

int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
                        unsigned char sec)
{
        if (!is_hpet_enabled())
                return 0;

        hpet_alarm_time.tm_hour = hrs;
        hpet_alarm_time.tm_min = min;
        hpet_alarm_time.tm_sec = sec;

        return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_alarm_time);

int hpet_set_periodic_freq(unsigned long freq)
{
        uint64_t clc;

        if (!is_hpet_enabled())
                return 0;

        if (freq <= DEFAULT_RTC_INT_FREQ)
                hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
        else {
                clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
                do_div(clc, freq);
                clc >>= hpet_clockevent.shift;
                hpet_pie_delta = clc;
                hpet_pie_limit = 0;
        }
        return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);

int hpet_rtc_dropped_irq(void)
{
        return is_hpet_enabled();
}
EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
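
/*
 * Re-arm the RTC emulation comparator. If the comparator has already been
 * passed, keep advancing it by one period and account for the lost ticks.
 */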
static void hpet_rtc_timer_reinit(void)
{
        unsigned int delta;
        int lost_ints = -1;

        if (unlikely(!hpet_rtc_flags))
                hpet_disable_rtc_channel();

        if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
                delta = hpet_default_delta;
        else
                delta = hpet_pie_delta;

        /*
         * Increment the comparator value until we are ahead of the
         * current count.
         */
        do {
                hpet_t1_cmp += delta;
                hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
                lost_ints++;
        } while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));

        if (lost_ints) {
                if (hpet_rtc_flags & RTC_PIE)
                        hpet_pie_count += lost_ints;
                if (printk_ratelimit())
                        printk(KERN_WARNING "hpet1: lost %d rtc interrupts\n",
                               lost_ints);
        }
}
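
/*
 * Timer 1 interrupt handler for RTC emulation: re-arms the channel and
 * synthesizes update (UF), alarm (AF) and periodic (PF) events for the
 * handler registered via hpet_register_irq_handler().
 */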
irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
        struct rtc_time curr_time;
        unsigned long rtc_int_flag = 0;

        hpet_rtc_timer_reinit();
        memset(&curr_time, 0, sizeof(struct rtc_time));

        if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
                mc146818_get_time(&curr_time);

        if (hpet_rtc_flags & RTC_UIE &&
            curr_time.tm_sec != hpet_prev_update_sec) {
                if (hpet_prev_update_sec >= 0)
                        rtc_int_flag = RTC_UF;
                hpet_prev_update_sec = curr_time.tm_sec;
        }

        if (hpet_rtc_flags & RTC_PIE &&
            ++hpet_pie_count >= hpet_pie_limit) {
                rtc_int_flag |= RTC_PF;
                hpet_pie_count = 0;
        }

        if (hpet_rtc_flags & RTC_AIE &&
            (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
            (curr_time.tm_min == hpet_alarm_time.tm_min) &&
            (curr_time.tm_hour == hpet_alarm_time.tm_hour))
                rtc_int_flag |= RTC_AF;

        if (rtc_int_flag) {
                rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
                if (irq_handler)
                        irq_handler(rtc_int_flag, dev_id);
        }
        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
#endif