time.c — 4.0 KB
/*
 * Copyright (C) 2004-2007 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/time.h>

#include <asm/sysreg.h>

#include <mach/pm.h>
  18. static bool disable_cpu_idle_poll;
  19. static cycle_t read_cycle_count(struct clocksource *cs)
  20. {
  21. return (cycle_t)sysreg_read(COUNT);
  22. }
  23. /*
  24. * The architectural cycle count registers are a fine clocksource unless
  25. * the system idle loop use sleep states like "idle": the CPU cycles
  26. * measured by COUNT (and COMPARE) don't happen during sleep states.
  27. * Their duration also changes if cpufreq changes the CPU clock rate.
  28. * So we rate the clocksource using COUNT as very low quality.
  29. */
  30. static struct clocksource counter = {
  31. .name = "avr32_counter",
  32. .rating = 50,
  33. .read = read_cycle_count,
  34. .mask = CLOCKSOURCE_MASK(32),
  35. .flags = CLOCK_SOURCE_IS_CONTINUOUS,
  36. };
  37. static irqreturn_t timer_interrupt(int irq, void *dev_id)
  38. {
  39. struct clock_event_device *evdev = dev_id;
  40. if (unlikely(!(intc_get_pending(0) & 1)))
  41. return IRQ_NONE;
  42. /*
  43. * Disable the interrupt until the clockevent subsystem
  44. * reprograms it.
  45. */
  46. sysreg_write(COMPARE, 0);
  47. evdev->event_handler(evdev);
  48. return IRQ_HANDLED;
  49. }
  50. static struct irqaction timer_irqaction = {
  51. .handler = timer_interrupt,
  52. /* Oprofile uses the same irq as the timer, so allow it to be shared */
  53. .flags = IRQF_TIMER | IRQF_SHARED,
  54. .name = "avr32_comparator",
  55. };
  56. static int comparator_next_event(unsigned long delta,
  57. struct clock_event_device *evdev)
  58. {
  59. unsigned long flags;
  60. raw_local_irq_save(flags);
  61. /* The time to read COUNT then update COMPARE must be less
  62. * than the min_delta_ns value for this clockevent source.
  63. */
  64. sysreg_write(COMPARE, (sysreg_read(COUNT) + delta) ? : 1);
  65. raw_local_irq_restore(flags);
  66. return 0;
  67. }
  68. static int comparator_shutdown(struct clock_event_device *evdev)
  69. {
  70. pr_debug("%s: %s\n", __func__, evdev->name);
  71. sysreg_write(COMPARE, 0);
  72. if (disable_cpu_idle_poll) {
  73. disable_cpu_idle_poll = false;
  74. /*
  75. * Only disable idle poll if we have forced that
  76. * in a previous call.
  77. */
  78. cpu_idle_poll_ctrl(false);
  79. }
  80. return 0;
  81. }
  82. static int comparator_set_oneshot(struct clock_event_device *evdev)
  83. {
  84. pr_debug("%s: %s\n", __func__, evdev->name);
  85. disable_cpu_idle_poll = true;
  86. /*
  87. * If we're using the COUNT and COMPARE registers we
  88. * need to force idle poll.
  89. */
  90. cpu_idle_poll_ctrl(true);
  91. return 0;
  92. }
  93. static struct clock_event_device comparator = {
  94. .name = "avr32_comparator",
  95. .features = CLOCK_EVT_FEAT_ONESHOT,
  96. .shift = 16,
  97. .rating = 50,
  98. .set_next_event = comparator_next_event,
  99. .set_state_shutdown = comparator_shutdown,
  100. .set_state_oneshot = comparator_set_oneshot,
  101. .tick_resume = comparator_set_oneshot,
  102. };
  103. void read_persistent_clock(struct timespec *ts)
  104. {
  105. ts->tv_sec = mktime(2007, 1, 1, 0, 0, 0);
  106. ts->tv_nsec = 0;
  107. }
  108. void __init time_init(void)
  109. {
  110. unsigned long counter_hz;
  111. int ret;
  112. /* figure rate for counter */
  113. counter_hz = clk_get_rate(boot_cpu_data.clk);
  114. ret = clocksource_register_hz(&counter, counter_hz);
  115. if (ret)
  116. pr_debug("timer: could not register clocksource: %d\n", ret);
  117. /* setup COMPARE clockevent */
  118. comparator.mult = div_sc(counter_hz, NSEC_PER_SEC, comparator.shift);
  119. comparator.max_delta_ns = clockevent_delta2ns((u32)~0, &comparator);
  120. comparator.min_delta_ns = clockevent_delta2ns(50, &comparator) + 1;
  121. comparator.cpumask = cpumask_of(0);
  122. sysreg_write(COMPARE, 0);
  123. timer_irqaction.dev_id = &comparator;
  124. ret = setup_irq(0, &timer_irqaction);
  125. if (ret)
  126. pr_debug("timer: could not request IRQ 0: %d\n", ret);
  127. else {
  128. clockevents_register_device(&comparator);
  129. pr_info("%s: irq 0, %lu.%03lu MHz\n", comparator.name,
  130. ((counter_hz + 500) / 1000) / 1000,
  131. ((counter_hz + 500) / 1000) % 1000);
  132. }
  133. }