cevt-smtc.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>
/*
 * Variant clock event timer support for SMTC on MIPS 34K, 1004K
 * or other MIPS MT cores.
 *
 * Notes on SMTC Support:
 *
 * SMTC has multiple microthread TCs pretending to be Linux CPUs.
 * But there's only one Count/Compare pair per VPE, and Compare
 * interrupts are taken opportunistically by available TCs
 * bound to the VPE with the Count register. The new timer
 * framework provides for global broadcasts, but we really
 * want VPE-level multicasts for best behavior. So instead
 * of invoking the high-level clock-event broadcast code,
 * this version of SMTC support uses the historical SMTC
 * multicast mechanisms "under the hood", appearing to the
 * generic clock layer as if the interrupts are per-CPU.
 *
 * The approach taken here is to maintain a set of NR_CPUS
 * virtual timers, and track which "CPU" needs to be alerted
 * at each event.
 *
 * It's unlikely that we'll see a MIPS MT core with more than
 * 2 VPEs, but we *know* that we won't need to handle more
 * VPEs than we have "CPUs". So NR_CPUS arrays of NR_CPUS
 * elements are always going to be overkill, but always going
 * to be enough.
 */
unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
static int smtc_nextinvpe[NR_CPUS];
/*
 * Timestamps stored are absolute values to be programmed
 * into the Count register. Valid timestamps will never be zero.
 * If a zero Count value is actually calculated, it is converted
 * to 1, which will introduce one or two CPU cycles of error
 * roughly once every four billion events, which at 1000 HZ means
 * about once every 50 days. If that's actually a problem, one
 * could alternate squashing 0 to 1 and to -1.
 */

#define MAKEVALID(x)    (((x) == 0L) ? 1L : (x))
#define ISVALID(x)      ((x) != 0L)
/*
 * Time comparison is subtle, as it's really truncated
 * modular arithmetic.
 */

#define IS_SOONER(a, b, reference) \
        (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
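
/*
 * Worked example (illustrative values only, not taken from the code
 * above): with a 32-bit Count and reference = 0xfffffff0,
 * a = 0x00000010 is 0x20 cycles in the future, while b = 0xfffffff8
 * is only 0x08 cycles away. IS_SOONER(b, a, reference) is therefore
 * true even though b > a as raw values, because the truncated
 * differences compare as (b - reference) = 0x08 < (a - reference) = 0x20.
 */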
/*
 * CATCHUP_INCREMENT is used when Compare programming falls behind
 * the Count register. Could be an increasing function instead of
 * a constant.
 */

#define CATCHUP_INCREMENT 64
static int mips_next_event(unsigned long delta,
                           struct clock_event_device *evt)
{
        unsigned long flags;
        unsigned int mtflags;
        unsigned long timestamp, reference, previous;
        unsigned long nextcomp = 0L;
        int vpe = current_cpu_data.vpe_id;
        int cpu = smp_processor_id();

        local_irq_save(flags);
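        /*
         * dmt() suspends multi-threaded execution on this VPE so the
         * shared Count/Compare state and the virtual-timer array can
         * be updated without interference from sibling TCs; emt()
         * below restores the previous MT state.
         */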
        mtflags = dmt();

        /*
         * Maintain the per-TC virtual timer
         * and program the per-VPE shared Count register
         * as appropriate here...
         */
        reference = (unsigned long)read_c0_count();
        timestamp = MAKEVALID(reference + delta);
        /*
         * To really model the clock, we have to catch the case
         * where the current next-in-VPE timestamp is the old
         * timestamp for the calling CPU, but the new value is
         * in fact later. In that case, we have to do a full
         * scan and discover the new next-in-VPE CPU id and
         * timestamp.
         */
        previous = smtc_nexttime[vpe][cpu];
        if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
            && IS_SOONER(previous, timestamp, reference)) {
                int i;
                int soonest = cpu;

                /*
                 * Update timestamp array here, so that new
                 * value gets considered along with those of
                 * other virtual CPUs on the VPE.
                 */
                smtc_nexttime[vpe][cpu] = timestamp;
                for_each_online_cpu(i) {
                        if (ISVALID(smtc_nexttime[vpe][i])
                            && IS_SOONER(smtc_nexttime[vpe][i],
                                         smtc_nexttime[vpe][soonest],
                                         reference)) {
                                soonest = i;
                        }
                }
                smtc_nextinvpe[vpe] = soonest;
                nextcomp = smtc_nexttime[vpe][soonest];
        /*
         * Otherwise, we don't have to process the whole array rank,
         * we just have to see if the event horizon has gotten closer.
         */
        } else {
                if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
                    IS_SOONER(timestamp,
                              smtc_nexttime[vpe][smtc_nextinvpe[vpe]],
                              reference)) {
                        smtc_nextinvpe[vpe] = cpu;
                        nextcomp = timestamp;
                }
                /*
                 * Since next-in-VPE may be the same as the executing
                 * virtual CPU, we update the array *after* checking
                 * its value.
                 */
                smtc_nexttime[vpe][cpu] = timestamp;
        }
        /*
         * It may be that, in fact, we don't need to update Compare,
         * but if we do, we want to make sure we didn't fall into
         * a crack just behind Count.
         */
        if (ISVALID(nextcomp)) {
                write_c0_compare(nextcomp);
                ehb();
                /*
                 * We never return an error, we just make sure
                 * that we trigger the handlers as quickly as
                 * we can if we fell behind.
                 */
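                /*
                 * The unsigned difference exceeds LONG_MAX exactly
                 * when its sign bit is set, i.e. when nextcomp has
                 * already fallen behind the free-running Count, so
                 * keep bumping Compare forward until it lands in the
                 * future again.
                 */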
                while ((nextcomp - (unsigned long)read_c0_count())
                       > (unsigned long)LONG_MAX) {
                        nextcomp += CATCHUP_INCREMENT;
                        write_c0_compare(nextcomp);
                        ehb();
                }
        }
        emt(mtflags);
        local_irq_restore(flags);
        return 0;
}

void smtc_distribute_timer(int vpe)
{
        unsigned long flags;
        unsigned int mtflags;
        int cpu;
        struct clock_event_device *cd;
        unsigned long nextstamp;
        unsigned long reference;

repeat:
        nextstamp = 0L;
        for_each_online_cpu(cpu) {
                /*
                 * Find virtual CPUs within the current VPE who have
                 * unserviced timer requests whose time is now past.
                 */
                local_irq_save(flags);
                mtflags = dmt();
                if (cpu_data[cpu].vpe_id == vpe &&
                    ISVALID(smtc_nexttime[vpe][cpu])) {
                        reference = (unsigned long)read_c0_count();
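                        /*
                         * As in mips_next_event(): an unsigned
                         * difference above LONG_MAX means this
                         * timestamp is already in the past.
                         */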
                        if ((smtc_nexttime[vpe][cpu] - reference)
                            > (unsigned long)LONG_MAX) {
                                smtc_nexttime[vpe][cpu] = 0L;
                                emt(mtflags);
                                local_irq_restore(flags);
                                /*
                                 * We don't send IPIs to ourselves.
                                 */
                                if (cpu != smp_processor_id()) {
                                        smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
                                } else {
                                        cd = &per_cpu(mips_clockevent_device, cpu);
                                        cd->event_handler(cd);
                                }
                        } else {
                                /* Local to VPE but valid time not yet reached. */
                                if (!ISVALID(nextstamp) ||
                                    IS_SOONER(smtc_nexttime[vpe][cpu],
                                              nextstamp, reference)) {
                                        smtc_nextinvpe[vpe] = cpu;
                                        nextstamp = smtc_nexttime[vpe][cpu];
                                }
                                emt(mtflags);
                                local_irq_restore(flags);
                        }
                } else {
                        emt(mtflags);
                        local_irq_restore(flags);
                }
        }
        /* Reprogram for interrupt at next soonest timestamp for VPE */
        if (ISVALID(nextstamp)) {
                write_c0_compare(nextstamp);
                ehb();
                if ((nextstamp - (unsigned long)read_c0_count())
                    > (unsigned long)LONG_MAX)
                        goto repeat;
        }
}

irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
        int cpu = smp_processor_id();

        /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
        handle_perf_irq(1);
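
        /*
         * Bit 30 of the Cause register is TI, the timer-interrupt
         * pending flag introduced with MIPS32R2.
         */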
        if (read_c0_cause() & (1 << 30)) {
                /* Clear Count/Compare Interrupt */
                write_c0_compare(read_c0_compare());
                smtc_distribute_timer(cpu_data[cpu].vpe_id);
        }
        return IRQ_HANDLED;
}

int __cpuinit smtc_clockevent_init(void)
{
        uint64_t mips_freq = mips_hpt_frequency;
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *cd;
        unsigned int irq;
        int i;
        int j;

        if (!cpu_has_counter || !mips_hpt_frequency)
                return -ENXIO;
        if (cpu == 0) {
                for (i = 0; i < num_possible_cpus(); i++) {
                        smtc_nextinvpe[i] = 0;
                        for (j = 0; j < num_possible_cpus(); j++)
                                smtc_nexttime[i][j] = 0L;
                }
                /*
                 * SMTC also can't have the usability test
                 * run by secondary TCs once Compare is in use.
                 */
                if (!c0_compare_int_usable())
                        return -ENXIO;
        }
        /*
         * With vectored interrupts things are getting platform specific.
         * get_c0_compare_int is a hook to allow a platform to return the
         * interrupt number of its liking.
         */
        irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
        if (get_c0_compare_int)
                irq = get_c0_compare_int();

        cd = &per_cpu(mips_clockevent_device, cpu);

        cd->name                = "MIPS";
        cd->features            = CLOCK_EVT_FEAT_ONESHOT;

        /* Calculate the min / max delta */
        cd->mult                = div_sc((unsigned long)mips_freq, NSEC_PER_SEC, 32);
        cd->shift               = 32;
        cd->max_delta_ns        = clockevent_delta2ns(0x7fffffff, cd);
        cd->min_delta_ns        = clockevent_delta2ns(0x300, cd);
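
        /*
         * For scale (hypothetical 100 MHz counter, not a value taken
         * from this file): a min_delta of 0x300 (768) cycles is about
         * 7.7 us, and a max_delta of 0x7fffffff cycles is about 21.5 s.
         */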
        cd->rating              = 300;
        cd->irq                 = irq;
        cd->cpumask             = cpumask_of(cpu);
        cd->set_next_event      = mips_next_event;
        cd->set_mode            = mips_set_clock_mode;
        cd->event_handler       = mips_event_handler;

        clockevents_register_device(cd);

        /*
         * On SMTC we only want to do the data structure
         * initialization and IRQ setup once.
         */
        if (cpu)
                return 0;
        /*
         * And we need the hwmask associated with the c0_compare
         * vector to be initialized.
         */
        irq_hwmask[irq] = (0x100 << cp0_compare_irq);
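        /*
         * 0x100 is Status.IM0; shifting it left by cp0_compare_irq
         * selects the Status interrupt-mask bit for the timer line.
         */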
        if (cp0_timer_irq_installed)
                return 0;

        cp0_timer_irq_installed = 1;

        setup_irq(irq, &c0_compare_irqaction);

        return 0;
}