op_model_mipsxx.c 8.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 2004, 05, 06 by Ralf Baechle
  7. * Copyright (C) 2005 by MIPS Technologies, Inc.
  8. */
  9. #include <linux/cpumask.h>
  10. #include <linux/oprofile.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/smp.h>
  13. #include <asm/irq_regs.h>
  14. #include "op_impl.h"
/* Bit definitions for the MIPS CP0 performance counter control registers. */
#define M_PERFCTL_EXL			(1UL << 0)	/* count while EXL is set */
#define M_PERFCTL_KERNEL		(1UL << 1)	/* count in kernel mode */
#define M_PERFCTL_SUPERVISOR		(1UL << 2)	/* count in supervisor mode */
#define M_PERFCTL_USER			(1UL << 3)	/* count in user mode */
#define M_PERFCTL_INTERRUPT_ENABLE	(1UL << 4)	/* raise irq on overflow */
#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff) << 5)
#define M_PERFCTL_VPEID(vpe)		((vpe) << 16)	/* MT: VPE to count */
#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
#define M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
#define M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
#define M_TC_EN_TC			M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)		((tcid) << 22)	/* MT: TC to count */
#define M_PERFCTL_WIDE			(1UL << 30)
#define M_PERFCTL_MORE			(1UL << 31)	/* another counter follows (see __n_counters) */

/* Top bit of a counter value doubles as the overflow indicator. */
#define M_COUNTER_OVERFLOW		(1UL << 31)

/* perf_irq handler in place before init; restored by mipsxx_exit(). */
static int (*save_perf_irq)(void);
#ifdef CONFIG_MIPS_MT_SMP
/* Non-zero when each TC has private counters (Config7 bit 19, probed in init). */
static int cpu_has_mipsmt_pertccounters;

/* Extra control bits restricting a counter to the current CPU's VPE. */
#define WHAT		(M_TC_EN_VPE | \
			 M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id))
/* Register-set selector: 0 with per-TC counters, otherwise this VPE's id. */
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : cpu_data[smp_processor_id()].vpe_id)

/*
 * The number of bits to shift to convert between counters per core and
 * counters per VPE.  There is no reasonable interface atm to obtain the
 * number of VPEs used by Linux and in the 34K this number is fixed to two
 * anyways so we hardcode a few things here for the moment.  The way it's
 * done here will ensure that oprofile VSMP kernel will run right on a lesser
 * core like a 24K also or with maxcpus=1.
 */
static inline unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

#else

#define WHAT		0
#define vpe_id()	0

/* Without MT, per-core and per-VPE counter counts are the same thing. */
static inline unsigned int vpe_shift(void)
{
	return 0;
}

#endif
  59. static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
  60. {
  61. return counters >> vpe_shift();
  62. }
  63. static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
  64. {
  65. return counters << vpe_shift();
  66. }
  67. #define __define_perf_accessors(r, n, np) \
  68. \
  69. static inline unsigned int r_c0_ ## r ## n(void) \
  70. { \
  71. unsigned int cpu = vpe_id(); \
  72. \
  73. switch (cpu) { \
  74. case 0: \
  75. return read_c0_ ## r ## n(); \
  76. case 1: \
  77. return read_c0_ ## r ## np(); \
  78. default: \
  79. BUG(); \
  80. } \
  81. return 0; \
  82. } \
  83. \
  84. static inline void w_c0_ ## r ## n(unsigned int value) \
  85. { \
  86. unsigned int cpu = vpe_id(); \
  87. \
  88. switch (cpu) { \
  89. case 0: \
  90. write_c0_ ## r ## n(value); \
  91. return; \
  92. case 1: \
  93. write_c0_ ## r ## np(value); \
  94. return; \
  95. default: \
  96. BUG(); \
  97. } \
  98. return; \
  99. } \
  100. __define_perf_accessors(perfcntr, 0, 2)
  101. __define_perf_accessors(perfcntr, 1, 3)
  102. __define_perf_accessors(perfcntr, 2, 0)
  103. __define_perf_accessors(perfcntr, 3, 1)
  104. __define_perf_accessors(perfctrl, 0, 2)
  105. __define_perf_accessors(perfctrl, 1, 3)
  106. __define_perf_accessors(perfctrl, 2, 0)
  107. __define_perf_accessors(perfctrl, 3, 1)
struct op_mips_model op_model_mipsxx_ops;

/* Register images computed by mipsxx_reg_setup() and programmed per CPU. */
static struct mipsxx_register_config {
	unsigned int control[4];	/* perfctrl word for each counter */
	unsigned int counter[4];	/* start value: overflows after ctr.count events */
} reg;
  113. /* Compute all of the registers in preparation for enabling profiling. */
  114. static void mipsxx_reg_setup(struct op_counter_config *ctr)
  115. {
  116. unsigned int counters = op_model_mipsxx_ops.num_counters;
  117. int i;
  118. /* Compute the performance counter control word. */
  119. for (i = 0; i < counters; i++) {
  120. reg.control[i] = 0;
  121. reg.counter[i] = 0;
  122. if (!ctr[i].enabled)
  123. continue;
  124. reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
  125. M_PERFCTL_INTERRUPT_ENABLE;
  126. if (ctr[i].kernel)
  127. reg.control[i] |= M_PERFCTL_KERNEL;
  128. if (ctr[i].user)
  129. reg.control[i] |= M_PERFCTL_USER;
  130. if (ctr[i].exl)
  131. reg.control[i] |= M_PERFCTL_EXL;
  132. reg.counter[i] = 0x80000000 - ctr[i].count;
  133. }
  134. }
  135. /* Program all of the registers in preparation for enabling profiling. */
  136. static void mipsxx_cpu_setup(void *args)
  137. {
  138. unsigned int counters = op_model_mipsxx_ops.num_counters;
  139. switch (counters) {
  140. case 4:
  141. w_c0_perfctrl3(0);
  142. w_c0_perfcntr3(reg.counter[3]);
  143. case 3:
  144. w_c0_perfctrl2(0);
  145. w_c0_perfcntr2(reg.counter[2]);
  146. case 2:
  147. w_c0_perfctrl1(0);
  148. w_c0_perfcntr1(reg.counter[1]);
  149. case 1:
  150. w_c0_perfctrl0(0);
  151. w_c0_perfcntr0(reg.counter[0]);
  152. }
  153. }
  154. /* Start all counters on current CPU */
  155. static void mipsxx_cpu_start(void *args)
  156. {
  157. unsigned int counters = op_model_mipsxx_ops.num_counters;
  158. switch (counters) {
  159. case 4:
  160. w_c0_perfctrl3(WHAT | reg.control[3]);
  161. case 3:
  162. w_c0_perfctrl2(WHAT | reg.control[2]);
  163. case 2:
  164. w_c0_perfctrl1(WHAT | reg.control[1]);
  165. case 1:
  166. w_c0_perfctrl0(WHAT | reg.control[0]);
  167. }
  168. }
  169. /* Stop all counters on current CPU */
  170. static void mipsxx_cpu_stop(void *args)
  171. {
  172. unsigned int counters = op_model_mipsxx_ops.num_counters;
  173. switch (counters) {
  174. case 4:
  175. w_c0_perfctrl3(0);
  176. case 3:
  177. w_c0_perfctrl2(0);
  178. case 2:
  179. w_c0_perfctrl1(0);
  180. case 1:
  181. w_c0_perfctrl0(0);
  182. }
  183. }
/*
 * Performance counter overflow interrupt handler.  For every implemented
 * counter (highest-numbered first) that is interrupt-enabled and has its
 * overflow bit set, record an oprofile sample and re-arm the counter with
 * its configured start value.  Returns IRQ_HANDLED if any counter fired.
 */
static int mipsxx_perfcount_handler(void)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;
	unsigned int control;
	unsigned int counter;
	int handled = IRQ_NONE;

	/* On R2 cores, Cause bit 26 (PCI) tells us whether this irq is ours. */
	if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
		return handled;

	/* Each case deliberately falls through to the lower counters. */
	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		control = r_c0_perfctrl ## n();				\
		counter = r_c0_perfcntr ## n();				\
		if ((control & M_PERFCTL_INTERRUPT_ENABLE) &&		\
		    (counter & M_COUNTER_OVERFLOW)) {			\
			oprofile_add_sample(get_irq_regs(), n);		\
			w_c0_perfcntr ## n(reg.counter[n]);		\
			handled = IRQ_HANDLED;				\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

	return handled;
}
  210. #define M_CONFIG1_PC (1 << 4)
  211. static inline int __n_counters(void)
  212. {
  213. if (!(read_c0_config1() & M_CONFIG1_PC))
  214. return 0;
  215. if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
  216. return 1;
  217. if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
  218. return 2;
  219. if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
  220. return 3;
  221. return 4;
  222. }
  223. static inline int n_counters(void)
  224. {
  225. int counters;
  226. switch (current_cpu_type()) {
  227. case CPU_R10000:
  228. counters = 2;
  229. break;
  230. case CPU_R12000:
  231. case CPU_R14000:
  232. counters = 4;
  233. break;
  234. default:
  235. counters = __n_counters();
  236. }
  237. return counters;
  238. }
  239. static void reset_counters(void *arg)
  240. {
  241. int counters = (int)(long)arg;
  242. switch (counters) {
  243. case 4:
  244. w_c0_perfctrl3(0);
  245. w_c0_perfcntr3(0);
  246. case 3:
  247. w_c0_perfctrl2(0);
  248. w_c0_perfcntr2(0);
  249. case 2:
  250. w_c0_perfctrl1(0);
  251. w_c0_perfcntr1(0);
  252. case 1:
  253. w_c0_perfctrl0(0);
  254. w_c0_perfcntr0(0);
  255. }
  256. }
/*
 * Probe the counters, reset them on every CPU, pick the cpu_type string
 * reported to oprofile, and hook the performance counter interrupt.
 * Returns 0 on success or -ENODEV if the CPU is unsupported.
 */
static int __init mipsxx_init(void)
{
	int counters;

	counters = n_counters();
	if (counters == 0) {
		printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_MT_SMP
	/* Config7 bit 19: each TC has its own counters. */
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
	if (!cpu_has_mipsmt_pertccounters)
		/* Counters are shared between VPEs; use this VPE's share. */
		counters = counters_total_to_per_cpu(counters);
#endif
	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	op_model_mipsxx_ops.num_counters = counters;
	switch (current_cpu_type()) {
	case CPU_20KC:
		op_model_mipsxx_ops.cpu_type = "mips/20K";
		break;

	case CPU_24K:
		op_model_mipsxx_ops.cpu_type = "mips/24K";
		break;

	case CPU_25KF:
		op_model_mipsxx_ops.cpu_type = "mips/25K";
		break;

	case CPU_1004K:
#if 0
		/* FIXME: report as 34K for now */
		op_model_mipsxx_ops.cpu_type = "mips/1004K";
		break;
#endif
		/* fall through: 1004K is reported as a 34K for now */

	case CPU_34K:
		op_model_mipsxx_ops.cpu_type = "mips/34K";
		break;

	case CPU_74K:
		op_model_mipsxx_ops.cpu_type = "mips/74K";
		break;

	case CPU_5KC:
		op_model_mipsxx_ops.cpu_type = "mips/5K";
		break;

	case CPU_R10000:
		if ((current_cpu_data.processor_id & 0xff) == 0x20)
			op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
		else
			op_model_mipsxx_ops.cpu_type = "mips/r10000";
		break;

	case CPU_R12000:
	case CPU_R14000:
		op_model_mipsxx_ops.cpu_type = "mips/r12000";
		break;

	case CPU_SB1:
	case CPU_SB1A:
		op_model_mipsxx_ops.cpu_type = "mips/sb1";
		break;

	default:
		printk(KERN_ERR "Profiling unsupported for this CPU\n");
		return -ENODEV;
	}

	/* Keep the old handler so mipsxx_exit() can restore it. */
	save_perf_irq = perf_irq;
	perf_irq = mipsxx_perfcount_handler;

	return 0;
}
  319. static void mipsxx_exit(void)
  320. {
  321. int counters = op_model_mipsxx_ops.num_counters;
  322. counters = counters_per_cpu_to_total(counters);
  323. on_each_cpu(reset_counters, (void *)(long)counters, 1);
  324. perf_irq = save_perf_irq;
  325. }
  326. struct op_mips_model op_model_mipsxx_ops = {
  327. .reg_setup = mipsxx_reg_setup,
  328. .cpu_setup = mipsxx_cpu_setup,
  329. .init = mipsxx_init,
  330. .exit = mipsxx_exit,
  331. .cpu_start = mipsxx_cpu_start,
  332. .cpu_stop = mipsxx_cpu_stop,
  333. };