/* sysfs.c: Topology sysfs support code for sparc64.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */
  5. #include <linux/sched.h>
  6. #include <linux/sysdev.h>
  7. #include <linux/cpu.h>
  8. #include <linux/smp.h>
  9. #include <linux/percpu.h>
  10. #include <linux/init.h>
  11. #include <asm/cpudata.h>
  12. #include <asm/hypervisor.h>
  13. #include <asm/spitfire.h>
/* Per-cpu buffer the sun4v hypervisor fills with MMU statistics.
 * Its physical address is handed to sun4v_mmustat_conf() below;
 * aligned(64) keeps the buffer on its own cache lines. */
static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64)));

/* Generate a sysdev "show" routine plus a read-only (0444) SYSDEV_ATTR
 * for one unsigned long field NAME of the per-cpu mmu_stats buffer. */
#define SHOW_MMUSTAT_ULONG(NAME) \
static ssize_t show_##NAME(struct sys_device *dev, \
			struct sysdev_attribute *attr, char *buf) \
{ \
	struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
	return sprintf(buf, "%lu\n", p->NAME); \
} \
static SYSDEV_ATTR(NAME, 0444, show_##NAME, NULL)
/* One attribute per hv_mmu_statistics counter: {I,D}-MMU TSB hit and
 * tick counts, for context-0 and non-context-0, per TTE page size. */
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte);
/* NULL-terminated list of all MMU statistic attributes, grouped into
 * the per-cpu "mmu_stats" sysfs directory below. */
static struct attribute *mmu_stat_attrs[] = {
	&attr_immu_tsb_hits_ctx0_8k_tte.attr,
	&attr_immu_tsb_ticks_ctx0_8k_tte.attr,
	&attr_immu_tsb_hits_ctx0_64k_tte.attr,
	&attr_immu_tsb_ticks_ctx0_64k_tte.attr,
	&attr_immu_tsb_hits_ctx0_4mb_tte.attr,
	&attr_immu_tsb_ticks_ctx0_4mb_tte.attr,
	&attr_immu_tsb_hits_ctx0_256mb_tte.attr,
	&attr_immu_tsb_ticks_ctx0_256mb_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_8k_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_8k_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_64k_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_64k_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_4mb_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_256mb_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_8k_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_8k_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_64k_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_64k_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_4mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_256mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr,
	NULL,
};

/* Published as the "mmu_stats" subdirectory of each cpu's sysdev. */
static struct attribute_group mmu_stat_group = {
	.attrs = mmu_stat_attrs,
	.name = "mmu_stats",
};
  94. /* XXX convert to rusty's on_one_cpu */
  95. static unsigned long run_on_cpu(unsigned long cpu,
  96. unsigned long (*func)(unsigned long),
  97. unsigned long arg)
  98. {
  99. cpumask_t old_affinity;
  100. unsigned long ret;
  101. cpumask_copy(&old_affinity, tsk_cpus_allowed(current));
  102. /* should return -EINVAL to userspace */
  103. if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
  104. return 0;
  105. ret = func(arg);
  106. set_cpus_allowed_ptr(current, &old_affinity);
  107. return ret;
  108. }
  109. static unsigned long read_mmustat_enable(unsigned long junk)
  110. {
  111. unsigned long ra = 0;
  112. sun4v_mmustat_info(&ra);
  113. return ra != 0;
  114. }
  115. static unsigned long write_mmustat_enable(unsigned long val)
  116. {
  117. unsigned long ra, orig_ra;
  118. if (val)
  119. ra = __pa(&per_cpu(mmu_stats, smp_processor_id()));
  120. else
  121. ra = 0UL;
  122. return sun4v_mmustat_conf(ra, &orig_ra);
  123. }
  124. static ssize_t show_mmustat_enable(struct sys_device *s,
  125. struct sysdev_attribute *attr, char *buf)
  126. {
  127. unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0);
  128. return sprintf(buf, "%lx\n", val);
  129. }
  130. static ssize_t store_mmustat_enable(struct sys_device *s,
  131. struct sysdev_attribute *attr, const char *buf,
  132. size_t count)
  133. {
  134. unsigned long val, err;
  135. int ret = sscanf(buf, "%ld", &val);
  136. if (ret != 1)
  137. return -EINVAL;
  138. err = run_on_cpu(s->id, write_mmustat_enable, val);
  139. if (err)
  140. return -EIO;
  141. return count;
  142. }
  143. static SYSDEV_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable);
  144. static int mmu_stats_supported;
  145. static int register_mmu_stats(struct sys_device *s)
  146. {
  147. if (!mmu_stats_supported)
  148. return 0;
  149. sysdev_create_file(s, &attr_mmustat_enable);
  150. return sysfs_create_group(&s->kobj, &mmu_stat_group);
  151. }
#ifdef CONFIG_HOTPLUG_CPU
/* Tear down what register_mmu_stats() created, in reverse order.
 * Only needed when cpus can go offline. */
static void unregister_mmu_stats(struct sys_device *s)
{
	if (!mmu_stats_supported)
		return;
	sysfs_remove_group(&s->kobj, &mmu_stat_group);
	sysdev_remove_file(s, &attr_mmustat_enable);
}
#endif
/* Generate a sysdev show routine exposing the unsigned long member
 * MEMBER of this cpu's cpuinfo_sparc under sysfs name NAME. */
#define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct sys_device *dev, \
			struct sysdev_attribute *attr, char *buf) \
{ \
	cpuinfo_sparc *c = &cpu_data(dev->id); \
	return sprintf(buf, "%lu\n", c->MEMBER); \
}

/* As above, for unsigned int members ("%u"). */
#define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct sys_device *dev, \
			struct sysdev_attribute *attr, char *buf) \
{ \
	cpuinfo_sparc *c = &cpu_data(dev->id); \
	return sprintf(buf, "%u\n", c->MEMBER); \
}

/* Per-cpu clock rate and cache geometry, read from cpu_data(). */
SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick);
SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size);
SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size);
/* Read-only per-cpu attributes created for every online cpu by
 * register_cpu_online(). */
static struct sysdev_attribute cpu_core_attrs[] = {
	_SYSDEV_ATTR(clock_tick, 0444, show_clock_tick, NULL),
	_SYSDEV_ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL),
	_SYSDEV_ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
	_SYSDEV_ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL),
	_SYSDEV_ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL),
	_SYSDEV_ATTR(l2_cache_size, 0444, show_l2_cache_size, NULL),
	_SYSDEV_ATTR(l2_cache_line_size, 0444, show_l2_cache_line_size, NULL),
};
/* Per-cpu device structure registered with the cpu subsystem. */
static DEFINE_PER_CPU(struct cpu, cpu_devices);

/* Attach the core attributes and (when supported) the MMU statistics
 * files to @cpu's sysdev as it comes online.
 * NOTE(review): sysdev_create_file()/register_mmu_stats() return values
 * are ignored here, matching the original code. */
static void register_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
		sysdev_create_file(s, &cpu_core_attrs[i]);

	register_mmu_stats(s);
}
#ifdef CONFIG_HOTPLUG_CPU
/* Undo register_cpu_online() when @cpu goes offline: remove the MMU
 * statistics files first, then the core attribute files. */
static void unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int i;

	unregister_mmu_stats(s);
	for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
		sysdev_remove_file(s, &cpu_core_attrs[i]);
}
#endif
/* CPU hotplug callback: create this cpu's sysfs files on online,
 * remove them on death (hotplug builds only).  Other hotplug actions
 * fall through and are ignored.  Always returns NOTIFY_OK. */
static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		register_cpu_online(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		unregister_cpu_online(cpu);
		break;
#endif
	}
	return NOTIFY_OK;
}

/* Registered with the hotplug notifier chain by topology_init(). */
static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
	.notifier_call = sysfs_cpu_notify,
};
  233. static void __init check_mmu_stats(void)
  234. {
  235. unsigned long dummy1, err;
  236. if (tlb_type != hypervisor)
  237. return;
  238. err = sun4v_mmustat_info(&dummy1);
  239. if (!err)
  240. mmu_stats_supported = 1;
  241. }
/* Register every possible NUMA node with the node sysfs subsystem;
 * compiles to nothing on non-NUMA configurations. */
static void register_nodes(void)
{
#ifdef CONFIG_NUMA
	int i;

	for (i = 0; i < MAX_NUMNODES; i++)
		register_one_node(i);
#endif
}
/* Boot-time topology setup: register NUMA nodes, probe for MMU stats
 * support, hook the CPU hotplug notifier, then register every possible
 * cpu — creating the per-cpu sysfs files for those already online
 * (later arrivals are handled by sysfs_cpu_notify). */
static int __init topology_init(void)
{
	int cpu;

	register_nodes();

	check_mmu_stats();

	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		register_cpu(c, cpu);
		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}

	return 0;
}

subsys_initcall(topology_init);