/* cpufreq_stats.c */
/*
 * drivers/cpufreq/cpufreq_stats.c
 *
 * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  11. #include <linux/kernel.h>
  12. #include <linux/slab.h>
  13. #include <linux/cpu.h>
  14. #include <linux/sysfs.h>
  15. #include <linux/cpufreq.h>
  16. #include <linux/module.h>
  17. #include <linux/jiffies.h>
  18. #include <linux/percpu.h>
  19. #include <linux/kobject.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/notifier.h>
  22. #include <linux/sort.h>
  23. #include <linux/err.h>
  24. #include <linux/of.h>
  25. #include <linux/sched.h>
  26. #include <asm/cputime.h>
/* Protects every per-cpu statistics table declared below. */
static spinlock_t cpufreq_stats_lock;

/* Declare a read-only per-policy sysfs attribute named _name backed by _show. */
#define CPUFREQ_STATDEVICE_ATTR(_name, _mode, _show) \
static struct freq_attr _attr_##_name = {\
	.attr = {.name = __stringify(_name), .mode = _mode, }, \
	.show = _show,\
};
/*
 * Per-cpu frequency transition statistics.
 * time_in_state, freq_table (and trans_table when detailed stats are
 * enabled) all live in one allocation owned by time_in_state — see
 * cpufreq_stats_create_table().
 */
struct cpufreq_stats {
	unsigned int cpu;		/* cpu this table belongs to */
	unsigned int total_trans;	/* total number of frequency transitions */
	unsigned long long last_time;	/* jiffies64 stamp of last update */
	unsigned int max_state;		/* number of states allocated */
	unsigned int state_num;		/* number of valid (deduplicated) freqs */
	unsigned int last_index;	/* index of the current frequency */
	cputime64_t *time_in_state;	/* time accumulated at each frequency */
	unsigned int *freq_table;	/* frequency of each state */
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	unsigned int *trans_table;	/* counts indexed by from*max_state + to */
#endif
};
/*
 * Aggregate time-in-state, kept across hotplug; backs the global
 * all_time_in_state sysfs file. freq_table shares the time_in_state
 * allocation.
 */
struct all_cpufreq_stats {
	unsigned int state_num;
	cputime64_t *time_in_state;
	unsigned int *freq_table;
};

/*
 * Per-frequency current draw values read from the device tree
 * "current" property (units per the DT binding — not visible here).
 * curr and freq_table share one allocation.
 */
struct cpufreq_power_stats {
	unsigned int state_num;
	unsigned int *curr;
	unsigned int *freq_table;
};

/* Sorted union of every cpu's frequency table. */
struct all_freq_table {
	unsigned int *freq_table;
	unsigned int table_size;
};

static struct all_freq_table *all_freq_table;

static DEFINE_PER_CPU(struct all_cpufreq_stats *, all_cpufreq_stats);
static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
static DEFINE_PER_CPU(struct cpufreq_power_stats *, cpufreq_power_stats);

struct cpufreq_stats_attribute {
	struct attribute attr;
	ssize_t(*show) (struct cpufreq_stats *, char *);
};
/*
 * Credit the time elapsed since the last update to the frequency the
 * cpu is currently running at, in both the per-cpu table and (when
 * present) the persistent all_cpufreq_stats table.
 */
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	struct all_cpufreq_stats *all_stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	all_stat = per_cpu(all_cpufreq_stats, cpu);
	if (!stat) {
		spin_unlock(&cpufreq_stats_lock);
		return 0;
	}
	if (stat->time_in_state) {
		stat->time_in_state[stat->last_index] +=
			cur_time - stat->last_time;
		if (all_stat)
			all_stat->time_in_state[stat->last_index] +=
				cur_time - stat->last_time;
	}
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
  92. static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
  93. {
  94. struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
  95. if (!stat)
  96. return 0;
  97. return sprintf(buf, "%d\n",
  98. per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
  99. }
/*
 * sysfs: one "<freq> <time>" line per known frequency, with the time
 * converted from cputime64 to clock_t units.
 */
static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i;
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);

	if (!stat)
		return 0;
	/* fold the time since the last transition into the counters first */
	cpufreq_stats_update(stat->cpu);
	for (i = 0; i < stat->state_num; i++) {
		len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
			(unsigned long long)
			cputime64_to_clock_t(stat->time_in_state[i]));
	}
	return len;
}
  115. static int get_index_all_cpufreq_stat(struct all_cpufreq_stats *all_stat,
  116. unsigned int freq)
  117. {
  118. int i;
  119. if (!all_stat)
  120. return -1;
  121. for (i = 0; i < all_stat->state_num; i++) {
  122. if (all_stat->freq_table[i] == freq)
  123. return i;
  124. }
  125. return -1;
  126. }
  127. void acct_update_power(struct task_struct *task, cputime_t cputime) {
  128. struct cpufreq_power_stats *powerstats;
  129. struct cpufreq_stats *stats;
  130. unsigned int cpu_num, curr;
  131. if (!task)
  132. return;
  133. cpu_num = task_cpu(task);
  134. powerstats = per_cpu(cpufreq_power_stats, cpu_num);
  135. stats = per_cpu(cpufreq_stats_table, cpu_num);
  136. if (!powerstats || !stats)
  137. return;
  138. curr = powerstats->curr[stats->last_index];
  139. if (task->cpu_power != ULLONG_MAX)
  140. task->cpu_power += curr * cputime_to_usecs(cputime);
  141. }
  142. EXPORT_SYMBOL_GPL(acct_update_power);
/*
 * sysfs: for each possible cpu, print "CPU<n>:" followed by
 * "<freq>=<current> " pairs from its power stats table.
 */
static ssize_t show_current_in_state(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	unsigned int i, cpu;
	struct cpufreq_power_stats *powerstats;

	spin_lock(&cpufreq_stats_lock);
	for_each_possible_cpu(cpu) {
		powerstats = per_cpu(cpufreq_power_stats, cpu);
		if (!powerstats)
			continue;
		len += scnprintf(buf + len, PAGE_SIZE - len, "CPU%d:", cpu);
		for (i = 0; i < powerstats->state_num; i++)
			len += scnprintf(buf + len, PAGE_SIZE - len,
					"%d=%d ", powerstats->freq_table[i],
					powerstats->curr[i]);
		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	spin_unlock(&cpufreq_stats_lock);
	return len;
}
/*
 * sysfs: matrix of time-in-state across all cpus — one row per
 * frequency in the global table, one column per possible cpu, "N/A"
 * where a cpu does not support that frequency.
 */
static ssize_t show_all_time_in_state(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	unsigned int i, cpu, freq, index;
	struct all_cpufreq_stats *all_stat;
	struct cpufreq_policy *policy;

	len += scnprintf(buf + len, PAGE_SIZE - len, "freq\t\t");
	for_each_possible_cpu(cpu) {
		len += scnprintf(buf + len, PAGE_SIZE - len, "cpu%d\t\t", cpu);
		/* bring online cpus' counters up to "now" before printing */
		if (cpu_online(cpu))
			cpufreq_stats_update(cpu);
	}

	if (!all_freq_table)
		goto out;
	for (i = 0; i < all_freq_table->table_size; i++) {
		freq = all_freq_table->freq_table[i];
		len += scnprintf(buf + len, PAGE_SIZE - len, "\n%u\t\t", freq);
		for_each_possible_cpu(cpu) {
			policy = cpufreq_cpu_get(cpu);
			if (policy == NULL)
				continue;
			all_stat = per_cpu(all_cpufreq_stats, policy->cpu);
			index = get_index_all_cpufreq_stat(all_stat, freq);
			/* index is unsigned; a -1 return compares as UINT_MAX */
			if (index != -1) {
				len += scnprintf(buf + len, PAGE_SIZE - len,
					"%llu\t\t", (unsigned long long)
					cputime64_to_clock_t(all_stat->time_in_state[index]));
			} else {
				len += scnprintf(buf + len, PAGE_SIZE - len,
						"N/A\t\t");
			}
			cpufreq_cpu_put(policy);
		}
	}
out:
	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
/*
 * sysfs: from/to transition-count matrix. Rows are "from" frequencies,
 * columns are "to" frequencies; output is clamped to one page.
 */
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i, j;
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);

	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
	len += snprintf(buf + len, PAGE_SIZE - len, " : ");
	/* header row: every target frequency */
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
				stat->freq_table[i]);
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
				stat->freq_table[i]);
		for (j = 0; j < stat->state_num; j++) {
			if (len >= PAGE_SIZE)
				break;
			/* row-major: counts indexed by from * max_state + to */
			len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
					stat->trans_table[i*stat->max_state+j]);
		}
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	return len;
}
CPUFREQ_STATDEVICE_ATTR(trans_table, 0444, show_trans_table);
#endif
CPUFREQ_STATDEVICE_ATTR(total_trans, 0444, show_total_trans);
CPUFREQ_STATDEVICE_ATTR(time_in_state, 0444, show_time_in_state);

/* Attributes exposed under each policy's "stats" sysfs subdirectory. */
static struct attribute *default_attrs[] = {
	&_attr_total_trans.attr,
	&_attr_time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	&_attr_trans_table.attr,
#endif
	NULL
};
static struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};

/* Global (not per-policy) attributes hung off the cpufreq kobject. */
static struct kobj_attribute _attr_all_time_in_state = __ATTR(all_time_in_state,
		0444, show_all_time_in_state, NULL);

static struct kobj_attribute _attr_current_in_state = __ATTR(current_in_state,
		0444, show_current_in_state, NULL);
  262. static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
  263. {
  264. int index;
  265. for (index = 0; index < stat->max_state; index++)
  266. if (stat->freq_table[index] == freq)
  267. return index;
  268. return -1;
  269. }
/* should be called late in the CPU removal sequence so that the stats
 * memory is still available in case someone tries to use it.
 */
static void cpufreq_stats_free_table(unsigned int cpu)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);

	if (stat) {
		/* freq_table/trans_table share this allocation */
		kfree(stat->time_in_state);
		kfree(stat);
	}
	per_cpu(cpufreq_stats_table, cpu) = NULL;
}
/* must be called early in the CPU removal sequence (before
 * cpufreq_remove_dev) so that policy is still valid.
 */
static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	/* only the policy owner (policy->cpu) carries the stats group */
	if (policy && policy->cpu == cpu)
		sysfs_remove_group(&policy->kobj, &stats_attr_group);
	if (policy)
		cpufreq_cpu_put(policy);
}
/*
 * Tear down the aggregate statistics: the global sysfs file, every
 * per-cpu all_cpufreq_stats table and the global frequency table.
 */
static void cpufreq_allstats_free(void)
{
	int cpu;
	struct all_cpufreq_stats *all_stat;

	sysfs_remove_file(cpufreq_global_kobject,
			&_attr_all_time_in_state.attr);
	for_each_possible_cpu(cpu) {
		all_stat = per_cpu(all_cpufreq_stats, cpu);
		if (!all_stat)
			continue;
		/* freq_table shares the time_in_state allocation */
		kfree(all_stat->time_in_state);
		kfree(all_stat);
		per_cpu(all_cpufreq_stats, cpu) = NULL;
	}
	if (all_freq_table) {
		kfree(all_freq_table->freq_table);
		kfree(all_freq_table);
		all_freq_table = NULL;
	}
}
  313. static void cpufreq_powerstats_free(void)
  314. {
  315. int cpu;
  316. struct cpufreq_power_stats *powerstats;
  317. sysfs_remove_file(cpufreq_global_kobject, &_attr_current_in_state.attr);
  318. for_each_possible_cpu(cpu) {
  319. powerstats = per_cpu(cpufreq_power_stats, cpu);
  320. if (!powerstats)
  321. continue;
  322. kfree(powerstats->curr);
  323. kfree(powerstats);
  324. per_cpu(cpufreq_power_stats, cpu) = NULL;
  325. }
  326. }
  327. static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
  328. struct cpufreq_frequency_table *table, int count)
  329. {
  330. unsigned int i, j, ret = 0;
  331. struct cpufreq_stats *stat;
  332. struct cpufreq_policy *data;
  333. unsigned int alloc_size;
  334. unsigned int cpu = policy->cpu;
  335. if (per_cpu(cpufreq_stats_table, cpu))
  336. return -EBUSY;
  337. stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
  338. if ((stat) == NULL)
  339. return -ENOMEM;
  340. data = cpufreq_cpu_get(cpu);
  341. if (data == NULL) {
  342. ret = -EINVAL;
  343. goto error_get_fail;
  344. }
  345. ret = sysfs_create_group(&data->kobj, &stats_attr_group);
  346. if (ret)
  347. goto error_out;
  348. stat->cpu = cpu;
  349. per_cpu(cpufreq_stats_table, cpu) = stat;
  350. alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
  351. #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
  352. alloc_size += count * count * sizeof(int);
  353. #endif
  354. stat->max_state = count;
  355. stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
  356. if (!stat->time_in_state) {
  357. ret = -ENOMEM;
  358. goto error_out;
  359. }
  360. stat->freq_table = (unsigned int *)(stat->time_in_state + count);
  361. #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
  362. stat->trans_table = stat->freq_table + count;
  363. #endif
  364. j = 0;
  365. for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
  366. unsigned int freq = table[i].frequency;
  367. if (freq == CPUFREQ_ENTRY_INVALID)
  368. continue;
  369. if (freq_table_get_index(stat, freq) == -1)
  370. stat->freq_table[j++] = freq;
  371. }
  372. stat->state_num = j;
  373. spin_lock(&cpufreq_stats_lock);
  374. stat->last_time = get_jiffies_64();
  375. stat->last_index = freq_table_get_index(stat, policy->cur);
  376. spin_unlock(&cpufreq_stats_lock);
  377. cpufreq_cpu_put(data);
  378. return 0;
  379. error_out:
  380. cpufreq_cpu_put(data);
  381. error_get_fail:
  382. kfree(stat);
  383. per_cpu(cpufreq_stats_table, cpu) = NULL;
  384. return ret;
  385. }
/*
 * Build the per-cpu power statistics table: the cpu's valid frequencies
 * plus a per-frequency current value read from the device tree node
 * /cpus/cpu@<n> ("current" property — units defined by the DT binding,
 * not visible here). On any failure the per-cpu pointer is left NULL.
 */
static void cpufreq_powerstats_create(unsigned int cpu,
		struct cpufreq_frequency_table *table, int count) {
	unsigned int alloc_size, i = 0, j = 0, ret = 0;
	struct cpufreq_power_stats *powerstats;
	struct device_node *cpu_node;
	char device_path[16];

	powerstats = kzalloc(sizeof(struct cpufreq_power_stats),
			GFP_KERNEL);
	if (!powerstats)
		return;

	/* Allocate memory for freq table per cpu as well as clockticks per
	 * freq*/
	alloc_size = count * sizeof(unsigned int) +
		count * sizeof(unsigned int);
	powerstats->curr = kzalloc(alloc_size, GFP_KERNEL);
	if (!powerstats->curr) {
		kfree(powerstats);
		return;
	}
	/* second half of the allocation holds the frequency list */
	powerstats->freq_table = powerstats->curr + count;

	spin_lock(&cpufreq_stats_lock);
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END && j < count; i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		powerstats->freq_table[j++] = freq;
	}
	powerstats->state_num = j;

	snprintf(device_path, sizeof(device_path), "/cpus/cpu@%d", cpu);
	cpu_node = of_find_node_by_path(device_path);
	if (cpu_node) {
		ret = of_property_read_u32_array(cpu_node, "current",
				powerstats->curr, count);
		/* no DT data: drop the table rather than expose zeros */
		if (ret) {
			kfree(powerstats->curr);
			kfree(powerstats);
			powerstats = NULL;
		}
	}
	per_cpu(cpufreq_power_stats, cpu) = powerstats;
	spin_unlock(&cpufreq_stats_lock);
}
  428. static int compare_for_sort(const void *lhs_ptr, const void *rhs_ptr)
  429. {
  430. unsigned int lhs = *(const unsigned int *)(lhs_ptr);
  431. unsigned int rhs = *(const unsigned int *)(rhs_ptr);
  432. return (lhs - rhs);
  433. }
  434. static bool check_all_freq_table(unsigned int freq)
  435. {
  436. int i;
  437. for (i = 0; i < all_freq_table->table_size; i++) {
  438. if (freq == all_freq_table->freq_table[i])
  439. return true;
  440. }
  441. return false;
  442. }
  443. static void create_all_freq_table(void)
  444. {
  445. all_freq_table = kzalloc(sizeof(struct all_freq_table),
  446. GFP_KERNEL);
  447. if (!all_freq_table)
  448. pr_warn("could not allocate memory for all_freq_table\n");
  449. return;
  450. }
  451. static void free_all_freq_table(void)
  452. {
  453. if (all_freq_table) {
  454. if (all_freq_table->freq_table) {
  455. kfree(all_freq_table->freq_table);
  456. all_freq_table->freq_table = NULL;
  457. }
  458. kfree(all_freq_table);
  459. all_freq_table = NULL;
  460. }
  461. }
  462. static void add_all_freq_table(unsigned int freq)
  463. {
  464. unsigned int size;
  465. size = sizeof(unsigned int) * (all_freq_table->table_size + 1);
  466. all_freq_table->freq_table = krealloc(all_freq_table->freq_table,
  467. size, GFP_ATOMIC);
  468. if (IS_ERR(all_freq_table->freq_table)) {
  469. pr_warn("Could not reallocate memory for freq_table\n");
  470. all_freq_table->freq_table = NULL;
  471. return;
  472. }
  473. all_freq_table->freq_table[all_freq_table->table_size++] = freq;
  474. }
/*
 * Create the persistent per-cpu aggregate table for @cpu and merge its
 * frequencies into the sorted global all_freq_table.
 */
static void cpufreq_allstats_create(unsigned int cpu,
		struct cpufreq_frequency_table *table, int count)
{
	int i, j = 0;
	unsigned int alloc_size;
	struct all_cpufreq_stats *all_stat;
	bool sort_needed = false;

	all_stat = kzalloc(sizeof(struct all_cpufreq_stats),
			GFP_KERNEL);
	if (!all_stat) {
		pr_warn("Cannot allocate memory for cpufreq stats\n");
		return;
	}

	/*Allocate memory for freq table per cpu as well as clockticks per freq*/
	alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
	all_stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!all_stat->time_in_state) {
		pr_warn("Cannot allocate memory for cpufreq time_in_state\n");
		kfree(all_stat);
		all_stat = NULL;
		return;
	}
	all_stat->freq_table = (unsigned int *)
		(all_stat->time_in_state + count);

	spin_lock(&cpufreq_stats_lock);
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		all_stat->freq_table[j++] = freq;
		/* new frequency for the global table: append and re-sort */
		if (all_freq_table && !check_all_freq_table(freq)) {
			add_all_freq_table(freq);
			sort_needed = true;
		}
	}
	if (sort_needed)
		sort(all_freq_table->freq_table, all_freq_table->table_size,
				sizeof(unsigned int), &compare_for_sort, NULL);
	all_stat->state_num = j;
	per_cpu(all_cpufreq_stats, cpu) = all_stat;
	spin_unlock(&cpufreq_stats_lock);
}
  517. static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
  518. unsigned long val, void *data)
  519. {
  520. int ret, count = 0, i;
  521. struct cpufreq_policy *policy = data;
  522. struct cpufreq_frequency_table *table;
  523. unsigned int cpu = policy->cpu;
  524. if (val != CPUFREQ_NOTIFY)
  525. return 0;
  526. table = cpufreq_frequency_get_table(cpu);
  527. if (!table)
  528. return 0;
  529. for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
  530. unsigned int freq = table[i].frequency;
  531. if (freq == CPUFREQ_ENTRY_INVALID)
  532. continue;
  533. count++;
  534. }
  535. if (!per_cpu(all_cpufreq_stats, cpu))
  536. cpufreq_allstats_create(cpu, table, count);
  537. if (!per_cpu(cpufreq_power_stats, cpu))
  538. cpufreq_powerstats_create(cpu, table, count);
  539. ret = cpufreq_stats_create_table(policy, table, count);
  540. if (ret)
  541. return ret;
  542. return 0;
  543. }
/*
 * Transition notifier (CPUFREQ_POSTCHANGE): close out the time spent
 * at the old frequency, then record the transition.
 */
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_stats *stat;
	int old_index, new_index;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;
	stat = per_cpu(cpufreq_stats_table, freq->cpu);
	if (!stat)
		return 0;

	old_index = stat->last_index;
	new_index = freq_table_get_index(stat, freq->new);

	/* We can't do stat->time_in_state[-1]= .. */
	if (old_index == -1 || new_index == -1)
		return 0;

	cpufreq_stats_update(freq->cpu);

	if (old_index == new_index)
		return 0;

	spin_lock(&cpufreq_stats_lock);
	stat->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table[old_index * stat->max_state + new_index]++;
#endif
	stat->total_trans++;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
/*
 * (Re)create all statistics tables for @cpu; used when CPU_DOWN fails
 * and the sysfs entries removed in DOWN_PREPARE must be rebuilt.
 */
static int cpufreq_stats_create_table_cpu(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *table;
	int ret = -ENODEV, i, count = 0;

	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return -ENODEV;
	table = cpufreq_frequency_get_table(cpu);
	if (!table)
		goto out;

	/* count the valid (non-INVALID) entries */
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		count++;
	}

	if (!per_cpu(all_cpufreq_stats, cpu))
		cpufreq_allstats_create(cpu, table, count);

	if (!per_cpu(cpufreq_power_stats, cpu))
		cpufreq_powerstats_create(cpu, table, count);

	ret = cpufreq_stats_create_table(policy, table, count);
out:
	cpufreq_cpu_put(policy);
	return ret;
}
/*
 * CPU hotplug callback: sysfs entries are removed while the policy is
 * still valid (DOWN_PREPARE); the stats memory itself is freed only
 * once the cpu is fully dead, and rebuilt if the removal fails.
 */
static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
					       unsigned long action,
					       void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cpufreq_update_policy(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cpufreq_stats_free_sysfs(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cpufreq_stats_free_table(cpu);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		cpufreq_stats_create_table_cpu(cpu);
		break;
	}
	return NOTIFY_OK;
}

/* priority=1 so this will get called before cpufreq_remove_dev */
static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
	.notifier_call = cpufreq_stat_cpu_callback,
	.priority = 1,
};

static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_stat_notifier_policy
};

static struct notifier_block notifier_trans_block = {
	.notifier_call = cpufreq_stat_notifier_trans
};
/*
 * Module init: register policy/transition/hotplug notifiers, build
 * tables for cpus that are already online, then create the two global
 * sysfs files (failures there are warned about but not fatal).
 */
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);
	ret = cpufreq_register_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	create_all_freq_table();
	ret = cpufreq_register_notifier(&notifier_trans_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		cpufreq_unregister_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
		free_all_freq_table();
		return ret;
	}

	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	/* trigger the policy notifier for cpus that came up before us */
	for_each_online_cpu(cpu) {
		cpufreq_update_policy(cpu);
	}

	ret = sysfs_create_file(cpufreq_global_kobject,
			&_attr_all_time_in_state.attr);
	if (ret)
		pr_warn("Cannot create sysfs file for cpufreq stats\n");

	ret = sysfs_create_file(cpufreq_global_kobject,
			&_attr_current_in_state.attr);
	if (ret)
		pr_warn("Cannot create sysfs file for cpufreq current stats\n");

	return 0;
}
/* Module exit: unregister all notifiers and free every table. */
static void __exit cpufreq_stats_exit(void)
{
	unsigned int cpu;

	cpufreq_unregister_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	cpufreq_unregister_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu) {
		cpufreq_stats_free_table(cpu);
		cpufreq_stats_free_sysfs(cpu);
	}
	cpufreq_allstats_free();
	cpufreq_powerstats_free();
}
/* Module metadata and entry points. */
MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
		"through sysfs filesystem");
MODULE_LICENSE("GPL");

module_init(cpufreq_stats_init);
module_exit(cpufreq_stats_exit);