cpufreq_stats.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790
  1. /*
  2. * drivers/cpufreq/cpufreq_stats.c
  3. *
  4. * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
  5. * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/slab.h>
  13. #include <linux/cpu.h>
  14. #include <linux/sysfs.h>
  15. #include <linux/cpufreq.h>
  16. #include <linux/module.h>
  17. #include <linux/jiffies.h>
  18. #include <linux/percpu.h>
  19. #include <linux/kobject.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/notifier.h>
  22. #include <linux/sort.h>
  23. #include <linux/err.h>
  24. #include <linux/of.h>
  25. #include <linux/sched.h>
  26. #include <asm/cputime.h>
/* Serialises updates to all per-cpu statistics tables below. */
static spinlock_t cpufreq_stats_lock;

/* Per-policy transition/residency statistics for one CPU. */
struct cpufreq_stats {
	unsigned int cpu;		/* CPU this table belongs to */
	unsigned int total_trans;	/* total number of frequency transitions */
	unsigned long long last_time;	/* jiffies64 stamp of last residency update */
	unsigned int max_state;		/* allocated entries in the arrays below */
	unsigned int state_num;		/* populated (deduplicated) frequencies */
	unsigned int last_index;	/* index of the currently active frequency */
	cputime64_t *time_in_state;	/* cumulative residency per frequency */
	unsigned int *freq_table;	/* frequency values, parallel to time_in_state */
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	unsigned int *trans_table;	/* max_state x max_state transition counts */
#endif
};

/* Residency stats that persist across CPU hotplug, per possible CPU. */
struct all_cpufreq_stats {
	unsigned int state_num;
	cputime64_t *time_in_state;
	unsigned int *freq_table;
};

/*
 * Per-frequency current values for one CPU; curr[] is filled from the
 * device-tree "current" property (see cpufreq_powerstats_create()).
 */
struct cpufreq_power_stats {
	unsigned int state_num;
	unsigned int *curr;
	unsigned int *freq_table;
};

/* Sorted union of every CPU's supported frequencies. */
struct all_freq_table {
	unsigned int *freq_table;
	unsigned int table_size;
};

static struct all_freq_table *all_freq_table;

static DEFINE_PER_CPU(struct all_cpufreq_stats *, all_cpufreq_stats);
static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
static DEFINE_PER_CPU(struct cpufreq_power_stats *, cpufreq_power_stats);

/* sysfs attribute wrapper for the per-policy stats files. */
struct cpufreq_stats_attribute {
	struct attribute attr;
	ssize_t (*show) (struct cpufreq_stats *, char *);
};
/*
 * Charge the time elapsed since the previous update to the frequency
 * the CPU is currently running at (stat->last_index), in both the
 * per-policy table and, when present, the hotplug-persistent
 * all_cpufreq_stats copy.  Returns 0 in all cases, including when no
 * stats table exists for @cpu.
 */
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	struct all_cpufreq_stats *all_stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	all_stat = per_cpu(all_cpufreq_stats, cpu);
	if (!stat) {
		spin_unlock(&cpufreq_stats_lock);
		return 0;
	}
	if (stat->time_in_state) {
		stat->time_in_state[stat->last_index] +=
			cur_time - stat->last_time;
		if (all_stat)
			all_stat->time_in_state[stat->last_index] +=
				cur_time - stat->last_time;
	}
	/* advance the stamp even when time_in_state is not allocated */
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
  87. static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
  88. {
  89. struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
  90. if (!stat)
  91. return 0;
  92. return sprintf(buf, "%d\n",
  93. per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
  94. }
/*
 * sysfs read for "time_in_state": one "<freq> <time>" line per valid
 * frequency, time reported in clock_t units.  Residency is brought up
 * to date first via cpufreq_stats_update().
 */
static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i;
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);

	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	for (i = 0; i < stat->state_num; i++) {
		len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
			(unsigned long long)
			cputime64_to_clock_t(stat->time_in_state[i]));
	}
	return len;
}
  110. static int get_index_all_cpufreq_stat(struct all_cpufreq_stats *all_stat,
  111. unsigned int freq)
  112. {
  113. int i;
  114. if (!all_stat)
  115. return -1;
  116. for (i = 0; i < all_stat->state_num; i++) {
  117. if (all_stat->freq_table[i] == freq)
  118. return i;
  119. }
  120. return -1;
  121. }
/*
 * Charge power consumption to @task for @cputime spent on its CPU at
 * the currently active frequency.  The per-frequency current value
 * comes from cpufreq_power_stats; the product current * microseconds
 * is accumulated into task->cpu_power.  ULLONG_MAX is treated as a
 * "do not account" sentinel and is never advanced.
 *
 * NOTE(review): powerstats/stats are read without cpufreq_stats_lock —
 * presumably a tolerated racy read on this hot path; confirm.
 */
void acct_update_power(struct task_struct *task, cputime_t cputime) {
	struct cpufreq_power_stats *powerstats;
	struct cpufreq_stats *stats;
	unsigned int cpu_num, curr;

	if (!task)
		return;
	cpu_num = task_cpu(task);
	powerstats = per_cpu(cpufreq_power_stats, cpu_num);
	stats = per_cpu(cpufreq_stats_table, cpu_num);
	if (!powerstats || !stats)
		return;

	/* current value at the frequency the task's CPU is running at */
	curr = powerstats->curr[stats->last_index];
	if (task->cpu_power != ULLONG_MAX)
		task->cpu_power += curr * cputime_to_usecs(cputime);
}
EXPORT_SYMBOL_GPL(acct_update_power);
  138. static ssize_t show_current_in_state(struct kobject *kobj,
  139. struct kobj_attribute *attr, char *buf)
  140. {
  141. ssize_t len = 0;
  142. unsigned int i, cpu;
  143. struct cpufreq_power_stats *powerstats;
  144. spin_lock(&cpufreq_stats_lock);
  145. for_each_possible_cpu(cpu) {
  146. powerstats = per_cpu(cpufreq_power_stats, cpu);
  147. if (!powerstats)
  148. continue;
  149. len += scnprintf(buf + len, PAGE_SIZE - len, "CPU%d:", cpu);
  150. for (i = 0; i < powerstats->state_num; i++)
  151. len += scnprintf(buf + len, PAGE_SIZE - len,
  152. "%d=%d ", powerstats->freq_table[i],
  153. powerstats->curr[i]);
  154. len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
  155. }
  156. spin_unlock(&cpufreq_stats_lock);
  157. return len;
  158. }
/*
 * sysfs read for the global "all_time_in_state" file: a matrix with one
 * row per frequency in the global union table and one column per
 * possible CPU, giving per-CPU residency in clock_t units, or "N/A"
 * when that CPU does not support the frequency.  Online CPUs get their
 * residency refreshed first.
 */
static ssize_t show_all_time_in_state(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	unsigned int i, cpu, freq, index;
	struct all_cpufreq_stats *all_stat;
	struct cpufreq_policy *policy;

	len += scnprintf(buf + len, PAGE_SIZE - len, "freq\t\t");
	for_each_possible_cpu(cpu) {
		len += scnprintf(buf + len, PAGE_SIZE - len, "cpu%d\t\t", cpu);
		if (cpu_online(cpu))
			cpufreq_stats_update(cpu);
	}

	/* all_freq_table is NULL if its init-time allocation failed */
	if (!all_freq_table)
		goto out;
	for (i = 0; i < all_freq_table->table_size; i++) {
		freq = all_freq_table->freq_table[i];
		len += scnprintf(buf + len, PAGE_SIZE - len, "\n%u\t\t", freq);
		for_each_possible_cpu(cpu) {
			policy = cpufreq_cpu_get(cpu);
			if (policy == NULL)
				continue;
			all_stat = per_cpu(all_cpufreq_stats, policy->cpu);
			/* -1 when all_stat is NULL or freq unsupported */
			index = get_index_all_cpufreq_stat(all_stat, freq);
			/*
			 * NOTE(review): index is unsigned, so the != -1
			 * test relies on wraparound to UINT_MAX; it works
			 * but plain int would be clearer.
			 */
			if (index != -1) {
				len += scnprintf(buf + len, PAGE_SIZE - len,
					"%llu\t\t", (unsigned long long)
					cputime64_to_clock_t(all_stat->time_in_state[index]));
			} else {
				len += scnprintf(buf + len, PAGE_SIZE - len,
					"N/A\t\t");
			}
			cpufreq_cpu_put(policy);
		}
	}
out:
	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
/*
 * sysfs read for "trans_table": an NxN matrix of transition counts,
 * rows = "from" frequency, columns = "to" frequency.  Output is
 * clamped to one page; PAGE_SIZE is returned when the formatted table
 * would overflow the buffer.
 */
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i, j;
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);

	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);

	/* header row: one column per "to" frequency */
	len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
	len += snprintf(buf + len, PAGE_SIZE - len, " : ");
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
				stat->freq_table[i]);
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

	/* one row per "from" frequency; matrix is max_state wide */
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
				stat->freq_table[i]);
		for (j = 0; j < stat->state_num; j++) {
			if (len >= PAGE_SIZE)
				break;
			len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
					stat->trans_table[i*stat->max_state+j]);
		}
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	return len;
}
cpufreq_freq_attr_ro(trans_table);
#endif
cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);

/* files published under <policy>/stats/ */
static struct attribute *default_attrs[] = {
	&total_trans.attr,
	&time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	&trans_table.attr,
#endif
	NULL
};
static struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};

/* global sysfs files (on cpufreq_global_kobject), world-readable */
static struct kobj_attribute _attr_all_time_in_state = __ATTR(all_time_in_state,
		0444, show_all_time_in_state, NULL);
static struct kobj_attribute _attr_current_in_state = __ATTR(current_in_state,
		0444, show_current_in_state, NULL);
  257. static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
  258. {
  259. int index;
  260. for (index = 0; index < stat->max_state; index++)
  261. if (stat->freq_table[index] == freq)
  262. return index;
  263. return -1;
  264. }
  265. /* should be called late in the CPU removal sequence so that the stats
  266. * memory is still available in case someone tries to use it.
  267. */
  268. static void cpufreq_stats_free_table(unsigned int cpu)
  269. {
  270. struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
  271. if (stat) {
  272. kfree(stat->time_in_state);
  273. kfree(stat);
  274. }
  275. per_cpu(cpufreq_stats_table, cpu) = NULL;
  276. }
  277. /* must be called early in the CPU removal sequence (before
  278. * cpufreq_remove_dev) so that policy is still valid.
  279. */
  280. static void cpufreq_stats_free_sysfs(unsigned int cpu)
  281. {
  282. struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
  283. if (!cpufreq_frequency_get_table(cpu))
  284. return;
  285. if (policy && policy->cpu == cpu)
  286. sysfs_remove_group(&policy->kobj, &stats_attr_group);
  287. if (policy)
  288. cpufreq_cpu_put(policy);
  289. }
  290. static void cpufreq_allstats_free(void)
  291. {
  292. int cpu;
  293. struct all_cpufreq_stats *all_stat;
  294. sysfs_remove_file(cpufreq_global_kobject,
  295. &_attr_all_time_in_state.attr);
  296. for_each_possible_cpu(cpu) {
  297. all_stat = per_cpu(all_cpufreq_stats, cpu);
  298. if (!all_stat)
  299. continue;
  300. kfree(all_stat->time_in_state);
  301. kfree(all_stat);
  302. per_cpu(all_cpufreq_stats, cpu) = NULL;
  303. }
  304. if (all_freq_table) {
  305. kfree(all_freq_table->freq_table);
  306. kfree(all_freq_table);
  307. all_freq_table = NULL;
  308. }
  309. }
  310. static void cpufreq_powerstats_free(void)
  311. {
  312. int cpu;
  313. struct cpufreq_power_stats *powerstats;
  314. sysfs_remove_file(cpufreq_global_kobject, &_attr_current_in_state.attr);
  315. for_each_possible_cpu(cpu) {
  316. powerstats = per_cpu(cpufreq_power_stats, cpu);
  317. if (!powerstats)
  318. continue;
  319. kfree(powerstats->curr);
  320. kfree(powerstats);
  321. per_cpu(cpufreq_power_stats, cpu) = NULL;
  322. }
  323. }
/*
 * Allocate and register the per-policy stats table for policy->cpu and
 * create the "stats" sysfs group.  @count is the number of valid
 * (non-CPUFREQ_ENTRY_INVALID) entries in @table.
 *
 * time_in_state, freq_table and (optionally) trans_table live in one
 * allocation, laid out as:
 *   [count x cputime64_t][count x u32 freqs][count*count x u32 trans]
 *
 * Returns 0 on success, -EBUSY if a table already exists, -EINVAL if
 * the policy reference cannot be taken, or -ENOMEM.
 *
 * NOTE(review): ret is declared unsigned int but carries negative
 * errnos; it converts back correctly through the int return type, but
 * plain int would be cleaner.  Also, on the -ENOMEM path the sysfs
 * group created above is not removed — confirm whether that is relied
 * upon elsewhere.
 */
static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table, int count)
{
	unsigned int i, j, ret = 0;
	struct cpufreq_stats *stat;
	struct cpufreq_policy *data;
	unsigned int alloc_size;
	unsigned int cpu = policy->cpu;

	if (per_cpu(cpufreq_stats_table, cpu))
		return -EBUSY;
	stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
	if ((stat) == NULL)
		return -ENOMEM;

	data = cpufreq_cpu_get(cpu);
	if (data == NULL) {
		ret = -EINVAL;
		goto error_get_fail;
	}
	ret = sysfs_create_group(&data->kobj, &stats_attr_group);
	if (ret)
		goto error_out;

	stat->cpu = cpu;
	per_cpu(cpufreq_stats_table, cpu) = stat;

	/* single backing allocation for all arrays (layout above) */
	alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	alloc_size += count * count * sizeof(int);
#endif
	stat->max_state = count;
	stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stat->time_in_state) {
		ret = -ENOMEM;
		goto error_out;
	}
	stat->freq_table = (unsigned int *)(stat->time_in_state + count);
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table = stat->freq_table + count;
#endif

	/* copy each valid frequency once, skipping duplicates */
	j = 0;
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if (freq_table_get_index(stat, freq) == -1)
			stat->freq_table[j++] = freq;
	}
	stat->state_num = j;

	spin_lock(&cpufreq_stats_lock);
	stat->last_time = get_jiffies_64();
	stat->last_index = freq_table_get_index(stat, policy->cur);
	spin_unlock(&cpufreq_stats_lock);
	cpufreq_cpu_put(data);
	return 0;
error_out:
	cpufreq_cpu_put(data);
error_get_fail:
	kfree(stat);
	per_cpu(cpufreq_stats_table, cpu) = NULL;
	return ret;
}
  383. static void cpufreq_powerstats_create(unsigned int cpu,
  384. struct cpufreq_frequency_table *table, int count) {
  385. unsigned int alloc_size, i = 0, j = 0, ret = 0;
  386. struct cpufreq_power_stats *powerstats;
  387. struct device_node *cpu_node;
  388. char device_path[16];
  389. powerstats = kzalloc(sizeof(struct cpufreq_power_stats),
  390. GFP_KERNEL);
  391. if (!powerstats)
  392. return;
  393. /* Allocate memory for freq table per cpu as well as clockticks per
  394. * freq*/
  395. alloc_size = count * sizeof(unsigned int) +
  396. count * sizeof(unsigned int);
  397. powerstats->curr = kzalloc(alloc_size, GFP_KERNEL);
  398. if (!powerstats->curr) {
  399. kfree(powerstats);
  400. return;
  401. }
  402. powerstats->freq_table = powerstats->curr + count;
  403. spin_lock(&cpufreq_stats_lock);
  404. for (i = 0; table[i].frequency != CPUFREQ_TABLE_END && j < count; i++) {
  405. unsigned int freq = table[i].frequency;
  406. if (freq == CPUFREQ_ENTRY_INVALID)
  407. continue;
  408. powerstats->freq_table[j++] = freq;
  409. }
  410. powerstats->state_num = j;
  411. snprintf(device_path, sizeof(device_path), "/cpus/cpu@%d", cpu);
  412. cpu_node = of_find_node_by_path(device_path);
  413. if (cpu_node) {
  414. ret = of_property_read_u32_array(cpu_node, "current",
  415. powerstats->curr, count);
  416. if (ret) {
  417. kfree(powerstats->curr);
  418. kfree(powerstats);
  419. powerstats = NULL;
  420. }
  421. }
  422. per_cpu(cpufreq_power_stats, cpu) = powerstats;
  423. spin_unlock(&cpufreq_stats_lock);
  424. }
  425. static int compare_for_sort(const void *lhs_ptr, const void *rhs_ptr)
  426. {
  427. unsigned int lhs = *(const unsigned int *)(lhs_ptr);
  428. unsigned int rhs = *(const unsigned int *)(rhs_ptr);
  429. return (lhs - rhs);
  430. }
  431. static bool check_all_freq_table(unsigned int freq)
  432. {
  433. int i;
  434. for (i = 0; i < all_freq_table->table_size; i++) {
  435. if (freq == all_freq_table->freq_table[i])
  436. return true;
  437. }
  438. return false;
  439. }
  440. static void create_all_freq_table(void)
  441. {
  442. all_freq_table = kzalloc(sizeof(struct all_freq_table),
  443. GFP_KERNEL);
  444. if (!all_freq_table)
  445. pr_warn("could not allocate memory for all_freq_table\n");
  446. return;
  447. }
  448. static void free_all_freq_table(void)
  449. {
  450. if (all_freq_table) {
  451. if (all_freq_table->freq_table) {
  452. kfree(all_freq_table->freq_table);
  453. all_freq_table->freq_table = NULL;
  454. }
  455. kfree(all_freq_table);
  456. all_freq_table = NULL;
  457. }
  458. }
  459. static void add_all_freq_table(unsigned int freq)
  460. {
  461. unsigned int size;
  462. size = sizeof(unsigned int) * (all_freq_table->table_size + 1);
  463. all_freq_table->freq_table = krealloc(all_freq_table->freq_table,
  464. size, GFP_ATOMIC);
  465. if (IS_ERR(all_freq_table->freq_table)) {
  466. pr_warn("Could not reallocate memory for freq_table\n");
  467. all_freq_table->freq_table = NULL;
  468. return;
  469. }
  470. all_freq_table->freq_table[all_freq_table->table_size++] = freq;
  471. }
/*
 * Allocate the hotplug-persistent aggregate stats for @cpu and merge
 * its frequencies into the global all_freq_table, re-sorting it when
 * new entries were added.  @count is the number of valid entries in
 * @table.  Failures are logged and leave the per-cpu pointer NULL.
 */
static void cpufreq_allstats_create(unsigned int cpu,
		struct cpufreq_frequency_table *table, int count)
{
	int i , j = 0;
	unsigned int alloc_size;
	struct all_cpufreq_stats *all_stat;
	bool sort_needed = false;

	all_stat = kzalloc(sizeof(struct all_cpufreq_stats),
			GFP_KERNEL);
	if (!all_stat) {
		pr_warn("Cannot allocate memory for cpufreq stats\n");
		return;
	}
	/*Allocate memory for freq table per cpu as well as clockticks per freq*/
	alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
	all_stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!all_stat->time_in_state) {
		pr_warn("Cannot allocate memory for cpufreq time_in_state\n");
		kfree(all_stat);
		all_stat = NULL;
		return;
	}
	/* frequency values live right after the cputime64_t array */
	all_stat->freq_table = (unsigned int *)
		(all_stat->time_in_state + count);

	spin_lock(&cpufreq_stats_lock);
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		all_stat->freq_table[j++] = freq;
		/* all_freq_table is NULL if its init-time alloc failed */
		if (all_freq_table && !check_all_freq_table(freq)) {
			add_all_freq_table(freq);
			sort_needed = true;
		}
	}
	/* sort_needed implies all_freq_table != NULL (see loop above) */
	if (sort_needed)
		sort(all_freq_table->freq_table, all_freq_table->table_size,
				sizeof(unsigned int), &compare_for_sort, NULL);
	all_stat->state_num = j;
	per_cpu(all_cpufreq_stats, cpu) = all_stat;
	spin_unlock(&cpufreq_stats_lock);
}
  514. static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
  515. unsigned long val, void *data)
  516. {
  517. int ret, count = 0, i;
  518. struct cpufreq_policy *policy = data;
  519. struct cpufreq_frequency_table *table;
  520. unsigned int cpu = policy->cpu;
  521. if (val != CPUFREQ_NOTIFY)
  522. return 0;
  523. table = cpufreq_frequency_get_table(cpu);
  524. if (!table)
  525. return 0;
  526. for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
  527. unsigned int freq = table[i].frequency;
  528. if (freq == CPUFREQ_ENTRY_INVALID)
  529. continue;
  530. count++;
  531. }
  532. if (!per_cpu(all_cpufreq_stats, cpu))
  533. cpufreq_allstats_create(cpu, table, count);
  534. if (!per_cpu(cpufreq_power_stats, cpu))
  535. cpufreq_powerstats_create(cpu, table, count);
  536. ret = cpufreq_stats_create_table(policy, table, count);
  537. if (ret)
  538. return ret;
  539. return 0;
  540. }
/*
 * CPUFREQ_POSTCHANGE transition notifier: close out residency at the
 * old frequency, then record the old_index -> new_index transition.
 * Transitions involving an unknown frequency (index -1) are ignored,
 * as are same-frequency notifications (after the residency update).
 */
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_stats *stat;
	int old_index, new_index;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;
	stat = per_cpu(cpufreq_stats_table, freq->cpu);
	if (!stat)
		return 0;

	old_index = stat->last_index;
	new_index = freq_table_get_index(stat, freq->new);

	/* We can't do stat->time_in_state[-1]= .. */
	if (old_index == -1 || new_index == -1)
		return 0;

	/* charge elapsed time to the old frequency before switching index */
	cpufreq_stats_update(freq->cpu);
	if (old_index == new_index)
		return 0;

	spin_lock(&cpufreq_stats_lock);
	stat->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table[old_index * stat->max_state + new_index]++;
#endif
	stat->total_trans++;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
  569. static int cpufreq_stats_create_table_cpu(unsigned int cpu)
  570. {
  571. struct cpufreq_policy *policy;
  572. struct cpufreq_frequency_table *table;
  573. int ret = -ENODEV, i, count = 0;
  574. policy = cpufreq_cpu_get(cpu);
  575. if (!policy)
  576. return -ENODEV;
  577. table = cpufreq_frequency_get_table(cpu);
  578. if (!table)
  579. goto out;
  580. for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
  581. unsigned int freq = table[i].frequency;
  582. if (freq == CPUFREQ_ENTRY_INVALID)
  583. continue;
  584. count++;
  585. }
  586. if (!per_cpu(all_cpufreq_stats, cpu))
  587. cpufreq_allstats_create(cpu, table, count);
  588. if (!per_cpu(cpufreq_power_stats, cpu))
  589. cpufreq_powerstats_create(cpu, table, count);
  590. ret = cpufreq_stats_create_table(policy, table, count);
  591. out:
  592. cpufreq_cpu_put(policy);
  593. return ret;
  594. }
  595. static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
  596. unsigned long action,
  597. void *hcpu)
  598. {
  599. unsigned int cpu = (unsigned long)hcpu;
  600. switch (action) {
  601. case CPU_ONLINE:
  602. case CPU_ONLINE_FROZEN:
  603. cpufreq_update_policy(cpu);
  604. break;
  605. case CPU_DOWN_PREPARE:
  606. case CPU_DOWN_PREPARE_FROZEN:
  607. cpufreq_stats_free_sysfs(cpu);
  608. break;
  609. case CPU_DEAD:
  610. case CPU_DEAD_FROZEN:
  611. cpufreq_stats_free_table(cpu);
  612. break;
  613. case CPU_DOWN_FAILED:
  614. case CPU_DOWN_FAILED_FROZEN:
  615. cpufreq_stats_create_table_cpu(cpu);
  616. break;
  617. }
  618. return NOTIFY_OK;
  619. }
/* priority=1 so this will get called before cpufreq_remove_dev */
static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
	.notifier_call = cpufreq_stat_cpu_callback,
	.priority = 1,
};

/* rebuilds stats tables whenever a policy is (re)initialised */
static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_stat_notifier_policy
};

/* accounts residency/transitions after each frequency change */
static struct notifier_block notifier_trans_block = {
	.notifier_call = cpufreq_stat_notifier_trans
};
/*
 * Module init: register the policy and transition notifiers plus the
 * hotplug callback, create the global sysfs files, and poke every
 * online CPU so the policy notifier builds its tables.  sysfs file
 * creation failures are logged but not fatal.
 */
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);
	ret = cpufreq_register_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	create_all_freq_table();

	ret = cpufreq_register_notifier(&notifier_trans_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		/* unwind the policy notifier and the global union table */
		cpufreq_unregister_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
		free_all_freq_table();
		return ret;
	}

	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu) {
		cpufreq_update_policy(cpu);
	}

	ret = sysfs_create_file(cpufreq_global_kobject,
			&_attr_all_time_in_state.attr);
	if (ret)
		pr_warn("Cannot create sysfs file for cpufreq stats\n");

	ret = sysfs_create_file(cpufreq_global_kobject,
			&_attr_current_in_state.attr);
	if (ret)
		pr_warn("Cannot create sysfs file for cpufreq current stats\n");

	return 0;
}
/*
 * Module exit: unregister all notifiers, then tear down the per-cpu
 * tables, per-policy sysfs groups, and the global aggregate/power
 * state.
 */
static void __exit cpufreq_stats_exit(void)
{
	unsigned int cpu;

	cpufreq_unregister_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	cpufreq_unregister_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu) {
		cpufreq_stats_free_table(cpu);
		cpufreq_stats_free_sysfs(cpu);
	}
	cpufreq_allstats_free();
	cpufreq_powerstats_free();
}
/* Module metadata and entry points. */
MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
		"through sysfs filesystem");
MODULE_LICENSE("GPL");

module_init(cpufreq_stats_init);
module_exit(cpufreq_stats_exit);