/*
 * kernel/power/main.c - PM subsystem core functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 */

#include <linux/export.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/resume-trace.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/hrtimer.h>

#define CONFIG_SUSPEND_HELPER //etinum.test
//#define SUSPEND_WAKEUP_BOOST

#ifdef SUSPEND_WAKEUP_BOOST
#include <linux/sched.h>
#endif

#ifdef CONFIG_CPU_FREQ_LIMIT_USERSPACE
#include <linux/cpufreq.h>
#include <linux/cpufreq_limit.h>
#endif
#ifdef CONFIG_SEC_DVFS
#include <linux/cpufreq.h>
#include <linux/rq_stats.h>
#endif

#include "power.h"

#define MAX_BUF 100

DEFINE_MUTEX(pm_mutex);

#ifdef CONFIG_PM_SLEEP

/* Routines for PM-transition notifications */

static BLOCKING_NOTIFIER_HEAD(pm_chain_head);

static void touch_event_fn(struct work_struct *work);
static DECLARE_WORK(touch_event_struct, touch_event_fn);

static struct hrtimer tc_ev_timer;
static int tc_ev_processed;
static ktime_t touch_evt_timer_val;

int register_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(register_pm_notifier);

int unregister_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);

int pm_notifier_call_chain(unsigned long val)
{
	int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);

	return notifier_to_errno(ret);
}

/* If set, devices may be suspended and resumed asynchronously. */
int pm_async_enabled = 1;

static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", pm_async_enabled);
}

static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
			      const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	pm_async_enabled = val;
	return n;
}

power_attr(pm_async);
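/*
 * Touch event notification: tc_ev_timer is initialized in pm_init() with a
 * two second expiry value.  When the timer fires, tc_ev_stop() schedules
 * touch_event_struct, and touch_event_fn() marks the event as processed and
 * notifies the "touch_event" sysfs node so a userspace poller wakes up.
 */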
static void touch_event_fn(struct work_struct *work)
{
	/* wakeup the userspace poll */
	tc_ev_processed = 1;
	sysfs_notify(power_kobj, NULL, "touch_event");

	return;
}

static enum hrtimer_restart tc_ev_stop(struct hrtimer *hrtimer)
{
	schedule_work(&touch_event_struct);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_PM_DEBUG
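/*
 * /sys/power/pm_test: selects the PM test level used by the suspend core.
 * pm_test_show() lists all defined levels and brackets the active one;
 * pm_test_store() accepts one of the level names listed in pm_tests[].
 */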
int pm_test_level = TEST_NONE;

static const char * const pm_tests[__TEST_AFTER_LAST] = {
	[TEST_NONE] = "none",
	[TEST_CORE] = "core",
	[TEST_CPUS] = "processors",
	[TEST_PLATFORM] = "platform",
	[TEST_DEVICES] = "devices",
	[TEST_FREEZER] = "freezer",
};

static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	char *s = buf;
	int level;

	for (level = TEST_FIRST; level <= TEST_MAX; level++)
		if (pm_tests[level]) {
			if (level == pm_test_level)
				s += sprintf(s, "[%s] ", pm_tests[level]);
			else
				s += sprintf(s, "%s ", pm_tests[level]);
		}

	if (s != buf)
		/* convert the last space to a newline */
		*(s-1) = '\n';

	return (s - buf);
}

static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t n)
{
	const char * const *s;
	int level;
	char *p;
	int len;
	int error = -EINVAL;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	lock_system_sleep();

	level = TEST_FIRST;
	for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
		if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
			pm_test_level = level;
			error = 0;
			break;
		}

	unlock_system_sleep();

	return error ? error : n;
}

power_attr(pm_test);
#endif /* CONFIG_PM_DEBUG */

#ifdef CONFIG_DEBUG_FS
static char *suspend_step_name(enum suspend_stat_step step)
{
	switch (step) {
	case SUSPEND_FREEZE:
		return "freeze";
	case SUSPEND_PREPARE:
		return "prepare";
	case SUSPEND_SUSPEND:
		return "suspend";
	case SUSPEND_SUSPEND_NOIRQ:
		return "suspend_noirq";
	case SUSPEND_RESUME_NOIRQ:
		return "resume_noirq";
	case SUSPEND_RESUME:
		return "resume";
	default:
		return "";
	}
}
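/*
 * debugfs "suspend_stats": dumps the global suspend_stats counters and then
 * the last REC_FAILED_NUM failed devices, errnos and steps, newest first
 * (the failed_devs[], errno[] and failed_steps[] arrays are ring buffers
 * indexed by the corresponding last_failed_* counters).
 */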
static int suspend_stats_show(struct seq_file *s, void *unused)
{
	int i, index, last_dev, last_errno, last_step;

	last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
	last_dev %= REC_FAILED_NUM;
	last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
	last_errno %= REC_FAILED_NUM;
	last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
	last_step %= REC_FAILED_NUM;

	seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
			"%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
			"success", suspend_stats.success,
			"fail", suspend_stats.fail,
			"failed_freeze", suspend_stats.failed_freeze,
			"failed_prepare", suspend_stats.failed_prepare,
			"failed_suspend", suspend_stats.failed_suspend,
			"failed_suspend_late",
				suspend_stats.failed_suspend_late,
			"failed_suspend_noirq",
				suspend_stats.failed_suspend_noirq,
			"failed_resume", suspend_stats.failed_resume,
			"failed_resume_early",
				suspend_stats.failed_resume_early,
			"failed_resume_noirq",
				suspend_stats.failed_resume_noirq);
	seq_printf(s, "failures:\n last_failed_dev:\t%-s\n",
			suspend_stats.failed_devs[last_dev]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_dev + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
			suspend_stats.failed_devs[index]);
	}
	seq_printf(s, " last_failed_errno:\t%-d\n",
			suspend_stats.errno[last_errno]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_errno + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-d\n",
			suspend_stats.errno[index]);
	}
	seq_printf(s, " last_failed_step:\t%-s\n",
			suspend_step_name(
				suspend_stats.failed_steps[last_step]));
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_step + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
			suspend_step_name(
				suspend_stats.failed_steps[index]));
	}

	return 0;
}

static int suspend_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, suspend_stats_show, NULL);
}

static const struct file_operations suspend_stats_operations = {
	.open		= suspend_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pm_debugfs_init(void)
{
	debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
			NULL, NULL, &suspend_stats_operations);
	return 0;
}

late_initcall(pm_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_SLEEP_DEBUG
/*
 * pm_print_times: print time taken by devices to suspend and resume.
 *
 * show() returns whether printing of suspend and resume times is enabled.
 * store() accepts 0 or 1.  0 disables printing and 1 enables it.
 */
bool pm_print_times_enabled;

static ssize_t pm_print_times_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pm_print_times_enabled);
}

static ssize_t pm_print_times_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	pm_print_times_enabled = !!val;
	return n;
}

power_attr(pm_print_times);

static inline void pm_print_times_init(void)
{
	pm_print_times_enabled = !!initcall_debug;
}
#else /* !CONFIG_PM_SLEEP_DEBUG */
static inline void pm_print_times_init(void) {}
#endif /* CONFIG_PM_SLEEP_DEBUG */

struct kobject *power_kobj;

/**
 * state - control system power state.
 *
 * show() returns what states are supported, which is hard-coded to
 * 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
 * 'disk' (Suspend-to-Disk).
 *
 * store() accepts one of those strings, translates it into the
 * proper enumerated value, and initiates a suspend transition.
 */
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	char *s = buf;
#ifdef CONFIG_SUSPEND
	suspend_state_t i;

	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
		if (valid_state(i))
			s += sprintf(s, "%s ", pm_states[i].label);

#endif
#ifdef CONFIG_HIBERNATION
	s += sprintf(s, "%s\n", "disk");
#else
	if (s != buf)
		/* convert the last space to a newline */
		*(s-1) = '\n';
#endif
	return (s - buf);
}
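/*
 * decode_state - map a sysfs string to a suspend_state_t.
 * "disk" selects hibernation (PM_SUSPEND_MAX), a label from pm_states[]
 * selects the corresponding suspend state, and anything else falls back
 * to PM_SUSPEND_ON.
 */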
static suspend_state_t decode_state(const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
	suspend_state_t state = PM_SUSPEND_MIN;
	struct pm_sleep_state *s;
#endif
	char *p;
	int len;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	/* Check hibernation first. */
	if (len == 4 && !strncmp(buf, "disk", len))
		return PM_SUSPEND_MAX;

#ifdef CONFIG_SUSPEND
	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++)
		if (len == strlen(s->label) && !strncmp(buf, s->label, len))
			return state;
#endif

	return PM_SUSPEND_ON;
}

#ifdef CONFIG_SUSPEND_HELPER
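/*
 * Suspend helper: writes to /sys/power/state are not executed in the
 * writer's context.  Instead the request is queued on an ordered workqueue
 * ("suspend_helper") and carried out by a kworker, while the writing task
 * tells the freezer to skip it and blocks on a completion until the
 * suspend/resume cycle has finished and a result is available.
 */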
static struct workqueue_struct *suspend_helper_wq;

struct state_store_params {
	const char *buf;
	size_t n;
};

struct suspend_helper_data {
	struct work_struct work;
	struct completion done;
	struct state_store_params params;
	int result;
};
struct suspend_helper_data *suspend_helper_data;

static void suspend_helper(struct work_struct *work)
{
	struct suspend_helper_data *data = (struct suspend_helper_data *)
		container_of(work, struct suspend_helper_data, work);
	const char *buf = data->params.buf;
	size_t n = data->params.n;
	suspend_state_t state;
	int error = 0;

	pr_info("[suspend helper] %s: start!\n", __func__);

	error = pm_autosleep_lock();
	if (error) {
		goto out_nolock;
	}

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	state = decode_state(buf, n);
	if (state < PM_SUSPEND_MAX)
		error = pm_suspend(state);
	else if (state == PM_SUSPEND_MAX)
		error = hibernate();
	else
		error = -EINVAL;

out:
	pm_autosleep_unlock();

out_nolock:
	// set result and notify completion
	data->result = error;
	complete(&data->done);

	pr_info("[suspend helper] %s: result = %d\n", __func__, error);
}

static ssize_t state_store_helper(struct kobject *kobj, struct kobj_attribute *attr,
				  const char *buf, size_t n)
{
	int error;
	int freezable = 0;

	// we don't need to freeze. so tell the freezer
	if (!freezer_should_skip(current)) {
		freezable = 1;
		freezer_do_not_count();
		pr_info("[suspend helper] %s: freezer should skip me (%s:%d)\n",
			__func__, current->comm, current->pid);
	}

	suspend_helper_data->params.buf = buf;
	suspend_helper_data->params.n = n;
	INIT_COMPLETION(suspend_helper_data->done);

	// use kworker for suspend resume
	queue_work(suspend_helper_wq, &suspend_helper_data->work);

	// wait for suspend/resume work to be complete
	wait_for_completion(&suspend_helper_data->done);

	if (freezable) {
		// set ourself as freezable
		freezer_count();
	}

	error = suspend_helper_data->result;
	pr_info("[suspend helper] %s: suspend_helper returned %d\n", __func__, error);

	return error ? error : n;
}
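/*
 * Allocate the ordered workqueue and the single global helper data at boot;
 * state_store() falls back to the legacy in-context path whenever
 * suspend_helper_data is still NULL.
 */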
static int suspend_helper_init(void)
{
	int ret = 0;

	suspend_helper_wq = alloc_ordered_workqueue("suspend_helper", 0);
	if (!suspend_helper_wq)
		return -ENOMEM;

	suspend_helper_data = kzalloc(sizeof(struct suspend_helper_data), GFP_KERNEL);
	if (!suspend_helper_data) {
		ret = -ENOMEM;
		goto out_destroy_wq;
	}

	INIT_WORK(&suspend_helper_data->work, suspend_helper);
	init_completion(&suspend_helper_data->done);

	pr_info("[suspend helper] %s: init done\n", __func__);

	return 0;

out_destroy_wq:
	destroy_workqueue(suspend_helper_wq);

	return ret;
}
#endif

#ifdef SUSPEND_WAKEUP_BOOST
static void pr_sched_state(const char *msg)
{
	pr_debug("[sched state] %s: (%s:%d) %pS policy=%d, prio=%d, static_prio=%d, normal_prio=%d, rt_priority=%d\n",
		msg, current->comm, current->pid,
		current->sched_class, current->policy,
		current->prio, current->static_prio, current->normal_prio, current->rt_priority);
}
#endif
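/*
 * state_store - handle a write to /sys/power/state.  If the suspend helper
 * has been initialized, the request is delegated to it; otherwise the
 * legacy path runs here: take the autosleep lock, decode the target state
 * and enter it via pm_suspend() or hibernate().  With SUSPEND_WAKEUP_BOOST
 * enabled, the writer is temporarily boosted to SCHED_FIFO around the
 * transition and restored afterwards.
 */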
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t n)
{
	suspend_state_t state;
	int error;
#ifdef SUSPEND_WAKEUP_BOOST
	int orig_policy = current->policy;
	int orig_nice = task_nice(current);
	struct sched_param param = { .sched_priority = 1 };
#endif

#ifdef CONFIG_SUSPEND_HELPER
	if (suspend_helper_data) {
		pr_info("[suspend helper] %s: Let our helper do the real work!\n", __func__);
		return state_store_helper(kobj, attr, buf, n);
	}
	pr_info("[suspend helper] %s: helper data not available.. Fall back to the legacy code..\n", __func__);
#endif

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

#ifdef SUSPEND_WAKEUP_BOOST
	pr_sched_state("before boost");

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);

	pr_sched_state("after boost");
#endif

	state = decode_state(buf, n);
	if (state < PM_SUSPEND_MAX)
		error = pm_suspend(state);
	else if (state == PM_SUSPEND_MAX)
		error = hibernate();
	else
		error = -EINVAL;

#ifdef SUSPEND_WAKEUP_BOOST
	pr_sched_state("before restore");

	param.sched_priority = 0;
	sched_setscheduler_nocheck(current, orig_policy, &param);
	set_user_nice(current, orig_nice);

	pr_sched_state("after restore");
#endif

 out:
	pm_autosleep_unlock();
	return error ? error : n;
}

power_attr(state);

#ifdef CONFIG_PM_SLEEP
/*
 * The 'wakeup_count' attribute, along with the functions defined in
 * drivers/base/power/wakeup.c, provides a means by which wakeup events can be
 * handled in a non-racy way.
 *
 * If a wakeup event occurs when the system is in a sleep state, it simply is
 * woken up.  In turn, if an event that would wake the system up from a sleep
 * state occurs when it is undergoing a transition to that sleep state, the
 * transition should be aborted.  Moreover, if such an event occurs when the
 * system is in the working state, an attempt to start a transition to the
 * given sleep state should fail during certain period after the detection of
 * the event.  Using the 'state' attribute alone is not sufficient to satisfy
 * these requirements, because a wakeup event may occur exactly when 'state'
 * is being written to and may be delivered to user space right before it is
 * frozen, so the event will remain only partially processed until the system is
 * woken up by another event.  In particular, it won't cause the transition to
 * a sleep state to be aborted.
 *
 * This difficulty may be overcome if user space uses 'wakeup_count' before
 * writing to 'state'.  It first should read from 'wakeup_count' and store
 * the read value.  Then, after carrying out its own preparations for the system
 * transition to a sleep state, it should write the stored value to
 * 'wakeup_count'.  If that fails, at least one wakeup event has occurred since
 * 'wakeup_count' was read and 'state' should not be written to.  Otherwise, it
 * is allowed to write to 'state', but the transition will be aborted if there
 * are any wakeup events detected after 'wakeup_count' was written to.
 */
static ssize_t wakeup_count_show(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 char *buf)
{
	unsigned int val;

	return pm_get_wakeup_count(&val, true) ?
		sprintf(buf, "%u\n", val) : -EINTR;
}

static ssize_t wakeup_count_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t n)
{
	unsigned int val;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	error = -EINVAL;
	if (sscanf(buf, "%u", &val) == 1) {
		if (pm_save_wakeup_count(val))
			error = n;
		else
			pm_print_active_wakeup_sources();
	}

 out:
	pm_autosleep_unlock();
	return error;
}

power_attr(wakeup_count);

#ifdef CONFIG_PM_AUTOSLEEP
static ssize_t autosleep_show(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      char *buf)
{
	suspend_state_t state = pm_autosleep_state();

	if (state == PM_SUSPEND_ON)
		return sprintf(buf, "off\n");

#ifdef CONFIG_SUSPEND
	if (state < PM_SUSPEND_MAX)
		return sprintf(buf, "%s\n", valid_state(state) ?
					pm_states[state].label : "error");
#endif
#ifdef CONFIG_HIBERNATION
	return sprintf(buf, "disk\n");
#else
	return sprintf(buf, "error");
#endif
}

static ssize_t autosleep_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	suspend_state_t state = decode_state(buf, n);
	int error;

	if (state == PM_SUSPEND_ON
	    && strcmp(buf, "off") && strcmp(buf, "off\n"))
		return -EINVAL;

	error = pm_autosleep_set_state(state);
	return error ? error : n;
}

power_attr(autosleep);
#endif /* CONFIG_PM_AUTOSLEEP */

#ifdef CONFIG_PM_WAKELOCKS
static ssize_t wake_lock_show(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      char *buf)
{
	return pm_show_wakelocks(buf, true);
}

static ssize_t wake_lock_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	int error = pm_wake_lock(buf);
	return error ? error : n;
}

power_attr(wake_lock);

static ssize_t wake_unlock_show(struct kobject *kobj,
				struct kobj_attribute *attr,
				char *buf)
{
	return pm_show_wakelocks(buf, false);
}

static ssize_t wake_unlock_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t n)
{
	int error = pm_wake_unlock(buf);
	return error ? error : n;
}

power_attr(wake_unlock);

#endif /* CONFIG_PM_WAKELOCKS */
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_CPU_FREQ_LIMIT_USERSPACE
static int cpufreq_max_limit_val = -1;
static int cpufreq_min_limit_val = -1;
struct cpufreq_limit_handle *cpufreq_max_hd;
struct cpufreq_limit_handle *cpufreq_min_hd;
DEFINE_MUTEX(cpufreq_limit_mutex);
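/*
 * Userspace CPU frequency limits: "cpufreq_max_limit" and "cpufreq_min_limit"
 * take a frequency (or -1 to release the limit).  Each write drops the
 * previous cpufreq_limit handle, if any, and acquires a new one;
 * "cpufreq_table" lists the frequencies within [MIN_FREQ_LIMIT,
 * MAX_FREQ_LIMIT] and is read-only.
 */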
static ssize_t cpufreq_table_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	int i, count = 0;
	unsigned int freq;
	struct cpufreq_frequency_table *table;

	table = cpufreq_frequency_get_table(0);
	if (table == NULL)
		return 0;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++)
		count = i;

	for (i = count; i >= 0; i--) {
		freq = table[i].frequency;

		if (freq < MIN_FREQ_LIMIT || freq > MAX_FREQ_LIMIT)
			continue;

		len += sprintf(buf + len, "%u ", freq);
	}

	len--;
	len += sprintf(buf + len, "\n");

	return len;
}

static ssize_t cpufreq_table_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t n)
{
	pr_err("%s: cpufreq_table is read-only\n", __func__);
	return -EINVAL;
}

static ssize_t cpufreq_max_limit_show(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_max_limit_val);
}

static ssize_t cpufreq_max_limit_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	int val;
	ssize_t ret = -EINVAL;

	if (sscanf(buf, "%d", &val) != 1) {
		pr_err("%s: Invalid cpufreq format\n", __func__);
		goto out;
	}

	mutex_lock(&cpufreq_limit_mutex);

	if (cpufreq_max_hd) {
		cpufreq_limit_put(cpufreq_max_hd);
		cpufreq_max_hd = NULL;
	}

	if (val != -1) {
		cpufreq_max_hd = cpufreq_limit_max_freq(val, "user lock(max)");
		if (IS_ERR(cpufreq_max_hd)) {
			pr_err("%s: fail to get the handle\n", __func__);
			cpufreq_max_hd = NULL;
		}
	}

	cpufreq_max_limit_val = cpufreq_max_hd ? val : -1;

	mutex_unlock(&cpufreq_limit_mutex);
	ret = n;
out:
	return ret;
}

static ssize_t cpufreq_min_limit_show(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_min_limit_val);
}

static ssize_t cpufreq_min_limit_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	int val;
	ssize_t ret = -EINVAL;

	if (sscanf(buf, "%d", &val) != 1) {
		pr_err("%s: Invalid cpufreq format\n", __func__);
		goto out;
	}

	mutex_lock(&cpufreq_limit_mutex);

	if (cpufreq_min_hd) {
		cpufreq_limit_put(cpufreq_min_hd);
		cpufreq_min_hd = NULL;
	}

	if (val != -1) {
		cpufreq_min_hd = cpufreq_limit_min_freq(val, "user lock(min)");
		if (IS_ERR(cpufreq_min_hd)) {
			pr_err("%s: fail to get the handle\n", __func__);
			cpufreq_min_hd = NULL;
		}
	}

	cpufreq_min_limit_val = cpufreq_min_hd ? val : -1;

	mutex_unlock(&cpufreq_limit_mutex);
	ret = n;
out:
	return ret;
}

power_attr(cpufreq_table);
power_attr(cpufreq_max_limit);
power_attr(cpufreq_min_limit);

struct cpufreq_limit_handle *cpufreq_min_touch;
struct cpufreq_limit_handle *cpufreq_min_camera;
struct cpufreq_limit_handle *cpufreq_min_sensor;
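/*
 * In-kernel minimum-frequency requests: set_freq_limit() first releases any
 * previously held touch/camera/sensor handles and then, for each DVFS_*_ID
 * bit set in @id, acquires a new minimum-frequency handle unless @freq is -1.
 */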
int set_freq_limit(unsigned long id, unsigned int freq)
{
	ssize_t ret = -EINVAL;

	mutex_lock(&cpufreq_limit_mutex);

	if (cpufreq_min_touch) {
		cpufreq_limit_put(cpufreq_min_touch);
		cpufreq_min_touch = NULL;
	}

	if (cpufreq_min_camera) {
		cpufreq_limit_put(cpufreq_min_camera);
		cpufreq_min_camera = NULL;
	}

	if (cpufreq_min_sensor) {
		cpufreq_limit_put(cpufreq_min_sensor);
		cpufreq_min_sensor = NULL;
	}

	pr_debug("%s: id=%d freq=%d\n", __func__, (int)id, freq);

	/* min lock */
	if (id & DVFS_TOUCH_ID) {
		if (freq != -1) {
			cpufreq_min_touch = cpufreq_limit_min_freq(freq, "touch min");
			if (IS_ERR(cpufreq_min_touch)) {
				pr_err("%s: fail to get the handle\n", __func__);
				goto out;
			}
		}
	}

	if (id & DVFS_CAMERA_ID) {
		if (freq != -1) {
			cpufreq_min_camera = cpufreq_limit_min_freq(freq, "camera min");
			if (IS_ERR(cpufreq_min_camera)) {
				pr_err("%s: fail to get the handle\n", __func__);
				goto out;
			}
		}
	}

	if (id & DVFS_SENSOR_ID) {
		if (freq != -1) {
			cpufreq_min_sensor = cpufreq_limit_min_freq(freq, "sensor min");
			if (IS_ERR(cpufreq_min_sensor)) {
				pr_err("%s: fail to get the handle\n", __func__);
				goto out;
			}
		}
	}

	ret = 0;
out:
	mutex_unlock(&cpufreq_limit_mutex);
	return ret;
}
#endif

#ifdef CONFIG_PM_TRACE
int pm_trace_enabled;

static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", pm_trace_enabled);
}

static ssize_t
pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
	       const char *buf, size_t n)
{
	int val;

	if (sscanf(buf, "%d", &val) == 1) {
		pm_trace_enabled = !!val;
		if (pm_trace_enabled) {
			pr_warn("PM: Enabling pm_trace changes system date and time during resume.\n"
				"PM: Correct system time has to be restored manually after resume.\n");
		}
		return n;
	}
	return -EINVAL;
}

power_attr(pm_trace);

static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       char *buf)
{
	return show_trace_dev_match(buf, PAGE_SIZE);
}

static ssize_t
pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t n)
{
	return -EINVAL;
}

power_attr(pm_trace_dev_match);

#endif /* CONFIG_PM_TRACE */

#ifdef CONFIG_SEC_DVFS
DEFINE_MUTEX(dvfs_mutex);
static unsigned long dvfs_id;
static unsigned long apps_min_freq;
static unsigned long apps_max_freq;
static unsigned long thermald_max_freq;

static unsigned long touch_min_freq = MIN_TOUCH_LIMIT;
static unsigned long unicpu_max_freq = MAX_UNICPU_LIMIT;
static unsigned long sensor_min_freq = MIN_SENSOR_LIMIT;
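/*
 * verify_cpufreq_target - reject frequencies that are not present in the
 * boot CPU's cpufreq table (within [MIN_FREQ_LIMIT, MAX_FREQ_LIMIT]).
 */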
static int verify_cpufreq_target(unsigned int target)
{
	int i;
	struct cpufreq_frequency_table *table;

	table = cpufreq_frequency_get_table(BOOT_CPU);
	if (table == NULL)
		return -EFAULT;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (table[i].frequency < MIN_FREQ_LIMIT ||
				table[i].frequency > MAX_FREQ_LIMIT)
			continue;

		if (target == table[i].frequency)
			return 0;
	}

	return -EINVAL;
}
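/*
 * set_freq_limit - aggregate min/max frequency requests (SEC_DVFS).
 * Each caller is identified by a DVFS_*_ID bit; freq == -1 clears that
 * caller's request.  The per-caller frequencies are folded into a single
 * [min, max] range, applied with set_min_lock()/set_max_lock(), and CPUs
 * whose current frequency falls outside the new range are retargeted
 * immediately for ids in UPDATE_NOW_BITS.
 */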
int set_freq_limit(unsigned long id, unsigned int freq)
{
	unsigned int min = MIN_FREQ_LIMIT;
	unsigned int max = MAX_FREQ_LIMIT;

	if (freq != 0 && freq != -1 && verify_cpufreq_target(freq))
		return -EINVAL;

	mutex_lock(&dvfs_mutex);

	if (freq == -1)
		dvfs_id &= ~id;
	else
		dvfs_id |= id;

	/* update freq for apps/thermald */
	if (id == DVFS_APPS_MIN_ID)
		apps_min_freq = freq;
	else if (id == DVFS_APPS_MAX_ID)
		apps_max_freq = freq;
	else if (id == DVFS_THERMALD_ID)
		thermald_max_freq = freq;
	else if (id == DVFS_TOUCH_ID)
		touch_min_freq = freq;
	else if (id == DVFS_SENSOR_ID)
		sensor_min_freq = freq;

	/* set min - apps */
	if (dvfs_id & DVFS_APPS_MIN_ID && min < apps_min_freq)
		min = apps_min_freq;
	if (dvfs_id & DVFS_TOUCH_ID && min < touch_min_freq)
		min = touch_min_freq;
	if (dvfs_id & DVFS_SENSOR_ID && min < sensor_min_freq)
		min = sensor_min_freq;

	/* set max */
	if (dvfs_id & DVFS_APPS_MAX_ID && max > apps_max_freq)
		max = apps_max_freq;
	if (dvfs_id & DVFS_THERMALD_ID && max > thermald_max_freq)
		max = thermald_max_freq;
	if (dvfs_id & DVFS_UNICPU_ID && max > unicpu_max_freq)
		max = unicpu_max_freq;

	/* check min max */
	if (min > max)
		min = max;

	/* update */
	set_min_lock(min);
	set_max_lock(max);
	pr_info("%s: dvfs_id=0x%lx, id=0x%lx, freq=%u, min=%u, max=%u\n",
		__func__, dvfs_id, id, freq, min, max);
	/* need to update now */
	if (id & UPDATE_NOW_BITS) {
		int cpu;
		unsigned int cur = 0;

		for_each_online_cpu(cpu) {
			cur = cpufreq_quick_get(cpu);
			if (cur) {
				struct cpufreq_policy policy;
				policy.cpu = cpu;

				if (cur < min)
					cpufreq_driver_target(&policy,
						min, CPUFREQ_RELATION_H);
				else if (cur > max)
					cpufreq_driver_target(&policy,
						max, CPUFREQ_RELATION_L);
			}
		}
	}

	mutex_unlock(&dvfs_mutex);

	return 0;
}

static ssize_t cpufreq_min_limit_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	int freq;

	freq = get_min_lock();
	if (!freq)
		freq = -1;

	return sprintf(buf, "%d\n", freq);
}

static ssize_t cpufreq_min_limit_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	int freq_min_limit, ret = 0;

	ret = sscanf(buf, "%d", &freq_min_limit);

	if (ret != 1)
		return -EINVAL;

	set_freq_limit(DVFS_APPS_MIN_ID, freq_min_limit);

	return n;
}

static ssize_t cpufreq_max_limit_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	int freq;

	freq = get_max_lock();
	if (!freq)
		freq = -1;

	return sprintf(buf, "%d\n", freq);
}

static ssize_t cpufreq_max_limit_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	int freq_max_limit, ret = 0;

	ret = sscanf(buf, "%d", &freq_max_limit);

	if (ret != 1)
		return -EINVAL;

	set_freq_limit(DVFS_APPS_MAX_ID, freq_max_limit);

	return n;
}

static ssize_t cpufreq_table_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	int i, count = 0;
	unsigned int freq;
	struct cpufreq_frequency_table *table;

	table = cpufreq_frequency_get_table(BOOT_CPU);
	if (table == NULL)
		return 0;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++)
		count = i;

	for (i = count; i >= 0; i--) {
		freq = table[i].frequency;

		if (freq < MIN_FREQ_LIMIT || freq > MAX_FREQ_LIMIT)
			continue;

		len += sprintf(buf + len, "%u ", freq);
	}

	len--;
	len += sprintf(buf + len, "\n");

	return len;
}

static ssize_t cpufreq_table_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t n)
{
	pr_info("%s: Not supported\n", __func__);
	return n;
}

power_attr(cpufreq_max_limit);
power_attr(cpufreq_min_limit);
power_attr(cpufreq_table);
#endif

#ifdef CONFIG_FREEZER
static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", freeze_timeout_msecs);
}

static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	freeze_timeout_msecs = val;
	return n;
}

power_attr(pm_freeze_timeout);

#endif	/* CONFIG_FREEZER */
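/*
 * Attributes exported under /sys/power.  Note that the
 * CONFIG_CPU_FREQ_LIMIT_USERSPACE and CONFIG_SEC_DVFS blocks above define
 * handlers with identical names, so only one of the two options can be
 * enabled in a given build.
 */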
static struct attribute * g[] = {
	&state_attr.attr,
#ifdef CONFIG_PM_TRACE
	&pm_trace_attr.attr,
	&pm_trace_dev_match_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP
	&pm_async_attr.attr,
	&wakeup_count_attr.attr,
#ifdef CONFIG_PM_AUTOSLEEP
	&autosleep_attr.attr,
#endif
#ifdef CONFIG_PM_WAKELOCKS
	&wake_lock_attr.attr,
	&wake_unlock_attr.attr,
#endif
#ifdef CONFIG_PM_DEBUG
	&pm_test_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP_DEBUG
	&pm_print_times_attr.attr,
#endif
#endif
#ifdef CONFIG_CPU_FREQ_LIMIT_USERSPACE
	&cpufreq_table_attr.attr,
	&cpufreq_max_limit_attr.attr,
	&cpufreq_min_limit_attr.attr,
#endif
#ifdef CONFIG_SEC_DVFS
	&cpufreq_min_limit_attr.attr,
	&cpufreq_max_limit_attr.attr,
	&cpufreq_table_attr.attr,
#endif
	NULL,
};

static struct attribute_group attr_group = {
	.attrs = g,
};

#ifdef CONFIG_PM_RUNTIME
struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);

static int __init pm_start_workqueue(void)
{
	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);

	return pm_wq ? 0 : -ENOMEM;
}
#else
static inline int pm_start_workqueue(void) { return 0; }
#endif
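/*
 * pm_init - set up the PM core: create the freezable "pm" workqueue (for
 * CONFIG_PM_RUNTIME), initialize hibernation image sizes and the touch
 * event timer, create /sys/power with the attribute group above, and
 * initialize the suspend helper, SEC_DVFS defaults and autosleep.
 */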
static int __init pm_init(void)
{
	int error = pm_start_workqueue();
	if (error)
		return error;
	hibernate_image_size_init();
	hibernate_reserved_size_init();

	touch_evt_timer_val = ktime_set(2, 0);
	hrtimer_init(&tc_ev_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tc_ev_timer.function = &tc_ev_stop;
	tc_ev_processed = 1;

	power_kobj = kobject_create_and_add("power", NULL);
	if (!power_kobj)
		return -ENOMEM;
	error = sysfs_create_group(power_kobj, &attr_group);
	if (error)
		return error;
	pm_print_times_init();

#ifdef CONFIG_SUSPEND_HELPER
	suspend_helper_init();
#endif
#ifdef CONFIG_SEC_DVFS
	apps_min_freq = MIN_FREQ_LIMIT;
	apps_max_freq = MAX_FREQ_LIMIT;
	thermald_max_freq = MAX_FREQ_LIMIT;
#endif
	return pm_autosleep_init();
}

core_initcall(pm_init);