  1. /*
  2. * kernel/power/main.c - PM subsystem core functionality.
  3. *
  4. * Copyright (c) 2003 Patrick Mochel
  5. * Copyright (c) 2003 Open Source Development Lab
  6. *
  7. * This file is released under the GPLv2
  8. *
  9. */
  10. #include <linux/export.h>
  11. #include <linux/kobject.h>
  12. #include <linux/string.h>
  13. #include <linux/resume-trace.h>
  14. #include <linux/workqueue.h>
  15. #include <linux/debugfs.h>
  16. #include <linux/seq_file.h>
  17. #include <linux/hrtimer.h>
  18. #define CONFIG_SUSPEND_HELPER //etinum.test
  19. //#define SUSPEND_WAKEUP_BOOST
  20. #ifdef SUSPEND_WAKEUP_BOOST
  21. #include <linux/sched.h>
  22. #endif
  23. #ifdef CONFIG_CPU_FREQ_LIMIT_USERSPACE
  24. #include <linux/cpufreq.h>
  25. #include <linux/cpufreq_limit.h>
  26. #endif
  27. #ifdef CONFIG_SEC_DVFS
  28. #include <linux/cpufreq.h>
  29. #include <linux/rq_stats.h>
  30. #endif
  31. #include "power.h"
  32. #define MAX_BUF 100
/* Serializes system-wide PM transitions (suspend/hibernate entry points). */
DEFINE_MUTEX(pm_mutex);

#ifdef CONFIG_PM_SLEEP

/* Routines for PM-transition notifications */
static BLOCKING_NOTIFIER_HEAD(pm_chain_head);

/* Deferred work used to notify userspace pollers of a touch event. */
static void touch_event_fn(struct work_struct *work);
static DECLARE_WORK(touch_event_struct, touch_event_fn);

/* Timer that coalesces touch events before the sysfs notification fires. */
static struct hrtimer tc_ev_timer;
static int tc_ev_processed;
static ktime_t touch_evt_timer_val;
/**
 * register_pm_notifier - add a callback to the PM-transition notifier chain.
 * @nb: notifier block to register.
 */
int register_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(register_pm_notifier);
/**
 * unregister_pm_notifier - remove a callback from the PM-transition chain.
 * @nb: notifier block to unregister.
 */
int unregister_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);
/* Run the PM notifier chain for event @val; returns 0 or a -errno
 * derived from the first notifier that objected. */
int pm_notifier_call_chain(unsigned long val)
{
	int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);

	return notifier_to_errno(ret);
}
  57. /* If set, devices may be suspended and resumed asynchronously. */
  58. int pm_async_enabled = 1;
  59. static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
  60. char *buf)
  61. {
  62. return sprintf(buf, "%d\n", pm_async_enabled);
  63. }
  64. static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
  65. const char *buf, size_t n)
  66. {
  67. unsigned long val;
  68. if (strict_strtoul(buf, 10, &val))
  69. return -EINVAL;
  70. if (val > 1)
  71. return -EINVAL;
  72. pm_async_enabled = val;
  73. return n;
  74. }
  75. power_attr(pm_async);
  76. static void touch_event_fn(struct work_struct *work)
  77. {
  78. /* wakeup the userspace poll */
  79. tc_ev_processed = 1;
  80. sysfs_notify(power_kobj, NULL, "touch_event");
  81. return;
  82. }
  83. static enum hrtimer_restart tc_ev_stop(struct hrtimer *hrtimer)
  84. {
  85. schedule_work(&touch_event_struct);
  86. return HRTIMER_NORESTART;
  87. }
  88. #ifdef CONFIG_PM_DEBUG
  89. int pm_test_level = TEST_NONE;
  90. static const char * const pm_tests[__TEST_AFTER_LAST] = {
  91. [TEST_NONE] = "none",
  92. [TEST_CORE] = "core",
  93. [TEST_CPUS] = "processors",
  94. [TEST_PLATFORM] = "platform",
  95. [TEST_DEVICES] = "devices",
  96. [TEST_FREEZER] = "freezer",
  97. };
  98. static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
  99. char *buf)
  100. {
  101. char *s = buf;
  102. int level;
  103. for (level = TEST_FIRST; level <= TEST_MAX; level++)
  104. if (pm_tests[level]) {
  105. if (level == pm_test_level)
  106. s += sprintf(s, "[%s] ", pm_tests[level]);
  107. else
  108. s += sprintf(s, "%s ", pm_tests[level]);
  109. }
  110. if (s != buf)
  111. /* convert the last space to a newline */
  112. *(s-1) = '\n';
  113. return (s - buf);
  114. }
  115. static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
  116. const char *buf, size_t n)
  117. {
  118. const char * const *s;
  119. int level;
  120. char *p;
  121. int len;
  122. int error = -EINVAL;
  123. p = memchr(buf, '\n', n);
  124. len = p ? p - buf : n;
  125. lock_system_sleep();
  126. level = TEST_FIRST;
  127. for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
  128. if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
  129. pm_test_level = level;
  130. error = 0;
  131. break;
  132. }
  133. unlock_system_sleep();
  134. return error ? error : n;
  135. }
  136. power_attr(pm_test);
  137. #endif /* CONFIG_PM_DEBUG */
  138. #ifdef CONFIG_DEBUG_FS
/* Map a suspend_stat_step value to its human-readable name for the
 * suspend_stats debugfs output; unknown steps map to "". */
static char *suspend_step_name(enum suspend_stat_step step)
{
	switch (step) {
	case SUSPEND_FREEZE:
		return "freeze";
	case SUSPEND_PREPARE:
		return "prepare";
	case SUSPEND_SUSPEND:
		return "suspend";
	case SUSPEND_SUSPEND_NOIRQ:
		return "suspend_noirq";
	case SUSPEND_RESUME_NOIRQ:
		return "resume_noirq";
	case SUSPEND_RESUME:
		return "resume";
	default:
		return "";
	}
}
/* seq_file show for /sys/kernel/debug/suspend_stats: dump the global
 * suspend counters plus the REC_FAILED_NUM most recent failing
 * devices, errnos and steps, newest first. */
static int suspend_stats_show(struct seq_file *s, void *unused)
{
	int i, index, last_dev, last_errno, last_step;

	/* The last_failed_* fields index the next free ring slot; step
	 * back one (mod REC_FAILED_NUM) to find the most recent entry. */
	last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
	last_dev %= REC_FAILED_NUM;
	last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
	last_errno %= REC_FAILED_NUM;
	last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
	last_step %= REC_FAILED_NUM;

	seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
			"%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
			"success", suspend_stats.success,
			"fail", suspend_stats.fail,
			"failed_freeze", suspend_stats.failed_freeze,
			"failed_prepare", suspend_stats.failed_prepare,
			"failed_suspend", suspend_stats.failed_suspend,
			"failed_suspend_late",
				suspend_stats.failed_suspend_late,
			"failed_suspend_noirq",
				suspend_stats.failed_suspend_noirq,
			"failed_resume", suspend_stats.failed_resume,
			"failed_resume_early",
				suspend_stats.failed_resume_early,
			"failed_resume_noirq",
				suspend_stats.failed_resume_noirq);

	/* Most recent failing device, then the rest of the ring newest-first. */
	seq_printf(s, "failures:\n last_failed_dev:\t%-s\n",
			suspend_stats.failed_devs[last_dev]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_dev + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
			suspend_stats.failed_devs[index]);
	}
	seq_printf(s, " last_failed_errno:\t%-d\n",
			suspend_stats.errno[last_errno]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_errno + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-d\n",
			suspend_stats.errno[index]);
	}
	seq_printf(s, " last_failed_step:\t%-s\n",
			suspend_step_name(
				suspend_stats.failed_steps[last_step]));
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_step + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
			suspend_step_name(
				suspend_stats.failed_steps[index]));
	}
	return 0;
}
  211. static int suspend_stats_open(struct inode *inode, struct file *file)
  212. {
  213. return single_open(file, suspend_stats_show, NULL);
  214. }
  215. static const struct file_operations suspend_stats_operations = {
  216. .open = suspend_stats_open,
  217. .read = seq_read,
  218. .llseek = seq_lseek,
  219. .release = single_release,
  220. };
  221. static int __init pm_debugfs_init(void)
  222. {
  223. debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
  224. NULL, NULL, &suspend_stats_operations);
  225. return 0;
  226. }
  227. late_initcall(pm_debugfs_init);
  228. #endif /* CONFIG_DEBUG_FS */
  229. #endif /* CONFIG_PM_SLEEP */
  230. struct kobject *power_kobj;
  231. /**
  232. * state - control system power state.
  233. *
  234. * show() returns what states are supported, which is hard-coded to
  235. * 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
  236. * 'disk' (Suspend-to-Disk).
  237. *
  238. * store() accepts one of those strings, translates it into the
  239. * proper enumerated value, and initiates a suspend transition.
  240. */
  241. static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
  242. char *buf)
  243. {
  244. char *s = buf;
  245. #ifdef CONFIG_SUSPEND
  246. int i;
  247. for (i = 0; i < PM_SUSPEND_MAX; i++) {
  248. if (pm_states[i] && valid_state(i))
  249. s += sprintf(s,"%s ", pm_states[i]);
  250. }
  251. #endif
  252. #ifdef CONFIG_HIBERNATION
  253. s += sprintf(s, "%s\n", "disk");
  254. #else
  255. if (s != buf)
  256. /* convert the last space to a newline */
  257. *(s-1) = '\n';
  258. #endif
  259. return (s - buf);
  260. }
/* Translate a sysfs string into a suspend_state_t: "disk" maps to
 * PM_SUSPEND_MAX (hibernation), a matching pm_states[] entry maps to
 * that state, and anything else maps to PM_SUSPEND_ON. */
static suspend_state_t decode_state(const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
#ifdef CONFIG_EARLYSUSPEND
	/* With earlysuspend, "on" is a legal target; start the scan there. */
	suspend_state_t state = PM_SUSPEND_ON;
#else
	suspend_state_t state = PM_SUSPEND_STANDBY;
#endif
	const char * const *s;
#endif
	char *p;
	int len;

	/* Strip an optional trailing newline from the sysfs write. */
	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	/* Check hibernation first. */
	if (len == 4 && !strncmp(buf, "disk", len))
		return PM_SUSPEND_MAX;

#ifdef CONFIG_SUSPEND
	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++)
		if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
			return state;
#endif

	return PM_SUSPEND_ON;
}
#ifdef CONFIG_SUSPEND_HELPER
/* Ordered workqueue that runs the actual suspend/resume transition. */
static struct workqueue_struct *suspend_helper_wq;

/* Arguments captured from the /sys/power/state write for the worker. */
struct state_store_params {
	const char *buf;
	size_t n;
};

/* Work item plus completion used to hand the transition to a kworker
 * and report its result back to the writer. */
struct suspend_helper_data {
	struct work_struct work;
	struct completion done;
	struct state_store_params params;
	int result;
};
struct suspend_helper_data *suspend_helper_data;
  298. static void suspend_helper(struct work_struct *work)
  299. {
  300. struct suspend_helper_data *data = (struct suspend_helper_data *)
  301. container_of(work, struct suspend_helper_data, work);
  302. const char *buf = data->params.buf;
  303. size_t n = data->params.n;
  304. suspend_state_t state;
  305. int error = 0;
  306. pr_info("[suspend helper] %s: start!\n", __func__);
  307. error = pm_autosleep_lock();
  308. if (error) {
  309. goto out_nolock;
  310. }
  311. if (pm_autosleep_state() > PM_SUSPEND_ON) {
  312. error = -EBUSY;
  313. goto out;
  314. }
  315. state = decode_state(buf, n);
  316. if (state < PM_SUSPEND_MAX)
  317. error = pm_suspend(state);
  318. else if (state == PM_SUSPEND_MAX)
  319. error = hibernate();
  320. else
  321. error = -EINVAL;
  322. out:
  323. pm_autosleep_unlock();
  324. out_nolock:
  325. // set result and notify completion
  326. data->result = error;
  327. complete(&data->done);
  328. pr_info("[suspend helper] %s: result = %d\n", __func__, error);
  329. }
  330. static ssize_t state_store_helper(struct kobject *kobj, struct kobj_attribute *attr,
  331. const char *buf, size_t n)
  332. {
  333. int error;
  334. int freezable = 0;
  335. // we don't need to freeze. so tell the freezer
  336. if (!freezer_should_skip(current)) {
  337. freezable = 1;
  338. freezer_do_not_count();
  339. pr_info("[suspend helper] %s: freezer should skip me (%s:%d)\n",
  340. __func__, current->comm, current->pid);
  341. }
  342. suspend_helper_data->params.buf = buf;
  343. suspend_helper_data->params.n = n;
  344. INIT_COMPLETION(suspend_helper_data->done);
  345. // use kworker for suspend resume
  346. queue_work(suspend_helper_wq, &suspend_helper_data->work);
  347. // wait for suspend/resume work to be complete
  348. wait_for_completion(&suspend_helper_data->done);
  349. if (freezable) {
  350. // set ourself as freezable
  351. freezer_count();
  352. }
  353. error = suspend_helper_data->result;
  354. pr_info("[suspend helper] %s: suspend_helper returned %d\n", __func__, error);
  355. return error ? error : n;
  356. }
  357. static int suspend_helper_init(void)
  358. {
  359. int ret = 0;
  360. suspend_helper_wq = alloc_ordered_workqueue("suspend_helper", 0);
  361. if (!suspend_helper_wq)
  362. return -ENOMEM;
  363. suspend_helper_data = kzalloc(sizeof(struct suspend_helper_data), GFP_KERNEL);
  364. if (!suspend_helper_data) {
  365. ret = -ENOMEM;
  366. goto out_destroy_wq;
  367. }
  368. INIT_WORK(&suspend_helper_data->work, suspend_helper);
  369. init_completion(&suspend_helper_data->done);
  370. pr_info("[suspend helper] %s: init done\n", __func__);
  371. return 0;
  372. out_destroy_wq:
  373. destroy_workqueue(suspend_helper_wq);
  374. return ret;
  375. }
  376. #endif
#ifdef SUSPEND_WAKEUP_BOOST
/* Dump the current task's scheduling parameters, tagged with @msg,
 * so the boost/restore around suspend can be verified in the log. */
static void pr_sched_state(const char *msg)
{
	pr_debug("[sched state] %s: (%s:%d) %pS policy=%d, prio=%d, static_prio=%d, normal_prio=%d, rt_priority=%d\n",
		msg, current->comm, current->pid,
		current->sched_class, current->policy,
		current->prio, current->static_prio, current->normal_prio, current->rt_priority);
}
#endif
/* Write handler for /sys/power/state: decode the requested sleep state
 * and start the transition, optionally via the helper workqueue and
 * optionally boosting this task to SCHED_FIFO for the duration. */
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t n)
{
	suspend_state_t state;
	int error;
#ifdef SUSPEND_WAKEUP_BOOST
	/* Remember the writer's scheduling class/nice so it can be restored. */
	int orig_policy = current->policy;
	int orig_nice = task_nice(current);
	struct sched_param param = { .sched_priority = 1 };
#endif
#ifdef CONFIG_SUSPEND_HELPER
	/* Prefer running the transition on the dedicated kworker when the
	 * helper was initialized successfully. */
	if (suspend_helper_data) {
		pr_info("[suspend helper] %s: Let our helper do the real work!\n", __func__);
		return state_store_helper(kobj, attr, buf, n);
	}
	pr_info("[suspend helper] %s: helper data not avaialbe.. Fall back to the legacy code..\n", __func__);
#endif
	error = pm_autosleep_lock();
	if (error)
		return error;
	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		/* Autosleep owns the transition; reject manual writes. */
		error = -EBUSY;
		goto out;
	}
#ifdef SUSPEND_WAKEUP_BOOST
	pr_sched_state("before boost");
	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
	pr_sched_state("after boost");
#endif
	state = decode_state(buf, n);
	if (state < PM_SUSPEND_MAX)
		error = pm_suspend(state);
	else if (state == PM_SUSPEND_MAX)
		error = hibernate();
	else
		error = -EINVAL;
#ifdef SUSPEND_WAKEUP_BOOST
	pr_sched_state("before restore");
	param.sched_priority = 0;
	sched_setscheduler_nocheck(current, orig_policy, &param);
	set_user_nice(current, orig_nice);
	pr_sched_state("after restore");
#endif
out:
	pm_autosleep_unlock();
	return error ? error : n;
}
power_attr(state);
  434. #ifdef CONFIG_PM_SLEEP
  435. /*
  436. * The 'wakeup_count' attribute, along with the functions defined in
  437. * drivers/base/power/wakeup.c, provides a means by which wakeup events can be
  438. * handled in a non-racy way.
  439. *
  440. * If a wakeup event occurs when the system is in a sleep state, it simply is
  441. * woken up. In turn, if an event that would wake the system up from a sleep
  442. * state occurs when it is undergoing a transition to that sleep state, the
  443. * transition should be aborted. Moreover, if such an event occurs when the
  444. * system is in the working state, an attempt to start a transition to the
  445. * given sleep state should fail during certain period after the detection of
  446. * the event. Using the 'state' attribute alone is not sufficient to satisfy
  447. * these requirements, because a wakeup event may occur exactly when 'state'
  448. * is being written to and may be delivered to user space right before it is
  449. * frozen, so the event will remain only partially processed until the system is
  450. * woken up by another event. In particular, it won't cause the transition to
  451. * a sleep state to be aborted.
  452. *
  453. * This difficulty may be overcome if user space uses 'wakeup_count' before
  454. * writing to 'state'. It first should read from 'wakeup_count' and store
  455. * the read value. Then, after carrying out its own preparations for the system
  456. * transition to a sleep state, it should write the stored value to
  457. * 'wakeup_count'. If that fails, at least one wakeup event has occurred since
  458. * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it
  459. * is allowed to write to 'state', but the transition will be aborted if there
  460. * are any wakeup events detected after 'wakeup_count' was written to.
  461. */
  462. static ssize_t wakeup_count_show(struct kobject *kobj,
  463. struct kobj_attribute *attr,
  464. char *buf)
  465. {
  466. unsigned int val;
  467. return pm_get_wakeup_count(&val, true) ?
  468. sprintf(buf, "%u\n", val) : -EINTR;
  469. }
  470. static ssize_t wakeup_count_store(struct kobject *kobj,
  471. struct kobj_attribute *attr,
  472. const char *buf, size_t n)
  473. {
  474. unsigned int val;
  475. int error;
  476. error = pm_autosleep_lock();
  477. if (error)
  478. return error;
  479. if (pm_autosleep_state() > PM_SUSPEND_ON) {
  480. error = -EBUSY;
  481. goto out;
  482. }
  483. error = -EINVAL;
  484. if (sscanf(buf, "%u", &val) == 1) {
  485. if (pm_save_wakeup_count(val))
  486. error = n;
  487. }
  488. out:
  489. pm_autosleep_unlock();
  490. return error;
  491. }
  492. power_attr(wakeup_count);
  493. #ifdef CONFIG_PM_AUTOSLEEP
  494. static ssize_t autosleep_show(struct kobject *kobj,
  495. struct kobj_attribute *attr,
  496. char *buf)
  497. {
  498. suspend_state_t state = pm_autosleep_state();
  499. if (state == PM_SUSPEND_ON)
  500. return sprintf(buf, "off\n");
  501. #ifdef CONFIG_SUSPEND
  502. if (state < PM_SUSPEND_MAX)
  503. return sprintf(buf, "%s\n", valid_state(state) ?
  504. pm_states[state] : "error");
  505. #endif
  506. #ifdef CONFIG_HIBERNATION
  507. return sprintf(buf, "disk\n");
  508. #else
  509. return sprintf(buf, "error");
  510. #endif
  511. }
  512. static ssize_t autosleep_store(struct kobject *kobj,
  513. struct kobj_attribute *attr,
  514. const char *buf, size_t n)
  515. {
  516. suspend_state_t state = decode_state(buf, n);
  517. int error;
  518. if (state == PM_SUSPEND_ON
  519. && strcmp(buf, "off") && strcmp(buf, "off\n"))
  520. return -EINVAL;
  521. error = pm_autosleep_set_state(state);
  522. return error ? error : n;
  523. }
  524. power_attr(autosleep);
  525. #endif /* CONFIG_PM_AUTOSLEEP */
  526. #ifdef CONFIG_PM_WAKELOCKS
  527. static ssize_t wake_lock_show(struct kobject *kobj,
  528. struct kobj_attribute *attr,
  529. char *buf)
  530. {
  531. return pm_show_wakelocks(buf, true);
  532. }
  533. static ssize_t wake_lock_store(struct kobject *kobj,
  534. struct kobj_attribute *attr,
  535. const char *buf, size_t n)
  536. {
  537. int error = pm_wake_lock(buf);
  538. return error ? error : n;
  539. }
  540. power_attr(wake_lock);
  541. static ssize_t wake_unlock_show(struct kobject *kobj,
  542. struct kobj_attribute *attr,
  543. char *buf)
  544. {
  545. return pm_show_wakelocks(buf, false);
  546. }
  547. static ssize_t wake_unlock_store(struct kobject *kobj,
  548. struct kobj_attribute *attr,
  549. const char *buf, size_t n)
  550. {
  551. int error = pm_wake_unlock(buf);
  552. return error ? error : n;
  553. }
  554. power_attr(wake_unlock);
  555. #endif /* CONFIG_PM_WAKELOCKS */
  556. #endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_CPU_FREQ_LIMIT_USERSPACE
/* Last user-requested max/min limits; -1 means "no limit set". */
static int cpufreq_max_limit_val = -1;
static int cpufreq_min_limit_val = -1;
/* Handles held while a user max/min lock is active. */
struct cpufreq_limit_handle *cpufreq_max_hd;
struct cpufreq_limit_handle *cpufreq_min_hd;
/* Serializes handle acquisition/release for all limit interfaces. */
DEFINE_MUTEX(cpufreq_limit_mutex);
  563. static ssize_t cpufreq_table_show(struct kobject *kobj,
  564. struct kobj_attribute *attr, char *buf)
  565. {
  566. ssize_t len = 0;
  567. int i, count = 0;
  568. unsigned int freq;
  569. struct cpufreq_frequency_table *table;
  570. table = cpufreq_frequency_get_table(0);
  571. if (table == NULL)
  572. return 0;
  573. for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++)
  574. count = i;
  575. for (i = count; i >= 0; i--) {
  576. freq = table[i].frequency;
  577. if (freq < MIN_FREQ_LIMIT || freq > MAX_FREQ_LIMIT)
  578. continue;
  579. len += sprintf(buf + len, "%u ", freq);
  580. }
  581. len--;
  582. len += sprintf(buf + len, "\n");
  583. return len;
  584. }
  585. static ssize_t cpufreq_table_store(struct kobject *kobj,
  586. struct kobj_attribute *attr,
  587. const char *buf, size_t n)
  588. {
  589. pr_err("%s: cpufreq_table is read-only\n", __func__);
  590. return -EINVAL;
  591. }
  592. static ssize_t cpufreq_max_limit_show(struct kobject *kobj,
  593. struct kobj_attribute *attr,
  594. char *buf)
  595. {
  596. return sprintf(buf, "%d\n", cpufreq_max_limit_val);
  597. }
  598. static ssize_t cpufreq_max_limit_store(struct kobject *kobj,
  599. struct kobj_attribute *attr,
  600. const char *buf, size_t n)
  601. {
  602. int val;
  603. ssize_t ret = -EINVAL;
  604. if (sscanf(buf, "%d", &val) != 1) {
  605. pr_err("%s: Invalid cpufreq format\n", __func__);
  606. goto out;
  607. }
  608. mutex_lock(&cpufreq_limit_mutex);
  609. if (cpufreq_max_hd) {
  610. cpufreq_limit_put(cpufreq_max_hd);
  611. cpufreq_max_hd = NULL;
  612. }
  613. if (val != -1) {
  614. cpufreq_max_hd = cpufreq_limit_max_freq(val, "user lock(max)");
  615. if (IS_ERR(cpufreq_max_hd)) {
  616. pr_err("%s: fail to get the handle\n", __func__);
  617. cpufreq_max_hd = NULL;
  618. }
  619. }
  620. cpufreq_max_hd ?
  621. (cpufreq_max_limit_val = val) : (cpufreq_max_limit_val = -1);
  622. mutex_unlock(&cpufreq_limit_mutex);
  623. ret = n;
  624. out:
  625. return ret;
  626. }
  627. static ssize_t cpufreq_min_limit_show(struct kobject *kobj,
  628. struct kobj_attribute *attr,
  629. char *buf)
  630. {
  631. return sprintf(buf, "%d\n", cpufreq_min_limit_val);
  632. }
  633. static ssize_t cpufreq_min_limit_store(struct kobject *kobj,
  634. struct kobj_attribute *attr,
  635. const char *buf, size_t n)
  636. {
  637. int val;
  638. ssize_t ret = -EINVAL;
  639. if (sscanf(buf, "%d", &val) != 1) {
  640. pr_err("%s: Invalid cpufreq format\n", __func__);
  641. goto out;
  642. }
  643. mutex_lock(&cpufreq_limit_mutex);
  644. if (cpufreq_min_hd) {
  645. cpufreq_limit_put(cpufreq_min_hd);
  646. cpufreq_min_hd = NULL;
  647. }
  648. if (val != -1) {
  649. cpufreq_min_hd = cpufreq_limit_min_freq(val, "user lock(min)");
  650. if (IS_ERR(cpufreq_min_hd)) {
  651. pr_err("%s: fail to get the handle\n", __func__);
  652. cpufreq_min_hd = NULL;
  653. }
  654. }
  655. cpufreq_min_hd ?
  656. (cpufreq_min_limit_val = val) : (cpufreq_min_limit_val = -1);
  657. mutex_unlock(&cpufreq_limit_mutex);
  658. ret = n;
  659. out:
  660. return ret;
  661. }
power_attr(cpufreq_table);
power_attr(cpufreq_max_limit);
power_attr(cpufreq_min_limit);

/* Min-frequency handles held on behalf of in-kernel clients. */
struct cpufreq_limit_handle *cpufreq_min_touch;
struct cpufreq_limit_handle *cpufreq_min_camera;
struct cpufreq_limit_handle *cpufreq_min_sensor;
  668. int set_freq_limit(unsigned long id, unsigned int freq)
  669. {
  670. ssize_t ret = -EINVAL;
  671. mutex_lock(&cpufreq_limit_mutex);
  672. if (cpufreq_min_touch) {
  673. cpufreq_limit_put(cpufreq_min_touch);
  674. cpufreq_min_touch = NULL;
  675. }
  676. if (cpufreq_min_camera) {
  677. cpufreq_limit_put(cpufreq_min_camera);
  678. cpufreq_min_camera = NULL;
  679. }
  680. if (cpufreq_min_sensor) {
  681. cpufreq_limit_put(cpufreq_min_sensor);
  682. cpufreq_min_sensor = NULL;
  683. }
  684. pr_debug("%s: id=%d freq=%d\n", __func__, (int)id, freq);
  685. /* min lock */
  686. if (id & DVFS_TOUCH_ID) {
  687. if (freq != -1) {
  688. cpufreq_min_touch = cpufreq_limit_min_freq(freq, "touch min");
  689. if (IS_ERR(cpufreq_min_touch)) {
  690. pr_err("%s: fail to get the handle\n", __func__);
  691. goto out;
  692. }
  693. }
  694. }
  695. if (id & DVFS_CAMERA_ID) {
  696. if (freq != -1) {
  697. cpufreq_min_camera = cpufreq_limit_min_freq(freq, "camera min");
  698. if (IS_ERR(cpufreq_min_camera)) {
  699. pr_err("%s: fail to get the handle\n", __func__);
  700. goto out;
  701. }
  702. }
  703. }
  704. if (id & DVFS_SENSOR_ID) {
  705. if (freq != -1) {
  706. cpufreq_min_sensor = cpufreq_limit_min_freq(freq, "sensor min");
  707. if (IS_ERR(cpufreq_min_sensor)) {
  708. pr_err("%s: fail to get the handle\n", __func__);
  709. goto out;
  710. }
  711. }
  712. }
  713. ret = 0;
  714. out:
  715. mutex_unlock(&cpufreq_limit_mutex);
  716. return ret;
  717. }
  718. #endif
  719. #ifdef CONFIG_PM_TRACE
  720. int pm_trace_enabled;
  721. static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
  722. char *buf)
  723. {
  724. return sprintf(buf, "%d\n", pm_trace_enabled);
  725. }
  726. static ssize_t
  727. pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
  728. const char *buf, size_t n)
  729. {
  730. int val;
  731. if (sscanf(buf, "%d", &val) == 1) {
  732. pm_trace_enabled = !!val;
  733. return n;
  734. }
  735. return -EINVAL;
  736. }
  737. power_attr(pm_trace);
  738. static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
  739. struct kobj_attribute *attr,
  740. char *buf)
  741. {
  742. return show_trace_dev_match(buf, PAGE_SIZE);
  743. }
  744. static ssize_t
  745. pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr,
  746. const char *buf, size_t n)
  747. {
  748. return -EINVAL;
  749. }
  750. power_attr(pm_trace_dev_match);
  751. #endif /* CONFIG_PM_TRACE */
#ifdef CONFIG_SEC_DVFS
/* Serializes all DVFS lock bookkeeping below. */
DEFINE_MUTEX(dvfs_mutex);
/* Bitmask of DVFS clients that currently hold a request. */
static unsigned long dvfs_id;
/* Per-client requested frequencies. */
static unsigned long apps_min_freq;
static unsigned long apps_max_freq;
static unsigned long thermald_max_freq;
static unsigned long touch_min_freq = MIN_TOUCH_LIMIT;
static unsigned long unicpu_max_freq = MAX_UNICPU_LIMIT;
static unsigned long sensor_min_freq = MIN_SENSOR_LIMIT;
  761. static int verify_cpufreq_target(unsigned int target)
  762. {
  763. int i;
  764. struct cpufreq_frequency_table *table;
  765. table = cpufreq_frequency_get_table(BOOT_CPU);
  766. if (table == NULL)
  767. return -EFAULT;
  768. for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
  769. if (table[i].frequency < MIN_FREQ_LIMIT ||
  770. table[i].frequency > MAX_FREQ_LIMIT)
  771. continue;
  772. if (target == table[i].frequency)
  773. return 0;
  774. }
  775. return -EINVAL;
  776. }
  777. int set_freq_limit(unsigned long id, unsigned int freq)
  778. {
  779. unsigned int min = MIN_FREQ_LIMIT;
  780. unsigned int max = MAX_FREQ_LIMIT;
  781. if (freq != 0 && freq != -1 && verify_cpufreq_target(freq))
  782. return -EINVAL;
  783. mutex_lock(&dvfs_mutex);
  784. if (freq == -1)
  785. dvfs_id &= ~id;
  786. else
  787. dvfs_id |= id;
  788. /* update freq for apps/thermald */
  789. if (id == DVFS_APPS_MIN_ID)
  790. apps_min_freq = freq;
  791. else if (id == DVFS_APPS_MAX_ID)
  792. apps_max_freq = freq;
  793. else if (id == DVFS_THERMALD_ID)
  794. thermald_max_freq = freq;
  795. else if (id == DVFS_TOUCH_ID)
  796. touch_min_freq = freq;
  797. else if (id == DVFS_SENSOR_ID)
  798. sensor_min_freq = freq;
  799. /* set min - apps */
  800. if (dvfs_id & DVFS_APPS_MIN_ID && min < apps_min_freq)
  801. min = apps_min_freq;
  802. if (dvfs_id & DVFS_TOUCH_ID && min < touch_min_freq)
  803. min = touch_min_freq;
  804. if (dvfs_id & DVFS_SENSOR_ID && min < sensor_min_freq)
  805. min = sensor_min_freq;
  806. /* set max */
  807. if (dvfs_id & DVFS_APPS_MAX_ID && max > apps_max_freq)
  808. max = apps_max_freq;
  809. if (dvfs_id & DVFS_THERMALD_ID && max > thermald_max_freq)
  810. max = thermald_max_freq;
  811. if (dvfs_id & DVFS_UNICPU_ID && max > unicpu_max_freq)
  812. max = unicpu_max_freq;
  813. /* check min max*/
  814. if (min > max)
  815. min = max;
  816. /* update */
  817. set_min_lock(min);
  818. set_max_lock(max);
  819. pr_info("%s: ,dvfs-id:0x%lu ,id:-0x%lu %d, min %d, max %d\n",
  820. __func__,dvfs_id, id, freq, min, max);
  821. /* need to update now */
  822. if (id & UPDATE_NOW_BITS) {
  823. int cpu;
  824. unsigned int cur = 0;
  825. for_each_online_cpu(cpu) {
  826. cur = cpufreq_quick_get(cpu);
  827. if (cur) {
  828. struct cpufreq_policy policy;
  829. policy.cpu = cpu;
  830. if (cur < min)
  831. cpufreq_driver_target(&policy,
  832. min, CPUFREQ_RELATION_H);
  833. else if (cur > max)
  834. cpufreq_driver_target(&policy,
  835. max, CPUFREQ_RELATION_L);
  836. }
  837. }
  838. }
  839. mutex_unlock(&dvfs_mutex);
  840. return 0;
  841. }
  842. static ssize_t cpufreq_min_limit_show(struct kobject *kobj,
  843. struct kobj_attribute *attr, char *buf)
  844. {
  845. int freq;
  846. freq = get_min_lock();
  847. if (!freq)
  848. freq = -1;
  849. return sprintf(buf, "%d\n", freq);
  850. }
  851. static ssize_t cpufreq_min_limit_store(struct kobject *kobj,
  852. struct kobj_attribute *attr,
  853. const char *buf, size_t n)
  854. {
  855. int freq_min_limit, ret = 0;
  856. ret = sscanf(buf, "%d", &freq_min_limit);
  857. if (ret != 1)
  858. return -EINVAL;
  859. set_freq_limit(DVFS_APPS_MIN_ID, freq_min_limit);
  860. return n;
  861. }
  862. static ssize_t cpufreq_max_limit_show(struct kobject *kobj,
  863. struct kobj_attribute *attr, char *buf)
  864. {
  865. int freq;
  866. freq = get_max_lock();
  867. if (!freq)
  868. freq = -1;
  869. return sprintf(buf, "%d\n", freq);
  870. }
  871. static ssize_t cpufreq_max_limit_store(struct kobject *kobj,
  872. struct kobj_attribute *attr,
  873. const char *buf, size_t n)
  874. {
  875. int freq_max_limit, ret = 0;
  876. ret = sscanf(buf, "%d", &freq_max_limit);
  877. if (ret != 1)
  878. return -EINVAL;
  879. set_freq_limit(DVFS_APPS_MAX_ID, freq_max_limit);
  880. return n;
  881. }
  882. static ssize_t cpufreq_table_show(struct kobject *kobj,
  883. struct kobj_attribute *attr, char *buf)
  884. {
  885. ssize_t len = 0;
  886. int i, count = 0;
  887. unsigned int freq;
  888. struct cpufreq_frequency_table *table;
  889. table = cpufreq_frequency_get_table(BOOT_CPU);
  890. if (table == NULL)
  891. return 0;
  892. for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++)
  893. count = i;
  894. for (i = count; i >= 0; i--) {
  895. freq = table[i].frequency;
  896. if (freq < MIN_FREQ_LIMIT || freq > MAX_FREQ_LIMIT)
  897. continue;
  898. len += sprintf(buf + len, "%u ", freq);
  899. }
  900. len--;
  901. len += sprintf(buf + len, "\n");
  902. return len;
  903. }
/*
 * The frequency table is read-only; writes are acknowledged (return @n
 * so userspace does not retry) but otherwise ignored.
 */
static ssize_t cpufreq_table_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
pr_info("%s: Not supported\n", __func__);
return n;
}
/* Instantiate the kobj_attribute wrappers for the DVFS sysfs files. */
power_attr(cpufreq_max_limit);
power_attr(cpufreq_min_limit);
power_attr(cpufreq_table);
#endif
/*
 * Attributes exposed under /sys/power.  Entries are compiled in per
 * config option; the list is NULL-terminated for sysfs_create_group().
 */
static struct attribute * g[] = {
&state_attr.attr,
#ifdef CONFIG_PM_TRACE
&pm_trace_attr.attr,
&pm_trace_dev_match_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP
&pm_async_attr.attr,
&wakeup_count_attr.attr,
#ifdef CONFIG_PM_AUTOSLEEP
&autosleep_attr.attr,
#endif
#ifdef CONFIG_PM_WAKELOCKS
&wake_lock_attr.attr,
&wake_unlock_attr.attr,
#endif
#ifdef CONFIG_PM_DEBUG
&pm_test_attr.attr,
#endif
#endif
#ifdef CONFIG_CPU_FREQ_LIMIT_USERSPACE
&cpufreq_table_attr.attr,
&cpufreq_max_limit_attr.attr,
&cpufreq_min_limit_attr.attr,
#endif
#ifdef CONFIG_SEC_DVFS
/*
 * NOTE(review): these repeat the three entries above when both
 * CONFIG_CPU_FREQ_LIMIT_USERSPACE and CONFIG_SEC_DVFS are enabled,
 * which sysfs rejects as duplicate file names — presumably the two
 * options are mutually exclusive; verify in Kconfig.
 */
&cpufreq_min_limit_attr.attr,
&cpufreq_max_limit_attr.attr,
&cpufreq_table_attr.attr,
#endif
NULL,
};
/* Attribute group installed on power_kobj by pm_init(). */
static struct attribute_group attr_group = {
.attrs = g,
};
  950. #ifdef CONFIG_PM_RUNTIME
  951. struct workqueue_struct *pm_wq;
  952. EXPORT_SYMBOL_GPL(pm_wq);
  953. static int __init pm_start_workqueue(void)
  954. {
  955. pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
  956. return pm_wq ? 0 : -ENOMEM;
  957. }
  958. #else
  959. static inline int pm_start_workqueue(void) { return 0; }
  960. #endif
/*
 * pm_init - core initcall for the power management core: starts the PM
 * workqueue, sizes hibernation buffers, arms the touch-event timer,
 * creates /sys/power with its attribute group, and initializes
 * autosleep (and, per config, the suspend helper and DVFS defaults).
 */
static int __init pm_init(void)
{
int error = pm_start_workqueue();
if (error)
return error;
hibernate_image_size_init();
hibernate_reserved_size_init();
/* 2-second window for the touch-event timer */
touch_evt_timer_val = ktime_set(2, 0);
hrtimer_init(&tc_ev_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
tc_ev_timer.function = &tc_ev_stop;
tc_ev_processed = 1;
power_kobj = kobject_create_and_add("power", NULL);
if (!power_kobj)
return -ENOMEM;
error = sysfs_create_group(power_kobj, &attr_group);
if (error)
/* NOTE(review): power_kobj is not released on this failure
 * path; kobject_put() here would avoid leaking it */
return error;
#ifdef CONFIG_SUSPEND_HELPER
suspend_helper_init();
#endif
#ifdef CONFIG_SEC_DVFS
/* start with the widest allowed frequency window */
apps_min_freq = MIN_FREQ_LIMIT;
apps_max_freq = MAX_FREQ_LIMIT;
thermald_max_freq = MAX_FREQ_LIMIT;
#endif
return pm_autosleep_init();
}
core_initcall(pm_init);