main.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371
  1. /*
  2. * kernel/power/main.c - PM subsystem core functionality.
  3. *
  4. * Copyright (c) 2003 Patrick Mochel
  5. * Copyright (c) 2003 Open Source Development Lab
  6. *
  7. * This file is released under the GPLv2
  8. *
  9. */
  10. #include <linux/export.h>
  11. #include <linux/kobject.h>
  12. #include <linux/string.h>
  13. #include <linux/resume-trace.h>
  14. #include <linux/workqueue.h>
  15. #include <linux/debugfs.h>
  16. #include <linux/seq_file.h>
  17. #include <linux/hrtimer.h>
  18. #define CONFIG_SUSPEND_HELPER //etinum.test
  19. //#define SUSPEND_WAKEUP_BOOST
  20. #ifdef SUSPEND_WAKEUP_BOOST
  21. #include <linux/sched.h>
  22. #endif
  23. #ifdef CONFIG_CPU_FREQ_LIMIT_USERSPACE
  24. #include <linux/cpufreq.h>
  25. #include <linux/cpufreq_limit.h>
  26. #endif
  27. #ifdef CONFIG_SEC_DVFS
  28. #include <linux/cpufreq.h>
  29. #include <linux/rq_stats.h>
  30. #endif
  31. #include "power.h"
  32. #define MAX_BUF 100
/* Serialises system-wide power state transitions. */
DEFINE_MUTEX(pm_mutex);

#ifdef CONFIG_PM_SLEEP

/* Routines for PM-transition notifications */
static BLOCKING_NOTIFIER_HEAD(pm_chain_head);

/* Deferred sysfs notification for the "touch_event" attribute. */
static void touch_event_fn(struct work_struct *work);
static DECLARE_WORK(touch_event_struct, touch_event_fn);

/* hrtimer marking the end of a touch-event window; fires tc_ev_stop(). */
static struct hrtimer tc_ev_timer;
static int tc_ev_processed;		/* set once the timer work has run */
static ktime_t touch_evt_timer_val;
  42. int register_pm_notifier(struct notifier_block *nb)
  43. {
  44. return blocking_notifier_chain_register(&pm_chain_head, nb);
  45. }
  46. EXPORT_SYMBOL_GPL(register_pm_notifier);
  47. int unregister_pm_notifier(struct notifier_block *nb)
  48. {
  49. return blocking_notifier_chain_unregister(&pm_chain_head, nb);
  50. }
  51. EXPORT_SYMBOL_GPL(unregister_pm_notifier);
  52. int pm_notifier_call_chain(unsigned long val)
  53. {
  54. int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);
  55. return notifier_to_errno(ret);
  56. }
  57. /* If set, devices may be suspended and resumed asynchronously. */
  58. int pm_async_enabled = 1;
  59. static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
  60. char *buf)
  61. {
  62. return sprintf(buf, "%d\n", pm_async_enabled);
  63. }
  64. static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
  65. const char *buf, size_t n)
  66. {
  67. unsigned long val;
  68. if (kstrtoul(buf, 10, &val))
  69. return -EINVAL;
  70. if (val > 1)
  71. return -EINVAL;
  72. pm_async_enabled = val;
  73. return n;
  74. }
  75. power_attr(pm_async);
  76. static void touch_event_fn(struct work_struct *work)
  77. {
  78. /* wakeup the userspace poll */
  79. tc_ev_processed = 1;
  80. sysfs_notify(power_kobj, NULL, "touch_event");
  81. return;
  82. }
  83. static enum hrtimer_restart tc_ev_stop(struct hrtimer *hrtimer)
  84. {
  85. schedule_work(&touch_event_struct);
  86. return HRTIMER_NORESTART;
  87. }
#ifdef CONFIG_PM_DEBUG
/* Active PM test level; TEST_NONE means no test mode is in effect. */
int pm_test_level = TEST_NONE;

/* Human-readable name for each test level, indexed by level. */
static const char * const pm_tests[__TEST_AFTER_LAST] = {
	[TEST_NONE] = "none",
	[TEST_CORE] = "core",
	[TEST_CPUS] = "processors",
	[TEST_PLATFORM] = "platform",
	[TEST_DEVICES] = "devices",
	[TEST_FREEZER] = "freezer",
};
  98. static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
  99. char *buf)
  100. {
  101. char *s = buf;
  102. int level;
  103. for (level = TEST_FIRST; level <= TEST_MAX; level++)
  104. if (pm_tests[level]) {
  105. if (level == pm_test_level)
  106. s += sprintf(s, "[%s] ", pm_tests[level]);
  107. else
  108. s += sprintf(s, "%s ", pm_tests[level]);
  109. }
  110. if (s != buf)
  111. /* convert the last space to a newline */
  112. *(s-1) = '\n';
  113. return (s - buf);
  114. }
  115. static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
  116. const char *buf, size_t n)
  117. {
  118. const char * const *s;
  119. int level;
  120. char *p;
  121. int len;
  122. int error = -EINVAL;
  123. p = memchr(buf, '\n', n);
  124. len = p ? p - buf : n;
  125. lock_system_sleep();
  126. level = TEST_FIRST;
  127. for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
  128. if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
  129. pm_test_level = level;
  130. error = 0;
  131. break;
  132. }
  133. unlock_system_sleep();
  134. return error ? error : n;
  135. }
  136. power_attr(pm_test);
  137. #endif /* CONFIG_PM_DEBUG */
/* Map a suspend step to its reporting name ("" for unknown steps). */
static char *suspend_step_name(enum suspend_stat_step step)
{
	switch (step) {
	case SUSPEND_FREEZE:
		return "freeze";
	case SUSPEND_PREPARE:
		return "prepare";
	case SUSPEND_SUSPEND:
		return "suspend";
	case SUSPEND_SUSPEND_NOIRQ:
		return "suspend_noirq";
	case SUSPEND_RESUME_NOIRQ:
		return "resume_noirq";
	case SUSPEND_RESUME:
		return "resume";
	default:
		return "";
	}
}
/*
 * suspend_attr(_name) - define a read-only sysfs attribute that reports
 * the counter of the same name from the global suspend_stats structure.
 */
#define suspend_attr(_name)			\
static ssize_t _name##_show(struct kobject *kobj,	\
		struct kobj_attribute *attr, char *buf)	\
{						\
	return sprintf(buf, "%d\n", suspend_stats._name);	\
}						\
static struct kobj_attribute _name = __ATTR_RO(_name)

suspend_attr(success);
suspend_attr(fail);
suspend_attr(failed_freeze);
suspend_attr(failed_prepare);
suspend_attr(failed_suspend);
suspend_attr(failed_suspend_late);
suspend_attr(failed_suspend_noirq);
suspend_attr(failed_resume);
suspend_attr(failed_resume_early);
suspend_attr(failed_resume_noirq);
  174. static ssize_t last_failed_dev_show(struct kobject *kobj,
  175. struct kobj_attribute *attr, char *buf)
  176. {
  177. int index;
  178. char *last_failed_dev = NULL;
  179. index = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
  180. index %= REC_FAILED_NUM;
  181. last_failed_dev = suspend_stats.failed_devs[index];
  182. return sprintf(buf, "%s\n", last_failed_dev);
  183. }
  184. static struct kobj_attribute last_failed_dev = __ATTR_RO(last_failed_dev);
  185. static ssize_t last_failed_errno_show(struct kobject *kobj,
  186. struct kobj_attribute *attr, char *buf)
  187. {
  188. int index;
  189. int last_failed_errno;
  190. index = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
  191. index %= REC_FAILED_NUM;
  192. last_failed_errno = suspend_stats.errno[index];
  193. return sprintf(buf, "%d\n", last_failed_errno);
  194. }
  195. static struct kobj_attribute last_failed_errno = __ATTR_RO(last_failed_errno);
  196. static ssize_t last_failed_step_show(struct kobject *kobj,
  197. struct kobj_attribute *attr, char *buf)
  198. {
  199. int index;
  200. enum suspend_stat_step step;
  201. char *last_failed_step = NULL;
  202. index = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
  203. index %= REC_FAILED_NUM;
  204. step = suspend_stats.failed_steps[index];
  205. last_failed_step = suspend_step_name(step);
  206. return sprintf(buf, "%s\n", last_failed_step);
  207. }
  208. static struct kobj_attribute last_failed_step = __ATTR_RO(last_failed_step);
/* All suspend statistics, exposed under /sys/power/suspend_stats/. */
static struct attribute *suspend_attrs[] = {
	&success.attr,
	&fail.attr,
	&failed_freeze.attr,
	&failed_prepare.attr,
	&failed_suspend.attr,
	&failed_suspend_late.attr,
	&failed_suspend_noirq.attr,
	&failed_resume.attr,
	&failed_resume_early.attr,
	&failed_resume_noirq.attr,
	&last_failed_dev.attr,
	&last_failed_errno.attr,
	&last_failed_step.attr,
	NULL,
};

static struct attribute_group suspend_attr_group = {
	.name = "suspend_stats",
	.attrs = suspend_attrs,
};
  229. #ifdef CONFIG_DEBUG_FS
/*
 * Debugfs dump: all suspend counters followed by the last REC_FAILED_NUM
 * failing device names, errnos and steps, newest first.
 */
static int suspend_stats_show(struct seq_file *s, void *unused)
{
	int i, index, last_dev, last_errno, last_step;

	/* Each last_failed_* counter points one past the newest record. */
	last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
	last_dev %= REC_FAILED_NUM;
	last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
	last_errno %= REC_FAILED_NUM;
	last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
	last_step %= REC_FAILED_NUM;
	seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
			"%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
			"success", suspend_stats.success,
			"fail", suspend_stats.fail,
			"failed_freeze", suspend_stats.failed_freeze,
			"failed_prepare", suspend_stats.failed_prepare,
			"failed_suspend", suspend_stats.failed_suspend,
			"failed_suspend_late",
				suspend_stats.failed_suspend_late,
			"failed_suspend_noirq",
				suspend_stats.failed_suspend_noirq,
			"failed_resume", suspend_stats.failed_resume,
			"failed_resume_early",
				suspend_stats.failed_resume_early,
			"failed_resume_noirq",
				suspend_stats.failed_resume_noirq);
	seq_printf(s, "failures:\n last_failed_dev:\t%-s\n",
			suspend_stats.failed_devs[last_dev]);
	/* Walk the device-name ring buffer backwards from the newest entry. */
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_dev + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
			suspend_stats.failed_devs[index]);
	}
	seq_printf(s, " last_failed_errno:\t%-d\n",
			suspend_stats.errno[last_errno]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_errno + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-d\n",
			suspend_stats.errno[index]);
	}
	seq_printf(s, " last_failed_step:\t%-s\n",
			suspend_step_name(
				suspend_stats.failed_steps[last_step]));
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_step + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
			suspend_step_name(
				suspend_stats.failed_steps[index]));
	}
	return 0;
}
  283. static int suspend_stats_open(struct inode *inode, struct file *file)
  284. {
  285. return single_open(file, suspend_stats_show, NULL);
  286. }
/* Read-only seq_file interface for the suspend statistics. */
static const struct file_operations suspend_stats_operations = {
	.open = suspend_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Create /sys/kernel/debug/suspend_stats (world-readable). */
static int __init pm_debugfs_init(void)
{
	debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
			NULL, NULL, &suspend_stats_operations);
	return 0;
}

late_initcall(pm_debugfs_init);
  300. #endif /* CONFIG_DEBUG_FS */
  301. #endif /* CONFIG_PM_SLEEP */
  302. #ifdef CONFIG_PM_SLEEP_DEBUG
  303. /*
  304. * pm_print_times: print time taken by devices to suspend and resume.
  305. *
  306. * show() returns whether printing of suspend and resume times is enabled.
  307. * store() accepts 0 or 1. 0 disables printing and 1 enables it.
  308. */
  309. bool pm_print_times_enabled;
  310. static ssize_t pm_print_times_show(struct kobject *kobj,
  311. struct kobj_attribute *attr, char *buf)
  312. {
  313. return sprintf(buf, "%d\n", pm_print_times_enabled);
  314. }
  315. static ssize_t pm_print_times_store(struct kobject *kobj,
  316. struct kobj_attribute *attr,
  317. const char *buf, size_t n)
  318. {
  319. unsigned long val;
  320. if (kstrtoul(buf, 10, &val))
  321. return -EINVAL;
  322. if (val > 1)
  323. return -EINVAL;
  324. pm_print_times_enabled = !!val;
  325. return n;
  326. }
  327. power_attr(pm_print_times);
/* Default timing output to on only when booted with initcall_debug. */
static inline void pm_print_times_init(void)
{
	pm_print_times_enabled = !!initcall_debug;
}
#else /* !CONFIG_PM_SLEEP_DEBUG */
static inline void pm_print_times_init(void) {}
#endif /* CONFIG_PM_SLEEP_DEBUG */
/* The /sys/power kobject; also used by sysfs_notify() callers above. */
struct kobject *power_kobj;

/**
 * state - control system power state.
 *
 * show() returns what states are supported, which is hard-coded to
 * 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
 * 'disk' (Suspend-to-Disk).
 *
 * store() accepts one of those strings, translates it into the
 * proper enumerated value, and initiates a suspend transition.
 */
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	char *s = buf;
#ifdef CONFIG_SUSPEND
	suspend_state_t i;

	/* List every suspend state the platform reports as valid. */
	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
		if (valid_state(i))
			s += sprintf(s, "%s ", pm_states[i].label);
#endif
#ifdef CONFIG_HIBERNATION
	s += sprintf(s, "%s\n", "disk");
#else
	if (s != buf)
		/* convert the last space to a newline */
		*(s-1) = '\n';
#endif
	return (s - buf);
}
/*
 * decode_state - translate a "state" string into a suspend_state_t.
 *
 * "disk" maps to PM_SUSPEND_MAX (hibernation); any label found in
 * pm_states[] maps to its suspend state; anything else yields
 * PM_SUSPEND_ON, which callers treat as invalid input.
 */
static suspend_state_t decode_state(const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
	suspend_state_t state = PM_SUSPEND_MIN;
	struct pm_sleep_state *s;
#endif
	char *p;
	int len;

	/* Compare up to (not including) a trailing newline from "echo". */
	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	/* Check hibernation first. */
	if (len == 4 && !strncmp(buf, "disk", len))
		return PM_SUSPEND_MAX;

#ifdef CONFIG_SUSPEND
	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++)
		if (len == strlen(s->label) && !strncmp(buf, s->label, len))
			return state;
#endif

	return PM_SUSPEND_ON;
}
#ifdef CONFIG_SUSPEND_HELPER
/* Ordered workqueue that serialises helper-driven suspend requests. */
static struct workqueue_struct *suspend_helper_wq;

/* Snapshot of the sysfs "state" write being serviced. */
struct state_store_params {
	const char *buf;
	size_t n;
};

/*
 * Work item, completion and result shared between the sysfs writer
 * (state_store_helper) and the helper kworker (suspend_helper).
 */
struct suspend_helper_data {
	struct work_struct work;
	struct completion done;
	struct state_store_params params;
	int result;
};
/* NULL until suspend_helper_init() succeeds; checked in state_store(). */
struct suspend_helper_data *suspend_helper_data;
  398. static void suspend_helper(struct work_struct *work)
  399. {
  400. struct suspend_helper_data *data = (struct suspend_helper_data *)
  401. container_of(work, struct suspend_helper_data, work);
  402. const char *buf = data->params.buf;
  403. size_t n = data->params.n;
  404. suspend_state_t state;
  405. int error = 0;
  406. pr_info("[suspend helper] %s: start!\n", __func__);
  407. error = pm_autosleep_lock();
  408. if (error) {
  409. goto out_nolock;
  410. }
  411. if (pm_autosleep_state() > PM_SUSPEND_ON) {
  412. error = -EBUSY;
  413. goto out;
  414. }
  415. state = decode_state(buf, n);
  416. if (state < PM_SUSPEND_MAX)
  417. error = pm_suspend(state);
  418. else if (state == PM_SUSPEND_MAX)
  419. error = hibernate();
  420. else
  421. error = -EINVAL;
  422. out:
  423. pm_autosleep_unlock();
  424. out_nolock:
  425. // set result and notify completion
  426. data->result = error;
  427. complete(&data->done);
  428. pr_info("[suspend helper] %s: result = %d\n", __func__, error);
  429. }
/*
 * state_store_helper - forward a "state" write to the suspend-helper
 * kworker and block until the transition finishes.
 *
 * The calling (userspace) task is told not to be counted by the freezer
 * while it waits, since the queued work will itself freeze tasks.
 * Returns the helper's error code, or @n on success.
 */
static ssize_t state_store_helper(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t n)
{
	int error;
	int freezable = 0;

	// we don't need to freeze. so tell the freezer
	if (!freezer_should_skip(current)) {
		freezable = 1;
		freezer_do_not_count();
		pr_info("[suspend helper] %s: freezer should skip me (%s:%d)\n",
			__func__, current->comm, current->pid);
	}

	/* NOTE(review): params are written without any lock; concurrent
	 * writers to /sys/power/state would race here — confirm that sysfs
	 * serialises these writes. */
	suspend_helper_data->params.buf = buf;
	suspend_helper_data->params.n = n;
	INIT_COMPLETION(suspend_helper_data->done);

	// use kworker for suspend resume
	queue_work(suspend_helper_wq, &suspend_helper_data->work);

	// wait for suspend/resume work to be complete
	wait_for_completion(&suspend_helper_data->done);

	if (freezable) {
		// set ourself as freezable
		freezer_count();
	}

	error = suspend_helper_data->result;
	pr_info("[suspend helper] %s: suspend_helper returned %d\n", __func__, error);

	return error ? error : n;
}
  457. static int suspend_helper_init(void)
  458. {
  459. int ret = 0;
  460. suspend_helper_wq = alloc_ordered_workqueue("suspend_helper", 0);
  461. if (!suspend_helper_wq)
  462. return -ENOMEM;
  463. suspend_helper_data = kzalloc(sizeof(struct suspend_helper_data), GFP_KERNEL);
  464. if (!suspend_helper_data) {
  465. ret = -ENOMEM;
  466. goto out_destroy_wq;
  467. }
  468. INIT_WORK(&suspend_helper_data->work, suspend_helper);
  469. init_completion(&suspend_helper_data->done);
  470. pr_info("[suspend helper] %s: init done\n", __func__);
  471. return 0;
  472. out_destroy_wq:
  473. destroy_workqueue(suspend_helper_wq);
  474. return ret;
  475. }
  476. #endif
#ifdef SUSPEND_WAKEUP_BOOST
/* Debug-log the current task's scheduling parameters, tagged with @msg. */
static void pr_sched_state(const char *msg)
{
	pr_debug("[sched state] %s: (%s:%d) %pS policy=%d, prio=%d, static_prio=%d, normal_prio=%d, rt_priority=%d\n",
		msg, current->comm, current->pid,
		current->sched_class, current->policy,
		current->prio, current->static_prio, current->normal_prio, current->rt_priority);
}
#endif
  486. static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
  487. const char *buf, size_t n)
  488. {
  489. suspend_state_t state;
  490. int error;
  491. #ifdef SUSPEND_WAKEUP_BOOST
  492. int orig_policy = current->policy;
  493. int orig_nice = task_nice(current);
  494. struct sched_param param = { .sched_priority = 1 };
  495. #endif
  496. #ifdef CONFIG_SUSPEND_HELPER
  497. if (suspend_helper_data) {
  498. pr_info("[suspend helper] %s: Let our helper do the real work!\n", __func__);
  499. return state_store_helper(kobj, attr, buf, n);
  500. }
  501. pr_info("[suspend helper] %s: helper data not avaialbe.. Fall back to the legacy code..\n", __func__);
  502. #endif
  503. error = pm_autosleep_lock();
  504. if (error)
  505. return error;
  506. if (pm_autosleep_state() > PM_SUSPEND_ON) {
  507. error = -EBUSY;
  508. goto out;
  509. }
  510. #ifdef SUSPEND_WAKEUP_BOOST
  511. pr_sched_state("before boost");
  512. sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
  513. pr_sched_state("after boost");
  514. #endif
  515. state = decode_state(buf, n);
  516. if (state < PM_SUSPEND_MAX)
  517. error = pm_suspend(state);
  518. else if (state == PM_SUSPEND_MAX)
  519. error = hibernate();
  520. else
  521. error = -EINVAL;
  522. #ifdef SUSPEND_WAKEUP_BOOST
  523. pr_sched_state("before restore");
  524. param.sched_priority = 0;
  525. sched_setscheduler_nocheck(current, orig_policy, &param);
  526. set_user_nice(current, orig_nice);
  527. pr_sched_state("after restore");
  528. #endif
  529. out:
  530. pm_autosleep_unlock();
  531. return error ? error : n;
  532. }
  533. power_attr(state);
  534. #ifdef CONFIG_PM_SLEEP
  535. /*
  536. * The 'wakeup_count' attribute, along with the functions defined in
  537. * drivers/base/power/wakeup.c, provides a means by which wakeup events can be
  538. * handled in a non-racy way.
  539. *
  540. * If a wakeup event occurs when the system is in a sleep state, it simply is
  541. * woken up. In turn, if an event that would wake the system up from a sleep
  542. * state occurs when it is undergoing a transition to that sleep state, the
  543. * transition should be aborted. Moreover, if such an event occurs when the
  544. * system is in the working state, an attempt to start a transition to the
  545. * given sleep state should fail during certain period after the detection of
  546. * the event. Using the 'state' attribute alone is not sufficient to satisfy
  547. * these requirements, because a wakeup event may occur exactly when 'state'
  548. * is being written to and may be delivered to user space right before it is
  549. * frozen, so the event will remain only partially processed until the system is
  550. * woken up by another event. In particular, it won't cause the transition to
  551. * a sleep state to be aborted.
  552. *
  553. * This difficulty may be overcome if user space uses 'wakeup_count' before
  554. * writing to 'state'. It first should read from 'wakeup_count' and store
  555. * the read value. Then, after carrying out its own preparations for the system
  556. * transition to a sleep state, it should write the stored value to
  557. * 'wakeup_count'. If that fails, at least one wakeup event has occurred since
  558. * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it
  559. * is allowed to write to 'state', but the transition will be aborted if there
  560. * are any wakeup events detected after 'wakeup_count' was written to.
  561. */
  562. static ssize_t wakeup_count_show(struct kobject *kobj,
  563. struct kobj_attribute *attr,
  564. char *buf)
  565. {
  566. unsigned int val;
  567. return pm_get_wakeup_count(&val, true) ?
  568. sprintf(buf, "%u\n", val) : -EINTR;
  569. }
  570. static ssize_t wakeup_count_store(struct kobject *kobj,
  571. struct kobj_attribute *attr,
  572. const char *buf, size_t n)
  573. {
  574. unsigned int val;
  575. int error;
  576. error = pm_autosleep_lock();
  577. if (error)
  578. return error;
  579. if (pm_autosleep_state() > PM_SUSPEND_ON) {
  580. error = -EBUSY;
  581. goto out;
  582. }
  583. error = -EINVAL;
  584. if (sscanf(buf, "%u", &val) == 1) {
  585. if (pm_save_wakeup_count(val))
  586. error = n;
  587. else
  588. pm_print_active_wakeup_sources();
  589. }
  590. out:
  591. pm_autosleep_unlock();
  592. return error;
  593. }
  594. power_attr(wakeup_count);
  595. #ifdef CONFIG_PM_AUTOSLEEP
/* Report the autosleep target state: "off", a state label, or "disk". */
static ssize_t autosleep_show(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      char *buf)
{
	suspend_state_t state = pm_autosleep_state();

	/* PM_SUSPEND_ON means autosleep is disabled. */
	if (state == PM_SUSPEND_ON)
		return sprintf(buf, "off\n");

#ifdef CONFIG_SUSPEND
	if (state < PM_SUSPEND_MAX)
		return sprintf(buf, "%s\n", valid_state(state) ?
						pm_states[state].label : "error");
#endif
#ifdef CONFIG_HIBERNATION
	return sprintf(buf, "disk\n");
#else
	/* NOTE(review): no trailing newline here, unlike every other
	 * branch — confirm whether userspace depends on this. */
	return sprintf(buf, "error");
#endif
}
  614. static ssize_t autosleep_store(struct kobject *kobj,
  615. struct kobj_attribute *attr,
  616. const char *buf, size_t n)
  617. {
  618. suspend_state_t state = decode_state(buf, n);
  619. int error;
  620. if (state == PM_SUSPEND_ON
  621. && strcmp(buf, "off") && strcmp(buf, "off\n"))
  622. return -EINVAL;
  623. error = pm_autosleep_set_state(state);
  624. return error ? error : n;
  625. }
  626. power_attr(autosleep);
  627. #endif /* CONFIG_PM_AUTOSLEEP */
  628. #ifdef CONFIG_PM_WAKELOCKS
  629. static ssize_t wake_lock_show(struct kobject *kobj,
  630. struct kobj_attribute *attr,
  631. char *buf)
  632. {
  633. return pm_show_wakelocks(buf, true);
  634. }
  635. static ssize_t wake_lock_store(struct kobject *kobj,
  636. struct kobj_attribute *attr,
  637. const char *buf, size_t n)
  638. {
  639. int error = pm_wake_lock(buf);
  640. return error ? error : n;
  641. }
  642. power_attr(wake_lock);
  643. static ssize_t wake_unlock_show(struct kobject *kobj,
  644. struct kobj_attribute *attr,
  645. char *buf)
  646. {
  647. return pm_show_wakelocks(buf, false);
  648. }
  649. static ssize_t wake_unlock_store(struct kobject *kobj,
  650. struct kobj_attribute *attr,
  651. const char *buf, size_t n)
  652. {
  653. int error = pm_wake_unlock(buf);
  654. return error ? error : n;
  655. }
  656. power_attr(wake_unlock);
  657. #endif /* CONFIG_PM_WAKELOCKS */
  658. #endif /* CONFIG_PM_SLEEP */
  659. #ifdef CONFIG_CPU_FREQ_LIMIT_USERSPACE
/* Userspace-requested limits in kHz; -1 means no limit is held. */
static int cpufreq_max_limit_val = -1;
static int cpufreq_min_limit_val = -1;
/* Handles for the currently-held max/min locks (NULL when not held). */
struct cpufreq_limit_handle *cpufreq_max_hd;
struct cpufreq_limit_handle *cpufreq_min_hd;
/* Protects the handles and limit values above. */
DEFINE_MUTEX(cpufreq_limit_mutex);
  665. static ssize_t cpufreq_table_show(struct kobject *kobj,
  666. struct kobj_attribute *attr, char *buf)
  667. {
  668. ssize_t len = 0;
  669. int i, count = 0;
  670. unsigned int freq;
  671. struct cpufreq_frequency_table *table;
  672. table = cpufreq_frequency_get_table(0);
  673. if (table == NULL)
  674. return 0;
  675. for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++)
  676. count = i;
  677. for (i = count; i >= 0; i--) {
  678. freq = table[i].frequency;
  679. if (freq < MIN_FREQ_LIMIT || freq > MAX_FREQ_LIMIT)
  680. continue;
  681. len += sprintf(buf + len, "%u ", freq);
  682. }
  683. len--;
  684. len += sprintf(buf + len, "\n");
  685. return len;
  686. }
  687. static ssize_t cpufreq_table_store(struct kobject *kobj,
  688. struct kobj_attribute *attr,
  689. const char *buf, size_t n)
  690. {
  691. pr_err("%s: cpufreq_table is read-only\n", __func__);
  692. return -EINVAL;
  693. }
  694. static ssize_t cpufreq_max_limit_show(struct kobject *kobj,
  695. struct kobj_attribute *attr,
  696. char *buf)
  697. {
  698. return sprintf(buf, "%d\n", cpufreq_max_limit_val);
  699. }
  700. static ssize_t cpufreq_max_limit_store(struct kobject *kobj,
  701. struct kobj_attribute *attr,
  702. const char *buf, size_t n)
  703. {
  704. int val;
  705. ssize_t ret = -EINVAL;
  706. if (sscanf(buf, "%d", &val) != 1) {
  707. pr_err("%s: Invalid cpufreq format\n", __func__);
  708. goto out;
  709. }
  710. mutex_lock(&cpufreq_limit_mutex);
  711. if (cpufreq_max_hd) {
  712. cpufreq_limit_put(cpufreq_max_hd);
  713. cpufreq_max_hd = NULL;
  714. }
  715. if (val != -1) {
  716. cpufreq_max_hd = cpufreq_limit_max_freq(val, "user lock(max)");
  717. if (IS_ERR(cpufreq_max_hd)) {
  718. pr_err("%s: fail to get the handle\n", __func__);
  719. cpufreq_max_hd = NULL;
  720. }
  721. }
  722. cpufreq_max_hd ?
  723. (cpufreq_max_limit_val = val) : (cpufreq_max_limit_val = -1);
  724. mutex_unlock(&cpufreq_limit_mutex);
  725. ret = n;
  726. out:
  727. return ret;
  728. }
  729. static ssize_t cpufreq_min_limit_show(struct kobject *kobj,
  730. struct kobj_attribute *attr,
  731. char *buf)
  732. {
  733. return sprintf(buf, "%d\n", cpufreq_min_limit_val);
  734. }
  735. static ssize_t cpufreq_min_limit_store(struct kobject *kobj,
  736. struct kobj_attribute *attr,
  737. const char *buf, size_t n)
  738. {
  739. int val;
  740. ssize_t ret = -EINVAL;
  741. if (sscanf(buf, "%d", &val) != 1) {
  742. pr_err("%s: Invalid cpufreq format\n", __func__);
  743. goto out;
  744. }
  745. mutex_lock(&cpufreq_limit_mutex);
  746. if (cpufreq_min_hd) {
  747. cpufreq_limit_put(cpufreq_min_hd);
  748. cpufreq_min_hd = NULL;
  749. }
  750. if (val != -1) {
  751. cpufreq_min_hd = cpufreq_limit_min_freq(val, "user lock(min)");
  752. if (IS_ERR(cpufreq_min_hd)) {
  753. pr_err("%s: fail to get the handle\n", __func__);
  754. cpufreq_min_hd = NULL;
  755. }
  756. }
  757. cpufreq_min_hd ?
  758. (cpufreq_min_limit_val = val) : (cpufreq_min_limit_val = -1);
  759. mutex_unlock(&cpufreq_limit_mutex);
  760. ret = n;
  761. out:
  762. return ret;
  763. }
power_attr(cpufreq_table);
power_attr(cpufreq_max_limit);
power_attr(cpufreq_min_limit);

/* Per-client minimum-frequency locks managed by set_freq_limit(). */
struct cpufreq_limit_handle *cpufreq_min_touch;
struct cpufreq_limit_handle *cpufreq_min_camera;
struct cpufreq_limit_handle *cpufreq_min_sensor;
  770. int set_freq_limit(unsigned long id, unsigned int freq)
  771. {
  772. ssize_t ret = -EINVAL;
  773. mutex_lock(&cpufreq_limit_mutex);
  774. if (cpufreq_min_touch) {
  775. cpufreq_limit_put(cpufreq_min_touch);
  776. cpufreq_min_touch = NULL;
  777. }
  778. if (cpufreq_min_camera) {
  779. cpufreq_limit_put(cpufreq_min_camera);
  780. cpufreq_min_camera = NULL;
  781. }
  782. if (cpufreq_min_sensor) {
  783. cpufreq_limit_put(cpufreq_min_sensor);
  784. cpufreq_min_sensor = NULL;
  785. }
  786. pr_debug("%s: id=%d freq=%d\n", __func__, (int)id, freq);
  787. /* min lock */
  788. if (id & DVFS_TOUCH_ID) {
  789. if (freq != -1) {
  790. cpufreq_min_touch = cpufreq_limit_min_freq(freq, "touch min");
  791. if (IS_ERR(cpufreq_min_touch)) {
  792. pr_err("%s: fail to get the handle\n", __func__);
  793. goto out;
  794. }
  795. }
  796. }
  797. if (id & DVFS_CAMERA_ID) {
  798. if (freq != -1) {
  799. cpufreq_min_camera = cpufreq_limit_min_freq(freq, "camera min");
  800. if (IS_ERR(cpufreq_min_camera)) {
  801. pr_err("%s: fail to get the handle\n", __func__);
  802. goto out;
  803. }
  804. }
  805. }
  806. if (id & DVFS_SENSOR_ID) {
  807. if (freq != -1) {
  808. cpufreq_min_sensor = cpufreq_limit_min_freq(freq, "sensor min");
  809. if (IS_ERR(cpufreq_min_sensor)) {
  810. pr_err("%s: fail to get the handle\n", __func__);
  811. goto out;
  812. }
  813. }
  814. }
  815. ret = 0;
  816. out:
  817. mutex_unlock(&cpufreq_limit_mutex);
  818. return ret;
  819. }
  820. #endif
  821. #ifdef CONFIG_PM_TRACE
/* Non-zero when PM tracing is enabled via /sys/power/pm_trace. */
int pm_trace_enabled;

/* Show the current pm_trace enable state. */
static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", pm_trace_enabled);
}
  828. static ssize_t
  829. pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
  830. const char *buf, size_t n)
  831. {
  832. int val;
  833. if (sscanf(buf, "%d", &val) == 1) {
  834. pm_trace_enabled = !!val;
  835. if (pm_trace_enabled) {
  836. pr_warn("PM: Enabling pm_trace changes system date and time during resume.\n"
  837. "PM: Correct system time has to be restored manually after resume.\n");
  838. }
  839. return n;
  840. }
  841. return -EINVAL;
  842. }
power_attr(pm_trace);

/* List devices matching the hash recorded by pm_trace during the last
 * failing resume (delegates to show_trace_dev_match()). */
static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       char *buf)
{
	return show_trace_dev_match(buf, PAGE_SIZE);
}

/* Read-only attribute: all writes are rejected. */
static ssize_t
pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t n)
{
	return -EINVAL;
}

power_attr(pm_trace_dev_match);
  857. #endif /* CONFIG_PM_TRACE */
  858. #ifdef CONFIG_SEC_DVFS
/* Serializes all SEC_DVFS limit updates below. */
DEFINE_MUTEX(dvfs_mutex);
/* Bitmask of DVFS_*_ID clients that currently hold a limit. */
static unsigned long dvfs_id;
/* Per-client requested frequencies (kHz, presumably — limits come from
 * MIN/MAX_FREQ_LIMIT; confirm units against the platform headers). */
static unsigned long apps_min_freq;
static unsigned long apps_max_freq;
static unsigned long thermald_max_freq;
static unsigned long touch_min_freq = MIN_TOUCH_LIMIT;
static unsigned long unicpu_max_freq = MAX_UNICPU_LIMIT;
static unsigned long sensor_min_freq = MIN_SENSOR_LIMIT;
  867. static int verify_cpufreq_target(unsigned int target)
  868. {
  869. int i;
  870. struct cpufreq_frequency_table *table;
  871. table = cpufreq_frequency_get_table(BOOT_CPU);
  872. if (table == NULL)
  873. return -EFAULT;
  874. for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
  875. if (table[i].frequency < MIN_FREQ_LIMIT ||
  876. table[i].frequency > MAX_FREQ_LIMIT)
  877. continue;
  878. if (target == table[i].frequency)
  879. return 0;
  880. }
  881. return -EINVAL;
  882. }
  883. int set_freq_limit(unsigned long id, unsigned int freq)
  884. {
  885. unsigned int min = MIN_FREQ_LIMIT;
  886. unsigned int max = MAX_FREQ_LIMIT;
  887. if (freq != 0 && freq != -1 && verify_cpufreq_target(freq))
  888. return -EINVAL;
  889. mutex_lock(&dvfs_mutex);
  890. if (freq == -1)
  891. dvfs_id &= ~id;
  892. else
  893. dvfs_id |= id;
  894. /* update freq for apps/thermald */
  895. if (id == DVFS_APPS_MIN_ID)
  896. apps_min_freq = freq;
  897. else if (id == DVFS_APPS_MAX_ID)
  898. apps_max_freq = freq;
  899. else if (id == DVFS_THERMALD_ID)
  900. thermald_max_freq = freq;
  901. else if (id == DVFS_TOUCH_ID)
  902. touch_min_freq = freq;
  903. else if (id == DVFS_SENSOR_ID)
  904. sensor_min_freq = freq;
  905. /* set min - apps */
  906. if (dvfs_id & DVFS_APPS_MIN_ID && min < apps_min_freq)
  907. min = apps_min_freq;
  908. if (dvfs_id & DVFS_TOUCH_ID && min < touch_min_freq)
  909. min = touch_min_freq;
  910. if (dvfs_id & DVFS_SENSOR_ID && min < sensor_min_freq)
  911. min = sensor_min_freq;
  912. /* set max */
  913. if (dvfs_id & DVFS_APPS_MAX_ID && max > apps_max_freq)
  914. max = apps_max_freq;
  915. if (dvfs_id & DVFS_THERMALD_ID && max > thermald_max_freq)
  916. max = thermald_max_freq;
  917. if (dvfs_id & DVFS_UNICPU_ID && max > unicpu_max_freq)
  918. max = unicpu_max_freq;
  919. /* check min max*/
  920. if (min > max)
  921. min = max;
  922. /* update */
  923. set_min_lock(min);
  924. set_max_lock(max);
  925. pr_info("%s: ,dvfs-id:0x%lu ,id:-0x%lu %d, min %d, max %d\n",
  926. __func__,dvfs_id, id, freq, min, max);
  927. /* need to update now */
  928. if (id & UPDATE_NOW_BITS) {
  929. int cpu;
  930. unsigned int cur = 0;
  931. for_each_online_cpu(cpu) {
  932. cur = cpufreq_quick_get(cpu);
  933. if (cur) {
  934. struct cpufreq_policy policy;
  935. policy.cpu = cpu;
  936. if (cur < min)
  937. cpufreq_driver_target(&policy,
  938. min, CPUFREQ_RELATION_H);
  939. else if (cur > max)
  940. cpufreq_driver_target(&policy,
  941. max, CPUFREQ_RELATION_L);
  942. }
  943. }
  944. }
  945. mutex_unlock(&dvfs_mutex);
  946. return 0;
  947. }
  948. static ssize_t cpufreq_min_limit_show(struct kobject *kobj,
  949. struct kobj_attribute *attr, char *buf)
  950. {
  951. int freq;
  952. freq = get_min_lock();
  953. if (!freq)
  954. freq = -1;
  955. return sprintf(buf, "%d\n", freq);
  956. }
  957. static ssize_t cpufreq_min_limit_store(struct kobject *kobj,
  958. struct kobj_attribute *attr,
  959. const char *buf, size_t n)
  960. {
  961. int freq_min_limit, ret = 0;
  962. ret = sscanf(buf, "%d", &freq_min_limit);
  963. if (ret != 1)
  964. return -EINVAL;
  965. set_freq_limit(DVFS_APPS_MIN_ID, freq_min_limit);
  966. return n;
  967. }
  968. static ssize_t cpufreq_max_limit_show(struct kobject *kobj,
  969. struct kobj_attribute *attr, char *buf)
  970. {
  971. int freq;
  972. freq = get_max_lock();
  973. if (!freq)
  974. freq = -1;
  975. return sprintf(buf, "%d\n", freq);
  976. }
  977. static ssize_t cpufreq_max_limit_store(struct kobject *kobj,
  978. struct kobj_attribute *attr,
  979. const char *buf, size_t n)
  980. {
  981. int freq_max_limit, ret = 0;
  982. ret = sscanf(buf, "%d", &freq_max_limit);
  983. if (ret != 1)
  984. return -EINVAL;
  985. set_freq_limit(DVFS_APPS_MAX_ID, freq_max_limit);
  986. return n;
  987. }
  988. static ssize_t cpufreq_table_show(struct kobject *kobj,
  989. struct kobj_attribute *attr, char *buf)
  990. {
  991. ssize_t len = 0;
  992. int i, count = 0;
  993. unsigned int freq;
  994. struct cpufreq_frequency_table *table;
  995. table = cpufreq_frequency_get_table(BOOT_CPU);
  996. if (table == NULL)
  997. return 0;
  998. for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++)
  999. count = i;
  1000. for (i = count; i >= 0; i--) {
  1001. freq = table[i].frequency;
  1002. if (freq < MIN_FREQ_LIMIT || freq > MAX_FREQ_LIMIT)
  1003. continue;
  1004. len += sprintf(buf + len, "%u ", freq);
  1005. }
  1006. len--;
  1007. len += sprintf(buf + len, "\n");
  1008. return len;
  1009. }
/* The frequency table is read-only; writes are logged and ignored
 * (success is still reported so callers don't loop on retries). */
static ssize_t cpufreq_table_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t n)
{
	pr_info("%s: Not supported\n", __func__);
	return n;
}
/* sysfs attribute objects for the SEC_DVFS show/store handlers above. */
power_attr(cpufreq_max_limit);
power_attr(cpufreq_min_limit);
power_attr(cpufreq_table);
  1020. #endif
  1021. #ifdef CONFIG_FREEZER
  1022. static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
  1023. struct kobj_attribute *attr, char *buf)
  1024. {
  1025. return sprintf(buf, "%u\n", freeze_timeout_msecs);
  1026. }
  1027. static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
  1028. struct kobj_attribute *attr,
  1029. const char *buf, size_t n)
  1030. {
  1031. unsigned long val;
  1032. if (kstrtoul(buf, 10, &val))
  1033. return -EINVAL;
  1034. freeze_timeout_msecs = val;
  1035. return n;
  1036. }
  1037. power_attr(pm_freeze_timeout);
  1038. #endif /* CONFIG_FREEZER*/
/* Attributes exported under /sys/power; config-dependent entries are
 * compiled in per the matching #ifdef sections above. */
static struct attribute * g[] = {
	&state_attr.attr,
#ifdef CONFIG_PM_TRACE
	&pm_trace_attr.attr,
	&pm_trace_dev_match_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP
	&pm_async_attr.attr,
	&wakeup_count_attr.attr,
#ifdef CONFIG_PM_AUTOSLEEP
	&autosleep_attr.attr,
#endif
#ifdef CONFIG_PM_WAKELOCKS
	&wake_lock_attr.attr,
	&wake_unlock_attr.attr,
#endif
#ifdef CONFIG_PM_DEBUG
	&pm_test_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP_DEBUG
	&pm_print_times_attr.attr,
#endif
#endif
#ifdef CONFIG_CPU_FREQ_LIMIT_USERSPACE
	&cpufreq_table_attr.attr,
	&cpufreq_max_limit_attr.attr,
	&cpufreq_min_limit_attr.attr,
#endif
/* NOTE(review): enabling both CONFIG_CPU_FREQ_LIMIT_USERSPACE and
 * CONFIG_SEC_DVFS would duplicate these three attrs (and set_freq_limit
 * is defined twice above) — presumably the configs are mutually
 * exclusive; verify in Kconfig. */
#ifdef CONFIG_SEC_DVFS
	&cpufreq_min_limit_attr.attr,
	&cpufreq_max_limit_attr.attr,
	&cpufreq_table_attr.attr,
#endif
	NULL,
};
/* Primary attribute group for /sys/power. */
static struct attribute_group attr_group = {
	.attrs = g,
};

/* All groups registered on power_kobj in pm_init(). */
static const struct attribute_group *attr_groups[] = {
	&attr_group,
#ifdef CONFIG_PM_SLEEP
	&suspend_attr_group,
#endif
	NULL,
};
#ifdef CONFIG_PM_RUNTIME
/* Freezable workqueue for PM work items; exported for drivers. */
struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);

/* Allocate pm_wq at boot; -ENOMEM if allocation fails. */
static int __init pm_start_workqueue(void)
{
	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
	return pm_wq ? 0 : -ENOMEM;
}
#else
static inline int pm_start_workqueue(void) { return 0; }
#endif
/*
 * Boot-time setup for the PM core: start the PM workqueue, initialize
 * hibernate sizes and the touch-event timer, create /sys/power and its
 * attribute groups, then initialize autosleep.
 */
static int __init pm_init(void)
{
	int error = pm_start_workqueue();
	if (error)
		return error;
	hibernate_image_size_init();
	hibernate_reserved_size_init();
	/* 2-second one-shot hrtimer; tc_ev_* presumably relate to touch
	 * events — tc_ev_stop is defined elsewhere in this file. */
	touch_evt_timer_val = ktime_set(2, 0);
	hrtimer_init(&tc_ev_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tc_ev_timer.function = &tc_ev_stop;
	tc_ev_processed = 1;
	power_kobj = kobject_create_and_add("power", NULL);
	if (!power_kobj)
		return -ENOMEM;
	error = sysfs_create_groups(power_kobj, attr_groups);
	if (error)
		/* NOTE(review): power_kobj is leaked on this path — a
		 * kobject_put() seems warranted; confirm against upstream
		 * before changing. */
		return error;
	pm_print_times_init();
#ifdef CONFIG_SUSPEND_HELPER
	suspend_helper_init();
#endif
#ifdef CONFIG_SEC_DVFS
	/* Start with app/thermald limits wide open. */
	apps_min_freq = MIN_FREQ_LIMIT;
	apps_max_freq = MAX_FREQ_LIMIT;
	thermald_max_freq = MAX_FREQ_LIMIT;
#endif
	return pm_autosleep_init();
}
core_initcall(pm_init);