/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) "mpd %s: " fmt, __func__

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/kobject.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/cpu_pm.h>
#include <linux/cpufreq.h>
#include <linux/rq_stats.h>
#include <asm/atomic.h>
#include <asm/page.h>
#include <mach/msm_dcvs.h>
#include <mach/msm_dcvs_scm.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mpdcvs_trace.h>

#define DEFAULT_RQ_AVG_POLL_MS	(1)
#define DEFAULT_RQ_AVG_DIVIDE	(25)
struct mpd_attrib {
        struct kobj_attribute enabled;
        struct kobj_attribute rq_avg_poll_ms;
        struct kobj_attribute iowait_threshold_pct;
        struct kobj_attribute rq_avg_divide;
        struct kobj_attribute em_win_size_min_us;
        struct kobj_attribute em_win_size_max_us;
        struct kobj_attribute em_max_util_pct;
        struct kobj_attribute mp_em_rounding_point_min;
        struct kobj_attribute mp_em_rounding_point_max;
        struct kobj_attribute online_util_pct_min;
        struct kobj_attribute online_util_pct_max;
        struct kobj_attribute slack_time_min_us;
        struct kobj_attribute slack_time_max_us;
        struct kobj_attribute hp_up_max_ms;
        struct kobj_attribute hp_up_ms;
        struct kobj_attribute hp_up_count;
        struct kobj_attribute hp_dw_max_ms;
        struct kobj_attribute hp_dw_ms;
        struct kobj_attribute hp_dw_count;
        struct attribute_group attrib_group;
};
struct msm_mpd_scm_data {
        enum msm_dcvs_scm_event event;
        int nr;
};

struct mpdecision {
        uint32_t enabled;
        atomic_t algo_cpu_mask;
        uint32_t rq_avg_poll_ms;
        uint32_t iowait_threshold_pct;
        uint32_t rq_avg_divide;
        ktime_t next_update;
        uint32_t slack_us;
        struct msm_mpd_algo_param mp_param;
        struct mpd_attrib attrib;
        struct mutex lock;
        struct task_struct *task;
        struct task_struct *hptask;
        struct hrtimer slack_timer;
        struct msm_mpd_scm_data data;
        int hpupdate;
        wait_queue_head_t wait_q;
        wait_queue_head_t wait_hpq;
};

struct hp_latency {
        int hp_up_max_ms;
        int hp_up_ms;
        int hp_up_count;
        int hp_dw_max_ms;
        int hp_dw_ms;
        int hp_dw_count;
};
static DEFINE_PER_CPU(struct hrtimer, rq_avg_poll_timer);
static DEFINE_SPINLOCK(rq_avg_lock);

enum {
        MSM_MPD_DEBUG_NOTIFIER = BIT(0),
        MSM_MPD_CORE_STATUS = BIT(1),
        MSM_MPD_SLACK_TIMER = BIT(2),
};
enum {
        HPUPDATE_WAITING = 0,     /* we are waiting for a cpumask update */
        HPUPDATE_SCHEDULED = 1,   /* a hotplug update has been scheduled */
        HPUPDATE_IN_PROGRESS = 2, /* we are in the process of hotplugging */
};
static int msm_mpd_enabled = 1;
module_param_named(enabled, msm_mpd_enabled, int, S_IRUGO | S_IWUSR | S_IWGRP);

static struct dentry *debugfs_base;
static struct mpdecision msm_mpd;

static struct hp_latency hp_latencies;

static unsigned long last_nr;
static int num_present_hundreds;
static ktime_t last_down_time;
static bool ok_to_update_tz(int nr, int last_nr)
{
        /*
         * Suppress unnecessary TZ reports if the run queue average hasn't
         * changed much from the last reported value. The division by
         * rq_avg_divide filters out small changes in the run queue average
         * that will not cause the online cpu mask to change. Also report if
         * the online cpu count does not match the count requested by TZ and
         * we are not already bringing cpus online, as indicated by
         * HPUPDATE_IN_PROGRESS in msm_mpd.hpupdate.
         */
        return
                (((nr / msm_mpd.rq_avg_divide)
                  != (last_nr / msm_mpd.rq_avg_divide))
                 || ((hweight32(atomic_read(&msm_mpd.algo_cpu_mask))
                      != num_online_cpus())
                     && (msm_mpd.hpupdate != HPUPDATE_IN_PROGRESS)));
}
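/*
 * Per-cpu poll timer: samples the scheduler's run queue average every
 * rq_avg_poll_ms and, when the change is significant per
 * ok_to_update_tz(), cancels the pending slack timer and wakes the
 * msm_mpd_do_update_scm thread to report the new value to TZ.
 */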
static enum hrtimer_restart msm_mpd_rq_avg_poll_timer(struct hrtimer *timer)
{
        int nr, nr_iowait;
        ktime_t curr_time = ktime_get();
        unsigned long flags;
        int cpu = smp_processor_id();
        enum hrtimer_restart restart = HRTIMER_RESTART;

        spin_lock_irqsave(&rq_avg_lock, flags);
        /* If running on the wrong cpu, don't restart */
        if (&per_cpu(rq_avg_poll_timer, cpu) != timer)
                restart = HRTIMER_NORESTART;

        if (ktime_to_ns(ktime_sub(curr_time, msm_mpd.next_update)) < 0)
                goto out;

        msm_mpd.next_update = ktime_add_ns(curr_time,
                        (msm_mpd.rq_avg_poll_ms * NSEC_PER_MSEC));

        sched_get_nr_running_avg(&nr, &nr_iowait);

        if ((nr_iowait >= msm_mpd.iowait_threshold_pct) && (nr < last_nr))
                nr = last_nr;

        if (nr > num_present_hundreds)
                nr = num_present_hundreds;

        trace_msm_mp_runq("nr_running", nr);

        if (ok_to_update_tz(nr, last_nr)) {
                hrtimer_try_to_cancel(&msm_mpd.slack_timer);
                msm_mpd.data.nr = nr;
                msm_mpd.data.event = MSM_DCVS_SCM_RUNQ_UPDATE;
                wake_up(&msm_mpd.wait_q);
                last_nr = nr;
        }

out:
        /* set next expiration */
        hrtimer_set_expires(timer, msm_mpd.next_update);
        spin_unlock_irqrestore(&rq_avg_lock, flags);
        return restart;
}
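/*
 * Online a core and report the hotplug latency (in usec) together with
 * the core's current frequency to TZ via MSM_DCVS_SCM_CORE_ONLINE.
 */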
static void bring_up_cpu(int cpu)
{
        int cpu_action_time_ms;
        int time_taken_ms;
        int ret, ret1, ret2;

        cpu_action_time_ms = ktime_to_ms(ktime_get());
        ret = cpu_up(cpu);
        if (ret) {
                pr_debug("Error %d onlining core %d\n", ret, cpu);
        } else {
                time_taken_ms = ktime_to_ms(ktime_get()) - cpu_action_time_ms;
                if (time_taken_ms > hp_latencies.hp_up_max_ms)
                        hp_latencies.hp_up_max_ms = time_taken_ms;
                hp_latencies.hp_up_ms += time_taken_ms;
                hp_latencies.hp_up_count++;
                ret = msm_dcvs_scm_event(
                                CPU_OFFSET + cpu,
                                MSM_DCVS_SCM_CORE_ONLINE,
                                cpufreq_get(cpu),
                                (uint32_t) time_taken_ms * USEC_PER_MSEC,
                                &ret1, &ret2);
                if (ret)
                        pr_err("Error sending hotplug scm event err=%d\n", ret);
        }
}
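/*
 * Offline a core (never core 0) and report the hotplug latency to TZ
 * via MSM_DCVS_SCM_CORE_OFFLINE.
 */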
static void bring_down_cpu(int cpu)
{
        int cpu_action_time_ms;
        int time_taken_ms;
        int ret, ret1, ret2;

        BUG_ON(cpu == 0);
        cpu_action_time_ms = ktime_to_ms(ktime_get());
        ret = cpu_down(cpu);
        if (ret) {
                pr_debug("Error %d offlining core %d\n", ret, cpu);
        } else {
                time_taken_ms = ktime_to_ms(ktime_get()) - cpu_action_time_ms;
                if (time_taken_ms > hp_latencies.hp_dw_max_ms)
                        hp_latencies.hp_dw_max_ms = time_taken_ms;
                hp_latencies.hp_dw_ms += time_taken_ms;
                hp_latencies.hp_dw_count++;
                ret = msm_dcvs_scm_event(
                                CPU_OFFSET + cpu,
                                MSM_DCVS_SCM_CORE_OFFLINE,
                                (uint32_t) time_taken_ms * USEC_PER_MSEC,
                                0,
                                &ret1, &ret2);
                if (ret)
                        pr_err("Error sending hotplug scm event err=%d\n", ret);
        }
}
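/*
 * Forward a run queue update (or QoS timer expiry) to TZ. TZ replies
 * with the cpu mask it wants online and a slack time: the hotplug
 * thread is woken to apply the mask, and the slack timer is re-armed
 * so TZ is notified if no further run queue update arrives in time.
 */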
static int __ref msm_mpd_update_scm(enum msm_dcvs_scm_event event, int nr)
{
        int ret = 0;
        uint32_t req_cpu_mask = 0;
        uint32_t slack_us = 0;
        uint32_t param0 = 0;

        if (event == MSM_DCVS_SCM_RUNQ_UPDATE)
                param0 = nr;

        ret = msm_dcvs_scm_event(0, event, param0, 0,
                                 &req_cpu_mask, &slack_us);
        if (ret) {
                pr_err("Error (%d) sending event %d, param %d\n", ret, event,
                       param0);
                return ret;
        }

        trace_msm_mp_cpusonline("cpu_online_mp", req_cpu_mask);
        trace_msm_mp_slacktime("slack_time_mp", slack_us);
        msm_mpd.slack_us = slack_us;
        atomic_set(&msm_mpd.algo_cpu_mask, req_cpu_mask);
        msm_mpd.hpupdate = HPUPDATE_SCHEDULED;
        wake_up(&msm_mpd.wait_hpq);

        /* Start MP Decision slack timer */
        if (slack_us) {
                hrtimer_cancel(&msm_mpd.slack_timer);
                ret = hrtimer_start(&msm_mpd.slack_timer,
                                    ktime_set(0, slack_us * NSEC_PER_USEC),
                                    HRTIMER_MODE_REL_PINNED);
                if (ret)
                        pr_err("Failed to register slack timer (%d) %d\n",
                               slack_us, ret);
        }

        return ret;
}
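/*
 * Slack timer expiry: no run queue update arrived within the slack
 * time granted by TZ, so post an MPD_QOS_TIMER_EXPIRED event for the
 * update thread (unless a runq update is already pending).
 */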
static enum hrtimer_restart msm_mpd_slack_timer(struct hrtimer *timer)
{
        unsigned long flags;

        trace_printk("mpd:slack_timer_fired!\n");

        spin_lock_irqsave(&rq_avg_lock, flags);
        if (msm_mpd.data.event == MSM_DCVS_SCM_RUNQ_UPDATE)
                goto out;

        msm_mpd.data.nr = 0;
        msm_mpd.data.event = MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED;
        wake_up(&msm_mpd.wait_q);
out:
        spin_unlock_irqrestore(&rq_avg_lock, flags);
        return HRTIMER_NORESTART;
}
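/*
 * The per-cpu poll timer cannot tick while a core is in a low power
 * state, so stop it on CPU_PM_ENTER and restart it on CPU_PM_EXIT (and
 * on CPU_STARTING for a newly onlined core), aligned to next_update.
 */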
static int msm_mpd_idle_notifier(struct notifier_block *self,
                                 unsigned long cmd, void *v)
{
        int cpu = smp_processor_id();
        unsigned long flags;

        switch (cmd) {
        case CPU_PM_EXIT:
                spin_lock_irqsave(&rq_avg_lock, flags);
                hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
                              msm_mpd.next_update,
                              HRTIMER_MODE_ABS_PINNED);
                spin_unlock_irqrestore(&rq_avg_lock, flags);
                break;
        case CPU_PM_ENTER:
                hrtimer_cancel(&per_cpu(rq_avg_poll_timer, cpu));
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static int msm_mpd_hotplug_notifier(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
{
        int cpu = (int)hcpu;
        unsigned long flags;

        switch (action & (~CPU_TASKS_FROZEN)) {
        case CPU_STARTING:
                spin_lock_irqsave(&rq_avg_lock, flags);
                hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
                              msm_mpd.next_update,
                              HRTIMER_MODE_ABS_PINNED);
                spin_unlock_irqrestore(&rq_avg_lock, flags);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block msm_mpd_idle_nb = {
        .notifier_call = msm_mpd_idle_notifier,
};

static struct notifier_block msm_mpd_hotplug_nb = {
        .notifier_call = msm_mpd_hotplug_notifier,
};
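/*
 * Hotplug worker: sleeps until msm_mpd_update_scm() publishes a new
 * algo_cpu_mask, then onlines and offlines cores until the online set
 * matches the mask. Offlining is rate limited to once per 100 ms.
 */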
static int __cpuinit msm_mpd_do_hotplug(void *data)
{
        int *event = (int *)data;
        int cpu;

        while (1) {
                msm_dcvs_update_algo_params();
                wait_event(msm_mpd.wait_hpq, *event || kthread_should_stop());
                if (kthread_should_stop())
                        break;

                msm_mpd.hpupdate = HPUPDATE_IN_PROGRESS;
                /*
                 * Bring online any offline cores, then offline any online
                 * cores. Whenever a core is onlined or offlined, restart the
                 * procedure in case a new core needs to be brought online in
                 * the meantime.
                 */
restart:
                for_each_possible_cpu(cpu) {
                        if ((atomic_read(&msm_mpd.algo_cpu_mask) & (1 << cpu))
                            && !cpu_online(cpu)) {
                                bring_up_cpu(cpu);
                                if (cpu_online(cpu))
                                        goto restart;
                        }
                }

                if (ktime_to_ns(ktime_sub(ktime_get(), last_down_time)) >
                    100 * NSEC_PER_MSEC)
                        for_each_possible_cpu(cpu)
                                if (!(atomic_read(&msm_mpd.algo_cpu_mask) &
                                      (1 << cpu)) && cpu_online(cpu)) {
                                        bring_down_cpu(cpu);
                                        last_down_time = ktime_get();
                                        break;
                                }
                msm_mpd.hpupdate = HPUPDATE_WAITING;
                msm_dcvs_apply_gpu_floor(0);
        }
        return 0;
}
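/*
 * SCM update worker: waits for an event posted by the poll timer or
 * the slack timer, snapshots it under rq_avg_lock and hands it to
 * msm_mpd_update_scm().
 */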
static int msm_mpd_do_update_scm(void *data)
{
        struct msm_mpd_scm_data *scm_data = (struct msm_mpd_scm_data *)data;
        unsigned long flags;
        enum msm_dcvs_scm_event event;
        int nr;

        while (1) {
                wait_event(msm_mpd.wait_q,
                        msm_mpd.data.event == MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED
                        || msm_mpd.data.event == MSM_DCVS_SCM_RUNQ_UPDATE
                        || kthread_should_stop());
                if (kthread_should_stop())
                        break;

                spin_lock_irqsave(&rq_avg_lock, flags);
                event = scm_data->event;
                nr = scm_data->nr;
                scm_data->event = 0;
                scm_data->nr = 0;
                spin_unlock_irqrestore(&rq_avg_lock, flags);

                msm_mpd_update_scm(event, nr);
        }
        return 0;
}
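/*
 * Enable/disable MP decision: push the algo parameters to TZ, then
 * start (or stop) the two worker threads, the per-cpu poll timers and
 * the PM/hotplug notifiers. A no-op if the state is unchanged.
 */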
static int __ref msm_mpd_set_enabled(uint32_t enable)
{
        int ret = 0;
        int ret0 = 0;
        int ret1 = 0;
        int cpu;
        static uint32_t last_enable;

        enable = (enable > 0) ? 1 : 0;
        if (last_enable == enable)
                return ret;

        if (enable) {
                ret = msm_mpd_scm_set_algo_params(&msm_mpd.mp_param);
                if (ret) {
                        pr_err("Error(%d): msm_mpd_scm_set_algo_params failed\n",
                               ret);
                        return ret;
                }
        }

        ret = msm_dcvs_scm_event(0, MSM_DCVS_SCM_MPD_ENABLE, enable, 0,
                                 &ret0, &ret1);
        if (ret) {
                pr_err("Error(%d) %s MP Decision\n",
                       ret, (enable ? "enabling" : "disabling"));
        } else {
                last_enable = enable;
                last_nr = 0;
        }

        if (enable) {
                msm_mpd.next_update = ktime_add_ns(ktime_get(),
                                (msm_mpd.rq_avg_poll_ms * NSEC_PER_MSEC));
                msm_mpd.task = kthread_run(msm_mpd_do_update_scm,
                                           &msm_mpd.data, "msm_mpdecision");
                if (IS_ERR(msm_mpd.task))
                        return -EFAULT;

                msm_mpd.hptask = kthread_run(msm_mpd_do_hotplug,
                                             &msm_mpd.hpupdate, "msm_hp");
                if (IS_ERR(msm_mpd.hptask))
                        return -EFAULT;

                for_each_online_cpu(cpu)
                        hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
                                      msm_mpd.next_update,
                                      HRTIMER_MODE_ABS_PINNED);
                cpu_pm_register_notifier(&msm_mpd_idle_nb);
                register_cpu_notifier(&msm_mpd_hotplug_nb);
                msm_mpd.enabled = 1;
        } else {
                for_each_online_cpu(cpu)
                        hrtimer_cancel(&per_cpu(rq_avg_poll_timer, cpu));
                kthread_stop(msm_mpd.hptask);
                kthread_stop(msm_mpd.task);
                cpu_pm_unregister_notifier(&msm_mpd_idle_nb);
                unregister_cpu_notifier(&msm_mpd_hotplug_nb);
                msm_mpd.enabled = 0;
        }

        return ret;
}
static int msm_mpd_set_rq_avg_poll_ms(uint32_t val)
{
        /*
         * No need to do anything. Just let the timer set its own next poll
         * interval when it next fires.
         */
        msm_mpd.rq_avg_poll_ms = val;
        return 0;
}

static int msm_mpd_set_iowait_threshold_pct(uint32_t val)
{
        /*
         * No need to do anything. The new threshold will be used the next
         * time the poll timer fires.
         */
        msm_mpd.iowait_threshold_pct = val;
        return 0;
}

static int msm_mpd_set_rq_avg_divide(uint32_t val)
{
        /*
         * No need to do anything. The new value will be used the next time
         * the decision is made as to whether to update tz.
         */
        if (val == 0)
                return -EINVAL;

        msm_mpd.rq_avg_divide = val;
        return 0;
}
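/*
 * MPD_ALGO_PARAM generates sysfs show/store handlers for a TZ algo
 * parameter: the store writes the value and re-sends the whole
 * msm_mpd.mp_param block to TZ, rolling back on failure. MPD_PARAM
 * does the same for driver-local parameters via msm_mpd_set_<name>().
 */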
#define MPD_ALGO_PARAM(_name, _param) \
static ssize_t msm_mpd_attr_##_name##_show(struct kobject *kobj, \
                        struct kobj_attribute *attr, char *buf) \
{ \
        return snprintf(buf, PAGE_SIZE, "%d\n", _param); \
} \
static ssize_t msm_mpd_attr_##_name##_store(struct kobject *kobj, \
                struct kobj_attribute *attr, const char *buf, size_t count) \
{ \
        int ret = 0; \
        uint32_t val; \
        uint32_t old_val; \
        mutex_lock(&msm_mpd.lock); \
        ret = kstrtouint(buf, 10, &val); \
        if (ret) { \
                pr_err("Invalid input %s for %s %d\n", \
                       buf, __stringify(_name), ret); \
                /* drop the lock before bailing out on bad input */ \
                mutex_unlock(&msm_mpd.lock); \
                return ret; \
        } \
        old_val = _param; \
        _param = val; \
        ret = msm_mpd_scm_set_algo_params(&msm_mpd.mp_param); \
        if (ret) { \
                pr_err("Error %d returned when setting algo param %s to %d\n",\
                       ret, __stringify(_name), val); \
                _param = old_val; \
        } \
        mutex_unlock(&msm_mpd.lock); \
        return count; \
}

#define MPD_PARAM(_name, _param) \
static ssize_t msm_mpd_attr_##_name##_show(struct kobject *kobj, \
                        struct kobj_attribute *attr, char *buf) \
{ \
        return snprintf(buf, PAGE_SIZE, "%d\n", _param); \
} \
static ssize_t msm_mpd_attr_##_name##_store(struct kobject *kobj, \
                struct kobj_attribute *attr, const char *buf, size_t count) \
{ \
        int ret = 0; \
        uint32_t val; \
        uint32_t old_val; \
        mutex_lock(&msm_mpd.lock); \
        ret = kstrtouint(buf, 10, &val); \
        if (ret) { \
                pr_err("Invalid input %s for %s %d\n", \
                       buf, __stringify(_name), ret); \
                /* drop the lock before bailing out on bad input */ \
                mutex_unlock(&msm_mpd.lock); \
                return ret; \
        } \
        old_val = _param; \
        ret = msm_mpd_set_##_name(val); \
        if (ret) { \
                pr_err("Error %d returned when setting algo param %s to %d\n",\
                       ret, __stringify(_name), val); \
                _param = old_val; \
        } \
        mutex_unlock(&msm_mpd.lock); \
        return count; \
}
#define MPD_RW_ATTRIB(i, _name) \
        msm_mpd.attrib._name.attr.name = __stringify(_name); \
        msm_mpd.attrib._name.attr.mode = S_IRUGO | S_IWUSR; \
        msm_mpd.attrib._name.show = msm_mpd_attr_##_name##_show; \
        msm_mpd.attrib._name.store = msm_mpd_attr_##_name##_store; \
        msm_mpd.attrib.attrib_group.attrs[i] = &msm_mpd.attrib._name.attr;

MPD_PARAM(enabled, msm_mpd.enabled);
MPD_PARAM(rq_avg_poll_ms, msm_mpd.rq_avg_poll_ms);
MPD_PARAM(iowait_threshold_pct, msm_mpd.iowait_threshold_pct);
MPD_PARAM(rq_avg_divide, msm_mpd.rq_avg_divide);
MPD_ALGO_PARAM(em_win_size_min_us, msm_mpd.mp_param.em_win_size_min_us);
MPD_ALGO_PARAM(em_win_size_max_us, msm_mpd.mp_param.em_win_size_max_us);
MPD_ALGO_PARAM(em_max_util_pct, msm_mpd.mp_param.em_max_util_pct);
MPD_ALGO_PARAM(mp_em_rounding_point_min,
               msm_mpd.mp_param.mp_em_rounding_point_min);
MPD_ALGO_PARAM(mp_em_rounding_point_max,
               msm_mpd.mp_param.mp_em_rounding_point_max);
MPD_ALGO_PARAM(online_util_pct_min, msm_mpd.mp_param.online_util_pct_min);
MPD_ALGO_PARAM(online_util_pct_max, msm_mpd.mp_param.online_util_pct_max);
MPD_ALGO_PARAM(slack_time_min_us, msm_mpd.mp_param.slack_time_min_us);
MPD_ALGO_PARAM(slack_time_max_us, msm_mpd.mp_param.slack_time_max_us);
MPD_ALGO_PARAM(hp_up_max_ms, hp_latencies.hp_up_max_ms);
MPD_ALGO_PARAM(hp_up_ms, hp_latencies.hp_up_ms);
MPD_ALGO_PARAM(hp_up_count, hp_latencies.hp_up_count);
MPD_ALGO_PARAM(hp_dw_max_ms, hp_latencies.hp_dw_max_ms);
MPD_ALGO_PARAM(hp_dw_ms, hp_latencies.hp_dw_ms);
MPD_ALGO_PARAM(hp_dw_count, hp_latencies.hp_dw_count);
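/*
 * Probe: expose the tunables above under /sys/module/msm_mpdecision/,
 * seed the defaults, copy the initial algo parameters from platform
 * data and create the debugfs directory.
 */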
static int __devinit msm_mpd_probe(struct platform_device *pdev)
{
        struct kobject *module_kobj = NULL;
        int ret = 0;
        const int attr_count = 20;
        struct msm_mpd_algo_param *param = NULL;

        param = pdev->dev.platform_data;

        module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
        if (!module_kobj) {
                pr_err("Cannot find kobject for module %s\n", KBUILD_MODNAME);
                ret = -ENOENT;
                goto done;
        }

        msm_mpd.attrib.attrib_group.attrs =
                kzalloc(attr_count * sizeof(struct attribute *), GFP_KERNEL);
        if (!msm_mpd.attrib.attrib_group.attrs) {
                ret = -ENOMEM;
                goto done;
        }

        MPD_RW_ATTRIB(0, enabled);
        MPD_RW_ATTRIB(1, rq_avg_poll_ms);
        MPD_RW_ATTRIB(2, iowait_threshold_pct);
        MPD_RW_ATTRIB(3, rq_avg_divide);
        MPD_RW_ATTRIB(4, em_win_size_min_us);
        MPD_RW_ATTRIB(5, em_win_size_max_us);
        MPD_RW_ATTRIB(6, em_max_util_pct);
        MPD_RW_ATTRIB(7, mp_em_rounding_point_min);
        MPD_RW_ATTRIB(8, mp_em_rounding_point_max);
        MPD_RW_ATTRIB(9, online_util_pct_min);
        MPD_RW_ATTRIB(10, online_util_pct_max);
        MPD_RW_ATTRIB(11, slack_time_min_us);
        MPD_RW_ATTRIB(12, slack_time_max_us);
        MPD_RW_ATTRIB(13, hp_up_max_ms);
        MPD_RW_ATTRIB(14, hp_up_ms);
        MPD_RW_ATTRIB(15, hp_up_count);
        MPD_RW_ATTRIB(16, hp_dw_max_ms);
        MPD_RW_ATTRIB(17, hp_dw_ms);
        MPD_RW_ATTRIB(18, hp_dw_count);

        msm_mpd.attrib.attrib_group.attrs[19] = NULL;

        ret = sysfs_create_group(module_kobj, &msm_mpd.attrib.attrib_group);
        if (ret)
                pr_err("Unable to create sysfs objects :%d\n", ret);

        msm_mpd.rq_avg_poll_ms = DEFAULT_RQ_AVG_POLL_MS;
        msm_mpd.rq_avg_divide = DEFAULT_RQ_AVG_DIVIDE;

        memcpy(&msm_mpd.mp_param, param, sizeof(struct msm_mpd_algo_param));

        debugfs_base = debugfs_create_dir("msm_mpdecision", NULL);
        if (!debugfs_base) {
                pr_err("Cannot create debugfs base msm_mpdecision\n");
                ret = -ENOENT;
                goto done;
        }

done:
        if (ret && debugfs_base)
                debugfs_remove(debugfs_base);
        return ret;
}
static int __devexit msm_mpd_remove(struct platform_device *pdev)
{
        platform_set_drvdata(pdev, NULL);
        return 0;
}

static struct platform_driver msm_mpd_driver = {
        .probe = msm_mpd_probe,
        .remove = __devexit_p(msm_mpd_remove),
        .driver = {
                .name = "msm_mpdecision",
                .owner = THIS_MODULE,
        },
};
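/*
 * Init: set up the slack timer and the per-cpu poll timers, then
 * register the platform driver. The worker threads are started only
 * when the driver is enabled via msm_mpd_set_enabled().
 */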
static int __init msm_mpdecision_init(void)
{
        int cpu;

        if (!msm_mpd_enabled) {
                pr_info("Not enabled\n");
                return 0;
        }

        num_present_hundreds = 100 * num_present_cpus();

        hrtimer_init(&msm_mpd.slack_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL_PINNED);
        msm_mpd.slack_timer.function = msm_mpd_slack_timer;

        for_each_possible_cpu(cpu) {
                hrtimer_init(&per_cpu(rq_avg_poll_timer, cpu),
                             CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
                per_cpu(rq_avg_poll_timer, cpu).function
                        = msm_mpd_rq_avg_poll_timer;
        }

        mutex_init(&msm_mpd.lock);
        init_waitqueue_head(&msm_mpd.wait_q);
        init_waitqueue_head(&msm_mpd.wait_hpq);
        return platform_driver_register(&msm_mpd_driver);
}
late_initcall(msm_mpdecision_init);