/* debug_aee.c — sched-debug dump helpers for MediaTek AEE exception reports */
/*
 * Copyright (C) 2016 MediaTek Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
 */
#include <linux/sched.h>
#include "mtk_ram_console.h"
#ifdef CONFIG_MTK_RT_THROTTLE_MON
#include "mtk_rt_mon.h"
#endif
/* sched: aee for sched/debug */
/* #define TEST_SCHED_DEBUG_ENHANCEMENT */
/* Max polling attempts before a trylock helper gives up (10ms apart). */
#define TRYLOCK_NUM 10
#include <linux/delay.h>
/*
 * Scratch line buffer shared by every SEQ_printf_at_AEE() expansion.
 * There is no locking around it; this code runs from the AEE/FIQ dump
 * path — presumably a single CPU with others stopped. TODO confirm.
 */
char print_at_AEE_buffer[160];
/* sched: add rt_exec_task info (per-CPU RT bookkeeping defined elsewhere) */
DECLARE_PER_CPU(u64, rt_throttling_start);
DECLARE_PER_CPU(u64, exec_delta_time);
DECLARE_PER_CPU(u64, clock_task);
DECLARE_PER_CPU(u64, exec_start);
DECLARE_PER_CPU(struct task_struct, exec_task);
DECLARE_PER_CPU(u64, old_rt_time);
DECLARE_PER_CPU(u64, init_rt_time);
DECLARE_PER_CPU(u64, rt_period_time);
/*
 * Format one line into the scratch buffer and push it to the AEE SRAM
 * log.  @m is accepted only for signature parity with SEQ_printf() and
 * is ignored (callers pass NULL).  Lines longer than the 160-byte
 * buffer are silently truncated by snprintf().
 */
#define SEQ_printf_at_AEE(m, x...) \
do { \
	snprintf(print_at_AEE_buffer, sizeof(print_at_AEE_buffer), x); \
	aee_sram_fiq_log(print_at_AEE_buffer); \
} while (0)
/*
 * Dump one task's scheduling statistics to the AEE log, mirroring the
 * per-task line of /proc/sched_debug.  A leading 'R' marks the task
 * currently running on @rq.  The field set depends on
 * CONFIG_SCHEDSTATS (wait/sleep statistics) and CONFIG_CGROUP_SCHED
 * (trailing cgroup path).  Note the full line can exceed the 160-byte
 * SEQ_printf_at_AEE buffer and be truncated.
 */
static void
print_task_at_AEE(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	if (rq->curr == p) {
#ifdef CONFIG_CGROUP_SCHED
		/* running task: 'R' prefix plus cgroup path */
		SEQ_printf_at_AEE(m, "R %15s %5d %9lld.%06ld %9lld %5d %9lld.%06ld %9lld.%06ld %9lld.%06ld %s\n",
			p->comm,
			task_pid_nr(p),
			SPLIT_NS(p->se.vruntime),
			(long long)(p->nvcsw + p->nivcsw),
			p->prio,
			SPLIT_NS(p->se.statistics.wait_sum),
			SPLIT_NS(p->se.sum_exec_runtime),
			SPLIT_NS(p->se.statistics.sum_sleep_runtime),
			task_group_path(task_group(p)));
#else
		SEQ_printf_at_AEE(m, "R %15s %5d %9lld.%06ld %9lld %5d %9lld.%06ld %9lld.%06ld %9lld.%06ld\n",
			p->comm,
			task_pid_nr(p),
			SPLIT_NS(p->se.vruntime),
			(long long)(p->nvcsw + p->nivcsw),
			p->prio,
			SPLIT_NS(p->se.statistics.wait_sum),
			SPLIT_NS(p->se.sum_exec_runtime),
			SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#endif
	} else {
#ifdef CONFIG_CGROUP_SCHED
		/* runnable but not current: blank prefix, same columns */
		SEQ_printf_at_AEE(m, " %15s %5d %9lld.%06ld %9lld %5d %9lld.%06ld %9lld.%06ld %9lld.%06ld %s\n",
			p->comm,
			task_pid_nr(p),
			SPLIT_NS(p->se.vruntime),
			(long long)(p->nvcsw + p->nivcsw),
			p->prio,
			SPLIT_NS(p->se.statistics.wait_sum),
			SPLIT_NS(p->se.sum_exec_runtime),
			SPLIT_NS(p->se.statistics.sum_sleep_runtime),
			task_group_path(task_group(p)));
#else
		SEQ_printf_at_AEE(m, " %15s %5d %9lld.%06ld %9lld %5d %9lld.%06ld %9lld.%06ld %9lld.%06ld\n",
			p->comm,
			task_pid_nr(p),
			SPLIT_NS(p->se.vruntime),
			(long long)(p->nvcsw + p->nivcsw),
			p->prio,
			SPLIT_NS(p->se.statistics.wait_sum),
			SPLIT_NS(p->se.sum_exec_runtime),
			SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#endif
	}
#else
	/* without CONFIG_SCHEDSTATS only sum_exec_runtime is meaningful */
	SEQ_printf_at_AEE(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld",
		0LL, 0L,
		SPLIT_NS(p->se.sum_exec_runtime),
		0LL, 0L);
#endif
}
/* sched: add aee log */
/*
 * Try to take @lock for reading with IRQs disabled.  Evaluates to 1 on
 * success with the saved IRQ state in @flags; on failure IRQs are
 * restored and the expression evaluates to 0.
 */
#define read_trylock_irqsave(lock, flags) \
({ \
	typecheck(unsigned long, flags); \
	local_irq_save(flags); \
	read_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
/*
 * Poll @lock up to TRYLOCK_NUM times, 10ms apart, for read access.
 * Returns non-zero once the lock is held (caller must release it and
 * restore *@flags); returns 0 if every attempt failed, in which case a
 * warning naming @msg — and, with CONFIG_DEBUG_SPINLOCK, a best-effort
 * snapshot of the lock's owner fields — is written to the AEE log so
 * the dump can proceed without the lock.
 */
int read_trylock_n_irqsave(rwlock_t *lock,
	unsigned long *flags, struct seq_file *m, char *msg)
{
	int locked, trylock_cnt = 0;

	do {
		locked = read_trylock_irqsave(lock, *flags);
		trylock_cnt++;
		/* note: the 10ms delay runs even after a successful try */
		mdelay(10);
	} while ((!locked) && (trylock_cnt < TRYLOCK_NUM));
	if (!locked) {
#ifdef CONFIG_DEBUG_SPINLOCK
		struct task_struct *owner = NULL;
#endif
		SEQ_printf_at_AEE(m, "Warning: fail to get lock in %s\n", msg);
#ifdef CONFIG_DEBUG_SPINLOCK
		/* racy read of debug fields — diagnostic only */
		if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
			owner = lock->owner;
#ifdef CONFIG_SMP
		SEQ_printf_at_AEE(m, " lock: %p, .magic: %08x, .owner: %s/%d",
			lock, lock->magic,
			owner ? owner->comm : "<<none>>",
			owner ? task_pid_nr(owner) : -1);
		SEQ_printf_at_AEE(m, ".owner_cpu: %d, value: %d\n",
			lock->owner_cpu, lock->raw_lock.lock);
#else
		SEQ_printf_at_AEE(m, " lock: %p, .magic: %08x, .owner: %s/%d",
			lock, lock->magic,
			owner ? owner->comm : "<<none>>",
			owner ? task_pid_nr(owner) : -1);
		SEQ_printf_at_AEE(m, ".owner_cpu: %d\n", lock->owner_cpu);
#endif
#endif
	}
	return locked;
}
/*
 * Poll @lock (a raw spinlock) up to TRYLOCK_NUM times, 10ms apart.
 * Returns non-zero once the lock is held with IRQs saved in *@flags
 * (caller must release/restore); returns 0 if every attempt failed,
 * after logging a warning naming @msg and — with CONFIG_DEBUG_SPINLOCK
 * — the lock's owner/ticket debug state.  The ARM64 branch prints the
 * ticket-lock owner/next pair; the other branches print the raw slock
 * value.  All debug-field reads are unsynchronized, best-effort.
 */
int raw_spin_trylock_n_irqsave(raw_spinlock_t *lock,
	unsigned long *flags, struct seq_file *m, char *msg)
{
	int locked, trylock_cnt = 0;

	do {
		locked = raw_spin_trylock_irqsave(lock, *flags);
		trylock_cnt++;
		/* note: the 10ms delay runs even after a successful try */
		mdelay(10);
	} while ((!locked) && (trylock_cnt < TRYLOCK_NUM));
	if (!locked) {
#ifdef CONFIG_DEBUG_SPINLOCK
		struct task_struct *owner = NULL;
#endif
		SEQ_printf_at_AEE(m, "Warning: fail to get lock in %s\n", msg);
#ifdef CONFIG_DEBUG_SPINLOCK
		if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
			owner = lock->owner;
#ifdef CONFIG_ARM64
#ifdef CONFIG_SMP
		SEQ_printf_at_AEE(m, " lock: %lx, .magic: %08x, .owner: %s/%d",
			(long)lock, lock->magic,
			owner ? owner->comm : "<<none>>",
			owner ? task_pid_nr(owner) : -1);
		SEQ_printf_at_AEE(m, ".owner_cpu: %d, owner: %hu, next: %hu\n",
			lock->owner_cpu,
			lock->raw_lock.owner, lock->raw_lock.next);
#else
		SEQ_printf_at_AEE(m, " lock: %lx, .magic: %08x, .owner: %s/%d",
			(long)lock, lock->magic,
			owner ? owner->comm : "<<none>>",
			owner ? task_pid_nr(owner) : -1);
		SEQ_printf_at_AEE(m, ".owner_cpu: %d, value: %d\n",
			lock->owner_cpu, lock->raw_lock.slock);
#endif
#else
		/* non-ARM64 (32-bit) build: pointer fits in int here */
		SEQ_printf_at_AEE(m, " lock: %x, .magic: %08x, .owner: %s/%d",
			(int)lock, lock->magic,
			owner ? owner->comm : "<<none>>",
			owner ? task_pid_nr(owner) : -1);
		SEQ_printf_at_AEE(m, ".owner_cpu: %d, value: %d\n",
			lock->owner_cpu, lock->raw_lock.slock);
#endif
#endif
	}
	return locked;
}
  184. int spin_trylock_n_irqsave(spinlock_t *lock,
  185. unsigned long *flags, struct seq_file *m, char *msg)
  186. {
  187. int locked, trylock_cnt = 0;
  188. do {
  189. locked = spin_trylock_irqsave(lock, *flags);
  190. trylock_cnt++;
  191. mdelay(10);
  192. } while ((!locked) && (trylock_cnt < TRYLOCK_NUM));
  193. if (!locked) {
  194. #ifdef CONFIG_DEBUG_SPINLOCK
  195. raw_spinlock_t rlock = lock->rlock;
  196. struct task_struct *owner = NULL;
  197. #endif
  198. SEQ_printf_at_AEE(m, "Warning: fail to get lock in %s\n", msg);
  199. #ifdef CONFIG_DEBUG_SPINLOCK
  200. if (rlock.owner && rlock.owner != SPINLOCK_OWNER_INIT)
  201. owner = rlock.owner;
  202. #ifdef CONFIG_ARM64
  203. #ifdef CONFIG_SMP
  204. SEQ_printf_at_AEE(m, " lock: %lx, .magic: %08x, .owner: %s/%d",
  205. (long)&rlock, rlock.magic,
  206. owner ? owner->comm : "<<none>>",
  207. owner ? task_pid_nr(owner) : -1);
  208. SEQ_printf_at_AEE(m, ".owner_cpu: %d, owner: %hu, next: %hu\n",
  209. rlock.owner_cpu,
  210. rlock.raw_lock.owner, rlock.raw_lock.next);
  211. #else
  212. SEQ_printf_at_AEE(m, " lock: %lx, .magic: %08x, .owner: %s/%d",
  213. (long)&rlock, rlock.magic,
  214. owner ? owner->comm : "<<none>>",
  215. owner ? task_pid_nr(owner) : -1);
  216. SEQ_printf_at_AEE(m, ".owner_cpu: %d, value: %d\n",
  217. rlock.owner_cpu, rlock.raw_lock.slock);
  218. #endif
  219. #else
  220. SEQ_printf_at_AEE(m, " lock: %x, .magic: %08x, .owner: %s/%d",
  221. (int)&rlock, rlock.magic,
  222. owner ? owner->comm : "<<none>>",
  223. owner ? task_pid_nr(owner) : -1);
  224. SEQ_printf_at_AEE(m, ".owner_cpu: %d, value: %d\n",
  225. rlock.owner_cpu, rlock.raw_lock.slock);
  226. #endif
  227. #endif
  228. }
  229. return locked;
  230. }
/*
 * Dump the runnable tasks currently queued on @rq_cpu.  Unlike the
 * stock sched_debug version, tasks not on a runqueue (!p->on_rq) are
 * skipped, so only runnable work is listed.  rcu_read_lock() keeps the
 * process/thread iteration safe; tasklist_lock protection by the
 * caller is best-effort (see sysrq_sched_debug_show_at_AEE).
 */
static void print_rq_at_AEE(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf_at_AEE(m, "\nrunnable tasks:\n");
	SEQ_printf_at_AEE(m,
		" task PID tree-key switches prio wait-time sum-exec sum-sleep\n");
	SEQ_printf_at_AEE(m, "---------------------------------------------------\n");
	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * if (task_cpu(p) != rq_cpu)
		 * sched: only output the runnable tasks,
		 * rather than ALL tasks in runqueues
		 */
		if (!p->on_rq || task_cpu(p) != rq_cpu)
			continue;
		print_task_at_AEE(m, rq, p);
	}
	rcu_read_unlock();
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * Dump the group sched_entity statistics of task group @tg on @cpu.
 * Silently returns if the group has no entity on this CPU (e.g. the
 * root group).  P() prints an integer field, PN() a nanosecond value
 * split into seconds.microseconds via SPLIT_NS().
 */
static void print_cfs_group_stats_at_AEE(struct seq_file *m,
	int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf_at_AEE(m, " .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf_at_AEE(m, " .%-30s: %lld.%06ld\n", \
		#F, SPLIT_NS((long long)F))
	if (!se)
		return;
	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
#endif
#undef PN
#undef P
}
#endif
/*
 * Dump one CFS runqueue's state to the AEE log: vruntime spread, load
 * and PELT averages, throttling state, and (with FAIR_GROUP_SCHED) the
 * owning group's stats.  The rq lock is taken best-effort via
 * raw_spin_trylock_n_irqsave(); if it cannot be obtained the vruntime
 * fields are still read, unlocked, so the dump can proceed.
 */
void print_cfs_rq_at_AEE(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;
	int locked;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf_at_AEE(m, "\ncfs_rq[%d]:%s\n",
		cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf_at_AEE(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf_at_AEE(m, " .%-30s: %lld.%06ld\n", "exec_clock",
		SPLIT_NS(cfs_rq->exec_clock));
	/*raw_spin_lock_irqsave(&rq->lock, flags);*/
	locked = raw_spin_trylock_n_irqsave(&rq->lock,
		&flags, m, "print_cfs_rq_at_AEE");
	/* leftmost / rightmost entities bound the vruntime window */
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	/* CPU0's min_vruntime is read without CPU0's rq lock — racy, diagnostic only */
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	if (locked)
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf_at_AEE(m, " .%-30s: %lld.%06ld\n", "MIN_vruntime",
		SPLIT_NS(MIN_vruntime));
	SEQ_printf_at_AEE(m, " .%-30s: %lld.%06ld\n", "min_vruntime",
		SPLIT_NS(min_vruntime));
	SEQ_printf_at_AEE(m, " .%-30s: %lld.%06ld\n", "max_vruntime",
		SPLIT_NS(max_vruntime));
	/* spread/spread0 are computed but their prints are disabled below */
	spread = max_vruntime - MIN_vruntime;
	/*
	 * SEQ_printf_at_AEE(m, " .%-30s: %Ld.%06ld\n", "spread",
	 * SPLIT_NS(spread));
	 */
	spread0 = min_vruntime - rq0_min_vruntime;
	/*
	 * SEQ_printf_at_AEE(m, " .%-30s: %Ld.%06ld\n", "spread0",
	 * SPLIT_NS(spread0));
	 * SEQ_printf_at_AEE(m, " .%-30s: %d\n", "nr_spread_over",
	 * cfs_rq->nr_spread_over);
	 */
	SEQ_printf_at_AEE(m, " .%-30s: %d\n",
		"nr_running", cfs_rq->nr_running);
	SEQ_printf_at_AEE(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf_at_AEE(m, " .%-30s: %lu\n", "load_avg",
		cfs_rq->avg.load_avg);
	SEQ_printf_at_AEE(m, " .%-30s: %lu\n", "runnable_load_avg",
		cfs_rq->runnable_load_avg);
	SEQ_printf_at_AEE(m, " .%-30s: %lu\n", "util_avg",
		cfs_rq->avg.util_avg);
	SEQ_printf_at_AEE(m, " .%-30s: %ld\n", "removed_load_avg",
		atomic_long_read(&cfs_rq->removed_load_avg));
	SEQ_printf_at_AEE(m, " .%-30s: %ld\n", "removed_util_avg",
		atomic_long_read(&cfs_rq->removed_util_avg));
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf_at_AEE(m, " .%-30s: %lu\n", "tg_load_avg_contrib",
		cfs_rq->tg_load_avg_contrib);
	SEQ_printf_at_AEE(m, " .%-30s: %ld\n", "tg_load_avg",
		atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf_at_AEE(m, " .%-30s: %d\n", "throttled",
		cfs_rq->throttled);
	SEQ_printf_at_AEE(m, " .%-30s: %d\n", "throttle_count",
		cfs_rq->throttle_count);
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats_at_AEE(m, cpu, cfs_rq->tg);
#endif
}
/*
 * Iterate the leaf cfs_rq list of @rq under RCU.  Currently unused in
 * this file: print_cfs_stats_at_AEE() dumps only the root cfs_rq.
 */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
  366. void print_cfs_stats_at_AEE(struct seq_file *m, int cpu)
  367. {
  368. struct cfs_rq *cfs_rq;
  369. rcu_read_lock();
  370. cfs_rq = &cpu_rq(cpu)->cfs;
  371. /*sched: only output / cgroup schedule info*/
  372. print_cfs_rq_at_AEE(m, cpu, cfs_rq);
  373. rcu_read_unlock();
  374. }
/*
 * Dump one RT runqueue's state to the AEE log: running/throttled
 * counters, a snapshot of the last RT task that executed on @cpu (from
 * the per-CPU exec_task bookkeeping), and rt_time/rt_runtime budgets.
 * With CONFIG_RT_GROUP_SCHED the throttling start timestamp of the
 * rq's CPU is also printed.
 */
void print_rt_rq_at_AEE(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	int cpu_rq_throttle = rq_cpu(rt_rq->rq);

	SEQ_printf_at_AEE(m, "\nrt_rq[%d]:%s\n",
		cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf_at_AEE(m, "\nrt_rq[%d]:\n", cpu);
#endif
#define P(x) \
	SEQ_printf_at_AEE(m, " .%-30s: %lld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf_at_AEE(m, " .%-30s: %lld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
	P(rt_nr_running);
	P(rt_throttled);
	/* unsynchronized snapshot of the per-CPU exec_task copy */
	SEQ_printf_at_AEE(m, " exec_task[%d:%s], prio=%d\n",
		per_cpu(exec_task, cpu).pid,
		per_cpu(exec_task, cpu).comm,
		per_cpu(exec_task, cpu).prio);
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf_at_AEE(m, " .rt_throttling_start : [%llu]\n",
		per_cpu(rt_throttling_start, cpu_rq_throttle));
#endif
	PN(rt_time);
	PN(rt_runtime);
#undef PN
#undef P
}
#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Advance @tg to the next entry on the global task_groups list,
 * skipping autogroups.  Returns NULL once the list head is reached.
 * Caller must hold rcu_read_lock() for the list_entry_rcu() walk.
 */
static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
	if (&tg->list == &task_groups)
		tg = NULL;
	return tg;
}
/* Walk every group's rt_rq on @rq's CPU (group scheduling enabled). */
#define for_each_rt_rq(rt_rq, iter, rq) \
	for (iter = container_of(&task_groups, typeof(*iter), list); \
		(iter = next_task_group(iter)) && \
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
#else /* !CONFIG_RT_GROUP_SCHED */
/* Without group scheduling there is exactly one rt_rq: the root's. */
#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
#endif
  422. void print_rt_stats_at_AEE(struct seq_file *m, int cpu)
  423. {
  424. struct rt_rq *rt_rq;
  425. rt_rq = &cpu_rq(cpu)->rt;
  426. rcu_read_lock();
  427. /*sched: only output / cgroup schedule info*/
  428. print_rt_rq_at_AEE(m, cpu, rt_rq);
  429. rcu_read_unlock();
  430. }
  431. void print_dl_rq_at_AEE(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
  432. {
  433. SEQ_printf_at_AEE(m, "\ndl_rq[%d]:\n", cpu);
  434. SEQ_printf_at_AEE(m, " .%-30s: %ld\n",
  435. "dl_nr_running", dl_rq->dl_nr_running);
  436. }
  437. void print_dl_stats_at_AEE(struct seq_file *m, int cpu)
  438. {
  439. print_dl_rq_at_AEE(m, cpu, &cpu_rq(cpu)->dl);
  440. }
/*
 * Dump one CPU's full scheduler state to the AEE log: runqueue
 * counters, clocks, cpu_load history, then the CFS/RT/DL runqueues and
 * the runnable-task list.  sched_debug_lock is taken best-effort; if
 * it cannot be obtained the dump proceeds unlocked.
 */
static void print_cpu_at_AEE(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;
	int locked;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf_at_AEE(m, "cpu#%d, %u.%03u MHz\n",
			cpu, freq / 1000, (freq % 1000));
	}
#else
	/* sched: add cpu info */
	SEQ_printf_at_AEE(m, "cpu#%d: %s\n", cpu,
		cpu_is_offline(cpu) ? "Offline" : "Online");
#endif
	/* P() picks a 32- vs 64-bit print based on the field's size */
#define P(x) \
do { \
	if (sizeof(rq->x) == 4) \
		SEQ_printf_at_AEE(m, " .%-30s: %ld\n", \
			#x, (long)(rq->x)); \
	else \
		SEQ_printf_at_AEE(m, " .%-30s: %lld\n", \
			#x, (long long)(rq->x)); \
} while (0)
#define PN(x) \
	SEQ_printf_at_AEE(m, " .%-30s: %lld.%06ld\n", #x, SPLIT_NS(rq->x))
	P(nr_running);
	SEQ_printf_at_AEE(m, " .%-30s: %lu\n", "load",
		rq->load.weight);
	/*P(nr_switches);*/
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf_at_AEE(m, " .%-30s: %ld\n",
		"curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
	/* the five cpu_load decay windows on one line */
	SEQ_printf_at_AEE(m, " .%-30s: %ld %ld %ld %ld %ld\n", "cpu_load",
		(long)(rq->cpu_load[0]),
		(long)(rq->cpu_load[1]),
		(long)(rq->cpu_load[2]),
		(long)(rq->cpu_load[3]),
		(long)(rq->cpu_load[4]));
	/*
	 * P(cpu_load[0]);
	 * P(cpu_load[1]);
	 * P(cpu_load[2]);
	 * P(cpu_load[3]);
	 * P(cpu_load[4]);
	 */
#undef P
#undef PN
#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf_at_AEE(m, " .%-30s: %d\n", #n, rq->n)
#define P64(n) SEQ_printf_at_AEE(m, " .%-30s: %lld\n", #n, rq->n)
	/*
	 * P(yld_count);
	 * P(sched_count);
	 * P(sched_goidle);
	 */
#ifdef CONFIG_SMP
	P64(avg_idle);
	P64(max_idle_balance_cost);
#endif
	/*
	 * P(ttwu_count);
	 * P(ttwu_local);
	 */
#undef P
#undef P64
#endif
	/*spin_lock_irqsave_lock_irqsave(&sched_debug_lock, flags);*/
	locked = spin_trylock_n_irqsave(&sched_debug_lock,
		&flags, m, "print_cpu_at_AEE");
	print_cfs_stats_at_AEE(m, cpu);
	print_rt_stats_at_AEE(m, cpu);
	print_dl_stats_at_AEE(m, cpu);
	rcu_read_lock();
	print_rq_at_AEE(m, rq, cpu);
	SEQ_printf_at_AEE(m, "============================================\n");
	rcu_read_unlock();
	/*spin_unlock_irqrestore(&sched_debug_lock, flags);*/
	if (locked)
		spin_unlock_irqrestore(&sched_debug_lock, flags);
}
/*
 * Emit the sched-debug banner to the AEE log: version line, clock
 * snapshots, and the sysctl_sched tunables.  @m is ignored by
 * SEQ_printf_at_AEE (callers pass NULL).  Under
 * TEST_SCHED_DEBUG_ENHANCEMENT the locks are taken and deliberately
 * never released — presumably to exercise the trylock-failure
 * fallbacks in the dump path; NOTE(review): confirm before enabling.
 */
static void sched_debug_header_at_AEE(struct seq_file *m)
{
	u64 sched_clk, cpu_clk;
	unsigned long flags;

#ifdef TEST_SCHED_DEBUG_ENHANCEMENT
	struct rq *rq = cpu_rq(0);

	/* lock_timekeeper(); */
	raw_spin_lock_irq(&rq->lock);
	spin_lock_irqsave(&sched_debug_lock, flags);
	write_lock_irqsave(&tasklist_lock, flags);
#endif
	/* sample both clocks with IRQs off so they are coherent */
	local_irq_save(flags);
	/*ktime = ktime_to_ns(ktime_get());*/
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);
	SEQ_printf_at_AEE(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
#define P(x) \
	SEQ_printf_at_AEE(m, "%-40s: %lld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf_at_AEE(m, "%-40s: %lld.%06ld\n", #x, SPLIT_NS(x))
	/*PN(ktime);*/
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P
	/*SEQ_printf_at_AEE(m, "\n");*/
	SEQ_printf_at_AEE(m, "sysctl_sched\n");
#define P(x) \
	SEQ_printf_at_AEE(m, " .%-40s: %lld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf_at_AEE(m, " .%-40s: %lld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P
	SEQ_printf_at_AEE(m, " .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf_at_AEE(m, "\n");
}
/*
 * Entry point for the AEE exception dump: print the sched-debug header
 * followed by the full scheduler state of every possible CPU (offline
 * CPUs included, unlike the sysrq variant which walks online CPUs).
 * tasklist_lock is taken best-effort; the dump proceeds even if it
 * cannot be obtained.  With CONFIG_MTK_RT_THROTTLE_MON the buffered RT
 * throttle monitor records are appended at the end.
 */
void sysrq_sched_debug_show_at_AEE(void)
{
	int cpu;
	unsigned long flags;
	int locked;

	sched_debug_header_at_AEE(NULL);
	/* read_lock_irqsave(&tasklist_lock, flags); */
	locked = read_trylock_n_irqsave(&tasklist_lock,
		&flags, NULL, "sched_debug_show_at_AEE");
	/* for_each_online_cpu(cpu) */
	for_each_possible_cpu(cpu) {
		print_cpu_at_AEE(NULL, cpu);
	}
	if (locked)
		read_unlock_irqrestore(&tasklist_lock, flags);
#ifdef CONFIG_MTK_RT_THROTTLE_MON
	/* sched:rt throttle monitor */
	mt_rt_mon_print_task_from_buffer();
#endif
}