ladder.c 4.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183
  1. /*
  2. * ladder.c - the residency ladder algorithm
  3. *
  4. * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  5. * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  6. * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
  7. *
  8. * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
  9. * Shaohua Li <shaohua.li@intel.com>
  10. * Adam Belay <abelay@novell.com>
  11. *
  12. * This code is licenced under the GPL.
  13. */
  14. #include <linux/kernel.h>
  15. #include <linux/cpuidle.h>
  16. #include <linux/pm_qos_params.h>
  17. #include <linux/moduleparam.h>
  18. #include <linux/jiffies.h>
  19. #include <asm/io.h>
  20. #include <asm/uaccess.h>
  21. #define PROMOTION_COUNT 4
  22. #define DEMOTION_COUNT 1
/*
 * Per-idle-state bookkeeping for the ladder governor: static thresholds
 * that decide when to move a rung, and running streak counters.
 */
struct ladder_device_state {
	struct {
		u32 promotion_count;	/* promotion-worthy sleeps required before moving deeper */
		u32 demotion_count;	/* demotion-worthy sleeps required before moving shallower */
		u32 promotion_time;	/* residency above this counts toward promotion (presumably usecs, from exit_latency — see ladder_enable_device) */
		u32 demotion_time;	/* residency below this counts toward demotion (same units) */
	} threshold;
	struct {
		int promotion_count;	/* current streak of long-enough residencies */
		int demotion_count;	/* current streak of too-short residencies */
	} stats;
};
/* Per-CPU governor instance: one rung entry per idle state plus the rung last chosen. */
struct ladder_device {
	struct ladder_device_state states[CPUIDLE_STATE_MAX];
	int last_state_idx;	/* index of the most recently selected idle state */
};

static DEFINE_PER_CPU(struct ladder_device, ladder_devices);
  40. /**
  41. * ladder_do_selection - prepares private data for a state change
  42. * @ldev: the ladder device
  43. * @old_idx: the current state index
  44. * @new_idx: the new target state index
  45. */
  46. static inline void ladder_do_selection(struct ladder_device *ldev,
  47. int old_idx, int new_idx)
  48. {
  49. ldev->states[old_idx].stats.promotion_count = 0;
  50. ldev->states[old_idx].stats.demotion_count = 0;
  51. ldev->last_state_idx = new_idx;
  52. }
  53. /**
  54. * ladder_select_state - selects the next state to enter
  55. * @dev: the CPU
  56. */
  57. static int ladder_select_state(struct cpuidle_device *dev)
  58. {
  59. struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
  60. struct ladder_device_state *last_state;
  61. int last_residency, last_idx = ldev->last_state_idx;
  62. int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
  63. /* Special case when user has set very strict latency requirement */
  64. if (unlikely(latency_req == 0)) {
  65. ladder_do_selection(ldev, last_idx, 0);
  66. return 0;
  67. }
  68. last_state = &ldev->states[last_idx];
  69. if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
  70. last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency;
  71. else
  72. last_residency = last_state->threshold.promotion_time + 1;
  73. /* consider promotion */
  74. if (last_idx < dev->state_count - 1 &&
  75. last_residency > last_state->threshold.promotion_time &&
  76. dev->states[last_idx + 1].exit_latency <= latency_req) {
  77. last_state->stats.promotion_count++;
  78. last_state->stats.demotion_count = 0;
  79. if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
  80. ladder_do_selection(ldev, last_idx, last_idx + 1);
  81. return last_idx + 1;
  82. }
  83. }
  84. /* consider demotion */
  85. if (last_idx > CPUIDLE_DRIVER_STATE_START &&
  86. dev->states[last_idx].exit_latency > latency_req) {
  87. int i;
  88. for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
  89. if (dev->states[i].exit_latency <= latency_req)
  90. break;
  91. }
  92. ladder_do_selection(ldev, last_idx, i);
  93. return i;
  94. }
  95. if (last_idx > CPUIDLE_DRIVER_STATE_START &&
  96. last_residency < last_state->threshold.demotion_time) {
  97. last_state->stats.demotion_count++;
  98. last_state->stats.promotion_count = 0;
  99. if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
  100. ladder_do_selection(ldev, last_idx, last_idx - 1);
  101. return last_idx - 1;
  102. }
  103. }
  104. /* otherwise remain at the current state */
  105. return last_idx;
  106. }
  107. /**
  108. * ladder_enable_device - setup for the governor
  109. * @dev: the CPU
  110. */
  111. static int ladder_enable_device(struct cpuidle_device *dev)
  112. {
  113. int i;
  114. struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
  115. struct ladder_device_state *lstate;
  116. struct cpuidle_state *state;
  117. ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
  118. for (i = 0; i < dev->state_count; i++) {
  119. state = &dev->states[i];
  120. lstate = &ldev->states[i];
  121. lstate->stats.promotion_count = 0;
  122. lstate->stats.demotion_count = 0;
  123. lstate->threshold.promotion_count = PROMOTION_COUNT;
  124. lstate->threshold.demotion_count = DEMOTION_COUNT;
  125. if (i < dev->state_count - 1)
  126. lstate->threshold.promotion_time = state->exit_latency;
  127. if (i > 0)
  128. lstate->threshold.demotion_time = state->exit_latency;
  129. }
  130. return 0;
  131. }
/* Governor descriptor registered with the cpuidle core. */
static struct cpuidle_governor ladder_governor = {
	.name =		"ladder",
	.rating =	10,	/* relative preference vs other governors; higher wins */
	.enable =	ladder_enable_device,
	.select =	ladder_select_state,
	.owner =	THIS_MODULE,
};
/**
 * init_ladder - initializes the governor
 *
 * Registers the ladder governor with the cpuidle core.  Returns 0 on
 * success or the negative error code from cpuidle_register_governor().
 */
static int __init init_ladder(void)
{
	return cpuidle_register_governor(&ladder_governor);
}
/**
 * exit_ladder - exits the governor
 *
 * Unregisters the ladder governor from the cpuidle core on module unload.
 */
static void __exit exit_ladder(void)
{
	cpuidle_unregister_governor(&ladder_governor);
}
/* Module boilerplate: GPL license and entry/exit hooks. */
MODULE_LICENSE("GPL");
module_init(init_ladder);
module_exit(exit_ladder);