kgsl_pwrctrl.c 46 KB

/* Copyright (c) 2010-2014,2016 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/export.h>
#include <linux/interrupt.h>
#include <asm/page.h>
#include <linux/pm_runtime.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#include <linux/ktime.h>
#include <linux/delay.h>

#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"
#include "kgsl_sharedmem.h"

#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON   1
#define KGSL_PWRFLAGS_AXI_ON   2
#define KGSL_PWRFLAGS_IRQ_ON   3

#define UPDATE_BUSY_VAL		1000000
#define UPDATE_BUSY		50

/*
 * Expected delay for post-interrupt processing on A3xx.
 * The delay may be longer, so gradually increase the delay
 * to compensate. If the GPU isn't done by the max delay,
 * it's working on something other than just the final
 * command sequence, so stop waiting for it to become idle.
 */
#define INIT_UDELAY 200
#define MAX_UDELAY 2000

struct clk_pair {
	const char *name;
	uint map;
};

struct clk_pair clks[KGSL_MAX_CLKS] = {
	{
		.name = "src_clk",
		.map = KGSL_CLK_SRC,
	},
	{
		.name = "core_clk",
		.map = KGSL_CLK_CORE,
	},
	{
		.name = "iface_clk",
		.map = KGSL_CLK_IFACE,
	},
	{
		.name = "mem_clk",
		.map = KGSL_CLK_MEM,
	},
	{
		.name = "mem_iface_clk",
		.map = KGSL_CLK_MEM_IFACE,
	},
	{
		.name = "alt_mem_iface_clk",
		.map = KGSL_CLK_ALT_MEM_IFACE,
	},
};
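/*
 * Note: each entry in clks[] pairs a clock-framework consumer name with a
 * KGSL_CLK_* bit, and kgsl_pwrctrl_init() only calls clk_get() for entries
 * whose bit is set in pdata->clk_map. A minimal sketch of that matching
 * (illustrative only; the real loop lives in kgsl_pwrctrl_init() below):
 *
 *	if (pdata->clk_map & clks[i].map)
 *		pwr->grp_clks[i] = clk_get(&pdev->dev, clks[i].name);
 *
 * grp_clks[0] ("src_clk") is the rate-setting clock; if the board does not
 * provide one, the core clock is used in its place.
 */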
static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
	int requested_state);
static void kgsl_pwrctrl_axi(struct kgsl_device *device, int state);
static void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state);

/* Update the elapsed time at a particular clock level
 * if the device is active (on_time = true). Otherwise
 * store it as sleep time.
 */
static void update_clk_statistics(struct kgsl_device *device,
				bool on_time)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_clk_stats *clkstats = &pwr->clk_stats;
	ktime_t elapsed;
	int elapsed_us;
	if (clkstats->start.tv64 == 0)
		clkstats->start = ktime_get();
	clkstats->stop = ktime_get();
	elapsed = ktime_sub(clkstats->stop, clkstats->start);
	elapsed_us = ktime_to_us(elapsed);
	clkstats->elapsed += elapsed_us;
	if (on_time)
		clkstats->clock_time[pwr->active_pwrlevel] += elapsed_us;
	else
		clkstats->clock_time[pwr->num_pwrlevels - 1] += elapsed_us;
	clkstats->start = ktime_get();
}
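/*
 * Accounting note: busy time is charged to the bucket for the pwrlevel that
 * was active during the interval, while idle time is lumped into the last
 * bucket (index num_pwrlevels - 1, the lowest-frequency level). Those
 * buckets are what the gputop sysfs file reports per level, and their sum
 * over the active levels is the "on time" shown by the gpubusy file.
 */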
/*
 * Given a requested power level do bounds checking on the constraints and
 * return the nearest possible level
 */
static inline int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level)
{
	int max_pwrlevel = max_t(int, pwr->thermal_pwrlevel, pwr->max_pwrlevel);
	int min_pwrlevel = max_t(int, pwr->thermal_pwrlevel, pwr->min_pwrlevel);

	if (level < max_pwrlevel)
		return max_pwrlevel;
	if (level > min_pwrlevel)
		return min_pwrlevel;

	return level;
}
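/*
 * Pwrlevel indices are inverted with respect to speed: level 0 is the
 * fastest setting and num_pwrlevels - 1 the slowest, so a *higher*
 * constraint index means a *lower* clock. Worked example (illustrative
 * values, not from a real board): with thermal_pwrlevel = 1,
 * max_pwrlevel = 0 and min_pwrlevel = 3, the effective ceiling is
 * max(1, 0) = 1 and the floor is max(1, 3) = 3, so a request for level 0
 * is clamped up to 1, a request for level 5 is clamped down to 3, and a
 * request for level 2 is returned unchanged.
 */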
void kgsl_pwrctrl_buslevel_update(struct kgsl_device *device,
			bool on)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int cur = pwr->pwrlevels[pwr->active_pwrlevel].bus_freq;
	int buslevel = 0;

	if (!pwr->pcl)
		return;

	/* the bus should be ON to update the active frequency */
	if (on && !(test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)))
		return;
	/*
	 * If the bus should remain on, calculate our request and submit it;
	 * otherwise request bus level 0, off.
	 */
	if (on) {
		buslevel = min_t(int, pwr->pwrlevels[0].bus_freq,
				cur + pwr->bus_mod);
		buslevel = max_t(int, buslevel, 1);
	}
	msm_bus_scale_client_update_request(pwr->pcl, buslevel);
	trace_kgsl_pwrlevel(device, pwr->active_pwrlevel, buslevel);
}
EXPORT_SYMBOL(kgsl_pwrctrl_buslevel_update);
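/*
 * Bus vote sketch: the vote starts from the bus_freq index of the active
 * pwrlevel and is nudged by bus_mod (a signed bias that is reset to zero on
 * every pwrlevel change), then clamped to [1, pwrlevels[0].bus_freq]. For
 * example (illustrative numbers): with cur = 3 and bus_mod = -1 the request
 * is 2; with cur = 3, bus_mod = +2 and pwrlevels[0].bus_freq = 4 the request
 * is clamped to 4. Level 0 is reserved for "bus off".
 */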
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_pwrlevel *pwrlevel;
	int delta, level;

	/* Adjust the power level to the current constraints */
	new_level = _adjust_pwrlevel(pwr, new_level);

	if (new_level == pwr->active_pwrlevel)
		return;

	delta = new_level < pwr->active_pwrlevel ? -1 : 1;

	update_clk_statistics(device, true);

	level = pwr->active_pwrlevel;

	/*
	 * Set the active powerlevel first in case the clocks are off - if we
	 * don't do this then the pwrlevel change won't take effect when the
	 * clocks come back
	 */
	pwr->active_pwrlevel = new_level;
	pwr->bus_mod = 0;
	pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel];

	kgsl_pwrctrl_buslevel_update(device, true);

	if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags))
		if (pwr->ebi1_clk)
			clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);

	if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags) ||
		(device->state == KGSL_STATE_NAP)) {
		/*
		 * On some platforms, changing the clock frequency while the
		 * core is busy causes instability. Idle the GPU core before
		 * changing the clock frequency.
		 */
		if (pwr->idle_needed == true)
			device->ftbl->idle(device);

		/*
		 * Don't shift by more than one level at a time to
		 * avoid glitches.
		 */
		while (level != new_level) {
			level += delta;
			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[level].gpu_freq);
		}
	}

	trace_kgsl_pwrlevel(device, pwr->active_pwrlevel, pwrlevel->gpu_freq);
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
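/*
 * Stepping example (illustrative): moving from active_pwrlevel 3 to
 * new_level 0 sets delta = -1 and walks the rate through levels 2, 1, 0,
 * issuing one clk_set_rate() per intermediate frequency rather than one
 * large jump, which is what the "avoid glitches" comment above refers to.
 */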
static int kgsl_pwrctrl_thermal_pwrlevel_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int ret;
	unsigned int level = 0;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	ret = kgsl_sysfs_store(buf, &level);
	if (ret)
		return ret;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

	if (level > pwr->num_pwrlevels - 2)
		level = pwr->num_pwrlevels - 2;

	pwr->thermal_pwrlevel = level;

	/*
	 * If there is no power policy, set the clock to the requested thermal
	 * level - if thermal now happens to be higher than max, then that will
	 * be limited by the pwrlevel change function. Otherwise, if there is
	 * a policy, only change the active clock if it is higher than the new
	 * thermal level.
	 */
	if (pwr->thermal_pwrlevel > pwr->active_pwrlevel)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return count;
}
static int kgsl_pwrctrl_thermal_pwrlevel_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n", pwr->thermal_pwrlevel);
}

static int kgsl_pwrctrl_max_pwrlevel_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int ret, max_level;
	unsigned int level = 0;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	ret = kgsl_sysfs_store(buf, &level);
	if (ret)
		return ret;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

	/* You can't set a maximum power level lower than the minimum */
	if (level > pwr->min_pwrlevel)
		level = pwr->min_pwrlevel;

	pwr->max_pwrlevel = level;

	max_level = max_t(int, pwr->thermal_pwrlevel, pwr->max_pwrlevel);

	/*
	 * If there is no policy then move to max by default. Otherwise only
	 * move max if the current level happens to be higher than the new max.
	 */
	if (max_level > pwr->active_pwrlevel)
		kgsl_pwrctrl_pwrlevel_change(device, max_level);

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return count;
}

static int kgsl_pwrctrl_max_pwrlevel_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n", pwr->max_pwrlevel);
}
static int kgsl_pwrctrl_min_pwrlevel_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int ret, min_level;
	unsigned int level = 0;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	ret = kgsl_sysfs_store(buf, &level);
	if (ret)
		return ret;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

	if (level > pwr->num_pwrlevels - 2)
		level = pwr->num_pwrlevels - 2;

	/* You can't set a minimum power level lower than the maximum */
	if (level < pwr->max_pwrlevel)
		level = pwr->max_pwrlevel;

	pwr->min_pwrlevel = level;

	min_level = max_t(int, pwr->thermal_pwrlevel, pwr->min_pwrlevel);

	/*
	 * Only move the power level higher if the minimum is higher than the
	 * current level.
	 */
	if (min_level < pwr->active_pwrlevel)
		kgsl_pwrctrl_pwrlevel_change(device, min_level);

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return count;
}

static int kgsl_pwrctrl_min_pwrlevel_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n", pwr->min_pwrlevel);
}
static int kgsl_pwrctrl_num_pwrlevels_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n", pwr->num_pwrlevels - 1);
}

/* Given a GPU clock value, return the lowest matching powerlevel */
static int _get_nearest_pwrlevel(struct kgsl_pwrctrl *pwr, unsigned int clock)
{
	int i;

	for (i = pwr->num_pwrlevels - 1; i >= 0; i--) {
		if (abs(pwr->pwrlevels[i].gpu_freq - clock) < 5000000)
			return i;
	}

	return -ERANGE;
}
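/*
 * Matching example (illustrative): the scan runs from the slowest level
 * upward and accepts the first gpu_freq within 5 MHz of the request, so
 * with levels {450 MHz, 320 MHz, 200 MHz} a write of 198000000 matches
 * the 200 MHz level, while a write of 250000000 matches nothing and
 * returns -ERANGE, leaving the current setting untouched.
 */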
static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	unsigned int val = 0;
	int ret, level;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	ret = kgsl_sysfs_store(buf, &val);
	if (ret)
		return ret;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	level = _get_nearest_pwrlevel(pwr, val);
	if (level < 0)
		goto done;

	pwr->thermal_pwrlevel = level;

	/*
	 * if the thermal limit is lower than the current setting,
	 * move the speed down immediately
	 */
	if (pwr->thermal_pwrlevel > pwr->active_pwrlevel)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);

done:
	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
	return count;
}

static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
}
static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	unsigned int val = 0;
	int ret, level;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	ret = kgsl_sysfs_store(buf, &val);
	if (ret)
		return ret;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	level = _get_nearest_pwrlevel(pwr, val);
	if (level >= 0)
		kgsl_pwrctrl_pwrlevel_change(device, level);

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
	return count;
}

static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%ld\n", kgsl_pwrctrl_active_freq(pwr));
}

static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	unsigned int val = 0;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int ret;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	ret = kgsl_sysfs_store(buf, &val);
	if (ret)
		return ret;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

	/* Let the timeout be requested in ms, store in jiffies. */
	pwr->interval_timeout = msecs_to_jiffies(val);

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return count;
}

static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	/* Show the idle_timeout in msec */
	return snprintf(buf, PAGE_SIZE, "%d\n",
		jiffies_to_msecs(device->pwrctrl.interval_timeout));
}
static int kgsl_pwrctrl_pmqos_latency_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	unsigned int val = 0;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	int ret;

	if (device == NULL)
		return 0;

	ret = kgsl_sysfs_store(buf, &val);
	if (ret)
		return ret;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	device->pwrctrl.pm_qos_latency = val;
	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return count;
}

static int kgsl_pwrctrl_pmqos_latency_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n",
		device->pwrctrl.pm_qos_latency);
}

static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	int ret;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_clk_stats *clkstats;

	if (device == NULL)
		return 0;
	clkstats = &device->pwrctrl.clk_stats;
	ret = snprintf(buf, PAGE_SIZE, "%7d %7d\n",
			clkstats->on_time_old, clkstats->elapsed_old);
	if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		clkstats->on_time_old = 0;
		clkstats->elapsed_old = 0;
	}
	return ret;
}

static int kgsl_pwrctrl_gputop_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	int ret;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_clk_stats *clkstats;
	int i = 0;
	char *ptr = buf;

	if (device == NULL)
		return 0;
	clkstats = &device->pwrctrl.clk_stats;
	ret = snprintf(buf, PAGE_SIZE, "%7d %7d ", clkstats->on_time_old,
					clkstats->elapsed_old);
	for (i = 0, ptr += ret; i < device->pwrctrl.num_pwrlevels;
							i++, ptr += ret)
		ret = snprintf(ptr, PAGE_SIZE, "%7d ",
						clkstats->old_clock_time[i]);

	if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		clkstats->on_time_old = 0;
		clkstats->elapsed_old = 0;
		for (i = 0; i < KGSL_MAX_PWRLEVELS; i++)
			clkstats->old_clock_time[i] = 0;
	}
	return (unsigned int) (ptr - buf);
}
static int kgsl_pwrctrl_gpu_available_frequencies_show(
					struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int index, num_chars = 0;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	for (index = 0; index < pwr->num_pwrlevels - 1; index++)
		num_chars += snprintf(buf + num_chars, PAGE_SIZE, "%d ",
			pwr->pwrlevels[index].gpu_freq);
	buf[num_chars++] = '\n';
	return num_chars;
}

static int kgsl_pwrctrl_reset_count_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n", device->reset_counter);
}
static void __force_on(struct kgsl_device *device, int flag, int on)
{
	if (on) {
		switch (flag) {
		case KGSL_PWRFLAGS_CLK_ON:
			kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON,
				KGSL_STATE_ACTIVE);
			break;
		case KGSL_PWRFLAGS_AXI_ON:
			kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
			break;
		case KGSL_PWRFLAGS_POWER_ON:
			kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
			break;
		}
		set_bit(flag, &device->pwrctrl.ctrl_flags);
	} else {
		clear_bit(flag, &device->pwrctrl.ctrl_flags);
	}
}

static int __force_on_show(struct device *dev,
					struct device_attribute *attr,
					char *buf, int flag)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n",
		test_bit(flag, &device->pwrctrl.ctrl_flags));
}

static int __force_on_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count,
					int flag)
{
	unsigned int val = 0;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	int ret;

	if (device == NULL)
		return 0;

	ret = kgsl_sysfs_store(buf, &val);
	if (ret)
		return ret;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	__force_on(device, flag, val);
	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return count;
}

static int kgsl_pwrctrl_force_clk_on_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_CLK_ON);
}

static int kgsl_pwrctrl_force_clk_on_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_CLK_ON);
}

static int kgsl_pwrctrl_force_bus_on_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_AXI_ON);
}

static int kgsl_pwrctrl_force_bus_on_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_AXI_ON);
}

static int kgsl_pwrctrl_force_rail_on_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_POWER_ON);
}

static int kgsl_pwrctrl_force_rail_on_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_POWER_ON);
}
static ssize_t kgsl_pwrctrl_bus_split_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n",
		device->pwrctrl.bus_control);
}

static ssize_t kgsl_pwrctrl_bus_split_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	unsigned int val = 0;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	int ret;

	if (device == NULL)
		return 0;

	ret = kgsl_sysfs_store(buf, &val);
	if (ret)
		return ret;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	device->pwrctrl.bus_control = val ? true : false;
	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return count;
}
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0444, kgsl_pwrctrl_gpubusy_show,
	NULL);
DEVICE_ATTR(gputop, 0444, kgsl_pwrctrl_gputop_show,
	NULL);
DEVICE_ATTR(gpu_available_frequencies, 0444,
	kgsl_pwrctrl_gpu_available_frequencies_show,
	NULL);
DEVICE_ATTR(max_pwrlevel, 0644,
	kgsl_pwrctrl_max_pwrlevel_show,
	kgsl_pwrctrl_max_pwrlevel_store);
DEVICE_ATTR(min_pwrlevel, 0644,
	kgsl_pwrctrl_min_pwrlevel_show,
	kgsl_pwrctrl_min_pwrlevel_store);
DEVICE_ATTR(thermal_pwrlevel, 0644,
	kgsl_pwrctrl_thermal_pwrlevel_show,
	kgsl_pwrctrl_thermal_pwrlevel_store);
DEVICE_ATTR(num_pwrlevels, 0444,
	kgsl_pwrctrl_num_pwrlevels_show,
	NULL);
DEVICE_ATTR(pmqos_latency, 0644,
	kgsl_pwrctrl_pmqos_latency_show,
	kgsl_pwrctrl_pmqos_latency_store);
DEVICE_ATTR(reset_count, 0444,
	kgsl_pwrctrl_reset_count_show,
	NULL);
DEVICE_ATTR(force_clk_on, 0644,
	kgsl_pwrctrl_force_clk_on_show,
	kgsl_pwrctrl_force_clk_on_store);
DEVICE_ATTR(force_bus_on, 0644,
	kgsl_pwrctrl_force_bus_on_show,
	kgsl_pwrctrl_force_bus_on_store);
DEVICE_ATTR(force_rail_on, 0644,
	kgsl_pwrctrl_force_rail_on_show,
	kgsl_pwrctrl_force_rail_on_store);
DEVICE_ATTR(bus_split, 0644,
	kgsl_pwrctrl_bus_split_show,
	kgsl_pwrctrl_bus_split_store);

static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	&dev_attr_gputop,
	&dev_attr_gpu_available_frequencies,
	&dev_attr_max_pwrlevel,
	&dev_attr_min_pwrlevel,
	&dev_attr_thermal_pwrlevel,
	&dev_attr_num_pwrlevels,
	&dev_attr_pmqos_latency,
	&dev_attr_reset_count,
	&dev_attr_force_clk_on,
	&dev_attr_force_bus_on,
	&dev_attr_force_rail_on,
	&dev_attr_bus_split,
	NULL
};

int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}

void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
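/*
 * Userspace view: the attributes above appear under the KGSL device's sysfs
 * directory (conventionally /sys/class/kgsl/kgsl-3d0 on msm targets; the
 * exact path depends on how the device node is registered). An illustrative
 * shell session, assuming that path:
 *
 *	# cat /sys/class/kgsl/kgsl-3d0/gpu_available_frequencies
 *	450000000 320000000 200000000
 *	# echo 320000000 > /sys/class/kgsl/kgsl-3d0/max_gpuclk
 *	# echo 1 > /sys/class/kgsl/kgsl-3d0/force_clk_on
 *
 * Writes funnel through kgsl_sysfs_store() and take the device mutex, so
 * they serialize against the driver's own power transitions.
 */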
static void update_statistics(struct kgsl_device *device)
{
	struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
	unsigned int on_time = 0;
	int i;
	int num_pwrlevels = device->pwrctrl.num_pwrlevels - 1;

	/* Per clock level time */
	for (i = 0; i < num_pwrlevels; i++) {
		clkstats->old_clock_time[i] = clkstats->clock_time[i];
		on_time += clkstats->clock_time[i];
		clkstats->clock_time[i] = 0;
	}
	clkstats->old_clock_time[num_pwrlevels] =
		clkstats->clock_time[num_pwrlevels];
	clkstats->clock_time[num_pwrlevels] = 0;
	clkstats->on_time_old = on_time;
	clkstats->elapsed_old = clkstats->elapsed;
	clkstats->elapsed = 0;

	trace_kgsl_gpubusy(device, clkstats->on_time_old,
		clkstats->elapsed_old);
}

/* Track the amount of time the gpu is on vs the total system time.
 * Regularly update the percentage of busy time displayed by sysfs. */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
	struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;

	update_clk_statistics(device, on_time);
	/* Update the output regularly and reset the counters. */
	if ((clkstats->elapsed > UPDATE_BUSY_VAL) ||
		!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		update_statistics(device);
	}
}
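/*
 * Timing note: elapsed is accumulated in microseconds, so the
 * UPDATE_BUSY_VAL threshold of 1000000 rolls the "old" snapshot over about
 * once per second while the bus is up. Reading gpubusy then yields a pair
 * (on_time, elapsed) from the last completed window; the busy percentage
 * is simply 100 * on_time / elapsed.
 */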
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
	int requested_state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;

	if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->ctrl_flags))
		return;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(requested_state != KGSL_STATE_NAP)) {
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_unprepare(pwr->grp_clks[i]);
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
			}
			kgsl_pwrctrl_busy_time(device, true);
		} else if (requested_state == KGSL_STATE_SLEEP) {
			/* High latency clock maintenance. */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_unprepare(pwr->grp_clks[i]);
			if ((pwr->pwrlevels[0].gpu_freq > 0))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* High latency clock maintenance. */
			if (device->state != KGSL_STATE_NAP) {
				if (pwr->pwrlevels[0].gpu_freq > 0)
					clk_set_rate(pwr->grp_clks[0],
						pwr->pwrlevels
						[pwr->active_pwrlevel].
						gpu_freq);
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_prepare(pwr->grp_clks[i]);
			}
			/*
			 * As a last step, enable the group clocks so that
			 * GPU interrupts can come in.
			 */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}
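/*
 * Prepare/enable split: the common clock framework separates the slow,
 * sleepable clk_prepare()/clk_unprepare() phase from the fast, atomic
 * clk_enable()/clk_disable() phase. The code above exploits that for NAP:
 * on a NAP request only clk_disable() is called and the clocks stay
 * prepared, so a later wake from NAP skips the high-latency prepare and
 * rate-setting work. A full SLEEP additionally unprepares the clocks and
 * parks the source clock at the lowest (num_pwrlevels - 1) frequency.
 */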
static void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->ctrl_flags))
		return;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable_unprepare(pwr->ebi1_clk);
			}
			kgsl_pwrctrl_buslevel_update(device, false);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_prepare_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			kgsl_pwrctrl_buslevel_update(device, true);
		}
	}
}

static void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags))
		return;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_cx)
				regulator_disable(pwr->gpu_cx);
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_reg) {
				int status = regulator_enable(pwr->gpu_reg);
				if (status)
					KGSL_DRV_ERR(device,
						"core regulator_enable failed: %d\n",
						status);
			}
			if (pwr->gpu_cx) {
				int status = regulator_enable(pwr->gpu_cx);
				if (status)
					KGSL_DRV_ERR(device,
						"cx regulator_enable failed: %d\n",
						status);
			}
		}
	}
}
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			enable_irq(pwr->interrupt_num);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);
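/*
 * Why the in_interrupt() check above: disable_irq() waits for any running
 * handler of that interrupt to finish, so calling it from the GPU's own
 * interrupt context would deadlock. disable_irq_nosync() masks the line
 * without waiting, which is the safe variant on the handler path.
 */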
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, k, m, n = 0, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;

	/* Acquire clocks */
	for (i = 0; i < KGSL_MAX_CLKS; i++) {
		if (pdata->clk_map & clks[i].map) {
			clk = clk_get(&pdev->dev, clks[i].name);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	if (pwr->grp_clks[0] == NULL)
		pwr->grp_clks[0] = pwr->grp_clks[1];

	/* put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata->set_grp_async != NULL)
		pdata->set_grp_async();

	if (pdata->num_levels > KGSL_MAX_PWRLEVELS ||
		pdata->num_levels < 1) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
			pdata->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata->num_levels;

	/* Initialize the user and thermal clock constraints */
	pwr->max_pwrlevel = 0;
	pwr->min_pwrlevel = pdata->num_levels - 2;
	pwr->thermal_pwrlevel = 0;

	pwr->active_pwrlevel = pdata->init_level;
	pwr->default_pwrlevel = pdata->init_level;
	pwr->init_pwrlevel = pdata->init_level;
	for (i = 0; i < pdata->num_levels; i++) {
		pwr->pwrlevels[i].gpu_freq =
			(pdata->pwrlevel[i].gpu_freq > 0) ?
			clk_round_rate(pwr->grp_clks[0],
				pdata->pwrlevel[i].gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
			pdata->pwrlevel[i].bus_freq;
		pwr->pwrlevels[i].io_fraction =
			pdata->pwrlevel[i].io_fraction;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
			pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	pwr->gpu_reg = regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	if (pwr->gpu_reg) {
		pwr->gpu_cx = regulator_get(&pdev->dev, "vddcx");
		if (IS_ERR(pwr->gpu_cx))
			pwr->gpu_cx = NULL;
	} else
		pwr->gpu_cx = NULL;

	pwr->power_flags = 0;

	pwr->idle_needed = pdata->idle_needed;
	pwr->interval_timeout = msecs_to_jiffies(pdata->idle_timeout);
	pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
	pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
			pwr->pwrlevels[pwr->active_pwrlevel].bus_freq);
	pwr->pm_qos_latency = pdata->pm_qos_latency;

	pm_runtime_enable(device->parentdev);

	if (pdata->bus_scale_table == NULL)
		return result;

	pwr->pcl = msm_bus_scale_register_client(pdata->bus_scale_table);
	if (!pwr->pcl) {
		KGSL_PWR_ERR(device,
			"msm_bus_scale_register_client failed: "
			"id %d table %pK", device->id,
			pdata->bus_scale_table);
		result = -EINVAL;
		goto done;
	}

	/* Set if independent bus BW voting is supported */
	pwr->bus_control = pdata->bus_control;
	/*
	 * Pull the BW votes out of the bus table. They will be used to
	 * calculate the ratio between the votes.
	 */
	for (i = 0; i < pdata->bus_scale_table->num_usecases; i++) {
		struct msm_bus_paths *usecase =
				&pdata->bus_scale_table->usecase[i];
		struct msm_bus_vectors *vector = &usecase->vectors[0];
		if (vector->dst == MSM_BUS_SLAVE_EBI_CH0 &&
				vector->ib != 0) {
			for (k = 0; k < n; k++)
				if (vector->ib == pwr->bus_ib[k])
					break;
			/* if this is a new ib value, save it */
			if (k == n) {
				pwr->bus_ib[k] = vector->ib;
				n++;
				/* find which pwrlevels use this ib */
				for (m = 0; m < pwr->num_pwrlevels - 1; m++) {
					if (pdata->bus_scale_table->
						usecase[pwr->pwrlevels[m].
						bus_freq].vectors[0].ib
						== vector->ib)
						pwr->bus_index[m] = k;
				}
				printk(KERN_INFO "kgsl bus ib [%d] = %llu\n",
					k, vector->ib);
			}
		}
	}
	for (m = 0; m < pwr->num_pwrlevels - 1; m++)
		printk(KERN_INFO "kgsl bus index is %d for pwrlevel %d\n",
			pwr->bus_index[m], m);

	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
		clks[i].name, result);

done:
	return result;
}
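/*
 * Bus table walk example (illustrative): suppose the bus scale table's
 * EBI vectors carry ib values {0, 2 GB/s, 2 GB/s, 4 GB/s}. The loop above
 * collects the distinct nonzero values into bus_ib[] = {2 GB/s, 4 GB/s}
 * and records, for each pwrlevel, which entry its bus_freq usecase maps
 * to in bus_index[]. Those per-level indices let the driver reason about
 * relative bandwidth votes without re-walking the platform table.
 */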
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	pm_runtime_disable(device->parentdev);

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	if (pwr->gpu_cx) {
		regulator_put(pwr->gpu_cx);
		pwr->gpu_cx = NULL;
	}

	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}
/**
 * kgsl_idle_check() - Work function for GPU interrupts and idle timeouts.
 * @device: The device
 *
 * This function is called for work that is queued by the interrupt
 * handler or the idle timer. It attempts to transition to a clocks
 * off state if the active_cnt is 0 and the hardware is idle.
 */
void kgsl_idle_check(struct work_struct *work)
{
	int delay = INIT_UDELAY;
	int requested_state;
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

	kgsl_pwrscale_update(device);

	if (device->state == KGSL_STATE_ACTIVE
		|| device->state == KGSL_STATE_NAP) {
		/*
		 * If no user is explicitly trying to use the GPU
		 * (active_cnt is zero), then loop with increasing delay,
		 * waiting for the GPU to become idle.
		 */
		while (!atomic_read(&device->active_cnt) &&
			(delay < MAX_UDELAY)) {
			requested_state = device->requested_state;
			if (!kgsl_pwrctrl_sleep(device))
				break;
			/*
			 * If no new commands have been issued since the
			 * last interrupt, stay in this loop waiting for
			 * the GPU to become idle.
			 */
			if (!device->pwrctrl.irq_last)
				break;
			kgsl_pwrctrl_request_state(device, requested_state);
			kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
			udelay(delay);
			delay *= 2;
			kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
		}

		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		if (device->state == KGSL_STATE_ACTIVE) {
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/*
			 * If the GPU has been too busy to sleep, make sure
			 * that is accurately reflected in the % busy numbers.
			 */
			device->pwrctrl.clk_stats.no_nap_cnt++;
			if (device->pwrctrl.clk_stats.no_nap_cnt >
				UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.clk_stats.no_nap_cnt = 0;
			}
		} else {
			device->pwrctrl.irq_last = 0;
		}
	}

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
}
EXPORT_SYMBOL(kgsl_idle_check);
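/*
 * Backoff example: with INIT_UDELAY = 200 and MAX_UDELAY = 2000, a busy
 * GPU is polled after waits of 200, 400, 800 and 1600 microseconds (the
 * delay doubles each pass, and the loop exits once it reaches 3200, which
 * fails the delay < MAX_UDELAY test). This matches the comment at the top
 * of the file: short post-interrupt stragglers are absorbed cheaply, and
 * anything still busy after ~3 ms of accumulated waiting is left to the
 * idle timer instead.
 */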
void kgsl_timer(unsigned long data)
{
	struct kgsl_device *device = (struct kgsl_device *) data;

	KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
	if (device->requested_state != KGSL_STATE_SUSPEND) {
		if (device->pwrctrl.strtstp_sleepwake)
			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
		else
			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
		/* Have work run in a non-interrupt context. */
		queue_work(device->work_queue, &device->idle_check_ws);
	}
}

bool kgsl_pwrctrl_isenabled(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	return (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags) != 0);
}

/**
 * kgsl_pre_hwaccess - Enforce preconditions for touching registers
 * @device: The device
 *
 * This function ensures that the correct lock is held and that the GPU
 * clock is on immediately before a register is read or written. Note
 * that this function does not check active_cnt because the registers
 * must be accessed during device start and stop, when the active_cnt
 * may legitimately be 0.
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	/* In order to touch a register you must hold the device mutex... */
	BUG_ON(!mutex_is_locked(&device->mutex));
	/* and have the clock on! */
	BUG_ON(!kgsl_pwrctrl_isenabled(device));
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
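/*
 * Typical call pattern (a sketch, not code from this file): a register
 * accessor in the hardware-specific layer is expected to do something like
 *
 *	kgsl_pre_hwaccess(device);
 *	value = readl_relaxed(reg_addr);	/- reg_addr: hypothetical -/
 *
 * so a stray register read with the clocks gated trips the BUG_ON here
 * instead of hanging the bus with an unclocked access.
 */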
static int
_nap(struct kgsl_device *device)
{
	struct kgsl_power_stats stats;

	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}

		/*
		 * Read HW busy counters before going to NAP state.
		 * The data might be used by power scale governors
		 * independently of the HW activity. For example
		 * the simple-on-demand governor will get the latest
		 * busy_time data even if the gpu isn't active.
		 */
		device->ftbl->power_stats(device, &stats);
		device->pwrscale.accum_stats.busy_time += stats.busy_time;

		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
	return 0;
}

static void
_sleep_accounting(struct kgsl_device *device)
{
	kgsl_pwrctrl_busy_time(device, false);
	device->pwrctrl.clk_stats.start = ktime_set(0, 0);
	kgsl_pwrscale_sleep(device);
}

static int
_sleep(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
		_sleep_accounting(device);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
					PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
			kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
static int
_slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		del_timer_sync(&device->idle_timer);
		/* make sure power is on to stop the device */
		kgsl_pwrctrl_enable(device);
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
						PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
			kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}

/******************************************************************/
/* Caller must hold the device mutex. */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	int status = 0;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	switch (device->requested_state) {
	case KGSL_STATE_NAP:
		status = _nap(device);
		break;
	case KGSL_STATE_SLEEP:
		status = _sleep(device);
		break;
	case KGSL_STATE_SLUMBER:
		status = _slumber(device);
		break;
	default:
		KGSL_PWR_INFO(device, "bad state request 0x%x\n",
				device->requested_state);
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		status = -EINVAL;
		break;
	}
	return status;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
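/*
 * Low-power state ladder, as implemented by the helpers above (a summary
 * derived from the code rather than external documentation):
 *
 *	ACTIVE -> NAP:     IRQs off, core clocks gated (still prepared)
 *	NAP    -> SLEEP:   AXI vote dropped, clocks unprepared and parked
 *	                   at the lowest rate, PM QoS relaxed
 *	SLEEP  -> SLUMBER: device stopped via ftbl->stop(), the deepest
 *	                   state short of a full system SUSPEND
 *
 * Each step refuses with -EBUSY if the hardware still reports busy, and
 * kgsl_pwrctrl_wake() walks the same ladder in reverse.
 */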
/**
 * kgsl_pwrctrl_wake() - Power up the GPU from a slumber/sleep state
 * @device: Pointer to the kgsl_device struct
 * @priority: Boolean flag to indicate that the GPU start should be run in the
 * higher priority thread
 *
 * Resume the GPU from a lower power state to ACTIVE. The caller to this
 * function must hold the kgsl_device mutex.
 */
int kgsl_pwrctrl_wake(struct kgsl_device *device, int priority)
{
	int status = 0;
	unsigned int context_id;
	unsigned int state = device->state;
	unsigned int ts_processed = 0xdeaddead;
	struct kgsl_context *context;

	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
	switch (device->state) {
	case KGSL_STATE_SLUMBER:
		status = device->ftbl->start(device, priority);
		if (status) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			KGSL_DRV_ERR(device, "start failed %d\n", status);
			break;
		}
		/* fall through */
	case KGSL_STATE_SLEEP:
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
		kgsl_sharedmem_readl(&device->memstore,
			(unsigned int *) &context_id,
			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
				current_context));
		context = kgsl_context_get(device, context_id);
		if (context)
			ts_processed = kgsl_readtimestamp(device, context,
				KGSL_TIMESTAMP_RETIRED);
		KGSL_PWR_INFO(device, "Wake from %s state. CTXT: %d RTRD TS: %08X\n",
			kgsl_pwrstate_to_str(state),
			context ? context->id : -1, ts_processed);
		kgsl_context_put(context);
		/* fall through */
	case KGSL_STATE_NAP:
		/* Turn on the core clocks */
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
		/* Enable state before turning on irq */
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
		mod_timer(&device->idle_timer, jiffies +
				device->pwrctrl.interval_timeout);
		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
				device->pwrctrl.pm_qos_latency);
		/* fall through */
	case KGSL_STATE_ACTIVE:
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	case KGSL_STATE_INIT:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
			kgsl_pwrstate_to_str(device->state));
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		status = -EINVAL;
		break;
	}
	return status;
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	if (pwr->constraint.type == KGSL_CONSTRAINT_NONE)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);

void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);

void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
{
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_state);

void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
{
	if (state != KGSL_STATE_NONE && state != device->requested_state)
		trace_kgsl_pwr_request_state(device, state);
	device->requested_state = state;
}
EXPORT_SYMBOL(kgsl_pwrctrl_request_state);

const char *kgsl_pwrstate_to_str(unsigned int state)
{
	switch (state) {
	case KGSL_STATE_NONE:
		return "NONE";
	case KGSL_STATE_INIT:
		return "INIT";
	case KGSL_STATE_ACTIVE:
		return "ACTIVE";
	case KGSL_STATE_NAP:
		return "NAP";
	case KGSL_STATE_SLEEP:
		return "SLEEP";
	case KGSL_STATE_SUSPEND:
		return "SUSPEND";
	case KGSL_STATE_SLUMBER:
		return "SLUMBER";
	default:
		break;
	}
	return "UNKNOWN";
}
EXPORT_SYMBOL(kgsl_pwrstate_to_str);
/**
 * kgsl_active_count_get() - Increase the device active count
 * @device: Pointer to a KGSL device
 *
 * Increase the active count for the KGSL device and turn on
 * clocks if this is the first reference. Code paths that need
 * to touch the hardware or wait for the hardware to complete
 * an operation must hold an active count reference until they
 * are finished. An error code will be returned if waking the
 * device fails. The device mutex must be held while calling
 * this function.
 */
int kgsl_active_count_get(struct kgsl_device *device)
{
	int ret = 0;
	BUG_ON(!mutex_is_locked(&device->mutex));

	if ((atomic_read(&device->active_cnt) == 0) &&
		(device->state != KGSL_STATE_ACTIVE)) {
		kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
		wait_for_completion(&device->hwaccess_gate);
		kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

		ret = kgsl_pwrctrl_wake(device, 1);
	}
	if (ret == 0)
		atomic_inc(&device->active_cnt);
	trace_kgsl_active_count(device,
		(unsigned long) __builtin_return_address(0));
	return ret;
}
EXPORT_SYMBOL(kgsl_active_count_get);

/**
 * kgsl_active_count_get_light() - Increase the device active count
 * @device: Pointer to a KGSL device
 *
 * Increase the active count for the KGSL device WITHOUT
 * turning on the clocks, based on the assumption that the clocks are already
 * on from a previous active_count_get(). Currently this is only used for
 * creating kgsl_events.
 */
int kgsl_active_count_get_light(struct kgsl_device *device)
{
	if (atomic_inc_not_zero(&device->active_cnt) == 0) {
		dev_WARN_ONCE(device->dev, 1, "active count is 0!\n");
		return -EINVAL;
	}

	trace_kgsl_active_count(device,
		(unsigned long) __builtin_return_address(0));
	return 0;
}
EXPORT_SYMBOL(kgsl_active_count_get_light);

/**
 * kgsl_active_count_put() - Decrease the device active count
 * @device: Pointer to a KGSL device
 *
 * Decrease the active count for the KGSL device and turn off
 * clocks if there are no remaining references. This function will
 * transition the device to NAP if there are no other pending state
 * changes. It also completes the suspend gate. The device mutex must
 * be held while calling this function.
 */
void kgsl_active_count_put(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	BUG_ON(atomic_read(&device->active_cnt) == 0);

	if (atomic_dec_and_test(&device->active_cnt)) {
		if (device->state == KGSL_STATE_ACTIVE &&
			device->requested_state == KGSL_STATE_NONE) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
			queue_work(device->work_queue, &device->idle_check_ws);
		}
		mod_timer(&device->idle_timer,
			jiffies + device->pwrctrl.interval_timeout);
	} else {
		kgsl_pwrscale_update(device);
	}

	trace_kgsl_active_count(device,
		(unsigned long) __builtin_return_address(0));

	wake_up(&device->active_cnt_wq);
}
EXPORT_SYMBOL(kgsl_active_count_put);
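/*
 * Usage sketch for the active count API (an illustrative pattern assembled
 * from the rules documented above; error handling shortened):
 *
 *	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
 *	ret = kgsl_active_count_get(device);
 *	if (!ret) {
 *		... touch registers / wait for hardware ...
 *		kgsl_active_count_put(device);
 *	}
 *	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
 *
 * Note that the get may drop and retake the mutex while it waits on
 * hwaccess_gate, so callers must not assume device state is unchanged
 * across it.
 */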
static int _check_active_count(struct kgsl_device *device, int count)
{
	/* Return 0 if the active count is greater than the desired value */
	return atomic_read(&device->active_cnt) > count ? 0 : 1;
}

/**
 * kgsl_active_count_wait() - Wait for activity to finish.
 * @device: Pointer to a KGSL device
 * @count: Active count value to wait for
 *
 * Block until the active_cnt value hits the desired value
 */
int kgsl_active_count_wait(struct kgsl_device *device, int count)
{
	int result = 0;
	long wait_jiffies = msecs_to_jiffies(1000);

	BUG_ON(!mutex_is_locked(&device->mutex));

	while (atomic_read(&device->active_cnt) > count) {
		long ret;
		kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
		ret = wait_event_timeout(device->active_cnt_wq,
			_check_active_count(device, count), wait_jiffies);
		kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
		result = ret == 0 ? -ETIMEDOUT : 0;
		if (!result)
			wait_jiffies = ret;
		else
			break;
	}

	return result;
}
EXPORT_SYMBOL(kgsl_active_count_wait);