- /* Copyright (c) 2010-2014,2016 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
- #include <linux/export.h>
- #include <linux/interrupt.h>
- #include <asm/page.h>
- #include <linux/pm_runtime.h>
- #include <mach/msm_iomap.h>
- #include <mach/msm_bus.h>
- #include <mach/msm_bus_board.h>
- #include <linux/ktime.h>
- #include <linux/delay.h>
- #include "kgsl.h"
- #include "kgsl_pwrscale.h"
- #include "kgsl_device.h"
- #include "kgsl_trace.h"
- #include "kgsl_sharedmem.h"
- #define KGSL_PWRFLAGS_POWER_ON 0
- #define KGSL_PWRFLAGS_CLK_ON 1
- #define KGSL_PWRFLAGS_AXI_ON 2
- #define KGSL_PWRFLAGS_IRQ_ON 3
- #define UPDATE_BUSY_VAL 1000000
- #define UPDATE_BUSY 50
- /*
- * Expected delay for post-interrupt processing on A3xx.
- * The delay may be longer; gradually increase the delay
- * to compensate. If the GPU isn't done by the max delay,
- * it's working on something other than just the final
- * command sequence, so stop waiting for it to be idle.
- */
- #define INIT_UDELAY 200
- #define MAX_UDELAY 2000
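- /*
-  * Illustrative note (not part of the original source): kgsl_idle_check()
-  * below polls with udelay(delay) and doubles the delay each pass, so the
-  * per-iteration wait steps 200 -> 400 -> 800 -> 1600 us and the loop
-  * stops retrying once the next doubling would reach MAX_UDELAY.
-  */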
- struct clk_pair {
- const char *name;
- uint map;
- };
- struct clk_pair clks[KGSL_MAX_CLKS] = {
- {
- .name = "src_clk",
- .map = KGSL_CLK_SRC,
- },
- {
- .name = "core_clk",
- .map = KGSL_CLK_CORE,
- },
- {
- .name = "iface_clk",
- .map = KGSL_CLK_IFACE,
- },
- {
- .name = "mem_clk",
- .map = KGSL_CLK_MEM,
- },
- {
- .name = "mem_iface_clk",
- .map = KGSL_CLK_MEM_IFACE,
- },
- {
- .name = "alt_mem_iface_clk",
- .map = KGSL_CLK_ALT_MEM_IFACE,
- },
- };
- static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
- int requested_state);
- static void kgsl_pwrctrl_axi(struct kgsl_device *device, int state);
- static void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state);
- /* Update the elapsed time at a particular clock level
- * if the device is active (on_time = true). Otherwise
- * store it as sleep time.
- */
- static void update_clk_statistics(struct kgsl_device *device,
- bool on_time)
- {
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- struct kgsl_clk_stats *clkstats = &pwr->clk_stats;
- ktime_t elapsed;
- int elapsed_us;
- if (clkstats->start.tv64 == 0)
- clkstats->start = ktime_get();
- clkstats->stop = ktime_get();
- elapsed = ktime_sub(clkstats->stop, clkstats->start);
- elapsed_us = ktime_to_us(elapsed);
- clkstats->elapsed += elapsed_us;
- if (on_time)
- clkstats->clock_time[pwr->active_pwrlevel] += elapsed_us;
- else
- clkstats->clock_time[pwr->num_pwrlevels - 1] += elapsed_us;
- clkstats->start = ktime_get();
- }
- /*
- * Given a requested power level, do bounds checking on the constraints and
- * return the nearest possible level.
- */
- static inline int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level)
- {
- int max_pwrlevel = max_t(int, pwr->thermal_pwrlevel, pwr->max_pwrlevel);
- int min_pwrlevel = max_t(int, pwr->thermal_pwrlevel, pwr->min_pwrlevel);
- if (level < max_pwrlevel)
- return max_pwrlevel;
- if (level > min_pwrlevel)
- return min_pwrlevel;
- return level;
- }
- void kgsl_pwrctrl_buslevel_update(struct kgsl_device *device,
- bool on)
- {
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- int cur = pwr->pwrlevels[pwr->active_pwrlevel].bus_freq;
- int buslevel = 0;
- if (!pwr->pcl)
- return;
- /* the bus should be ON to update the active frequency */
- if (on && !(test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)))
- return;
- /*
- * If the bus should remain on calculate our request and submit it,
- * otherwise request bus level 0, off.
- */
- if (on) {
- buslevel = min_t(int, pwr->pwrlevels[0].bus_freq,
- cur + pwr->bus_mod);
- buslevel = max_t(int, buslevel, 1);
- }
- msm_bus_scale_client_update_request(pwr->pcl, buslevel);
- trace_kgsl_pwrlevel(device, pwr->active_pwrlevel, buslevel);
- }
- EXPORT_SYMBOL(kgsl_pwrctrl_buslevel_update);
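- /*
-  * Illustrative example (not part of the original source): with
-  * pwrlevels[0].bus_freq = 4, an active level whose bus_freq is 2 and
-  * bus_mod = +1, the request above is buslevel = max(min(4, 2 + 1), 1) = 3;
-  * with bus_mod = -3 it clamps to the floor of 1, and an "off" request
-  * always votes bus level 0.
-  */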
- void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
- unsigned int new_level)
- {
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- struct kgsl_pwrlevel *pwrlevel;
- int delta, level;
- /* Adjust the power level to the current constraints */
- new_level = _adjust_pwrlevel(pwr, new_level);
- if (new_level == pwr->active_pwrlevel)
- return;
- delta = new_level < pwr->active_pwrlevel ? -1 : 1;
- update_clk_statistics(device, true);
- level = pwr->active_pwrlevel;
- /*
- * Set the active powerlevel first in case the clocks are off - if we
- * don't do this then the pwrlevel change won't take effect when the
- * clocks come back
- */
- pwr->active_pwrlevel = new_level;
- pwr->bus_mod = 0;
- pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel];
- kgsl_pwrctrl_buslevel_update(device, true);
- if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags))
- if (pwr->ebi1_clk)
- clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
- if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags) ||
- (device->state == KGSL_STATE_NAP)) {
- /*
- * On some platforms, changing the clock frequency while
- * the core is busy causes instability, so idle the GPU
- * core before changing the clock frequency.
- */
- if (pwr->idle_needed == true)
- device->ftbl->idle(device);
- /*
- * Don't shift by more than one level at a time to
- * avoid glitches.
- */
- while (level != new_level) {
- level += delta;
- clk_set_rate(pwr->grp_clks[0],
- pwr->pwrlevels[level].gpu_freq);
- }
- }
- trace_kgsl_pwrlevel(device, pwr->active_pwrlevel, pwrlevel->gpu_freq);
- }
- EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
- static int kgsl_pwrctrl_thermal_pwrlevel_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- struct kgsl_pwrctrl *pwr;
- int ret;
- unsigned int level = 0;
- if (device == NULL)
- return 0;
- pwr = &device->pwrctrl;
- ret = kgsl_sysfs_store(buf, &level);
- if (ret)
- return ret;
- kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
- if (level > pwr->num_pwrlevels - 2)
- level = pwr->num_pwrlevels - 2;
- pwr->thermal_pwrlevel = level;
- /*
- * If there is no power policy, set the clock to the requested thermal
- * level - if thermal now happens to be higher than max, then that will
- * be limited by the pwrlevel change function. Otherwise, if there is
- * a policy, only change the active clock if it is higher than the new
- * thermal level.
- */
- if (pwr->thermal_pwrlevel > pwr->active_pwrlevel)
- kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
- kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
- return count;
- }
- static int kgsl_pwrctrl_thermal_pwrlevel_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- struct kgsl_pwrctrl *pwr;
- if (device == NULL)
- return 0;
- pwr = &device->pwrctrl;
- return snprintf(buf, PAGE_SIZE, "%d\n", pwr->thermal_pwrlevel);
- }
- static int kgsl_pwrctrl_max_pwrlevel_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- struct kgsl_pwrctrl *pwr;
- int ret, max_level;
- unsigned int level = 0;
- if (device == NULL)
- return 0;
- pwr = &device->pwrctrl;
- ret = kgsl_sysfs_store(buf, &level);
- if (ret)
- return ret;
- kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
- /* You can't set a maximum power level lower than the minimum */
- if (level > pwr->min_pwrlevel)
- level = pwr->min_pwrlevel;
- pwr->max_pwrlevel = level;
- max_level = max_t(int, pwr->thermal_pwrlevel, pwr->max_pwrlevel);
- /*
- * If there is no policy then move to max by default. Otherwise only
- * move max if the current level happens to be higher than the new max.
- */
- if (max_level > pwr->active_pwrlevel)
- kgsl_pwrctrl_pwrlevel_change(device, max_level);
- kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
- return count;
- }
- static int kgsl_pwrctrl_max_pwrlevel_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- struct kgsl_pwrctrl *pwr;
- if (device == NULL)
- return 0;
- pwr = &device->pwrctrl;
- return snprintf(buf, PAGE_SIZE, "%d\n", pwr->max_pwrlevel);
- }
- static int kgsl_pwrctrl_min_pwrlevel_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- struct kgsl_pwrctrl *pwr;
- int ret, min_level;
- unsigned int level = 0;
- if (device == NULL)
- return 0;
- pwr = &device->pwrctrl;
- ret = kgsl_sysfs_store(buf, &level);
- if (ret)
- return ret;
- kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
- if (level > pwr->num_pwrlevels - 2)
- level = pwr->num_pwrlevels - 2;
- /* You can't set a minimum power level lower than the maximum */
- if (level < pwr->max_pwrlevel)
- level = pwr->max_pwrlevel;
- pwr->min_pwrlevel = level;
- min_level = max_t(int, pwr->thermal_pwrlevel, pwr->min_pwrlevel);
- /* Only move the power level higher if the minimum is higher than the
- * current level.
- */
- if (min_level < pwr->active_pwrlevel)
- kgsl_pwrctrl_pwrlevel_change(device, min_level);
- kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
- return count;
- }
- static int kgsl_pwrctrl_min_pwrlevel_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- struct kgsl_pwrctrl *pwr;
- if (device == NULL)
- return 0;
- pwr = &device->pwrctrl;
- return snprintf(buf, PAGE_SIZE, "%d\n", pwr->min_pwrlevel);
- }
- static int kgsl_pwrctrl_num_pwrlevels_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- struct kgsl_pwrctrl *pwr;
- if (device == NULL)
- return 0;
- pwr = &device->pwrctrl;
- return snprintf(buf, PAGE_SIZE, "%d\n", pwr->num_pwrlevels - 1);
- }
- /* Given a GPU clock value, return the lowest matching powerlevel */
- static int _get_nearest_pwrlevel(struct kgsl_pwrctrl *pwr, unsigned int clock)
- {
- int i;
- for (i = pwr->num_pwrlevels - 1; i >= 0; i--) {
- if (abs(pwr->pwrlevels[i].gpu_freq - clock) < 5000000)
- return i;
- }
- return -ERANGE;
- }
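- /*
-  * Illustrative example (not part of the original source): the search
-  * above starts at the lowest-performance level (highest index) and
-  * returns the first level whose gpu_freq lies within 5 MHz of the
-  * request, so asking for 321000000 matches a 325000000 level; a value
-  * that misses every level by 5 MHz or more yields -ERANGE.
-  */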
- static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- struct kgsl_pwrctrl *pwr;
- unsigned int val = 0;
- int ret, level;
- if (device == NULL)
- return 0;
- pwr = &device->pwrctrl;
- ret = kgsl_sysfs_store(buf, &val);
- if (ret)
- return ret;
- kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
- level = _get_nearest_pwrlevel(pwr, val);
- if (level < 0)
- goto done;
- pwr->thermal_pwrlevel = level;
- /*
- * if the thermal limit is lower than the current setting,
- * move the speed down immediately
- */
- if (pwr->thermal_pwrlevel > pwr->active_pwrlevel)
- kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
- done:
- kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
- return count;
- }
- static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- struct kgsl_pwrctrl *pwr;
- if (device == NULL)
- return 0;
- pwr = &device->pwrctrl;
- return snprintf(buf, PAGE_SIZE, "%d\n",
- pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
- }
- static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- struct kgsl_pwrctrl *pwr;
- unsigned int val = 0;
- int ret, level;
- if (device == NULL)
- return 0;
- pwr = &device->pwrctrl;
- ret = kgsl_sysfs_store(buf, &val);
- if (ret)
- return ret;
- kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
- level = _get_nearest_pwrlevel(pwr, val);
- if (level >= 0)
- kgsl_pwrctrl_pwrlevel_change(device, level);
- kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
- return count;
- }
- static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- struct kgsl_pwrctrl *pwr;
- if (device == NULL)
- return 0;
- pwr = &device->pwrctrl;
- return snprintf(buf, PAGE_SIZE, "%ld\n", kgsl_pwrctrl_active_freq(pwr));
- }
- static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- unsigned int val = 0;
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- struct kgsl_pwrctrl *pwr;
- int ret;
- if (device == NULL)
- return 0;
- pwr = &device->pwrctrl;
- ret = kgsl_sysfs_store(buf, &val);
- if (ret)
- return ret;
- kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
- /* Let the timeout be requested in ms, store in jiffies. */
- pwr->interval_timeout = msecs_to_jiffies(val);
- kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
- return count;
- }
- static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- if (device == NULL)
- return 0;
- /* Show the idle_timeout in msec */
- return snprintf(buf, PAGE_SIZE, "%d\n",
- jiffies_to_msecs(device->pwrctrl.interval_timeout));
- }
- static int kgsl_pwrctrl_pmqos_latency_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- unsigned int val = 0;
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- int ret;
- if (device == NULL)
- return 0;
- ret = kgsl_sysfs_store(buf, &val);
- if (ret)
- return ret;
- kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
- device->pwrctrl.pm_qos_latency = val;
- kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
- return count;
- }
- static int kgsl_pwrctrl_pmqos_latency_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- if (device == NULL)
- return 0;
- return snprintf(buf, PAGE_SIZE, "%d\n",
- device->pwrctrl.pm_qos_latency);
- }
- static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- int ret;
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- struct kgsl_clk_stats *clkstats;
- if (device == NULL)
- return 0;
- clkstats = &device->pwrctrl.clk_stats;
- ret = snprintf(buf, PAGE_SIZE, "%7d %7d\n",
- clkstats->on_time_old, clkstats->elapsed_old);
- if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
- clkstats->on_time_old = 0;
- clkstats->elapsed_old = 0;
- }
- return ret;
- }
- static int kgsl_pwrctrl_gputop_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- int ret;
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- struct kgsl_clk_stats *clkstats;
- int i = 0;
- char *ptr = buf;
- if (device == NULL)
- return 0;
- clkstats = &device->pwrctrl.clk_stats;
- ret = snprintf(buf, PAGE_SIZE, "%7d %7d ", clkstats->on_time_old,
- clkstats->elapsed_old);
- for (i = 0, ptr += ret; i < device->pwrctrl.num_pwrlevels;
- i++, ptr += ret)
- ret = snprintf(ptr, PAGE_SIZE, "%7d ",
- clkstats->old_clock_time[i]);
- if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
- clkstats->on_time_old = 0;
- clkstats->elapsed_old = 0;
- for (i = 0; i < KGSL_MAX_PWRLEVELS ; i++)
- clkstats->old_clock_time[i] = 0;
- }
- return (unsigned int) (ptr - buf);
- }
- static int kgsl_pwrctrl_gpu_available_frequencies_show(
- struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- struct kgsl_pwrctrl *pwr;
- int index, num_chars = 0;
- if (device == NULL)
- return 0;
- pwr = &device->pwrctrl;
- for (index = 0; index < pwr->num_pwrlevels - 1; index++)
- num_chars += snprintf(buf + num_chars, PAGE_SIZE, "%d ",
- pwr->pwrlevels[index].gpu_freq);
- buf[num_chars++] = '\n';
- return num_chars;
- }
- static int kgsl_pwrctrl_reset_count_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- if (device == NULL)
- return 0;
- return snprintf(buf, PAGE_SIZE, "%d\n", device->reset_counter);
- }
- static void __force_on(struct kgsl_device *device, int flag, int on)
- {
- if (on) {
- switch (flag) {
- case KGSL_PWRFLAGS_CLK_ON:
- kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON,
- KGSL_STATE_ACTIVE);
- break;
- case KGSL_PWRFLAGS_AXI_ON:
- kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
- break;
- case KGSL_PWRFLAGS_POWER_ON:
- kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
- break;
- }
- set_bit(flag, &device->pwrctrl.ctrl_flags);
- } else {
- clear_bit(flag, &device->pwrctrl.ctrl_flags);
- }
- }
- static int __force_on_show(struct device *dev,
- struct device_attribute *attr,
- char *buf, int flag)
- {
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- if (device == NULL)
- return 0;
- return snprintf(buf, PAGE_SIZE, "%d\n",
- test_bit(flag, &device->pwrctrl.ctrl_flags));
- }
- static int __force_on_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count,
- int flag)
- {
- unsigned int val = 0;
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- int ret;
- if (device == NULL)
- return 0;
- ret = kgsl_sysfs_store(buf, &val);
- if (ret)
- return ret;
- kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
- __force_on(device, flag, val);
- kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
- return count;
- }
- static int kgsl_pwrctrl_force_clk_on_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_CLK_ON);
- }
- static int kgsl_pwrctrl_force_clk_on_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_CLK_ON);
- }
- static int kgsl_pwrctrl_force_bus_on_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_AXI_ON);
- }
- static int kgsl_pwrctrl_force_bus_on_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_AXI_ON);
- }
- static int kgsl_pwrctrl_force_rail_on_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_POWER_ON);
- }
- static int kgsl_pwrctrl_force_rail_on_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_POWER_ON);
- }
- static ssize_t kgsl_pwrctrl_bus_split_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- if (device == NULL)
- return 0;
- return snprintf(buf, PAGE_SIZE, "%d\n",
- device->pwrctrl.bus_control);
- }
- static ssize_t kgsl_pwrctrl_bus_split_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- unsigned int val = 0;
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- int ret;
- if (device == NULL)
- return 0;
- ret = kgsl_sysfs_store(buf, &val);
- if (ret)
- return ret;
- kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
- device->pwrctrl.bus_control = val ? true : false;
- kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
- return count;
- }
- DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
- DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
- kgsl_pwrctrl_max_gpuclk_store);
- DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
- kgsl_pwrctrl_idle_timer_store);
- DEVICE_ATTR(gpubusy, 0444, kgsl_pwrctrl_gpubusy_show,
- NULL);
- DEVICE_ATTR(gputop, 0444, kgsl_pwrctrl_gputop_show,
- NULL);
- DEVICE_ATTR(gpu_available_frequencies, 0444,
- kgsl_pwrctrl_gpu_available_frequencies_show,
- NULL);
- DEVICE_ATTR(max_pwrlevel, 0644,
- kgsl_pwrctrl_max_pwrlevel_show,
- kgsl_pwrctrl_max_pwrlevel_store);
- DEVICE_ATTR(min_pwrlevel, 0644,
- kgsl_pwrctrl_min_pwrlevel_show,
- kgsl_pwrctrl_min_pwrlevel_store);
- DEVICE_ATTR(thermal_pwrlevel, 0644,
- kgsl_pwrctrl_thermal_pwrlevel_show,
- kgsl_pwrctrl_thermal_pwrlevel_store);
- DEVICE_ATTR(num_pwrlevels, 0444,
- kgsl_pwrctrl_num_pwrlevels_show,
- NULL);
- DEVICE_ATTR(pmqos_latency, 0644,
- kgsl_pwrctrl_pmqos_latency_show,
- kgsl_pwrctrl_pmqos_latency_store);
- DEVICE_ATTR(reset_count, 0444,
- kgsl_pwrctrl_reset_count_show,
- NULL);
- DEVICE_ATTR(force_clk_on, 0644,
- kgsl_pwrctrl_force_clk_on_show,
- kgsl_pwrctrl_force_clk_on_store);
- DEVICE_ATTR(force_bus_on, 0644,
- kgsl_pwrctrl_force_bus_on_show,
- kgsl_pwrctrl_force_bus_on_store);
- DEVICE_ATTR(force_rail_on, 0644,
- kgsl_pwrctrl_force_rail_on_show,
- kgsl_pwrctrl_force_rail_on_store);
- DEVICE_ATTR(bus_split, 0644,
- kgsl_pwrctrl_bus_split_show,
- kgsl_pwrctrl_bus_split_store);
- static const struct device_attribute *pwrctrl_attr_list[] = {
- &dev_attr_gpuclk,
- &dev_attr_max_gpuclk,
- &dev_attr_idle_timer,
- &dev_attr_gpubusy,
- &dev_attr_gputop,
- &dev_attr_gpu_available_frequencies,
- &dev_attr_max_pwrlevel,
- &dev_attr_min_pwrlevel,
- &dev_attr_thermal_pwrlevel,
- &dev_attr_num_pwrlevels,
- &dev_attr_pmqos_latency,
- &dev_attr_reset_count,
- &dev_attr_force_clk_on,
- &dev_attr_force_bus_on,
- &dev_attr_force_rail_on,
- &dev_attr_bus_split,
- NULL
- };
- int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
- {
- return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
- }
- void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
- {
- kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
- }
- static void update_statistics(struct kgsl_device *device)
- {
- struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
- unsigned int on_time = 0;
- int i;
- int num_pwrlevels = device->pwrctrl.num_pwrlevels - 1;
- /* PER CLK TIME */
- for (i = 0; i < num_pwrlevels; i++) {
- clkstats->old_clock_time[i] = clkstats->clock_time[i];
- on_time += clkstats->clock_time[i];
- clkstats->clock_time[i] = 0;
- }
- clkstats->old_clock_time[num_pwrlevels] =
- clkstats->clock_time[num_pwrlevels];
- clkstats->clock_time[num_pwrlevels] = 0;
- clkstats->on_time_old = on_time;
- clkstats->elapsed_old = clkstats->elapsed;
- clkstats->elapsed = 0;
- trace_kgsl_gpubusy(device, clkstats->on_time_old,
- clkstats->elapsed_old);
- }
- /* Track the amount of time the gpu is on vs the total system time. *
- * Regularly update the percentage of busy time displayed by sysfs. */
- static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
- {
- struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
- update_clk_statistics(device, on_time);
- /* Update the output regularly and reset the counters. */
- if ((clkstats->elapsed > UPDATE_BUSY_VAL) ||
- !test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
- update_statistics(device);
- }
- }
- void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
- int requested_state)
- {
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- int i = 0;
- if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->ctrl_flags))
- return;
- if (state == KGSL_PWRFLAGS_OFF) {
- if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
- &pwr->power_flags)) {
- trace_kgsl_clk(device, state);
- for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
- if (pwr->grp_clks[i])
- clk_disable(pwr->grp_clks[i]);
- /* High latency clock maintenance. */
- if ((pwr->pwrlevels[0].gpu_freq > 0) &&
- (requested_state != KGSL_STATE_NAP)) {
- for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
- if (pwr->grp_clks[i])
- clk_unprepare(pwr->grp_clks[i]);
- clk_set_rate(pwr->grp_clks[0],
- pwr->pwrlevels[pwr->num_pwrlevels - 1].
- gpu_freq);
- }
- kgsl_pwrctrl_busy_time(device, true);
- } else if (requested_state == KGSL_STATE_SLEEP) {
- /* High latency clock maintenance. */
- for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
- if (pwr->grp_clks[i])
- clk_unprepare(pwr->grp_clks[i]);
- if ((pwr->pwrlevels[0].gpu_freq > 0))
- clk_set_rate(pwr->grp_clks[0],
- pwr->pwrlevels[pwr->num_pwrlevels - 1].
- gpu_freq);
- }
- } else if (state == KGSL_PWRFLAGS_ON) {
- if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
- &pwr->power_flags)) {
- trace_kgsl_clk(device, state);
- /* High latency clock maintenance. */
- if (device->state != KGSL_STATE_NAP) {
- if (pwr->pwrlevels[0].gpu_freq > 0)
- clk_set_rate(pwr->grp_clks[0],
- pwr->pwrlevels
- [pwr->active_pwrlevel].
- gpu_freq);
- for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
- if (pwr->grp_clks[i])
- clk_prepare(pwr->grp_clks[i]);
- }
- /* As the last step, enable grp_clk so that
- * GPU interrupts can come through. */
- for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
- if (pwr->grp_clks[i])
- clk_enable(pwr->grp_clks[i]);
- kgsl_pwrctrl_busy_time(device, false);
- }
- }
- }
- static void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
- {
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->ctrl_flags))
- return;
- if (state == KGSL_PWRFLAGS_OFF) {
- if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
- &pwr->power_flags)) {
- trace_kgsl_bus(device, state);
- if (pwr->ebi1_clk) {
- clk_set_rate(pwr->ebi1_clk, 0);
- clk_disable_unprepare(pwr->ebi1_clk);
- }
- kgsl_pwrctrl_buslevel_update(device, false);
- }
- } else if (state == KGSL_PWRFLAGS_ON) {
- if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
- &pwr->power_flags)) {
- trace_kgsl_bus(device, state);
- if (pwr->ebi1_clk) {
- clk_prepare_enable(pwr->ebi1_clk);
- clk_set_rate(pwr->ebi1_clk,
- pwr->pwrlevels[pwr->active_pwrlevel].
- bus_freq);
- }
- kgsl_pwrctrl_buslevel_update(device, true);
- }
- }
- }
- static void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
- {
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags))
- return;
- if (state == KGSL_PWRFLAGS_OFF) {
- if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
- &pwr->power_flags)) {
- trace_kgsl_rail(device, state);
- if (pwr->gpu_cx)
- regulator_disable(pwr->gpu_cx);
- if (pwr->gpu_reg)
- regulator_disable(pwr->gpu_reg);
- }
- } else if (state == KGSL_PWRFLAGS_ON) {
- if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
- &pwr->power_flags)) {
- trace_kgsl_rail(device, state);
- if (pwr->gpu_reg) {
- int status = regulator_enable(pwr->gpu_reg);
- if (status)
- KGSL_DRV_ERR(device,
- "core regulator_enable "
- "failed: %d\n",
- status);
- }
- if (pwr->gpu_cx) {
- int status = regulator_enable(pwr->gpu_cx);
- if (status)
- KGSL_DRV_ERR(device,
- "cx regulator_enable "
- "failed: %d\n",
- status);
- }
- }
- }
- }
- void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
- {
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- if (state == KGSL_PWRFLAGS_ON) {
- if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
- &pwr->power_flags)) {
- trace_kgsl_irq(device, state);
- enable_irq(pwr->interrupt_num);
- }
- } else if (state == KGSL_PWRFLAGS_OFF) {
- if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
- &pwr->power_flags)) {
- trace_kgsl_irq(device, state);
- if (in_interrupt())
- disable_irq_nosync(pwr->interrupt_num);
- else
- disable_irq(pwr->interrupt_num);
- }
- }
- }
- EXPORT_SYMBOL(kgsl_pwrctrl_irq);
- int kgsl_pwrctrl_init(struct kgsl_device *device)
- {
- int i, k, m, n = 0, result = 0;
- struct clk *clk;
- struct platform_device *pdev =
- container_of(device->parentdev, struct platform_device, dev);
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
- /* acquire clocks */
- for (i = 0; i < KGSL_MAX_CLKS; i++) {
- if (pdata->clk_map & clks[i].map) {
- clk = clk_get(&pdev->dev, clks[i].name);
- if (IS_ERR(clk))
- goto clk_err;
- pwr->grp_clks[i] = clk;
- }
- }
- /* Make sure we have a source clk for freq setting */
- if (pwr->grp_clks[0] == NULL)
- pwr->grp_clks[0] = pwr->grp_clks[1];
- /* put the AXI bus into asynchronous mode with the graphics cores */
- if (pdata->set_grp_async != NULL)
- pdata->set_grp_async();
- if (pdata->num_levels > KGSL_MAX_PWRLEVELS ||
- pdata->num_levels < 1) {
- KGSL_PWR_ERR(device, "invalid power level count: %d\n",
- pdata->num_levels);
- result = -EINVAL;
- goto done;
- }
- pwr->num_pwrlevels = pdata->num_levels;
- /* Initialize the user and thermal clock constraints */
- pwr->max_pwrlevel = 0;
- pwr->min_pwrlevel = pdata->num_levels - 2;
- pwr->thermal_pwrlevel = 0;
- pwr->active_pwrlevel = pdata->init_level;
- pwr->default_pwrlevel = pdata->init_level;
- pwr->init_pwrlevel = pdata->init_level;
- for (i = 0; i < pdata->num_levels; i++) {
- pwr->pwrlevels[i].gpu_freq =
- (pdata->pwrlevel[i].gpu_freq > 0) ?
- clk_round_rate(pwr->grp_clks[0],
- pdata->pwrlevel[i].
- gpu_freq) : 0;
- pwr->pwrlevels[i].bus_freq =
- pdata->pwrlevel[i].bus_freq;
- pwr->pwrlevels[i].io_fraction =
- pdata->pwrlevel[i].io_fraction;
- }
- /* Do not set_rate for targets in sync with AXI */
- if (pwr->pwrlevels[0].gpu_freq > 0)
- clk_set_rate(pwr->grp_clks[0], pwr->
- pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
- pwr->gpu_reg = regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(pwr->gpu_reg))
- pwr->gpu_reg = NULL;
- if (pwr->gpu_reg) {
- pwr->gpu_cx = regulator_get(&pdev->dev, "vddcx");
- if (IS_ERR(pwr->gpu_cx))
- pwr->gpu_cx = NULL;
- } else
- pwr->gpu_cx = NULL;
- pwr->power_flags = 0;
- pwr->idle_needed = pdata->idle_needed;
- pwr->interval_timeout = msecs_to_jiffies(pdata->idle_timeout);
- pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
- pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
- if (IS_ERR(pwr->ebi1_clk))
- pwr->ebi1_clk = NULL;
- else
- clk_set_rate(pwr->ebi1_clk,
- pwr->pwrlevels[pwr->active_pwrlevel].
- bus_freq);
- pwr->pm_qos_latency = pdata->pm_qos_latency;
- pm_runtime_enable(device->parentdev);
- if (pdata->bus_scale_table == NULL)
- return result;
- pwr->pcl = msm_bus_scale_register_client(pdata->
- bus_scale_table);
- if (!pwr->pcl) {
- KGSL_PWR_ERR(device,
- "msm_bus_scale_register_client failed: "
- "id %d table %pK", device->id,
- pdata->bus_scale_table);
- result = -EINVAL;
- goto done;
- }
- /* Set if independent bus BW voting is supported */
- pwr->bus_control = pdata->bus_control;
- /*
- * Pull the BW votes out of the bus table. They will be used to
- * calculate the ratio between the votes.
- */
- for (i = 0; i < pdata->bus_scale_table->num_usecases; i++) {
- struct msm_bus_paths *usecase =
- &pdata->bus_scale_table->usecase[i];
- struct msm_bus_vectors *vector = &usecase->vectors[0];
- if (vector->dst == MSM_BUS_SLAVE_EBI_CH0 &&
- vector->ib != 0) {
- for (k = 0; k < n; k++)
- if (vector->ib == pwr->bus_ib[k])
- break;
- /* if this is a new ib value, save it */
- if (k == n) {
- pwr->bus_ib[k] = vector->ib;
- n++;
- /* find which pwrlevels use this ib */
- for (m = 0; m < pwr->num_pwrlevels - 1; m++) {
- if (pdata->bus_scale_table->
- usecase[pwr->pwrlevels[m].
- bus_freq].vectors[0].ib
- == vector->ib)
- pwr->bus_index[m] = k;
- }
- printk("kgsl bus ib [%d] = %llu\n", k, vector->ib);
- }
- }
- }
- for (m = 0; m < pwr->num_pwrlevels - 1; m++)
- printk("kgsl bus index is %d for pwrlevel %d\n", pwr->bus_index[m], m);
- return result;
- clk_err:
- result = PTR_ERR(clk);
- KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
- clks[i].name, result);
- done:
- return result;
- }
- void kgsl_pwrctrl_close(struct kgsl_device *device)
- {
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- int i;
- KGSL_PWR_INFO(device, "close device %d\n", device->id);
- pm_runtime_disable(device->parentdev);
- clk_put(pwr->ebi1_clk);
- if (pwr->pcl)
- msm_bus_scale_unregister_client(pwr->pcl);
- pwr->pcl = 0;
- if (pwr->gpu_reg) {
- regulator_put(pwr->gpu_reg);
- pwr->gpu_reg = NULL;
- }
- if (pwr->gpu_cx) {
- regulator_put(pwr->gpu_cx);
- pwr->gpu_cx = NULL;
- }
- for (i = 1; i < KGSL_MAX_CLKS; i++)
- if (pwr->grp_clks[i]) {
- clk_put(pwr->grp_clks[i]);
- pwr->grp_clks[i] = NULL;
- }
- pwr->grp_clks[0] = NULL;
- pwr->power_flags = 0;
- }
- /**
- * kgsl_idle_check() - Work function for GPU interrupts and idle timeouts.
- * @device: The device
- *
- * This function is called for work that is queued by the interrupt
- * handler or the idle timer. It attempts to transition to a clocks
- * off state if the active_cnt is 0 and the hardware is idle.
- */
- void kgsl_idle_check(struct work_struct *work)
- {
- int delay = INIT_UDELAY;
- int requested_state;
- struct kgsl_device *device = container_of(work, struct kgsl_device,
- idle_check_ws);
- WARN_ON(device == NULL);
- if (device == NULL)
- return;
- kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
- kgsl_pwrscale_update(device);
- if (device->state == KGSL_STATE_ACTIVE
- || device->state == KGSL_STATE_NAP) {
- /*
- * If no user is explicitly trying to use the GPU
- * (active_cnt is zero), then loop with increasing delay,
- * waiting for the GPU to become idle.
- */
- while (!atomic_read(&device->active_cnt) &&
- (delay < MAX_UDELAY)) {
- requested_state = device->requested_state;
- if (!kgsl_pwrctrl_sleep(device))
- break;
- /*
- * If no new commands have been issued since the
- * last interrupt, stay in this loop waiting for
- * the GPU to become idle.
- */
- if (!device->pwrctrl.irq_last)
- break;
- kgsl_pwrctrl_request_state(device, requested_state);
- kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
- udelay(delay);
- delay *= 2;
- kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
- }
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
- if (device->state == KGSL_STATE_ACTIVE) {
- mod_timer(&device->idle_timer,
- jiffies +
- device->pwrctrl.interval_timeout);
- /*
- * If the GPU has been too busy to sleep, make sure
- * that is accurately reflected in the % busy numbers.
- */
- device->pwrctrl.clk_stats.no_nap_cnt++;
- if (device->pwrctrl.clk_stats.no_nap_cnt >
- UPDATE_BUSY) {
- kgsl_pwrctrl_busy_time(device, true);
- device->pwrctrl.clk_stats.no_nap_cnt = 0;
- }
- } else {
- device->pwrctrl.irq_last = 0;
- }
- }
- kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
- }
- EXPORT_SYMBOL(kgsl_idle_check);
- void kgsl_timer(unsigned long data)
- {
- struct kgsl_device *device = (struct kgsl_device *) data;
- KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
- if (device->requested_state != KGSL_STATE_SUSPEND) {
- if (device->pwrctrl.strtstp_sleepwake)
- kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
- else
- kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
- /* Have work run in a non-interrupt context. */
- queue_work(device->work_queue, &device->idle_check_ws);
- }
- }
- bool kgsl_pwrctrl_isenabled(struct kgsl_device *device)
- {
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- return (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags) != 0);
- }
- /**
- * kgsl_pre_hwaccess - Enforce preconditions for touching registers
- * @device: The device
- *
- * This function ensures that the correct lock is held and that the GPU
- * clock is on immediately before a register is read or written. Note
- * that this function does not check active_cnt because the registers
- * must be accessed during device start and stop, when the active_cnt
- * may legitimately be 0.
- */
- void kgsl_pre_hwaccess(struct kgsl_device *device)
- {
- /* In order to touch a register you must hold the device mutex...*/
- BUG_ON(!mutex_is_locked(&device->mutex));
- /* and have the clock on! */
- BUG_ON(!kgsl_pwrctrl_isenabled(device));
- }
- EXPORT_SYMBOL(kgsl_pre_hwaccess);
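- /*
-  * Illustrative usage sketch (not part of the original source): a register
-  * accessor is expected to hold the device mutex and keep the clocks on
-  * (for example via an active count reference) before touching registers:
-  *
-  *     kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
-  *     kgsl_pre_hwaccess(device);
-  *     ... read or write GPU registers ...
-  *     kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
-  */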
- static int
- _nap(struct kgsl_device *device)
- {
- struct kgsl_power_stats stats;
- switch (device->state) {
- case KGSL_STATE_ACTIVE:
- if (!device->ftbl->isidle(device)) {
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
- return -EBUSY;
- }
- /*
- * Read HW busy counters before going to NAP state.
- * The data might be used by power scale governors
- * independently of the HW activity. For example
- * the simple-on-demand governor will get the latest
- * busy_time data even if the gpu isn't active.
- */
- device->ftbl->power_stats(device, &stats);
- device->pwrscale.accum_stats.busy_time += stats.busy_time;
- kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
- kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
- kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
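- /* fall through */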
- case KGSL_STATE_NAP:
- case KGSL_STATE_SLEEP:
- case KGSL_STATE_SLUMBER:
- break;
- default:
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
- break;
- }
- return 0;
- }
- static void
- _sleep_accounting(struct kgsl_device *device)
- {
- kgsl_pwrctrl_busy_time(device, false);
- device->pwrctrl.clk_stats.start = ktime_set(0, 0);
- kgsl_pwrscale_sleep(device);
- }
- static int
- _sleep(struct kgsl_device *device)
- {
- switch (device->state) {
- case KGSL_STATE_ACTIVE:
- if (!device->ftbl->isidle(device)) {
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
- return -EBUSY;
- }
- /* fall through */
- case KGSL_STATE_NAP:
- kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
- kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
- _sleep_accounting(device);
- kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
- kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
- pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
- PM_QOS_DEFAULT_VALUE);
- break;
- case KGSL_STATE_SLEEP:
- case KGSL_STATE_SLUMBER:
- break;
- default:
- KGSL_PWR_WARN(device, "unhandled state %s\n",
- kgsl_pwrstate_to_str(device->state));
- break;
- }
- return 0;
- }
- static int
- _slumber(struct kgsl_device *device)
- {
- switch (device->state) {
- case KGSL_STATE_ACTIVE:
- if (!device->ftbl->isidle(device)) {
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
- return -EBUSY;
- }
- /* fall through */
- case KGSL_STATE_NAP:
- case KGSL_STATE_SLEEP:
- del_timer_sync(&device->idle_timer);
- /* make sure power is on to stop the device */
- kgsl_pwrctrl_enable(device);
- kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
- device->ftbl->suspend_context(device);
- device->ftbl->stop(device);
- _sleep_accounting(device);
- kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
- pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
- PM_QOS_DEFAULT_VALUE);
- break;
- case KGSL_STATE_SLUMBER:
- break;
- default:
- KGSL_PWR_WARN(device, "unhandled state %s\n",
- kgsl_pwrstate_to_str(device->state));
- break;
- }
- return 0;
- }
- /******************************************************************/
- /* Caller must hold the device mutex. */
- int kgsl_pwrctrl_sleep(struct kgsl_device *device)
- {
- int status = 0;
- KGSL_PWR_INFO(device, "sleep device %d\n", device->id);
- /* Work through the legal state transitions */
- switch (device->requested_state) {
- case KGSL_STATE_NAP:
- status = _nap(device);
- break;
- case KGSL_STATE_SLEEP:
- status = _sleep(device);
- break;
- case KGSL_STATE_SLUMBER:
- status = _slumber(device);
- break;
- default:
- KGSL_PWR_INFO(device, "bad state request 0x%x\n",
- device->requested_state);
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
- status = -EINVAL;
- break;
- }
- return status;
- }
- EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
- /**
- * kgsl_pwrctrl_wake() - Power up the GPU from a slumber/sleep state
- * @device - Pointer to the kgsl_device struct
- * @priority - Boolean flag to indicate that the GPU start should be run in the
- * higher priority thread
- *
- * Resume the GPU from a lower power state to ACTIVE. The caller of this
- * function must hold the kgsl_device mutex.
- */
- int kgsl_pwrctrl_wake(struct kgsl_device *device, int priority)
- {
- int status = 0;
- unsigned int context_id;
- unsigned int state = device->state;
- unsigned int ts_processed = 0xdeaddead;
- struct kgsl_context *context;
- kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
- switch (device->state) {
- case KGSL_STATE_SLUMBER:
- status = device->ftbl->start(device, priority);
- if (status) {
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
- KGSL_DRV_ERR(device, "start failed %d\n", status);
- break;
- }
- /* fall through */
- case KGSL_STATE_SLEEP:
- kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
- kgsl_pwrscale_wake(device);
- kgsl_sharedmem_readl(&device->memstore,
- (unsigned int *) &context_id,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context));
- context = kgsl_context_get(device, context_id);
- if (context)
- ts_processed = kgsl_readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED);
- KGSL_PWR_INFO(device, "Wake from %s state. CTXT: %d RTRD TS: %08X\n",
- kgsl_pwrstate_to_str(state),
- context ? context->id : -1, ts_processed);
- kgsl_context_put(context);
- /* fall through */
- case KGSL_STATE_NAP:
- /* Turn on the core clocks */
- kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
- /* Enable state before turning on irq */
- kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
- kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
- mod_timer(&device->idle_timer, jiffies +
- device->pwrctrl.interval_timeout);
- pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
- device->pwrctrl.pm_qos_latency);
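- /* fall through */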
- case KGSL_STATE_ACTIVE:
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
- break;
- case KGSL_STATE_INIT:
- break;
- default:
- KGSL_PWR_WARN(device, "unhandled state %s\n",
- kgsl_pwrstate_to_str(device->state));
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
- status = -EINVAL;
- break;
- }
- return status;
- }
- EXPORT_SYMBOL(kgsl_pwrctrl_wake);
- void kgsl_pwrctrl_enable(struct kgsl_device *device)
- {
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- /* Order pwrrail/clk sequence based upon platform */
- kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
- if (pwr->constraint.type == KGSL_CONSTRAINT_NONE)
- kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel);
- kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
- kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
- }
- EXPORT_SYMBOL(kgsl_pwrctrl_enable);
- void kgsl_pwrctrl_disable(struct kgsl_device *device)
- {
- /* Order pwrrail/clk sequence based upon platform */
- kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
- kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
- kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
- }
- EXPORT_SYMBOL(kgsl_pwrctrl_disable);
- void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
- {
- trace_kgsl_pwr_set_state(device, state);
- device->state = state;
- device->requested_state = KGSL_STATE_NONE;
- }
- EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
- void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
- {
- if (state != KGSL_STATE_NONE && state != device->requested_state)
- trace_kgsl_pwr_request_state(device, state);
- device->requested_state = state;
- }
- EXPORT_SYMBOL(kgsl_pwrctrl_request_state);
- const char *kgsl_pwrstate_to_str(unsigned int state)
- {
- switch (state) {
- case KGSL_STATE_NONE:
- return "NONE";
- case KGSL_STATE_INIT:
- return "INIT";
- case KGSL_STATE_ACTIVE:
- return "ACTIVE";
- case KGSL_STATE_NAP:
- return "NAP";
- case KGSL_STATE_SLEEP:
- return "SLEEP";
- case KGSL_STATE_SUSPEND:
- return "SUSPEND";
- case KGSL_STATE_SLUMBER:
- return "SLUMBER";
- default:
- break;
- }
- return "UNKNOWN";
- }
- EXPORT_SYMBOL(kgsl_pwrstate_to_str);
- /**
- * kgsl_active_count_get() - Increase the device active count
- * @device: Pointer to a KGSL device
- *
- * Increase the active count for the KGSL device and turn on
- * clocks if this is the first reference. Code paths that need
- * to touch the hardware or wait for the hardware to complete
- * an operation must hold an active count reference until they
- * are finished. An error code will be returned if waking the
- * device fails. The device mutex must be held while calling
- * this function.
- */
- int kgsl_active_count_get(struct kgsl_device *device)
- {
- int ret = 0;
- BUG_ON(!mutex_is_locked(&device->mutex));
- if ((atomic_read(&device->active_cnt) == 0) &&
- (device->state != KGSL_STATE_ACTIVE)) {
- kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
- wait_for_completion(&device->hwaccess_gate);
- kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
- ret = kgsl_pwrctrl_wake(device, 1);
- }
- if (ret == 0)
- atomic_inc(&device->active_cnt);
- trace_kgsl_active_count(device,
- (unsigned long) __builtin_return_address(0));
- return ret;
- }
- EXPORT_SYMBOL(kgsl_active_count_get);
- /**
- * kgsl_active_count_get_light() - Increase the device active count
- * @device: Pointer to a KGSL device
- *
- * Increase the active count for the KGSL device WITHOUT
- * turning on the clocks based on the assumption that the clocks are already
- * on from a previous active_count_get(). Currently this is only used for
- * creating kgsl_events.
- */
- int kgsl_active_count_get_light(struct kgsl_device *device)
- {
- if (atomic_inc_not_zero(&device->active_cnt) == 0) {
- dev_WARN_ONCE(device->dev, 1, "active count is 0!\n");
- return -EINVAL;
- }
- trace_kgsl_active_count(device,
- (unsigned long) __builtin_return_address(0));
- return 0;
- }
- EXPORT_SYMBOL(kgsl_active_count_get_light);
- /**
- * kgsl_active_count_put() - Decrease the device active count
- * @device: Pointer to a KGSL device
- *
- * Decrease the active count for the KGSL device and turn off
- * clocks if there are no remaining references. This function will
- * transition the device to NAP if there are no other pending state
- * changes. It also completes the suspend gate. The device mutex must
- * be held while calling this function.
- */
- void kgsl_active_count_put(struct kgsl_device *device)
- {
- BUG_ON(!mutex_is_locked(&device->mutex));
- BUG_ON(atomic_read(&device->active_cnt) == 0);
- if (atomic_dec_and_test(&device->active_cnt)) {
- if (device->state == KGSL_STATE_ACTIVE &&
- device->requested_state == KGSL_STATE_NONE) {
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
- queue_work(device->work_queue, &device->idle_check_ws);
- }
- mod_timer(&device->idle_timer,
- jiffies + device->pwrctrl.interval_timeout);
- } else {
- kgsl_pwrscale_update(device);
- }
- trace_kgsl_active_count(device,
- (unsigned long) __builtin_return_address(0));
- wake_up(&device->active_cnt_wq);
- }
- EXPORT_SYMBOL(kgsl_active_count_put);
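- /*
-  * Illustrative usage sketch (not part of the original source): code paths
-  * that need the hardware awake bracket the work with the active count API
-  * while holding the device mutex, e.g.:
-  *
-  *     ret = kgsl_active_count_get(device);
-  *     if (ret)
-  *             return ret;
-  *     ... touch the hardware or wait for it to finish ...
-  *     kgsl_active_count_put(device);
-  */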
- static int _check_active_count(struct kgsl_device *device, int count)
- {
- /* Return 0 if the active count is greater than the desired value */
- return atomic_read(&device->active_cnt) > count ? 0 : 1;
- }
- /**
- * kgsl_active_count_wait() - Wait for activity to finish.
- * @device: Pointer to a KGSL device
- * @count: Active count value to wait for
- *
- * Block until the active_cnt value hits the desired value
- */
- int kgsl_active_count_wait(struct kgsl_device *device, int count)
- {
- int result = 0;
- long wait_jiffies = msecs_to_jiffies(1000);
- BUG_ON(!mutex_is_locked(&device->mutex));
- while (atomic_read(&device->active_cnt) > count) {
- long ret;
- kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
- ret = wait_event_timeout(device->active_cnt_wq,
- _check_active_count(device, count), wait_jiffies);
- kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
- result = ret == 0 ? -ETIMEDOUT : 0;
- if (!result)
- wait_jiffies = ret;
- else
- break;
- }
- return result;
- }
- EXPORT_SYMBOL(kgsl_active_count_wait);