clock-local2.c

/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <mach/clk.h>
#include <mach/clk-provider.h>
#include <mach/clock-generic.h>

#include "clock-local2.h"
/*
 * When enabling/disabling a clock, check the halt bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define HALT_CHECK_MAX_LOOPS	500
/* For clocks without halt checking, wait this long after enables/disables. */
#define HALT_CHECK_DELAY_US	10

/*
 * When updating an RCG configuration, check the update bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define UPDATE_CHECK_MAX_LOOPS	500
DEFINE_SPINLOCK(local_clock_reg_lock);
struct clk_freq_tbl rcg_dummy_freq = F_END;

#define CMD_RCGR_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg)
#define CFG_RCGR_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg + 0x4)
#define M_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg + 0x8)
#define N_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg + 0xC)
#define D_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg + 0x10)
#define CBCR_REG(x)	(*(x)->base + (x)->cbcr_reg)
#define BCR_REG(x)	(*(x)->base + (x)->bcr_reg)
#define VOTE_REG(x)	(*(x)->base + (x)->vote_reg)

/*
 * Important clock bit positions and masks
 */
#define CMD_RCGR_ROOT_ENABLE_BIT	BIT(1)
#define CBCR_BRANCH_ENABLE_BIT		BIT(0)
#define CBCR_BRANCH_OFF_BIT		BIT(31)
#define CMD_RCGR_CONFIG_UPDATE_BIT	BIT(0)
#define CMD_RCGR_ROOT_STATUS_BIT	BIT(31)
#define BCR_BLK_ARES_BIT		BIT(0)
#define CBCR_HW_CTL_BIT			BIT(1)
#define CFG_RCGR_DIV_MASK		BM(4, 0)
#define CFG_RCGR_SRC_SEL_MASK		BM(10, 8)
#define MND_MODE_MASK			BM(13, 12)
#define MND_DUAL_EDGE_MODE_BVAL		BVAL(13, 12, 0x2)
#define CMD_RCGR_CONFIG_DIRTY_MASK	BM(7, 4)
#define CBCR_CDIV_LSB			16
#define CBCR_CDIV_MSB			24
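
/*
 * Note: BM(msb, lsb) and BVAL(msb, lsb, val) come from the mach-level clock
 * headers included above. As used throughout this file, BM() builds a
 * contiguous bitmask and BVAL() shifts a value into that field, e.g.
 * (assuming the usual MSM definitions):
 *
 *	BM(4, 0)         == 0x0000001f	CFG_RCGR divider field
 *	BM(10, 8)        == 0x00000700	CFG_RCGR source select field
 *	BVAL(10, 8, 0x3) == 0x00000300	source #3, already in position
 */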
enum branch_state {
	BRANCH_ON,
	BRANCH_OFF,
};

/*
 * RCG functions
 */

/*
 * Update an RCG with a new configuration. This may include a new M, N, or D
 * value, source selection or pre-divider value.
 */
static void rcg_update_config(struct rcg_clk *rcg)
{
	u32 cmd_rcgr_regval, count;

	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	cmd_rcgr_regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));

	/* Wait for update to take effect */
	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
		if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
				CMD_RCGR_CONFIG_UPDATE_BIT))
			return;
		udelay(1);
	}

	WARN(count == 0, "%s: rcg didn't update its configuration.",
		rcg->c.dbg_name);
}
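
/*
 * A short note on the handshake above (an observation based on how this file
 * uses the bit, not a statement from the original comments): software sets
 * CMD_RCGR[0] (UPDATE) and the hardware clears it once the new
 * source/divider/M:N:D configuration has been latched, so polling the bit
 * back to zero confirms the switch completed.
 */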
/* RCG set rate function for clocks with Half Integer Dividers. */
void set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
/* RCG set rate function for clocks with MND & Half Integer Dividers. */
void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	writel_relaxed(nf->m_val, M_REG(rcg));
	writel_relaxed(nf->n_val, N_REG(rcg));
	writel_relaxed(nf->d_val, D_REG(rcg));

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;

	/* Activate or disable the M/N:D divider as necessary */
	cfg_regval &= ~MND_MODE_MASK;
	if (nf->n_val != 0)
		cfg_regval |= MND_DUAL_EDGE_MODE_BVAL;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
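
/*
 * Register encoding used by the M/N:D counter, as this file programs it (see
 * set_rate_edp_pixel() and pixel_rcg_handoff() below): for a ratio of m/n,
 * M holds m, N holds ~(n - m), and D holds ~n (one's complement). For
 * example, m/n = 2/5 is written as M = 2, N = ~3, D = ~5, and n is recovered
 * at handoff as (~N) + M.
 */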
static int rcg_clk_prepare(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	WARN(rcg->current_freq == &rcg_dummy_freq,
		"Attempting to prepare %s before setting its rate. "
		"Set the rate first!\n", rcg->c.dbg_name);

	return 0;
}

static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct clk_freq_tbl *cf, *nf;
	struct rcg_clk *rcg = to_rcg_clk(c);
	int rc;
	unsigned long flags;

	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	cf = rcg->current_freq;

	rc = __clk_pre_reparent(c, nf->src_clk, &flags);
	if (rc)
		return rc;

	BUG_ON(!rcg->set_rate);

	/* Perform clock-specific frequency switch operations. */
	rcg->set_rate(rcg, nf);
	rcg->current_freq = nf;
	c->parent = nf->src_clk;

	__clk_post_reparent(c, cf->src_clk, &flags);

	return 0;
}
/*
 * Return a supported rate that's at least the specified rate or
 * the max supported rate if the specified rate is larger than the
 * max supported rate.
 */
static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *f;

	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
		if (f->freq_hz >= rate)
			return f->freq_hz;

	f--;
	return f->freq_hz;
}
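
/*
 * Note (an observation, not from the original comments): the scan above
 * assumes the frequency table is sorted in ascending order, so the first
 * entry at or above the requested rate wins, and the final f-- falls back to
 * the table's last (largest) frequency when the request exceeds it.
 */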
/* Return the nth supported frequency for a given clock. */
static long rcg_clk_list_rate(struct clk *c, unsigned n)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
		return -ENXIO;

	return (rcg->freq_tbl + n)->freq_hz;
}

static struct clk *_rcg_clk_get_parent(struct rcg_clk *rcg, int has_mnd)
{
	u32 n_regval = 0, m_regval = 0, d_regval = 0;
	u32 cfg_regval;
	struct clk_freq_tbl *freq;
	u32 cmd_rcgr_regval;

	/* Is there a pending configuration? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
		return NULL;

	/* Get values of m, n, d, div and src_sel registers. */
	if (has_mnd) {
		m_regval = readl_relaxed(M_REG(rcg));
		n_regval = readl_relaxed(N_REG(rcg));
		d_regval = readl_relaxed(D_REG(rcg));

		/*
		 * The n and d values stored in the frequency tables are sign
		 * extended to 32 bits. The n and d values in the registers are
		 * sign extended to 8 or 16 bits. Sign extend the values read
		 * from the registers so that they can be compared to the
		 * values in the frequency tables.
		 */
		n_regval |= (n_regval >> 8) ? BM(31, 16) : BM(31, 8);
		d_regval |= (d_regval >> 8) ? BM(31, 16) : BM(31, 8);
	}
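
	/*
	 * Worked example of the sign extension above (illustrative values):
	 * an 8-bit N register holding ~(n - m) = 0xFD reads back as
	 * 0x000000FD; OR-ing in BM(31, 8) yields 0xFFFFFFFD, matching the
	 * 32-bit one's-complement n_val stored in the frequency tables.
	 */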
	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= CFG_RCGR_SRC_SEL_MASK | CFG_RCGR_DIV_MASK
				| MND_MODE_MASK;

	/* If mnd counter is present, check if it's in use. */
	has_mnd = (has_mnd) &&
		((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL);

	/*
	 * Clear out the mn counter mode bits since we now want to compare only
	 * the source mux selection and pre-divider values in the registers.
	 */
	cfg_regval &= ~MND_MODE_MASK;

	/* Figure out what rate the rcg is running at */
	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		if (freq->div_src_val != cfg_regval)
			continue;
		if (has_mnd) {
			if (freq->m_val != m_regval)
				continue;
			if (freq->n_val != n_regval)
				continue;
			if (freq->d_val != d_regval)
				continue;
		}
		break;
	}

	/* No known frequency found */
	if (freq->freq_hz == FREQ_END)
		return NULL;

	rcg->current_freq = freq;
	return freq->src_clk;
}
static enum handoff _rcg_clk_handoff(struct rcg_clk *rcg)
{
	u32 cmd_rcgr_regval;

	if (rcg->current_freq && rcg->current_freq->freq_hz != FREQ_END)
		rcg->c.rate = rcg->current_freq->freq_hz;

	/* Is the root enabled? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if ((cmd_rcgr_regval & CMD_RCGR_ROOT_STATUS_BIT))
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

static struct clk *rcg_mnd_clk_get_parent(struct clk *c)
{
	return _rcg_clk_get_parent(to_rcg_clk(c), 1);
}

static struct clk *rcg_clk_get_parent(struct clk *c)
{
	return _rcg_clk_get_parent(to_rcg_clk(c), 0);
}

static enum handoff rcg_mnd_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c));
}

static enum handoff rcg_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c));
}

#define BRANCH_CHECK_MASK	BM(31, 28)
#define BRANCH_ON_VAL		BVAL(31, 28, 0x0)
#define BRANCH_OFF_VAL		BVAL(31, 28, 0x8)
#define BRANCH_NOC_FSM_ON_VAL	BVAL(31, 28, 0x2)

/*
 * Branch clock functions
 */
static void branch_clk_halt_check(u32 halt_check, const char *clk_name,
				void __iomem *cbcr_reg,
				enum branch_state br_status)
{
	char *status_str = (br_status == BRANCH_ON) ? "off" : "on";

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers. It's also needed in the udelay() case to ensure
	 * the delay starts after the branch disable.
	 */
	mb();

	if (halt_check == DELAY || halt_check == HALT_VOTED) {
		udelay(HALT_CHECK_DELAY_US);
	} else if (halt_check == HALT) {
		int count;
		u32 val;
		for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
			val = readl_relaxed(cbcr_reg);
			val &= BRANCH_CHECK_MASK;
			switch (br_status) {
			case BRANCH_ON:
				if (val == BRANCH_ON_VAL
						|| val == BRANCH_NOC_FSM_ON_VAL)
					return;
				break;
			case BRANCH_OFF:
				if (val == BRANCH_OFF_VAL)
					return;
				break;
			}
			udelay(1);
		}
		WARN(count == 0, "%s status stuck %s", clk_name, status_str);
	}
}
static int branch_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 cbcr_val;
	struct branch_clk *branch = to_branch_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cbcr_val = readl_relaxed(CBCR_REG(branch));
	cbcr_val |= CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(cbcr_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to enable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_ON);

	return 0;
}

static void branch_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct branch_clk *branch = to_branch_clk(c);
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(CBCR_REG(branch));
	reg_val &= ~CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(reg_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to disable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_OFF);
}

static int branch_cdiv_set_rate(struct branch_clk *branch, unsigned long rate)
{
	unsigned long flags;
	u32 regval;

	if (rate > branch->max_div)
		return -EINVAL;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(CBCR_REG(branch));
	regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
	regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, rate);
	writel_relaxed(regval, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}
static int branch_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch_cdiv_set_rate(branch, rate);

	if (!branch->has_sibling)
		return clk_set_rate(c->parent, rate);

	return -EPERM;
}

static long branch_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return rate <= (branch->max_div) ? rate : -EPERM;

	if (!branch->has_sibling)
		return clk_round_rate(c->parent, rate);

	return -EPERM;
}

static unsigned long branch_clk_get_rate(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch->c.rate;

	return clk_get_rate(c->parent);
}

static long branch_clk_list_rate(struct clk *c, unsigned n)
{
	int level, fmax = 0, rate;
	struct branch_clk *branch = to_branch_clk(c);
	struct clk *parent = c->parent;

	if (branch->has_sibling == 1)
		return -ENXIO;

	if (!parent || !parent->ops->list_rate)
		return -ENXIO;

	/* Find max frequency supported within voltage constraints. */
	if (!parent->vdd_class) {
		fmax = INT_MAX;
	} else {
		for (level = 0; level < parent->num_fmax; level++)
			if (parent->fmax[level])
				fmax = parent->fmax[level];
	}

	rate = parent->ops->list_rate(parent, n);
	if (rate <= fmax)
		return rate;
	else
		return -ENXIO;
}
static enum handoff branch_clk_handoff(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);
	u32 cbcr_regval;

	cbcr_regval = readl_relaxed(CBCR_REG(branch));
	if ((cbcr_regval & CBCR_BRANCH_OFF_BIT))
		return HANDOFF_DISABLED_CLK;

	if (branch->max_div) {
		cbcr_regval &= BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
		cbcr_regval >>= CBCR_CDIV_LSB;
		c->rate = cbcr_regval;
	} else if (!branch->has_sibling) {
		c->rate = clk_get_rate(c->parent);
	}

	return HANDOFF_ENABLED_CLK;
}

static int __branch_clk_reset(void __iomem *bcr_reg,
				enum clk_reset_action action)
{
	int ret = 0;
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(bcr_reg);
	switch (action) {
	case CLK_RESET_ASSERT:
		reg_val |= BCR_BLK_ARES_BIT;
		break;
	case CLK_RESET_DEASSERT:
		reg_val &= ~BCR_BLK_ARES_BIT;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, bcr_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Make sure write is issued before returning. */
	mb();

	return ret;
}
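
/*
 * A minimal usage sketch (illustrative, not from the original file): a
 * driver typically cycles a block through reset with the clk_reset() API
 * from <mach/clk.h>, which lands here for branch clocks:
 *
 *	clk_reset(clk, CLK_RESET_ASSERT);
 *	udelay(1);
 *	clk_reset(clk, CLK_RESET_DEASSERT);
 */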
static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (!branch->bcr_reg)
		return -EPERM;

	return __branch_clk_reset(BCR_REG(branch), action);
}
static int branch_clk_set_flags(struct clk *c, unsigned flags)
{
	u32 cbcr_val;
	unsigned long irq_flags;
	struct branch_clk *branch = to_branch_clk(c);
	int delay_us = 0, ret = 0;

	spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
	cbcr_val = readl_relaxed(CBCR_REG(branch));
	switch (flags) {
	case CLKFLAG_RETAIN_PERIPH:
		cbcr_val |= BIT(13);
		delay_us = 1;
		break;
	case CLKFLAG_NORETAIN_PERIPH:
		cbcr_val &= ~BIT(13);
		break;
	case CLKFLAG_RETAIN_MEM:
		cbcr_val |= BIT(14);
		delay_us = 1;
		break;
	case CLKFLAG_NORETAIN_MEM:
		cbcr_val &= ~BIT(14);
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(cbcr_val, CBCR_REG(branch));

	/* Make sure power is enabled before returning. */
	mb();
	udelay(delay_us);

	spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);

	return ret;
}
/*
 * Votable clock functions
 */
static int local_vote_clk_reset(struct clk *c, enum clk_reset_action action)
{
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	if (!vclk->bcr_reg) {
		WARN(1, "clk_reset called on an unsupported clock (%s)\n",
			c->dbg_name);
		return -EPERM;
	}
	return __branch_clk_reset(BCR_REG(vclk), action);
}
static int local_vote_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena |= vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	branch_clk_halt_check(vclk->halt_check, c->dbg_name, CBCR_REG(vclk),
				BRANCH_ON);

	return 0;
}

static void local_vote_clk_disable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena &= ~vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static enum handoff local_vote_clk_handoff(struct clk *c)
{
	struct local_vote_clk *vclk = to_local_vote_clk(c);
	u32 vote_regval;

	/* Is the branch voted on by apps? */
	vote_regval = readl_relaxed(VOTE_REG(vclk));
	if (!(vote_regval & vclk->en_mask))
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}
struct frac_entry {
	int num;
	int den;
};

static struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{52, 295},	/* 119 M */
	{11, 57},	/* 130.25 M */
	{63, 307},	/* 138.50 M */
	{11, 50},	/* 148.50 M */
	{47, 206},	/* 154 M */
	{31, 100},	/* 209.25 M */
	{89, 225},	/* 267.00 M */
	{107, 269},	/* 268.50 M */
	/* {55, 136},	   272.977 M */
	{151, 372},	/* 274 M */
	{0, 0},
};

static struct frac_entry frac_table_810m[] = {	/* link rate of 162M */
	{31, 211},	/* 119 M */
	{32, 199},	/* 130.25 M */
	{63, 307},	/* 138.50 M */
	{11, 60},	/* 148.50 M */
	{50, 263},	/* 154 M */
	{31, 120},	/* 209.25 M */
	{119, 359},	/* 268.50 M */
	{0, 0},
};
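
/*
 * How these tables are used (derived from set_rate_edp_pixel() below): for a
 * requested pixel rate r, an entry {num, den} matches when the PLL rate is
 * within +/-100 kHz of r * den / num, i.e. the produced pixel clock is
 * src_rate * num / den. For example, with the 675 MHz source, {11, 50}
 * yields 675 MHz * 11 / 50 = 148.5 MHz.
 */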
int set_rate_edp_pixel(struct clk *clk, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	struct clk_freq_tbl *pixel_freq = rcg->current_freq;
	struct frac_entry *frac;
	int delta = 100000;
	s64 request;
	s64 src_rate;

	src_rate = clk_get_rate(clk->parent);

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	while (frac->num) {
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);

		pr_info("%s rate: %lu request: %lld delta: %d src_rate: %lld",
			__func__, rate, request, delta, src_rate);

		if ((src_rate < (request - delta)) ||
				(src_rate > (request + delta))) {
			frac++;
			continue;
		}

		pixel_freq->div_src_val &= ~BM(4, 0);
		if (frac->den == frac->num) {
			pixel_freq->m_val = 0;
			pixel_freq->n_val = 0;
		} else {
			pixel_freq->m_val = frac->num;
			pixel_freq->n_val = ~(frac->den - frac->num);
			pixel_freq->d_val = ~frac->den;
		}
		set_rate_mnd(rcg, pixel_freq);
		return 0;
	}

	pr_info("%s fail", __func__);
	return -EINVAL;
}
enum handoff byte_rcg_handoff(struct clk *clk)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	u32 div_val;
	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);

	/* If the pre-divider is used, find the rate after the division */
	div_val = readl_relaxed(CFG_RCGR_REG(rcg)) & CFG_RCGR_DIV_MASK;
	if (div_val > 1)
		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
	else
		pre_div_rate = parent_rate;

	clk->rate = pre_div_rate;

	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

static int set_rate_byte(struct clk *clk, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	struct clk *pll = clk->parent;
	unsigned long source_rate, div;
	struct clk_freq_tbl *byte_freq = rcg->current_freq;
	int rc;

	if (rate == 0)
		return -EINVAL;

	rc = clk_set_rate(pll, rate);
	if (rc)
		return rc;

	source_rate = clk_round_rate(pll, rate);
	if ((2 * source_rate) % rate)
		return -EINVAL;

	div = ((2 * source_rate) / rate) - 1;
	if (div > CFG_RCGR_DIV_MASK)
		return -EINVAL;

	byte_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
	byte_freq->div_src_val |= BVAL(4, 0, div);
	set_rate_hid(rcg, byte_freq);

	return 0;
}
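
/*
 * Half-integer divider encoding, worked through (derived from the math in
 * set_rate_byte() above and the *_rcg_handoff() functions): the 5-bit
 * CFG_RCGR divider field holds (2 * divider) - 1, so odd field values encode
 * integer dividers and even values encode half steps. For example, source =
 * 500 MHz and rate = 200 MHz gives div = (2 * 500 / 200) - 1 = 4, i.e.
 * divide-by-2.5, which handoff approximates as (4 + 1) >> 1 in integer math.
 */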
enum handoff pixel_rcg_handoff(struct clk *clk)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	u32 div_val = 0, mval = 0, nval = 0, cfg_regval;
	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));

	/* If the pre-divider is used, find the rate after the division */
	div_val = cfg_regval & CFG_RCGR_DIV_MASK;
	if (div_val > 1)
		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
	else
		pre_div_rate = parent_rate;

	clk->rate = pre_div_rate;

	/*
	 * Pixel clocks have one frequency entry in their frequency table.
	 * Update that entry.
	 */
	if (rcg->current_freq) {
		rcg->current_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
		rcg->current_freq->div_src_val |= div_val;
	}

	/* If MND is used, find the rate after the MND division */
	if ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL) {
		mval = readl_relaxed(M_REG(rcg));
		nval = readl_relaxed(N_REG(rcg));
		if (!nval)
			return HANDOFF_DISABLED_CLK;
		nval = (~nval) + mval;
		if (rcg->current_freq) {
			rcg->current_freq->n_val = ~(nval - mval);
			rcg->current_freq->m_val = mval;
			rcg->current_freq->d_val = ~nval;
		}
		clk->rate = (pre_div_rate * mval) / nval;
	}

	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}
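
/*
 * M/N recovery math in the handoff above, with example numbers (assuming the
 * N register reads back the full 32-bit one's-complement value that
 * set_rate_mnd() wrote): N holds ~(n - m), so nval = (~nval) + mval
 * reconstructs n. With m = 2 and N = ~3, (~(~3)) + 2 = 5 = n, and the rate
 * becomes pre_div_rate * 2 / 5.
 */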
static int set_rate_pixel(struct clk *clk, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	struct clk *pll = clk->parent;
	unsigned long source_rate, div;
	struct clk_freq_tbl *pixel_freq = rcg->current_freq;
	int rc;

	if (rate == 0)
		return -EINVAL;

	rc = clk_set_rate(pll, rate);
	if (rc)
		return rc;

	source_rate = clk_round_rate(pll, rate);
	if ((2 * source_rate) % rate)
		return -EINVAL;

	div = ((2 * source_rate) / rate) - 1;
	if (div > CFG_RCGR_DIV_MASK)
		return -EINVAL;

	pixel_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
	pixel_freq->div_src_val |= BVAL(4, 0, div);
	set_rate_mnd(rcg, pixel_freq);

	return 0;
}

/*
 * Unlike other clocks, the HDMI rate is adjusted through PLL
 * re-programming. It is also routed through an HID divider.
 */
static int rcg_clk_set_rate_hdmi(struct clk *c, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *nf = rcg->freq_tbl;
	int rc;

	rc = clk_set_rate(nf->src_clk, rate);
	if (rc < 0)
		goto out;
	set_rate_hid(rcg, nf);

	rcg->current_freq = nf;
out:
	return rc;
}
static struct clk *edp_clk_get_parent(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk *clk;
	struct clk_freq_tbl *freq;
	u32 rate;

	/* Figure out what rate the rcg is running at */
	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		clk = freq->src_clk;
		if (clk && clk->ops->get_rate) {
			rate = clk->ops->get_rate(clk);
			if (rate == freq->freq_hz)
				break;
		}
	}

	/* No known frequency found */
	if (freq->freq_hz == FREQ_END)
		return NULL;

	rcg->current_freq = freq;
	return freq->src_clk;
}

static struct clk *rcg_hdmi_clk_get_parent(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *freq = rcg->freq_tbl;
	u32 cmd_rcgr_regval;

	/* Is there a pending configuration? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
		return NULL;

	rcg->current_freq->freq_hz = clk_get_rate(c->parent);

	return freq->src_clk;
}
static DEFINE_SPINLOCK(mux_reg_lock);

static int mux_reg_enable(struct mux_clk *clk)
{
	u32 regval;
	unsigned long flags;
	u32 offset = clk->en_reg ? clk->en_offset : clk->offset;

	spin_lock_irqsave(&mux_reg_lock, flags);
	regval = readl_relaxed(*clk->base + offset);
	regval |= clk->en_mask;
	writel_relaxed(regval, *clk->base + offset);
	/* Ensure enable request goes through before returning */
	mb();
	spin_unlock_irqrestore(&mux_reg_lock, flags);

	return 0;
}

static void mux_reg_disable(struct mux_clk *clk)
{
	u32 regval;
	unsigned long flags;
	u32 offset = clk->en_reg ? clk->en_offset : clk->offset;

	spin_lock_irqsave(&mux_reg_lock, flags);
	regval = readl_relaxed(*clk->base + offset);
	regval &= ~clk->en_mask;
	writel_relaxed(regval, *clk->base + offset);
	spin_unlock_irqrestore(&mux_reg_lock, flags);
}

static int mux_reg_set_mux_sel(struct mux_clk *clk, int sel)
{
	u32 regval;
	unsigned long flags;

	spin_lock_irqsave(&mux_reg_lock, flags);
	regval = readl_relaxed(*clk->base + clk->offset);
	regval &= ~(clk->mask << clk->shift);
	regval |= (sel & clk->mask) << clk->shift;
	writel_relaxed(regval, *clk->base + clk->offset);
	/* Ensure switch request goes through before returning */
	mb();
	spin_unlock_irqrestore(&mux_reg_lock, flags);

	return 0;
}
static int mux_reg_get_mux_sel(struct mux_clk *clk)
{
	u32 regval = readl_relaxed(*clk->base + clk->offset);

	/* Return the full selector field, not a boolean, to mirror
	 * mux_reg_set_mux_sel() for muxes with more than two inputs. */
	return (regval >> clk->shift) & clk->mask;
}
static bool mux_reg_is_enabled(struct mux_clk *clk)
{
	u32 regval = readl_relaxed(*clk->base + clk->offset);

	return !!(regval & clk->en_mask);
}

struct clk_ops clk_ops_empty;

struct clk_ops clk_ops_rcg = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
	.get_parent = rcg_clk_get_parent,
};

struct clk_ops clk_ops_rcg_mnd = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_mnd_clk_handoff,
	.get_parent = rcg_mnd_clk_get_parent,
};

struct clk_ops clk_ops_pixel = {
	.enable = rcg_clk_prepare,
	.set_rate = set_rate_pixel,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = pixel_rcg_handoff,
};

struct clk_ops clk_ops_edppixel = {
	.enable = rcg_clk_prepare,
	.set_rate = set_rate_edp_pixel,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = pixel_rcg_handoff,
};

struct clk_ops clk_ops_byte = {
	.enable = rcg_clk_prepare,
	.set_rate = set_rate_byte,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = byte_rcg_handoff,
};

struct clk_ops clk_ops_rcg_hdmi = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate_hdmi,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
	.get_parent = rcg_hdmi_clk_get_parent,
};

struct clk_ops clk_ops_rcg_edp = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate_hdmi,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
	.get_parent = edp_clk_get_parent,
};

struct clk_ops clk_ops_branch = {
	.enable = branch_clk_enable,
	.disable = branch_clk_disable,
	.set_rate = branch_clk_set_rate,
	.get_rate = branch_clk_get_rate,
	.list_rate = branch_clk_list_rate,
	.round_rate = branch_clk_round_rate,
	.reset = branch_clk_reset,
	.set_flags = branch_clk_set_flags,
	.handoff = branch_clk_handoff,
};

struct clk_ops clk_ops_vote = {
	.enable = local_vote_clk_enable,
	.disable = local_vote_clk_disable,
	.reset = local_vote_clk_reset,
	.handoff = local_vote_clk_handoff,
};

struct clk_mux_ops mux_reg_ops = {
	.enable = mux_reg_enable,
	.disable = mux_reg_disable,
	.set_mux_sel = mux_reg_set_mux_sel,
	.get_mux_sel = mux_reg_get_mux_sel,
	.is_enabled = mux_reg_is_enabled,
};