clk-iproc-pll.c

/*
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clkdev.h>
#include <linux/of_address.h>
#include <linux/delay.h>

#include "clk-iproc.h"

#define PLL_VCO_HIGH_SHIFT 19
#define PLL_VCO_LOW_SHIFT 30

/*
 * PLL MACRO_SELECT modes 0 to 5 choose pre-calculated PLL output frequencies
 * from a look-up table. Mode 7 allows the user to manipulate the PLL clock
 * dividers directly.
 */
#define PLL_USER_MODE 7

/* number of delay loops waiting for PLL to lock */
#define LOCK_DELAY 100

/* number of VCO frequency bands */
#define NUM_FREQ_BANDS 8

#define NUM_KP_BANDS 3

enum kp_band {
	KP_BAND_MID = 0,
	KP_BAND_HIGH,
	KP_BAND_HIGH_HIGH
};

static const unsigned int kp_table[NUM_KP_BANDS][NUM_FREQ_BANDS] = {
	{ 5, 6, 6, 7, 7, 8, 9, 10 },
	{ 4, 4, 5, 5, 6, 7, 8, 9 },
	{ 4, 5, 5, 6, 7, 8, 9, 10 },
};

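/*
 * Reference frequency bands in Hz. Each entry is a [lower, upper) range that
 * selects one column of kp_table above.
 */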
static const unsigned long ref_freq_table[NUM_FREQ_BANDS][2] = {
	{ 10000000, 12500000 },
	{ 12500000, 15000000 },
	{ 15000000, 20000000 },
	{ 20000000, 25000000 },
	{ 25000000, 50000000 },
	{ 50000000, 75000000 },
	{ 75000000, 100000000 },
	{ 100000000, 125000000 },
};

enum vco_freq_range {
	VCO_LOW       = 700000000U,
	VCO_MID       = 1200000000U,
	VCO_HIGH      = 2200000000U,
	VCO_HIGH_HIGH = 3100000000U,
	VCO_MAX       = 4000000000U,
};

struct iproc_pll;

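/*
 * Per-clock state for both the PLL itself and its leaf (channel) clocks,
 * wrapped around the common clk_hw handle. @ctrl is only used by leaf clocks.
 */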
struct iproc_clk {
	struct clk_hw hw;
	const char *name;
	struct iproc_pll *pll;
	unsigned long rate;
	const struct iproc_clk_ctrl *ctrl;
};

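/*
 * Per-PLL state: iomapped register bases, the register layout description,
 * the table of supported VCO rates, and the clocks registered for this PLL.
 */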
struct iproc_pll {
	void __iomem *status_base;
	void __iomem *control_base;
	void __iomem *pwr_base;
	void __iomem *asiu_base;

	const struct iproc_pll_ctrl *ctrl;
	const struct iproc_pll_vco_param *vco_param;
	unsigned int num_vco_entries;

	struct clk_hw_onecell_data *clk_data;
	struct iproc_clk *clks;
};

#define to_iproc_clk(hw) container_of(hw, struct iproc_clk, hw)

/*
 * Based on the target frequency, find a match from the VCO frequency parameter
 * table and return its index
 */
static int pll_get_rate_index(struct iproc_pll *pll, unsigned int target_rate)
{
	int i;

	for (i = 0; i < pll->num_vco_entries; i++)
		if (target_rate == pll->vco_param[i].rate)
			break;

	if (i >= pll->num_vco_entries)
		return -EINVAL;

	return i;
}

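/*
 * Return the Kp loop-filter value for the band that contains the given
 * reference frequency, or -EINVAL if the frequency is out of range.
 */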
static int get_kp(unsigned long ref_freq, enum kp_band kp_index)
{
	int i;

	if (ref_freq < ref_freq_table[0][0])
		return -EINVAL;

	for (i = 0; i < NUM_FREQ_BANDS; i++) {
		if (ref_freq >= ref_freq_table[i][0] &&
		    ref_freq < ref_freq_table[i][1])
			return kp_table[kp_index][i];
	}

	return -EINVAL;
}

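/*
 * Poll the PLL lock bit in the status register, for up to LOCK_DELAY
 * iterations of 10 us each. Returns 0 once locked, -EIO on timeout.
 */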
static int pll_wait_for_lock(struct iproc_pll *pll)
{
	int i;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;

	for (i = 0; i < LOCK_DELAY; i++) {
		u32 val = readl(pll->status_base + ctrl->status.offset);

		if (val & (1 << ctrl->status.shift))
			return 0;
		udelay(10);
	}

	return -EIO;
}

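/*
 * Register write helper. On controllers flagged with IPROC_CLK_NEEDS_READ_BACK
 * the write to the status/control block is read back, to make sure it has
 * reached the hardware before continuing.
 */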
static void iproc_pll_write(const struct iproc_pll *pll, void __iomem *base,
			    const u32 offset, u32 val)
{
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;

	writel(val, base + offset);

	if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK &&
		     (base == pll->status_base || base == pll->control_base)))
		val = readl(base + offset);
}

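/*
 * Power down the PLL: gate it at the ASIU top level and/or through the
 * embedded power control bits when those are present, and, on SoCs with a
 * separate power register, isolate the output and power down the core.
 */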
static void __pll_disable(struct iproc_pll *pll)
{
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	u32 val;

	if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
		val = readl(pll->asiu_base + ctrl->asiu.offset);
		val &= ~(1 << ctrl->asiu.en_shift);
		iproc_pll_write(pll, pll->asiu_base, ctrl->asiu.offset, val);
	}

	if (ctrl->flags & IPROC_CLK_EMBED_PWRCTRL) {
		val = readl(pll->control_base + ctrl->aon.offset);
		val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
		iproc_pll_write(pll, pll->control_base, ctrl->aon.offset, val);
	}

	if (pll->pwr_base) {
		/* latch input value so core power can be shut down */
		val = readl(pll->pwr_base + ctrl->aon.offset);
		val |= 1 << ctrl->aon.iso_shift;
		iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);

		/* power down the core */
		val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
		iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
	}
}

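/*
 * Power up the PLL: clear the embedded power-down bits and/or power up the
 * core through the separate power register, then ungate it at the ASIU top
 * level where required.
 */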
static int __pll_enable(struct iproc_pll *pll)
{
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	u32 val;

	if (ctrl->flags & IPROC_CLK_EMBED_PWRCTRL) {
		val = readl(pll->control_base + ctrl->aon.offset);
		val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
		iproc_pll_write(pll, pll->control_base, ctrl->aon.offset, val);
	}

	if (pll->pwr_base) {
		/* power up the PLL and make sure it's not latched */
		val = readl(pll->pwr_base + ctrl->aon.offset);
		val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
		val &= ~(1 << ctrl->aon.iso_shift);
		iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
	}

	/* certain PLLs also need to be ungated from the ASIU top level */
	if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
		val = readl(pll->asiu_base + ctrl->asiu.offset);
		val |= (1 << ctrl->asiu.en_shift);
		iproc_pll_write(pll, pll->asiu_base, ctrl->asiu.offset, val);
	}

	return 0;
}

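/*
 * Assert both the PLL reset and the post-divider reset. The polarity of the
 * reset bits depends on IPROC_CLK_PLL_RESET_ACTIVE_LOW.
 */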
static void __pll_put_in_reset(struct iproc_pll *pll)
{
	u32 val;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;

	val = readl(pll->control_base + reset->offset);
	if (ctrl->flags & IPROC_CLK_PLL_RESET_ACTIVE_LOW)
		val |= BIT(reset->reset_shift) | BIT(reset->p_reset_shift);
	else
		val &= ~(BIT(reset->reset_shift) | BIT(reset->p_reset_shift));
	iproc_pll_write(pll, pll->control_base, reset->offset, val);
}

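/*
 * Program the Ki/Kp/Ka loop-filter coefficients and then release the PLL and
 * post-divider resets.
 */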
static void __pll_bring_out_reset(struct iproc_pll *pll, unsigned int kp,
				  unsigned int ka, unsigned int ki)
{
	u32 val;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;
	const struct iproc_pll_dig_filter_ctrl *dig_filter = &ctrl->dig_filter;

	val = readl(pll->control_base + dig_filter->offset);
	val &= ~(bit_mask(dig_filter->ki_width) << dig_filter->ki_shift |
		 bit_mask(dig_filter->kp_width) << dig_filter->kp_shift |
		 bit_mask(dig_filter->ka_width) << dig_filter->ka_shift);
	val |= ki << dig_filter->ki_shift | kp << dig_filter->kp_shift |
	       ka << dig_filter->ka_shift;
	iproc_pll_write(pll, pll->control_base, dig_filter->offset, val);

	val = readl(pll->control_base + reset->offset);
	if (ctrl->flags & IPROC_CLK_PLL_RESET_ACTIVE_LOW)
		val &= ~(BIT(reset->reset_shift) | BIT(reset->p_reset_shift));
	else
		val |= BIT(reset->reset_shift) | BIT(reset->p_reset_shift);
	iproc_pll_write(pll, pll->control_base, reset->offset, val);
}

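/*
 * Reprogram the VCO to the rate described by vco_param[rate_index]: derive
 * the reference frequency and loop-filter settings, put the PLL in reset,
 * program the VCO range, NDIV and PDIV fields, bring the PLL out of reset,
 * and wait for it to lock.
 */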
static int pll_set_rate(struct iproc_clk *clk, unsigned int rate_index,
			unsigned long parent_rate)
{
	struct iproc_pll *pll = clk->pll;
	const struct iproc_pll_vco_param *vco = &pll->vco_param[rate_index];
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	int ka = 0, ki, kp, ret;
	unsigned long rate = vco->rate;
	u32 val;
	enum kp_band kp_index;
	unsigned long ref_freq;

	/*
	 * reference frequency = parent frequency / PDIV
	 * If PDIV = 0, then it becomes a multiplier (x2)
	 */
	if (vco->pdiv == 0)
		ref_freq = parent_rate * 2;
	else
		ref_freq = parent_rate / vco->pdiv;

	/* determine Ki and Kp index based on target VCO frequency */
	if (rate >= VCO_LOW && rate < VCO_HIGH) {
		ki = 4;
		kp_index = KP_BAND_MID;
	} else if (rate >= VCO_HIGH && rate < VCO_HIGH_HIGH) {
		ki = 3;
		kp_index = KP_BAND_HIGH;
	} else if (rate >= VCO_HIGH_HIGH && rate < VCO_MAX) {
		ki = 3;
		kp_index = KP_BAND_HIGH_HIGH;
	} else {
		pr_err("%s: pll: %s has invalid rate: %lu\n", __func__,
		       clk->name, rate);
		return -EINVAL;
	}

	kp = get_kp(ref_freq, kp_index);
	if (kp < 0) {
		pr_err("%s: pll: %s has invalid kp\n", __func__, clk->name);
		return kp;
	}

	ret = __pll_enable(pll);
	if (ret) {
		pr_err("%s: pll: %s fails to enable\n", __func__, clk->name);
		return ret;
	}

	/* put PLL in reset */
	__pll_put_in_reset(pll);

	/* set PLL in user mode before modifying PLL controls */
	if (ctrl->flags & IPROC_CLK_PLL_USER_MODE_ON) {
		val = readl(pll->control_base + ctrl->macro_mode.offset);
		val &= ~(bit_mask(ctrl->macro_mode.width) <<
			 ctrl->macro_mode.shift);
		val |= PLL_USER_MODE << ctrl->macro_mode.shift;
		iproc_pll_write(pll, pll->control_base,
				ctrl->macro_mode.offset, val);
	}

	iproc_pll_write(pll, pll->control_base, ctrl->vco_ctrl.u_offset, 0);

	val = readl(pll->control_base + ctrl->vco_ctrl.l_offset);

	if (rate >= VCO_LOW && rate < VCO_MID)
		val |= (1 << PLL_VCO_LOW_SHIFT);

	if (rate < VCO_HIGH)
		val &= ~(1 << PLL_VCO_HIGH_SHIFT);
	else
		val |= (1 << PLL_VCO_HIGH_SHIFT);

	iproc_pll_write(pll, pll->control_base, ctrl->vco_ctrl.l_offset, val);

	/* program integer part of NDIV */
	val = readl(pll->control_base + ctrl->ndiv_int.offset);
	val &= ~(bit_mask(ctrl->ndiv_int.width) << ctrl->ndiv_int.shift);
	val |= vco->ndiv_int << ctrl->ndiv_int.shift;
	iproc_pll_write(pll, pll->control_base, ctrl->ndiv_int.offset, val);

	/* program fractional part of NDIV */
	if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
		val = readl(pll->control_base + ctrl->ndiv_frac.offset);
		val &= ~(bit_mask(ctrl->ndiv_frac.width) <<
			 ctrl->ndiv_frac.shift);
		val |= vco->ndiv_frac << ctrl->ndiv_frac.shift;
		iproc_pll_write(pll, pll->control_base, ctrl->ndiv_frac.offset,
				val);
	}

	/* program PDIV */
	val = readl(pll->control_base + ctrl->pdiv.offset);
	val &= ~(bit_mask(ctrl->pdiv.width) << ctrl->pdiv.shift);
	val |= vco->pdiv << ctrl->pdiv.shift;
	iproc_pll_write(pll, pll->control_base, ctrl->pdiv.offset, val);

	__pll_bring_out_reset(pll, kp, ka, ki);

	ret = pll_wait_for_lock(pll);
	if (ret < 0) {
		pr_err("%s: pll: %s failed to lock\n", __func__, clk->name);
		return ret;
	}

	return 0;
}

static int iproc_pll_enable(struct clk_hw *hw)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	struct iproc_pll *pll = clk->pll;

	return __pll_enable(pll);
}

static void iproc_pll_disable(struct clk_hw *hw)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	struct iproc_pll *pll = clk->pll;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;

	if (ctrl->flags & IPROC_CLK_AON)
		return;

	__pll_disable(pll);
}

static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	struct iproc_pll *pll = clk->pll;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	u32 val;
	u64 ndiv, ndiv_int, ndiv_frac;
	unsigned int pdiv;

	if (parent_rate == 0)
		return 0;

	/* PLL needs to be locked */
	val = readl(pll->status_base + ctrl->status.offset);
	if ((val & (1 << ctrl->status.shift)) == 0) {
		clk->rate = 0;
		return 0;
	}

	/*
	 * PLL output frequency =
	 *
	 * (ndiv_int + ndiv_frac / 2^20) * (parent clock rate / pdiv)
	 */
	val = readl(pll->control_base + ctrl->ndiv_int.offset);
	ndiv_int = (val >> ctrl->ndiv_int.shift) &
		   bit_mask(ctrl->ndiv_int.width);
	ndiv = ndiv_int << 20;

	if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
		val = readl(pll->control_base + ctrl->ndiv_frac.offset);
		ndiv_frac = (val >> ctrl->ndiv_frac.shift) &
			    bit_mask(ctrl->ndiv_frac.width);
		ndiv += ndiv_frac;
	}

	val = readl(pll->control_base + ctrl->pdiv.offset);
	pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);

	clk->rate = (ndiv * parent_rate) >> 20;

	if (pdiv == 0)
		clk->rate *= 2;
	else
		clk->rate /= pdiv;

	return clk->rate;
}

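/*
 * Round up to the next supported VCO rate in the table (the first entry that
 * is >= the requested rate), falling back to the last entry when the request
 * exceeds every supported rate.
 */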
static long iproc_pll_round_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long *parent_rate)
{
	unsigned i;
	struct iproc_clk *clk = to_iproc_clk(hw);
	struct iproc_pll *pll = clk->pll;

	if (rate == 0 || *parent_rate == 0 || !pll->vco_param)
		return -EINVAL;

	for (i = 0; i < pll->num_vco_entries; i++) {
		if (rate <= pll->vco_param[i].rate)
			break;
	}

	if (i == pll->num_vco_entries)
		i--;

	return pll->vco_param[i].rate;
}

static int iproc_pll_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	struct iproc_pll *pll = clk->pll;
	int rate_index, ret;

	rate_index = pll_get_rate_index(pll, rate);
	if (rate_index < 0)
		return rate_index;

	ret = pll_set_rate(clk, rate_index, parent_rate);
	return ret;
}

static const struct clk_ops iproc_pll_ops = {
	.enable = iproc_pll_enable,
	.disable = iproc_pll_disable,
	.recalc_rate = iproc_pll_recalc_rate,
	.round_rate = iproc_pll_round_rate,
	.set_rate = iproc_pll_set_rate,
};

static int iproc_clk_enable(struct clk_hw *hw)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	const struct iproc_clk_ctrl *ctrl = clk->ctrl;
	struct iproc_pll *pll = clk->pll;
	u32 val;

	/* channel enable is active low */
	val = readl(pll->control_base + ctrl->enable.offset);
	val &= ~(1 << ctrl->enable.enable_shift);
	iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);

	/* also make sure channel is not held */
	val = readl(pll->control_base + ctrl->enable.offset);
	val &= ~(1 << ctrl->enable.hold_shift);
	iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);

	return 0;
}

static void iproc_clk_disable(struct clk_hw *hw)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	const struct iproc_clk_ctrl *ctrl = clk->ctrl;
	struct iproc_pll *pll = clk->pll;
	u32 val;

	if (ctrl->flags & IPROC_CLK_AON)
		return;

	val = readl(pll->control_base + ctrl->enable.offset);
	val |= 1 << ctrl->enable.enable_shift;
	iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);
}

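/*
 * Leaf clock rate = parent (PLL) rate / MDIV. A zero MDIV field means a
 * divider of 256; IPROC_CLK_MCLK_DIV_BY_2 adds another fixed divide-by-2.
 */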
static unsigned long iproc_clk_recalc_rate(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	const struct iproc_clk_ctrl *ctrl = clk->ctrl;
	struct iproc_pll *pll = clk->pll;
	u32 val;
	unsigned int mdiv;

	if (parent_rate == 0)
		return 0;

	val = readl(pll->control_base + ctrl->mdiv.offset);
	mdiv = (val >> ctrl->mdiv.shift) & bit_mask(ctrl->mdiv.width);
	if (mdiv == 0)
		mdiv = 256;

	if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
		clk->rate = parent_rate / (mdiv * 2);
	else
		clk->rate = parent_rate / mdiv;

	return clk->rate;
}

static long iproc_clk_round_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long *parent_rate)
{
	unsigned int div;

	if (rate == 0 || *parent_rate == 0)
		return -EINVAL;

	if (rate == *parent_rate)
		return *parent_rate;

	div = DIV_ROUND_UP(*parent_rate, rate);
	if (div < 2)
		return *parent_rate;

	if (div > 256)
		div = 256;

	return *parent_rate / div;
}

static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	const struct iproc_clk_ctrl *ctrl = clk->ctrl;
	struct iproc_pll *pll = clk->pll;
	u32 val;
	unsigned int div;

	if (rate == 0 || parent_rate == 0)
		return -EINVAL;

	if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
		div = DIV_ROUND_UP(parent_rate, rate * 2);
	else
		div = DIV_ROUND_UP(parent_rate, rate);
	if (div > 256)
		return -EINVAL;

	val = readl(pll->control_base + ctrl->mdiv.offset);
	if (div == 256) {
		val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
	} else {
		val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
		val |= div << ctrl->mdiv.shift;
	}
	iproc_pll_write(pll, pll->control_base, ctrl->mdiv.offset, val);

	if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
		clk->rate = parent_rate / (div * 2);
	else
		clk->rate = parent_rate / div;

	return 0;
}

static const struct clk_ops iproc_clk_ops = {
	.enable = iproc_clk_enable,
	.disable = iproc_clk_disable,
	.recalc_rate = iproc_clk_recalc_rate,
	.round_rate = iproc_clk_round_rate,
	.set_rate = iproc_clk_set_rate,
};

/*
 * Some PLLs require the PLL SW override bit to be set before changes can be
 * applied to the PLL
 */
static void iproc_pll_sw_cfg(struct iproc_pll *pll)
{
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;

	if (ctrl->flags & IPROC_CLK_PLL_NEEDS_SW_CFG) {
		u32 val;

		val = readl(pll->control_base + ctrl->sw_ctrl.offset);
		val |= BIT(ctrl->sw_ctrl.shift);
		iproc_pll_write(pll, pll->control_base, ctrl->sw_ctrl.offset,
				val);
	}
}

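/*
 * Map the PLL registers described by the device tree node, register the PLL
 * itself as clks[0] and every named leaf clock after it, and expose them all
 * through a onecell clk provider. Errors unwind whatever was set up so far.
 */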
void iproc_pll_clk_setup(struct device_node *node,
			 const struct iproc_pll_ctrl *pll_ctrl,
			 const struct iproc_pll_vco_param *vco,
			 unsigned int num_vco_entries,
			 const struct iproc_clk_ctrl *clk_ctrl,
			 unsigned int num_clks)
{
	int i, ret;
	struct iproc_pll *pll;
	struct iproc_clk *iclk;
	struct clk_init_data init;
	const char *parent_name;

	if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl))
		return;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (WARN_ON(!pll))
		return;

	pll->clk_data = kzalloc(sizeof(*pll->clk_data->hws) * num_clks +
				sizeof(*pll->clk_data), GFP_KERNEL);
	if (WARN_ON(!pll->clk_data))
		goto err_clk_data;
	pll->clk_data->num = num_clks;

	pll->clks = kcalloc(num_clks, sizeof(*pll->clks), GFP_KERNEL);
	if (WARN_ON(!pll->clks))
		goto err_clks;

	pll->control_base = of_iomap(node, 0);
	if (WARN_ON(!pll->control_base))
		goto err_pll_iomap;

	/* Some SoCs do not require the pwr_base, thus failing is not fatal */
	pll->pwr_base = of_iomap(node, 1);

	/* some PLLs require gating control at the top ASIU level */
	if (pll_ctrl->flags & IPROC_CLK_PLL_ASIU) {
		pll->asiu_base = of_iomap(node, 2);
		if (WARN_ON(!pll->asiu_base))
			goto err_asiu_iomap;
	}

	if (pll_ctrl->flags & IPROC_CLK_PLL_SPLIT_STAT_CTRL) {
		/* Some SoCs have a split status/control. If this does not
		 * exist, assume they are unified.
		 */
		pll->status_base = of_iomap(node, 2);
		if (!pll->status_base)
			goto err_status_iomap;
	} else
		pll->status_base = pll->control_base;

	/* initialize and register the PLL itself */
	pll->ctrl = pll_ctrl;

	iclk = &pll->clks[0];
	iclk->pll = pll;
	iclk->name = node->name;

	init.name = node->name;
	init.ops = &iproc_pll_ops;
	init.flags = 0;
	parent_name = of_clk_get_parent_name(node, 0);
	init.parent_names = (parent_name ? &parent_name : NULL);
	init.num_parents = (parent_name ? 1 : 0);
	iclk->hw.init = &init;

	if (vco) {
		pll->num_vco_entries = num_vco_entries;
		pll->vco_param = vco;
	}

	iproc_pll_sw_cfg(pll);

	ret = clk_hw_register(NULL, &iclk->hw);
	if (WARN_ON(ret))
		goto err_pll_register;

	pll->clk_data->hws[0] = &iclk->hw;

	/* now initialize and register all leaf clocks */
	for (i = 1; i < num_clks; i++) {
		const char *clk_name;

		memset(&init, 0, sizeof(init));
		parent_name = node->name;

		ret = of_property_read_string_index(node, "clock-output-names",
						    i, &clk_name);
		if (WARN_ON(ret))
			goto err_clk_register;

		iclk = &pll->clks[i];
		iclk->name = clk_name;
		iclk->pll = pll;
		iclk->ctrl = &clk_ctrl[i];

		init.name = clk_name;
		init.ops = &iproc_clk_ops;
		init.flags = 0;
		init.parent_names = (parent_name ? &parent_name : NULL);
		init.num_parents = (parent_name ? 1 : 0);
		iclk->hw.init = &init;

		ret = clk_hw_register(NULL, &iclk->hw);
		if (WARN_ON(ret))
			goto err_clk_register;

		pll->clk_data->hws[i] = &iclk->hw;
	}

	ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
				     pll->clk_data);
	if (WARN_ON(ret))
		goto err_clk_register;

	return;

err_clk_register:
	while (--i >= 0)
		clk_hw_unregister(pll->clk_data->hws[i]);

err_pll_register:
	if (pll->status_base != pll->control_base)
		iounmap(pll->status_base);

err_status_iomap:
	if (pll->asiu_base)
		iounmap(pll->asiu_base);

err_asiu_iomap:
	if (pll->pwr_base)
		iounmap(pll->pwr_base);

	iounmap(pll->control_base);

err_pll_iomap:
	kfree(pll->clks);

err_clks:
	kfree(pll->clk_data);

err_clk_data:
	kfree(pll);
}