cpg.c 9.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394
/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010 Magnus Damm
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>
  15. static int sh_clk_mstp32_enable(struct clk *clk)
  16. {
  17. iowrite32(ioread32(clk->mapped_reg) & ~(1 << clk->enable_bit),
  18. clk->mapped_reg);
  19. return 0;
  20. }
  21. static void sh_clk_mstp32_disable(struct clk *clk)
  22. {
  23. iowrite32(ioread32(clk->mapped_reg) | (1 << clk->enable_bit),
  24. clk->mapped_reg);
  25. }
  26. static struct sh_clk_ops sh_clk_mstp32_clk_ops = {
  27. .enable = sh_clk_mstp32_enable,
  28. .disable = sh_clk_mstp32_disable,
  29. .recalc = followparent_recalc,
  30. };
  31. int __init sh_clk_mstp32_register(struct clk *clks, int nr)
  32. {
  33. struct clk *clkp;
  34. int ret = 0;
  35. int k;
  36. for (k = 0; !ret && (k < nr); k++) {
  37. clkp = clks + k;
  38. clkp->ops = &sh_clk_mstp32_clk_ops;
  39. ret |= clk_register(clkp);
  40. }
  41. return ret;
  42. }
  43. static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
  44. {
  45. return clk_rate_table_round(clk, clk->freq_table, rate);
  46. }
  47. static int sh_clk_div6_divisors[64] = {
  48. 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
  49. 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
  50. 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
  51. 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
  52. };
  53. static struct clk_div_mult_table sh_clk_div6_table = {
  54. .divisors = sh_clk_div6_divisors,
  55. .nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
  56. };
  57. static unsigned long sh_clk_div6_recalc(struct clk *clk)
  58. {
  59. struct clk_div_mult_table *table = &sh_clk_div6_table;
  60. unsigned int idx;
  61. clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
  62. table, NULL);
  63. idx = ioread32(clk->mapped_reg) & 0x003f;
  64. return clk->freq_table[idx].frequency;
  65. }
  66. static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
  67. {
  68. struct clk_div_mult_table *table = &sh_clk_div6_table;
  69. u32 value;
  70. int ret, i;
  71. if (!clk->parent_table || !clk->parent_num)
  72. return -EINVAL;
  73. /* Search the parent */
  74. for (i = 0; i < clk->parent_num; i++)
  75. if (clk->parent_table[i] == parent)
  76. break;
  77. if (i == clk->parent_num)
  78. return -ENODEV;
  79. ret = clk_reparent(clk, parent);
  80. if (ret < 0)
  81. return ret;
  82. value = ioread32(clk->mapped_reg) &
  83. ~(((1 << clk->src_width) - 1) << clk->src_shift);
  84. iowrite32(value | (i << clk->src_shift), clk->mapped_reg);
  85. /* Rebuild the frequency table */
  86. clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
  87. table, NULL);
  88. return 0;
  89. }
  90. static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
  91. {
  92. unsigned long value;
  93. int idx;
  94. idx = clk_rate_table_find(clk, clk->freq_table, rate);
  95. if (idx < 0)
  96. return idx;
  97. value = ioread32(clk->mapped_reg);
  98. value &= ~0x3f;
  99. value |= idx;
  100. iowrite32(value, clk->mapped_reg);
  101. return 0;
  102. }
  103. static int sh_clk_div6_enable(struct clk *clk)
  104. {
  105. unsigned long value;
  106. int ret;
  107. ret = sh_clk_div6_set_rate(clk, clk->rate);
  108. if (ret == 0) {
  109. value = ioread32(clk->mapped_reg);
  110. value &= ~0x100; /* clear stop bit to enable clock */
  111. iowrite32(value, clk->mapped_reg);
  112. }
  113. return ret;
  114. }
  115. static void sh_clk_div6_disable(struct clk *clk)
  116. {
  117. unsigned long value;
  118. value = ioread32(clk->mapped_reg);
  119. value |= 0x100; /* stop clock */
  120. value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
  121. iowrite32(value, clk->mapped_reg);
  122. }
  123. static struct sh_clk_ops sh_clk_div6_clk_ops = {
  124. .recalc = sh_clk_div6_recalc,
  125. .round_rate = sh_clk_div_round_rate,
  126. .set_rate = sh_clk_div6_set_rate,
  127. .enable = sh_clk_div6_enable,
  128. .disable = sh_clk_div6_disable,
  129. };
  130. static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
  131. .recalc = sh_clk_div6_recalc,
  132. .round_rate = sh_clk_div_round_rate,
  133. .set_rate = sh_clk_div6_set_rate,
  134. .enable = sh_clk_div6_enable,
  135. .disable = sh_clk_div6_disable,
  136. .set_parent = sh_clk_div6_set_parent,
  137. };
  138. static int __init sh_clk_init_parent(struct clk *clk)
  139. {
  140. u32 val;
  141. if (clk->parent)
  142. return 0;
  143. if (!clk->parent_table || !clk->parent_num)
  144. return 0;
  145. if (!clk->src_width) {
  146. pr_err("sh_clk_init_parent: cannot select parent clock\n");
  147. return -EINVAL;
  148. }
  149. val = (ioread32(clk->mapped_reg) >> clk->src_shift);
  150. val &= (1 << clk->src_width) - 1;
  151. if (val >= clk->parent_num) {
  152. pr_err("sh_clk_init_parent: parent table size failed\n");
  153. return -EINVAL;
  154. }
  155. clk_reparent(clk, clk->parent_table[val]);
  156. if (!clk->parent) {
  157. pr_err("sh_clk_init_parent: unable to set parent");
  158. return -EINVAL;
  159. }
  160. return 0;
  161. }
  162. static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
  163. struct sh_clk_ops *ops)
  164. {
  165. struct clk *clkp;
  166. void *freq_table;
  167. int nr_divs = sh_clk_div6_table.nr_divisors;
  168. int freq_table_size = sizeof(struct cpufreq_frequency_table);
  169. int ret = 0;
  170. int k;
  171. freq_table_size *= (nr_divs + 1);
  172. freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
  173. if (!freq_table) {
  174. pr_err("sh_clk_div6_register: unable to alloc memory\n");
  175. return -ENOMEM;
  176. }
  177. for (k = 0; !ret && (k < nr); k++) {
  178. clkp = clks + k;
  179. clkp->ops = ops;
  180. clkp->freq_table = freq_table + (k * freq_table_size);
  181. clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
  182. ret = clk_register(clkp);
  183. if (ret < 0)
  184. break;
  185. ret = sh_clk_init_parent(clkp);
  186. }
  187. return ret;
  188. }
  189. int __init sh_clk_div6_register(struct clk *clks, int nr)
  190. {
  191. return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
  192. }
  193. int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
  194. {
  195. return sh_clk_div6_register_ops(clks, nr,
  196. &sh_clk_div6_reparent_clk_ops);
  197. }
  198. static unsigned long sh_clk_div4_recalc(struct clk *clk)
  199. {
  200. struct clk_div4_table *d4t = clk->priv;
  201. struct clk_div_mult_table *table = d4t->div_mult_table;
  202. unsigned int idx;
  203. clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
  204. table, &clk->arch_flags);
  205. idx = (ioread32(clk->mapped_reg) >> clk->enable_bit) & 0x000f;
  206. return clk->freq_table[idx].frequency;
  207. }
  208. static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
  209. {
  210. struct clk_div4_table *d4t = clk->priv;
  211. struct clk_div_mult_table *table = d4t->div_mult_table;
  212. u32 value;
  213. int ret;
  214. /* we really need a better way to determine parent index, but for
  215. * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
  216. * no CLK_ENABLE_ON_INIT means external clock...
  217. */
  218. if (parent->flags & CLK_ENABLE_ON_INIT)
  219. value = ioread32(clk->mapped_reg) & ~(1 << 7);
  220. else
  221. value = ioread32(clk->mapped_reg) | (1 << 7);
  222. ret = clk_reparent(clk, parent);
  223. if (ret < 0)
  224. return ret;
  225. iowrite32(value, clk->mapped_reg);
  226. /* Rebiuld the frequency table */
  227. clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
  228. table, &clk->arch_flags);
  229. return 0;
  230. }
  231. static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
  232. {
  233. struct clk_div4_table *d4t = clk->priv;
  234. unsigned long value;
  235. int idx = clk_rate_table_find(clk, clk->freq_table, rate);
  236. if (idx < 0)
  237. return idx;
  238. value = ioread32(clk->mapped_reg);
  239. value &= ~(0xf << clk->enable_bit);
  240. value |= (idx << clk->enable_bit);
  241. iowrite32(value, clk->mapped_reg);
  242. if (d4t->kick)
  243. d4t->kick(clk);
  244. return 0;
  245. }
  246. static int sh_clk_div4_enable(struct clk *clk)
  247. {
  248. iowrite32(ioread32(clk->mapped_reg) & ~(1 << 8), clk->mapped_reg);
  249. return 0;
  250. }
  251. static void sh_clk_div4_disable(struct clk *clk)
  252. {
  253. iowrite32(ioread32(clk->mapped_reg) | (1 << 8), clk->mapped_reg);
  254. }
  255. static struct sh_clk_ops sh_clk_div4_clk_ops = {
  256. .recalc = sh_clk_div4_recalc,
  257. .set_rate = sh_clk_div4_set_rate,
  258. .round_rate = sh_clk_div_round_rate,
  259. };
  260. static struct sh_clk_ops sh_clk_div4_enable_clk_ops = {
  261. .recalc = sh_clk_div4_recalc,
  262. .set_rate = sh_clk_div4_set_rate,
  263. .round_rate = sh_clk_div_round_rate,
  264. .enable = sh_clk_div4_enable,
  265. .disable = sh_clk_div4_disable,
  266. };
  267. static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
  268. .recalc = sh_clk_div4_recalc,
  269. .set_rate = sh_clk_div4_set_rate,
  270. .round_rate = sh_clk_div_round_rate,
  271. .enable = sh_clk_div4_enable,
  272. .disable = sh_clk_div4_disable,
  273. .set_parent = sh_clk_div4_set_parent,
  274. };
  275. static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
  276. struct clk_div4_table *table, struct sh_clk_ops *ops)
  277. {
  278. struct clk *clkp;
  279. void *freq_table;
  280. int nr_divs = table->div_mult_table->nr_divisors;
  281. int freq_table_size = sizeof(struct cpufreq_frequency_table);
  282. int ret = 0;
  283. int k;
  284. freq_table_size *= (nr_divs + 1);
  285. freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
  286. if (!freq_table) {
  287. pr_err("sh_clk_div4_register: unable to alloc memory\n");
  288. return -ENOMEM;
  289. }
  290. for (k = 0; !ret && (k < nr); k++) {
  291. clkp = clks + k;
  292. clkp->ops = ops;
  293. clkp->priv = table;
  294. clkp->freq_table = freq_table + (k * freq_table_size);
  295. clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
  296. ret = clk_register(clkp);
  297. }
  298. return ret;
  299. }
  300. int __init sh_clk_div4_register(struct clk *clks, int nr,
  301. struct clk_div4_table *table)
  302. {
  303. return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
  304. }
  305. int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
  306. struct clk_div4_table *table)
  307. {
  308. return sh_clk_div4_register_ops(clks, nr, table,
  309. &sh_clk_div4_enable_clk_ops);
  310. }
  311. int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
  312. struct clk_div4_table *table)
  313. {
  314. return sh_clk_div4_register_ops(clks, nr, table,
  315. &sh_clk_div4_reparent_clk_ops);
  316. }