/*
 * Versatile Express Core Tile Cortex A9x4 Support
 */
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
#include <linux/clkdev.h>

#include <asm/hardware/arm_timer.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/timer-sp.h>
#include <asm/pmu.h>
#include <asm/smp_scu.h>
#include <asm/smp_twd.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>

#include <mach/ct-ca9x4.h>
#include <mach/motherboard.h>

#include <plat/clcd.h>

#include "core.h"
/*
 * Static I/O mapping for the core tile: one 8K device mapping of the
 * Cortex-A9 MPCore private peripheral block (CT_CA9X4_MPIC) at the
 * fixed virtual address V2T_PERIPH.
 */
static struct map_desc ct_ca9x4_io_desc[] __initdata = {
	{
		.virtual	= V2T_PERIPH,
		.pfn		= __phys_to_pfn(CT_CA9X4_MPIC),
		.length		= SZ_8K,
		.type		= MT_DEVICE,
	},
};
/* Install the static I/O mappings for this core tile. */
static void __init ct_ca9x4_map_io(void)
{
	iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
}
#ifdef CONFIG_HAVE_ARM_TWD
/* Per-CPU local timer (TWD) in the A9 MPCore private region. */
static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, A9_MPCORE_TWD, IRQ_LOCALTIMER);

/*
 * Register the TWD local timer; a registration failure is logged but
 * otherwise ignored.
 */
static void __init ca9x4_twd_init(void)
{
	int err = twd_local_timer_register(&twd_local_timer);
	if (err)
		pr_err("twd_local_timer_register failed %d\n", err);
}
#else
#define ca9x4_twd_init()	do {} while(0)
#endif
/*
 * Initialise the GIC (distributor and CPU interface both live in the
 * MPCore private region) with IRQ numbering starting at 29, then set
 * up the per-CPU local timers.
 */
static void __init ct_ca9x4_init_irq(void)
{
	gic_init(0, 29, ioremap(A9_MPCORE_GIC_DIST, SZ_4K),
		 ioremap(A9_MPCORE_GIC_CPU, SZ_256));
	ca9x4_twd_init();
}
/*
 * Route the daughterboard's CLCD output through the motherboard FPGA
 * mux and select DVI mode 2 via the config bus. (NOTE(review): mode 2
 * presumably corresponds to the XGA/XVGA timings used by the setup
 * callback below -- confirm against the motherboard documentation.)
 */
static void ct_ca9x4_clcd_enable(struct clcd_fb *fb)
{
	v2m_cfg_write(SYS_CFG_MUXFPGA | SYS_CFG_SITE_DB1, 0);
	v2m_cfg_write(SYS_CFG_DVIMODE | SYS_CFG_SITE_DB1, 2);
}
  59. static int ct_ca9x4_clcd_setup(struct clcd_fb *fb)
  60. {
  61. unsigned long framesize = 1024 * 768 * 2;
  62. fb->panel = versatile_clcd_get_panel("XVGA");
  63. if (!fb->panel)
  64. return -EINVAL;
  65. return versatile_clcd_setup_dma(fb, framesize);
  66. }
/*
 * CLCD board glue: 16bpp-capable XVGA panel, DMA-allocated
 * framebuffer, using the shared Versatile CLCD helpers for the
 * generic check/decode/mmap/remove operations.
 */
static struct clcd_board ct_ca9x4_clcd_data = {
	.name = "CT-CA9X4",
	.caps = CLCD_CAP_5551 | CLCD_CAP_565,	/* 16bpp formats only */
	.check = clcdfb_check,
	.decode = clcdfb_decode,
	.enable = ct_ca9x4_clcd_enable,
	.setup = ct_ca9x4_clcd_setup,
	.mmap = versatile_clcd_mmap_dma,
	.remove = versatile_clcd_remove_dma,
};
/*
 * AMBA primecell devices on the core tile: the CLCD controller (AHB)
 * plus the dynamic memory, static memory and GPIO controllers (APB).
 * All are registered from ct_ca9x4_init().
 */
static AMBA_AHB_DEVICE(clcd, "ct:clcd", 0, CT_CA9X4_CLCDC, IRQ_CT_CA9X4_CLCDC, &ct_ca9x4_clcd_data);
static AMBA_APB_DEVICE(dmc, "ct:dmc", 0, CT_CA9X4_DMC, IRQ_CT_CA9X4_DMC, NULL);
static AMBA_APB_DEVICE(smc, "ct:smc", 0, CT_CA9X4_SMC, IRQ_CT_CA9X4_SMC, NULL);
static AMBA_APB_DEVICE(gpio, "ct:gpio", 0, CT_CA9X4_GPIO, IRQ_CT_CA9X4_GPIO, NULL);

static struct amba_device *ct_ca9x4_amba_devs[] __initdata = {
	&clcd_device,
	&dmc_device,
	&smc_device,
	&gpio_device,
};
/* Pass-through rate rounding: any requested rate is accepted as-is. */
static long ct_round(struct clk *clk, unsigned long rate)
{
	return rate;
}
/*
 * Set the oscillator rate by writing it to motherboard config
 * function SYS_CFG_OSC, oscillator index 1, on the daughterboard site.
 */
static int ct_set(struct clk *clk, unsigned long rate)
{
	return v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE_DB1 | 1, rate);
}
/* Clock operations for OSC1: accept any rate, program it over the
 * config bus. */
static const struct clk_ops osc1_clk_ops = {
	.round = ct_round,
	.set = ct_set,
};
/* OSC1, the CLCD pixel clock source; 24MHz initial rate, changeable
 * via the config-bus ops above. */
static struct clk osc1_clk = {
	.ops = &osc1_clk_ops,
	.rate = 24000000,
};

/* Fixed 1MHz reference clock for the SP804 dual timers. */
static struct clk ct_sp804_clk = {
	.rate = 1000000,
};
/*
 * Clock bindings: the CLCD device takes OSC1; both SP804 timer
 * channels take the fixed 1MHz reference.
 */
static struct clk_lookup lookups[] = {
	{	/* CLCD */
		.dev_id = "ct:clcd",
		.clk = &osc1_clk,
	}, {	/* SP804 timers */
		.dev_id = "sp804",
		.con_id = "ct-timer0",
		.clk = &ct_sp804_clk,
	}, {	/* SP804 timers */
		.dev_id = "sp804",
		.con_id = "ct-timer1",
		.clk = &ct_sp804_clk,
	},
};
/* One PMU overflow interrupt per Cortex-A9 core, indexed by CPU. */
static struct resource pmu_resources[] = {
	[0] = {
		.start = IRQ_CT_CA9X4_PMU_CPU0,
		.end = IRQ_CT_CA9X4_PMU_CPU0,
		.flags = IORESOURCE_IRQ,
	},
	[1] = {
		.start = IRQ_CT_CA9X4_PMU_CPU1,
		.end = IRQ_CT_CA9X4_PMU_CPU1,
		.flags = IORESOURCE_IRQ,
	},
	[2] = {
		.start = IRQ_CT_CA9X4_PMU_CPU2,
		.end = IRQ_CT_CA9X4_PMU_CPU2,
		.flags = IORESOURCE_IRQ,
	},
	[3] = {
		.start = IRQ_CT_CA9X4_PMU_CPU3,
		.end = IRQ_CT_CA9X4_PMU_CPU3,
		.flags = IORESOURCE_IRQ,
	},
};
/* "arm-pmu" platform device carrying the four per-CPU PMU IRQs. */
static struct platform_device pmu_device = {
	.name = "arm-pmu",
	.id = ARM_PMU_DEVICE_CPU,
	.num_resources = ARRAY_SIZE(pmu_resources),
	.resource = pmu_resources,
};
/* Register the clock lookup table early, before any device probes. */
static void __init ct_ca9x4_init_early(void)
{
	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
}
  152. static void __init ct_ca9x4_init(void)
  153. {
  154. int i;
  155. #ifdef CONFIG_CACHE_L2X0
  156. void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K);
  157. /* set RAM latencies to 1 cycle for this core tile. */
  158. writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
  159. writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
  160. l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
  161. #endif
  162. for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++)
  163. amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource);
  164. platform_device_register(&pmu_device);
  165. }
#ifdef CONFIG_SMP
/* SCU mapping created in ct_ca9x4_init_cpu_map() and reused by
 * ct_ca9x4_smp_enable(). */
static void *ct_ca9x4_scu_base __initdata;

/*
 * Read the core count from the SCU and mark that many CPUs possible,
 * clipped to nr_cpu_ids; then install the GIC softirq-based cross
 * call used for SMP bring-up.
 */
static void __init ct_ca9x4_init_cpu_map(void)
{
	int i, ncores;
	ct_ca9x4_scu_base = ioremap(A9_MPCORE_SCU, SZ_128);
	if (WARN_ON(!ct_ca9x4_scu_base))
		return;
	ncores = scu_get_core_count(ct_ca9x4_scu_base);
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}
	for (i = 0; i < ncores; ++i)
		set_cpu_possible(i, true);
	set_smp_cross_call(gic_raise_softirq);
}

/* Enable the SCU before secondary CPUs are brought up. */
static void __init ct_ca9x4_smp_enable(unsigned int max_cpus)
{
	scu_enable(ct_ca9x4_scu_base);
}
#endif
/*
 * Core tile descriptor: identified by V2M_CT_ID_CA9 and wiring the
 * callbacks above into the common Versatile Express machine code
 * (presumably matched against the motherboard's core tile ID -- see
 * core.h / the common v2m code).
 */
struct ct_desc ct_ca9x4_desc __initdata = {
	.id = V2M_CT_ID_CA9,
	.name = "CA9x4",
	.map_io = ct_ca9x4_map_io,
	.init_early = ct_ca9x4_init_early,
	.init_irq = ct_ca9x4_init_irq,
	.init_tile = ct_ca9x4_init,
#ifdef CONFIG_SMP
	.init_cpu_map = ct_ca9x4_init_cpu_map,
	.smp_enable = ct_ca9x4_smp_enable,
#endif
};