- /*
- * SMP support for R-Mobile / SH-Mobile
- *
- * Copyright (C) 2010 Magnus Damm
- * Copyright (C) 2010 Takashi Yoshii
- *
- * Based on vexpress, Copyright (c) 2003 ARM Limited, All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
- #include <linux/linkage.h>
- #include <linux/init.h>
- #include <asm/memory.h>
- __CPUINIT
- /* Cache invalidation nicked from arch/arm/mach-imx/head-v7.S, thanks!
- *
- * The secondary kernel init calls v7_flush_dcache_all before it enables
- * the L1; however, the L1 comes out of reset in an undefined state, so
- * the clean + invalidate performed by v7_flush_dcache_all causes a bunch
- * of cache lines with uninitialized data and uninitialized tags to get
- * written out to memory, which does really unpleasant things to the main
- * processor. We fix this by performing an invalidate, rather than a
- * clean + invalidate, before jumping into the kernel.
- *
- * This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs
- * to be called for both secondary cores startup and primary core resume
- * procedures. Ideally, it should be moved into arch/arm/mm/cache-v7.S.
- */
- ENTRY(v7_invalidate_l1)
- mov r0, #0
- mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
- mcr p15, 2, r0, c0, c0, 0 @ select L1 data cache in CSSELR
- mrc p15, 1, r0, c0, c0, 0 @ read CCSIDR
- ldr r1, =0x7fff
- and r2, r1, r0, lsr #13
- ldr r1, =0x3ff
- and r3, r1, r0, lsr #3 @ NumWays - 1
- add r2, r2, #1 @ NumSets
- and r0, r0, #0x7
- add r0, r0, #4 @ SetShift
- clz r1, r3 @ WayShift
- add r4, r3, #1 @ NumWays
- 1: sub r2, r2, #1 @ NumSets--
- mov r3, r4 @ Temp = NumWays
- 2: subs r3, r3, #1 @ Temp--
- mov r5, r3, lsl r1
- mov r6, r2, lsl r0
- orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
- mcr p15, 0, r5, c7, c6, 2 @ DCISW: invalidate D-cache line by set/way
- bgt 2b
- cmp r2, #0
- bgt 1b
- dsb
- isb
- mov pc, lr
- ENDPROC(v7_invalidate_l1)
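
The loop between labels 1: and 2: walks every set/way index of the L1 data cache and invalidates each line with DCISW. For readers less fluent in the set/way arithmetic, here is a minimal user-space C sketch of the same computation. It assumes the ARMv7 CCSIDR field layout (NumSets-1 in bits [27:13], NumWays-1 in bits [12:3], line-size code in bits [2:0]); the function name walk_l1_sets_ways and the sample CCSIDR value are invented for illustration, and on real hardware the cache maintenance itself is done by the MCR instructions in the assembly above, not by C code.

    /*
     * Illustrative sketch of the set/way arithmetic used by v7_invalidate_l1.
     * The CCSIDR value passed in is a made-up example; on hardware it is read
     * via MRC p15, 1, <Rt>, c0, c0, 0 after selecting L1 data in CSSELR.
     */
    #include <stdint.h>
    #include <stdio.h>

    static void walk_l1_sets_ways(uint32_t ccsidr)
    {
            uint32_t num_sets  = ((ccsidr >> 13) & 0x7fff) + 1;  /* NumSets  */
            uint32_t num_ways  = ((ccsidr >> 3)  & 0x3ff)  + 1;  /* NumWays  */
            uint32_t set_shift = (ccsidr & 0x7) + 4;             /* log2(line bytes) */
            uint32_t way_shift = num_ways > 1 ?                  /* clz(NumWays - 1) */
                                 (uint32_t)__builtin_clz(num_ways - 1) : 0;

            for (uint32_t set = 0; set < num_sets; set++)
                    for (uint32_t way = 0; way < num_ways; way++) {
                            uint32_t dcisw = (way << way_shift) | (set << set_shift);
                            /* The assembly issues MCR p15, 0, dcisw, c7, c6, 2 here. */
                            (void)dcisw;
                    }

            printf("%u sets x %u ways, set shift %u, way shift %u\n",
                   (unsigned)num_sets, (unsigned)num_ways,
                   (unsigned)set_shift, (unsigned)way_shift);
    }

    int main(void)
    {
            /* Hypothetical 32 KiB, 4-way cache with 64-byte lines -> 128 sets. */
            walk_l1_sets_ways((127u << 13) | (3u << 3) | 2u);
            return 0;
    }

The assembly walks the set and way indices downward rather than upward; the traversal order does not matter, only that every (set, way) pair is hit once before the final dsb/isb.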
- ENTRY(shmobile_invalidate_start)
- bl v7_invalidate_l1
- b secondary_startup
- ENDPROC(shmobile_invalidate_start)
- /*
- * Reset vector for secondary CPUs.
- * This will be mapped at address 0 by SBAR register.
- * We need _long_ jump to the physical address.
- */
- .align 12
- ENTRY(shmobile_secondary_vector)
- ldr pc, 1f
- 1: .long shmobile_invalidate_start - PAGE_OFFSET + PLAT_PHYS_OFFSET
- ENDPROC(shmobile_secondary_vector)
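
The only non-obvious part of the vector is the .long literal. SBAR maps this page at address 0 and the secondary CPU leaves reset with the MMU off, so the jump target must be a physical address: the link-time virtual address of shmobile_invalidate_start is adjusted by subtracting PAGE_OFFSET and adding PLAT_PHYS_OFFSET. The sketch below shows the same arithmetic, which is what virt_to_phys()/__pa() compute for linearly mapped kernel text; the constants are hypothetical examples, not real platform values.

    /* Sketch of the address fixup behind the ".long" entry above. */
    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_PAGE_OFFSET      0xc0000000u  /* kernel virtual base (typical) */
    #define EXAMPLE_PLAT_PHYS_OFFSET 0x40000000u  /* RAM physical base (example)   */

    static uint32_t example_virt_to_phys(uint32_t vaddr)
    {
            return vaddr - EXAMPLE_PAGE_OFFSET + EXAMPLE_PLAT_PHYS_OFFSET;
    }

    int main(void)
    {
            uint32_t virt = 0xc0008200u;  /* made-up link address of the entry point */
            printf("virt 0x%08x -> phys 0x%08x\n",
                   (unsigned)virt, (unsigned)example_virt_to_phys(virt));
            return 0;
    }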