cache-l2x0.c

  1. /*
  2. * arch/arm/mm/cache-l2x0.c - L210/L220/L310 cache controller support
  3. *
  4. * Copyright (C) 2007 ARM Limited
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. #include <linux/cpu.h>
  20. #include <linux/err.h>
  21. #include <linux/init.h>
  22. #include <linux/smp.h>
  23. #include <linux/spinlock.h>
  24. #include <linux/log2.h>
  25. #include <linux/io.h>
  26. #include <linux/of.h>
  27. #include <linux/of_address.h>
  28. #include <asm/cacheflush.h>
  29. #include <asm/cp15.h>
  30. #include <asm/cputype.h>
  31. #include <asm/hardware/cache-l2x0.h>
  32. #include "cache-tauros3.h"
  33. #include "cache-aurora-l2.h"
  34. struct l2c_init_data {
  35. const char *type;
  36. unsigned way_size_0;
  37. unsigned num_lock;
  38. void (*of_parse)(const struct device_node *, u32 *, u32 *);
  39. void (*enable)(void __iomem *, unsigned);
  40. void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
  41. void (*save)(void __iomem *);
  42. void (*configure)(void __iomem *);
  43. void (*unlock)(void __iomem *, unsigned);
  44. struct outer_cache_fns outer_cache;
  45. };
  46. #define CACHE_LINE_SIZE 32
  47. static void __iomem *l2x0_base;
  48. static const struct l2c_init_data *l2x0_data;
  49. static DEFINE_RAW_SPINLOCK(l2x0_lock);
  50. static u32 l2x0_way_mask; /* Bitmask of active ways */
  51. static u32 l2x0_size;
  52. static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
  53. struct l2x0_regs l2x0_saved_regs;
  54. /*
  55. * Common code for all cache controllers.
  56. */
  57. static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
  58. {
  59. /* wait for cache operation by line or way to complete */
  60. while (readl_relaxed(reg) & mask)
  61. cpu_relax();
  62. }
  63. /*
  64. * By default, we write directly to secure registers. Platforms must
  65. * override this if they are running non-secure.
  66. */
  67. static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
  68. {
  69. if (val == readl_relaxed(base + reg))
  70. return;
  71. if (outer_cache.write_sec)
  72. outer_cache.write_sec(val, reg);
  73. else
  74. writel_relaxed(val, base + reg);
  75. }
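/*
 * The early return above avoids redundant register writes, which matters
 * when write_sec is routed through a secure monitor call on platforms that
 * run Linux in the non-secure world. The hook below is only an illustration
 * of how a platform might install such a handler; the SMC helper and its
 * command value are hypothetical, not part of this file:
 *
 *   static void example_l2c_write_sec(unsigned long val, unsigned reg)
 *   {
 *           example_smc_call(EXAMPLE_SMC_WRITE_L2C, reg, val);
 *   }
 *
 *   outer_cache.write_sec = example_l2c_write_sec;
 */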
  76. /*
  77. * This should only be called when we have a requirement that the
  78. * register be written due to a work-around, as platforms running
  79. * in non-secure mode may not be able to access this register.
  80. */
  81. static inline void l2c_set_debug(void __iomem *base, unsigned long val)
  82. {
  83. l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
  84. }
  85. static void __l2c_op_way(void __iomem *reg)
  86. {
  87. writel_relaxed(l2x0_way_mask, reg);
  88. l2c_wait_mask(reg, l2x0_way_mask);
  89. }
  90. static inline void l2c_unlock(void __iomem *base, unsigned num)
  91. {
  92. unsigned i;
  93. for (i = 0; i < num; i++) {
  94. writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
  95. i * L2X0_LOCKDOWN_STRIDE);
  96. writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
  97. i * L2X0_LOCKDOWN_STRIDE);
  98. }
  99. }
  100. static void l2c_configure(void __iomem *base)
  101. {
  102. l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
  103. }
  104. /*
  105. * Enable the L2 cache controller. This function must only be
  106. * called when the cache controller is known to be disabled.
  107. */
  108. static void l2c_enable(void __iomem *base, unsigned num_lock)
  109. {
  110. unsigned long flags;
  111. if (outer_cache.configure)
  112. outer_cache.configure(&l2x0_saved_regs);
  113. else
  114. l2x0_data->configure(base);
  115. l2x0_data->unlock(base, num_lock);
  116. local_irq_save(flags);
  117. __l2c_op_way(base + L2X0_INV_WAY);
  118. writel_relaxed(0, base + sync_reg_offset);
  119. l2c_wait_mask(base + sync_reg_offset, 1);
  120. local_irq_restore(flags);
  121. l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
  122. }
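/*
 * The enable sequence above is: program AUX_CTRL (and any other saved
 * registers) through the per-variant configure hook while the cache is
 * still disabled, clear the way lockdown registers via the unlock hook,
 * invalidate all ways and issue a cache sync with interrupts disabled,
 * and only then set the enable bit in L2X0_CTRL.
 */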
  123. static void l2c_disable(void)
  124. {
  125. void __iomem *base = l2x0_base;
  126. l2x0_pmu_suspend();
  127. outer_cache.flush_all();
  128. l2c_write_sec(0, base, L2X0_CTRL);
  129. dsb(st);
  130. }
  131. static void l2c_save(void __iomem *base)
  132. {
  133. l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
  134. }
  135. static void l2c_resume(void)
  136. {
  137. void __iomem *base = l2x0_base;
  138. /* Do not touch the controller if already enabled. */
  139. if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
  140. l2c_enable(base, l2x0_data->num_lock);
  141. l2x0_pmu_resume();
  142. }
  143. /*
  144. * L2C-210 specific code.
  145. *
  146. * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
  147. * ensure that no background operation is running. The way operations
  148. * are all background tasks.
  149. *
  150. * While a background operation is in progress, any new operation is
  151. * ignored (it is unspecified whether this causes an error). Thankfully,
  152. * the L2C-210 is not used on SMP systems.
  153. *
  154. * The L2C-210 never has a sync register other than L2X0_CACHE_SYNC, but
  155. * we use sync_reg_offset here so we can share some of this code with the L2C-310.
  156. */
  157. static void __l2c210_cache_sync(void __iomem *base)
  158. {
  159. writel_relaxed(0, base + sync_reg_offset);
  160. }
  161. static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
  162. unsigned long end)
  163. {
  164. while (start < end) {
  165. writel_relaxed(start, reg);
  166. start += CACHE_LINE_SIZE;
  167. }
  168. }
  169. static void l2c210_inv_range(unsigned long start, unsigned long end)
  170. {
  171. void __iomem *base = l2x0_base;
  172. if (start & (CACHE_LINE_SIZE - 1)) {
  173. start &= ~(CACHE_LINE_SIZE - 1);
  174. writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
  175. start += CACHE_LINE_SIZE;
  176. }
  177. if (end & (CACHE_LINE_SIZE - 1)) {
  178. end &= ~(CACHE_LINE_SIZE - 1);
  179. writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
  180. }
  181. __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
  182. __l2c210_cache_sync(base);
  183. }
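/*
 * A short worked example of the alignment handling above, using the
 * 32-byte CACHE_LINE_SIZE defined in this file: for inv_range(0x1010,
 * 0x1870), the partial lines at 0x1000 and 0x1860 are clean+invalidated
 * (so dirty data outside the requested range is not lost), and the fully
 * covered lines 0x1020..0x1840 are simply invalidated before the final
 * cache sync.
 */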
  184. static void l2c210_clean_range(unsigned long start, unsigned long end)
  185. {
  186. void __iomem *base = l2x0_base;
  187. start &= ~(CACHE_LINE_SIZE - 1);
  188. __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
  189. __l2c210_cache_sync(base);
  190. }
  191. static void l2c210_flush_range(unsigned long start, unsigned long end)
  192. {
  193. void __iomem *base = l2x0_base;
  194. start &= ~(CACHE_LINE_SIZE - 1);
  195. __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
  196. __l2c210_cache_sync(base);
  197. }
  198. static void l2c210_flush_all(void)
  199. {
  200. void __iomem *base = l2x0_base;
  201. BUG_ON(!irqs_disabled());
  202. __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
  203. __l2c210_cache_sync(base);
  204. }
  205. static void l2c210_sync(void)
  206. {
  207. __l2c210_cache_sync(l2x0_base);
  208. }
  209. static const struct l2c_init_data l2c210_data __initconst = {
  210. .type = "L2C-210",
  211. .way_size_0 = SZ_8K,
  212. .num_lock = 1,
  213. .enable = l2c_enable,
  214. .save = l2c_save,
  215. .configure = l2c_configure,
  216. .unlock = l2c_unlock,
  217. .outer_cache = {
  218. .inv_range = l2c210_inv_range,
  219. .clean_range = l2c210_clean_range,
  220. .flush_range = l2c210_flush_range,
  221. .flush_all = l2c210_flush_all,
  222. .disable = l2c_disable,
  223. .sync = l2c210_sync,
  224. .resume = l2c_resume,
  225. },
  226. };
  227. /*
  228. * L2C-220 specific code.
  229. *
  230. * All operations are background operations: they have to be waited for.
  231. * Conflicting requests generate a slave error (which will cause an
  232. * imprecise abort.) Never uses sync_reg_offset, so we hard-code the
  233. * sync register here.
  234. *
  235. * However, we can re-use the common l2c_resume call.
  236. */
  237. static inline void __l2c220_cache_sync(void __iomem *base)
  238. {
  239. writel_relaxed(0, base + L2X0_CACHE_SYNC);
  240. l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
  241. }
  242. static void l2c220_op_way(void __iomem *base, unsigned reg)
  243. {
  244. unsigned long flags;
  245. raw_spin_lock_irqsave(&l2x0_lock, flags);
  246. __l2c_op_way(base + reg);
  247. __l2c220_cache_sync(base);
  248. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  249. }
  250. static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
  251. unsigned long end, unsigned long flags)
  252. {
  253. raw_spinlock_t *lock = &l2x0_lock;
  254. while (start < end) {
  255. unsigned long blk_end = start + min(end - start, 4096UL);
  256. while (start < blk_end) {
  257. l2c_wait_mask(reg, 1);
  258. writel_relaxed(start, reg);
  259. start += CACHE_LINE_SIZE;
  260. }
  261. if (blk_end < end) {
  262. raw_spin_unlock_irqrestore(lock, flags);
  263. raw_spin_lock_irqsave(lock, flags);
  264. }
  265. }
  266. return flags;
  267. }
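/*
 * The loop above deliberately processes at most 4096 bytes (128 lines of
 * 32 bytes) per pass and briefly drops l2x0_lock between passes. Every
 * line operation on the L2C-220 is a background operation that must be
 * polled for completion, so without this blocking a large range would
 * keep the lock held and interrupts disabled for the whole walk.
 */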
  268. static void l2c220_inv_range(unsigned long start, unsigned long end)
  269. {
  270. void __iomem *base = l2x0_base;
  271. unsigned long flags;
  272. raw_spin_lock_irqsave(&l2x0_lock, flags);
  273. if ((start | end) & (CACHE_LINE_SIZE - 1)) {
  274. if (start & (CACHE_LINE_SIZE - 1)) {
  275. start &= ~(CACHE_LINE_SIZE - 1);
  276. writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
  277. start += CACHE_LINE_SIZE;
  278. }
  279. if (end & (CACHE_LINE_SIZE - 1)) {
  280. end &= ~(CACHE_LINE_SIZE - 1);
  281. l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
  282. writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
  283. }
  284. }
  285. flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
  286. start, end, flags);
  287. l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
  288. __l2c220_cache_sync(base);
  289. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  290. }
  291. static void l2c220_clean_range(unsigned long start, unsigned long end)
  292. {
  293. void __iomem *base = l2x0_base;
  294. unsigned long flags;
  295. start &= ~(CACHE_LINE_SIZE - 1);
  296. if ((end - start) >= l2x0_size) {
  297. l2c220_op_way(base, L2X0_CLEAN_WAY);
  298. return;
  299. }
  300. raw_spin_lock_irqsave(&l2x0_lock, flags);
  301. flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
  302. start, end, flags);
  303. l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
  304. __l2c220_cache_sync(base);
  305. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  306. }
  307. static void l2c220_flush_range(unsigned long start, unsigned long end)
  308. {
  309. void __iomem *base = l2x0_base;
  310. unsigned long flags;
  311. start &= ~(CACHE_LINE_SIZE - 1);
  312. if ((end - start) >= l2x0_size) {
  313. l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
  314. return;
  315. }
  316. raw_spin_lock_irqsave(&l2x0_lock, flags);
  317. flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
  318. start, end, flags);
  319. l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
  320. __l2c220_cache_sync(base);
  321. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  322. }
  323. static void l2c220_flush_all(void)
  324. {
  325. l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
  326. }
  327. static void l2c220_sync(void)
  328. {
  329. unsigned long flags;
  330. raw_spin_lock_irqsave(&l2x0_lock, flags);
  331. __l2c220_cache_sync(l2x0_base);
  332. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  333. }
  334. static void l2c220_enable(void __iomem *base, unsigned num_lock)
  335. {
  336. /*
  337. * Always enable non-secure access to the lockdown registers -
  338. * we write to them as part of the L2C enable sequence so they
  339. * need to be accessible.
  340. */
  341. l2x0_saved_regs.aux_ctrl |= L220_AUX_CTRL_NS_LOCKDOWN;
  342. l2c_enable(base, num_lock);
  343. }
  344. static void l2c220_unlock(void __iomem *base, unsigned num_lock)
  345. {
  346. if (readl_relaxed(base + L2X0_AUX_CTRL) & L220_AUX_CTRL_NS_LOCKDOWN)
  347. l2c_unlock(base, num_lock);
  348. }
  349. static const struct l2c_init_data l2c220_data = {
  350. .type = "L2C-220",
  351. .way_size_0 = SZ_8K,
  352. .num_lock = 1,
  353. .enable = l2c220_enable,
  354. .save = l2c_save,
  355. .configure = l2c_configure,
  356. .unlock = l2c220_unlock,
  357. .outer_cache = {
  358. .inv_range = l2c220_inv_range,
  359. .clean_range = l2c220_clean_range,
  360. .flush_range = l2c220_flush_range,
  361. .flush_all = l2c220_flush_all,
  362. .disable = l2c_disable,
  363. .sync = l2c220_sync,
  364. .resume = l2c_resume,
  365. },
  366. };
  367. /*
  368. * L2C-310 specific code.
  369. *
  370. * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
  371. * and the way operations are all background tasks. However, issuing an
  372. * operation while a background operation is in progress results in a
  373. * SLVERR response. We can reuse:
  374. *
  375. * __l2c210_cache_sync (using sync_reg_offset)
  376. * l2c210_sync
  377. * l2c210_inv_range (if 588369 is not applicable)
  378. * l2c210_clean_range
  379. * l2c210_flush_range (if 588369 is not applicable)
  380. * l2c210_flush_all (if 727915 is not applicable)
  381. *
  382. * Errata:
  383. * 588369: PL310 R0P0->R1P0, fixed R2P0.
  384. * Affects: all clean+invalidate operations
  385. * clean and invalidate skips the invalidate step, so we need to issue
  386. * separate operations. We also require the above debug workaround
  387. * enclosing this code fragment on affected parts. On unaffected parts,
  388. * we must not use this workaround without the debug register writes
  389. * to avoid exposing a problem similar to 727915.
  390. *
  391. * 727915: PL310 R2P0->R3P0, fixed R3P1.
  392. * Affects: clean+invalidate by way
  393. * clean and invalidate by way runs in the background, and a store can
  394. * hit the line between the clean operation and invalidate operation,
  395. * resulting in the store being lost.
  396. *
  397. * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
  398. * Affects: 8x64-bit (double fill) line fetches
  399. * double fill line fetches can fail to cause dirty data to be evicted
  400. * from the cache before the new data overwrites the second line.
  401. *
  402. * 753970: PL310 R3P0, fixed R3P1.
  403. * Affects: sync
  404. * prevents merging writes after the sync operation, until another L2C
  405. * operation is performed (or a number of other conditions.)
  406. *
  407. * 769419: PL310 R0P0->R3P1, fixed R3P2.
  408. * Affects: store buffer
  409. * store buffer is not automatically drained.
  410. */
  411. static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
  412. {
  413. void __iomem *base = l2x0_base;
  414. if ((start | end) & (CACHE_LINE_SIZE - 1)) {
  415. unsigned long flags;
  416. /* Erratum 588369 for both clean+invalidate operations */
  417. raw_spin_lock_irqsave(&l2x0_lock, flags);
  418. l2c_set_debug(base, 0x03);
  419. if (start & (CACHE_LINE_SIZE - 1)) {
  420. start &= ~(CACHE_LINE_SIZE - 1);
  421. writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
  422. writel_relaxed(start, base + L2X0_INV_LINE_PA);
  423. start += CACHE_LINE_SIZE;
  424. }
  425. if (end & (CACHE_LINE_SIZE - 1)) {
  426. end &= ~(CACHE_LINE_SIZE - 1);
  427. writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
  428. writel_relaxed(end, base + L2X0_INV_LINE_PA);
  429. }
  430. l2c_set_debug(base, 0x00);
  431. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  432. }
  433. __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
  434. __l2c210_cache_sync(base);
  435. }
  436. static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
  437. {
  438. raw_spinlock_t *lock = &l2x0_lock;
  439. unsigned long flags;
  440. void __iomem *base = l2x0_base;
  441. raw_spin_lock_irqsave(lock, flags);
  442. while (start < end) {
  443. unsigned long blk_end = start + min(end - start, 4096UL);
  444. l2c_set_debug(base, 0x03);
  445. while (start < blk_end) {
  446. writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
  447. writel_relaxed(start, base + L2X0_INV_LINE_PA);
  448. start += CACHE_LINE_SIZE;
  449. }
  450. l2c_set_debug(base, 0x00);
  451. if (blk_end < end) {
  452. raw_spin_unlock_irqrestore(lock, flags);
  453. raw_spin_lock_irqsave(lock, flags);
  454. }
  455. }
  456. raw_spin_unlock_irqrestore(lock, flags);
  457. __l2c210_cache_sync(base);
  458. }
  459. static void l2c310_flush_all_erratum(void)
  460. {
  461. void __iomem *base = l2x0_base;
  462. unsigned long flags;
  463. raw_spin_lock_irqsave(&l2x0_lock, flags);
  464. l2c_set_debug(base, 0x03);
  465. __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
  466. l2c_set_debug(base, 0x00);
  467. __l2c210_cache_sync(base);
  468. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  469. }
  470. static void __init l2c310_save(void __iomem *base)
  471. {
  472. unsigned revision;
  473. l2c_save(base);
  474. l2x0_saved_regs.tag_latency = readl_relaxed(base +
  475. L310_TAG_LATENCY_CTRL);
  476. l2x0_saved_regs.data_latency = readl_relaxed(base +
  477. L310_DATA_LATENCY_CTRL);
  478. l2x0_saved_regs.filter_end = readl_relaxed(base +
  479. L310_ADDR_FILTER_END);
  480. l2x0_saved_regs.filter_start = readl_relaxed(base +
  481. L310_ADDR_FILTER_START);
  482. revision = readl_relaxed(base + L2X0_CACHE_ID) &
  483. L2X0_CACHE_ID_RTL_MASK;
  484. /* From r2p0, there is Prefetch offset/control register */
  485. if (revision >= L310_CACHE_ID_RTL_R2P0)
  486. l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
  487. L310_PREFETCH_CTRL);
  488. /* From r3p0, there is Power control register */
  489. if (revision >= L310_CACHE_ID_RTL_R3P0)
  490. l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
  491. L310_POWER_CTRL);
  492. }
  493. static void l2c310_configure(void __iomem *base)
  494. {
  495. unsigned revision;
  496. l2c_configure(base);
  497. /* restore pl310 setup */
  498. l2c_write_sec(l2x0_saved_regs.tag_latency, base,
  499. L310_TAG_LATENCY_CTRL);
  500. l2c_write_sec(l2x0_saved_regs.data_latency, base,
  501. L310_DATA_LATENCY_CTRL);
  502. l2c_write_sec(l2x0_saved_regs.filter_end, base,
  503. L310_ADDR_FILTER_END);
  504. l2c_write_sec(l2x0_saved_regs.filter_start, base,
  505. L310_ADDR_FILTER_START);
  506. revision = readl_relaxed(base + L2X0_CACHE_ID) &
  507. L2X0_CACHE_ID_RTL_MASK;
  508. if (revision >= L310_CACHE_ID_RTL_R2P0)
  509. l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
  510. L310_PREFETCH_CTRL);
  511. if (revision >= L310_CACHE_ID_RTL_R3P0)
  512. l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
  513. L310_POWER_CTRL);
  514. }
  515. static int l2c310_starting_cpu(unsigned int cpu)
  516. {
  517. set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
  518. return 0;
  519. }
  520. static int l2c310_dying_cpu(unsigned int cpu)
  521. {
  522. set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
  523. return 0;
  524. }
  525. static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
  526. {
  527. unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
  528. bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
  529. u32 aux = l2x0_saved_regs.aux_ctrl;
  530. if (rev >= L310_CACHE_ID_RTL_R2P0) {
  531. if (cortex_a9) {
  532. aux |= L310_AUX_CTRL_EARLY_BRESP;
  533. pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
  534. } else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
  535. pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
  536. aux &= ~L310_AUX_CTRL_EARLY_BRESP;
  537. }
  538. }
  539. if (cortex_a9) {
  540. u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
  541. u32 acr = get_auxcr();
  542. pr_debug("Cortex-A9 ACR=0x%08x\n", acr);
  543. if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
  544. pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");
  545. if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
  546. pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");
  547. if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
  548. aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
  549. pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
  550. }
  551. } else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
  552. pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
  553. aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
  554. }
  555. /*
  556. * Always enable non-secure access to the lockdown registers -
  557. * we write to them as part of the L2C enable sequence so they
  558. * need to be accessible.
  559. */
  560. l2x0_saved_regs.aux_ctrl = aux | L310_AUX_CTRL_NS_LOCKDOWN;
  561. l2c_enable(base, num_lock);
  562. /* Read back resulting AUX_CTRL value as it could have been altered. */
  563. aux = readl_relaxed(base + L2X0_AUX_CTRL);
  564. if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
  565. u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);
  566. pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
  567. aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
  568. aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
  569. 1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
  570. }
  571. /* r3p0 or later has power control register */
  572. if (rev >= L310_CACHE_ID_RTL_R3P0) {
  573. u32 power_ctrl;
  574. power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
  575. pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
  576. power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
  577. power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
  578. }
  579. if (aux & L310_AUX_CTRL_FULL_LINE_ZERO)
  580. cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
  581. "AP_ARM_L2X0_STARTING", l2c310_starting_cpu,
  582. l2c310_dying_cpu);
  583. }
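/*
 * Note on the full-line-of-zeros handling above: the feature needs two
 * switches thrown in the right order: L310_AUX_CTRL_FULL_LINE_ZERO in the
 * L2C-310 auxiliary control register (programmed before the cache is
 * enabled) and bit 3 of the Cortex-A9 ACTLR on every CPU, which is why
 * the per-CPU half is handled through the cpuhp startup/teardown
 * callbacks registered at the end rather than set once here.
 */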
  584. static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
  585. struct outer_cache_fns *fns)
  586. {
  587. unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
  588. const char *errata[8];
  589. unsigned n = 0;
  590. if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
  591. revision < L310_CACHE_ID_RTL_R2P0 &&
  592. /* For bcm compatibility */
  593. fns->inv_range == l2c210_inv_range) {
  594. fns->inv_range = l2c310_inv_range_erratum;
  595. fns->flush_range = l2c310_flush_range_erratum;
  596. errata[n++] = "588369";
  597. }
  598. if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
  599. revision >= L310_CACHE_ID_RTL_R2P0 &&
  600. revision < L310_CACHE_ID_RTL_R3P1) {
  601. fns->flush_all = l2c310_flush_all_erratum;
  602. errata[n++] = "727915";
  603. }
  604. if (revision >= L310_CACHE_ID_RTL_R3P0 &&
  605. revision < L310_CACHE_ID_RTL_R3P2) {
  606. u32 val = l2x0_saved_regs.prefetch_ctrl;
  607. if (val & L310_PREFETCH_CTRL_DBL_LINEFILL) {
  608. val &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
  609. l2x0_saved_regs.prefetch_ctrl = val;
  610. errata[n++] = "752271";
  611. }
  612. }
  613. if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
  614. revision == L310_CACHE_ID_RTL_R3P0) {
  615. sync_reg_offset = L2X0_DUMMY_REG;
  616. errata[n++] = "753970";
  617. }
  618. if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
  619. errata[n++] = "769419";
  620. if (n) {
  621. unsigned i;
  622. pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
  623. for (i = 0; i < n; i++)
  624. pr_cont(" %s", errata[i]);
  625. pr_cont(" enabled\n");
  626. }
  627. }
  628. static void l2c310_disable(void)
  629. {
  630. /*
  631. * If full-line-of-zeros is enabled, we must first disable it in the
  632. * Cortex-A9 auxiliary control register before disabling the L2 cache.
  633. */
  634. if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
  635. set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
  636. l2c_disable();
  637. }
  638. static void l2c310_resume(void)
  639. {
  640. l2c_resume();
  641. /* Re-enable full-line-of-zeros for Cortex-A9 */
  642. if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
  643. set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
  644. }
  645. static void l2c310_unlock(void __iomem *base, unsigned num_lock)
  646. {
  647. if (readl_relaxed(base + L2X0_AUX_CTRL) & L310_AUX_CTRL_NS_LOCKDOWN)
  648. l2c_unlock(base, num_lock);
  649. }
  650. static const struct l2c_init_data l2c310_init_fns __initconst = {
  651. .type = "L2C-310",
  652. .way_size_0 = SZ_8K,
  653. .num_lock = 8,
  654. .enable = l2c310_enable,
  655. .fixup = l2c310_fixup,
  656. .save = l2c310_save,
  657. .configure = l2c310_configure,
  658. .unlock = l2c310_unlock,
  659. .outer_cache = {
  660. .inv_range = l2c210_inv_range,
  661. .clean_range = l2c210_clean_range,
  662. .flush_range = l2c210_flush_range,
  663. .flush_all = l2c210_flush_all,
  664. .disable = l2c310_disable,
  665. .sync = l2c210_sync,
  666. .resume = l2c310_resume,
  667. },
  668. };
  669. static int __init __l2c_init(const struct l2c_init_data *data,
  670. u32 aux_val, u32 aux_mask, u32 cache_id, bool nosync)
  671. {
  672. struct outer_cache_fns fns;
  673. unsigned way_size_bits, ways;
  674. u32 aux, old_aux;
  675. /*
  676. * Save the pointer globally so that callbacks which do not receive
  677. * context from callers can access the structure.
  678. */
  679. l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
  680. if (!l2x0_data)
  681. return -ENOMEM;
  682. /*
  683. * Sanity check the aux values. aux_mask is the bits we preserve
  684. * from reading the hardware register, and aux_val is the bits we
  685. * set.
  686. */
  687. if (aux_val & aux_mask)
  688. pr_alert("L2C: platform provided aux values permit register corruption.\n");
  689. old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
  690. aux &= aux_mask;
  691. aux |= aux_val;
  692. if (old_aux != aux)
  693. pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
  694. old_aux, aux);
  695. /* Determine the number of ways */
  696. switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
  697. case L2X0_CACHE_ID_PART_L310:
  698. if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
  699. pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
  700. if (aux & (1 << 16))
  701. ways = 16;
  702. else
  703. ways = 8;
  704. break;
  705. case L2X0_CACHE_ID_PART_L210:
  706. case L2X0_CACHE_ID_PART_L220:
  707. ways = (aux >> 13) & 0xf;
  708. break;
  709. case AURORA_CACHE_ID:
  710. ways = (aux >> 13) & 0xf;
  711. ways = 2 << ((ways + 1) >> 2);
  712. break;
  713. default:
  714. /* Assume unknown chips have 8 ways */
  715. ways = 8;
  716. break;
  717. }
  718. l2x0_way_mask = (1 << ways) - 1;
  719. /*
  720. * way_size_0 is the size that a way_size value of zero would be
  721. * given the calculation: way_size = way_size_0 << way_size_bits.
  722. * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
  723. * then way_size_0 would be 8k.
  724. *
  725. * L2 cache size = number of ways * way size.
  726. */
  727. way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
  728. L2C_AUX_CTRL_WAY_SIZE_SHIFT;
  729. l2x0_size = ways * (data->way_size_0 << way_size_bits);
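/*
 * Worked example of the calculation above: a PL310 with way_size_0 = 8K
 * and a way-size field of 3 in AUX_CTRL gives 8K << 3 = 64K per way;
 * with 16 ways, l2x0_size is 1MB.
 */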
  730. fns = data->outer_cache;
  731. fns.write_sec = outer_cache.write_sec;
  732. fns.configure = outer_cache.configure;
  733. if (data->fixup)
  734. data->fixup(l2x0_base, cache_id, &fns);
  735. if (nosync) {
  736. pr_info("L2C: disabling outer sync\n");
  737. fns.sync = NULL;
  738. }
  739. /*
  740. * Check if l2x0 controller is already enabled. If we are booting
  741. * in non-secure mode accessing the below registers will fault.
  742. */
  743. if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  744. l2x0_saved_regs.aux_ctrl = aux;
  745. data->enable(l2x0_base, data->num_lock);
  746. }
  747. outer_cache = fns;
  748. /*
  749. * It is strange to save the register state before initialisation,
  750. * but hey, this is what the DT implementations decided to do.
  751. */
  752. if (data->save)
  753. data->save(l2x0_base);
  754. /* Re-read it in case some bits are reserved. */
  755. aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
  756. pr_info("%s cache controller enabled, %d ways, %d kB\n",
  757. data->type, ways, l2x0_size >> 10);
  758. pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
  759. data->type, cache_id, aux);
  760. l2x0_pmu_register(l2x0_base, cache_id);
  761. return 0;
  762. }
  763. void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
  764. {
  765. const struct l2c_init_data *data;
  766. u32 cache_id;
  767. l2x0_base = base;
  768. cache_id = readl_relaxed(base + L2X0_CACHE_ID);
  769. switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
  770. default:
  771. case L2X0_CACHE_ID_PART_L210:
  772. data = &l2c210_data;
  773. break;
  774. case L2X0_CACHE_ID_PART_L220:
  775. data = &l2c220_data;
  776. break;
  777. case L2X0_CACHE_ID_PART_L310:
  778. data = &l2c310_init_fns;
  779. break;
  780. }
  781. /* Read back current (default) hardware configuration */
  782. if (data->save)
  783. data->save(l2x0_base);
  784. __l2c_init(data, aux_val, aux_mask, cache_id, false);
  785. }
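/*
 * Illustrative non-DT usage, assuming a platform that maps the controller
 * itself; the physical address below is made up for the example:
 *
 *   void __iomem *l2c_base = ioremap(0xfff12000, SZ_4K);
 *   if (l2c_base)
 *           l2x0_init(l2c_base, 0, ~0U);    /* keep the hardware AUX_CTRL value */
 */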
  786. #ifdef CONFIG_OF
  787. static int l2_wt_override;
  788. /* Aurora doesn't have the cache ID register available, so we have to
  789. * pass it through the device tree */
  790. static u32 cache_id_part_number_from_dt;
  791. /**
  792. * l2x0_cache_size_of_parse() - read cache size parameters from DT
  793. * @np: the device tree node for the l2 cache
  794. * @aux_val: pointer to machine-supplied auxiliary register value, to
  795. * be augmented by the call (bits to be set to 1)
  796. * @aux_mask: pointer to machine-supplied auxiliary register mask, to
  797. * be augmented by the call (bits to be set to 0)
  798. * @associativity: variable to return the calculated associativity in
  799. * @max_way_size: the maximum size in bytes for the cache ways
  800. */
  801. static int __init l2x0_cache_size_of_parse(const struct device_node *np,
  802. u32 *aux_val, u32 *aux_mask,
  803. u32 *associativity,
  804. u32 max_way_size)
  805. {
  806. u32 mask = 0, val = 0;
  807. u32 cache_size = 0, sets = 0;
  808. u32 way_size_bits = 1;
  809. u32 way_size = 0;
  810. u32 block_size = 0;
  811. u32 line_size = 0;
  812. of_property_read_u32(np, "cache-size", &cache_size);
  813. of_property_read_u32(np, "cache-sets", &sets);
  814. of_property_read_u32(np, "cache-block-size", &block_size);
  815. of_property_read_u32(np, "cache-line-size", &line_size);
  816. if (!cache_size || !sets)
  817. return -ENODEV;
  818. /* All these L2 caches actually have a line size equal to their block size */
  819. if (!line_size) {
  820. if (block_size) {
  821. /* If linesize is not given, it is equal to blocksize */
  822. line_size = block_size;
  823. } else {
  824. /* Fall back to known size */
  825. pr_warn("L2C OF: no cache block/line size given: "
  826. "falling back to default size %d bytes\n",
  827. CACHE_LINE_SIZE);
  828. line_size = CACHE_LINE_SIZE;
  829. }
  830. }
  831. if (line_size != CACHE_LINE_SIZE)
  832. pr_warn("L2C OF: DT supplied line size %d bytes does "
  833. "not match hardware line size of %d bytes\n",
  834. line_size,
  835. CACHE_LINE_SIZE);
  836. /*
  837. * Since:
  838. * set size = cache size / sets
  839. * ways = cache size / (sets * line size)
  840. * way size = cache size / (cache size / (sets * line size))
  841. * way size = sets * line size
  842. * associativity = ways = cache size / way size
  843. */
  844. way_size = sets * line_size;
  845. *associativity = cache_size / way_size;
  846. if (way_size > max_way_size) {
  847. pr_err("L2C OF: set size %dKB is too large\n", way_size);
  848. return -EINVAL;
  849. }
  850. pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
  851. cache_size, cache_size >> 10);
  852. pr_info("L2C OF: override line size: %d bytes\n", line_size);
  853. pr_info("L2C OF: override way size: %d bytes (%dKB)\n",
  854. way_size, way_size >> 10);
  855. pr_info("L2C OF: override associativity: %d\n", *associativity);
  856. /*
  857. * Calculates the bits 17:19 to set for way size:
  858. * 512KB -> 6, 256KB -> 5, ... 16KB -> 1
  859. */
  860. way_size_bits = ilog2(way_size >> 10) - 3;
  861. if (way_size_bits < 1 || way_size_bits > 6) {
  862. pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
  863. way_size);
  864. return -EINVAL;
  865. }
  866. mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
  867. val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT);
  868. *aux_val &= ~mask;
  869. *aux_val |= val;
  870. *aux_mask &= ~mask;
  871. return 0;
  872. }
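/*
 * Worked example for the parse above, with made-up but self-consistent
 * DT values: cache-size = <0x80000> (512KB), cache-sets = <2048>,
 * cache-line-size = <32>. Then way_size = 2048 * 32 = 64KB,
 * associativity = 512KB / 64KB = 8, and way_size_bits = ilog2(64) - 3 = 3,
 * which is the value merged into bits 17:19 of the auxiliary control value.
 */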
  873. static void __init l2x0_of_parse(const struct device_node *np,
  874. u32 *aux_val, u32 *aux_mask)
  875. {
  876. u32 data[2] = { 0, 0 };
  877. u32 tag = 0;
  878. u32 dirty = 0;
  879. u32 val = 0, mask = 0;
  880. u32 assoc;
  881. int ret;
  882. of_property_read_u32(np, "arm,tag-latency", &tag);
  883. if (tag) {
  884. mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
  885. val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
  886. }
  887. of_property_read_u32_array(np, "arm,data-latency",
  888. data, ARRAY_SIZE(data));
  889. if (data[0] && data[1]) {
  890. mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
  891. L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
  892. val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
  893. ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
  894. }
  895. of_property_read_u32(np, "arm,dirty-latency", &dirty);
  896. if (dirty) {
  897. mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
  898. val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
  899. }
  900. if (of_property_read_bool(np, "arm,parity-enable")) {
  901. mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
  902. val |= L2C_AUX_CTRL_PARITY_ENABLE;
  903. } else if (of_property_read_bool(np, "arm,parity-disable")) {
  904. mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
  905. }
  906. if (of_property_read_bool(np, "arm,shared-override")) {
  907. mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
  908. val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
  909. }
  910. ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
  911. if (ret)
  912. return;
  913. if (assoc > 8) {
  914. pr_err("l2x0 of: cache settings yield too high an associativity\n");
  915. pr_err("l2x0 of: %d calculated, max 8\n", assoc);
  916. } else {
  917. mask |= L2X0_AUX_CTRL_ASSOC_MASK;
  918. val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT);
  919. }
  920. *aux_val &= ~mask;
  921. *aux_val |= val;
  922. *aux_mask &= ~mask;
  923. }
  924. static const struct l2c_init_data of_l2c210_data __initconst = {
  925. .type = "L2C-210",
  926. .way_size_0 = SZ_8K,
  927. .num_lock = 1,
  928. .of_parse = l2x0_of_parse,
  929. .enable = l2c_enable,
  930. .save = l2c_save,
  931. .configure = l2c_configure,
  932. .unlock = l2c_unlock,
  933. .outer_cache = {
  934. .inv_range = l2c210_inv_range,
  935. .clean_range = l2c210_clean_range,
  936. .flush_range = l2c210_flush_range,
  937. .flush_all = l2c210_flush_all,
  938. .disable = l2c_disable,
  939. .sync = l2c210_sync,
  940. .resume = l2c_resume,
  941. },
  942. };
  943. static const struct l2c_init_data of_l2c220_data __initconst = {
  944. .type = "L2C-220",
  945. .way_size_0 = SZ_8K,
  946. .num_lock = 1,
  947. .of_parse = l2x0_of_parse,
  948. .enable = l2c220_enable,
  949. .save = l2c_save,
  950. .configure = l2c_configure,
  951. .unlock = l2c220_unlock,
  952. .outer_cache = {
  953. .inv_range = l2c220_inv_range,
  954. .clean_range = l2c220_clean_range,
  955. .flush_range = l2c220_flush_range,
  956. .flush_all = l2c220_flush_all,
  957. .disable = l2c_disable,
  958. .sync = l2c220_sync,
  959. .resume = l2c_resume,
  960. },
  961. };
  962. static void __init l2c310_of_parse(const struct device_node *np,
  963. u32 *aux_val, u32 *aux_mask)
  964. {
  965. u32 data[3] = { 0, 0, 0 };
  966. u32 tag[3] = { 0, 0, 0 };
  967. u32 filter[2] = { 0, 0 };
  968. u32 assoc;
  969. u32 prefetch;
  970. u32 power;
  971. u32 val;
  972. int ret;
  973. of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
  974. if (tag[0] && tag[1] && tag[2])
  975. l2x0_saved_regs.tag_latency =
  976. L310_LATENCY_CTRL_RD(tag[0] - 1) |
  977. L310_LATENCY_CTRL_WR(tag[1] - 1) |
  978. L310_LATENCY_CTRL_SETUP(tag[2] - 1);
  979. of_property_read_u32_array(np, "arm,data-latency",
  980. data, ARRAY_SIZE(data));
  981. if (data[0] && data[1] && data[2])
  982. l2x0_saved_regs.data_latency =
  983. L310_LATENCY_CTRL_RD(data[0] - 1) |
  984. L310_LATENCY_CTRL_WR(data[1] - 1) |
  985. L310_LATENCY_CTRL_SETUP(data[2] - 1);
  986. of_property_read_u32_array(np, "arm,filter-ranges",
  987. filter, ARRAY_SIZE(filter));
  988. if (filter[1]) {
  989. l2x0_saved_regs.filter_end =
  990. ALIGN(filter[0] + filter[1], SZ_1M);
  991. l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1))
  992. | L310_ADDR_FILTER_EN;
  993. }
  994. ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
  995. if (!ret) {
  996. switch (assoc) {
  997. case 16:
  998. *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
  999. *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
  1000. *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
  1001. break;
  1002. case 8:
  1003. *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
  1004. *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
  1005. break;
  1006. default:
  1007. pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
  1008. assoc);
  1009. break;
  1010. }
  1011. }
  1012. if (of_property_read_bool(np, "arm,shared-override")) {
  1013. *aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
  1014. *aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
  1015. }
  1016. if (of_property_read_bool(np, "arm,parity-enable")) {
  1017. *aux_val |= L2C_AUX_CTRL_PARITY_ENABLE;
  1018. *aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
  1019. } else if (of_property_read_bool(np, "arm,parity-disable")) {
  1020. *aux_val &= ~L2C_AUX_CTRL_PARITY_ENABLE;
  1021. *aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
  1022. }
  1023. prefetch = l2x0_saved_regs.prefetch_ctrl;
  1024. ret = of_property_read_u32(np, "arm,double-linefill", &val);
  1025. if (ret == 0) {
  1026. if (val)
  1027. prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL;
  1028. else
  1029. prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
  1030. } else if (ret != -EINVAL) {
  1031. pr_err("L2C-310 OF arm,double-linefill property value is missing\n");
  1032. }
  1033. ret = of_property_read_u32(np, "arm,double-linefill-incr", &val);
  1034. if (ret == 0) {
  1035. if (val)
  1036. prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
  1037. else
  1038. prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
  1039. } else if (ret != -EINVAL) {
  1040. pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n");
  1041. }
  1042. ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val);
  1043. if (ret == 0) {
  1044. if (!val)
  1045. prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
  1046. else
  1047. prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
  1048. } else if (ret != -EINVAL) {
  1049. pr_err("L2C-310 OF arm,double-linefill-wrap property value is missing\n");
  1050. }
  1051. ret = of_property_read_u32(np, "arm,prefetch-drop", &val);
  1052. if (ret == 0) {
  1053. if (val)
  1054. prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP;
  1055. else
  1056. prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP;
  1057. } else if (ret != -EINVAL) {
  1058. pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n");
  1059. }
  1060. ret = of_property_read_u32(np, "arm,prefetch-offset", &val);
  1061. if (ret == 0) {
  1062. prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
  1063. prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK;
  1064. } else if (ret != -EINVAL) {
  1065. pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
  1066. }
  1067. ret = of_property_read_u32(np, "prefetch-data", &val);
  1068. if (ret == 0) {
  1069. if (val)
  1070. prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
  1071. else
  1072. prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
  1073. } else if (ret != -EINVAL) {
  1074. pr_err("L2C-310 OF prefetch-data property value is missing\n");
  1075. }
  1076. ret = of_property_read_u32(np, "prefetch-instr", &val);
  1077. if (ret == 0) {
  1078. if (val)
  1079. prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
  1080. else
  1081. prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
  1082. } else if (ret != -EINVAL) {
  1083. pr_err("L2C-310 OF prefetch-instr property value is missing\n");
  1084. }
  1085. l2x0_saved_regs.prefetch_ctrl = prefetch;
  1086. power = l2x0_saved_regs.pwr_ctrl |
  1087. L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN;
  1088. ret = of_property_read_u32(np, "arm,dynamic-clock-gating", &val);
  1089. if (!ret) {
  1090. if (!val)
  1091. power &= ~L310_DYNAMIC_CLK_GATING_EN;
  1092. } else if (ret != -EINVAL) {
  1093. pr_err("L2C-310 OF dynamic-clock-gating property value is missing or invalid\n");
  1094. }
  1095. ret = of_property_read_u32(np, "arm,standby-mode", &val);
  1096. if (!ret) {
  1097. if (!val)
  1098. power &= ~L310_STNDBY_MODE_EN;
  1099. } else if (ret != -EINVAL) {
  1100. pr_err("L2C-310 OF standby-mode property value is missing or invalid\n");
  1101. }
  1102. l2x0_saved_regs.pwr_ctrl = power;
  1103. }
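/*
 * A minimal, illustrative L2C-310 device-tree node exercising some of the
 * properties parsed above (all values are examples only):
 *
 *   l2-cache@fff12000 {
 *           compatible = "arm,pl310-cache";
 *           reg = <0xfff12000 0x1000>;
 *           cache-unified;
 *           cache-level = <2>;
 *           arm,tag-latency = <1 1 1>;
 *           arm,data-latency = <2 2 1>;
 *           prefetch-data = <1>;
 *           prefetch-instr = <1>;
 *   };
 */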
  1104. static const struct l2c_init_data of_l2c310_data __initconst = {
  1105. .type = "L2C-310",
  1106. .way_size_0 = SZ_8K,
  1107. .num_lock = 8,
  1108. .of_parse = l2c310_of_parse,
  1109. .enable = l2c310_enable,
  1110. .fixup = l2c310_fixup,
  1111. .save = l2c310_save,
  1112. .configure = l2c310_configure,
  1113. .unlock = l2c310_unlock,
  1114. .outer_cache = {
  1115. .inv_range = l2c210_inv_range,
  1116. .clean_range = l2c210_clean_range,
  1117. .flush_range = l2c210_flush_range,
  1118. .flush_all = l2c210_flush_all,
  1119. .disable = l2c310_disable,
  1120. .sync = l2c210_sync,
  1121. .resume = l2c310_resume,
  1122. },
  1123. };
  1124. /*
  1125. * This is a variant of the of_l2c310_data with .sync set to
  1126. * NULL. Outer sync operations are not needed when the system is I/O
  1127. * coherent, and potentially harmful in certain situations (PCIe/PL310
  1128. * deadlock on Armada 375/38x due to hardware I/O coherency). The
  1129. * other operations are kept because they are infrequent (therefore do
  1130. * not cause the deadlock in practice) and needed for secondary CPU
  1131. * boot and other power management activities.
  1132. */
  1133. static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
  1134. .type = "L2C-310 Coherent",
  1135. .way_size_0 = SZ_8K,
  1136. .num_lock = 8,
  1137. .of_parse = l2c310_of_parse,
  1138. .enable = l2c310_enable,
  1139. .fixup = l2c310_fixup,
  1140. .save = l2c310_save,
  1141. .configure = l2c310_configure,
  1142. .unlock = l2c310_unlock,
  1143. .outer_cache = {
  1144. .inv_range = l2c210_inv_range,
  1145. .clean_range = l2c210_clean_range,
  1146. .flush_range = l2c210_flush_range,
  1147. .flush_all = l2c210_flush_all,
  1148. .disable = l2c310_disable,
  1149. .resume = l2c310_resume,
  1150. },
  1151. };
  1152. /*
  1153. * Note that the end addresses passed to Linux primitives are
  1154. * noninclusive, while the hardware cache range operations use
  1155. * inclusive start and end addresses.
  1156. */
  1157. static unsigned long aurora_range_end(unsigned long start, unsigned long end)
  1158. {
  1159. /*
  1160. * Limit the number of cache lines processed at once,
  1161. * since cache range operations stall the CPU pipeline
  1162. * until completion.
  1163. */
  1164. if (end > start + MAX_RANGE_SIZE)
  1165. end = start + MAX_RANGE_SIZE;
  1166. /*
  1167. * Cache range operations can't straddle a page boundary.
  1168. */
  1169. if (end > PAGE_ALIGN(start+1))
  1170. end = PAGE_ALIGN(start+1);
  1171. return end;
  1172. }
  1173. static void aurora_pa_range(unsigned long start, unsigned long end,
  1174. unsigned long offset)
  1175. {
  1176. void __iomem *base = l2x0_base;
  1177. unsigned long range_end;
  1178. unsigned long flags;
  1179. /*
  1180. * align the start address down and the end address up to the cache line size
  1181. */
  1182. start &= ~(CACHE_LINE_SIZE - 1);
  1183. end = ALIGN(end, CACHE_LINE_SIZE);
  1184. /*
  1185. * perform operation on all full cache lines between 'start' and 'end'
  1186. */
  1187. while (start < end) {
  1188. range_end = aurora_range_end(start, end);
  1189. raw_spin_lock_irqsave(&l2x0_lock, flags);
  1190. writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
  1191. writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset);
  1192. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  1193. writel_relaxed(0, base + AURORA_SYNC_REG);
  1194. start = range_end;
  1195. }
  1196. }
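/*
 * Because the Aurora range registers take an inclusive end address while
 * Linux passes a non-inclusive one (see the comment above
 * aurora_range_end), the loop above writes range_end - CACHE_LINE_SIZE.
 * For example, to operate on [0x1000, 0x2000) it programs base 0x1000 and
 * end 0x1fe0, then writes AURORA_SYNC_REG to drain the operation.
 */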
  1197. static void aurora_inv_range(unsigned long start, unsigned long end)
  1198. {
  1199. aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
  1200. }
  1201. static void aurora_clean_range(unsigned long start, unsigned long end)
  1202. {
  1203. /*
  1204. * If L2 is forced to WT, the L2 will always be clean and we
  1205. * don't need to do anything here.
  1206. */
  1207. if (!l2_wt_override)
  1208. aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG);
  1209. }
  1210. static void aurora_flush_range(unsigned long start, unsigned long end)
  1211. {
  1212. if (l2_wt_override)
  1213. aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
  1214. else
  1215. aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG);
  1216. }
  1217. static void aurora_flush_all(void)
  1218. {
  1219. void __iomem *base = l2x0_base;
  1220. unsigned long flags;
  1221. /* clean all ways */
  1222. raw_spin_lock_irqsave(&l2x0_lock, flags);
  1223. __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
  1224. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  1225. writel_relaxed(0, base + AURORA_SYNC_REG);
  1226. }
  1227. static void aurora_cache_sync(void)
  1228. {
  1229. writel_relaxed(0, l2x0_base + AURORA_SYNC_REG);
  1230. }
  1231. static void aurora_disable(void)
  1232. {
  1233. void __iomem *base = l2x0_base;
  1234. unsigned long flags;
  1235. raw_spin_lock_irqsave(&l2x0_lock, flags);
  1236. __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
  1237. writel_relaxed(0, base + AURORA_SYNC_REG);
  1238. l2c_write_sec(0, base, L2X0_CTRL);
  1239. dsb(st);
  1240. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  1241. }
  1242. static void aurora_save(void __iomem *base)
  1243. {
  1244. l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
  1245. l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
  1246. }
  1247. /*
  1248. * For Aurora cache in no outer mode, enable via the CP15 coprocessor
  1249. * broadcasting of cache commands to L2.
  1250. */
  1251. static void __init aurora_enable_no_outer(void __iomem *base,
  1252. unsigned num_lock)
  1253. {
  1254. u32 u;
  1255. asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
  1256. u |= AURORA_CTRL_FW; /* Set the FW bit */
  1257. asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));
  1258. isb();
  1259. l2c_enable(base, num_lock);
  1260. }
  1261. static void __init aurora_fixup(void __iomem *base, u32 cache_id,
  1262. struct outer_cache_fns *fns)
  1263. {
  1264. sync_reg_offset = AURORA_SYNC_REG;
  1265. }
  1266. static void __init aurora_of_parse(const struct device_node *np,
  1267. u32 *aux_val, u32 *aux_mask)
  1268. {
  1269. u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
  1270. u32 mask = AURORA_ACR_REPLACEMENT_MASK;
  1271. of_property_read_u32(np, "cache-id-part",
  1272. &cache_id_part_number_from_dt);
  1273. /* Determine and save the write policy */
  1274. l2_wt_override = of_property_read_bool(np, "wt-override");
  1275. if (l2_wt_override) {
  1276. val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
  1277. mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
  1278. }
  1279. *aux_val &= ~mask;
  1280. *aux_val |= val;
  1281. *aux_mask &= ~mask;
  1282. }
  1283. static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
  1284. .type = "Aurora",
  1285. .way_size_0 = SZ_4K,
  1286. .num_lock = 4,
  1287. .of_parse = aurora_of_parse,
  1288. .enable = l2c_enable,
  1289. .fixup = aurora_fixup,
  1290. .save = aurora_save,
  1291. .configure = l2c_configure,
  1292. .unlock = l2c_unlock,
  1293. .outer_cache = {
  1294. .inv_range = aurora_inv_range,
  1295. .clean_range = aurora_clean_range,
  1296. .flush_range = aurora_flush_range,
  1297. .flush_all = aurora_flush_all,
  1298. .disable = aurora_disable,
  1299. .sync = aurora_cache_sync,
  1300. .resume = l2c_resume,
  1301. },
  1302. };
  1303. static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
  1304. .type = "Aurora",
  1305. .way_size_0 = SZ_4K,
  1306. .num_lock = 4,
  1307. .of_parse = aurora_of_parse,
  1308. .enable = aurora_enable_no_outer,
  1309. .fixup = aurora_fixup,
  1310. .save = aurora_save,
  1311. .configure = l2c_configure,
  1312. .unlock = l2c_unlock,
  1313. .outer_cache = {
  1314. .resume = l2c_resume,
  1315. },
  1316. };
  1317. /*
  1318. * For certain Broadcom SoCs, depending on the address range, different offsets
  1319. * need to be added to the address before passing it to L2 for
  1320. * invalidation/clean/flush
  1321. *
  1322. * Section Address Range Offset EMI
  1323. * 1 0x00000000 - 0x3FFFFFFF 0x80000000 VC
  1324. * 2 0x40000000 - 0xBFFFFFFF 0x40000000 SYS
  1325. * 3 0xC0000000 - 0xFFFFFFFF 0x80000000 VC
  1326. *
  1327. * When the start and end addresses have crossed two different sections, we
  1328. * need to break the L2 operation into two, each within its own section.
  1329. * For example, if we need to invalidate a range starting at 0xBFFF0000 and
  1330. * ending at 0xC0001000, we need to issue two invalidations: 1) 0xBFFF0000 -
  1331. * 0xBFFFFFFF and 2) 0xC0000000 - 0xC0001000
  1332. *
  1333. * Note 1:
  1334. * By breaking a single L2 operation into two, we may potentially suffer some
  1335. * performance hit, but keep in mind the cross section case is very rare
  1336. *
  1337. * Note 2:
  1338. * We do not need to handle the case when the start address is in
  1339. * Section 1 and the end address is in Section 3, since it is not a valid use
  1340. * case
  1341. *
  1342. * Note 3:
  1343. * Section 1 in practical terms can no longer be used on rev A2. Because of
  1344. * that the code does not need to handle section 1 at all.
  1345. *
  1346. */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
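
/*
 * For example (illustrative only): a section 2 address such as 0x50000000
 * gets the SYS EMI offset and is presented to the L2 as 0x90000000, while
 * section 1/3 addresses get the VC EMI offset instead.
 */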

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/*
	 * Normal case: start and end lie in the same section (if end is
	 * still in SYS EMI, start must be too; if start is already past
	 * SYS EMI, so is end).
	 */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_inv_range(new_start, new_end);
		return;
	}

	/* The range crosses sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;
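
	/*
	 * For a range at least as large as the L2 itself, flushing the
	 * whole cache is cheaper than walking the range line by line.
	 */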
	if ((end - start) >= l2x0_size) {
		outer_cache.flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

/* Broadcom L2C-310 is based on ARM's r3p2 or later and requires no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range   = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all   = l2c210_flush_all,
		.disable     = l2c310_disable,
		.sync        = l2c210_sync,
		.resume      = l2c310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2c_save(base);

	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L310_PREFETCH_CTRL);
}
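
/*
 * Restore the Tauros3-specific auxiliary and prefetch control registers
 * saved by tauros3_save() on top of the common l2c configuration.
 */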
static void tauros3_configure(void __iomem *base)
{
	l2c_configure(base);
	writel_relaxed(l2x0_saved_regs.aux2_ctrl,
		       base + TAUROS3_AUX2_CTRL);
	writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
		       base + L310_PREFETCH_CTRL);
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	.configure = tauros3_configure,
	.unlock = l2c_unlock,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume      = l2c_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }

static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id, old_aux;
	u32 cache_level = 2;
	bool nosync = false;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	if (of_device_is_compatible(np, "arm,pl310-cache") &&
	    of_property_read_bool(np, "arm,io-coherent"))
		data = &of_l2c310_coherent_data;

	old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	if (old_aux != ((old_aux & aux_mask) | aux_val)) {
		pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
			old_aux, (old_aux & aux_mask) | aux_val);
	} else if (aux_mask != ~0U && aux_val != 0) {
		pr_alert("L2C: platform provided aux values match the hardware, so have no effect. Please remove them.\n");
	}

	/* All L2 caches are unified, so this property should be specified */
	if (!of_property_read_bool(np, "cache-unified"))
		pr_err("L2C: device tree omits to specify unified cache\n");

	if (of_property_read_u32(np, "cache-level", &cache_level))
		pr_err("L2C: device tree omits to specify cache-level\n");

	if (cache_level != 2)
		pr_err("L2C: device tree specifies invalid cache level\n");

	nosync = of_property_read_bool(np, "arm,outer-sync-disable");

	/* Read back current (default) hardware configuration */
	if (data->save)
		data->save(l2x0_base);

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	return __l2c_init(data, aux_val, aux_mask, cache_id, nosync);
}
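
/*
 * Sketch of a typical caller (not part of this file): a platform with no
 * overrides of its own would call
 *
 *	l2x0_of_init(0, ~0UL);
 *
 * so the auxiliary control register keeps its hardware/DT-configured value.
 */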
#endif