/*
 * Memory copy functions for 32-bit PowerPC.
 *
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#define COPY_16_BYTES		\
	lwz	r7,4(r4);	\
	lwz	r8,8(r4);	\
	lwz	r9,12(r4);	\
	lwzu	r10,16(r4);	\
	stw	r7,4(r6);	\
	stw	r8,8(r6);	\
	stw	r9,12(r6);	\
	stwu	r10,16(r6)
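
/*
 * COPY_16_BYTES (and the _WITHEX variant below) expects r4 (src) and
 * r6 (dest) to point 4 bytes before the next word to copy; the
 * trailing lwzu/stwu advance both pointers by 16, so the macro can
 * simply be repeated once for each 16 bytes of a cache line.
 */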
#define COPY_16_BYTES_WITHEX(n)	\
8 ## n ## 0:			\
	lwz	r7,4(r4);	\
8 ## n ## 1:			\
	lwz	r8,8(r4);	\
8 ## n ## 2:			\
	lwz	r9,12(r4);	\
8 ## n ## 3:			\
	lwzu	r10,16(r4);	\
8 ## n ## 4:			\
	stw	r7,4(r6);	\
8 ## n ## 5:			\
	stw	r8,8(r6);	\
8 ## n ## 6:			\
	stw	r9,12(r6);	\
8 ## n ## 7:			\
	stwu	r10,16(r6)
#define COPY_16_BYTES_EXCODE(n)			\
9 ## n ## 0:					\
	addi	r5,r5,-(16 * n);		\
	b	104f;				\
9 ## n ## 1:					\
	addi	r5,r5,-(16 * n);		\
	b	105f;				\
	.section __ex_table,"a";		\
	.align	2;				\
	.long	8 ## n ## 0b,9 ## n ## 0b;	\
	.long	8 ## n ## 1b,9 ## n ## 0b;	\
	.long	8 ## n ## 2b,9 ## n ## 0b;	\
	.long	8 ## n ## 3b,9 ## n ## 0b;	\
	.long	8 ## n ## 4b,9 ## n ## 1b;	\
	.long	8 ## n ## 5b,9 ## n ## 1b;	\
	.long	8 ## n ## 6b,9 ## n ## 1b;	\
	.long	8 ## n ## 7b,9 ## n ## 1b;	\
	.text
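
/*
 * COPY_16_BYTES_WITHEX(n) gives each load the label 8n0..8n3 and
 * each store the label 8n4..8n7.  COPY_16_BYTES_EXCODE(n) emits the
 * matching __ex_table fixups: load faults land at 9n0, store faults
 * at 9n1; both subtract from r5 the 16*n bytes of the current cache
 * line already copied by earlier expansions, then branch to the
 * common handlers at 104: (read) and 105: (write) below.
 */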
	.text
	.stabs	"arch/powerpc/lib/",N_SO,0,0,0f
	.stabs	"copy_32.S",N_SO,0,0,0f
0:

CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)
/*
 * Use dcbz on the complete cache lines in the destination
 * to set them to zero. This requires that the destination
 * area is cacheable. -- paulus
 */
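/* cacheable_memzero(dest, count): r3 = dest, r4 = byte count; fill value is 0 */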
_GLOBAL(cacheable_memzero)
	mr	r5,r4
	li	r4,0
	addi	r6,r3,-4
	cmplwi	0,r5,4
	blt	7f			/* less than a word: byte loop */
	stwu	r4,4(r6)
	beqlr
	andi.	r0,r6,3			/* word-align the destination */
	add	r5,r0,r5
	subf	r6,r0,r6
	clrlwi	r7,r6,32-LG_CACHELINE_BYTES	/* offset within cache line */
	add	r8,r7,r5
	srwi	r9,r8,LG_CACHELINE_BYTES
	addic.	r9,r9,-1	/* total number of complete cachelines */
	ble	2f
	xori	r0,r7,CACHELINE_MASK & ~3
	srwi.	r0,r0,2			/* # words to fill out current line */
	beq	3f
	mtctr	r0
4:	stwu	r4,4(r6)
	bdnz	4b
3:	mtctr	r9
	li	r7,4
10:	dcbz	r7,r6			/* zero a whole line at a time */
	addi	r6,r6,CACHELINE_BYTES
	bdnz	10b
	clrlwi	r5,r8,32-LG_CACHELINE_BYTES	/* bytes left after last line */
	addi	r5,r5,4
2:	srwi	r0,r5,2
	mtctr	r0
	bdz	6f
1:	stwu	r4,4(r6)
	bdnz	1b
6:	andi.	r5,r5,3
7:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r6,3
8:	stbu	r4,1(r6)
	bdnz	8b
	blr
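
/*
 * memset(dest, c, count): r3 = dest, r4 = c, r5 = count.
 * The two rlwimi instructions replicate the low byte of r4 into all
 * four byte lanes so the fill can be done a word at a time.
 */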
_GLOBAL(memset)
	rlwimi	r4,r4,8,16,23
	rlwimi	r4,r4,16,0,15
	addi	r6,r3,-4
	cmplwi	0,r5,4
	blt	7f
	stwu	r4,4(r6)
	beqlr
	andi.	r0,r6,3
	add	r5,r0,r5
	subf	r6,r0,r6
	srwi	r0,r5,2
	mtctr	r0
	bdz	6f
1:	stwu	r4,4(r6)
	bdnz	1b
6:	andi.	r5,r5,3
7:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r6,3
8:	stbu	r4,1(r6)
	bdnz	8b
	blr

/*
 * This version uses dcbz on the complete cache lines in the
 * destination area to reduce memory traffic. This requires that
 * the destination area is cacheable.
 * We only use this version if the source and dest don't overlap.
 * -- paulus.
 */
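/*
 * The overlap test below computes cr0.lt = (src < dest + count) and
 * cr1.lt = (dest < src + count); the regions overlap only if both
 * are true, in which case we fall back to the plain memcpy.
 */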
_GLOBAL(cacheable_memcpy)
	add	r7,r3,r5		/* test if the src & dst overlap */
	add	r8,r4,r5
	cmplw	0,r4,r7
	cmplw	1,r3,r8
	crand	0,0,4			/* cr0.lt &= cr1.lt */
	blt	memcpy			/* if regions overlap */

	addi	r4,r4,-4
	addi	r6,r3,-4
	neg	r0,r3
	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
	beq	58f

	cmplw	0,r5,r0			/* is this more than total to do? */
	blt	63f			/* if not much to do */

	andi.	r8,r0,3			/* get it word-aligned first */
	subf	r5,r0,r5
	mtctr	r8
	beq+	61f
70:	lbz	r9,4(r4)		/* do some bytes */
	stb	r9,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	70b
61:	srwi.	r0,r0,2
	mtctr	r0
	beq	58f
72:	lwzu	r9,4(r4)		/* do some words */
	stwu	r9,4(r6)
	bdnz	72b

58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
	li	r11,4
	mtctr	r0
	beq	63f
53:
	dcbz	r11,r6
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	53b

63:	srwi.	r0,r5,2
	mtctr	r0
	beq	64f
30:	lwzu	r0,4(r4)
	stwu	r0,4(r6)
	bdnz	30b
64:	andi.	r0,r5,3
	mtctr	r0
	beq+	65f
40:	lbz	r0,4(r4)
	stb	r0,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	40b
65:	blr
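
/*
 * memmove(dest, src, count): r3 = dest, r4 = src, r5 = count.
 * When dest <= src a forward copy is safe even if the regions
 * overlap, so we simply fall through to memcpy; otherwise we
 * copy backwards.
 */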
_GLOBAL(memmove)
	cmplw	0,r3,r4
	bgt	backwards_memcpy
	/* fall through */

_GLOBAL(memcpy)
	srwi.	r7,r5,3
	addi	r6,r3,-4
	addi	r4,r4,-4
	beq	2f			/* if less than 8 bytes to do */
	andi.	r0,r6,3			/* get dest word aligned */
	mtctr	r7
	bne	5f
1:	lwz	r7,4(r4)
	lwzu	r8,8(r4)
	stw	r7,4(r6)
	stwu	r8,8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,4(r4)
	addi	r5,r5,-4
	stwu	r0,4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r4,r4,3
	addi	r6,r6,3
4:	lbzu	r0,1(r4)
	stbu	r0,1(r6)
	bdnz	4b
	blr
5:	subfic	r0,r0,4
	mtctr	r0
6:	lbz	r7,4(r4)
	addi	r4,r4,1
	stb	r7,4(r6)
	addi	r6,r6,1
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31		/* r7 = r5 >> 3 */
	beq	2b
	mtctr	r7
	b	1b
_GLOBAL(backwards_memcpy)
	rlwinm.	r7,r5,32-3,3,31		/* r7 = r5 >> 3 */
	add	r6,r3,r5
	add	r4,r4,r5
	beq	2f
	andi.	r0,r6,3
	mtctr	r7
	bne	5f
1:	lwz	r7,-4(r4)
	lwzu	r8,-8(r4)
	stw	r7,-4(r6)
	stwu	r8,-8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,-4(r4)
	subi	r5,r5,4
	stwu	r0,-4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
4:	lbzu	r0,-1(r4)
	stbu	r0,-1(r6)
	bdnz	4b
	blr
5:	mtctr	r0
6:	lbzu	r7,-1(r4)
	stbu	r7,-1(r6)
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31
	beq	2b
	mtctr	r7
	b	1b
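
/*
 * __copy_tofrom_user(to, from, size): r3 = to, r4 = from, r5 = size.
 * Returns in r3 the number of bytes not copied (0 on success).  The
 * __ex_table entries below route faulting loads (read) and stores
 * (write) to the fixup code at the end of this file.
 */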
_GLOBAL(__copy_tofrom_user)
	addi	r4,r4,-4
	addi	r6,r3,-4
	neg	r0,r3
	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
	beq	58f

	cmplw	0,r5,r0			/* is this more than total to do? */
	blt	63f			/* if not much to do */

	andi.	r8,r0,3			/* get it word-aligned first */
	mtctr	r8
	beq+	61f
70:	lbz	r9,4(r4)		/* do some bytes */
71:	stb	r9,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	70b
61:	subf	r5,r0,r5
	srwi.	r0,r0,2
	mtctr	r0
	beq	58f
72:	lwzu	r9,4(r4)		/* do some words */
73:	stwu	r9,4(r6)
	bdnz	72b

	.section __ex_table,"a"
	.align	2
	.long	70b,100f
	.long	71b,101f
	.long	72b,102f
	.long	73b,103f
	.text
58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
	li	r11,4
	beq	63f

	/* Here we decide how far ahead to prefetch the source */
	li	r3,4
	cmpwi	r0,1
	li	r7,0
	ble	114f
	li	r7,1
#if MAX_COPY_PREFETCH > 1
	/* Heuristically, for large transfers we prefetch
	   MAX_COPY_PREFETCH cachelines ahead.  For small transfers
	   we prefetch 1 cacheline ahead. */
	cmpwi	r0,MAX_COPY_PREFETCH
	ble	112f
	li	r7,MAX_COPY_PREFETCH
112:	mtctr	r7
111:	dcbt	r3,r4
	addi	r3,r3,CACHELINE_BYTES
	bdnz	111b
#else
	dcbt	r3,r4
	addi	r3,r3,CACHELINE_BYTES
#endif /* MAX_COPY_PREFETCH > 1 */
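
/*
 * Note that dcbt is only a hint and does not fault on an
 * inaccessible user address, so the prefetches need no __ex_table
 * entry; dcbz really writes to the line, hence the fixup at 105:
 * for label 54: below.
 */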
114:	subf	r8,r7,r0
	mr	r0,r7
	mtctr	r8

53:	dcbt	r3,r4
54:	dcbz	r11,r6
	.section __ex_table,"a"
	.align	2
	.long	54b,105f
	.text
	/* the main body of the cacheline loop */
	COPY_16_BYTES_WITHEX(0)
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES_WITHEX(1)
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES_WITHEX(2)
	COPY_16_BYTES_WITHEX(3)
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES_WITHEX(4)
	COPY_16_BYTES_WITHEX(5)
	COPY_16_BYTES_WITHEX(6)
	COPY_16_BYTES_WITHEX(7)
#endif
#endif
#endif
	bdnz	53b
	cmpwi	r0,0
	li	r3,4
	li	r7,0
	bne	114b

63:	srwi.	r0,r5,2
	mtctr	r0
	beq	64f
30:	lwzu	r0,4(r4)
31:	stwu	r0,4(r6)
	bdnz	30b
64:	andi.	r0,r5,3
	mtctr	r0
	beq+	65f
40:	lbz	r0,4(r4)
41:	stb	r0,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	40b
65:	li	r3,0
	blr

/* read fault, initial single-byte copy */
100:	li	r9,0
	b	90f
/* write fault, initial single-byte copy */
101:	li	r9,1
90:	subf	r5,r8,r5
	li	r3,0
	b	99f
/* read fault, initial word copy */
102:	li	r9,0
	b	91f
/* write fault, initial word copy */
103:	li	r9,1
91:	li	r3,2
	b	99f

/*
 * this stuff handles faults in the cacheline loop and branches to either
 * 104f (if in read part) or 105f (if in write part), after updating r5
 */
	COPY_16_BYTES_EXCODE(0)
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES_EXCODE(1)
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES_EXCODE(2)
	COPY_16_BYTES_EXCODE(3)
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES_EXCODE(4)
	COPY_16_BYTES_EXCODE(5)
	COPY_16_BYTES_EXCODE(6)
	COPY_16_BYTES_EXCODE(7)
#endif
#endif
#endif

/* read fault in cacheline loop */
104:	li	r9,0
	b	92f
/* fault on dcbz (effectively a write fault) */
/* or write fault in cacheline loop */
105:	li	r9,1
92:	li	r3,LG_CACHELINE_BYTES
	mfctr	r8
	add	r0,r0,r8
	b	106f
/* read fault in final word loop */
108:	li	r9,0
	b	93f
/* write fault in final word loop */
109:	li	r9,1
93:	andi.	r5,r5,3
	li	r3,2
	b	99f
/* read fault in final byte loop */
110:	li	r9,0
	b	94f
/* write fault in final byte loop */
111:	li	r9,1
94:	li	r5,0
	li	r3,0
/*
 * At this stage the number of bytes not copied is
 * r5 + (ctr << r3), and r9 is 0 for read or 1 for write.
 */
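/*
 * For example, a fault in the final word loop arrives here with
 * r3 = 2 from 93: above, so each remaining CTR iteration counts as
 * 4 uncopied bytes.
 */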
99:	mfctr	r0
106:	slw	r3,r0,r3
	add.	r3,r3,r5
	beq	120f			/* shouldn't happen */
	cmpwi	0,r9,0
	bne	120f
/* for a read fault, first try to continue the copy one byte at a time */
	mtctr	r3
130:	lbz	r0,4(r4)
131:	stb	r0,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	130b
/* then clear out the destination: r3 bytes starting at 4(r6) */
132:	mfctr	r3
	srwi.	r0,r3,2
	li	r9,0
	mtctr	r0
	beq	113f
112:	stwu	r9,4(r6)
	bdnz	112b
113:	andi.	r0,r3,3
	mtctr	r0
	beq	120f
114:	stb	r9,4(r6)
	addi	r6,r6,1
	bdnz	114b
120:	blr

	.section __ex_table,"a"
	.align	2
	.long	30b,108b
	.long	31b,109b
	.long	40b,110b
	.long	41b,111b
	.long	130b,132b
	.long	131b,120b
	.long	112b,120b
	.long	114b,120b
	.text