memcpy_power7.S

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2012
 *
 * Author: Anton Blanchard <anton@au.ibm.com>
 */
#include <asm/ppc_asm.h>

_GLOBAL(memcpy_power7)

#ifdef __BIG_ENDIAN__
#define LVS(VRT,RA,RB)		lvsl	VRT,RA,RB
#define VPERM(VRT,VRA,VRB,VRC)	vperm	VRT,VRA,VRB,VRC
#else
#define LVS(VRT,RA,RB)		lvsr	VRT,RA,RB
#define VPERM(VRT,VRA,VRB,VRC)	vperm	VRT,VRB,VRA,VRC
#endif
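/*
 * LVS builds a permute control vector from the source misalignment
 * (lvsl on big endian, lvsr on little endian); the swapped VPERM
 * operands on little endian keep the byte merge consistent with the
 * lvsr-generated control vector.
 */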
#ifdef CONFIG_ALTIVEC
	cmpldi	r5,16
	cmpldi	cr1,r5,4096

	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)

	blt	.Lshort_copy
	bgt	cr1,.Lvmx_copy
#else
	cmpldi	r5,16

	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)

	blt	.Lshort_copy
#endif
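/*
 * r3 = dest, r4 = src, r5 = len.  The original destination pointer is
 * saved so it can be reloaded into r3 and returned, as memcpy requires.
 * Copies under 16B go straight to .Lshort_copy; with Altivec, copies
 * over 4096B take the VMX path.
 */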
.Lnonvmx_copy:
	/* Get the source 8B aligned */
	neg	r6,r4
	mtocrf	0x01,r6
	clrldi	r6,r6,(64-3)
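	/*
	 * mtocrf copied the low nibble of r6 (bytes needed to reach
	 * alignment) into CR7, so each bf below tests one bit of that
	 * count: +3 is the 1B step, +2 the 2B step, +1 the 4B step.
	 */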
	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	addi	r4,r4,1
	stb	r0,0(r3)
	addi	r3,r3,1

1:	bf	cr7*4+2,2f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

2:	bf	cr7*4+1,3f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

3:	sub	r5,r5,r6
	cmpldi	r5,128
	blt	5f
	mflr	r0
	stdu	r1,-STACKFRAMESIZE(r1)
	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)
	std	r17,STK_REG(R17)(r1)
	std	r18,STK_REG(R18)(r1)
	std	r19,STK_REG(R19)(r1)
	std	r20,STK_REG(R20)(r1)
	std	r21,STK_REG(R21)(r1)
	std	r22,STK_REG(R22)(r1)
	std	r0,STACKFRAMESIZE+16(r1)

	srdi	r6,r5,7
	mtctr	r6
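	/* CTR = number of whole 128B cachelines to copy. */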
	/* Now do cacheline (128B) sized loads and stores. */
	.align	5
4:
	ld	r0,0(r4)
	ld	r6,8(r4)
	ld	r7,16(r4)
	ld	r8,24(r4)
	ld	r9,32(r4)
	ld	r10,40(r4)
	ld	r11,48(r4)
	ld	r12,56(r4)
	ld	r14,64(r4)
	ld	r15,72(r4)
	ld	r16,80(r4)
	ld	r17,88(r4)
	ld	r18,96(r4)
	ld	r19,104(r4)
	ld	r20,112(r4)
	ld	r21,120(r4)
	addi	r4,r4,128
	std	r0,0(r3)
	std	r6,8(r3)
	std	r7,16(r3)
	std	r8,24(r3)
	std	r9,32(r3)
	std	r10,40(r3)
	std	r11,48(r3)
	std	r12,56(r3)
	std	r14,64(r3)
	std	r15,72(r3)
	std	r16,80(r3)
	std	r17,88(r3)
	std	r18,96(r3)
	std	r19,104(r3)
	std	r20,112(r3)
	std	r21,120(r3)
	addi	r3,r3,128
	bdnz	4b
	clrldi	r5,r5,(64-7)
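	/* r5 now holds the remaining length modulo 128. */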
	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)
	ld	r17,STK_REG(R17)(r1)
	ld	r18,STK_REG(R18)(r1)
	ld	r19,STK_REG(R19)(r1)
	ld	r20,STK_REG(R20)(r1)
	ld	r21,STK_REG(R21)(r1)
	ld	r22,STK_REG(R22)(r1)
	addi	r1,r1,STACKFRAMESIZE
	/* Up to 127B to go */
5:	srdi	r6,r5,4
	mtocrf	0x01,r6
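	/* CR7 bit +1 set => a 64B chunk remains, +2 => 32B, +3 => 16B. */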
6:	bf	cr7*4+1,7f
	ld	r0,0(r4)
	ld	r6,8(r4)
	ld	r7,16(r4)
	ld	r8,24(r4)
	ld	r9,32(r4)
	ld	r10,40(r4)
	ld	r11,48(r4)
	ld	r12,56(r4)
	addi	r4,r4,64
	std	r0,0(r3)
	std	r6,8(r3)
	std	r7,16(r3)
	std	r8,24(r3)
	std	r9,32(r3)
	std	r10,40(r3)
	std	r11,48(r3)
	std	r12,56(r3)
	addi	r3,r3,64

	/* Up to 63B to go */
7:	bf	cr7*4+2,8f
	ld	r0,0(r4)
	ld	r6,8(r4)
	ld	r7,16(r4)
	ld	r8,24(r4)
	addi	r4,r4,32
	std	r0,0(r3)
	std	r6,8(r3)
	std	r7,16(r3)
	std	r8,24(r3)
	addi	r3,r3,32

	/* Up to 31B to go */
8:	bf	cr7*4+3,9f
	ld	r0,0(r4)
	ld	r6,8(r4)
	addi	r4,r4,16
	std	r0,0(r3)
	std	r6,8(r3)
	addi	r3,r3,16

9:	clrldi	r5,r5,(64-4)
	/* Up to 15B to go */
.Lshort_copy:
	mtocrf	0x01,r5
	bf	cr7*4+0,12f
	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
	lwz	r6,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r6,4(r3)
	addi	r3,r3,8

12:	bf	cr7*4+1,13f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

13:	bf	cr7*4+2,14f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

14:	bf	cr7*4+3,15f
	lbz	r0,0(r4)
	stb	r0,0(r3)

15:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
	blr
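/*
 * Entered from the VMX path when enter_vmx_copy reports that VMX
 * cannot be used: drop the frame .Lvmx_copy allocated and redo the
 * whole copy with the scalar loop.
 */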
.Lunwind_stack_nonvmx_copy:
	addi	r1,r1,STACKFRAMESIZE
	b	.Lnonvmx_copy
#ifdef CONFIG_ALTIVEC
.Lvmx_copy:
	mflr	r0
	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
	std	r0,16(r1)
	stdu	r1,-STACKFRAMESIZE(r1)
	bl	enter_vmx_copy
	cmpwi	cr1,r3,0
	ld	r0,STACKFRAMESIZE+16(r1)
	ld	r3,STK_REG(R31)(r1)
	ld	r4,STK_REG(R30)(r1)
	ld	r5,STK_REG(R29)(r1)
	mtlr	r0
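	/*
	 * cr1 records whether enter_vmx_copy returned 0 (VMX unusable);
	 * it is tested after the prefetch streams are started below.
	 */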
	/*
	 * We prefetch both the source and destination using enhanced touch
	 * instructions. We use a stream ID of 0 for the load side and
	 * 1 for the store side.
	 */
	clrrdi	r6,r4,7
	clrrdi	r9,r3,7
	ori	r9,r9,1		/* stream=1 */

	srdi	r7,r5,7		/* length in cachelines, capped at 0x3FF */
	cmpldi	r7,0x3FF
	ble	1f
	li	r7,0x3FF

1:	lis	r0,0x0E00	/* depth=7 */
	sldi	r7,r7,7
	or	r7,r7,r0
	ori	r10,r7,1	/* stream=1 */

	lis	r8,0x8000	/* GO=1 */
	clrldi	r8,r8,32
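	/*
	 * dcbt/dcbtst with TH=0b01000 name the start of a load/store
	 * stream; TH=0b01010 supplies the stream attributes, and the
	 * final dcbt with GO=1 starts all nascent streams.
	 */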
	.machine push
	.machine "power4"
	dcbt	r0,r6,0b01000
	dcbt	r0,r7,0b01010
	dcbtst	r0,r9,0b01000
	dcbtst	r0,r10,0b01010
	eieio
	dcbt	r0,r8,0b01010	/* GO */
	.machine pop

	beq	cr1,.Lunwind_stack_nonvmx_copy
	/*
	 * If source and destination are not relatively aligned we use a
	 * slower permute loop.
	 */
	xor	r6,r4,r3
	rldicl.	r6,r6,0,(64-4)
	bne	.Lvmx_unaligned_copy
	/* Get the destination 16B aligned */
	neg	r6,r3
	mtocrf	0x01,r6
	clrldi	r6,r6,(64-4)

	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	addi	r4,r4,1
	stb	r0,0(r3)
	addi	r3,r3,1

1:	bf	cr7*4+2,2f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

2:	bf	cr7*4+1,3f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

3:	bf	cr7*4+0,4f
	ld	r0,0(r4)
	addi	r4,r4,8
	std	r0,0(r3)
	addi	r3,r3,8

4:	sub	r5,r5,r6
	/* Get the destination 128B aligned */
	neg	r6,r3
	srdi	r7,r6,4
	mtocrf	0x01,r7
	clrldi	r6,r6,(64-7)
	li	r9,16
	li	r10,32
	li	r11,48
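	/* r9/r10/r11 are fixed byte offsets for the indexed lvx/stvx forms. */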
	bf	cr7*4+3,5f
	lvx	v1,r0,r4
	addi	r4,r4,16
	stvx	v1,r0,r3
	addi	r3,r3,16

5:	bf	cr7*4+2,6f
	lvx	v1,r0,r4
	lvx	v0,r4,r9
	addi	r4,r4,32
	stvx	v1,r0,r3
	stvx	v0,r3,r9
	addi	r3,r3,32

6:	bf	cr7*4+1,7f
	lvx	v3,r0,r4
	lvx	v2,r4,r9
	lvx	v1,r4,r10
	lvx	v0,r4,r11
	addi	r4,r4,64
	stvx	v3,r0,r3
	stvx	v2,r3,r9
	stvx	v1,r3,r10
	stvx	v0,r3,r11
	addi	r3,r3,64

7:	sub	r5,r5,r6
	srdi	r6,r5,7

	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)

	li	r12,64
	li	r14,80
	li	r15,96
	li	r16,112

	mtctr	r6
	/*
	 * Now do cacheline sized loads and stores. By this stage the
	 * cacheline stores are also cacheline aligned.
	 */
	.align	5
8:
	lvx	v7,r0,r4
	lvx	v6,r4,r9
	lvx	v5,r4,r10
	lvx	v4,r4,r11
	lvx	v3,r4,r12
	lvx	v2,r4,r14
	lvx	v1,r4,r15
	lvx	v0,r4,r16
	addi	r4,r4,128
	stvx	v7,r0,r3
	stvx	v6,r3,r9
	stvx	v5,r3,r10
	stvx	v4,r3,r11
	stvx	v3,r3,r12
	stvx	v2,r3,r14
	stvx	v1,r3,r15
	stvx	v0,r3,r16
	addi	r3,r3,128
	bdnz	8b
	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)

	/* Up to 127B to go */
	clrldi	r5,r5,(64-7)
	srdi	r6,r5,4
	mtocrf	0x01,r6
	bf	cr7*4+1,9f
	lvx	v3,r0,r4
	lvx	v2,r4,r9
	lvx	v1,r4,r10
	lvx	v0,r4,r11
	addi	r4,r4,64
	stvx	v3,r0,r3
	stvx	v2,r3,r9
	stvx	v1,r3,r10
	stvx	v0,r3,r11
	addi	r3,r3,64

9:	bf	cr7*4+2,10f
	lvx	v1,r0,r4
	lvx	v0,r4,r9
	addi	r4,r4,32
	stvx	v1,r0,r3
	stvx	v0,r3,r9
	addi	r3,r3,32

10:	bf	cr7*4+3,11f
	lvx	v1,r0,r4
	addi	r4,r4,16
	stvx	v1,r0,r3
	addi	r3,r3,16

	/* Up to 15B to go */
11:	clrldi	r5,r5,(64-4)
	mtocrf	0x01,r5
	bf	cr7*4+0,12f
	ld	r0,0(r4)
	addi	r4,r4,8
	std	r0,0(r3)
	addi	r3,r3,8

12:	bf	cr7*4+1,13f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

13:	bf	cr7*4+2,14f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

14:	bf	cr7*4+3,15f
	lbz	r0,0(r4)
	stb	r0,0(r3)

15:	addi	r1,r1,STACKFRAMESIZE
	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
	b	exit_vmx_copy		/* tail call optimise */
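/*
 * Relatively unaligned copy: lvx ignores the low 4 bits of the EA and
 * loads the enclosing 16B-aligned block, so each aligned store below
 * is assembled with VPERM from two adjacent source vectors.
 */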
.Lvmx_unaligned_copy:
	/* Get the destination 16B aligned */
	neg	r6,r3
	mtocrf	0x01,r6
	clrldi	r6,r6,(64-4)

	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	addi	r4,r4,1
	stb	r0,0(r3)
	addi	r3,r3,1

1:	bf	cr7*4+2,2f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

2:	bf	cr7*4+1,3f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

3:	bf	cr7*4+0,4f
	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
	lwz	r7,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r7,4(r3)
	addi	r3,r3,8

4:	sub	r5,r5,r6
	/* Get the destination 128B aligned */
	neg	r6,r3
	srdi	r7,r6,4
	mtocrf	0x01,r7
	clrldi	r6,r6,(64-7)
	li	r9,16
	li	r10,32
	li	r11,48

	LVS(v16,0,r4)		/* Setup permute control vector */
	lvx	v0,0,r4
	addi	r4,r4,16
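	/*
	 * v0 carries the previous source vector: each step loads the
	 * next one and VPERMs the pair into an aligned result, so r4
	 * always runs one 16B block ahead of the data actually stored
	 * (unwound by the addi r4,r4,-16 before the scalar tail).
	 */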
	bf	cr7*4+3,5f
	lvx	v1,r0,r4
	VPERM(v8,v0,v1,v16)
	addi	r4,r4,16
	stvx	v8,r0,r3
	addi	r3,r3,16
	vor	v0,v1,v1

5:	bf	cr7*4+2,6f
	lvx	v1,r0,r4
	VPERM(v8,v0,v1,v16)
	lvx	v0,r4,r9
	VPERM(v9,v1,v0,v16)
	addi	r4,r4,32
	stvx	v8,r0,r3
	stvx	v9,r3,r9
	addi	r3,r3,32

6:	bf	cr7*4+1,7f
	lvx	v3,r0,r4
	VPERM(v8,v0,v3,v16)
	lvx	v2,r4,r9
	VPERM(v9,v3,v2,v16)
	lvx	v1,r4,r10
	VPERM(v10,v2,v1,v16)
	lvx	v0,r4,r11
	VPERM(v11,v1,v0,v16)
	addi	r4,r4,64
	stvx	v8,r0,r3
	stvx	v9,r3,r9
	stvx	v10,r3,r10
	stvx	v11,r3,r11
	addi	r3,r3,64

7:	sub	r5,r5,r6
	srdi	r6,r5,7

	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)

	li	r12,64
	li	r14,80
	li	r15,96
	li	r16,112

	mtctr	r6
	/*
	 * Now do cacheline sized loads and stores. By this stage the
	 * cacheline stores are also cacheline aligned.
	 */
	.align	5
8:
	lvx	v7,r0,r4
	VPERM(v8,v0,v7,v16)
	lvx	v6,r4,r9
	VPERM(v9,v7,v6,v16)
	lvx	v5,r4,r10
	VPERM(v10,v6,v5,v16)
	lvx	v4,r4,r11
	VPERM(v11,v5,v4,v16)
	lvx	v3,r4,r12
	VPERM(v12,v4,v3,v16)
	lvx	v2,r4,r14
	VPERM(v13,v3,v2,v16)
	lvx	v1,r4,r15
	VPERM(v14,v2,v1,v16)
	lvx	v0,r4,r16
	VPERM(v15,v1,v0,v16)
	addi	r4,r4,128
	stvx	v8,r0,r3
	stvx	v9,r3,r9
	stvx	v10,r3,r10
	stvx	v11,r3,r11
	stvx	v12,r3,r12
	stvx	v13,r3,r14
	stvx	v14,r3,r15
	stvx	v15,r3,r16
	addi	r3,r3,128
	bdnz	8b
	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)

	/* Up to 127B to go */
	clrldi	r5,r5,(64-7)
	srdi	r6,r5,4
	mtocrf	0x01,r6
	bf	cr7*4+1,9f
	lvx	v3,r0,r4
	VPERM(v8,v0,v3,v16)
	lvx	v2,r4,r9
	VPERM(v9,v3,v2,v16)
	lvx	v1,r4,r10
	VPERM(v10,v2,v1,v16)
	lvx	v0,r4,r11
	VPERM(v11,v1,v0,v16)
	addi	r4,r4,64
	stvx	v8,r0,r3
	stvx	v9,r3,r9
	stvx	v10,r3,r10
	stvx	v11,r3,r11
	addi	r3,r3,64

9:	bf	cr7*4+2,10f
	lvx	v1,r0,r4
	VPERM(v8,v0,v1,v16)
	lvx	v0,r4,r9
	VPERM(v9,v1,v0,v16)
	addi	r4,r4,32
	stvx	v8,r0,r3
	stvx	v9,r3,r9
	addi	r3,r3,32

10:	bf	cr7*4+3,11f
	lvx	v1,r0,r4
	VPERM(v8,v0,v1,v16)
	addi	r4,r4,16
	stvx	v8,r0,r3
	addi	r3,r3,16

	/* Up to 15B to go */
11:	clrldi	r5,r5,(64-4)
	addi	r4,r4,-16	/* Unwind the +16 load offset */
	mtocrf	0x01,r5
	bf	cr7*4+0,12f
	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
	lwz	r6,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r6,4(r3)
	addi	r3,r3,8

12:	bf	cr7*4+1,13f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

13:	bf	cr7*4+2,14f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

14:	bf	cr7*4+3,15f
	lbz	r0,0(r4)
	stb	r0,0(r3)

15:	addi	r1,r1,STACKFRAMESIZE
	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
	b	exit_vmx_copy		/* tail call optimise */

#endif /* CONFIG_ALTIVEC */