lusercopy.S

/*
 * User Space Access Routines
 *
 * Copyright (C) 2000-2002 Hewlett-Packard (John Marvin)
 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
 * Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
 * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
 * Copyright (C) 2017 Helge Deller <deller@gmx.de>
 * Copyright (C) 2017 John David Anglin <dave.anglin@bell.net>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * These routines still have plenty of room for optimization
 * (word & doubleword load/store, dual issue, store hints, etc.).
 */

/*
 * The following routines assume that space register 3 (sr3) contains
 * the space id associated with the current user's address space.
 */

        .text

#include <asm/assembly.h>
#include <asm/errno.h>
#include <linux/linkage.h>
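
/*
 * Note on fault handling: each ASM_EXCEPTIONTABLE_ENTRY(insn, fixup) below
 * records an __ex_table entry so that a fault taken at the load/store at
 * "insn" resumes execution at "fixup", letting the routine report a partial
 * count instead of faulting fatally.
 */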

/*
 * get_sr gets the appropriate space value into
 * sr1 for kernel/user space access, depending
 * on the flag stored in the task structure.
 */
        .macro get_sr
        mfctl %cr30,%r1
        ldw TI_SEGMENT(%r1),%r22
        mfsp %sr3,%r1
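        /* if the TI_SEGMENT flag is non-zero, the ,<> completer nullifies
         * the copy below and %r1 keeps the user space id read from %sr3;
         * if it is zero, space 0 (kernel) is used instead */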
        or,<> %r22,%r0,%r0
        copy %r0,%r1
        mtsp %r1,%sr1
        .endm

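/*
 * fixup_branch loads the address of \lbl into %r1 and branches there;
 * callers place one instruction in the delay slot of the bv.
 */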
        .macro fixup_branch lbl
        ldil L%\lbl, %r1
        ldo R%\lbl(%r1), %r1
        bv %r0(%r1)
        .endm

/*
 * unsigned long lclear_user(void *to, unsigned long n)
 *
 * Returns 0 for success;
 * otherwise, returns the number of bytes not transferred.
 */
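
/*
 * Illustrative C-level sketch of the loop below (a sketch only, not part of
 * the build; clear_byte() is a hypothetical stand-in for the faulting stbs):
 *
 *      unsigned long lclear_user(void *to, unsigned long n)
 *      {
 *              char *p = to;
 *              while (n) {
 *                      if (clear_byte(p++))    // store faulted: stop
 *                              break;
 *                      n--;
 *              }
 *              return n;                       // bytes not cleared
 *      }
 */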
ENTRY_CFI(lclear_user)
        .proc
        .callinfo NO_CALLS
        .entry
        comib,=,n 0,%r25,$lclu_done
        get_sr
$lclu_loop:
        addib,<> -1,%r25,$lclu_loop
1:      stbs,ma %r0,1(%sr1,%r26)

$lclu_done:
        bv %r0(%r2)
        copy %r25,%r28
        .exit
ENDPROC_CFI(lclear_user)

        .section .fixup,"ax"
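        /* the addib above decrements %r25 before the store; if the store
         * faults, add the count back so the exact number of bytes not
         * cleared is returned (the ldo sits in the bv delay slot) */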
2:      fixup_branch $lclu_done
        ldo 1(%r25),%r25
        .previous

        ASM_EXCEPTIONTABLE_ENTRY(1b,2b)

        .procend

/*
 * long lstrnlen_user(char *s, long n)
 *
 * Returns 0 if an exception occurs before a zero byte or the limit N is reached,
 * N+1 if N would be exceeded,
 * else strlen + 1 (i.e. including the zero byte).
 */
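
/*
 * Illustrative C-level sketch of the return values (a sketch only, not part
 * of the build; load_byte() is a hypothetical stand-in for the faulting ldbs
 * and returns a negative value on a fault):
 *
 *      long lstrnlen_user(char *s, long n)
 *      {
 *              long i;
 *              for (i = 0; i < n; i++) {
 *                      int c = load_byte(s + i);
 *                      if (c < 0)
 *                              return 0;       // fault
 *                      if (c == 0)
 *                              return i + 1;   // strlen + 1
 *              }
 *              return n + 1;                   // N exceeded
 *      }
 */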
ENTRY_CFI(lstrnlen_user)
        .proc
        .callinfo NO_CALLS
        .entry
        comib,= 0,%r25,$lslen_nzero
        copy %r26,%r24
        get_sr
1:      ldbs,ma 1(%sr1,%r26),%r1
$lslen_loop:
        comib,=,n 0,%r1,$lslen_done
        addib,<> -1,%r25,$lslen_loop
2:      ldbs,ma 1(%sr1,%r26),%r1
$lslen_done:
        bv %r0(%r2)
        sub %r26,%r24,%r28
        .exit

$lslen_nzero:
        b $lslen_done
        ldo 1(%r26),%r26 /* special case for N == 0 */
ENDPROC_CFI(lstrnlen_user)

        .section .fixup,"ax"
3:      fixup_branch $lslen_done
        copy %r24,%r26 /* reset r26 so 0 is returned on fault */
        .previous

        ASM_EXCEPTIONTABLE_ENTRY(1b,3b)
        ASM_EXCEPTIONTABLE_ENTRY(2b,3b)

        .procend

/*
 * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
 *
 * Inputs:
 * - sr1 already contains the space of the source region
 * - sr2 already contains the space of the destination region
 *
 * Returns:
 * - the number of bytes that could not be copied.
 *   On success, this will be zero.
 *
 * This code is based on a C implementation of a copy routine written by
 * Randolph Chung, which in turn was derived from glibc.
 *
 * Several strategies are used to get the best performance under various
 * conditions. In the optimal case, we copy in loops that move 32 or 16 bytes
 * at a time using general registers. Unaligned copies are handled either by
 * aligning the destination and then using the shift-and-write method, or in
 * a few cases by falling back to a byte-at-a-time copy.
 *
 * Testing with various alignments and buffer sizes shows that this code is
 * often >10x faster than a simple byte-at-a-time copy, even for strangely
 * aligned operands. It is interesting to note that the glibc version of memcpy
 * (written in C) is actually quite fast already. This routine is able to beat
 * it by 30-40% for aligned copies because of the loop unrolling, but in some
 * cases the glibc version is still slightly faster. This lends credibility to
 * the claim that gcc can generate very good code as long as we are careful.
 *
 * Possible optimizations:
 * - add cache prefetching
 * - avoid the post-increment address modifiers; they may create additional
 *   interlocks. The assumption is that they were only efficient on old
 *   machines (pre-PA8000 processors).
 */
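
/*
 * Rough map of the labels below: .Lbyte_loop handles short copies and the
 * final tail, .Lalign_loop32/.Lalign_loop64 byte-copy until dst is word or
 * doubleword aligned, .Lcopy_loop_8/.Lcopy_loop_16 are the unrolled 16- and
 * 32-byte-per-iteration loops, and .Lunaligned_copy/.Lcopy_dstaligned handle
 * the case where src and dst have different word alignment (shift-and-write).
 * Every faulting access eventually reaches .Lcopy_done, which returns
 * end - dst, i.e. the number of bytes not copied.
 */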

        dst = arg0
        src = arg1
        len = arg2
        end = arg3
        t1 = r19
        t2 = r20
        t3 = r21
        t4 = r22
        srcspc = sr1
        dstspc = sr2

        t0 = r1
        a1 = t1
        a2 = t2
        a3 = t3
        a0 = t4

        save_src = ret0
        save_dst = ret1
        save_len = r31
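
        /* a0-a3 hold the rotating source words in the shift-and-write loop;
         * save_src/save_dst/save_len keep the original values so the
         * byte-copy fallback can recompute src and len after a read fault
         * (see .Lcda_rdfault/.Lcda_finish) */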

ENTRY_CFI(pa_memcpy)
        .proc
        .callinfo NO_CALLS
        .entry

        /* Last destination address */
        add dst,len,end

        /* short copy with less than 16 bytes? */
        cmpib,COND(>>=),n 15,len,.Lbyte_loop

        /* same alignment? */
        xor src,dst,t0
        extru t0,31,2,t1
        cmpib,<>,n 0,t1,.Lunaligned_copy

#ifdef CONFIG_64BIT
        /* only do 64-bit copies if we can get aligned. */
        extru t0,31,3,t1
        cmpib,<>,n 0,t1,.Lalign_loop32

        /* loop until we are 64-bit aligned */
.Lalign_loop64:
        extru dst,31,3,t1
        cmpib,=,n 0,t1,.Lcopy_loop_16_start
20:     ldb,ma 1(srcspc,src),t1
21:     stb,ma t1,1(dstspc,dst)
        b .Lalign_loop64
        ldo -1(len),len
        ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)

.Lcopy_loop_16_start:
        ldi 31,t0
.Lcopy_loop_16:
        cmpb,COND(>>=),n t0,len,.Lword_loop

10:     ldd 0(srcspc,src),t1
11:     ldd 8(srcspc,src),t2
        ldo 16(src),src
12:     std,ma t1,8(dstspc,dst)
13:     std,ma t2,8(dstspc,dst)
14:     ldd 0(srcspc,src),t1
15:     ldd 8(srcspc,src),t2
        ldo 16(src),src
16:     std,ma t1,8(dstspc,dst)
17:     std,ma t2,8(dstspc,dst)

        ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault)
        ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault)
        ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)

        b .Lcopy_loop_16
        ldo -32(len),len

.Lword_loop:
        cmpib,COND(>>=),n 3,len,.Lbyte_loop
20:     ldw,ma 4(srcspc,src),t1
21:     stw,ma t1,4(dstspc,dst)
        b .Lword_loop
        ldo -4(len),len
        ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)

#endif /* CONFIG_64BIT */

        /* loop until we are 32-bit aligned */
.Lalign_loop32:
        extru dst,31,2,t1
        cmpib,=,n 0,t1,.Lcopy_loop_8
20:     ldb,ma 1(srcspc,src),t1
21:     stb,ma t1,1(dstspc,dst)
        b .Lalign_loop32
        ldo -1(len),len
        ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)

.Lcopy_loop_8:
        cmpib,COND(>>=),n 15,len,.Lbyte_loop

10:     ldw 0(srcspc,src),t1
11:     ldw 4(srcspc,src),t2
12:     stw,ma t1,4(dstspc,dst)
13:     stw,ma t2,4(dstspc,dst)
14:     ldw 8(srcspc,src),t1
15:     ldw 12(srcspc,src),t2
        ldo 16(src),src
16:     stw,ma t1,4(dstspc,dst)
17:     stw,ma t2,4(dstspc,dst)

        ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault)
        ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault)
        ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)

        b .Lcopy_loop_8
        ldo -16(len),len

.Lbyte_loop:
        cmpclr,COND(<>) len,%r0,%r0
        b,n .Lcopy_done
20:     ldb 0(srcspc,src),t1
        ldo 1(src),src
21:     stb,ma t1,1(dstspc,dst)
        b .Lbyte_loop
        ldo -1(len),len

        ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)

.Lcopy_done:
        bv %r0(%r2)
        sub end,dst,ret0

        /* src and dst are not aligned the same way. */
        /* need to go the hard way */
.Lunaligned_copy:
        /* align until dst is 32-bit word aligned */
        extru dst,31,2,t1
        cmpib,=,n 0,t1,.Lcopy_dstaligned
20:     ldb 0(srcspc,src),t1
        ldo 1(src),src
21:     stb,ma t1,1(dstspc,dst)
        b .Lunaligned_copy
        ldo -1(len),len

        ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)

.Lcopy_dstaligned:

        /* store src, dst and len in a safe place */
        copy src,save_src
        copy dst,save_dst
        copy len,save_len

        /* len now needs to hold the number of words to copy */
        SHRREG len,2,len

        /*
         * Copy from a not-aligned src to an aligned dst using shifts.
         * Handles 4 words per loop.
         */

        depw,z src,28,2,t0
        subi 32,t0,t0
        mtsar t0
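        /* %sar now holds 32 - 8*(src & 3); each shrpw below concatenates
         * two consecutive source words and extracts one dst-aligned word
         * from that 64-bit pair */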
        extru len,31,2,t0
        cmpib,= 2,t0,.Lcase2
        /* Make src aligned by rounding it down. */
        depi 0,31,2,src

        cmpiclr,<> 3,t0,%r0
        b,n .Lcase3
        cmpiclr,<> 1,t0,%r0
        b,n .Lcase1
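
        /* the compares above dispatch on len % 4: each .LcaseN pre-loads the
         * first source words and enters the four-word unrolled loop at the
         * matching .LdoN entry point (a Duff's-device style entry) */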
.Lcase0:
        cmpb,COND(=) %r0,len,.Lcda_finish
        nop

1:      ldw,ma 4(srcspc,src), a3
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
1:      ldw,ma 4(srcspc,src), a0
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
        b,n .Ldo3

.Lcase1:
1:      ldw,ma 4(srcspc,src), a2
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
1:      ldw,ma 4(srcspc,src), a3
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
        ldo -1(len),len
        cmpb,COND(=),n %r0,len,.Ldo0

.Ldo4:
1:      ldw,ma 4(srcspc,src), a0
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
        shrpw a2, a3, %sar, t0
1:      stw,ma t0, 4(dstspc,dst)
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
.Ldo3:
1:      ldw,ma 4(srcspc,src), a1
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
        shrpw a3, a0, %sar, t0
1:      stw,ma t0, 4(dstspc,dst)
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
.Ldo2:
1:      ldw,ma 4(srcspc,src), a2
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
        shrpw a0, a1, %sar, t0
1:      stw,ma t0, 4(dstspc,dst)
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
.Ldo1:
1:      ldw,ma 4(srcspc,src), a3
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
        shrpw a1, a2, %sar, t0
1:      stw,ma t0, 4(dstspc,dst)
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
        ldo -4(len),len
        cmpb,COND(<>) %r0,len,.Ldo4
        nop

.Ldo0:
        shrpw a2, a3, %sar, t0
1:      stw,ma t0, 4(dstspc,dst)
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)

.Lcda_rdfault:
.Lcda_finish:
        /* calculate new src, dst and len and jump to byte-copy loop */
        sub dst,save_dst,t0
        add save_src,t0,src
        b .Lbyte_loop
        sub save_len,t0,len

.Lcase3:
1:      ldw,ma 4(srcspc,src), a0
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
1:      ldw,ma 4(srcspc,src), a1
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
        b .Ldo2
        ldo 1(len),len
.Lcase2:
1:      ldw,ma 4(srcspc,src), a1
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
1:      ldw,ma 4(srcspc,src), a2
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
        b .Ldo1
        ldo 2(len),len

        /* fault exception fixup handlers: */
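        /* when the second load of a pair faults, the first word is already
         * in t1 but has not been stored; store it before returning so the
         * byte count reported by .Lcopy_done stays exact */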
#ifdef CONFIG_64BIT
.Lcopy16_fault:
        b .Lcopy_done
10:     std,ma t1,8(dstspc,dst)
        ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
#endif

.Lcopy8_fault:
        b .Lcopy_done
10:     stw,ma t1,4(dstspc,dst)
        ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)

        .exit
ENDPROC_CFI(pa_memcpy)
        .procend

        .end