memcpy.S

/* MN10300 Optimised simple memory to memory copy
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <asm/cache.h>

        .section .text
        .balign L1_CACHE_BYTES

###############################################################################
#
# void *memcpy(void *dst, const void *src, size_t n)
#
###############################################################################
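#
# Strategy (as read from the code below): dst arrives in d0 and src in d1;
# the byte count is taken from the stack at (20,sp).  When dst, src and count
# are all four-byte aligned, the copy runs in 32-byte bursts followed by a
# word-sized tail; otherwise it falls back to the byte-at-a-time loop at
# memcpy_1.  The original dst, kept in e3, is handed back as the return value.
#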
        .globl  memcpy
        .type   memcpy,@function
memcpy:
        movm    [d2,d3],(sp)
        mov     d0,(12,sp)
        mov     d1,(16,sp)
        mov     (20,sp),d2              # count
        mov     d0,a0                   # dst
        mov     d1,a1                   # src
        mov     d0,e3                   # the return value

        cmp     +0,d2
        beq     memcpy_done             # return if zero-length copy

        # see if the three parameters are all four-byte aligned
        or      d0,d1,d3
        or      d2,d3
        and     +3,d3
        bne     memcpy_1                # jump if not
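        # (d3 = dst | src | count; either of the low two bits being set means
        #  at least one of the three is not a multiple of four - the aligned
        #  path below only ever copies whole words, so the count must qualify
        #  as well)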

        # we want to transfer as much as we can in chunks of 32 bytes
        cmp     +31,d2
        bls     memcpy_4_remainder      # 4-byte aligned remainder

        movm    [exreg1],(sp)
        add     -32,d2
        mov     +32,d3
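
        # The loop below moves eight words (32 bytes) per pass, loading into
        # d0/d1/e0-e7 with post-increment and storing them straight back out.
        # The count was biased down by 32 above so that the loop's "sub d3,d2"
        # borrows (sets carry) once fewer than 32 bytes are left; the bias is
        # added back after the loop to recover the true remainder.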
memcpy_4_loop:
        mov     (a1+),d0
        mov     (a1+),d1
        mov     (a1+),e0
        mov     (a1+),e1
        mov     (a1+),e4
        mov     (a1+),e5
        mov     (a1+),e6
        mov     (a1+),e7
        mov     d0,(a0+)
        mov     d1,(a0+)
        mov     e0,(a0+)
        mov     e1,(a0+)
        mov     e4,(a0+)
        mov     e5,(a0+)
        mov     e6,(a0+)
        mov     e7,(a0+)

        sub     d3,d2
        bcc     memcpy_4_loop

        movm    (sp),[exreg1]
        add     d3,d2
        beq     memcpy_4_no_remainder

memcpy_4_remainder:
        # cut 4-7 words down to 0-3
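        # (d2 here is 4-28 bytes, i.e. one to seven aligned words; peeling off
        #  four words when 16 or more remain leaves at most three for the tail)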
        cmp     +16,d2
        bcs     memcpy_4_three_or_fewer_words
        mov     (a1+),d0
        mov     (a1+),d1
        mov     (a1+),e0
        mov     (a1+),e1
        mov     d0,(a0+)
        mov     d1,(a0+)
        mov     e0,(a0+)
        mov     e1,(a0+)
        add     -16,d2
        beq     memcpy_4_no_remainder

        # copy the remaining 1, 2 or 3 words
memcpy_4_three_or_fewer_words:
        cmp     +8,d2
        bcs     memcpy_4_one_word
        beq     memcpy_4_two_words
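        # three words left: copy one here, then fall through the two-word and
        # one-word cases below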
        mov     (a1+),d0
        mov     d0,(a0+)
memcpy_4_two_words:
        mov     (a1+),d0
        mov     d0,(a0+)
memcpy_4_one_word:
        mov     (a1+),d0
        mov     d0,(a0+)

memcpy_4_no_remainder:
        # check we copied the correct amount
        # TODO: REMOVE CHECK
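        # (d2 = bytes actually written, i.e. the advanced dst in a0 minus the
        #  original dst kept in e3; it is compared against the count reloaded
        #  from the stack, and the break instructions trap if they differ)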
        sub     e3,a0,d2
        mov     (20,sp),d1
        cmp     d2,d1
        beq     memcpy_done
        break
        break
        break

memcpy_done:
        mov     e3,a0
        ret     [d2,d3],8

        # handle misaligned copying
memcpy_1:
        add     -1,d2
        mov     +1,d3
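        # byte-at-a-time fallback using the hardware loop (setlb/lcc): the
        # count was biased down by one so that "sub d3,d2" borrows on the pass
        # that copies the last byte; add_add advances src and dst together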
        setlb                           # setlb requires the next insns
                                        # to occupy exactly 4 bytes

        sub     d3,d2
        movbu   (a1),d0
        movbu   d0,(a0)
        add_add d3,a1,d3,a0
        lcc

        mov     e3,a0
        ret     [d2,d3],8

memcpy_end:
        .size   memcpy, memcpy_end-memcpy