/*
 * linux/arch/alpha/lib/memset.S
 *
 * This is an efficient (and small) implementation of the C library "memset()"
 * function for the alpha.
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * This routine is "moral-ware": you are free to use it any way you wish, and
 * the only obligation I put on you is a moral one: if you make any improvements
 * to the routine, please send me your improvements for me to use similarly.
 *
 * The scheduling comments are according to the EV5 documentation (and done by
 * hand, so they might well be incorrect, please do tell me about it..)
 */
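
/*
 * C-level interface (the memset/__memset names are set up as aliases of
 * ___memset at the bottom of this file):
 *	void *memset(void *dest, int c, size_t count);
 * __memsetw fills with a 16-bit value instead of a single byte; judging
 * from the code below, its count is still given in bytes.
 */
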
#include <asm/export.h>
	.set noat
	.set noreorder
.text
	.globl memset
	.globl __memset
	.globl ___memset
	.globl __memsetw
	.globl __constant_c_memset
	.ent ___memset
	.align 5
___memset:
	.frame $30,0,$26,0
	.prologue 0
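
	/*
	 * ___memset(dest, c, count)
	 * Arguments arrive in the usual Alpha registers: $16 = dest,
	 * $17 = c, $18 = count.  The first job is to replicate the low
	 * byte of $17 into all eight bytes of $17.
	 */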
	and $17,255,$1		/* E1 */
	insbl $17,1,$17		/* .. E0 */
	bis $17,$1,$17		/* E0 (p-c latency, next cycle) */
	sll $17,16,$1		/* E1 (p-c latency, next cycle) */

	bis $17,$1,$17		/* E0 (p-c latency, next cycle) */
	sll $17,32,$1		/* E1 (p-c latency, next cycle) */
	bis $17,$1,$17		/* E0 (p-c latency, next cycle) */
	ldq_u $31,0($30)	/* .. E1 */
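
	/*
	 * __constant_c_memset is entered with $17 already holding the
	 * fill pattern replicated across all 64 bits.  Compute the end
	 * address in $6, set the return value ($0 = dest), and pick one
	 * of three paths: everything inside one quadword, an unaligned
	 * head followed by the aligned loop, or the aligned loop directly.
	 */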
	.align 5
__constant_c_memset:
	addq $18,$16,$6		/* E0 */
	bis $16,$16,$0		/* .. E1 */
	xor $16,$6,$1		/* E0 */
	ble $18,end		/* .. E1 */

	bic $1,7,$1		/* E0 */
	beq $1,within_one_quad	/* .. E1 (note EV5 zero-latency forwarding) */
	and $16,7,$3		/* E0 */
	beq $3,aligned		/* .. E1 (note EV5 zero-latency forwarding) */
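
	/*
	 * Unaligned head: load the quadword containing dest, merge the
	 * fill pattern into the bytes from dest upward while keeping the
	 * original bytes below it, and store it back.  $16 is advanced
	 * to the next aligned address and $18 is reduced by the number
	 * of head bytes just written.
	 */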
	ldq_u $4,0($16)		/* E0 */
	bis $16,$16,$5		/* .. E1 */
	insql $17,$16,$2	/* E0 */
	subq $3,8,$3		/* .. E1 */

	addq $18,$3,$18		/* E0	$18 is new count ($3 is negative) */
	mskql $4,$16,$4		/* .. E1 (and possible load stall) */
	subq $16,$3,$16		/* E0	$16 is new aligned destination */
	bis $2,$4,$1		/* .. E1 */

	bis $31,$31,$31		/* E0 */
	ldq_u $31,0($30)	/* .. E1 */
	stq_u $1,0($5)		/* E0 */
	bis $31,$31,$31		/* .. E1 */
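
	/*
	 * Aligned body: $3 = number of whole quadwords to fill, $18 = the
	 * remaining tail bytes.  The loop stores the pattern one quadword
	 * at a time through $5.
	 */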
	.align 4
aligned:
	sra $18,3,$3		/* E0 */
	and $18,7,$18		/* .. E1 */
	bis $16,$16,$5		/* E0 */
	beq $3,no_quad		/* .. E1 */

	.align 3
loop:
	stq $17,0($5)		/* E0 */
	subq $3,1,$3		/* .. E1 */
	addq $5,8,$5		/* E0 */
	bne $3,loop		/* .. E1 */
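
	/*
	 * Trailing partial quadword: read the final quadword, merge the
	 * fill pattern into the bytes below the end address $6 while
	 * keeping the original bytes at and above it, and write it back.
	 */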
no_quad:
	bis $31,$31,$31		/* E0 */
	beq $18,end		/* .. E1 */
	ldq $7,0($5)		/* E0 */
	mskqh $7,$6,$2		/* .. E1 (and load stall) */

	insqh $17,$6,$4		/* E0 */
	bis $2,$4,$1		/* .. E1 */
	stq $1,0($5)		/* E0 */
	ret $31,($26),1		/* .. E1 */
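
	/*
	 * The whole region fits within a single quadword: one
	 * load/merge/store takes care of both partial ends at once.
	 */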
	.align 3
within_one_quad:
	ldq_u $1,0($16)		/* E0 */
	insql $17,$16,$2	/* E1 */
	mskql $1,$16,$4		/* E0 (after load stall) */
	bis $2,$4,$2		/* E0 */

	mskql $2,$6,$4		/* E0 */
	mskqh $1,$6,$2		/* .. E1 */
	bis $2,$4,$1		/* E0 */
	stq_u $1,0($16)		/* E0 */

end:
	ret $31,($26),1		/* E1 */
	.end ___memset
	EXPORT_SYMBOL(___memset)
	EXPORT_SYMBOL(__constant_c_memset)
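
	/*
	 * __memsetw(dest, c, count): replicate a 16-bit fill value into
	 * all four halfwords of $17, then reuse __constant_c_memset.
	 */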
	.align 5
	.ent __memsetw
__memsetw:
	.prologue 0

	inswl $17,0,$1		/* E0 */
	inswl $17,2,$2		/* E0 */
	inswl $17,4,$3		/* E0 */
	or $1,$2,$1		/* .. E1 */
	inswl $17,6,$4		/* E0 */
	or $1,$3,$1		/* .. E1 */
	or $1,$4,$17		/* E0 */
	br __constant_c_memset	/* .. E1 */

	.end __memsetw
	EXPORT_SYMBOL(__memsetw)
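
/*
 * memset and __memset are simply aliases for the implementation above.
 */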
memset = ___memset
__memset = ___memset
	EXPORT_SYMBOL(memset)
	EXPORT_SYMBOL(__memset)