/*---------------------------------------------------------------------------+
 |  round_Xsig.S                                                             |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1995                                         |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Normalize and round a 12 byte quantity.                                   |
 | Call from C as:                                                           |
 |   int round_Xsig(Xsig *n)                                                 |
 |                                                                           |
 | Normalize a 12 byte quantity.                                             |
 | Call from C as:                                                           |
 |   int norm_Xsig(Xsig *n)                                                  |
 |                                                                           |
 | Each function returns the size of the shift (nr of bits).                 |
 |                                                                           |
 +---------------------------------------------------------------------------*/
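
/*
 * A minimal usage sketch from C, assuming the Xsig type from fpu_emu.h
 * (three 32-bit words, least significant first; the field names and the
 * surrounding variables here are illustrative only):
 *
 *      Xsig accum;                      // 96-bit extended significand
 *      int exponent;                    // exponent tracked separately
 *
 *      exponent += norm_Xsig(&accum);   // normalize in place
 *      exponent += round_Xsig(&accum);  // or: normalize, round to 64 bits
 *
 * As coded below, the returned count is negative for left shifts, so
 * adding it to the exponent leaves the represented value unchanged.
 */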
        .file   "round_Xsig.S"

#include "fpu_emu.h"

.text
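
/*
 * round_Xsig: shift the quantity left until bit 31 of the most
 * significant word is set, then round the upper 64 bits: if bit 31 of
 * the low word is set, add one, renormalizing if the addition carries
 * out of the top word (round-half-up on the discarded 32 bits).
 */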
ENTRY(round_Xsig)
        pushl   %ebp
        movl    %esp,%ebp
        pushl   %ebx            /* Reserve some space */
        pushl   %ebx
        pushl   %esi

        movl    PARAM1,%esi
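
        /* Read the 12-byte quantity as three 32-bit words: most
           significant in %edx, middle in %ebx, least in %eax.
           -4(%ebp) accumulates the shift count that is returned. */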
        movl    8(%esi),%edx
        movl    4(%esi),%ebx
        movl    (%esi),%eax

        movl    $0,-4(%ebp)
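
        /* Sign bit of %edx set: already normalized.  %edx non-zero: a
           1-31 bit shift is enough.  %edx zero: first move everything
           up one whole word, counting -32, then fall through. */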
        orl     %edx,%edx       /* ms bits */
        js      L_round         /* Already normalized */
        jnz     L_shift_1       /* Shift left 1 - 31 bits */

        movl    %ebx,%edx
        movl    %eax,%ebx
        xorl    %eax,%eax
        movl    $-32,-4(%ebp)

/* We need to shift left by 1 - 31 bits */
L_shift_1:
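        /* %ecx = 31 - (index of the highest set bit), i.e. the left
           shift that brings that bit up to bit 31.  Note that bsrl
           leaves %ecx undefined when %edx is zero; the quantity is
           assumed to have set bits in its upper 64 bits. */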
        bsrl    %edx,%ecx       /* get the required shift in %ecx */
        subl    $31,%ecx
        negl    %ecx
        subl    %ecx,-4(%ebp)
        shld    %cl,%ebx,%edx
        shld    %cl,%eax,%ebx
        shl     %cl,%eax
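
        /* Round: if bit 31 of the discarded low word is set, add one
           to the upper 64 bits in %edx:%ebx.  If that carries out of
           %edx (it wraps to zero), represent the result as 0x80000000
           in the top word and add one to the shift count. */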
L_round:
        testl   $0x80000000,%eax
        jz      L_exit

        addl    $1,%ebx
        adcl    $0,%edx
        jnz     L_exit

        movl    $0x80000000,%edx
        incl    -4(%ebp)

L_exit:
        movl    %edx,8(%esi)
        movl    %ebx,4(%esi)
        movl    %eax,(%esi)

        movl    -4(%ebp),%eax

        popl    %esi
        popl    %ebx
        leave
        ret
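
/*
 * norm_Xsig is the same normalization without the rounding step.  It
 * will shift across up to two whole words; after the second word shift
 * it stops without attempting the final 1-31 bit shift, so a quantity
 * with bits only in its low word can come back still unnormalized (see
 * the comment at the jmp below).
 */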
ENTRY(norm_Xsig)
        pushl   %ebp
        movl    %esp,%ebp
        pushl   %ebx            /* Reserve some space */
        pushl   %ebx
        pushl   %esi

        movl    PARAM1,%esi

        movl    8(%esi),%edx
        movl    4(%esi),%ebx
        movl    (%esi),%eax

        movl    $0,-4(%ebp)
        orl     %edx,%edx       /* ms bits */
        js      L_n_exit        /* Already normalized */
        jnz     L_n_shift_1     /* Shift left 1 - 31 bits */

        movl    %ebx,%edx
        movl    %eax,%ebx
        xorl    %eax,%eax
        movl    $-32,-4(%ebp)

        orl     %edx,%edx       /* ms bits */
        js      L_n_exit        /* Normalized now */
        jnz     L_n_shift_1     /* Shift left 1 - 31 bits */

        movl    %ebx,%edx
        movl    %eax,%ebx
        xorl    %eax,%eax
        addl    $-32,-4(%ebp)
        jmp     L_n_exit        /* Might not be normalized,
                                   but shift no more. */

/* We need to shift left by 1 - 31 bits */
L_n_shift_1:
        bsrl    %edx,%ecx       /* get the required shift in %ecx */
        subl    $31,%ecx
        negl    %ecx
        subl    %ecx,-4(%ebp)
        shld    %cl,%ebx,%edx
        shld    %cl,%eax,%ebx
        shl     %cl,%eax

L_n_exit:
        movl    %edx,8(%esi)
        movl    %ebx,4(%esi)
        movl    %eax,(%esi)

        movl    -4(%ebp),%eax

        popl    %esi
        popl    %ebx
        leave
        ret