;
; jquantf.asm - sample data conversion and quantization (64-bit SSE & SSE2)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright (C) 2009, D. R. Commander.
;
; Based on the x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler),
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208
;
; [TAB8]

%include "jsimdext.inc"
%include "jdct.inc"

; --------------------------------------------------------------------------
        SECTION SEG_TEXT
        BITS    64
;
; Load data into workspace, applying unsigned->signed conversion
;
; GLOBAL(void)
; jsimd_convsamp_float_sse2 (JSAMPARRAY sample_data, JDIMENSION start_col,
;                            FAST_FLOAT *workspace);
;
; r10 = JSAMPARRAY sample_data
; r11 = JDIMENSION start_col
; r12 = FAST_FLOAT *workspace
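;
; A scalar sketch of what this routine computes (roughly the reference
; convsamp_float code in libjpeg's jcdctmgr.c; shown here only for
; orientation, not taken from this file):
;
;   for (row = 0; row < DCTSIZE; row++)
;     for (col = 0; col < DCTSIZE; col++)
;       *workspace++ = (FAST_FLOAT)
;                      (sample_data[row][start_col + col] - CENTERJSAMPLE);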

        align   16
        global  EXTN(jsimd_convsamp_float_sse2)

EXTN(jsimd_convsamp_float_sse2):
        push    rbp
        mov     rax,rsp
        mov     rbp,rsp
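        ; collect_args (a macro from jsimdext.inc) is expected to normalize
        ; the incoming arguments into r10-r12 regardless of the host calling
        ; convention (Win64 or System V), saving any registers it clobbers.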
        collect_args
        push    rbx
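
        ; Build PB_CENTERJSAMPLE in xmm7: pcmpeqw sets every bit, psllw by 7
        ; turns each word into 0xFF80, and packsswb saturates those words to
        ; the byte 0x80, i.e. CENTERJSAMPLE replicated across all 16 bytes.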
        pcmpeqw   xmm7,xmm7
        psllw     xmm7,7
        packsswb  xmm7,xmm7             ; xmm7 = PB_CENTERJSAMPLE (0x808080..)

        mov     rsi, r10
        mov     eax, r11d
        mov     rdi, r12
        mov     rcx, DCTSIZE/2
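        ; DCTSIZE/2 = 4 iterations; each pass converts two 8-sample rows.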
.convloop:
        mov     rbx, JSAMPROW [rsi+0*SIZEOF_JSAMPROW]   ; (JSAMPLE *)
        mov     rdx, JSAMPROW [rsi+1*SIZEOF_JSAMPROW]   ; (JSAMPLE *)

        movq    xmm0, XMM_MMWORD [rbx+rax*SIZEOF_JSAMPLE]
        movq    xmm1, XMM_MMWORD [rdx+rax*SIZEOF_JSAMPLE]

        psubb   xmm0,xmm7               ; xmm0=(01234567)
        psubb   xmm1,xmm7               ; xmm1=(89ABCDEF)
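
        ; Widen the signed bytes to dwords: the unpacks move each byte into
        ; the top byte of a dword (the low words, taken from xmm2/xmm3, are
        ; don't-cares), and the arithmetic right shift by 24 bits below then
        ; sign-extends it.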
        punpcklbw xmm0,xmm0             ; xmm0=(*0*1*2*3*4*5*6*7)
        punpcklbw xmm1,xmm1             ; xmm1=(*8*9*A*B*C*D*E*F)

        punpcklwd xmm2,xmm0             ; xmm2=(***0***1***2***3)
        punpckhwd xmm0,xmm0             ; xmm0=(***4***5***6***7)
        punpcklwd xmm3,xmm1             ; xmm3=(***8***9***A***B)
        punpckhwd xmm1,xmm1             ; xmm1=(***C***D***E***F)

        psrad    xmm2,(DWORD_BIT-BYTE_BIT)      ; xmm2=(0123)
        psrad    xmm0,(DWORD_BIT-BYTE_BIT)      ; xmm0=(4567)
        cvtdq2ps xmm2,xmm2              ; xmm2=(0123)
        cvtdq2ps xmm0,xmm0              ; xmm0=(4567)
        psrad    xmm3,(DWORD_BIT-BYTE_BIT)      ; xmm3=(89AB)
        psrad    xmm1,(DWORD_BIT-BYTE_BIT)      ; xmm1=(CDEF)
        cvtdq2ps xmm3,xmm3              ; xmm3=(89AB)
        cvtdq2ps xmm1,xmm1              ; xmm1=(CDEF)

        movaps  XMMWORD [XMMBLOCK(0,0,rdi,SIZEOF_FAST_FLOAT)], xmm2
        movaps  XMMWORD [XMMBLOCK(0,1,rdi,SIZEOF_FAST_FLOAT)], xmm0
        movaps  XMMWORD [XMMBLOCK(1,0,rdi,SIZEOF_FAST_FLOAT)], xmm3
        movaps  XMMWORD [XMMBLOCK(1,1,rdi,SIZEOF_FAST_FLOAT)], xmm1

        add     rsi, byte 2*SIZEOF_JSAMPROW
        add     rdi, byte 2*DCTSIZE*SIZEOF_FAST_FLOAT
        dec     rcx
        jnz     short .convloop

        pop     rbx
        uncollect_args
        pop     rbp
        ret

; --------------------------------------------------------------------------
;
; Quantize/descale the coefficients, and store into coef_block
;
; GLOBAL(void)
; jsimd_quantize_float_sse2 (JCOEFPTR coef_block, FAST_FLOAT *divisors,
;                            FAST_FLOAT *workspace);
;
; r10 = JCOEFPTR coef_block
; r11 = FAST_FLOAT *divisors
; r12 = FAST_FLOAT *workspace
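;
; A scalar sketch of what this routine computes (roughly the reference
; quantize_float code in libjpeg's jcdctmgr.c, which rounds by adding 16384.5
; and subtracting 16384; the SSE2 code below instead relies on cvtps2dq,
; which rounds to nearest under the default MXCSR settings):
;
;   for (i = 0; i < DCTSIZE2; i++)
;     coef_block[i] = (JCOEF) ((int) (workspace[i] * divisors[i] +
;                                     (FAST_FLOAT) 16384.5) - 16384);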

        align   16
        global  EXTN(jsimd_quantize_float_sse2)

EXTN(jsimd_quantize_float_sse2):
        push    rbp
        mov     rax,rsp
        mov     rbp,rsp
        collect_args

        mov     rsi, r12
        mov     rdx, r11
        mov     rdi, r10
        mov     rax, DCTSIZE2/16
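        ; DCTSIZE2/16 = 4 iterations; each pass quantizes 16 coefficients.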
.quantloop:
        movaps  xmm0, XMMWORD [XMMBLOCK(0,0,rsi,SIZEOF_FAST_FLOAT)]
        movaps  xmm1, XMMWORD [XMMBLOCK(0,1,rsi,SIZEOF_FAST_FLOAT)]
        mulps   xmm0, XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_FAST_FLOAT)]
        mulps   xmm1, XMMWORD [XMMBLOCK(0,1,rdx,SIZEOF_FAST_FLOAT)]
        movaps  xmm2, XMMWORD [XMMBLOCK(1,0,rsi,SIZEOF_FAST_FLOAT)]
        movaps  xmm3, XMMWORD [XMMBLOCK(1,1,rsi,SIZEOF_FAST_FLOAT)]
        mulps   xmm2, XMMWORD [XMMBLOCK(1,0,rdx,SIZEOF_FAST_FLOAT)]
        mulps   xmm3, XMMWORD [XMMBLOCK(1,1,rdx,SIZEOF_FAST_FLOAT)]
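
        ; cvtps2dq converts to 32-bit integers using the current MXCSR
        ; rounding mode (round-to-nearest by default); packssdw then narrows
        ; to 16-bit JCOEFs with signed saturation.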
        cvtps2dq xmm0,xmm0
        cvtps2dq xmm1,xmm1
        cvtps2dq xmm2,xmm2
        cvtps2dq xmm3,xmm3

        packssdw xmm0,xmm1
        packssdw xmm2,xmm3

        movdqa  XMMWORD [XMMBLOCK(0,0,rdi,SIZEOF_JCOEF)], xmm0
        movdqa  XMMWORD [XMMBLOCK(1,0,rdi,SIZEOF_JCOEF)], xmm2

        add     rsi, byte 16*SIZEOF_FAST_FLOAT
        add     rdx, byte 16*SIZEOF_FAST_FLOAT
        add     rdi, byte 16*SIZEOF_JCOEF
        dec     rax
        jnz     short .quantloop

        uncollect_args
        pop     rbp
        ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
        align   16