////////////////////////////////////////////////////////////////////////////////
///
/// SSE optimized routines for Pentium-III, Athlon-XP and later CPUs. All SSE
/// optimized functions have been gathered into this single source code file,
/// regardless of their class or original source code file, in order to ease
/// porting the library to other compiler and processor platforms.
///
/// The SSE optimizations are programmed using SSE compiler intrinsics that
/// are supported by both the Microsoft Visual C++ and GCC compilers, so this
/// file should compile with both toolsets.
///
/// NOTICE: If using Visual Studio 6.0, you'll need to install the "Visual C++
/// 6.0 processor pack" update to get SSE instruction set support. The update
/// is available for download at the Microsoft Developer Network, see here:
/// http://msdn.microsoft.com/en-us/vstudio/aa718349.aspx
///
/// If the above URL has expired or been removed, go to "http://msdn.microsoft.com"
/// and perform a search with the keywords "processor pack".
///
/// Author        : Copyright (c) Olli Parviainen
/// Author e-mail : oparviai 'at' iki.fi
/// SoundTouch WWW: http://www.surina.net/soundtouch
///
////////////////////////////////////////////////////////////////////////////////
//
// License :
//
//  SoundTouch audio processing library
//  Copyright (c) Olli Parviainen
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2.1 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
////////////////////////////////////////////////////////////////////////////////
#include "cpu_detect.h"
#include "STTypes.h"

using namespace soundtouch;

#ifdef SOUNDTOUCH_ALLOW_SSE

// SSE routines are available only with the float sample type

//////////////////////////////////////////////////////////////////////////////
//
// implementation of SSE optimized functions of class 'TDStretchSSE'
//
//////////////////////////////////////////////////////////////////////////////

#include "TDStretch.h"
#include <xmmintrin.h>
#include <math.h>
#include <assert.h>     // for the assert() checks below
// Calculates cross correlation of two buffers
double TDStretchSSE::calcCrossCorr(const float *pV1, const float *pV2, double &anorm)
{
    int i;
    const float *pVec1;
    const __m128 *pVec2;
    __m128 vSum, vNorm;
    // Note: It causes a major slow-down if the routine needs to tolerate
    // unaligned __m128 memory accesses. It's much faster if we can skip
    // unaligned slots and use the _mm_load_ps instruction instead of
    // _mm_loadu_ps. This can make up to a ~10-fold difference (though part
    // of that comes from skipping every second round for stereo sound).
    //
    // The compile-time define SOUNDTOUCH_ALLOW_NONEXACT_SIMD_OPTIMIZATION
    // (which sets ST_SIMD_AVOID_UNALIGNED) is provided for choosing whether
    // this little cheating is allowed.

#ifdef ST_SIMD_AVOID_UNALIGNED
    // Little cheating allowed: return a valid correlation only for
    // aligned locations, meaning every second round for stereo sound.

    #define _MM_LOAD    _mm_load_ps

    if (((ulongptr)pV1) & 15) return -1e50;   // skip unaligned locations

#else
    // No cheating allowed: use unaligned loads and take the resulting
    // performance hit.
    #define _MM_LOAD    _mm_loadu_ps
#endif
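    // (The alignment test above works because a 16-byte aligned address has
    // its four lowest bits clear: e.g. 0x7f10 & 15 == 0, while 0x7f18 & 15 == 8.)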
    // ensure overlapLength is divisible by 8
    assert((overlapLength % 8) == 0);

    // Calculates the cross-correlation value between the 'pV1' and 'pV2' vectors.
    // Note: pV2 _must_ be aligned to a 16-byte boundary; pV1 need not be.
    pVec1 = (const float*)pV1;
    pVec2 = (const __m128*)pV2;
    vSum = vNorm = _mm_setzero_ps();
    // Unroll the loop by a factor of 4 * 4 operations. The same routine is
    // used for stereo & mono; for mono it just means twice the amount of
    // unrolling.
    for (i = 0; i < channels * overlapLength / 16; i ++)
    {
        __m128 vTemp;
        // vSum += pV1[0..3] * pV2[0..3]
        vTemp = _MM_LOAD(pVec1);
        vSum  = _mm_add_ps(vSum,  _mm_mul_ps(vTemp, pVec2[0]));
        vNorm = _mm_add_ps(vNorm, _mm_mul_ps(vTemp, vTemp));

        // vSum += pV1[4..7] * pV2[4..7]
        vTemp = _MM_LOAD(pVec1 + 4);
        vSum  = _mm_add_ps(vSum,  _mm_mul_ps(vTemp, pVec2[1]));
        vNorm = _mm_add_ps(vNorm, _mm_mul_ps(vTemp, vTemp));

        // vSum += pV1[8..11] * pV2[8..11]
        vTemp = _MM_LOAD(pVec1 + 8);
        vSum  = _mm_add_ps(vSum,  _mm_mul_ps(vTemp, pVec2[2]));
        vNorm = _mm_add_ps(vNorm, _mm_mul_ps(vTemp, vTemp));

        // vSum += pV1[12..15] * pV2[12..15]
        vTemp = _MM_LOAD(pVec1 + 12);
        vSum  = _mm_add_ps(vSum,  _mm_mul_ps(vTemp, pVec2[3]));
        vNorm = _mm_add_ps(vNorm, _mm_mul_ps(vTemp, vTemp));

        pVec1 += 16;
        pVec2 += 4;
    }
    // return value = vSum[0] + vSum[1] + vSum[2] + vSum[3]
    float *pvNorm = (float*)&vNorm;
    float norm = (pvNorm[0] + pvNorm[1] + pvNorm[2] + pvNorm[3]);
    anorm = norm;

    float *pvSum = (float*)&vSum;
    return (double)(pvSum[0] + pvSum[1] + pvSum[2] + pvSum[3]) / sqrt(norm < 1e-9 ? 1.0 : norm);
    /* This is the approximately corresponding routine in C, though without normalization:

    double corr, norm;
    uint i, j;

    // Calculates the cross-correlation value between the 'pV1' and 'pV2' vectors
    corr = norm = 0.0;
    for (i = 0; i < channels * overlapLength / 16; i ++)
    {
        corr += pV1[0] * pV2[0] +
                pV1[1] * pV2[1] +
                pV1[2] * pV2[2] +
                pV1[3] * pV2[3] +
                pV1[4] * pV2[4] +
                pV1[5] * pV2[5] +
                pV1[6] * pV2[6] +
                pV1[7] * pV2[7] +
                pV1[8] * pV2[8] +
                pV1[9] * pV2[9] +
                pV1[10] * pV2[10] +
                pV1[11] * pV2[11] +
                pV1[12] * pV2[12] +
                pV1[13] * pV2[13] +
                pV1[14] * pV2[14] +
                pV1[15] * pV2[15];

        for (j = 0; j < 16; j ++) norm += pV1[j] * pV1[j];

        pV1 += 16;
        pV2 += 16;
    }
    return corr / sqrt(norm);
    */
}
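
/* Note: the scalar horizontal sums above keep the baseline requirement at
   plain SSE. If SSE3 could be assumed, the same 4-lane reduction could be
   done in registers; a minimal sketch (not used by this build):

    #include <pmmintrin.h>      // SSE3

    static inline float horizontalSum(__m128 v)
    {
        v = _mm_hadd_ps(v, v);      // lanes: (v0+v1, v2+v3, v0+v1, v2+v3)
        v = _mm_hadd_ps(v, v);      // all lanes now hold v0+v1+v2+v3
        return _mm_cvtss_f32(v);    // extract the lowest lane
    }
*/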

double TDStretchSSE::calcCrossCorrAccumulate(const float *pV1, const float *pV2, double &norm)
{
    // Call the usual calcCrossCorr function, because SSE doesn't show a big
    // benefit from accumulating the "norm" value, and the rolling "norm"
    // algorithm would also get complicated due to the SSE-specific
    // alignment-vs-nonexact correlation rules.
    return calcCrossCorr(pV1, pV2, norm);
}
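
/* For reference, the rolling "norm" idea that this function skips looks
   roughly like the sketch below in plain C (a sketch only; keeping it valid
   across the aligned/unaligned SSE rounds is what makes it impractical here):

    // cancel the contribution of the samples that slid out of the window
    for (int k = 1; k <= channels; k ++) norm -= pV1[-k] * pV1[-k];

    // ... compute the correlation sum as usual ...

    // add the contribution of the samples that slid in at the window's end
    for (int k = 1; k <= channels; k ++)
    {
        int e = channels * overlapLength - k;
        norm += pV1[e] * pV1[e];
    }
*/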

//////////////////////////////////////////////////////////////////////////////
//
// implementation of SSE optimized functions of class 'FIRFilter'
//
//////////////////////////////////////////////////////////////////////////////

#include "FIRFilter.h"

FIRFilterSSE::FIRFilterSSE() : FIRFilter()
{
    filterCoeffsAlign = nullptr;
    filterCoeffsUnalign = nullptr;
}

FIRFilterSSE::~FIRFilterSSE()
{
    delete[] filterCoeffsUnalign;
    filterCoeffsAlign = nullptr;
    filterCoeffsUnalign = nullptr;
}

// (overloaded) Calculates filter coefficients for the SSE routine
void FIRFilterSSE::setCoefficients(const float *coeffs, uint newLength, uint uResultDivFactor)
{
    uint i;
    float fDivider;

    FIRFilter::setCoefficients(coeffs, newLength, uResultDivFactor);

    // Scale the filter coefficients so that it won't be necessary to scale
    // the filtering result, and also rearrange the coefficients suitably for
    // SSE. Ensure that the filter coeffs array is aligned to a 16-byte boundary.
    delete[] filterCoeffsUnalign;
    filterCoeffsUnalign = new float[2 * newLength + 4];
    filterCoeffsAlign = (float *)SOUNDTOUCH_ALIGN_POINTER_16(filterCoeffsUnalign);

    fDivider = (float)resultDivider;

    // rearrange the filter coefficients for the SSE routines
    for (i = 0; i < newLength; i ++)
    {
        filterCoeffsAlign[2 * i + 0] =
        filterCoeffsAlign[2 * i + 1] = coeffs[i] / fDivider;
    }
}
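
/* Resulting coefficient layout: each tap is duplicated so that one __m128
   register covers one stereo (L/R) frame per tap, e.g. for taps c0..c3:

       coeffs:             c0       c1       c2       c3
       filterCoeffsAlign:  c0 c0    c1 c1    c2 c2    c3 c3

   (The "+ 4" floats in the allocation leave room to round the pointer up to
   the next 16-byte boundary.)
*/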

// SSE-optimized version of the filter routine for stereo sound
uint FIRFilterSSE::evaluateFilterStereo(float *dest, const float *source, uint numSamples) const
{
    int count = (int)((numSamples - length) & (uint)-2);
    int j;

    assert(count % 2 == 0);
    if (count < 2) return 0;

    assert(source != nullptr);
    assert(dest != nullptr);
    assert((length % 8) == 0);
    assert(filterCoeffsAlign != nullptr);
    assert(((ulongptr)filterCoeffsAlign) % 16 == 0);

    // The filter is evaluated for two stereo samples with each iteration,
    // thus the use of 'j += 2'
    #pragma omp parallel for
    for (j = 0; j < count; j += 2)
    {
        const float *pSrc;
        float *pDest;
        const __m128 *pFil;
        __m128 sum1, sum2;
        uint i;

        pSrc = (const float*)source + j * 2;      // source audio data
        pDest = dest + j * 2;                     // destination audio data
        pFil = (const __m128*)filterCoeffsAlign;  // filter coefficients. NOTE: assumes
                                                  // coefficients are aligned to a 16-byte boundary
        sum1 = sum2 = _mm_setzero_ps();

        for (i = 0; i < length / 8; i ++)
        {
            // Unroll the loop for efficiency & calculate the filter for
            // 2*2 stereo samples at each pass:
            // sum1 accumulates 2*2 filtered stereo samples at the primary sample offset
            // sum2 accumulates 2*2 filtered stereo samples at the next sample offset
            sum1 = _mm_add_ps(sum1, _mm_mul_ps(_mm_loadu_ps(pSrc),      pFil[0]));
            sum2 = _mm_add_ps(sum2, _mm_mul_ps(_mm_loadu_ps(pSrc + 2),  pFil[0]));

            sum1 = _mm_add_ps(sum1, _mm_mul_ps(_mm_loadu_ps(pSrc + 4),  pFil[1]));
            sum2 = _mm_add_ps(sum2, _mm_mul_ps(_mm_loadu_ps(pSrc + 6),  pFil[1]));

            sum1 = _mm_add_ps(sum1, _mm_mul_ps(_mm_loadu_ps(pSrc + 8),  pFil[2]));
            sum2 = _mm_add_ps(sum2, _mm_mul_ps(_mm_loadu_ps(pSrc + 10), pFil[2]));

            sum1 = _mm_add_ps(sum1, _mm_mul_ps(_mm_loadu_ps(pSrc + 12), pFil[3]));
            sum2 = _mm_add_ps(sum2, _mm_mul_ps(_mm_loadu_ps(pSrc + 14), pFil[3]));

            pSrc += 16;
            pFil += 4;
        }

        // Now sum1 and sum2 each hold a filtered 2-channel sample, but we
        // still need to sum the hi- and lo-floats of these registers together.
        // Post-shuffle & add the filtered values and store to dest.
        _mm_storeu_ps(pDest, _mm_add_ps(
                    _mm_shuffle_ps(sum1, sum2, _MM_SHUFFLE(1,0,3,2)),   // s2_1 s2_0 s1_3 s1_2
                    _mm_shuffle_ps(sum1, sum2, _MM_SHUFFLE(3,2,1,0))    // s2_3 s2_2 s1_1 s1_0
                    ));
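        // After the add, the lanes hold (low to high): L(j) R(j) L(j+1) R(j+1),
        // i.e. one complete stereo frame for each of the two output samples.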
    }

    // Ideas for further improvement:
    // 1. If it could be guaranteed that 'source' were always aligned to a
    //    16-byte boundary, the faster aligned '_mm_load_ps' instruction could
    //    be used.
    // 2. If it could be guaranteed that 'dest' were always aligned to a
    //    16-byte boundary, the faster '_mm_store_ps' instruction could be used.
    return (uint)count;

    /* The original routine in C. Please note that the C version has
       differently organized coefficients, though.

    double suml1, suml2;
    double sumr1, sumr2;
    uint i, j;

    for (j = 0; j < count; j += 2)
    {
        const float *ptr;
        const float *pFil;

        suml1 = sumr1 = 0.0;
        suml2 = sumr2 = 0.0;
        ptr = src;
        pFil = filterCoeffs;
        for (i = 0; i < lengthLocal; i ++)
        {
            // unroll loop for efficiency.
            suml1 += ptr[0] * pFil[0] +
                     ptr[2] * pFil[2] +
                     ptr[4] * pFil[4] +
                     ptr[6] * pFil[6];

            sumr1 += ptr[1] * pFil[1] +
                     ptr[3] * pFil[3] +
                     ptr[5] * pFil[5] +
                     ptr[7] * pFil[7];

            suml2 += ptr[8] * pFil[0] +
                     ptr[10] * pFil[2] +
                     ptr[12] * pFil[4] +
                     ptr[14] * pFil[6];

            sumr2 += ptr[9] * pFil[1] +
                     ptr[11] * pFil[3] +
                     ptr[13] * pFil[5] +
                     ptr[15] * pFil[7];

            ptr += 16;
            pFil += 8;
        }
        dest[0] = (float)suml1;
        dest[1] = (float)sumr1;
        dest[2] = (float)suml2;
        dest[3] = (float)sumr2;

        src += 4;
        dest += 4;
    }
    */
}

#endif  // SOUNDTOUCH_ALLOW_SSE