/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_cfft_radix2_q15.c
 * Description:  Radix-2 Decimation in Frequency CFFT & CIFFT fixed-point processing function
 *
 * $Date:        27. January 2017
 * $Revision:    V.1.5.1
 *
 * Target Processor: Cortex-M cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2017 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "arm_math.h"

void arm_radix2_butterfly_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  q15_t * pCoef,
  uint16_t twidCoefModifier);

void arm_radix2_butterfly_inverse_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  q15_t * pCoef,
  uint16_t twidCoefModifier);

void arm_bitreversal_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  uint16_t bitRevFactor,
  uint16_t * pBitRevTab);

/**
 * @ingroup groupTransforms
 */

/**
 * @addtogroup ComplexFFT
 * @{
 */

/**
 * @details
 * @brief Processing function for the fixed-point CFFT/CIFFT.
 * @deprecated Do not use this function. It has been superseded by \ref arm_cfft_q15 and will be removed.
 * @param[in]      *S    points to an instance of the fixed-point CFFT/CIFFT structure.
 * @param[in, out] *pSrc points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place.
 * @return none.
 */

void arm_cfft_radix2_q15(
  const arm_cfft_radix2_instance_q15 * S,
  q15_t * pSrc)
{

  if (S->ifftFlag == 1U)
  {
    arm_radix2_butterfly_inverse_q15(pSrc, S->fftLen,
                                     S->pTwiddle, S->twidCoefModifier);
  }
  else
  {
    arm_radix2_butterfly_q15(pSrc, S->fftLen,
                             S->pTwiddle, S->twidCoefModifier);
  }

  arm_bitreversal_q15(pSrc, S->fftLen, S->bitRevFactor, S->pBitRevTable);
}

/**
 * @} end of ComplexFFT group
 */

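/*
 * Illustrative usage sketch (editorial addition, not part of the library
 * source): a typical call sequence for this deprecated radix-2 q15 CFFT,
 * assuming the companion initializer arm_cfft_radix2_init_q15() from
 * CMSIS-DSP is available. The buffer holds interleaved [real, imag] q15
 * samples and is transformed in place.
 */
#if 0
static void cfft_radix2_q15_usage_sketch(void)
{
  arm_cfft_radix2_instance_q15 S;
  q15_t buffer[2 * 64];                     /* 64 complex q15 samples */

  /* ... fill buffer with interleaved real/imaginary input samples ... */

  /* fftLen = 64, ifftFlag = 0 (forward transform), bitReverseFlag = 1 */
  if (arm_cfft_radix2_init_q15(&S, 64, 0, 1) == ARM_MATH_SUCCESS)
  {
    arm_cfft_radix2_q15(&S, buffer);        /* result overwrites buffer */
  }
}
#endif
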
void arm_radix2_butterfly_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  q15_t * pCoef,
  uint16_t twidCoefModifier)
{
#if defined (ARM_MATH_DSP)

  unsigned i, j, k, l;
  unsigned n1, n2, ia;
  q15_t in;
  q31_t T, S, R;
  q31_t coeff, out1, out2;

  //N = fftLen;
  n2 = fftLen;

  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

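  /*
   * First stage: each _SIMD32_OFFSET() access reads one complex q15 sample
   * as a packed 32-bit word (real part in one halfword, imaginary part in
   * the other). The inputs are pre-scaled by 1/2 and combined with the
   * halving add __SHADD16 to avoid overflow; the packed multiplies
   * __SMUAD/__SMUSDX produce the real and imaginary parts of the
   * (difference term * twiddle factor) product.
   */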
  // loop for groups
  for (i = 0; i < n2; i++)
  {
    coeff = _SIMD32_OFFSET(pCoef + (ia * 2U));

    ia = ia + twidCoefModifier;

    l = i + n2;

    T = _SIMD32_OFFSET(pSrc + (2 * i));
    in = ((int16_t) (T & 0xFFFF)) >> 1;
    T = ((T >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    S = _SIMD32_OFFSET(pSrc + (2 * l));
    in = ((int16_t) (S & 0xFFFF)) >> 1;
    S = ((S >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

    out1 = __SMUAD(coeff, R) >> 16;
    out2 = __SMUSDX(coeff, R);

#else

    out1 = __SMUSDX(R, coeff) >> 16U;
    out2 = __SMUAD(coeff, R);

#endif // #ifndef ARM_MATH_BIG_ENDIAN

    _SIMD32_OFFSET(pSrc + (2U * l)) =
      (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

    coeff = _SIMD32_OFFSET(pCoef + (ia * 2U));

    ia = ia + twidCoefModifier;

    // loop for butterfly
    i++;
    l++;

    T = _SIMD32_OFFSET(pSrc + (2 * i));
    in = ((int16_t) (T & 0xFFFF)) >> 1;
    T = ((T >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    S = _SIMD32_OFFSET(pSrc + (2 * l));
    in = ((int16_t) (S & 0xFFFF)) >> 1;
    S = ((S >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

    out1 = __SMUAD(coeff, R) >> 16;
    out2 = __SMUSDX(coeff, R);

#else

    out1 = __SMUSDX(R, coeff) >> 16U;
    out2 = __SMUAD(coeff, R);

#endif // #ifndef ARM_MATH_BIG_ENDIAN

    _SIMD32_OFFSET(pSrc + (2U * l)) =
      (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

  } // groups loop end

  twidCoefModifier = twidCoefModifier << 1U;

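  /*
   * Remaining stages (except the last): n2 halves at every stage and the
   * stride through the twiddle table (twidCoefModifier) doubles, so each
   * stage uses every other coefficient of the stage before it.
   */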
  // loop for stage
  for (k = fftLen / 2; k > 2; k = k >> 1)
  {
    n1 = n2;
    n2 = n2 >> 1;
    ia = 0;

    // loop for groups
    for (j = 0; j < n2; j++)
    {
      coeff = _SIMD32_OFFSET(pCoef + (ia * 2U));

      ia = ia + twidCoefModifier;

      // loop for butterfly
      for (i = j; i < fftLen; i += n1)
      {
        l = i + n2;

        T = _SIMD32_OFFSET(pSrc + (2 * i));

        S = _SIMD32_OFFSET(pSrc + (2 * l));

        R = __QSUB16(T, S);

        _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

        out1 = __SMUAD(coeff, R) >> 16;
        out2 = __SMUSDX(coeff, R);

#else

        out1 = __SMUSDX(R, coeff) >> 16U;
        out2 = __SMUAD(coeff, R);

#endif // #ifndef ARM_MATH_BIG_ENDIAN

        _SIMD32_OFFSET(pSrc + (2U * l)) =
          (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

        i += n1;

        l = i + n2;

        T = _SIMD32_OFFSET(pSrc + (2 * i));

        S = _SIMD32_OFFSET(pSrc + (2 * l));

        R = __QSUB16(T, S);

        _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

        out1 = __SMUAD(coeff, R) >> 16;
        out2 = __SMUSDX(coeff, R);

#else

        out1 = __SMUSDX(R, coeff) >> 16U;
        out2 = __SMUAD(coeff, R);

#endif // #ifndef ARM_MATH_BIG_ENDIAN

        _SIMD32_OFFSET(pSrc + (2U * l)) =
          (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

      } // butterfly loop end

    } // groups loop end

    twidCoefModifier = twidCoefModifier << 1U;
  } // stages loop end

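  /*
   * Last stage: the remaining twiddle factor is W^0 = 1, so each butterfly
   * reduces to a packed add/subtract (the coefficient loaded below is not
   * used inside the loop).
   */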
  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  coeff = _SIMD32_OFFSET(pCoef + (ia * 2U));

  ia = ia + twidCoefModifier;

  // loop for butterfly
  for (i = 0; i < fftLen; i += n1)
  {
    l = i + n2;

    T = _SIMD32_OFFSET(pSrc + (2 * i));

    S = _SIMD32_OFFSET(pSrc + (2 * l));

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __QADD16(T, S);

    _SIMD32_OFFSET(pSrc + (2U * l)) = R;

    i += n1;
    l = i + n2;

    T = _SIMD32_OFFSET(pSrc + (2 * i));

    S = _SIMD32_OFFSET(pSrc + (2 * l));

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __QADD16(T, S);

    _SIMD32_OFFSET(pSrc + (2U * l)) = R;

  } // butterfly loop end


#else

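  /*
   * Portable fallback used when the DSP extension (ARM_MATH_DSP) is not
   * available: the same decimation-in-frequency butterflies, written with
   * explicit 16x16->32 multiplies and right shifts for the q15 twiddle
   * multiplication.
   */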
  unsigned i, j, k, l;
  unsigned n1, n2, ia;
  q15_t xt, yt, cosVal, sinVal;


  //N = fftLen;
  n2 = fftLen;

  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups
  for (j = 0; j < n2; j++)
  {
    cosVal = pCoef[ia * 2];
    sinVal = pCoef[(ia * 2) + 1];
    ia = ia + twidCoefModifier;

    // loop for butterfly
    for (i = j; i < fftLen; i += n1)
    {
      l = i + n2;
      xt = (pSrc[2 * i] >> 1U) - (pSrc[2 * l] >> 1U);
      pSrc[2 * i] = ((pSrc[2 * i] >> 1U) + (pSrc[2 * l] >> 1U)) >> 1U;

      yt = (pSrc[2 * i + 1] >> 1U) - (pSrc[2 * l + 1] >> 1U);
      pSrc[2 * i + 1] =
        ((pSrc[2 * l + 1] >> 1U) + (pSrc[2 * i + 1] >> 1U)) >> 1U;

      pSrc[2U * l] = (((int16_t) (((q31_t) xt * cosVal) >> 16)) +
                      ((int16_t) (((q31_t) yt * sinVal) >> 16)));

      pSrc[2U * l + 1U] = (((int16_t) (((q31_t) yt * cosVal) >> 16)) -
                           ((int16_t) (((q31_t) xt * sinVal) >> 16)));

    } // butterfly loop end

  } // groups loop end

  twidCoefModifier = twidCoefModifier << 1U;

  // loop for stage
  for (k = fftLen / 2; k > 2; k = k >> 1)
  {
    n1 = n2;
    n2 = n2 >> 1;
    ia = 0;

    // loop for groups
    for (j = 0; j < n2; j++)
    {
      cosVal = pCoef[ia * 2];
      sinVal = pCoef[(ia * 2) + 1];
      ia = ia + twidCoefModifier;

      // loop for butterfly
      for (i = j; i < fftLen; i += n1)
      {
        l = i + n2;
        xt = pSrc[2 * i] - pSrc[2 * l];
        pSrc[2 * i] = (pSrc[2 * i] + pSrc[2 * l]) >> 1U;

        yt = pSrc[2 * i + 1] - pSrc[2 * l + 1];
        pSrc[2 * i + 1] = (pSrc[2 * l + 1] + pSrc[2 * i + 1]) >> 1U;

        pSrc[2U * l] = (((int16_t) (((q31_t) xt * cosVal) >> 16)) +
                        ((int16_t) (((q31_t) yt * sinVal) >> 16)));

        pSrc[2U * l + 1U] = (((int16_t) (((q31_t) yt * cosVal) >> 16)) -
                             ((int16_t) (((q31_t) xt * sinVal) >> 16)));

      } // butterfly loop end

    } // groups loop end

    twidCoefModifier = twidCoefModifier << 1U;
  } // stages loop end

  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups
  for (j = 0; j < n2; j++)
  {
    cosVal = pCoef[ia * 2];
    sinVal = pCoef[(ia * 2) + 1];

    ia = ia + twidCoefModifier;

    // loop for butterfly
    for (i = j; i < fftLen; i += n1)
    {
      l = i + n2;
      xt = pSrc[2 * i] - pSrc[2 * l];
      pSrc[2 * i] = (pSrc[2 * i] + pSrc[2 * l]);

      yt = pSrc[2 * i + 1] - pSrc[2 * l + 1];
      pSrc[2 * i + 1] = (pSrc[2 * l + 1] + pSrc[2 * i + 1]);

      pSrc[2U * l] = xt;

      pSrc[2U * l + 1U] = yt;

    } // butterfly loop end

  } // groups loop end

  twidCoefModifier = twidCoefModifier << 1U;

#endif // #if defined (ARM_MATH_DSP)

}


void arm_radix2_butterfly_inverse_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  q15_t * pCoef,
  uint16_t twidCoefModifier)
{
#if defined (ARM_MATH_DSP)

  unsigned i, j, k, l;
  unsigned n1, n2, ia;
  q15_t in;
  q31_t T, S, R;
  q31_t coeff, out1, out2;

  //N = fftLen;
  n2 = fftLen;

  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

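  /*
   * First stage of the inverse transform: same structure as the forward
   * butterfly, but __SMUSD/__SMUADX multiply the difference term by the
   * conjugate twiddle factor (the sign of the sine term is flipped).
   */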
  // loop for groups
  for (i = 0; i < n2; i++)
  {
    coeff = _SIMD32_OFFSET(pCoef + (ia * 2U));

    ia = ia + twidCoefModifier;

    l = i + n2;

    T = _SIMD32_OFFSET(pSrc + (2 * i));
    in = ((int16_t) (T & 0xFFFF)) >> 1;
    T = ((T >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    S = _SIMD32_OFFSET(pSrc + (2 * l));
    in = ((int16_t) (S & 0xFFFF)) >> 1;
    S = ((S >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

    out1 = __SMUSD(coeff, R) >> 16;
    out2 = __SMUADX(coeff, R);
#else

    out1 = __SMUADX(R, coeff) >> 16U;
    out2 = __SMUSD(__QSUB(0, coeff), R);

#endif // #ifndef ARM_MATH_BIG_ENDIAN

    _SIMD32_OFFSET(pSrc + (2U * l)) =
      (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

    coeff = _SIMD32_OFFSET(pCoef + (ia * 2U));

    ia = ia + twidCoefModifier;

    // loop for butterfly
    i++;
    l++;

    T = _SIMD32_OFFSET(pSrc + (2 * i));
    in = ((int16_t) (T & 0xFFFF)) >> 1;
    T = ((T >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    S = _SIMD32_OFFSET(pSrc + (2 * l));
    in = ((int16_t) (S & 0xFFFF)) >> 1;
    S = ((S >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

    out1 = __SMUSD(coeff, R) >> 16;
    out2 = __SMUADX(coeff, R);
#else

    out1 = __SMUADX(R, coeff) >> 16U;
    out2 = __SMUSD(__QSUB(0, coeff), R);

#endif // #ifndef ARM_MATH_BIG_ENDIAN

    _SIMD32_OFFSET(pSrc + (2U * l)) =
      (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

  } // groups loop end

  twidCoefModifier = twidCoefModifier << 1U;

  // loop for stage
  for (k = fftLen / 2; k > 2; k = k >> 1)
  {
    n1 = n2;
    n2 = n2 >> 1;
    ia = 0;

    // loop for groups
    for (j = 0; j < n2; j++)
    {
      coeff = _SIMD32_OFFSET(pCoef + (ia * 2U));

      ia = ia + twidCoefModifier;

      // loop for butterfly
      for (i = j; i < fftLen; i += n1)
      {
        l = i + n2;

        T = _SIMD32_OFFSET(pSrc + (2 * i));

        S = _SIMD32_OFFSET(pSrc + (2 * l));

        R = __QSUB16(T, S);

        _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

        out1 = __SMUSD(coeff, R) >> 16;
        out2 = __SMUADX(coeff, R);

#else

        out1 = __SMUADX(R, coeff) >> 16U;
        out2 = __SMUSD(__QSUB(0, coeff), R);

#endif // #ifndef ARM_MATH_BIG_ENDIAN

        _SIMD32_OFFSET(pSrc + (2U * l)) =
          (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

        i += n1;

        l = i + n2;

        T = _SIMD32_OFFSET(pSrc + (2 * i));

        S = _SIMD32_OFFSET(pSrc + (2 * l));

        R = __QSUB16(T, S);

        _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

        out1 = __SMUSD(coeff, R) >> 16;
        out2 = __SMUADX(coeff, R);
#else

        out1 = __SMUADX(R, coeff) >> 16U;
        out2 = __SMUSD(__QSUB(0, coeff), R);

#endif // #ifndef ARM_MATH_BIG_ENDIAN

        _SIMD32_OFFSET(pSrc + (2U * l)) =
          (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

      } // butterfly loop end

    } // groups loop end

    twidCoefModifier = twidCoefModifier << 1U;
  } // stages loop end

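  /*
   * Last stage: the twiddle factor is unity, so each butterfly reduces to a
   * packed add/subtract (the coefficient loaded below is not used inside
   * the loop).
   */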
  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups
  for (j = 0; j < n2; j++)
  {
    coeff = _SIMD32_OFFSET(pCoef + (ia * 2U));

    ia = ia + twidCoefModifier;

    // loop for butterfly
    for (i = j; i < fftLen; i += n1)
    {
      l = i + n2;

      T = _SIMD32_OFFSET(pSrc + (2 * i));

      S = _SIMD32_OFFSET(pSrc + (2 * l));

      R = __QSUB16(T, S);

      _SIMD32_OFFSET(pSrc + (2 * i)) = __QADD16(T, S);

      _SIMD32_OFFSET(pSrc + (2U * l)) = R;

    } // butterfly loop end

  } // groups loop end

  twidCoefModifier = twidCoefModifier << 1U;

#else


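  /*
   * Portable fallback used when the DSP extension (ARM_MATH_DSP) is not
   * available, mirroring the forward version but multiplying by the
   * conjugate twiddle factor.
   */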
  unsigned i, j, k, l;
  unsigned n1, n2, ia;
  q15_t xt, yt, cosVal, sinVal;

  //N = fftLen;
  n2 = fftLen;

  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups
  for (j = 0; j < n2; j++)
  {
    cosVal = pCoef[ia * 2];
    sinVal = pCoef[(ia * 2) + 1];
    ia = ia + twidCoefModifier;

    // loop for butterfly
    for (i = j; i < fftLen; i += n1)
    {
      l = i + n2;
      xt = (pSrc[2 * i] >> 1U) - (pSrc[2 * l] >> 1U);
      pSrc[2 * i] = ((pSrc[2 * i] >> 1U) + (pSrc[2 * l] >> 1U)) >> 1U;

      yt = (pSrc[2 * i + 1] >> 1U) - (pSrc[2 * l + 1] >> 1U);
      pSrc[2 * i + 1] =
        ((pSrc[2 * l + 1] >> 1U) + (pSrc[2 * i + 1] >> 1U)) >> 1U;

      pSrc[2U * l] = (((int16_t) (((q31_t) xt * cosVal) >> 16)) -
                      ((int16_t) (((q31_t) yt * sinVal) >> 16)));

      pSrc[2U * l + 1U] = (((int16_t) (((q31_t) yt * cosVal) >> 16)) +
                           ((int16_t) (((q31_t) xt * sinVal) >> 16)));

    } // butterfly loop end

  } // groups loop end

  twidCoefModifier = twidCoefModifier << 1U;

  // loop for stage
  for (k = fftLen / 2; k > 2; k = k >> 1)
  {
    n1 = n2;
    n2 = n2 >> 1;
    ia = 0;

    // loop for groups
    for (j = 0; j < n2; j++)
    {
      cosVal = pCoef[ia * 2];
      sinVal = pCoef[(ia * 2) + 1];
      ia = ia + twidCoefModifier;

      // loop for butterfly
      for (i = j; i < fftLen; i += n1)
      {
        l = i + n2;
        xt = pSrc[2 * i] - pSrc[2 * l];
        pSrc[2 * i] = (pSrc[2 * i] + pSrc[2 * l]) >> 1U;

        yt = pSrc[2 * i + 1] - pSrc[2 * l + 1];
        pSrc[2 * i + 1] = (pSrc[2 * l + 1] + pSrc[2 * i + 1]) >> 1U;

        pSrc[2U * l] = (((int16_t) (((q31_t) xt * cosVal) >> 16)) -
                        ((int16_t) (((q31_t) yt * sinVal) >> 16)));

        pSrc[2U * l + 1U] = (((int16_t) (((q31_t) yt * cosVal) >> 16)) +
                             ((int16_t) (((q31_t) xt * sinVal) >> 16)));

      } // butterfly loop end

    } // groups loop end

    twidCoefModifier = twidCoefModifier << 1U;
  } // stages loop end

  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  cosVal = pCoef[ia * 2];
  sinVal = pCoef[(ia * 2) + 1];

  ia = ia + twidCoefModifier;

  // loop for butterfly
  for (i = 0; i < fftLen; i += n1)
  {
    l = i + n2;
    xt = pSrc[2 * i] - pSrc[2 * l];
    pSrc[2 * i] = (pSrc[2 * i] + pSrc[2 * l]);

    yt = pSrc[2 * i + 1] - pSrc[2 * l + 1];
    pSrc[2 * i + 1] = (pSrc[2 * l + 1] + pSrc[2 * i + 1]);

    pSrc[2U * l] = xt;

    pSrc[2U * l + 1U] = yt;

  } // butterfly loop end


#endif // #if defined (ARM_MATH_DSP)

}