Subversion Repositories DashDisplay

Rev

Rev 2 | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2 mjames 1
/* ----------------------------------------------------------------------  
2
* Copyright (C) 2010-2014 ARM Limited. All rights reserved.  
3
*  
4
* $Date:        19. March 2015
5
* $Revision:    V.1.4.5  
6
*  
7
* Project:          CMSIS DSP Library  
8
* Title:            arm_cfft_radix2_q15.c  
9
*  
10
* Description:  Radix-2 Decimation in Frequency CFFT & CIFFT Fixed point processing function  
11
*  
12
*  
13
* Target Processor: Cortex-M4/Cortex-M3/Cortex-M0
14
*  
15
* Redistribution and use in source and binary forms, with or without
16
* modification, are permitted provided that the following conditions
17
* are met:
18
*   - Redistributions of source code must retain the above copyright
19
*     notice, this list of conditions and the following disclaimer.
20
*   - Redistributions in binary form must reproduce the above copyright
21
*     notice, this list of conditions and the following disclaimer in
22
*     the documentation and/or other materials provided with the
23
*     distribution.
24
*   - Neither the name of ARM LIMITED nor the names of its contributors
25
*     may be used to endorse or promote products derived from this
26
*     software without specific prior written permission.
27
*
28
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
31
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
32
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
33
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
34
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
35
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
36
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
38
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39
* POSSIBILITY OF SUCH DAMAGE.  
40
* -------------------------------------------------------------------- */
41
 
42
#include "arm_math.h"
43
 
44
/* Forward declarations of the internal helpers defined later in this file. */

/* Core radix-2 decimation-in-frequency butterfly for the forward q15 CFFT;
   operates in-place on interleaved (real, imag) data. */
void arm_radix2_butterfly_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  q15_t * pCoef,
  uint16_t twidCoefModifier);

/* Core radix-2 decimation-in-frequency butterfly for the inverse q15 CFFT;
   operates in-place on interleaved (real, imag) data. */
void arm_radix2_butterfly_inverse_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  q15_t * pCoef,
  uint16_t twidCoefModifier);

/* In-place bit-reversal reordering of the interleaved complex q15 buffer
   (defined in arm_bitreversal.c within CMSIS-DSP — confirm at link time). */
void arm_bitreversal_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  uint16_t bitRevFactor,
  uint16_t * pBitRevTab);
61
 
62
/**  
63
 * @ingroup groupTransforms  
64
 */
65
 
66
/**  
67
 * @addtogroup ComplexFFT  
68
 * @{  
69
 */
70
 
71
/**  
72
 * @details  
73
 * @brief Processing function for the fixed-point CFFT/CIFFT.  
74
 * @deprecated Do not use this function.  It has been superseded by \ref arm_cfft_q15 and will be removed
75
 * @param[in]      *S    points to an instance of the fixed-point CFFT/CIFFT structure.  
76
 * @param[in, out] *pSrc points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place.  
77
 * @return none.  
78
 */
79
 
80
void arm_cfft_radix2_q15(
81
  const arm_cfft_radix2_instance_q15 * S,
82
  q15_t * pSrc)
83
{
84
 
85
  if(S->ifftFlag == 1u)
86
  {
87
    arm_radix2_butterfly_inverse_q15(pSrc, S->fftLen,
88
                                     S->pTwiddle, S->twidCoefModifier);
89
  }
90
  else
91
  {
92
    arm_radix2_butterfly_q15(pSrc, S->fftLen,
93
                             S->pTwiddle, S->twidCoefModifier);
94
  }
95
 
96
  arm_bitreversal_q15(pSrc, S->fftLen, S->bitRevFactor, S->pBitRevTable);
97
}
98
 
99
/**  
100
 * @} end of ComplexFFT group  
101
 */
102
 
103
/**
 * @brief Core radix-2 decimation-in-frequency butterfly for the forward
 *        q15 CFFT.  Transforms pSrc in place; data is interleaved
 *        (real, imag) pairs, 2*fftLen q15 values in total.
 *
 * @param[in,out] pSrc             complex input/output buffer
 * @param[in]     fftLen           transform length in complex points
 * @param[in]     pCoef            interleaved twiddle-factor table
 * @param[in]     twidCoefModifier stride through the twiddle table; doubled
 *                                 after each stage as the group size halves
 *
 * Two compile paths: the default path uses 32-bit packed DSP intrinsics
 * (one complex q15 sample per 32-bit word) for Cortex-M3/M4; the
 * ARM_MATH_CM0_FAMILY path is plain scalar C for cores without the DSP
 * extension.  Both paths scale intermediates down by shifts / halving adds
 * to prevent overflow, so the output carries fixed-point scaling relative
 * to the mathematical DFT.
 */
void arm_radix2_butterfly_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  q15_t * pCoef,
  uint16_t twidCoefModifier)
{
#ifndef ARM_MATH_CM0_FAMILY

  unsigned i, j, k, l;
  unsigned n1, n2, ia;
  q15_t in;
  q31_t T, S, R;                /* packed (imag:16 | real:16) operands */
  q31_t coeff, out1, out2;      /* packed twiddle and butterfly outputs */

  //N = fftLen; 
  n2 = fftLen;

  /* First stage: group span n1 = fftLen, half-span n2 = fftLen/2. */
  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups (unrolled: two butterflies per iteration via i++ below)
  for (i = 0; i < n2; i++)
  {
    coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

    ia = ia + twidCoefModifier;

    l = i + n2;

    /* Load complex sample i and arithmetic-shift each 16-bit half right by
       one (the low half is extracted separately so its sign is preserved):
       pre-scales the input by 1/2 before the add/subtract. */
    T = _SIMD32_OFFSET(pSrc + (2 * i));
    in = ((int16_t) (T & 0xFFFF)) >> 1;
    T = ((T >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    S = _SIMD32_OFFSET(pSrc + (2 * l));
    in = ((int16_t) (S & 0xFFFF)) >> 1;
    S = ((S >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    /* R = T - S (per-halfword, saturating). */
    R = __QSUB16(T, S);

    /* pSrc[i] = (T + S) / 2 via halving add. */
    _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

    /* Complex multiply of R by the twiddle using packed dual-MAC
       intrinsics: out1 carries the 16-bit real result, out2's high
       halfword the imaginary result. */
    out1 = __SMUAD(coeff, R) >> 16;
    out2 = __SMUSDX(coeff, R);

#else

    /* Big-endian: halfword lanes are swapped, so operand order differs. */
    out1 = __SMUSDX(R, coeff) >> 16u;
    out2 = __SMUAD(coeff, R);

#endif //     #ifndef ARM_MATH_BIG_ENDIAN

    /* Re-pack (imag | real) into the lower-half output sample. */
    _SIMD32_OFFSET(pSrc + (2u * l)) =
      (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

    coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

    ia = ia + twidCoefModifier;

    // loop for butterfly: second (unrolled) butterfly of this iteration
    i++;
    l++;

    T = _SIMD32_OFFSET(pSrc + (2 * i));
    in = ((int16_t) (T & 0xFFFF)) >> 1;
    T = ((T >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    S = _SIMD32_OFFSET(pSrc + (2 * l));
    in = ((int16_t) (S & 0xFFFF)) >> 1;
    S = ((S >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

    out1 = __SMUAD(coeff, R) >> 16;
    out2 = __SMUSDX(coeff, R);

#else

    out1 = __SMUSDX(R, coeff) >> 16u;
    out2 = __SMUAD(coeff, R);

#endif //     #ifndef ARM_MATH_BIG_ENDIAN

    _SIMD32_OFFSET(pSrc + (2u * l)) =
      (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

  }                             // groups loop end 

  twidCoefModifier = twidCoefModifier << 1u;

  // loop for stage: middle stages (all but the first and last).
  // Inputs are already scaled, so no per-halfword pre-shift here; the
  // halving add keeps word growth in check.
  for (k = fftLen / 2; k > 2; k = k >> 1)
  {
    n1 = n2;
    n2 = n2 >> 1;
    ia = 0;

    // loop for groups 
    for (j = 0; j < n2; j++)
    {
      coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

      ia = ia + twidCoefModifier;

      // loop for butterfly (unrolled by two via the inner i += n1)
      for (i = j; i < fftLen; i += n1)
      {
        l = i + n2;

        T = _SIMD32_OFFSET(pSrc + (2 * i));

        S = _SIMD32_OFFSET(pSrc + (2 * l));

        R = __QSUB16(T, S);

        _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

        out1 = __SMUAD(coeff, R) >> 16;
        out2 = __SMUSDX(coeff, R);

#else

        out1 = __SMUSDX(R, coeff) >> 16u;
        out2 = __SMUAD(coeff, R);

#endif //     #ifndef ARM_MATH_BIG_ENDIAN

        _SIMD32_OFFSET(pSrc + (2u * l)) =
          (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

        /* Second butterfly of the unrolled pair. */
        i += n1;

        l = i + n2;

        T = _SIMD32_OFFSET(pSrc + (2 * i));

        S = _SIMD32_OFFSET(pSrc + (2 * l));

        R = __QSUB16(T, S);

        _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

        out1 = __SMUAD(coeff, R) >> 16;
        out2 = __SMUSDX(coeff, R);

#else

        out1 = __SMUSDX(R, coeff) >> 16u;
        out2 = __SMUAD(coeff, R);

#endif //     #ifndef ARM_MATH_BIG_ENDIAN

        _SIMD32_OFFSET(pSrc + (2u * l)) =
          (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

      }                         // butterfly loop end 

    }                           // groups loop end 

    twidCoefModifier = twidCoefModifier << 1u;
  }                             // stages loop end 

  /* Last stage (n1 == 2): twiddle is unity, so only add/subtract — no
     multiply and no halving (saturating __QADD16 instead of __SHADD16). */
  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  /* NOTE(review): coeff is loaded here to mirror the generic pattern but is
     never referenced in the loop below. */
  coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

  ia = ia + twidCoefModifier;

  // loop for butterfly (unrolled by two)
  for (i = 0; i < fftLen; i += n1)
  {
    l = i + n2;

    T = _SIMD32_OFFSET(pSrc + (2 * i));

    S = _SIMD32_OFFSET(pSrc + (2 * l));

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __QADD16(T, S);

    _SIMD32_OFFSET(pSrc + (2u * l)) = R;

    i += n1;
    l = i + n2;

    T = _SIMD32_OFFSET(pSrc + (2 * i));

    S = _SIMD32_OFFSET(pSrc + (2 * l));

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __QADD16(T, S);

    _SIMD32_OFFSET(pSrc + (2u * l)) = R;

  }                             // groups loop end 


#else

  /* Scalar path for cores without the DSP extension (e.g. Cortex-M0):
     same three phases as above using 16x16 -> 32-bit multiplies. */
  unsigned i, j, k, l;
  unsigned n1, n2, ia;
  q15_t xt, yt, cosVal, sinVal;


  //N = fftLen; 
  n2 = fftLen;

  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups: first stage, inputs pre-scaled by the >> 1u reads.
  for (j = 0; j < n2; j++)
  {
    cosVal = pCoef[ia * 2];
    sinVal = pCoef[(ia * 2) + 1];
    ia = ia + twidCoefModifier;

    // loop for butterfly 
    for (i = j; i < fftLen; i += n1)
    {
      l = i + n2;
      xt = (pSrc[2 * i] >> 1u) - (pSrc[2 * l] >> 1u);
      pSrc[2 * i] = ((pSrc[2 * i] >> 1u) + (pSrc[2 * l] >> 1u)) >> 1u;

      yt = (pSrc[2 * i + 1] >> 1u) - (pSrc[2 * l + 1] >> 1u);
      pSrc[2 * i + 1] =
        ((pSrc[2 * l + 1] >> 1u) + (pSrc[2 * i + 1] >> 1u)) >> 1u;

      /* (xt + j*yt) * twiddle; >> 16 keeps the q15 scaling. */
      pSrc[2u * l] = (((int16_t) (((q31_t) xt * cosVal) >> 16)) +
                      ((int16_t) (((q31_t) yt * sinVal) >> 16)));

      pSrc[2u * l + 1u] = (((int16_t) (((q31_t) yt * cosVal) >> 16)) -
                           ((int16_t) (((q31_t) xt * sinVal) >> 16)));

    }                           // butterfly loop end 

  }                             // groups loop end 

  twidCoefModifier = twidCoefModifier << 1u;

  // loop for stage: middle stages, sums halved (>> 1u) to limit growth.
  for (k = fftLen / 2; k > 2; k = k >> 1)
  {
    n1 = n2;
    n2 = n2 >> 1;
    ia = 0;

    // loop for groups 
    for (j = 0; j < n2; j++)
    {
      cosVal = pCoef[ia * 2];
      sinVal = pCoef[(ia * 2) + 1];
      ia = ia + twidCoefModifier;

      // loop for butterfly 
      for (i = j; i < fftLen; i += n1)
      {
        l = i + n2;
        xt = pSrc[2 * i] - pSrc[2 * l];
        pSrc[2 * i] = (pSrc[2 * i] + pSrc[2 * l]) >> 1u;

        yt = pSrc[2 * i + 1] - pSrc[2 * l + 1];
        pSrc[2 * i + 1] = (pSrc[2 * l + 1] + pSrc[2 * i + 1]) >> 1u;

        pSrc[2u * l] = (((int16_t) (((q31_t) xt * cosVal) >> 16)) +
                        ((int16_t) (((q31_t) yt * sinVal) >> 16)));

        pSrc[2u * l + 1u] = (((int16_t) (((q31_t) yt * cosVal) >> 16)) -
                             ((int16_t) (((q31_t) xt * sinVal) >> 16)));

      }                         // butterfly loop end 

    }                           // groups loop end 

    twidCoefModifier = twidCoefModifier << 1u;
  }                             // stages loop end 

  /* Last stage: twiddle is unity — plain add/subtract, no scaling.
     NOTE(review): cosVal/sinVal loaded below are unused in this stage. */
  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups 
  for (j = 0; j < n2; j++)
  {
    cosVal = pCoef[ia * 2];
    sinVal = pCoef[(ia * 2) + 1];

    ia = ia + twidCoefModifier;

    // loop for butterfly 
    for (i = j; i < fftLen; i += n1)
    {
      l = i + n2;
      xt = pSrc[2 * i] - pSrc[2 * l];
      pSrc[2 * i] = (pSrc[2 * i] + pSrc[2 * l]);

      yt = pSrc[2 * i + 1] - pSrc[2 * l + 1];
      pSrc[2 * i + 1] = (pSrc[2 * l + 1] + pSrc[2 * i + 1]);

      pSrc[2u * l] = xt;

      pSrc[2u * l + 1u] = yt;

    }                           // butterfly loop end 

  }                             // groups loop end 

  /* No effect: twidCoefModifier is a by-value parameter and the function
     returns immediately after this. */
  twidCoefModifier = twidCoefModifier << 1u;

#endif //             #ifndef ARM_MATH_CM0_FAMILY

}
430
 
431
 
432
/**
 * @brief Core radix-2 decimation-in-frequency butterfly for the inverse
 *        q15 CFFT.  Transforms pSrc in place; data is interleaved
 *        (real, imag) pairs, 2*fftLen q15 values in total.
 *
 * @param[in,out] pSrc             complex input/output buffer
 * @param[in]     fftLen           transform length in complex points
 * @param[in]     pCoef            interleaved twiddle-factor table
 * @param[in]     twidCoefModifier stride through the twiddle table; doubled
 *                                 after each stage as the group size halves
 *
 * Mirrors arm_radix2_butterfly_q15 with the conjugate twiddle rotation:
 * the DSP path uses __SMUSD/__SMUADX where the forward path used
 * __SMUAD/__SMUSDX, and the scalar path flips the sinVal signs.
 */
void arm_radix2_butterfly_inverse_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  q15_t * pCoef,
  uint16_t twidCoefModifier)
{
#ifndef ARM_MATH_CM0_FAMILY

  unsigned i, j, k, l;
  unsigned n1, n2, ia;
  q15_t in;
  q31_t T, S, R;                /* packed (imag:16 | real:16) operands */
  q31_t coeff, out1, out2;      /* packed twiddle and butterfly outputs */

  //N = fftLen; 
  n2 = fftLen;

  /* First stage: group span n1 = fftLen, half-span n2 = fftLen/2. */
  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups (unrolled: two butterflies per iteration via i++ below)
  for (i = 0; i < n2; i++)
  {
    coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

    ia = ia + twidCoefModifier;

    l = i + n2;

    /* Pre-scale each 16-bit half of the packed sample by 1/2 (low half
       extracted separately to preserve its sign) before add/subtract. */
    T = _SIMD32_OFFSET(pSrc + (2 * i));
    in = ((int16_t) (T & 0xFFFF)) >> 1;
    T = ((T >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    S = _SIMD32_OFFSET(pSrc + (2 * l));
    in = ((int16_t) (S & 0xFFFF)) >> 1;
    S = ((S >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    /* R = T - S (per-halfword, saturating). */
    R = __QSUB16(T, S);

    /* pSrc[i] = (T + S) / 2 via halving add. */
    _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

    /* Conjugate-twiddle complex multiply: out1 carries the 16-bit real
       result, out2's high halfword the imaginary result. */
    out1 = __SMUSD(coeff, R) >> 16;
    out2 = __SMUADX(coeff, R);
#else

    /* Big-endian: halfword lanes are swapped; coeff is negated for the
       difference term via __QSUB(0, coeff). */
    out1 = __SMUADX(R, coeff) >> 16u;
    out2 = __SMUSD(__QSUB(0, coeff), R);

#endif //     #ifndef ARM_MATH_BIG_ENDIAN

    /* Re-pack (imag | real) into the lower-half output sample. */
    _SIMD32_OFFSET(pSrc + (2u * l)) =
      (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

    coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

    ia = ia + twidCoefModifier;

    // loop for butterfly: second (unrolled) butterfly of this iteration
    i++;
    l++;

    T = _SIMD32_OFFSET(pSrc + (2 * i));
    in = ((int16_t) (T & 0xFFFF)) >> 1;
    T = ((T >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    S = _SIMD32_OFFSET(pSrc + (2 * l));
    in = ((int16_t) (S & 0xFFFF)) >> 1;
    S = ((S >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

    out1 = __SMUSD(coeff, R) >> 16;
    out2 = __SMUADX(coeff, R);
#else

    out1 = __SMUADX(R, coeff) >> 16u;
    out2 = __SMUSD(__QSUB(0, coeff), R);

#endif //     #ifndef ARM_MATH_BIG_ENDIAN

    _SIMD32_OFFSET(pSrc + (2u * l)) =
      (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

  }                             // groups loop end 

  twidCoefModifier = twidCoefModifier << 1u;

  // loop for stage: middle stages; no pre-shift, halving add limits growth.
  for (k = fftLen / 2; k > 2; k = k >> 1)
  {
    n1 = n2;
    n2 = n2 >> 1;
    ia = 0;

    // loop for groups 
    for (j = 0; j < n2; j++)
    {
      coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

      ia = ia + twidCoefModifier;

      // loop for butterfly (unrolled by two via the inner i += n1)
      for (i = j; i < fftLen; i += n1)
      {
        l = i + n2;

        T = _SIMD32_OFFSET(pSrc + (2 * i));

        S = _SIMD32_OFFSET(pSrc + (2 * l));

        R = __QSUB16(T, S);

        _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

        out1 = __SMUSD(coeff, R) >> 16;
        out2 = __SMUADX(coeff, R);

#else

        out1 = __SMUADX(R, coeff) >> 16u;
        out2 = __SMUSD(__QSUB(0, coeff), R);

#endif //     #ifndef ARM_MATH_BIG_ENDIAN

        _SIMD32_OFFSET(pSrc + (2u * l)) =
          (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

        /* Second butterfly of the unrolled pair. */
        i += n1;

        l = i + n2;

        T = _SIMD32_OFFSET(pSrc + (2 * i));

        S = _SIMD32_OFFSET(pSrc + (2 * l));

        R = __QSUB16(T, S);

        _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

        out1 = __SMUSD(coeff, R) >> 16;
        out2 = __SMUADX(coeff, R);
#else

        out1 = __SMUADX(R, coeff) >> 16u;
        out2 = __SMUSD(__QSUB(0, coeff), R);

#endif //     #ifndef ARM_MATH_BIG_ENDIAN

        _SIMD32_OFFSET(pSrc + (2u * l)) =
          (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

      }                         // butterfly loop end 

    }                           // groups loop end 

    twidCoefModifier = twidCoefModifier << 1u;
  }                             // stages loop end 

  /* Last stage (n1 == 2): twiddle is unity — saturating add/subtract only.
     NOTE(review): coeff is loaded per group but never used in this stage. */
  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups 
  for (j = 0; j < n2; j++)
  {
    coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

    ia = ia + twidCoefModifier;

    // loop for butterfly 
    for (i = j; i < fftLen; i += n1)
    {
      l = i + n2;

      T = _SIMD32_OFFSET(pSrc + (2 * i));

      S = _SIMD32_OFFSET(pSrc + (2 * l));

      R = __QSUB16(T, S);

      _SIMD32_OFFSET(pSrc + (2 * i)) = __QADD16(T, S);

      _SIMD32_OFFSET(pSrc + (2u * l)) = R;

    }                           // butterfly loop end 

  }                             // groups loop end 

  /* No effect: twidCoefModifier is a by-value parameter. */
  twidCoefModifier = twidCoefModifier << 1u;

#else

  /* Scalar path for cores without the DSP extension (e.g. Cortex-M0):
     same three phases using 16x16 -> 32-bit multiplies, with the sinVal
     signs flipped relative to the forward transform. */

  unsigned i, j, k, l;
  unsigned n1, n2, ia;
  q15_t xt, yt, cosVal, sinVal;

  //N = fftLen; 
  n2 = fftLen;

  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups: first stage, inputs pre-scaled by the >> 1u reads.
  for (j = 0; j < n2; j++)
  {
    cosVal = pCoef[ia * 2];
    sinVal = pCoef[(ia * 2) + 1];
    ia = ia + twidCoefModifier;

    // loop for butterfly 
    for (i = j; i < fftLen; i += n1)
    {
      l = i + n2;
      xt = (pSrc[2 * i] >> 1u) - (pSrc[2 * l] >> 1u);
      pSrc[2 * i] = ((pSrc[2 * i] >> 1u) + (pSrc[2 * l] >> 1u)) >> 1u;

      yt = (pSrc[2 * i + 1] >> 1u) - (pSrc[2 * l + 1] >> 1u);
      pSrc[2 * i + 1] =
        ((pSrc[2 * l + 1] >> 1u) + (pSrc[2 * i + 1] >> 1u)) >> 1u;

      /* (xt + j*yt) * conj(twiddle); >> 16 keeps the q15 scaling. */
      pSrc[2u * l] = (((int16_t) (((q31_t) xt * cosVal) >> 16)) -
                      ((int16_t) (((q31_t) yt * sinVal) >> 16)));

      pSrc[2u * l + 1u] = (((int16_t) (((q31_t) yt * cosVal) >> 16)) +
                           ((int16_t) (((q31_t) xt * sinVal) >> 16)));

    }                           // butterfly loop end 

  }                             // groups loop end 

  twidCoefModifier = twidCoefModifier << 1u;

  // loop for stage: middle stages, sums halved (>> 1u) to limit growth.
  for (k = fftLen / 2; k > 2; k = k >> 1)
  {
    n1 = n2;
    n2 = n2 >> 1;
    ia = 0;

    // loop for groups 
    for (j = 0; j < n2; j++)
    {
      cosVal = pCoef[ia * 2];
      sinVal = pCoef[(ia * 2) + 1];
      ia = ia + twidCoefModifier;

      // loop for butterfly 
      for (i = j; i < fftLen; i += n1)
      {
        l = i + n2;
        xt = pSrc[2 * i] - pSrc[2 * l];
        pSrc[2 * i] = (pSrc[2 * i] + pSrc[2 * l]) >> 1u;

        yt = pSrc[2 * i + 1] - pSrc[2 * l + 1];
        pSrc[2 * i + 1] = (pSrc[2 * l + 1] + pSrc[2 * i + 1]) >> 1u;

        pSrc[2u * l] = (((int16_t) (((q31_t) xt * cosVal) >> 16)) -
                        ((int16_t) (((q31_t) yt * sinVal) >> 16)));

        pSrc[2u * l + 1u] = (((int16_t) (((q31_t) yt * cosVal) >> 16)) +
                             ((int16_t) (((q31_t) xt * sinVal) >> 16)));

      }                         // butterfly loop end 

    }                           // groups loop end 

    twidCoefModifier = twidCoefModifier << 1u;
  }                             // stages loop end 

  /* Last stage: twiddle is unity — plain add/subtract, no scaling.
     NOTE(review): cosVal/sinVal loaded below are unused in the loop. */
  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  cosVal = pCoef[ia * 2];
  sinVal = pCoef[(ia * 2) + 1];

  ia = ia + twidCoefModifier;

  // loop for butterfly 
  for (i = 0; i < fftLen; i += n1)
  {
    l = i + n2;
    xt = pSrc[2 * i] - pSrc[2 * l];
    pSrc[2 * i] = (pSrc[2 * i] + pSrc[2 * l]);

    yt = pSrc[2 * i + 1] - pSrc[2 * l + 1];
    pSrc[2 * i + 1] = (pSrc[2 * l + 1] + pSrc[2 * i + 1]);

    pSrc[2u * l] = xt;

    pSrc[2u * l + 1u] = yt;

  }                             // groups loop end 


#endif //             #ifndef ARM_MATH_CM0_FAMILY

}