00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026 #include "arm_math.h"
00027
00055 void arm_cfft_radix4_q15(
00056 const arm_cfft_radix4_instance_q15 * S,
00057 q15_t * pSrc)
00058 {
00059 if(S->ifftFlag == 1u)
00060 {
00061
00062 arm_radix4_butterfly_inverse_q15(pSrc, S->fftLen, S->pTwiddle,
00063 S->twidCoefModifier);
00064 }
00065 else
00066 {
00067
00068 arm_radix4_butterfly_q15(pSrc, S->fftLen, S->pTwiddle,
00069 S->twidCoefModifier);
00070 }
00071
00072 if(S->bitReverseFlag == 1u)
00073 {
00074
00075 arm_bitreversal_q15(pSrc, S->fftLen, S->bitRevFactor, S->pBitRevTable);
00076 }
00077
00078 }
00079
00084
00085
00086
00087
00088
00089
00090
00091
00092
00093
00094
00095
00096
00097
00098
00099
00100
00101
00102
00103
00104
00105
00106
00107
00108
00109
00110
00111
00112
00113
00114
00115
00116
00117
/**
 * @brief  Core in-place radix-4 decimation-in-frequency butterfly kernel for
 *         the forward Q15 complex FFT (packed 16-bit SIMD variant).
 *
 * @param[in,out] pSrc16            interleaved complex Q15 buffer (fftLen complex
 *                                  points), transformed in place
 * @param[in]     fftLen            transform length in complex points
 * @param[in]     pCoef16           interleaved Q15 twiddle-factor table
 * @param[in]     twidCoefModifier  twiddle-table stride for this transform size;
 *                                  multiplied by 4 after every stage
 *
 * Each complex sample is accessed as one packed 32-bit word via _SIMD32_OFFSET,
 * so every operation below acts on the real and imaginary 16-bit halves at
 * once.  The first stage pre-scales each input by 1/4 (>> 2) and the middle
 * and last stages use halving adds/subtracts, so the output is scaled down
 * relative to an unscaled FFT (see the explicit shifts below).
 */
void arm_radix4_butterfly_q15(
q15_t * pSrc16,
uint32_t fftLen,
q15_t * pCoef16,
uint32_t twidCoefModifier)
{
q31_t R, S, T, U;               /* packed dual-16-bit butterfly intermediates */
q31_t C1, C2, C3, out1, out2;   /* packed twiddles and packed result halves */
uint32_t n1, n2, ic, i0, i1, i2, i3, j, k;
q15_t in;                       /* scratch used to sign-extend the low halfword */

q15_t *ptr1;                    /* walking pointer for the final stage */

q31_t xaya, xbyb, xcyc, xdyd;   /* packed samples a..d for the last stage */

/* -------- Stage 1 -------- */

/* n1 = span of one butterfly group; n2 = distance between its four inputs */
n2 = fftLen;
n1 = n2;

n2 >>= 2u;

/* twiddle-table index */
ic = 0u;

i0 = 0u;
j = n2;

/* Process all fftLen/4 butterflies of the first stage. */
do
{

/* the four inputs of this butterfly are n2 complex samples apart */
i1 = i0 + n2;
i2 = i1 + n2;
i3 = i2 + n2;

/* Read sample a and scale both 16-bit halves by 1/4: the high half is
 * shifted in place, the low half is sign-extended via 'in' first. */
T = _SIMD32_OFFSET(pSrc16 + (2u * i0));
in = ((int16_t) (T & 0xFFFF)) >> 2;
T = ((T >> 2) & 0xFFFF0000) | (in & 0xFFFF);

/* Read sample c, scaled by 1/4 the same way. */
S = _SIMD32_OFFSET(pSrc16 + (2u * i2));
in = ((int16_t) (S & 0xFFFF)) >> 2;
S = ((S >> 2) & 0xFFFF0000) | (in & 0xFFFF);

/* R = a + c  (saturating dual 16-bit add) */
R = __QADD16(T, S);

/* S = a - c */
S = __QSUB16(T, S);

/* Read sample b, scaled by 1/4. */
T = _SIMD32_OFFSET(pSrc16 + (2u * i1));
in = ((int16_t) (T & 0xFFFF)) >> 2;
T = ((T >> 2) & 0xFFFF0000) | (in & 0xFFFF);

/* Read sample d, scaled by 1/4. */
U = _SIMD32_OFFSET(pSrc16 + (2u * i3));
in = ((int16_t) (U & 0xFFFF)) >> 2;
U = ((U >> 2) & 0xFFFF0000) | (in & 0xFFFF);

/* T = b + d */
T = __QADD16(T, U);

/* result 0: ((a+c) + (b+d)) / 2 via halving add, written back at i0 */
_SIMD32_OFFSET(pSrc16 + (2u * i0)) = __SHADD16(R, T);

/* R = (a+c) - (b+d) */
R = __QSUB16(R, T);

/* packed twiddle (co2, si2) applied to the i1 output */
C2 = _SIMD32_OFFSET(pCoef16 + (4u * ic));

#ifndef ARM_MATH_BIG_ENDIAN

/* complex multiply R by C2: dual MACs produce the two 16-bit components */
out1 = __SMUAD(C2, R) >> 16u;

out2 = __SMUSDX(C2, R);

#else

/* big-endian packing swaps the halfword roles of the operands */
out1 = __SMUSDX(R, C2) >> 16u;

out2 = __SMUAD(C2, R);

#endif

/* re-read b (scaled) for the remaining results */
T = _SIMD32_OFFSET(pSrc16 + (2u * i1));
in = ((int16_t) (T & 0xFFFF)) >> 2;
T = ((T >> 2) & 0xFFFF0000) | (in & 0xFFFF);

/* store at i1: pack out2's high halfword with out1's low halfword */
_SIMD32_OFFSET(pSrc16 + (2u * i1)) = (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

/* re-read d (scaled) */
U = _SIMD32_OFFSET(pSrc16 + (2u * i3));
in = ((int16_t) (U & 0xFFFF)) >> 2;
U = ((U >> 2) & 0xFFFF0000) | (in & 0xFFFF);

/* T = b - d */
T = __QSUB16(T, U);

#ifndef ARM_MATH_BIG_ENDIAN

/* cross add/subtract combines (a-c) with the swapped halves of (b-d),
 * i.e. (a-c) -/+ j(b-d) on the packed representation */
R = __QASX(S, T);

S = __QSAX(S, T);

#else

R = __QSAX(S, T);

S = __QASX(S, T);

#endif

/* packed twiddle (co1, si1) applied to the i2 output */
C1 = _SIMD32_OFFSET(pCoef16 + (2u * ic));

#ifndef ARM_MATH_BIG_ENDIAN

/* complex multiply S by C1 */
out1 = __SMUAD(C1, S) >> 16u;

out2 = __SMUSDX(C1, S);

#else

out1 = __SMUSDX(S, C1) >> 16u;

out2 = __SMUAD(C1, S);

#endif

/* store at i2 */
_SIMD32_OFFSET(pSrc16 + (2u * i2)) = ((out2) & 0xFFFF0000) | ((out1) & 0x0000FFFF);

/* packed twiddle (co3, si3) applied to the i3 output */
C3 = _SIMD32_OFFSET(pCoef16 + (6u * ic));

#ifndef ARM_MATH_BIG_ENDIAN

/* complex multiply R by C3 */
out1 = __SMUAD(C3, R) >> 16u;

out2 = __SMUSDX(C3, R);

#else

out1 = __SMUSDX(R, C3) >> 16u;

out2 = __SMUAD(C3, R);

#endif

/* store at i3 */
_SIMD32_OFFSET(pSrc16 + (2u * i3)) = ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

/* next twiddle set */
ic = ic + twidCoefModifier;

/* next butterfly */
i0 = i0 + 1u;

} while(--j);

/* -------- Middle stages -------- */

/* each stage samples the twiddle table 4x as sparsely as the previous one */
twidCoefModifier <<= 2u;

/* iterate stages until only the trivial last stage remains (k == 4) */
for (k = fftLen / 4u; k > 4u; k >>= 2u)
{

n1 = n2;
n2 >>= 2u;
ic = 0u;

/* loop over the n2 distinct twiddle sets of this stage */
for (j = 0u; j <= (n2 - 1u); j++)
{

/* fetch the three packed twiddles once per j */
C1 = _SIMD32_OFFSET(pCoef16 + (2u * ic));
C2 = _SIMD32_OFFSET(pCoef16 + (4u * ic));
C3 = _SIMD32_OFFSET(pCoef16 + (6u * ic));

ic = ic + twidCoefModifier;

/* apply this twiddle set to the matching butterfly of every group */
for (i0 = j; i0 < fftLen; i0 += n1)
{

i1 = i0 + n2;
i2 = i1 + n2;
i3 = i2 + n2;

/* read a and c (middle stages do no input pre-scaling) */
T = _SIMD32_OFFSET(pSrc16 + (2u * i0));

S = _SIMD32_OFFSET(pSrc16 + (2u * i2));

/* R = a + c */
R = __QADD16(T, S);

/* S = a - c */
S = __QSUB16(T, S);

/* read b and d */
T = _SIMD32_OFFSET(pSrc16 + (2u * i1));

U = _SIMD32_OFFSET(pSrc16 + (2u * i3));

/* T = b + d */
T = __QADD16(T, U);

/* result at i0: ((a+c)+(b+d))/4 — halving add, then a further /2 with
 * the low halfword sign-extended through 'in' */
out1 = __SHADD16(R, T);
in = ((int16_t) (out1 & 0xFFFF)) >> 1;
out1 = ((out1 >> 1) & 0xFFFF0000) | (in & 0xFFFF);
_SIMD32_OFFSET(pSrc16 + (2u * i0)) = out1;

/* R = ((a+c) - (b+d)) / 2 */
R = __SHSUB16(R, T);

#ifndef ARM_MATH_BIG_ENDIAN

/* complex multiply R by C2 for the i1 result */
out1 = __SMUAD(C2, R) >> 16u;

out2 = __SMUSDX(C2, R);

#else

out1 = __SMUSDX(R, C2) >> 16u;

out2 = __SMUAD(C2, R);

#endif

/* re-read b */
T = _SIMD32_OFFSET(pSrc16 + (2u * i1));

/* store at i1 */
_SIMD32_OFFSET(pSrc16 + (2u * i1)) = ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

/* re-read d */
U = _SIMD32_OFFSET(pSrc16 + (2u * i3));

/* T = b - d */
T = __QSUB16(T, U);

#ifndef ARM_MATH_BIG_ENDIAN

/* halving cross add/sub keeps this stage scaled by an extra 1/2 */
R = __SHASX(S, T);

S = __SHSAX(S, T);

/* complex multiply S by C1 for the i2 result */
out1 = __SMUAD(C1, S) >> 16u;
out2 = __SMUSDX(C1, S);

#else

R = __SHSAX(S, T);

S = __SHASX(S, T);

out1 = __SMUSDX(S, C1) >> 16u;
out2 = __SMUAD(C1, S);

#endif

/* store at i2 */
_SIMD32_OFFSET(pSrc16 + (2u * i2)) = ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

#ifndef ARM_MATH_BIG_ENDIAN

/* complex multiply R by C3 for the i3 result */
out1 = __SMUAD(C3, R) >> 16u;
out2 = __SMUSDX(C3, R);

#else

out1 = __SMUSDX(R, C3) >> 16u;
out2 = __SMUAD(C3, R);

#endif

/* store at i3 */
_SIMD32_OFFSET(pSrc16 + (2u * i3)) = ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);
}
}

twidCoefModifier <<= 2u;
}

/* -------- Last stage -------- */

/* fftLen/4 groups of four consecutive complex samples; no twiddle
 * multiplications appear in this stage */
j = fftLen >> 2;

ptr1 = &pSrc16[0];

do
{

/* load the four packed samples of this group */
xaya = *__SIMD32(ptr1)++;

xbyb = *__SIMD32(ptr1)++;

xcyc = *__SIMD32(ptr1)++;

xdyd = *__SIMD32(ptr1)++;

/* R = a + c */
R = __QADD16(xaya, xcyc);

/* T = b + d */
T = __QADD16(xbyb, xdyd);

/* rewind 8 q15 values (= 4 complex samples) to overwrite in place */
ptr1 = ptr1 - 8u;

/* result 0: ((a+c) + (b+d)) / 2 */
*__SIMD32(ptr1)++ = __SHADD16(R, T);

/* T = b + d (recomputed; same value as above) */
T = __QADD16(xbyb, xdyd);

/* result 1: ((a+c) - (b+d)) / 2 */
*__SIMD32(ptr1)++ = __SHSUB16(R, T);

/* S = a - c */
S = __QSUB16(xaya, xcyc);

/* U = b - d */
U = __QSUB16(xbyb, xdyd);

#ifndef ARM_MATH_BIG_ENDIAN

/* results 2 and 3: halving cross add/sub combines (a-c) with the
 * swapped halves of (b-d) */
*__SIMD32(ptr1)++ = __SHSAX(S, U);

*__SIMD32(ptr1)++ = __SHASX(S, U);

#else

*__SIMD32(ptr1)++ = __SHASX(S, U);

*__SIMD32(ptr1)++ = __SHSAX(S, U);

#endif

}while(--j);

}
00579
00580
00590
00591
00592
00593
00594
00595
00596
00597
00598
00599
00600
00601
00602
00603
00604
00605
00606
00607
00608
00609
00610
00611
00612
00613
00614
00615
00616
00617
00618
00619
00620
00621
00622
00623
00624
00625
00626
00627
00628
00629
/**
 * @brief  Core in-place radix-4 butterfly kernel for the inverse Q15 complex
 *         FFT (packed 16-bit SIMD variant).
 *
 * @param[in,out] pSrc16            interleaved complex Q15 buffer (fftLen complex
 *                                  points), transformed in place
 * @param[in]     fftLen            transform length in complex points
 * @param[in]     pCoef16           interleaved Q15 twiddle-factor table (shared
 *                                  with the forward kernel)
 * @param[in]     twidCoefModifier  twiddle-table stride; multiplied by 4 per stage
 *
 * Structurally identical to arm_radix4_butterfly_q15: the differences are that
 * the twiddle multiplications use __SMUSD/__SMUADX instead of __SMUAD/__SMUSDX
 * (conjugate form of the forward kernel's multiply) and the QASX/QSAX and
 * SHASX/SHSAX roles are swapped, which inverts the rotation direction.  The
 * same scaling applies: inputs pre-scaled by 1/4 in stage 1, halving
 * operations in later stages.
 */
void arm_radix4_butterfly_inverse_q15(
q15_t * pSrc16,
uint32_t fftLen,
q15_t * pCoef16,
uint32_t twidCoefModifier)
{
q31_t R, S, T, U;               /* packed dual-16-bit butterfly intermediates */
q31_t C1, C2, C3, out1, out2;   /* packed twiddles and packed result halves */
uint32_t n1, n2, ic, i0, i1, i2, i3, j, k;
q15_t in;                       /* scratch used to sign-extend the low halfword */

q15_t *ptr1;                    /* walking pointer for the final stage */

q31_t xaya, xbyb, xcyc, xdyd;   /* packed samples a..d for the last stage */

/* -------- Stage 1 -------- */

/* n1 = span of one butterfly group; n2 = distance between its four inputs */
n2 = fftLen;
n1 = n2;

n2 >>= 2u;

/* twiddle-table index */
ic = 0u;

i0 = 0u;
j = n2;

/* Process all fftLen/4 butterflies of the first stage. */
do
{

/* the four inputs of this butterfly are n2 complex samples apart */
i1 = i0 + n2;
i2 = i1 + n2;
i3 = i2 + n2;

/* Read sample a and scale both 16-bit halves by 1/4 (sign-extended). */
T = _SIMD32_OFFSET(pSrc16 + (2u * i0));
in = ((int16_t) (T & 0xFFFF)) >> 2;
T = ((T >> 2) & 0xFFFF0000) | (in & 0xFFFF);

/* Read sample c, scaled by 1/4. */
S = _SIMD32_OFFSET(pSrc16 + (2u * i2));
in = ((int16_t) (S & 0xFFFF)) >> 2;
S = ((S >> 2) & 0xFFFF0000) | (in & 0xFFFF);

/* R = a + c  (saturating dual 16-bit add) */
R = __QADD16(T, S);

/* S = a - c */
S = __QSUB16(T, S);

/* Read sample b, scaled by 1/4. */
T = _SIMD32_OFFSET(pSrc16 + (2u * i1));
in = ((int16_t) (T & 0xFFFF)) >> 2;
T = ((T >> 2) & 0xFFFF0000) | (in & 0xFFFF);

/* Read sample d, scaled by 1/4. */
U = _SIMD32_OFFSET(pSrc16 + (2u * i3));
in = ((int16_t) (U & 0xFFFF)) >> 2;
U = ((U >> 2) & 0xFFFF0000) | (in & 0xFFFF);

/* T = b + d */
T = __QADD16(T, U);

/* result 0: ((a+c) + (b+d)) / 2 via halving add, written back at i0 */
_SIMD32_OFFSET(pSrc16 + (2u * i0)) = __SHADD16(R, T);

/* R = (a+c) - (b+d) */
R = __QSUB16(R, T);

/* packed twiddle (co2, si2) applied to the i1 output */
C2 = _SIMD32_OFFSET(pCoef16 + (4u * ic));

#ifndef ARM_MATH_BIG_ENDIAN

/* conjugate complex multiply R by C2 (__SMUSD/__SMUADX vs. the forward
 * kernel's __SMUAD/__SMUSDX) */
out1 = __SMUSD(C2, R) >> 16u;

out2 = __SMUADX(C2, R);

#else

out1 = __SMUADX(C2, R) >> 16u;

out2 = __SMUSD(-C2, R);

#endif

/* re-read b (scaled) for the remaining results */
T = _SIMD32_OFFSET(pSrc16 + (2u * i1));
in = ((int16_t) (T & 0xFFFF)) >> 2;
T = ((T >> 2) & 0xFFFF0000) | (in & 0xFFFF);

/* store at i1: pack out2's high halfword with out1's low halfword */
_SIMD32_OFFSET(pSrc16 + (2u * i1)) = (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

/* re-read d (scaled) */
U = _SIMD32_OFFSET(pSrc16 + (2u * i3));
in = ((int16_t) (U & 0xFFFF)) >> 2;
U = ((U >> 2) & 0xFFFF0000) | (in & 0xFFFF);

/* T = b - d */
T = __QSUB16(T, U);

#ifndef ARM_MATH_BIG_ENDIAN

/* cross add/subtract — QSAX/QASX roles are the reverse of the forward
 * kernel, flipping the sign of the j(b-d) term */
R = __QSAX(S, T);

S = __QASX(S, T);

#else

R = __QASX(S, T);

S = __QSAX(S, T);

#endif

/* packed twiddle (co1, si1) applied to the i2 output */
C1 = _SIMD32_OFFSET(pCoef16 + (2u * ic));

#ifndef ARM_MATH_BIG_ENDIAN

/* conjugate complex multiply S by C1 */
out1 = __SMUSD(C1, S) >> 16u;

out2 = __SMUADX(C1, S);

#else

out1 = __SMUADX(C1, S) >> 16u;

out2 = __SMUSD(-C1, S);

#endif

/* store at i2 */
_SIMD32_OFFSET(pSrc16 + (2u * i2)) = ((out2) & 0xFFFF0000) | ((out1) & 0x0000FFFF);

/* packed twiddle (co3, si3) applied to the i3 output */
C3 = _SIMD32_OFFSET(pCoef16 + (6u * ic));

#ifndef ARM_MATH_BIG_ENDIAN

/* conjugate complex multiply R by C3 */
out1 = __SMUSD(C3, R) >> 16u;

out2 = __SMUADX(C3, R);

#else

out1 = __SMUADX(C3, R) >> 16u;

out2 = __SMUSD(-C3, R);

#endif

/* store at i3 */
_SIMD32_OFFSET(pSrc16 + (2u * i3)) = ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

/* next twiddle set */
ic = ic + twidCoefModifier;

/* next butterfly */
i0 = i0 + 1u;

} while(--j);

/* -------- Middle stages -------- */

/* each stage samples the twiddle table 4x as sparsely as the previous one */
twidCoefModifier <<= 2u;

/* iterate stages until only the trivial last stage remains (k == 4) */
for (k = fftLen / 4u; k > 4u; k >>= 2u)
{

n1 = n2;
n2 >>= 2u;
ic = 0u;

/* loop over the n2 distinct twiddle sets of this stage */
for (j = 0u; j <= (n2 - 1u); j++)
{

/* fetch the three packed twiddles once per j */
C1 = _SIMD32_OFFSET(pCoef16 + (2u * ic));
C2 = _SIMD32_OFFSET(pCoef16 + (4u * ic));
C3 = _SIMD32_OFFSET(pCoef16 + (6u * ic));

ic = ic + twidCoefModifier;

/* apply this twiddle set to the matching butterfly of every group */
for (i0 = j; i0 < fftLen; i0 += n1)
{

i1 = i0 + n2;
i2 = i1 + n2;
i3 = i2 + n2;

/* read a and c (middle stages do no input pre-scaling) */
T = _SIMD32_OFFSET(pSrc16 + (2u * i0));

S = _SIMD32_OFFSET(pSrc16 + (2u * i2));

/* R = a + c */
R = __QADD16(T, S);

/* S = a - c */
S = __QSUB16(T, S);

/* read b and d */
T = _SIMD32_OFFSET(pSrc16 + (2u * i1));

U = _SIMD32_OFFSET(pSrc16 + (2u * i3));

/* T = b + d */
T = __QADD16(T, U);

/* result at i0: ((a+c)+(b+d))/4 — halving add, then a further /2 with
 * the low halfword sign-extended through 'in' */
out1 = __SHADD16(R, T);
in = ((int16_t) (out1 & 0xFFFF)) >> 1;
out1 = ((out1 >> 1) & 0xFFFF0000) | (in & 0xFFFF);
_SIMD32_OFFSET(pSrc16 + (2u * i0)) = out1;

/* R = ((a+c) - (b+d)) / 2 */
R = __SHSUB16(R, T);

#ifndef ARM_MATH_BIG_ENDIAN

/* conjugate complex multiply R by C2 for the i1 result */
out1 = __SMUSD(C2, R) >> 16u;

out2 = __SMUADX(C2, R);

#else

out1 = __SMUADX(R, C2) >> 16u;

out2 = __SMUSD(-C2, R);

#endif

/* re-read b */
T = _SIMD32_OFFSET(pSrc16 + (2u * i1));

/* store at i1 */
_SIMD32_OFFSET(pSrc16 + (2u * i1)) = ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

/* re-read d */
U = _SIMD32_OFFSET(pSrc16 + (2u * i3));

/* T = b - d */
T = __QSUB16(T, U);

#ifndef ARM_MATH_BIG_ENDIAN

/* halving cross add/sub — SHSAX/SHASX roles are the reverse of the
 * forward kernel */
R = __SHSAX(S, T);

S = __SHASX(S, T);

/* conjugate complex multiply S by C1 for the i2 result */
out1 = __SMUSD(C1, S) >> 16u;
out2 = __SMUADX(C1, S);

#else

R = __SHASX(S, T);

S = __SHSAX(S, T);

out1 = __SMUADX(S, C1) >> 16u;
out2 = __SMUSD(-C1, S);

#endif

/* store at i2 */
_SIMD32_OFFSET(pSrc16 + (2u * i2)) = ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

#ifndef ARM_MATH_BIG_ENDIAN

/* conjugate complex multiply R by C3 for the i3 result */
out1 = __SMUSD(C3, R) >> 16u;
out2 = __SMUADX(C3, R);

#else

out1 = __SMUADX(C3, R) >> 16u;
out2 = __SMUSD(-C3, R);

#endif

/* store at i3 */
_SIMD32_OFFSET(pSrc16 + (2u * i3)) = ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);
}
}

twidCoefModifier <<= 2u;
}

/* -------- Last stage -------- */

/* fftLen/4 groups of four consecutive complex samples; no twiddle
 * multiplications appear in this stage */
j = fftLen >> 2;

ptr1 = &pSrc16[0];

do
{

/* load the four packed samples of this group */
xaya = *__SIMD32(ptr1)++;

xbyb = *__SIMD32(ptr1)++;

xcyc = *__SIMD32(ptr1)++;

xdyd = *__SIMD32(ptr1)++;

/* R = a + c */
R = __QADD16(xaya, xcyc);

/* T = b + d */
T = __QADD16(xbyb, xdyd);

/* rewind 8 q15 values (= 4 complex samples) to overwrite in place */
ptr1 = ptr1 - 8u;

/* result 0: ((a+c) + (b+d)) / 2 */
*__SIMD32(ptr1)++ = __SHADD16(R, T);

/* T = b + d (recomputed; same value as above) */
T = __QADD16(xbyb, xdyd);

/* result 1: ((a+c) - (b+d)) / 2 */
*__SIMD32(ptr1)++ = __SHSUB16(R, T);

/* S = a - c */
S = __QSUB16(xaya, xcyc);

/* U = b - d */
U = __QSUB16(xbyb, xdyd);

#ifndef ARM_MATH_BIG_ENDIAN

/* results 2 and 3 — SHASX/SHSAX order is the reverse of the forward
 * kernel's last stage, giving the opposite rotation */
*__SIMD32(ptr1)++ = __SHASX(S, U);

*__SIMD32(ptr1)++ = __SHSAX(S, U);

#else

*__SIMD32(ptr1)++ = __SHSAX(S, U);

*__SIMD32(ptr1)++ = __SHASX(S, U);

#endif

}while(--j);

}
01078
01079
01080
01081
01082
01083
01084
01085
01086
01087
01088
/**
 * @brief  In-place bit-reversal reordering for Q15 complex FFT output.
 *
 * @param[in,out] pSrc16        interleaved complex Q15 buffer to reorder in place
 * @param[in]     fftLen        transform length in complex points
 * @param[in]     bitRevFactor  stride applied when walking the bit-reversal table
 *                              (selects the entries matching this fftLen)
 * @param[in]     pBitRevTab    precomputed bit-reversal index table
 *
 * Each complex sample is moved as one packed 32-bit word.  The loop visits
 * even indices i in the first half of the buffer; the partner index j comes
 * from the table and lags one iteration behind (j starts at 0 and is updated
 * at the bottom of the loop).  The `i < j` guard ensures each symmetric pair
 * is swapped exactly once.
 */
void arm_bitreversal_q15(
q15_t * pSrc16,
uint32_t fftLen,
uint16_t bitRevFactor,
uint16_t * pBitRevTab)
{
q31_t in;                          /* packed-sample swap temporary */
uint32_t fftLenBy2, fftLenBy2p1;
uint32_t i, j;

/* j lags one table read behind i (first iteration uses j = 0) */
j = 0u;
fftLenBy2 = fftLen / 2u;
fftLenBy2p1 = (fftLen / 2u) + 1u;

/* walk even indices of the first half of the buffer */
for (i = 0u; i <= (fftLenBy2 - 2u); i += 2u)
{
if(i < j)
{

/* swap the packed complex samples at i and j */
in = _SIMD32_OFFSET(pSrc16 +i * 2u);
_SIMD32_OFFSET(pSrc16 + i * 2u) = _SIMD32_OFFSET(pSrc16 + j * 2u);
_SIMD32_OFFSET(pSrc16 + j * 2u) = in;

/* swap the mirrored pair offset by fftLen/2 + 1 */
in = _SIMD32_OFFSET(pSrc16 + (i + fftLenBy2p1) * 2u);
_SIMD32_OFFSET(pSrc16 + (i + fftLenBy2p1) * 2u) = _SIMD32_OFFSET(pSrc16 + (j + fftLenBy2p1) * 2u);
_SIMD32_OFFSET(pSrc16 + (j + fftLenBy2p1) * 2u) = in;
}

/* unconditionally swap the odd partner: (i+1) with (j + fftLen/2) */
in = _SIMD32_OFFSET(pSrc16 + (i + 1u) * 2u);
_SIMD32_OFFSET(pSrc16 + (i + 1u) * 2u) = _SIMD32_OFFSET(pSrc16 + (j + fftLenBy2) * 2u);
_SIMD32_OFFSET(pSrc16 + (j + fftLenBy2) * 2u) = in;

/* fetch the bit-reversed partner index for the NEXT iteration */
j = *pBitRevTab;

/* advance the table by the stride matching this fftLen */
pBitRevTab += bitRevFactor;
}
}