Merge with upstream-master

Change-Id: Idee6f8eed7cc2646074cafb5e88ed808607b8fc4
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..378eac2
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+build
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..701a96c
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,6 @@
+[submodule "greenffts"]
+	path = greenffts
+	url = https://github.com/hayguen/greenffts.git
+[submodule "kissfft"]
+	path = kissfft
+	url = https://github.com/hayguen/kissfft.git
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..9b6bc10
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,295 @@
+cmake_minimum_required(VERSION 2.8)
+project(PRETTY_FAST_FFT)
+
+# smaller library size?
+option(USE_TYPE_FLOAT  "activate single precision 'float'?" ON)
+option(USE_TYPE_DOUBLE "activate 'double' precision float?" ON)
+
+# architecture/optimization options
+option(USE_SIMD        "use SIMD (SSE/AVX/NEON/ALTIVEC) CPU features?" ON)
+option(USE_SIMD_NEON   "force using NEON on ARM? (requires USE_SIMD)" OFF)
+option(USE_SCALAR_VECT "use 4-element vector scalar operations (if no other SIMD)" ON)
+
+# test options
+option(USE_BENCH_FFTW  "use (system-installed) FFTW3 in fft benchmark?" OFF)
+option(USE_BENCH_GREEN "use Green FFT in fft benchmark? - if present in subdir" ON)
+option(USE_BENCH_KISS  "use KissFFT in fft benchmark? - if present in subdir" ON)
+
+option(USE_DEBUG_ASAN  "use GCC's address sanitizer?" OFF)
+
+
+# C90 requires the gcc extensions for function attributes like always_inline
+# C99 provides the function attributes: no gcc extensions required
+set(CMAKE_C_STANDARD 99)
+set(CMAKE_C_EXTENSIONS OFF)
+
+set(CMAKE_CXX_STANDARD 98)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
+
+
+if ( (NOT USE_TYPE_FLOAT) AND (NOT USE_TYPE_DOUBLE) )
+  message(FATAL_ERROR "activate at least one of USE_TYPE_FLOAT or USE_TYPE_DOUBLE")
+endif()
+
+
+if (USE_DEBUG_ASAN)
+  set(ASANLIB "asan")
+else()
+  set(ASANLIB "")
+endif()
+
+
+if (USE_BENCH_GREEN)
+  if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/greenffts/CMakeLists.txt")
+    message(STATUS "found subdir greenffts")
+    set(PATH_GREEN "${CMAKE_CURRENT_LIST_DIR}/greenffts")
+    add_subdirectory( "${PATH_GREEN}" )
+  else()
+    message(WARNING "GreenFFT not found in subdir greenffts")
+  endif()
+endif()
+
+if (USE_BENCH_KISS)
+  # git submodule add https://github.com/hayguen/kissfft.git
+  if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/kissfft/CMakeLists.txt")
+    message(STATUS "found subdir kissfft")
+    set(PATH_KISS "${CMAKE_CURRENT_LIST_DIR}/kissfft")
+    add_subdirectory( "${PATH_KISS}" )
+  else()
+    message(WARNING "KissFFT not found in subdir kissfft")
+  endif()
+endif()
+
+
+########################################################################
+# select the release build type by default to get optimization flags
+########################################################################
+if(NOT CMAKE_BUILD_TYPE)
+   set(CMAKE_BUILD_TYPE "Release")
+   message(STATUS "Build type not specified: defaulting to release.")
+endif(NOT CMAKE_BUILD_TYPE)
+
+if ("${CMAKE_C_COMPILER_ID}" STREQUAL "MSVC")
+  # using Visual Studio C++
+  message(STATUS "INFO: detected MSVC: will not link math lib m")
+  set(MATHLIB "")
+else()
+  message(STATUS "INFO: detected NO MSVC: ${CMAKE_C_COMPILER_ID}: will link math lib m")
+  set(MATHLIB "m")
+endif()
+
+set( SIMD_FLOAT_HDRS simd/pf_float.h simd/pf_sse1_float.h simd/pf_altivec_float.h simd/pf_neon_float.h simd/pf_scalar_float.h )
+set( SIMD_DOUBLE_HDRS simd/pf_double.h simd/pf_avx_double.h simd/pf_scalar_double.h )
+
+if (USE_TYPE_FLOAT)
+  set( FLOAT_SOURCES pffft.c pffft.h ${SIMD_FLOAT_HDRS} )
+else()
+  set( FLOAT_SOURCES  )
+endif()
+
+
+if (USE_TYPE_DOUBLE)
+  set( DOUBLE_SOURCES pffft_double.c pffft_double.h ${SIMD_DOUBLE_HDRS} )
+else()
+  set( DOUBLE_SOURCES )
+endif()
+
+######################################################
+
+add_library(PFFFT STATIC ${FLOAT_SOURCES} ${DOUBLE_SOURCES} pffft_common.c pffft_priv_impl.h pffft.hpp )
+target_compile_definitions(PFFFT PRIVATE _USE_MATH_DEFINES)
+if (USE_SCALAR_VECT)
+  target_compile_definitions(PFFFT PRIVATE PFFFT_SCALVEC_ENABLED=1)
+endif()
+if (USE_DEBUG_ASAN)
+  target_compile_options(PFFFT PRIVATE "-fsanitize=address")
+endif()
+if (NOT USE_SIMD)
+  target_compile_definitions(PFFFT PRIVATE PFFFT_SIMD_DISABLE=1)
+endif()
+if (USE_SIMD AND USE_SIMD_NEON)
+  target_compile_definitions(PFFFT PRIVATE PFFFT_ENABLE_NEON=1)
+  target_compile_options(PFFFT PRIVATE "-mfpu=neon")
+endif()
+if (USE_SIMD AND USE_TYPE_DOUBLE)
+  if(WIN32)
+    set_property(SOURCE pffft_double.c PROPERTY COMPILE_FLAGS "/arch:AVX")
+  else()
+    set_property(SOURCE pffft_double.c PROPERTY COMPILE_FLAGS "-march=native")
+  endif()
+endif()
+target_link_libraries( PFFFT ${MATHLIB} )
+set_property(TARGET PFFFT APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES
+  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
+)
+
+######################################################
+
+add_library(FFTPACK STATIC fftpack.c fftpack.h)
+target_compile_definitions(FFTPACK PRIVATE _USE_MATH_DEFINES)
+target_link_libraries( FFTPACK ${MATHLIB} )
+set_property(TARGET FFTPACK APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES
+  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
+)
+
+######################################################
+
+if (USE_TYPE_FLOAT)
+  # only 'float' supported in PFFASTCONV
+  add_library(PFFASTCONV STATIC pffastconv.c pffastconv.h pffft.h )
+  target_compile_definitions(PFFASTCONV PRIVATE _USE_MATH_DEFINES)
+  if (USE_DEBUG_ASAN)
+    target_compile_options(PFFASTCONV PRIVATE "-fsanitize=address")
+  endif()
+  target_link_libraries( PFFASTCONV PFFFT ${ASANLIB} ${MATHLIB} )
+  set_property(TARGET PFFASTCONV APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES
+    $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
+  )
+endif()
+
+#######################################################
+
+if (USE_TYPE_FLOAT)
+  add_executable( test_pffft_float  test_pffft.c )
+  target_compile_definitions(test_pffft_float PRIVATE _USE_MATH_DEFINES)
+  target_compile_definitions(test_pffft_float PRIVATE PFFFT_ENABLE_FLOAT)
+  target_link_libraries( test_pffft_float  PFFFT ${ASANLIB} )
+endif()
+
+######################################################
+
+if (USE_TYPE_DOUBLE)
+  add_executable( test_pffft_double  test_pffft.c )
+  target_compile_definitions(test_pffft_double PRIVATE _USE_MATH_DEFINES)
+  target_compile_definitions(test_pffft_double PRIVATE PFFFT_ENABLE_DOUBLE)
+  target_link_libraries( test_pffft_double  PFFFT ${ASANLIB} )
+endif()
+
+######################################################
+
+add_executable( test_pffft_cpp test_pffft.cpp )
+target_compile_definitions(test_pffft_cpp PRIVATE _USE_MATH_DEFINES)
+if (USE_TYPE_FLOAT)
+  target_compile_definitions(test_pffft_cpp PRIVATE PFFFT_ENABLE_FLOAT)
+endif()
+if (USE_TYPE_DOUBLE)
+  target_compile_definitions(test_pffft_cpp PRIVATE PFFFT_ENABLE_DOUBLE)
+endif()
+target_link_libraries( test_pffft_cpp  PFFFT ${ASANLIB} )
+
+######################################################
+
+add_executable( test_pffft_cpp_11 test_pffft.cpp )
+target_compile_definitions(test_pffft_cpp_11 PRIVATE _USE_MATH_DEFINES)
+if (USE_TYPE_FLOAT)
+  target_compile_definitions(test_pffft_cpp_11 PRIVATE PFFFT_ENABLE_FLOAT)
+endif()
+if (USE_TYPE_DOUBLE)
+  target_compile_definitions(test_pffft_cpp_11 PRIVATE PFFFT_ENABLE_DOUBLE)
+endif()
+target_link_libraries( test_pffft_cpp_11  PFFFT ${ASANLIB} )
+
+set_property(TARGET test_pffft_cpp_11 PROPERTY CXX_STANDARD 11)
+set_property(TARGET test_pffft_cpp_11 PROPERTY CXX_STANDARD_REQUIRED ON)
+
+######################################################
+
+if (USE_TYPE_FLOAT)
+  add_executable(test_pffastconv   test_pffastconv.c
+    ${SIMD_FLOAT_HDRS} ${SIMD_DOUBLE_HDRS}
+  )
+  target_compile_definitions(test_pffastconv PRIVATE _USE_MATH_DEFINES)
+  if (USE_DEBUG_ASAN)
+    target_compile_options(test_pffastconv PRIVATE "-fsanitize=address")
+  endif()
+  if (NOT USE_SIMD)
+    target_compile_definitions(test_pffastconv PRIVATE PFFFT_SIMD_DISABLE=1)
+  endif()
+  if (USE_SIMD AND USE_SIMD_NEON)
+    target_compile_definitions(test_pffastconv PRIVATE PFFFT_ENABLE_NEON=1)
+    target_compile_options(test_pffastconv PRIVATE "-mfpu=neon")
+  endif()
+  target_link_libraries( test_pffastconv  PFFASTCONV ${ASANLIB} ${MATHLIB} )
+endif()
+
+######################################################
+
+if (USE_TYPE_FLOAT)
+  add_executable(bench_pffft_float   bench_pffft.c pffft.h fftpack.h)
+  target_compile_definitions(bench_pffft_float PRIVATE _USE_MATH_DEFINES)
+  target_compile_definitions(bench_pffft_float PRIVATE PFFFT_ENABLE_FLOAT)
+
+  target_link_libraries( bench_pffft_float  PFFFT FFTPACK ${ASANLIB} )
+
+  if (USE_BENCH_FFTW)
+    target_compile_definitions(bench_pffft_float PRIVATE HAVE_FFTW=1)
+    target_link_libraries(bench_pffft_float  fftw3f)
+  endif()
+
+  if (PATH_GREEN AND USE_BENCH_GREEN)
+    target_compile_definitions(bench_pffft_float PRIVATE HAVE_GREEN_FFTS=1)
+    target_link_libraries(bench_pffft_float  GreenFFT)
+  endif()
+
+  if (PATH_KISS AND USE_BENCH_KISS)
+    target_compile_definitions(bench_pffft_float PRIVATE HAVE_KISS_FFT=1)
+    target_link_libraries(bench_pffft_float  KissFFT)
+  endif()
+endif()
+
+if (USE_TYPE_DOUBLE)
+  add_executable(bench_pffft_double   bench_pffft.c pffft.h fftpack.h)
+  target_compile_definitions(bench_pffft_double PRIVATE _USE_MATH_DEFINES)
+  target_compile_definitions(bench_pffft_double PRIVATE PFFFT_ENABLE_DOUBLE)
+  target_link_libraries( bench_pffft_double  PFFFT ${ASANLIB} )
+
+  if (USE_BENCH_FFTW)
+    target_compile_definitions(bench_pffft_double PRIVATE HAVE_FFTW=1)
+    target_link_libraries(bench_pffft_double  fftw3)
+  endif()
+endif()
+
+######################################################
+
+enable_testing()
+
+if (USE_TYPE_FLOAT)
+
+  add_test(NAME bench_pffft_pow2
+    COMMAND "${CMAKE_CURRENT_BINARY_DIR}/bench_pffft_float"
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+  )
+
+  add_test(NAME bench_pffft_non2
+    COMMAND "${CMAKE_CURRENT_BINARY_DIR}/bench_pffft_float" "--non-pow2"
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+  )
+
+  add_test(NAME bench_plots
+    COMMAND bash "-c" "${CMAKE_CURRENT_SOURCE_DIR}/plots.sh"
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+  )
+
+  add_test(NAME test_pfconv_lens_symetric
+    COMMAND "${CMAKE_CURRENT_BINARY_DIR}/test_pffastconv" "--no-bench" "--quick" "--sym"
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+  )
+
+  add_test(NAME test_pfconv_lens_non_sym
+    COMMAND "${CMAKE_CURRENT_BINARY_DIR}/test_pffastconv" "--no-bench" "--quick"
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+  )
+
+  add_test(NAME bench_pfconv_symetric
+    COMMAND "${CMAKE_CURRENT_BINARY_DIR}/test_pffastconv" "--no-len" "--sym"
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+  )
+
+  add_test(NAME bench_pfconv_non_sym
+    COMMAND "${CMAKE_CURRENT_BINARY_DIR}/test_pffastconv" "--no-len"
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+  )
+
+endif()
+
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000..1ee09cd
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,38 @@
+
+Copyright (c) 2020  Dario Mambro ( dario.mambro@gmail.com )
+Copyright (c) 2019  Hayati Ayguen ( h_ayguen@web.de )
+Copyright (c) 2013  Julien Pommier ( pommier@modartt.com )
+
+Copyright (c) 2004 the University Corporation for Atmospheric
+Research ("UCAR"). All rights reserved. Developed by NCAR's
+Computational and Information Systems Laboratory, UCAR,
+www.cisl.ucar.edu.
+
+Redistribution and use of the Software in source and binary forms,
+with or without modification, is permitted provided that the
+following conditions are met:
+
+- Neither the names of NCAR's Computational and Information Systems
+Laboratory, the University Corporation for Atmospheric Research,
+nor the names of its sponsors or contributors may be used to
+endorse or promote products derived from this Software without
+specific prior written permission.  
+
+- Redistributions of source code must retain the above copyright
+notices, this list of conditions, and the disclaimer below.
+
+- Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions, and the disclaimer below in the
+documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..27dd530
--- /dev/null
+++ b/README.md
@@ -0,0 +1,124 @@
+# PFFFT: a pretty fast FFT and fast convolution with PFFASTCONV
+
+## TL;DR
+
+PFFFT does 1D Fast Fourier Transforms of single precision real and
+complex vectors. It tries to do it fast, it tries to be correct, and it
+tries to be small. Computations take advantage of SSE1 instructions
+on x86 cpus, Altivec on powerpc cpus, and NEON on ARM cpus. The
+license is BSD-like.
+
+
+PFFASTCONV does fast convolution (FIR filtering) of single precision
+real vectors, utilizing the PFFFT library. The license is BSD-like.
+
+
+## Why does it exist:
+
+I was in search of a good performing FFT library, preferably very
+small and with a very liberal license.
+
+When one says "fft library", FFTW ("Fastest Fourier Transform in the
+West") is probably the first name that comes to mind -- I guess that
+99% of open-source projects that need an FFT do use FFTW, and are happy
+with it. However, it is quite a large library, which does everything
+fft related (2d transforms, 3d transforms, other transformations such
+as the discrete cosine, or fast Hartley). And it is licensed under the
+GNU GPL, which means that it cannot be used in non open-source
+products.
+
+An alternative to FFTW that is really small is the venerable FFTPACK
+v4, which is available on NETLIB. A more recent version (v5) exists,
+but it is larger as it deals with multi-dimensional transforms. This
+is a library written in FORTRAN 77, a language now considered a bit
+antiquated by many. FFTPACKv4 was written in 1985 by Dr Paul
+Swarztrauber of NCAR, more than 25 years ago! And despite its age,
+benchmarks show that it is still a very good performing FFT
+library, see for example the 1d single precision benchmarks
+[here](http://www.fftw.org/speed/opteron-2.2GHz-32bit/). It is however not
+competitive with the fastest ones, such as FFTW, Intel MKL, AMD ACML,
+or Apple vDSP. The reason is that those libraries take advantage of
+the SSE SIMD instructions available on Intel CPUs, present since the
+days of the Pentium III. These instructions deal with small vectors
+of 4 floats at a time, instead of a single float for a traditional
+FPU, so when using these instructions one may expect a 4-fold
+performance improvement.
+
+The idea was to take this fortran fftpack v4 code, translate it to C,
+modify it to deal with those SSE instructions, and check that the
+final performance is not completely ridiculous when compared to other
+SIMD FFT libraries. Translation to C was performed with [f2c](
+http://www.netlib.org/f2c/). The resulting file was edited a bit in
+order to remove the thousands of gotos that were introduced by
+f2c. You will find the fftpack.h and fftpack.c sources in the
+repository; this is a complete translation of [fftpack](
+http://www.netlib.org/fftpack/), with the discrete cosine transform
+and the test program. There is no license information in the netlib
+repository, but it was confirmed to me by the fftpack v5 curators that
+the [same terms do apply to fftpack
+v4](http://www.cisl.ucar.edu/css/software/fftpack5/ftpk.html). This is a
+"BSD-like" license; it is compatible with proprietary projects.
+
+Adapting fftpack to deal with the SIMD 4-element vectors instead of
+scalar single precision numbers was more complex than I originally
+thought, especially with the real transforms, and I ended up writing
+more code than I planned.
+
+
+## The code:
+
+### Good old C:
+The FFT API is very simple; just make sure that you read the comments in `pffft.h`.
+
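+For a quick orientation, here is a minimal sketch of a real forward/backward
+round trip. The `fft_roundtrip()` helper is hypothetical; the pffft calls
+are the ones exercised in `test_pffft.c` and `bench_pffft.c`:
+
+```
+#include "pffft.h"
+
+/* sketch: N must be a multiple of 32 for real FFTs (16 for complex) */
+void fft_roundtrip(int N) {
+  PFFFT_Setup *s = pffft_new_setup(N, PFFFT_REAL);  /* NULL if N unsupported */
+  float *in  = pffft_aligned_malloc(N * sizeof(float));
+  float *out = pffft_aligned_malloc(N * sizeof(float));
+  /* ... fill in[0..N-1] ... */
+  /* work = NULL: pffft then uses the stack, as the bundled tests do */
+  pffft_transform_ordered(s, in, out, NULL, PFFFT_FORWARD);
+  pffft_transform_ordered(s, out, in, NULL, PFFFT_BACKWARD);
+  /* the transforms are unnormalized: in[] now holds the input scaled by N */
+  pffft_destroy_setup(s);
+  pffft_aligned_free(in);
+  pffft_aligned_free(out);
+}
+```
+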
+The fast convolution API is also very simple; just make sure that you read
+the comments in `pffastconv.h`.
+
+### C++:
+A simple C++ wrapper is available in `pffft.hpp`.
+
+
+### Git:
+This archive's sources can be downloaded with git, including the submodules:
+```
+git clone --recursive https://github.com/hayguen/pffft.git
+```
+
+With `--recursive`, the submodules for Green and Kiss-FFT are also fetched,
+so they can be used in the benchmark. You can also omit the `--recursive` option.
+
+For retrieving the submodules later:
+```
+git submodule update --init
+```
+
+
+## CMake:
+There's now CMake support to build the static libraries `libPFFFT.a`
+and `libPFFASTCONV.a` from the source files, plus the additional
+`libFFTPACK.a` library. The latter's sources are in the repository
+anyway, for the benchmark.
+
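+A typical out-of-source build could look like this (the options are the
+ones defined at the top of `CMakeLists.txt`):
+
+```
+mkdir build && cd build
+cmake -DUSE_BENCH_FFTW=ON ..   # e.g. also: USE_TYPE_FLOAT, USE_TYPE_DOUBLE, USE_SIMD
+make
+ctest                          # runs the bench/convolution tests registered via add_test()
+```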
+
+## Comparison with other FFTs:
+
+The idea was not to break speed records, but to get a decently fast
+fft that is at least 50% as fast as the fastest FFT -- especially on
+the slowest computers. I'm more focused on getting the best performance
+on slow cpus (Atom, Intel Core 1, old Athlons, ARM Cortex-A9...) than
+on getting top performance on today's fastest cpus.
+
+It can be used in a real-time context as the fft functions do not
+perform any memory allocation -- that is why they accept a 'work'
+array in their arguments.
+
+It is also a bit focused on performing 1D convolutions; that is why it
+provides "unordered" FFTs and a Fourier-domain convolution
+operation.
+
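+As a rough sketch of that frequency-domain workflow (mirroring what
+`pffft_validate_N()` in `bench_pffft.c` does; `s`, `work` and the buffer
+names are placeholders):
+
+```
+/* circular convolution of a and b, both real, length N */
+pffft_transform(s, a, A, work, PFFFT_FORWARD);      /* unordered spectra */
+pffft_transform(s, b, B, work, PFFFT_FORWARD);
+memset(AB, 0, N * sizeof(float));
+pffft_zconvolve_accumulate(s, A, B, AB, 1.0f / N);  /* AB += (A*B) * scaling */
+pffft_transform(s, AB, ab, work, PFFFT_BACKWARD);   /* ab = a (*) b */
+```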
+
+## Benchmark results
+
+The benchmark results are stored in a separate git-repository:
+See [https://github.com/hayguen/pffft_benchmarks](https://github.com/hayguen/pffft_benchmarks).
+
+This is to keep the sources small.
+
diff --git a/bench_all.sh b/bench_all.sh
new file mode 100755
index 0000000..37be94c
--- /dev/null
+++ b/bench_all.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+FFTW="ON"
+CMAKEOPT=""
+# CMAKEOPT="-DUSE_SIMD_NEON=ON"
+
+if [ ! -z "$1" ]; then
+  FFTW="$1"
+fi
+
+if [ ! -d build ]; then
+  mkdir build
+  cd build
+else
+  cd build
+  make clean
+  rm -f *.csv *.txt *.png
+fi
+
+echo "" >ToolChain.cmake
+if [ -z "${GCC_WITH_CMAKE}" ]; then
+  GCC_WITH_CMAKE="gcc"
+else
+  GCCPATH=$(basename "${GCC_WITH_CMAKE}")
+  echo "SET(CMAKE_C_COMPILER     ${GCCPATH})" >>ToolChain.cmake
+fi
+if [ -z "${GPP_WITH_CMAKE}" ]; then
+  GPP_WITH_CMAKE="g++"
+else
+  GPPPATH=$(basename "${GPP_WITH_CMAKE}")
+  echo "SET(CMAKE_CXX_COMPILER   ${GPPPATH})" >>ToolChain.cmake
+fi
+
+
+#cmake -DCMAKE_TOOLCHAIN_FILE=ToolChain.cmake -DUSE_BENCH_FFTW=${FFTW} -DUSE_SIMD=OFF ${CMAKEOPT} ../
+#make clean
+#make
+#echo -e "\n\nrunning without simd (==scalar) .."
+#time ctest -V
+
+cmake -DCMAKE_TOOLCHAIN_FILE=ToolChain.cmake -DUSE_BENCH_FFTW=${FFTW} -DUSE_SIMD=ON ${CMAKEOPT} ../
+#make clean
+make
+echo -e "\n\nrunning with simd .."
+time ctest -V
+
+
+echo "$@" >infos.txt
+echo "FFTW=${FFTW}" >>infos.txt
+echo "CMAKEOPT=${CMAKEOPT}" >>infos.txt
+
+
+echo "" >>infos.txt
+echo "${GCC_WITH_CMAKE} --version:" >>infos.txt
+${GCC_WITH_CMAKE} --version &>>infos.txt
+
+echo "" >>infos.txt
+echo "${GPP_WITH_CMAKE} --version:" >>infos.txt
+${GPP_WITH_CMAKE} --version &>>infos.txt
+
+
+echo "" >>infos.txt
+echo "lscpu:" >>infos.txt
+lscpu >>infos.txt
+
+echo "" >>infos.txt
+echo "lsb_release -a" >>infos.txt
+lsb_release -a &>>infos.txt
+
+echo "" >>infos.txt
+echo "cat /etc/*-release" >>infos.txt
+cat /etc/*-release &>>infos.txt
+
+
+echo "" >>infos.txt
+echo "cat /proc/cpuinfo:" >>infos.txt
+cat /proc/cpuinfo >>infos.txt
+
+
+tar zcvf ../pffft_bench_${GCCPATH}_${HOSTNAME}.tar.gz --exclude=CMakeCache.txt *.csv *.txt *.png
+echo "all benchmark results in pffft_bench_${GCCPATH}_${HOSTNAME}.tar.gz"
diff --git a/bench_pffft.c b/bench_pffft.c
new file mode 100644
index 0000000..49d4faa
--- /dev/null
+++ b/bench_pffft.c
@@ -0,0 +1,1207 @@
+/*
+  Copyright (c) 2013 Julien Pommier.
+  Copyright (c) 2019  Hayati Ayguen ( h_ayguen@web.de )
+
+  Small test & bench for PFFFT, comparing its performance with the scalar FFTPACK, FFTW, and Apple vDSP
+
+  How to build: 
+
+  on linux, with fftw3:
+  gcc -o test_pffft -DHAVE_FFTW -msse -mfpmath=sse -O3 -Wall -W pffft.c test_pffft.c fftpack.c -L/usr/local/lib -I/usr/local/include/ -lfftw3f -lm
+
+  on macos, without fftw3:
+  clang -o test_pffft -DHAVE_VECLIB -O3 -Wall -W pffft.c test_pffft.c fftpack.c -L/usr/local/lib -I/usr/local/include/ -framework Accelerate
+
+  on macos, with fftw3:
+  clang -o test_pffft -DHAVE_FFTW -DHAVE_VECLIB -O3 -Wall -W pffft.c test_pffft.c fftpack.c -L/usr/local/lib -I/usr/local/include/ -lfftw3f -framework Accelerate
+
+  as alternative: replace clang by gcc.
+
+  on windows, with visual c++:
+  cl /Ox -D_USE_MATH_DEFINES /arch:SSE test_pffft.c pffft.c fftpack.c
+  
+  build without SIMD instructions:
+  gcc -o test_pffft -DPFFFT_SIMD_DISABLE -O3 -Wall -W pffft.c test_pffft.c fftpack.c -lm
+
+ */
+
+#define CONCAT_TOKENS(A, B)  A ## B
+
+#ifdef PFFFT_ENABLE_FLOAT
+#include "pffft.h"
+
+typedef float pffft_scalar;
+typedef PFFFT_Setup PFFFT_SETUP;
+#define PFFFT_FUNC(F)  CONCAT_TOKENS(pffft_, F)
+
+#else
+/*
+Note: adapted for double precision dynamic range version.
+*/
+#include "pffft_double.h"
+
+typedef double pffft_scalar;
+typedef PFFFTD_Setup PFFFT_SETUP;
+#define PFFFT_FUNC(F)  CONCAT_TOKENS(pffftd_, F)
+#endif
+
+
+#ifdef PFFFT_ENABLE_FLOAT
+
+#include "fftpack.h"
+
+#ifdef HAVE_GREEN_FFTS
+#include "fftext.h"
+#endif
+
+#ifdef HAVE_KISS_FFT
+#include <kiss_fft.h>
+#include <kiss_fftr.h>
+#endif
+
+#endif
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <assert.h>
+#include <string.h>
+
+#ifdef HAVE_SYS_TIMES
+#  include <sys/times.h>
+#  include <unistd.h>
+#endif
+
+#ifdef HAVE_VECLIB
+#  include <Accelerate/Accelerate.h>
+#endif
+
+#ifdef HAVE_FFTW
+#  include <fftw3.h>
+
+#ifdef PFFFT_ENABLE_FLOAT
+typedef fftwf_plan FFTW_PLAN;
+typedef fftwf_complex FFTW_COMPLEX;
+#define FFTW_FUNC(F)  CONCAT_TOKENS(fftwf_, F)
+#else
+typedef fftw_plan FFTW_PLAN;
+typedef fftw_complex FFTW_COMPLEX;
+#define FFTW_FUNC(F)  CONCAT_TOKENS(fftw_, F)
+#endif
+
+#endif /* HAVE_FFTW */
+
+#ifndef M_LN2
+  #define M_LN2   0.69314718055994530942  /* log_e 2 */
+#endif
+
+
+#define NUM_FFT_ALGOS  8
+enum {
+  ALGO_FFTPACK = 0,
+  ALGO_VECLIB,
+  ALGO_FFTW_ESTIM,
+  ALGO_FFTW_AUTO,
+  ALGO_GREEN,
+  ALGO_KISS,
+  ALGO_PFFFT_U, /* = 6 */
+  ALGO_PFFFT_O  /* = 7 */
+};
+
+#define NUM_TYPES      7
+enum {
+  TYPE_PREP = 0,         /* time for preparation in ms */
+  TYPE_DUR_NS = 1,       /* time per fft in ns */
+  TYPE_DUR_FASTEST = 2,  /* relative time to fastest */
+  TYPE_REL_PFFFT = 3,    /* relative time to ALGO_PFFFT */
+  TYPE_ITER = 4,         /* # of iterations in measurement */
+  TYPE_MFLOPS = 5,       /* MFlops/sec */
+  TYPE_DUR_TOT = 6       /* test duration in sec */
+};
+/* double tmeas[NUM_TYPES][NUM_FFT_ALGOS]; */
+
+const char * algoName[NUM_FFT_ALGOS] = {
+  "FFTPack      ",
+  "vDSP (vec)   ",
+  "FFTW F(estim)",
+  "FFTW F(auto) ",
+  "Green        ",
+  "Kiss         ",
+  "PFFFT-U(simd)",  /* unordered */
+  "PFFFT (simd) "   /* ordered */
+};
+
+
+int compiledInAlgo[NUM_FFT_ALGOS] = {
+#ifdef PFFFT_ENABLE_FLOAT
+  1, /* "FFTPack    " */
+#else
+  0, /* "FFTPack    " */
+#endif
+#if defined(HAVE_VECLIB) && defined(PFFFT_ENABLE_FLOAT)
+  1, /* "vDSP (vec) " */
+#else
+  0,
+#endif
+#if defined(HAVE_FFTW)
+  1, /* "FFTW(estim)" */
+  1, /* "FFTW (auto)" */
+#else
+  0, 0,
+#endif
+#if defined(HAVE_GREEN_FFTS) && defined(PFFFT_ENABLE_FLOAT)
+  1, /* "Green      " */
+#else
+  0,
+#endif
+#if defined(HAVE_KISS_FFT) && defined(PFFFT_ENABLE_FLOAT)
+  1, /* "Kiss       " */
+#else
+  0,
+#endif
+  1, /* "PFFFT_U    " */
+  1  /* "PFFFT_O    " */
+};
+
+const char * algoTableHeader[NUM_FFT_ALGOS][2] = {
+{ "| real FFTPack ", "| cplx FFTPack " },
+{ "|  real   vDSP ", "|  cplx   vDSP " },
+{ "|real FFTWestim", "|cplx FFTWestim" },
+{ "|real FFTWauto ", "|cplx FFTWauto " },
+{ "|  real  Green ", "|  cplx  Green " },
+{ "|  real   Kiss ", "|  cplx   Kiss " },
+{ "| real PFFFT-U ", "| cplx PFFFT-U " },
+{ "|  real  PFFFT ", "|  cplx  PFFFT " } };
+
+const char * typeText[NUM_TYPES] = {
+  "preparation in ms",
+  "time per fft in ns",
+  "relative to fastest",
+  "relative to pffft",
+  "measured_num_iters",
+  "mflops",
+  "test duration in sec"
+};
+
+const char * typeFilenamePart[NUM_TYPES] = {
+  "1-preparation-in-ms",
+  "2-timePerFft-in-ns",
+  "3-rel-fastest",
+  "4-rel-pffft",
+  "5-num-iter",
+  "6-mflops",
+  "7-duration-in-sec"
+};
+
+#define SAVE_ALL_TYPES  0
+
+const int saveType[NUM_TYPES] = {
+  1, /* "1-preparation-in-ms" */
+  0, /* "2-timePerFft-in-ns"  */
+  0, /* "3-rel-fastest"       */
+  1, /* "4-rel-pffft"         */
+  1, /* "5-num-iter"          */
+  1, /* "6-mflops"            */
+  1, /* "7-duration-in-sec"   */
+};
+
+
+#define MAX(x,y) ((x)>(y)?(x):(y))
+#define MIN(x,y) ((x)<(y)?(x):(y))
+
+unsigned Log2(unsigned v) {
+  /* we don't need speed records .. obvious way is good enough */
+  /* https://graphics.stanford.edu/~seander/bithacks.html#IntegerLogObvious */
+  /* Find the log base 2 of an integer with the MSB N set in O(N) operations (the obvious way):
+   * unsigned v: 32-bit word to find the log base 2 of */
+  unsigned r = 0; /* r will be lg(v) */
+  while (v >>= 1)
+  {
+    r++;
+  }
+  return r;
+}
+
+
+double frand() {
+  return rand()/(double)RAND_MAX;
+}
+
+#if defined(HAVE_SYS_TIMES)
+  inline double uclock_sec(void) {
+    static double ttclk = 0.;
+    struct tms t;
+    if (ttclk == 0.)
+      ttclk = sysconf(_SC_CLK_TCK);
+    times(&t);
+    /* use only the user time of this process - not realtime, which depends on OS-scheduler .. */
+    return ((double)t.tms_utime) / ttclk;
+  }
+#else
+  double uclock_sec(void) {
+    return (double)clock() / (double)CLOCKS_PER_SEC;
+  }
+#endif
+
+
+/* compare results with the regular fftpack */
+void pffft_validate_N(int N, int cplx) {
+
+#ifdef PFFFT_ENABLE_FLOAT
+
+  int Nfloat = N*(cplx?2:1);
+  int Nbytes = Nfloat * sizeof(pffft_scalar);
+  float *ref, *in, *out, *tmp, *tmp2;
+  PFFFT_SETUP *s = PFFFT_FUNC(new_setup)(N, cplx ? PFFFT_COMPLEX : PFFFT_REAL);
+  int pass;
+
+
+  if (!s) { printf("Skipping N=%d, not supported\n", N); return; }
+  ref = PFFFT_FUNC(aligned_malloc)(Nbytes);
+  in = PFFFT_FUNC(aligned_malloc)(Nbytes);
+  out = PFFFT_FUNC(aligned_malloc)(Nbytes);
+  tmp = PFFFT_FUNC(aligned_malloc)(Nbytes);
+  tmp2 = PFFFT_FUNC(aligned_malloc)(Nbytes);
+
+  for (pass=0; pass < 2; ++pass) {
+    float ref_max = 0;
+    int k;
+    /* printf("N=%d pass=%d cplx=%d\n", N, pass, cplx); */
+    /* compute reference solution with FFTPACK */
+    if (pass == 0) {
+      float *wrk = malloc(2*Nbytes+15*sizeof(pffft_scalar));
+      for (k=0; k < Nfloat; ++k) {
+        ref[k] = in[k] = frand()*2-1; 
+        out[k] = 1e30;
+      }
+      if (!cplx) {
+        rffti(N, wrk);
+        rfftf(N, ref, wrk);
+        /* use our ordering for real ffts instead of the one of fftpack */
+        {
+          float refN=ref[N-1];
+          for (k=N-2; k >= 1; --k) ref[k+1] = ref[k]; 
+          ref[1] = refN;
+        }
+      } else {
+        cffti(N, wrk);
+        cfftf(N, ref, wrk);
+      }
+      free(wrk);
+    }
+
+    for (k = 0; k < Nfloat; ++k) ref_max = MAX(ref_max, fabs(ref[k]));
+
+      
+    /* pass 0 : non canonical ordering of transform coefficients */
+    if (pass == 0) {
+      /* test forward transform, with different input / output */
+      PFFFT_FUNC(transform)(s, in, tmp, 0, PFFFT_FORWARD);
+      memcpy(tmp2, tmp, Nbytes);
+      memcpy(tmp, in, Nbytes);
+      PFFFT_FUNC(transform)(s, tmp, tmp, 0, PFFFT_FORWARD);
+      for (k = 0; k < Nfloat; ++k) {
+        assert(tmp2[k] == tmp[k]);
+      }
+
+      /* test reordering */
+      PFFFT_FUNC(zreorder)(s, tmp, out, PFFFT_FORWARD);
+      PFFFT_FUNC(zreorder)(s, out, tmp, PFFFT_BACKWARD);
+      for (k = 0; k < Nfloat; ++k) {
+        assert(tmp2[k] == tmp[k]);
+      }
+      PFFFT_FUNC(zreorder)(s, tmp, out, PFFFT_FORWARD);
+    } else {
+      /* pass 1 : canonical ordering of transform coeffs. */
+      PFFFT_FUNC(transform_ordered)(s, in, tmp, 0, PFFFT_FORWARD);
+      memcpy(tmp2, tmp, Nbytes);
+      memcpy(tmp, in, Nbytes);
+      PFFFT_FUNC(transform_ordered)(s, tmp, tmp, 0, PFFFT_FORWARD);
+      for (k = 0; k < Nfloat; ++k) {
+        assert(tmp2[k] == tmp[k]);
+      }
+      memcpy(out, tmp, Nbytes);
+    }
+
+    {
+      for (k=0; k < Nfloat; ++k) {
+        if (!(fabs(ref[k] - out[k]) < 1e-3*ref_max)) {
+          printf("%s forward PFFFT mismatch found for N=%d\n", (cplx?"CPLX":"REAL"), N);
+          exit(1);
+        }
+      }
+
+      if (pass == 0) PFFFT_FUNC(transform)(s, tmp, out, 0, PFFFT_BACKWARD);
+      else   PFFFT_FUNC(transform_ordered)(s, tmp, out, 0, PFFFT_BACKWARD);
+      memcpy(tmp2, out, Nbytes);
+      memcpy(out, tmp, Nbytes);
+      if (pass == 0) PFFFT_FUNC(transform)(s, out, out, 0, PFFFT_BACKWARD);
+      else   PFFFT_FUNC(transform_ordered)(s, out, out, 0, PFFFT_BACKWARD);
+      for (k = 0; k < Nfloat; ++k) {
+        assert(tmp2[k] == out[k]);
+        out[k] *= 1.f/N;
+      }
+      for (k = 0; k < Nfloat; ++k) {
+        if (fabs(in[k] - out[k]) > 1e-3 * ref_max) {
+          printf("pass=%d, %s IFFFT does not match for N=%d\n", pass, (cplx?"CPLX":"REAL"), N); break;
+          exit(1);
+        }
+      }
+    }
+
+    /* quick test of the circular convolution in fft domain */
+    {
+      float conv_err = 0, conv_max = 0;
+
+      PFFFT_FUNC(zreorder)(s, ref, tmp, PFFFT_FORWARD);
+      memset(out, 0, Nbytes);
+      PFFFT_FUNC(zconvolve_accumulate)(s, ref, ref, out, 1.0);
+      PFFFT_FUNC(zreorder)(s, out, tmp2, PFFFT_FORWARD);
+      
+      for (k=0; k < Nfloat; k += 2) {
+        float ar = tmp[k], ai=tmp[k+1];
+        if (cplx || k > 0) {
+          tmp[k] = ar*ar - ai*ai;
+          tmp[k+1] = 2*ar*ai;
+        } else {
+          tmp[0] = ar*ar;
+          tmp[1] = ai*ai;
+        }
+      }
+      
+      for (k=0; k < Nfloat; ++k) {
+        float d = fabs(tmp[k] - tmp2[k]), e = fabs(tmp[k]);
+        if (d > conv_err) conv_err = d;
+        if (e > conv_max) conv_max = e;
+      }
+      if (conv_err > 1e-5*conv_max) {
+        printf("zconvolve error ? %g %g\n", conv_err, conv_max); exit(1);
+      }
+    }
+
+  }
+
+  printf("%s PFFFT is OK for N=%d\n", (cplx?"CPLX":"REAL"), N); fflush(stdout);
+
+  PFFFT_FUNC(destroy_setup)(s);
+  PFFFT_FUNC(aligned_free)(ref);
+  PFFFT_FUNC(aligned_free)(in);
+  PFFFT_FUNC(aligned_free)(out);
+  PFFFT_FUNC(aligned_free)(tmp);
+  PFFFT_FUNC(aligned_free)(tmp2);
+
+#endif /* PFFFT_ENABLE_FLOAT */
+}
+
+void pffft_validate(int cplx) {
+  static int Ntest[] = { 16, 32, 64, 96, 128, 160, 192, 256, 288, 384, 5*96, 512, 576, 5*128, 800, 864, 1024, 2048, 2592, 4000, 4096, 12000, 36864, 0};
+  int k;
+  for (k = 0; Ntest[k]; ++k) {
+    int N = Ntest[k];
+    if (N == 16 && !cplx) continue;
+    pffft_validate_N(N, cplx);
+  }
+}
+
+int array_output_format = 1;
+
+
+void print_table(const char *txt, FILE *tableFile) {
+  fprintf(stdout, "%s", txt);
+  if (tableFile && tableFile != stdout)
+    fprintf(tableFile, "%s", txt);
+}
+
+void print_table_flops(float mflops, FILE *tableFile) {
+  fprintf(stdout, "|%11.0f   ", mflops);
+  if (tableFile && tableFile != stdout)
+    fprintf(tableFile, "|%11.0f   ", mflops);
+}
+
+void print_table_fftsize(int N, FILE *tableFile) {
+  fprintf(stdout, "|%9d  ", N);
+  if (tableFile && tableFile != stdout)
+    fprintf(tableFile, "|%9d  ", N);
+}
+
+double show_output(const char *name, int N, int cplx, float flops, float t0, float t1, int max_iter, FILE *tableFile) {
+  double T = (double)(t1-t0)/2/max_iter * 1e9;
+  float mflops = flops/1e6/(t1 - t0 + 1e-16);
+  if (array_output_format) {
+    if (flops != -1)
+      print_table_flops(mflops, tableFile);
+    else
+      print_table("|        n/a   ", tableFile);
+  } else {
+    if (flops != -1) {
+      printf("N=%5d, %s %16s : %6.0f MFlops [t=%6.0f ns, %d runs]\n", N, (cplx?"CPLX":"REAL"), name, mflops, (t1-t0)/2/max_iter * 1e9, max_iter);
+    }
+  }
+  fflush(stdout);
+  return T;
+}
+
+double cal_benchmark(int N, int cplx) {
+  const int log2N = Log2(N);
+  int Nfloat = (cplx ? N*2 : N);
+  int Nbytes = Nfloat * sizeof(pffft_scalar);
+  pffft_scalar *X = PFFFT_FUNC(aligned_malloc)(Nbytes), *Y = PFFFT_FUNC(aligned_malloc)(Nbytes), *Z = PFFFT_FUNC(aligned_malloc)(Nbytes);
+  double t0, t1, tstop, T, nI;
+  int k, iter;
+
+  assert( PFFFT_FUNC(is_power_of_two)(N) );
+  for (k = 0; k < Nfloat; ++k) {
+    X[k] = sqrtf(k+1);
+  }
+
+  /* PFFFT-U (unordered) benchmark */
+  PFFFT_SETUP *s = PFFFT_FUNC(new_setup)(N, cplx ? PFFFT_COMPLEX : PFFFT_REAL);
+  assert(s);
+  iter = 0;
+  t0 = uclock_sec();
+  tstop = t0 + 0.25;  /* benchmark duration: 250 ms */
+  do {
+    for ( k = 0; k < 512; ++k ) {
+      PFFFT_FUNC(transform)(s, X, Z, Y, PFFFT_FORWARD);
+      PFFFT_FUNC(transform)(s, X, Z, Y, PFFFT_BACKWARD);
+      ++iter;
+    }
+    t1 = uclock_sec();
+  } while ( t1 < tstop );
+  PFFFT_FUNC(destroy_setup)(s);
+  PFFFT_FUNC(aligned_free)(X);
+  PFFFT_FUNC(aligned_free)(Y);
+  PFFFT_FUNC(aligned_free)(Z);
+
+  T = ( t1 - t0 );  /* duration per fft() */
+  nI = ((double)iter) * ( log2N * N );  /* number of iterations "normalized" to O(N) = N*log2(N) */
+  return (nI / T);    /* normalized iterations per second */
+}
+
+
+
+void benchmark_ffts(int N, int cplx, int withFFTWfullMeas, double iterCal, double tmeas[NUM_TYPES][NUM_FFT_ALGOS], int haveAlgo[NUM_FFT_ALGOS], FILE *tableFile ) {
+  const int log2N = Log2(N);
+  int nextPow2N = PFFFT_FUNC(next_power_of_two)(N);
+  int log2NextN = Log2(nextPow2N);
+  int pffftPow2N = nextPow2N;
+
+  int Nfloat = (cplx ? MAX(nextPow2N, pffftPow2N)*2 : MAX(nextPow2N, pffftPow2N));
+  int Nmax, k, iter;
+  int Nbytes = Nfloat * sizeof(pffft_scalar);
+
+  pffft_scalar *X = PFFFT_FUNC(aligned_malloc)(Nbytes + sizeof(pffft_scalar)), *Y = PFFFT_FUNC(aligned_malloc)(Nbytes + 2*sizeof(pffft_scalar) ), *Z = PFFFT_FUNC(aligned_malloc)(Nbytes);
+  double te, t0, t1, tstop, flops, Tfastest;
+
+  const double max_test_duration = 0.150;   /* test duration 150 ms */
+  double numIter = max_test_duration * iterCal / ( log2N * N );  /* number of iteration for max_test_duration */
+  const int step_iter = MAX(1, ((int)(0.01 * numIter)) );  /* one hundredth */
+  int max_iter = MAX(1, ((int)numIter) );  /* minimum 1 iteration */
+
+  const float checkVal = 12345.0F;
+
+  /* printf("benchmark_ffts(N = %d, cplx = %d): Nfloat = %d, X_mem = 0x%p, X = %p\n", N, cplx, Nfloat, X_mem, X); */
+
+  memset( X, 0, Nfloat * sizeof(pffft_scalar) );
+  if ( Nfloat < 32 ) {
+    for (k = 0; k < Nfloat; k += 4)
+      X[k] = sqrtf(k+1);
+  } else {
+    for (k = 0; k < Nfloat; k += (Nfloat/16) )
+      X[k] = sqrtf(k+1);
+  }
+
+  for ( k = 0; k < NUM_TYPES; ++k )
+  {
+    for ( iter = 0; iter < NUM_FFT_ALGOS; ++iter )
+      tmeas[k][iter] = 0.0;
+  }
+
+
+  /* FFTPack benchmark */
+  Nmax = (cplx ? N*2 : N);
+  X[Nmax] = checkVal;
+#ifdef PFFFT_ENABLE_FLOAT
+  {
+    float *wrk = malloc(2*Nbytes + 15*sizeof(pffft_scalar));
+    te = uclock_sec();  
+    if (cplx) cffti(N, wrk);
+    else      rffti(N, wrk);
+    t0 = uclock_sec();
+    tstop = t0 + max_test_duration;
+    max_iter = 0;
+    do {
+      for ( k = 0; k < step_iter; ++k ) {
+        if (cplx) {
+          assert( X[Nmax] == checkVal );
+          cfftf(N, X, wrk);
+          assert( X[Nmax] == checkVal );
+          cfftb(N, X, wrk);
+          assert( X[Nmax] == checkVal );
+        } else {
+          assert( X[Nmax] == checkVal );
+          rfftf(N, X, wrk);
+          assert( X[Nmax] == checkVal );
+          rfftb(N, X, wrk);
+          assert( X[Nmax] == checkVal );
+        }
+        ++max_iter;
+      }
+      t1 = uclock_sec();
+    } while ( t1 < tstop );
+
+    free(wrk);
+
+    flops = (max_iter*2) * ((cplx ? 5 : 2.5)*N*log((double)N)/M_LN2); /* see http://www.fftw.org/speed/method.html */
+    tmeas[TYPE_ITER][ALGO_FFTPACK] = max_iter;
+    tmeas[TYPE_MFLOPS][ALGO_FFTPACK] = flops/1e6/(t1 - t0 + 1e-16);
+    tmeas[TYPE_DUR_TOT][ALGO_FFTPACK] = t1 - t0;
+    tmeas[TYPE_DUR_NS][ALGO_FFTPACK] = show_output("FFTPack", N, cplx, flops, t0, t1, max_iter, tableFile);
+    tmeas[TYPE_PREP][ALGO_FFTPACK] = (t0 - te) * 1e3;
+    haveAlgo[ALGO_FFTPACK] = 1;
+  }
+#endif
+
+#if defined(HAVE_VECLIB) && defined(PFFFT_ENABLE_FLOAT)
+  Nmax = (cplx ? nextPow2N*2 : nextPow2N);
+  X[Nmax] = checkVal;
+  te = uclock_sec();
+  if ( 1 || PFFFT_FUNC(is_power_of_two)(N) ) {
+    FFTSetup setup;
+
+    setup = vDSP_create_fftsetup(log2NextN, FFT_RADIX2);
+    DSPSplitComplex zsamples;
+    zsamples.realp = &X[0];
+    zsamples.imagp = &X[Nfloat/2];
+    t0 = uclock_sec();
+    tstop = t0 + max_test_duration;
+    max_iter = 0;
+    do {
+      for ( k = 0; k < step_iter; ++k ) {
+        if (cplx) {
+          assert( X[Nmax] == checkVal );
+          vDSP_fft_zip(setup, &zsamples, 1, log2NextN, kFFTDirection_Forward);
+          assert( X[Nmax] == checkVal );
+          vDSP_fft_zip(setup, &zsamples, 1, log2NextN, kFFTDirection_Inverse);
+          assert( X[Nmax] == checkVal );
+        } else {
+          assert( X[Nmax] == checkVal );
+          vDSP_fft_zrip(setup, &zsamples, 1, log2NextN, kFFTDirection_Forward); 
+          assert( X[Nmax] == checkVal );
+          vDSP_fft_zrip(setup, &zsamples, 1, log2NextN, kFFTDirection_Inverse);
+          assert( X[Nmax] == checkVal );
+        }
+        ++max_iter;
+      }
+      t1 = uclock_sec();
+    } while ( t1 < tstop );
+
+    vDSP_destroy_fftsetup(setup);
+    flops = (max_iter*2) * ((cplx ? 5 : 2.5)*N*log((double)N)/M_LN2); /* see http://www.fftw.org/speed/method.html */
+    tmeas[TYPE_ITER][ALGO_VECLIB] = max_iter;
+    tmeas[TYPE_MFLOPS][ALGO_VECLIB] = flops/1e6/(t1 - t0 + 1e-16);
+    tmeas[TYPE_DUR_TOT][ALGO_VECLIB] = t1 - t0;
+    tmeas[TYPE_DUR_NS][ALGO_VECLIB] = show_output("vDSP", N, cplx, flops, t0, t1, max_iter, tableFile);
+    tmeas[TYPE_PREP][ALGO_VECLIB] = (t0 - te) * 1e3;
+    haveAlgo[ALGO_VECLIB] = 1;
+  } else {
+    show_output("vDSP", N, cplx, -1, -1, -1, -1, tableFile);
+  }
+#endif
+
+#if defined(HAVE_FFTW)
+  Nmax = (cplx ? N*2 : N);
+  X[Nmax] = checkVal;
+  {
+    /* int flags = (N <= (256*1024) ? FFTW_MEASURE : FFTW_ESTIMATE);  measure takes a lot of time on largest ffts */
+    int flags = FFTW_ESTIMATE;
+    te = uclock_sec();
+
+    FFTW_PLAN planf, planb;
+    FFTW_COMPLEX *in = (FFTW_COMPLEX*) FFTW_FUNC(malloc)(sizeof(FFTW_COMPLEX) * N);
+    FFTW_COMPLEX *out = (FFTW_COMPLEX*) FFTW_FUNC(malloc)(sizeof(FFTW_COMPLEX) * N);
+    memset(in, 0, sizeof(FFTW_COMPLEX) * N);
+    if (cplx) {
+      planf = FFTW_FUNC(plan_dft_1d)(N, in, out, FFTW_FORWARD, flags);
+      planb = FFTW_FUNC(plan_dft_1d)(N, in, out, FFTW_BACKWARD, flags);
+    } else {
+      planf = FFTW_FUNC(plan_dft_r2c_1d)(N, (pffft_scalar*)in, out, flags);
+      planb = FFTW_FUNC(plan_dft_c2r_1d)(N, in, (pffft_scalar*)out, flags);
+    }
+
+    t0 = uclock_sec();
+    tstop = t0 + max_test_duration;
+    max_iter = 0;
+    do {
+      for ( k = 0; k < step_iter; ++k ) {
+        assert( X[Nmax] == checkVal );
+        FFTW_FUNC(execute)(planf);
+        assert( X[Nmax] == checkVal );
+        FFTW_FUNC(execute)(planb);
+        assert( X[Nmax] == checkVal );
+        ++max_iter;
+      }
+      t1 = uclock_sec();
+    } while ( t1 < tstop );
+
+    FFTW_FUNC(destroy_plan)(planf);
+    FFTW_FUNC(destroy_plan)(planb);
+    FFTW_FUNC(free)(in); FFTW_FUNC(free)(out);
+
+    flops = (max_iter*2) * ((cplx ? 5 : 2.5)*N*log((double)N)/M_LN2); /* see http://www.fftw.org/speed/method.html */
+    tmeas[TYPE_ITER][ALGO_FFTW_ESTIM] = max_iter;
+    tmeas[TYPE_MFLOPS][ALGO_FFTW_ESTIM] = flops/1e6/(t1 - t0 + 1e-16);
+    tmeas[TYPE_DUR_TOT][ALGO_FFTW_ESTIM] = t1 - t0;
+    tmeas[TYPE_DUR_NS][ALGO_FFTW_ESTIM] = show_output((flags == FFTW_MEASURE ? algoName[ALGO_FFTW_AUTO] : algoName[ALGO_FFTW_ESTIM]), N, cplx, flops, t0, t1, max_iter, tableFile);
+    tmeas[TYPE_PREP][ALGO_FFTW_ESTIM] = (t0 - te) * 1e3;
+    haveAlgo[ALGO_FFTW_ESTIM] = 1;
+  }
+  Nmax = (cplx ? N*2 : N);
+  X[Nmax] = checkVal;
+  do {
+    /* int flags = (N <= (256*1024) ? FFTW_MEASURE : FFTW_ESTIMATE);  measure takes a lot of time on largest ffts */
+    /* int flags = FFTW_MEASURE; */
+#if ( defined(__arm__) || defined(__aarch64__) || defined(__arm64__) )
+    int limitFFTsize = 31;  /* takes over a second on Raspberry Pi 3 B+ -- and much, much more on larger fft sizes! */
+#else
+    int limitFFTsize = 2400;  /* takes over a second on i7 for fft size 2400 */
+#endif
+    int flags = (N < limitFFTsize ? FFTW_MEASURE : (withFFTWfullMeas ? FFTW_MEASURE : FFTW_ESTIMATE));
+
+    if (flags == FFTW_ESTIMATE) {
+      show_output((flags == FFTW_MEASURE ? algoName[ALGO_FFTW_AUTO] : algoName[ALGO_FFTW_ESTIM]), N, cplx, -1, -1, -1, -1, tableFile);
+      /* copy values from estimation */
+      tmeas[TYPE_ITER][ALGO_FFTW_AUTO] = tmeas[TYPE_ITER][ALGO_FFTW_ESTIM];
+      tmeas[TYPE_DUR_TOT][ALGO_FFTW_AUTO] = tmeas[TYPE_DUR_TOT][ALGO_FFTW_ESTIM];
+      tmeas[TYPE_DUR_NS][ALGO_FFTW_AUTO] = tmeas[TYPE_DUR_NS][ALGO_FFTW_ESTIM];
+      tmeas[TYPE_PREP][ALGO_FFTW_AUTO] = tmeas[TYPE_PREP][ALGO_FFTW_ESTIM];
+    } else {
+      te = uclock_sec();
+      FFTW_PLAN planf, planb;
+      FFTW_COMPLEX *in = (FFTW_COMPLEX*) FFTW_FUNC(malloc)(sizeof(FFTW_COMPLEX) * N);
+      FFTW_COMPLEX *out = (FFTW_COMPLEX*) FFTW_FUNC(malloc)(sizeof(FFTW_COMPLEX) * N);
+      memset(in, 0, sizeof(FFTW_COMPLEX) * N);
+      if (cplx) {
+        planf = FFTW_FUNC(plan_dft_1d)(N, in, out, FFTW_FORWARD, flags);
+        planb = FFTW_FUNC(plan_dft_1d)(N, in, out, FFTW_BACKWARD, flags);
+      } else {
+        planf = FFTW_FUNC(plan_dft_r2c_1d)(N, (pffft_scalar*)in, out, flags);
+        planb = FFTW_FUNC(plan_dft_c2r_1d)(N, in, (pffft_scalar*)out, flags);
+      }
+
+      t0 = uclock_sec();
+      tstop = t0 + max_test_duration;
+      max_iter = 0;
+      do {
+        for ( k = 0; k < step_iter; ++k ) {
+          assert( X[Nmax] == checkVal );
+          FFTW_FUNC(execute)(planf);
+          assert( X[Nmax] == checkVal );
+          FFTW_FUNC(execute)(planb);
+          assert( X[Nmax] == checkVal );
+          ++max_iter;
+        }
+        t1 = uclock_sec();
+      } while ( t1 < tstop );
+
+      FFTW_FUNC(destroy_plan)(planf);
+      FFTW_FUNC(destroy_plan)(planb);
+      FFTW_FUNC(free)(in); FFTW_FUNC(free)(out);
+
+      flops = (max_iter*2) * ((cplx ? 5 : 2.5)*N*log((double)N)/M_LN2); /* see http://www.fftw.org/speed/method.html */
+      tmeas[TYPE_ITER][ALGO_FFTW_AUTO] = max_iter;
+      tmeas[TYPE_MFLOPS][ALGO_FFTW_AUTO] = flops/1e6/(t1 - t0 + 1e-16);
+      tmeas[TYPE_DUR_TOT][ALGO_FFTW_AUTO] = t1 - t0;
+      tmeas[TYPE_DUR_NS][ALGO_FFTW_AUTO] = show_output((flags == FFTW_MEASURE ? algoName[ALGO_FFTW_AUTO] : algoName[ALGO_FFTW_ESTIM]), N, cplx, flops, t0, t1, max_iter, tableFile);
+      tmeas[TYPE_PREP][ALGO_FFTW_AUTO] = (t0 - te) * 1e3;
+      haveAlgo[ALGO_FFTW_AUTO] = 1;
+    }
+  } while (0);
+#else
+  (void)withFFTWfullMeas;
+#endif
+
+#if defined(HAVE_GREEN_FFTS) && defined(PFFFT_ENABLE_FLOAT)
+  Nmax = (cplx ? nextPow2N*2 : nextPow2N);
+  X[Nmax] = checkVal;
+  if ( 1 || PFFFT_FUNC(is_power_of_two)(N) )
+  {
+    te = uclock_sec();
+    fftInit(log2NextN);
+
+    t0 = uclock_sec();
+    tstop = t0 + max_test_duration;
+    max_iter = 0;
+    do {
+      for ( k = 0; k < step_iter; ++k ) {
+        if (cplx) {
+          assert( X[Nmax] == checkVal );
+          ffts(X, log2NextN, 1);
+          assert( X[Nmax] == checkVal );
+          iffts(X, log2NextN, 1);
+          assert( X[Nmax] == checkVal );
+        } else {
+          rffts(X, log2NextN, 1);
+          riffts(X, log2NextN, 1);
+        }
+
+        ++max_iter;
+      }
+      t1 = uclock_sec();
+    } while ( t1 < tstop );
+
+    fftFree();
+
+    flops = (max_iter*2) * ((cplx ? 5 : 2.5)*N*log((double)N)/M_LN2); /* see http://www.fftw.org/speed/method.html */
+    tmeas[TYPE_ITER][ALGO_GREEN] = max_iter;
+    tmeas[TYPE_MFLOPS][ALGO_GREEN] = flops/1e6/(t1 - t0 + 1e-16);
+    tmeas[TYPE_DUR_TOT][ALGO_GREEN] = t1 - t0;
+    tmeas[TYPE_DUR_NS][ALGO_GREEN] = show_output("Green", N, cplx, flops, t0, t1, max_iter, tableFile);
+    tmeas[TYPE_PREP][ALGO_GREEN] = (t0 - te) * 1e3;
+    haveAlgo[ALGO_GREEN] = 1;
+  } else {
+    show_output("Green", N, cplx, -1, -1, -1, -1, tableFile);
+  }
+#endif
+
+#if defined(HAVE_KISS_FFT) && defined(PFFFT_ENABLE_FLOAT)
+  Nmax = (cplx ? nextPow2N*2 : nextPow2N);
+  X[Nmax] = checkVal;
+  if ( 1 || PFFFT_FUNC(is_power_of_two)(N) )
+  {
+    kiss_fft_cfg stf;
+    kiss_fft_cfg sti;
+    kiss_fftr_cfg stfr;
+    kiss_fftr_cfg stir;
+
+    te = uclock_sec();
+    if (cplx) {
+      stf = kiss_fft_alloc(nextPow2N, 0, 0, 0);
+      sti = kiss_fft_alloc(nextPow2N, 1, 0, 0);
+    } else {
+      stfr = kiss_fftr_alloc(nextPow2N, 0, 0, 0);
+      stir = kiss_fftr_alloc(nextPow2N, 1, 0, 0);
+    }
+
+    t0 = uclock_sec();
+    tstop = t0 + max_test_duration;
+    max_iter = 0;
+    do {
+      for ( k = 0; k < step_iter; ++k ) {
+        if (cplx) {
+          assert( X[Nmax] == checkVal );
+          kiss_fft(stf, (const kiss_fft_cpx *)X, (kiss_fft_cpx *)Y);
+          assert( X[Nmax] == checkVal );
+          kiss_fft(sti, (const kiss_fft_cpx *)Y, (kiss_fft_cpx *)X);
+          assert( X[Nmax] == checkVal );
+        } else {
+          assert( X[Nmax] == checkVal );
+          kiss_fftr(stfr, X, (kiss_fft_cpx *)Y);
+          assert( X[Nmax] == checkVal );
+          kiss_fftri(stir, (const kiss_fft_cpx *)Y, X);
+          assert( X[Nmax] == checkVal );
+        }
+        ++max_iter;
+      }
+      t1 = uclock_sec();
+    } while ( t1 < tstop );
+
+    kiss_fft_cleanup();
+
+    flops = (max_iter*2) * ((cplx ? 5 : 2.5)*N*log((double)N)/M_LN2); /* see http://www.fftw.org/speed/method.html */
+    tmeas[TYPE_ITER][ALGO_KISS] = max_iter;
+    tmeas[TYPE_MFLOPS][ALGO_KISS] = flops/1e6/(t1 - t0 + 1e-16);
+    tmeas[TYPE_DUR_TOT][ALGO_KISS] = t1 - t0;
+    tmeas[TYPE_DUR_NS][ALGO_KISS] = show_output("Kiss", N, cplx, flops, t0, t1, max_iter, tableFile);
+    tmeas[TYPE_PREP][ALGO_KISS] = (t0 - te) * 1e3;
+    haveAlgo[ALGO_KISS] = 1;
+  } else {
+    show_output("Kiss", N, cplx, -1, -1, -1, -1, tableFile);
+  }
+#endif
+
+
+  /* PFFFT-U (unordered) benchmark */
+  Nmax = (cplx ? pffftPow2N*2 : pffftPow2N);
+  X[Nmax] = checkVal;
+  if ( pffftPow2N >= PFFFT_FUNC(min_fft_size)(cplx ? PFFFT_COMPLEX : PFFFT_REAL) )
+  {
+    te = uclock_sec();
+    PFFFT_SETUP *s = PFFFT_FUNC(new_setup)(pffftPow2N, cplx ? PFFFT_COMPLEX : PFFFT_REAL);
+    if (s) {
+      t0 = uclock_sec();
+      tstop = t0 + max_test_duration;
+      max_iter = 0;
+      do {
+        for ( k = 0; k < step_iter; ++k ) {
+          assert( X[Nmax] == checkVal );
+          PFFFT_FUNC(transform)(s, X, Z, Y, PFFFT_FORWARD);
+          assert( X[Nmax] == checkVal );
+          PFFFT_FUNC(transform)(s, X, Z, Y, PFFFT_BACKWARD);
+          assert( X[Nmax] == checkVal );
+          ++max_iter;
+        }
+        t1 = uclock_sec();
+      } while ( t1 < tstop );
+
+      PFFFT_FUNC(destroy_setup)(s);
+
+      flops = (max_iter*2) * ((cplx ? 5 : 2.5)*N*log((double)N)/M_LN2); /* see http://www.fftw.org/speed/method.html */
+      tmeas[TYPE_ITER][ALGO_PFFFT_U] = max_iter;
+      tmeas[TYPE_MFLOPS][ALGO_PFFFT_U] = flops/1e6/(t1 - t0 + 1e-16);
+      tmeas[TYPE_DUR_TOT][ALGO_PFFFT_U] = t1 - t0;
+      tmeas[TYPE_DUR_NS][ALGO_PFFFT_U] = show_output("PFFFT-U", N, cplx, flops, t0, t1, max_iter, tableFile);
+      tmeas[TYPE_PREP][ALGO_PFFFT_U] = (t0 - te) * 1e3;
+      haveAlgo[ALGO_PFFFT_U] = 1;
+    }
+  } else {
+    show_output("PFFFT-U", N, cplx, -1, -1, -1, -1, tableFile);
+  }
+
+
+  if ( pffftPow2N >= PFFFT_FUNC(min_fft_size)(cplx ? PFFFT_COMPLEX : PFFFT_REAL) )
+  {
+    te = uclock_sec();
+    PFFFT_SETUP *s = PFFFT_FUNC(new_setup)(pffftPow2N, cplx ? PFFFT_COMPLEX : PFFFT_REAL);
+    if (s) {
+      t0 = uclock_sec();
+      tstop = t0 + max_test_duration;
+      max_iter = 0;
+      do {
+        for ( k = 0; k < step_iter; ++k ) {
+          assert( X[Nmax] == checkVal );
+          PFFFT_FUNC(transform_ordered)(s, X, Z, Y, PFFFT_FORWARD);
+          assert( X[Nmax] == checkVal );
+          PFFFT_FUNC(transform_ordered)(s, X, Z, Y, PFFFT_BACKWARD);
+          assert( X[Nmax] == checkVal );
+          ++max_iter;
+        }
+        t1 = uclock_sec();
+      } while ( t1 < tstop );
+
+      PFFFT_FUNC(destroy_setup)(s);
+
+      flops = (max_iter*2) * ((cplx ? 5 : 2.5)*N*log((double)N)/M_LN2); /* see http://www.fftw.org/speed/method.html */
+      tmeas[TYPE_ITER][ALGO_PFFFT_O] = max_iter;
+      tmeas[TYPE_MFLOPS][ALGO_PFFFT_O] = flops/1e6/(t1 - t0 + 1e-16);
+      tmeas[TYPE_DUR_TOT][ALGO_PFFFT_O] = t1 - t0;
+      tmeas[TYPE_DUR_NS][ALGO_PFFFT_O] = show_output("PFFFT", N, cplx, flops, t0, t1, max_iter, tableFile);
+      tmeas[TYPE_PREP][ALGO_PFFFT_O] = (t0 - te) * 1e3;
+      haveAlgo[ALGO_PFFFT_O] = 1;
+    }
+  } else {
+    show_output("PFFFT", N, cplx, -1, -1, -1, -1, tableFile);
+  }
+
+  if (!array_output_format)
+  {
+    printf("prepare/ms:     ");
+    for ( iter = 0; iter < NUM_FFT_ALGOS; ++iter )
+    {
+      if ( haveAlgo[iter] && tmeas[TYPE_DUR_NS][iter] > 0.0 ) {
+        printf("%s %.3f    ", algoName[iter], tmeas[TYPE_PREP][iter] );
+      }
+    }
+    printf("\n");
+  }
+  Tfastest = 0.0;
+  for ( iter = 0; iter < NUM_FFT_ALGOS; ++iter )
+  {
+    if ( Tfastest == 0.0 || ( tmeas[TYPE_DUR_NS][iter] != 0.0 && tmeas[TYPE_DUR_NS][iter] < Tfastest ) )
+      Tfastest = tmeas[TYPE_DUR_NS][iter];
+  }
+  if ( Tfastest > 0.0 )
+  {
+    if (!array_output_format)
+      printf("relative fast:  ");
+    for ( iter = 0; iter < NUM_FFT_ALGOS; ++iter )
+    {
+      if ( haveAlgo[iter] && tmeas[TYPE_DUR_NS][iter] > 0.0 ) {
+        tmeas[TYPE_DUR_FASTEST][iter] = tmeas[TYPE_DUR_NS][iter] / Tfastest;
+        if (!array_output_format)
+          printf("%s %.3f    ", algoName[iter], tmeas[TYPE_DUR_FASTEST][iter] );
+      }
+    }
+    if (!array_output_format)
+      printf("\n");
+  }
+
+  {
+    if (!array_output_format)
+      printf("relative pffft: ");
+    for ( iter = 0; iter < NUM_FFT_ALGOS; ++iter )
+    {
+      if ( haveAlgo[iter] && tmeas[TYPE_DUR_NS][iter] > 0.0 ) {
+        tmeas[TYPE_REL_PFFFT][iter] = tmeas[TYPE_DUR_NS][iter] / tmeas[TYPE_DUR_NS][ALGO_PFFFT_O];
+        if (!array_output_format)
+          printf("%s %.3f    ", algoName[iter], tmeas[TYPE_REL_PFFFT][iter] );
+      }
+    }
+    if (!array_output_format)
+      printf("\n");
+  }
+
+  if (!array_output_format) {
+    printf("--\n");
+  }
+
+  PFFFT_FUNC(aligned_free)(X);
+  PFFFT_FUNC(aligned_free)(Y);
+  PFFFT_FUNC(aligned_free)(Z);
+}
+
+
+/* small functions inside pffft.c that will detect (compiler) bugs with respect to simd instructions */
+void validate_pffft_simd();
+int  validate_pffft_simd_ex(FILE * DbgOut);
+void validate_pffftd_simd();
+int  validate_pffftd_simd_ex(FILE * DbgOut);
+
+
+
+int main(int argc, char **argv) {
+  /* unfortunately, the fft size must be a multiple of 16 for complex FFTs 
+     and 32 for real FFTs -- a lot of stuff would need to be rewritten to
+     handle other cases (or maybe just switch to a scalar fft, I don't know..) */
+
+#if 0  /* include powers of 2 ? */
+#define NUMNONPOW2LENS  23
+  int NnonPow2[NUMNONPOW2LENS] = {
+    64, 96, 128, 160, 192,   256, 384, 5*96, 512, 5*128,
+    3*256, 800, 1024, 2048, 2400,   4096, 8192, 9*1024, 16384, 32768,
+    256*1024, 1024*1024, -1 };
+#else
+#define NUMNONPOW2LENS  11
+  int NnonPow2[NUMNONPOW2LENS] = {
+    96, 160, 192, 384, 5*96,   5*128,3*256, 800, 2400, 9*1024,
+    -1 };
+#endif
+
+#define NUMPOW2FFTLENS  21
+#define MAXNUMFFTLENS MAX( NUMPOW2FFTLENS, NUMNONPOW2LENS )
+  int Npow2[NUMPOW2FFTLENS];  /* exp = 1 .. 20, -1 */
+  const int *Nvalues = NULL;
+  double tmeas[2][MAXNUMFFTLENS][NUM_TYPES][NUM_FFT_ALGOS];
+  double iterCalReal, iterCalCplx;
+
+  int benchReal=1, benchCplx=1, withFFTWfullMeas=0, outputTable2File=1, usePow2=1;
+  int realCplxIdx, typeIdx;
+  int i, k;
+  FILE *tableFile = NULL;
+
+  int haveAlgo[NUM_FFT_ALGOS];
+  char acCsvFilename[64];
+
+  for ( k = 1; k <= NUMPOW2FFTLENS; ++k )
+    Npow2[k-1] = (k == NUMPOW2FFTLENS) ? -1 : (1 << k);
+  Nvalues = Npow2;  /* set default .. for comparisons .. */
+
+  for ( i = 0; i < NUM_FFT_ALGOS; ++i )
+    haveAlgo[i] = 0;
+
+  printf("pffft architecture:    '%s'\n", PFFFT_FUNC(simd_arch)());
+  printf("pffft SIMD size:       %d\n", PFFFT_FUNC(simd_size)());
+  printf("pffft min real fft:    %d\n", PFFFT_FUNC(min_fft_size)(PFFFT_REAL));
+  printf("pffft min complex fft: %d\n", PFFFT_FUNC(min_fft_size)(PFFFT_COMPLEX));
+  printf("\n");
+
+  for ( i = 1; i < argc; ++i ) {
+    if (!strcmp(argv[i], "--array-format") || !strcmp(argv[i], "--table")) {
+      array_output_format = 1;
+    }
+    else if (!strcmp(argv[i], "--no-tab")) {
+      array_output_format = 0;
+    }
+    else if (!strcmp(argv[i], "--real")) {
+      benchCplx = 0;
+    }
+    else if (!strcmp(argv[i], "--cplx")) {
+      benchReal = 0;
+    }
+    else if (!strcmp(argv[i], "--fftw-full-measure")) {
+      withFFTWfullMeas = 1;
+    }
+    else if (!strcmp(argv[i], "--non-pow2")) {
+      Nvalues = NnonPow2;
+      usePow2 = 0;
+    }
+    else /* if (!strcmp(argv[i], "--help")) */ {
+      printf("usage: %s [--array-format|--table] [--no-tab] [--real|--cplx] [--fftw-full-measure] [--non-pow2]\n", argv[0]);
+      exit(0);
+    }
+  }
+
+#ifdef HAVE_FFTW
+#ifdef PFFFT_ENABLE_DOUBLE
+  algoName[ALGO_FFTW_ESTIM] = "FFTW D(estim)";
+  algoName[ALGO_FFTW_AUTO]  = "FFTW D(auto) ";
+#endif
+
+  if (withFFTWfullMeas)
+  {
+#ifdef PFFFT_ENABLE_FLOAT
+    algoName[ALGO_FFTW_AUTO] = "FFTWF(meas)"; /* "FFTW (auto)" */
+#else
+    algoName[ALGO_FFTW_AUTO] = "FFTWD(meas)"; /* "FFTW (auto)" */
+#endif
+    algoTableHeader[ALGO_FFTW_AUTO][0] = "|real FFTWmeas "; /* "|real FFTWauto " */
+    algoTableHeader[ALGO_FFTW_AUTO][1] = "|cplx FFTWmeas "; /* "|cplx FFTWauto " */
+  }
+#endif
+
+  if ( PFFFT_FUNC(simd_size)() == 1 )
+  {
+    algoName[ALGO_PFFFT_U] = "PFFFTU scal-1";
+    algoName[ALGO_PFFFT_O] = "PFFFT scal-1 ";
+  }
+  else if ( !strcmp(PFFFT_FUNC(simd_arch)(), "4xScalar") )
+  {
+    algoName[ALGO_PFFFT_U] = "PFFFT-U scal4";
+    algoName[ALGO_PFFFT_O] = "PFFFT scal-4 ";
+  }
+
+
+  clock();
+  /* double TClockDur = 1.0 / CLOCKS_PER_SEC;
+  printf("clock() duration for CLOCKS_PER_SEC = %f sec = %f ms\n", TClockDur, 1000.0 * TClockDur );
+  */
+
+  /* calibrate test duration */
+  {
+    double t0, t1, dur;
+    printf("calibrating fft benchmark duration at size N = 512 ..\n");
+    t0 = uclock_sec();
+    if (benchReal) {
+      iterCalReal = cal_benchmark(512, 0 /* real fft */);
+      printf("real fft iterCal = %f\n", iterCalReal);
+    }
+    if (benchCplx) {
+      iterCalCplx = cal_benchmark(512, 1 /* cplx fft */);
+      printf("cplx fft iterCal = %f\n", iterCalCplx);
+    }
+    t1 = uclock_sec();
+    dur = t1 - t0;
+    printf("calibration done in %f sec.\n\n", dur);
+  }
+
+  if (!array_output_format) {
+    if (benchReal) {
+      for (i=0; Nvalues[i] > 0; ++i)
+        benchmark_ffts(Nvalues[i], 0 /* real fft */, withFFTWfullMeas, iterCalReal, tmeas[0][i], haveAlgo, NULL);
+    }
+    if (benchCplx) {
+      for (i=0; Nvalues[i] > 0; ++i)
+        benchmark_ffts(Nvalues[i], 1 /* cplx fft */, withFFTWfullMeas, iterCalCplx, tmeas[1][i], haveAlgo, NULL);
+    }
+
+  } else {
+
+    if (outputTable2File) {
+      tableFile = fopen( usePow2 ? "bench-fft-table-pow2.txt" : "bench-fft-table-non2.txt", "w");
+    }
+    /* print table headers */
+    printf("table shows MFlops; higher values indicate faster computation\n\n");
+
+    {
+      print_table("| input len ", tableFile);
+      for (realCplxIdx = 0; realCplxIdx < 2; ++realCplxIdx)
+      {
+        if ( (realCplxIdx == 0 && !benchReal) || (realCplxIdx == 1 && !benchCplx) )
+          continue;
+        for (k=0; k < NUM_FFT_ALGOS; ++k)
+        {
+          if ( compiledInAlgo[k] )
+            print_table(algoTableHeader[k][realCplxIdx], tableFile);
+        }
+      }
+      print_table("|\n", tableFile);
+    }
+    /* print table value separators */
+    {
+      print_table("|----------", tableFile);
+      for (realCplxIdx = 0; realCplxIdx < 2; ++realCplxIdx)
+      {
+        if ( (realCplxIdx == 0 && !benchReal) || (realCplxIdx == 1 && !benchCplx) )
+          continue;
+        for (k=0; k < NUM_FFT_ALGOS; ++k)
+        {
+          if ( compiledInAlgo[k] )
+            print_table(":|-------------", tableFile);
+        }
+      }
+      print_table(":|\n", tableFile);
+    }
+
+    for (i=0; Nvalues[i] > 0; ++i) {
+      {
+        double t0, t1;
+        print_table_fftsize(Nvalues[i], tableFile);
+        t0 = uclock_sec();
+        if (benchReal)
+          benchmark_ffts(Nvalues[i], 0, withFFTWfullMeas, iterCalReal, tmeas[0][i], haveAlgo, tableFile);
+        if (benchCplx)
+          benchmark_ffts(Nvalues[i], 1, withFFTWfullMeas, iterCalCplx, tmeas[1][i], haveAlgo, tableFile);
+        t1 = uclock_sec();
+        print_table("|\n", tableFile);
+        /* printf("all ffts for size %d took %f sec\n", Nvalues[i], t1-t0); */
+        (void)t0;
+        (void)t1;
+      }
+    }
+    fprintf(stdout, " (numbers are given in MFlops)\n");
+    if (outputTable2File) {
+      fclose(tableFile);
+    }
+  }
+
+  printf("\n");
+  printf("now writing .csv files ..\n");
+
+  for (realCplxIdx = 0; realCplxIdx < 2; ++realCplxIdx)
+  {
+    if ( (benchReal && realCplxIdx == 0) || (benchCplx && realCplxIdx == 1) )
+    {
+      for (typeIdx = 0; typeIdx < NUM_TYPES; ++typeIdx)
+      {
+        FILE *f = NULL;
+        if ( !(SAVE_ALL_TYPES || saveType[typeIdx]) )
+          continue;
+        acCsvFilename[0] = 0;
+#ifdef PFFFT_SIMD_DISABLE
+        strcat(acCsvFilename, "scal-");
+#else
+        strcat(acCsvFilename, "simd-");
+#endif
+        strcat(acCsvFilename, (realCplxIdx == 0 ? "real-" : "cplx-"));
+        strcat(acCsvFilename, ( usePow2 ? "pow2-" : "non2-"));
+        assert( strlen(acCsvFilename) + strlen(typeFilenamePart[typeIdx]) + 5 < (sizeof(acCsvFilename) / sizeof(acCsvFilename[0])) );
+        strcat(acCsvFilename, typeFilenamePart[typeIdx]);
+        strcat(acCsvFilename, ".csv");
+        f = fopen(acCsvFilename, "w");
+        if (!f)
+          continue;
+        {
+          fprintf(f, "size, log2, ");
+          for (k=0; k < NUM_FFT_ALGOS; ++k)
+            if ( haveAlgo[k] )
+              fprintf(f, "%s, ", algoName[k]);
+          fprintf(f, "\n");
+        }
+        for (i=0; Nvalues[i] > 0; ++i)
+        {
+          {
+            fprintf(f, "%d, %.3f, ", Nvalues[i], log10((double)Nvalues[i])/log10(2.0) );
+            for (k=0; k < NUM_FFT_ALGOS; ++k)
+              if ( haveAlgo[k] )
+                fprintf(f, "%f, ", tmeas[realCplxIdx][i][typeIdx][k]);
+            fprintf(f, "\n");
+          }
+        }
+        fclose(f);
+      }
+    }
+  }
+
+  return 0;
+}
+
diff --git a/fftpack.c b/fftpack.c
new file mode 100644
index 0000000..d412780
--- /dev/null
+++ b/fftpack.c
@@ -0,0 +1,3122 @@
+/*
+  compile with cc -DTESTING_FFTPACK fftpack.c in order to build the
+  test application.
+
+  This is an f2c translation of the full fftpack sources as found on
+  http://www.netlib.org/fftpack/ . The translated code has been
+  slightly edited to remove the ugliest artefacts of the translation
+  (around a hundred wild GOTOs were wiped in the process).
+
+  The original fftpack file was written by Paul N. Swarztrauber
+  (Version 4, 1985), in Fortran 77.
+
+   FFTPACK license:
+
+   http://www.cisl.ucar.edu/css/software/fftpack5/ftpk.html
+
+   Copyright (c) 2004 the University Corporation for Atmospheric
+   Research ("UCAR"). All rights reserved. Developed by NCAR's
+   Computational and Information Systems Laboratory, UCAR,
+   www.cisl.ucar.edu.
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of NCAR's Computational and Information Systems
+   Laboratory, the University Corporation for Atmospheric Research,
+   nor the names of its sponsors or contributors may be used to
+   endorse or promote products derived from this Software without
+   specific prior written permission.  
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+
+   ChangeLog:
+   2011/10/02: this is my first release of this file.
+*/
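+
+/*
+  A minimal test build might look like this (a sketch: the compiler name,
+  the output name "fftpack_test", and the explicit -lm for the math library
+  are assumptions that may differ per platform):
+
+      cc -DTESTING_FFTPACK fftpack.c -lm -o fftpack_test
+      ./fftpack_test
+*/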
+
+#include "fftpack.h"
+#include <math.h>
+
+typedef fftpack_real real;
+typedef fftpack_int  integer;
+
+typedef struct f77complex {    
+  real r, i;
+} f77complex;   
+
+#ifdef TESTING_FFTPACK
+static real c_abs(f77complex *c) { return sqrt(c->r*c->r + c->i*c->i); }
+static double dmax(double a, double b) { return a < b ? b : a; }
+#endif
+
+/* define our own constants: M_PI and M_SQRT2 are not standard C and may be missing when compiler extensions are turned off */
+#ifndef M_PI
+  #define M_PI    3.14159265358979323846  /* pi */
+#endif
+
+#ifndef M_SQRT2
+  #define M_SQRT2 1.41421356237309504880  /* sqrt(2) */
+#endif
+
+
+/* translated by f2c (version 20061008), and slightly edited */
+
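+/* passf and passb merged for the general radix: fsign = +1 selects the backward (passb) variant, fsign = -1 the forward (passf) variant */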
+static void passfb(integer *nac, integer ido, integer ip, integer l1, integer idl1, 
+                   real *cc, real *c1, real *c2, real *ch, real *ch2, const real *wa, real fsign)
+{
+  /* System generated locals */
+  integer ch_offset, cc_offset,
+    c1_offset, c2_offset, ch2_offset;
+
+  /* Local variables */
+  integer i, j, k, l, jc, lc, ik, idj, idl, inc, idp;
+  real wai, war;
+  integer ipp2, idij, idlj, idot, ipph;
+
+
+#define c1_ref(a_1,a_2,a_3) c1[((a_3)*l1 + (a_2))*ido + a_1]
+#define c2_ref(a_1,a_2) c2[(a_2)*idl1 + a_1]
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*ip + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*l1 + (a_2))*ido + a_1]
+#define ch2_ref(a_1,a_2) ch2[(a_2)*idl1 + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * (1 + l1);
+  ch -= ch_offset;
+  c1_offset = 1 + ido * (1 + l1);
+  c1 -= c1_offset;
+  cc_offset = 1 + ido * (1 + ip);
+  cc -= cc_offset;
+  ch2_offset = 1 + idl1;
+  ch2 -= ch2_offset;
+  c2_offset = 1 + idl1;
+  c2 -= c2_offset;
+  --wa;
+
+  /* Function Body */
+  idot = ido / 2;
+  ipp2 = ip + 2;
+  ipph = (ip + 1) / 2;
+  idp = ip * ido;
+
+  if (ido >= l1) {
+    for (j = 2; j <= ipph; ++j) {
+      jc = ipp2 - j;
+      for (k = 1; k <= l1; ++k) {
+        for (i = 1; i <= ido; ++i) {
+          ch_ref(i, k, j) = cc_ref(i, j, k) + cc_ref(i, jc, k);
+          ch_ref(i, k, jc) = cc_ref(i, j, k) - cc_ref(i, jc, k);
+        }
+      }
+    }
+    for (k = 1; k <= l1; ++k) {
+      for (i = 1; i <= ido; ++i) {
+        ch_ref(i, k, 1) = cc_ref(i, 1, k);
+      }
+    }
+  } else {
+    for (j = 2; j <= ipph; ++j) {
+      jc = ipp2 - j;
+      for (i = 1; i <= ido; ++i) {
+        for (k = 1; k <= l1; ++k) {
+          ch_ref(i, k, j) = cc_ref(i, j, k) + cc_ref(i, jc, k);
+          ch_ref(i, k, jc) = cc_ref(i, j, k) - cc_ref(i, jc, k);
+        }
+      }
+    }
+    for (i = 1; i <= ido; ++i) {
+      for (k = 1; k <= l1; ++k) {
+        ch_ref(i, k, 1) = cc_ref(i, 1, k);
+      }
+    }
+  }
+  idl = 2 - ido;
+  inc = 0;
+  for (l = 2; l <= ipph; ++l) {
+    lc = ipp2 - l;
+    idl += ido;
+    for (ik = 1; ik <= idl1; ++ik) {
+      c2_ref(ik, l) = ch2_ref(ik, 1) + wa[idl - 1] * ch2_ref(ik, 2);
+      c2_ref(ik, lc) = fsign*wa[idl] * ch2_ref(ik, ip);
+    }
+    idlj = idl;
+    inc += ido;
+    for (j = 3; j <= ipph; ++j) {
+      jc = ipp2 - j;
+      idlj += inc;
+      if (idlj > idp) {
+        idlj -= idp;
+      }
+      war = wa[idlj - 1];
+      wai = wa[idlj];
+      for (ik = 1; ik <= idl1; ++ik) {
+        c2_ref(ik, l) = c2_ref(ik, l) + war * ch2_ref(ik, j);
+        c2_ref(ik, lc) = c2_ref(ik, lc) + fsign*wai * ch2_ref(ik, jc);
+      }
+    }
+  }
+  for (j = 2; j <= ipph; ++j) {
+    for (ik = 1; ik <= idl1; ++ik) {
+      ch2_ref(ik, 1) = ch2_ref(ik, 1) + ch2_ref(ik, j);
+    }
+  }
+  for (j = 2; j <= ipph; ++j) {
+    jc = ipp2 - j;
+    for (ik = 2; ik <= idl1; ik += 2) {
+      ch2_ref(ik - 1, j) = c2_ref(ik - 1, j) - c2_ref(ik, jc);
+      ch2_ref(ik - 1, jc) = c2_ref(ik - 1, j) + c2_ref(ik, jc);
+      ch2_ref(ik, j) = c2_ref(ik, j) + c2_ref(ik - 1, jc);
+      ch2_ref(ik, jc) = c2_ref(ik, j) - c2_ref(ik - 1, jc);
+    }
+  }
+  *nac = 1;
+  if (ido == 2) {
+    return;
+  }
+  *nac = 0;
+  for (ik = 1; ik <= idl1; ++ik) {
+    c2_ref(ik, 1) = ch2_ref(ik, 1);
+  }
+  for (j = 2; j <= ip; ++j) {
+    for (k = 1; k <= l1; ++k) {
+      c1_ref(1, k, j) = ch_ref(1, k, j);
+      c1_ref(2, k, j) = ch_ref(2, k, j);
+    }
+  }
+  if (idot <= l1) {
+    idij = 0;
+    for (j = 2; j <= ip; ++j) {
+      idij += 2;
+      for (i = 4; i <= ido; i += 2) {
+        idij += 2;
+        for (k = 1; k <= l1; ++k) {
+          c1_ref(i - 1, k, j) = wa[idij - 1] * ch_ref(i - 1, k, j) - fsign*wa[idij] * ch_ref(i, k, j);
+          c1_ref(i, k, j) = wa[idij - 1] * ch_ref(i, k, j) + fsign*wa[idij] * ch_ref(i - 1, k, j);
+        }
+      }
+    }
+    return;
+  }
+  idj = 2 - ido;
+  for (j = 2; j <= ip; ++j) {
+    idj += ido;
+    for (k = 1; k <= l1; ++k) {
+      idij = idj;
+      for (i = 4; i <= ido; i += 2) {
+        idij += 2;
+        c1_ref(i - 1, k, j) = wa[idij - 1] * ch_ref(i - 1, k, j) - fsign*wa[idij] * ch_ref(i, k, j);
+        c1_ref(i, k, j) = wa[idij - 1] * ch_ref(i, k, j) + fsign*wa[idij] * ch_ref(i - 1, k, j);
+      }
+    }
+  }
+} /* passfb */
+
+#undef ch2_ref
+#undef ch_ref
+#undef cc_ref
+#undef c2_ref
+#undef c1_ref
+
+
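+/* radix-2 pass of the complex backward (inverse) transform */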
+static void passb2(integer ido, integer l1, const real *cc, real *ch, const real *wa1)
+{
+  /* System generated locals */
+  integer cc_offset, ch_offset;
+
+  /* Local variables */
+  integer i, k;
+  real ti2, tr2;
+
+
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*2 + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*l1 + (a_2))*ido + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * (1 + l1);
+  ch -= ch_offset;
+  cc_offset = 1 + ido * 3;
+  cc -= cc_offset;
+  --wa1;
+
+  /* Function Body */
+  if (ido <= 2) {
+    for (k = 1; k <= l1; ++k) {
+      ch_ref(1, k, 1) = cc_ref(1, 1, k) + cc_ref(1, 2, k);
+      ch_ref(1, k, 2) = cc_ref(1, 1, k) - cc_ref(1, 2, k);
+      ch_ref(2, k, 1) = cc_ref(2, 1, k) + cc_ref(2, 2, k);
+      ch_ref(2, k, 2) = cc_ref(2, 1, k) - cc_ref(2, 2, k);
+    }
+    return;
+  }
+  for (k = 1; k <= l1; ++k) {
+    for (i = 2; i <= ido; i += 2) {
+      ch_ref(i - 1, k, 1) = cc_ref(i - 1, 1, k) + cc_ref(i - 1, 2, k);
+      tr2 = cc_ref(i - 1, 1, k) - cc_ref(i - 1, 2, k);
+      ch_ref(i, k, 1) = cc_ref(i, 1, k) + cc_ref(i, 2, k);
+      ti2 = cc_ref(i, 1, k) - cc_ref(i, 2, k);
+      ch_ref(i, k, 2) = wa1[i - 1] * ti2 + wa1[i] * tr2;
+      ch_ref(i - 1, k, 2) = wa1[i - 1] * tr2 - wa1[i] * ti2;
+    }
+  }
+} /* passb2 */
+
+#undef ch_ref
+#undef cc_ref
+
+
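+/* radix-3 pass of the complex backward (inverse) transform */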
+static void passb3(integer ido, integer l1, const real *cc, real *ch, const real *wa1, const real *wa2)
+{
+  static const real taur = -.5f;
+  static const real taui = .866025403784439f;
+
+  /* System generated locals */
+  integer cc_offset, ch_offset;
+
+  /* Local variables */
+  integer i, k;
+  real ci2, ci3, di2, di3, cr2, cr3, dr2, dr3, ti2, tr2;
+
+
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*3 + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*l1 + (a_2))*ido + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * (1 + l1);
+  ch -= ch_offset;
+  cc_offset = 1 + (ido << 2);
+  cc -= cc_offset;
+  --wa1;
+  --wa2;
+
+  /* Function Body */
+  if (ido == 2) {
+    for (k = 1; k <= l1; ++k) {
+      tr2 = cc_ref(1, 2, k) + cc_ref(1, 3, k);
+      cr2 = cc_ref(1, 1, k) + taur * tr2;
+      ch_ref(1, k, 1) = cc_ref(1, 1, k) + tr2;
+      ti2 = cc_ref(2, 2, k) + cc_ref(2, 3, k);
+      ci2 = cc_ref(2, 1, k) + taur * ti2;
+      ch_ref(2, k, 1) = cc_ref(2, 1, k) + ti2;
+      cr3 = taui * (cc_ref(1, 2, k) - cc_ref(1, 3, k));
+      ci3 = taui * (cc_ref(2, 2, k) - cc_ref(2, 3, k));
+      ch_ref(1, k, 2) = cr2 - ci3;
+      ch_ref(1, k, 3) = cr2 + ci3;
+      ch_ref(2, k, 2) = ci2 + cr3;
+      ch_ref(2, k, 3) = ci2 - cr3;
+    }
+  } else {
+    for (k = 1; k <= l1; ++k) {
+      for (i = 2; i <= ido; i += 2) {
+        tr2 = cc_ref(i - 1, 2, k) + cc_ref(i - 1, 3, k);
+        cr2 = cc_ref(i - 1, 1, k) + taur * tr2;
+        ch_ref(i - 1, k, 1) = cc_ref(i - 1, 1, k) + tr2;
+        ti2 = cc_ref(i, 2, k) + cc_ref(i, 3, k);
+        ci2 = cc_ref(i, 1, k) + taur * ti2;
+        ch_ref(i, k, 1) = cc_ref(i, 1, k) + ti2;
+        cr3 = taui * (cc_ref(i - 1, 2, k) - cc_ref(i - 1, 3, k));
+        ci3 = taui * (cc_ref(i, 2, k) - cc_ref(i, 3, k));
+        dr2 = cr2 - ci3;
+        dr3 = cr2 + ci3;
+        di2 = ci2 + cr3;
+        di3 = ci2 - cr3;
+        ch_ref(i, k, 2) = wa1[i - 1] * di2 + wa1[i] * dr2;
+        ch_ref(i - 1, k, 2) = wa1[i - 1] * dr2 - wa1[i] * di2;
+        ch_ref(i, k, 3) = wa2[i - 1] * di3 + wa2[i] * dr3;
+        ch_ref(i - 1, k, 3) = wa2[i - 1] * dr3 - wa2[i] * di3;
+      }
+    }
+  }
+} /* passb3 */
+
+#undef ch_ref
+#undef cc_ref
+
+
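+/* radix-4 pass of the complex backward (inverse) transform */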
+static void passb4(integer ido, integer l1, const real *cc, real *ch, 
+                   const real *wa1, const real *wa2, const real *wa3)
+{
+  /* System generated locals */
+  integer cc_offset, ch_offset;
+
+  /* Local variables */
+  integer i, k;
+  real ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
+
+
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*4 + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*l1 + (a_2))*ido + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * (1 + l1);
+  ch -= ch_offset;
+  cc_offset = 1 + ido * 5;
+  cc -= cc_offset;
+  --wa1;
+  --wa2;
+  --wa3;
+
+  /* Function Body */
+  if (ido == 2) {
+    for (k = 1; k <= l1; ++k) {
+      ti1 = cc_ref(2, 1, k) - cc_ref(2, 3, k);
+      ti2 = cc_ref(2, 1, k) + cc_ref(2, 3, k);
+      tr4 = cc_ref(2, 4, k) - cc_ref(2, 2, k);
+      ti3 = cc_ref(2, 2, k) + cc_ref(2, 4, k);
+      tr1 = cc_ref(1, 1, k) - cc_ref(1, 3, k);
+      tr2 = cc_ref(1, 1, k) + cc_ref(1, 3, k);
+      ti4 = cc_ref(1, 2, k) - cc_ref(1, 4, k);
+      tr3 = cc_ref(1, 2, k) + cc_ref(1, 4, k);
+      ch_ref(1, k, 1) = tr2 + tr3;
+      ch_ref(1, k, 3) = tr2 - tr3;
+      ch_ref(2, k, 1) = ti2 + ti3;
+      ch_ref(2, k, 3) = ti2 - ti3;
+      ch_ref(1, k, 2) = tr1 + tr4;
+      ch_ref(1, k, 4) = tr1 - tr4;
+      ch_ref(2, k, 2) = ti1 + ti4;
+      ch_ref(2, k, 4) = ti1 - ti4;
+    }
+  } else {
+    for (k = 1; k <= l1; ++k) {
+      for (i = 2; i <= ido; i += 2) {
+        ti1 = cc_ref(i, 1, k) - cc_ref(i, 3, k);
+        ti2 = cc_ref(i, 1, k) + cc_ref(i, 3, k);
+        ti3 = cc_ref(i, 2, k) + cc_ref(i, 4, k);
+        tr4 = cc_ref(i, 4, k) - cc_ref(i, 2, k);
+        tr1 = cc_ref(i - 1, 1, k) - cc_ref(i - 1, 3, k);
+        tr2 = cc_ref(i - 1, 1, k) + cc_ref(i - 1, 3, k);
+        ti4 = cc_ref(i - 1, 2, k) - cc_ref(i - 1, 4, k);
+        tr3 = cc_ref(i - 1, 2, k) + cc_ref(i - 1, 4, k);
+        ch_ref(i - 1, k, 1) = tr2 + tr3;
+        cr3 = tr2 - tr3;
+        ch_ref(i, k, 1) = ti2 + ti3;
+        ci3 = ti2 - ti3;
+        cr2 = tr1 + tr4;
+        cr4 = tr1 - tr4;
+        ci2 = ti1 + ti4;
+        ci4 = ti1 - ti4;
+        ch_ref(i - 1, k, 2) = wa1[i - 1] * cr2 - wa1[i] * ci2;
+        ch_ref(i, k, 2) = wa1[i - 1] * ci2 + wa1[i] * cr2;
+        ch_ref(i - 1, k, 3) = wa2[i - 1] * cr3 - wa2[i] * ci3;
+        ch_ref(i, k, 3) = wa2[i - 1] * ci3 + wa2[i] * cr3;
+        ch_ref(i - 1, k, 4) = wa3[i - 1] * cr4 - wa3[i] * ci4;
+        ch_ref(i, k, 4) = wa3[i - 1] * ci4 + wa3[i] * cr4;
+      }
+    }
+  }
+} /* passb4 */
+
+#undef ch_ref
+#undef cc_ref
+
+/* passf5 and passb5 merged */
+static void passfb5(integer ido, integer l1, const real *cc, real *ch, 
+                    const real *wa1, const real *wa2, const real *wa3, const real *wa4, real fsign)
+{
+  const real tr11 = .309016994374947f;
+  const real ti11 = .951056516295154f*fsign;
+  const real tr12 = -.809016994374947f;
+  const real ti12 = .587785252292473f*fsign;
+
+  /* System generated locals */
+  integer cc_offset, ch_offset;
+
+  /* Local variables */
+  integer i, k;
+  real ci2, ci3, ci4, ci5, di3, di4, di5, di2, cr2, cr3, cr5, cr4, ti2, ti3,
+    ti4, ti5, dr3, dr4, dr5, dr2, tr2, tr3, tr4, tr5;
+
+
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*5 + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*l1 + (a_2))*ido + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * (1 + l1);
+  ch -= ch_offset;
+  cc_offset = 1 + ido * 6;
+  cc -= cc_offset;
+  --wa1;
+  --wa2;
+  --wa3;
+  --wa4;
+
+  /* Function Body */
+  if (ido == 2) {
+    for (k = 1; k <= l1; ++k) {
+      ti5 = cc_ref(2, 2, k) - cc_ref(2, 5, k);
+      ti2 = cc_ref(2, 2, k) + cc_ref(2, 5, k);
+      ti4 = cc_ref(2, 3, k) - cc_ref(2, 4, k);
+      ti3 = cc_ref(2, 3, k) + cc_ref(2, 4, k);
+      tr5 = cc_ref(1, 2, k) - cc_ref(1, 5, k);
+      tr2 = cc_ref(1, 2, k) + cc_ref(1, 5, k);
+      tr4 = cc_ref(1, 3, k) - cc_ref(1, 4, k);
+      tr3 = cc_ref(1, 3, k) + cc_ref(1, 4, k);
+      ch_ref(1, k, 1) = cc_ref(1, 1, k) + tr2 + tr3;
+      ch_ref(2, k, 1) = cc_ref(2, 1, k) + ti2 + ti3;
+      cr2 = cc_ref(1, 1, k) + tr11 * tr2 + tr12 * tr3;
+      ci2 = cc_ref(2, 1, k) + tr11 * ti2 + tr12 * ti3;
+      cr3 = cc_ref(1, 1, k) + tr12 * tr2 + tr11 * tr3;
+      ci3 = cc_ref(2, 1, k) + tr12 * ti2 + tr11 * ti3;
+      cr5 = ti11 * tr5 + ti12 * tr4;
+      ci5 = ti11 * ti5 + ti12 * ti4;
+      cr4 = ti12 * tr5 - ti11 * tr4;
+      ci4 = ti12 * ti5 - ti11 * ti4;
+      ch_ref(1, k, 2) = cr2 - ci5;
+      ch_ref(1, k, 5) = cr2 + ci5;
+      ch_ref(2, k, 2) = ci2 + cr5;
+      ch_ref(2, k, 3) = ci3 + cr4;
+      ch_ref(1, k, 3) = cr3 - ci4;
+      ch_ref(1, k, 4) = cr3 + ci4;
+      ch_ref(2, k, 4) = ci3 - cr4;
+      ch_ref(2, k, 5) = ci2 - cr5;
+    }
+  } else {
+    for (k = 1; k <= l1; ++k) {
+      for (i = 2; i <= ido; i += 2) {
+        ti5 = cc_ref(i, 2, k) - cc_ref(i, 5, k);
+        ti2 = cc_ref(i, 2, k) + cc_ref(i, 5, k);
+        ti4 = cc_ref(i, 3, k) - cc_ref(i, 4, k);
+        ti3 = cc_ref(i, 3, k) + cc_ref(i, 4, k);
+        tr5 = cc_ref(i - 1, 2, k) - cc_ref(i - 1, 5, k);
+        tr2 = cc_ref(i - 1, 2, k) + cc_ref(i - 1, 5, k);
+        tr4 = cc_ref(i - 1, 3, k) - cc_ref(i - 1, 4, k);
+        tr3 = cc_ref(i - 1, 3, k) + cc_ref(i - 1, 4, k);
+        ch_ref(i - 1, k, 1) = cc_ref(i - 1, 1, k) + tr2 + tr3;
+        ch_ref(i, k, 1) = cc_ref(i, 1, k) + ti2 + ti3;
+        cr2 = cc_ref(i - 1, 1, k) + tr11 * tr2 + tr12 * tr3;
+        ci2 = cc_ref(i, 1, k) + tr11 * ti2 + tr12 * ti3;
+        cr3 = cc_ref(i - 1, 1, k) + tr12 * tr2 + tr11 * tr3;
+        ci3 = cc_ref(i, 1, k) + tr12 * ti2 + tr11 * ti3;
+        cr5 = ti11 * tr5 + ti12 * tr4;
+        ci5 = ti11 * ti5 + ti12 * ti4;
+        cr4 = ti12 * tr5 - ti11 * tr4;
+        ci4 = ti12 * ti5 - ti11 * ti4;
+        dr3 = cr3 - ci4;
+        dr4 = cr3 + ci4;
+        di3 = ci3 + cr4;
+        di4 = ci3 - cr4;
+        dr5 = cr2 + ci5;
+        dr2 = cr2 - ci5;
+        di5 = ci2 - cr5;
+        di2 = ci2 + cr5;
+        ch_ref(i - 1, k, 2) = wa1[i - 1] * dr2 - fsign*wa1[i] * di2;
+        ch_ref(i, k, 2) = wa1[i - 1] * di2 + fsign*wa1[i] * dr2;
+        ch_ref(i - 1, k, 3) = wa2[i - 1] * dr3 - fsign*wa2[i] * di3;
+        ch_ref(i, k, 3) = wa2[i - 1] * di3 + fsign*wa2[i] * dr3;
+        ch_ref(i - 1, k, 4) = wa3[i - 1] * dr4 - fsign*wa3[i] * di4;
+        ch_ref(i, k, 4) = wa3[i - 1] * di4 + fsign*wa3[i] * dr4;
+        ch_ref(i - 1, k, 5) = wa4[i - 1] * dr5 - fsign*wa4[i] * di5;
+        ch_ref(i, k, 5) = wa4[i - 1] * di5 + fsign*wa4[i] * dr5;
+      }
+    }
+  }
+} /* passfb5 */
+
+#undef ch_ref
+#undef cc_ref
+
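+/* radix-2 pass of the complex forward transform */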
+static void passf2(integer ido, integer l1, const real *cc, real *ch, const real *wa1)
+{
+  /* System generated locals */
+  integer cc_offset, ch_offset;
+
+  /* Local variables */
+  integer i, k;
+  real ti2, tr2;
+
+
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*2 + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*l1 + (a_2))*ido + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * (1 + l1);
+  ch -= ch_offset;
+  cc_offset = 1 + ido * 3;
+  cc -= cc_offset;
+  --wa1;
+
+  /* Function Body */
+  if (ido == 2) {
+    for (k = 1; k <= l1; ++k) {
+      ch_ref(1, k, 1) = cc_ref(1, 1, k) + cc_ref(1, 2, k);
+      ch_ref(1, k, 2) = cc_ref(1, 1, k) - cc_ref(1, 2, k);
+      ch_ref(2, k, 1) = cc_ref(2, 1, k) + cc_ref(2, 2, k);
+      ch_ref(2, k, 2) = cc_ref(2, 1, k) - cc_ref(2, 2, k);
+    }
+  } else {
+    for (k = 1; k <= l1; ++k) {
+      for (i = 2; i <= ido; i += 2) {
+        ch_ref(i - 1, k, 1) = cc_ref(i - 1, 1, k) + cc_ref(i - 1, 2, k);
+        tr2 = cc_ref(i - 1, 1, k) - cc_ref(i - 1, 2, k);
+        ch_ref(i, k, 1) = cc_ref(i, 1, k) + cc_ref(i, 2, k);
+        ti2 = cc_ref(i, 1, k) - cc_ref(i, 2, k);
+        ch_ref(i, k, 2) = wa1[i - 1] * ti2 - wa1[i] * tr2;
+        ch_ref(i - 1, k, 2) = wa1[i - 1] * tr2 + wa1[i] * ti2;
+      }
+    }
+  }
+} /* passf2 */
+
+#undef ch_ref
+#undef cc_ref
+
+
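+/* radix-3 pass of the complex forward transform */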
+static void passf3(integer ido, integer l1, const real *cc, real *ch, 
+                   const real *wa1, const real *wa2)
+{
+  static const real taur = -.5f;
+  static const real taui = -.866025403784439f;
+
+  /* System generated locals */
+  integer cc_offset, ch_offset;
+
+  /* Local variables */
+  integer i, k;
+  real ci2, ci3, di2, di3, cr2, cr3, dr2, dr3, ti2, tr2;
+
+
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*3 + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*l1 + (a_2))*ido + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * (1 + l1);
+  ch -= ch_offset;
+  cc_offset = 1 + (ido << 2);
+  cc -= cc_offset;
+  --wa1;
+  --wa2;
+
+  /* Function Body */
+  if (ido == 2) {
+    for (k = 1; k <= l1; ++k) {
+      tr2 = cc_ref(1, 2, k) + cc_ref(1, 3, k);
+      cr2 = cc_ref(1, 1, k) + taur * tr2;
+      ch_ref(1, k, 1) = cc_ref(1, 1, k) + tr2;
+      ti2 = cc_ref(2, 2, k) + cc_ref(2, 3, k);
+      ci2 = cc_ref(2, 1, k) + taur * ti2;
+      ch_ref(2, k, 1) = cc_ref(2, 1, k) + ti2;
+      cr3 = taui * (cc_ref(1, 2, k) - cc_ref(1, 3, k));
+      ci3 = taui * (cc_ref(2, 2, k) - cc_ref(2, 3, k));
+      ch_ref(1, k, 2) = cr2 - ci3;
+      ch_ref(1, k, 3) = cr2 + ci3;
+      ch_ref(2, k, 2) = ci2 + cr3;
+      ch_ref(2, k, 3) = ci2 - cr3;
+    }
+  } else {
+    for (k = 1; k <= l1; ++k) {
+      for (i = 2; i <= ido; i += 2) {
+        tr2 = cc_ref(i - 1, 2, k) + cc_ref(i - 1, 3, k);
+        cr2 = cc_ref(i - 1, 1, k) + taur * tr2;
+        ch_ref(i - 1, k, 1) = cc_ref(i - 1, 1, k) + tr2;
+        ti2 = cc_ref(i, 2, k) + cc_ref(i, 3, k);
+        ci2 = cc_ref(i, 1, k) + taur * ti2;
+        ch_ref(i, k, 1) = cc_ref(i, 1, k) + ti2;
+        cr3 = taui * (cc_ref(i - 1, 2, k) - cc_ref(i - 1, 3, k));
+        ci3 = taui * (cc_ref(i, 2, k) - cc_ref(i, 3, k));
+        dr2 = cr2 - ci3;
+        dr3 = cr2 + ci3;
+        di2 = ci2 + cr3;
+        di3 = ci2 - cr3;
+        ch_ref(i, k, 2) = wa1[i - 1] * di2 - wa1[i] * dr2;
+        ch_ref(i - 1, k, 2) = wa1[i - 1] * dr2 + wa1[i] * di2;
+        ch_ref(i, k, 3) = wa2[i - 1] * di3 - wa2[i] * dr3;
+        ch_ref(i - 1, k, 3) = wa2[i - 1] * dr3 + wa2[i] * di3;
+      }
+    }
+  }
+} /* passf3 */
+
+#undef ch_ref
+#undef cc_ref
+
+
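+/* radix-4 pass of the complex forward transform */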
+static void passf4(integer ido, integer l1, const real *cc, real *ch, 
+                   const real *wa1, const real *wa2, const real *wa3)
+{
+  /* System generated locals */
+  integer cc_offset, ch_offset;
+
+  /* Local variables */
+  integer i, k;
+  real ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
+
+
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*4 + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*l1 + (a_2))*ido + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * (1 + l1);
+  ch -= ch_offset;
+  cc_offset = 1 + ido * 5;
+  cc -= cc_offset;
+  --wa1;
+  --wa2;
+  --wa3;
+
+  /* Function Body */
+  if (ido == 2) {
+    for (k = 1; k <= l1; ++k) {
+      ti1 = cc_ref(2, 1, k) - cc_ref(2, 3, k);
+      ti2 = cc_ref(2, 1, k) + cc_ref(2, 3, k);
+      tr4 = cc_ref(2, 2, k) - cc_ref(2, 4, k);
+      ti3 = cc_ref(2, 2, k) + cc_ref(2, 4, k);
+      tr1 = cc_ref(1, 1, k) - cc_ref(1, 3, k);
+      tr2 = cc_ref(1, 1, k) + cc_ref(1, 3, k);
+      ti4 = cc_ref(1, 4, k) - cc_ref(1, 2, k);
+      tr3 = cc_ref(1, 2, k) + cc_ref(1, 4, k);
+      ch_ref(1, k, 1) = tr2 + tr3;
+      ch_ref(1, k, 3) = tr2 - tr3;
+      ch_ref(2, k, 1) = ti2 + ti3;
+      ch_ref(2, k, 3) = ti2 - ti3;
+      ch_ref(1, k, 2) = tr1 + tr4;
+      ch_ref(1, k, 4) = tr1 - tr4;
+      ch_ref(2, k, 2) = ti1 + ti4;
+      ch_ref(2, k, 4) = ti1 - ti4;
+    }
+  } else {
+    for (k = 1; k <= l1; ++k) {
+      for (i = 2; i <= ido; i += 2) {
+        ti1 = cc_ref(i, 1, k) - cc_ref(i, 3, k);
+        ti2 = cc_ref(i, 1, k) + cc_ref(i, 3, k);
+        ti3 = cc_ref(i, 2, k) + cc_ref(i, 4, k);
+        tr4 = cc_ref(i, 2, k) - cc_ref(i, 4, k);
+        tr1 = cc_ref(i - 1, 1, k) - cc_ref(i - 1, 3, k);
+        tr2 = cc_ref(i - 1, 1, k) + cc_ref(i - 1, 3, k);
+        ti4 = cc_ref(i - 1, 4, k) - cc_ref(i - 1, 2, k);
+        tr3 = cc_ref(i - 1, 2, k) + cc_ref(i - 1, 4, k);
+        ch_ref(i - 1, k, 1) = tr2 + tr3;
+        cr3 = tr2 - tr3;
+        ch_ref(i, k, 1) = ti2 + ti3;
+        ci3 = ti2 - ti3;
+        cr2 = tr1 + tr4;
+        cr4 = tr1 - tr4;
+        ci2 = ti1 + ti4;
+        ci4 = ti1 - ti4;
+        ch_ref(i - 1, k, 2) = wa1[i - 1] * cr2 + wa1[i] * ci2;
+        ch_ref(i, k, 2) = wa1[i - 1] * ci2 - wa1[i] * cr2;
+        ch_ref(i - 1, k, 3) = wa2[i - 1] * cr3 + wa2[i] * ci3;
+        ch_ref(i, k, 3) = wa2[i - 1] * ci3 - wa2[i] * cr3;
+        ch_ref(i - 1, k, 4) = wa3[i - 1] * cr4 + wa3[i] * ci4;
+        ch_ref(i, k, 4) = wa3[i - 1] * ci4 - wa3[i] * cr4;
+      }
+    }
+  }
+} /* passf4 */
+
+#undef ch_ref
+#undef cc_ref
+
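+/* radix-2 pass of the real backward (synthesis) transform */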
+static void radb2(integer ido, integer l1, const real *cc, real *ch, const real *wa1)
+{
+  /* System generated locals */
+  integer cc_offset, ch_offset;
+
+  /* Local variables */
+  integer i, k, ic;
+  real ti2, tr2;
+  integer idp2;
+
+
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*2 + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*l1 + (a_2))*ido + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * (1 + l1);
+  ch -= ch_offset;
+  cc_offset = 1 + ido * 3;
+  cc -= cc_offset;
+  --wa1;
+
+  /* Function Body */
+  for (k = 1; k <= l1; ++k) {
+    ch_ref(1, k, 1) = cc_ref(1, 1, k) + cc_ref(ido, 2, k);
+    ch_ref(1, k, 2) = cc_ref(1, 1, k) - cc_ref(ido, 2, k);
+  }
+  if (ido < 2) return;
+  else if (ido != 2) {
+    idp2 = ido + 2;
+    for (k = 1; k <= l1; ++k) {
+      for (i = 3; i <= ido; i += 2) {
+        ic = idp2 - i;
+        ch_ref(i - 1, k, 1) = cc_ref(i - 1, 1, k) + cc_ref(ic - 1, 2, k);
+        tr2 = cc_ref(i - 1, 1, k) - cc_ref(ic - 1, 2, k);
+        ch_ref(i, k, 1) = cc_ref(i, 1, k) - cc_ref(ic, 2, k);
+        ti2 = cc_ref(i, 1, k) + cc_ref(ic, 2, k);
+        ch_ref(i - 1, k, 2) = wa1[i - 2] * tr2 - wa1[i - 1] * ti2;
+        ch_ref(i, k, 2) = wa1[i - 2] * ti2 + wa1[i - 1] * tr2;
+      }
+    }
+    if (ido % 2 == 1) return;
+  }
+  for (k = 1; k <= l1; ++k) {
+    ch_ref(ido, k, 1) = cc_ref(ido, 1, k) + cc_ref(ido, 1, k);
+    ch_ref(ido, k, 2) = -(cc_ref(1, 2, k) + cc_ref(1, 2, k));
+  }
+} /* radb2 */
+
+#undef ch_ref
+#undef cc_ref
+
+
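+/* radix-3 pass of the real backward (synthesis) transform */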
+static void radb3(integer ido, integer l1, const real *cc, real *ch, 
+                  const real *wa1, const real *wa2)
+{
+  /* Initialized data */
+
+  static const real taur = -.5f;
+  static const real taui = .866025403784439f;
+
+  /* System generated locals */
+  integer cc_offset, ch_offset;
+
+  /* Local variables */
+  integer i, k, ic;
+  real ci2, ci3, di2, di3, cr2, cr3, dr2, dr3, ti2, tr2;
+  integer idp2;
+
+
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*3 + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*l1 + (a_2))*ido + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * (1 + l1);
+  ch -= ch_offset;
+  cc_offset = 1 + (ido << 2);
+  cc -= cc_offset;
+  --wa1;
+  --wa2;
+
+  /* Function Body */
+  for (k = 1; k <= l1; ++k) {
+    tr2 = cc_ref(ido, 2, k) + cc_ref(ido, 2, k);
+    cr2 = cc_ref(1, 1, k) + taur * tr2;
+    ch_ref(1, k, 1) = cc_ref(1, 1, k) + tr2;
+    ci3 = taui * (cc_ref(1, 3, k) + cc_ref(1, 3, k));
+    ch_ref(1, k, 2) = cr2 - ci3;
+    ch_ref(1, k, 3) = cr2 + ci3;
+  }
+  if (ido == 1) {
+    return;
+  }
+  idp2 = ido + 2;
+  for (k = 1; k <= l1; ++k) {
+    for (i = 3; i <= ido; i += 2) {
+      ic = idp2 - i;
+      tr2 = cc_ref(i - 1, 3, k) + cc_ref(ic - 1, 2, k);
+      cr2 = cc_ref(i - 1, 1, k) + taur * tr2;
+      ch_ref(i - 1, k, 1) = cc_ref(i - 1, 1, k) + tr2;
+      ti2 = cc_ref(i, 3, k) - cc_ref(ic, 2, k);
+      ci2 = cc_ref(i, 1, k) + taur * ti2;
+      ch_ref(i, k, 1) = cc_ref(i, 1, k) + ti2;
+      cr3 = taui * (cc_ref(i - 1, 3, k) - cc_ref(ic - 1, 2, k));
+      ci3 = taui * (cc_ref(i, 3, k) + cc_ref(ic, 2, k));
+      dr2 = cr2 - ci3;
+      dr3 = cr2 + ci3;
+      di2 = ci2 + cr3;
+      di3 = ci2 - cr3;
+      ch_ref(i - 1, k, 2) = wa1[i - 2] * dr2 - wa1[i - 1] * di2;
+      ch_ref(i, k, 2) = wa1[i - 2] * di2 + wa1[i - 1] * dr2;
+      ch_ref(i - 1, k, 3) = wa2[i - 2] * dr3 - wa2[i - 1] * di3;
+      ch_ref(i, k, 3) = wa2[i - 2] * di3 + wa2[i - 1] * dr3;
+    }
+  }
+} /* radb3 */
+
+#undef ch_ref
+#undef cc_ref
+
+
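+/* radix-4 pass of the real backward (synthesis) transform */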
+static void radb4(integer ido, integer l1, const real *cc, real *ch, 
+                  const real *wa1, const real *wa2, const real *wa3)
+{
+  /* Initialized data */
+
+  static const real sqrt2 = 1.414213562373095f;
+
+  /* System generated locals */
+  integer cc_offset, ch_offset;
+
+  /* Local variables */
+  integer i, k, ic;
+  real ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
+  integer idp2;
+
+
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*4 + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*l1 + (a_2))*ido + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * (1 + l1);
+  ch -= ch_offset;
+  cc_offset = 1 + ido * 5;
+  cc -= cc_offset;
+  --wa1;
+  --wa2;
+  --wa3;
+
+  /* Function Body */
+  for (k = 1; k <= l1; ++k) {
+    tr1 = cc_ref(1, 1, k) - cc_ref(ido, 4, k);
+    tr2 = cc_ref(1, 1, k) + cc_ref(ido, 4, k);
+    tr3 = cc_ref(ido, 2, k) + cc_ref(ido, 2, k);
+    tr4 = cc_ref(1, 3, k) + cc_ref(1, 3, k);
+    ch_ref(1, k, 1) = tr2 + tr3;
+    ch_ref(1, k, 2) = tr1 - tr4;
+    ch_ref(1, k, 3) = tr2 - tr3;
+    ch_ref(1, k, 4) = tr1 + tr4;
+  }
+  if (ido < 2) return;
+  if (ido != 2) {
+    idp2 = ido + 2;
+    for (k = 1; k <= l1; ++k) {
+      for (i = 3; i <= ido; i += 2) {
+        ic = idp2 - i;
+        ti1 = cc_ref(i, 1, k) + cc_ref(ic, 4, k);
+        ti2 = cc_ref(i, 1, k) - cc_ref(ic, 4, k);
+        ti3 = cc_ref(i, 3, k) - cc_ref(ic, 2, k);
+        tr4 = cc_ref(i, 3, k) + cc_ref(ic, 2, k);
+        tr1 = cc_ref(i - 1, 1, k) - cc_ref(ic - 1, 4, k);
+        tr2 = cc_ref(i - 1, 1, k) + cc_ref(ic - 1, 4, k);
+        ti4 = cc_ref(i - 1, 3, k) - cc_ref(ic - 1, 2, k);
+        tr3 = cc_ref(i - 1, 3, k) + cc_ref(ic - 1, 2, k);
+        ch_ref(i - 1, k, 1) = tr2 + tr3;
+        cr3 = tr2 - tr3;
+        ch_ref(i, k, 1) = ti2 + ti3;
+        ci3 = ti2 - ti3;
+        cr2 = tr1 - tr4;
+        cr4 = tr1 + tr4;
+        ci2 = ti1 + ti4;
+        ci4 = ti1 - ti4;
+        ch_ref(i - 1, k, 2) = wa1[i - 2] * cr2 - wa1[i - 1] * ci2;
+        ch_ref(i, k, 2) = wa1[i - 2] * ci2 + wa1[i - 1] * cr2;
+        ch_ref(i - 1, k, 3) = wa2[i - 2] * cr3 - wa2[i - 1] * ci3;
+        ch_ref(i, k, 3) = wa2[i - 2] * ci3 + wa2[i - 1] * cr3;
+        ch_ref(i - 1, k, 4) = wa3[i - 2] * cr4 - wa3[i - 1] * ci4;
+        ch_ref(i, k, 4) = wa3[i - 2] * ci4 + wa3[i - 1] * cr4;
+      }
+    }
+    if (ido % 2 == 1) return;
+  }
+  for (k = 1; k <= l1; ++k) {
+    ti1 = cc_ref(1, 2, k) + cc_ref(1, 4, k);
+    ti2 = cc_ref(1, 4, k) - cc_ref(1, 2, k);
+    tr1 = cc_ref(ido, 1, k) - cc_ref(ido, 3, k);
+    tr2 = cc_ref(ido, 1, k) + cc_ref(ido, 3, k);
+    ch_ref(ido, k, 1) = tr2 + tr2;
+    ch_ref(ido, k, 2) = sqrt2 * (tr1 - ti1);
+    ch_ref(ido, k, 3) = ti2 + ti2;
+    ch_ref(ido, k, 4) = -sqrt2 * (tr1 + ti1);
+  }
+} /* radb4 */
+
+#undef ch_ref
+#undef cc_ref
+
+
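+/* radix-5 pass of the real backward (synthesis) transform */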
+static void radb5(integer ido, integer l1, const real *cc, real *ch, 
+                  const real *wa1, const real *wa2, const real *wa3, const real *wa4)
+{
+  /* Initialized data */
+
+  static const real tr11 = .309016994374947f;
+  static const real ti11 = .951056516295154f;
+  static const real tr12 = -.809016994374947f;
+  static const real ti12 = .587785252292473f;
+
+  /* System generated locals */
+  integer cc_offset, ch_offset;
+
+  /* Local variables */
+  integer i, k, ic;
+  real ci2, ci3, ci4, ci5, di3, di4, di5, di2, cr2, cr3, cr5, cr4, ti2, ti3,
+    ti4, ti5, dr3, dr4, dr5, dr2, tr2, tr3, tr4, tr5;
+  integer idp2;
+
+
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*5 + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*l1 + (a_2))*ido + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * (1 + l1);
+  ch -= ch_offset;
+  cc_offset = 1 + ido * 6;
+  cc -= cc_offset;
+  --wa1;
+  --wa2;
+  --wa3;
+  --wa4;
+
+  /* Function Body */
+  for (k = 1; k <= l1; ++k) {
+    ti5 = cc_ref(1, 3, k) + cc_ref(1, 3, k);
+    ti4 = cc_ref(1, 5, k) + cc_ref(1, 5, k);
+    tr2 = cc_ref(ido, 2, k) + cc_ref(ido, 2, k);
+    tr3 = cc_ref(ido, 4, k) + cc_ref(ido, 4, k);
+    ch_ref(1, k, 1) = cc_ref(1, 1, k) + tr2 + tr3;
+    cr2 = cc_ref(1, 1, k) + tr11 * tr2 + tr12 * tr3;
+    cr3 = cc_ref(1, 1, k) + tr12 * tr2 + tr11 * tr3;
+    ci5 = ti11 * ti5 + ti12 * ti4;
+    ci4 = ti12 * ti5 - ti11 * ti4;
+    ch_ref(1, k, 2) = cr2 - ci5;
+    ch_ref(1, k, 3) = cr3 - ci4;
+    ch_ref(1, k, 4) = cr3 + ci4;
+    ch_ref(1, k, 5) = cr2 + ci5;
+  }
+  if (ido == 1) {
+    return;
+  }
+  idp2 = ido + 2;
+  for (k = 1; k <= l1; ++k) {
+    for (i = 3; i <= ido; i += 2) {
+      ic = idp2 - i;
+      ti5 = cc_ref(i, 3, k) + cc_ref(ic, 2, k);
+      ti2 = cc_ref(i, 3, k) - cc_ref(ic, 2, k);
+      ti4 = cc_ref(i, 5, k) + cc_ref(ic, 4, k);
+      ti3 = cc_ref(i, 5, k) - cc_ref(ic, 4, k);
+      tr5 = cc_ref(i - 1, 3, k) - cc_ref(ic - 1, 2, k);
+      tr2 = cc_ref(i - 1, 3, k) + cc_ref(ic - 1, 2, k);
+      tr4 = cc_ref(i - 1, 5, k) - cc_ref(ic - 1, 4, k);
+      tr3 = cc_ref(i - 1, 5, k) + cc_ref(ic - 1, 4, k);
+      ch_ref(i - 1, k, 1) = cc_ref(i - 1, 1, k) + tr2 + tr3;
+      ch_ref(i, k, 1) = cc_ref(i, 1, k) + ti2 + ti3;
+      cr2 = cc_ref(i - 1, 1, k) + tr11 * tr2 + tr12 * tr3;
+      ci2 = cc_ref(i, 1, k) + tr11 * ti2 + tr12 * ti3;
+      cr3 = cc_ref(i - 1, 1, k) + tr12 * tr2 + tr11 * tr3;
+      ci3 = cc_ref(i, 1, k) + tr12 * ti2 + tr11 * ti3;
+      cr5 = ti11 * tr5 + ti12 * tr4;
+      ci5 = ti11 * ti5 + ti12 * ti4;
+      cr4 = ti12 * tr5 - ti11 * tr4;
+      ci4 = ti12 * ti5 - ti11 * ti4;
+      dr3 = cr3 - ci4;
+      dr4 = cr3 + ci4;
+      di3 = ci3 + cr4;
+      di4 = ci3 - cr4;
+      dr5 = cr2 + ci5;
+      dr2 = cr2 - ci5;
+      di5 = ci2 - cr5;
+      di2 = ci2 + cr5;
+      ch_ref(i - 1, k, 2) = wa1[i - 2] * dr2 - wa1[i - 1] * di2;
+      ch_ref(i, k, 2) = wa1[i - 2] * di2 + wa1[i - 1] * dr2;
+      ch_ref(i - 1, k, 3) = wa2[i - 2] * dr3 - wa2[i - 1] * di3;
+      ch_ref(i, k, 3) = wa2[i - 2] * di3 + wa2[i - 1] * dr3;
+      ch_ref(i - 1, k, 4) = wa3[i - 2] * dr4 - wa3[i - 1] * di4;
+      ch_ref(i, k, 4) = wa3[i - 2] * di4 + wa3[i - 1] * dr4;
+      ch_ref(i - 1, k, 5) = wa4[i - 2] * dr5 - wa4[i - 1] * di5;
+      ch_ref(i, k, 5) = wa4[i - 2] * di5 + wa4[i - 1] * dr5;
+    }
+  }
+} /* radb5 */
+
+#undef ch_ref
+#undef cc_ref
+
+
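+/* general-radix pass of the real backward (synthesis) transform, used for factors other than 2, 3, 4 and 5 */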
+static void radbg(integer ido, integer ip, integer l1, integer idl1, 
+                  const real *cc, real *c1, real *c2, real *ch, real *ch2, const real *wa)
+{
+  /* System generated locals */
+  integer ch_offset, cc_offset,
+    c1_offset, c2_offset, ch2_offset;
+
+  /* Local variables */
+  integer i, j, k, l, j2, ic, jc, lc, ik, is;
+  real dc2, ai1, ai2, ar1, ar2, ds2;
+  integer nbd;
+  real dcp, arg, dsp, ar1h, ar2h;
+  integer idp2, ipp2, idij, ipph;
+
+
+#define c1_ref(a_1,a_2,a_3) c1[((a_3)*l1 + (a_2))*ido + a_1]
+#define c2_ref(a_1,a_2) c2[(a_2)*idl1 + a_1]
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*ip + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*l1 + (a_2))*ido + a_1]
+#define ch2_ref(a_1,a_2) ch2[(a_2)*idl1 + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * (1 + l1);
+  ch -= ch_offset;
+  c1_offset = 1 + ido * (1 + l1);
+  c1 -= c1_offset;
+  cc_offset = 1 + ido * (1 + ip);
+  cc -= cc_offset;
+  ch2_offset = 1 + idl1;
+  ch2 -= ch2_offset;
+  c2_offset = 1 + idl1;
+  c2 -= c2_offset;
+  --wa;
+
+  /* Function Body */
+  arg = (2*M_PI) / (real) (ip);
+  dcp = cos(arg);
+  dsp = sin(arg);
+  idp2 = ido + 2;
+  nbd = (ido - 1) / 2;
+  ipp2 = ip + 2;
+  ipph = (ip + 1) / 2;
+  if (ido >= l1) {
+    for (k = 1; k <= l1; ++k) {
+      for (i = 1; i <= ido; ++i) {
+        ch_ref(i, k, 1) = cc_ref(i, 1, k);
+      }
+    }
+  } else {
+    for (i = 1; i <= ido; ++i) {
+      for (k = 1; k <= l1; ++k) {
+        ch_ref(i, k, 1) = cc_ref(i, 1, k);
+      }
+    }
+  }
+  for (j = 2; j <= ipph; ++j) {
+    jc = ipp2 - j;
+    j2 = j + j;
+    for (k = 1; k <= l1; ++k) {
+      ch_ref(1, k, j) = cc_ref(ido, j2 - 2, k) + cc_ref(ido, j2 - 2, k);
+      ch_ref(1, k, jc) = cc_ref(1, j2 - 1, k) + cc_ref(1, j2 - 1, k);
+    }
+  }
+  if (ido != 1) {
+    if (nbd >= l1) {
+      for (j = 2; j <= ipph; ++j) {
+        jc = ipp2 - j;
+        for (k = 1; k <= l1; ++k) {
+          for (i = 3; i <= ido; i += 2) {
+            ic = idp2 - i;
+            ch_ref(i - 1, k, j) = cc_ref(i - 1, (j << 1) - 1, k) + cc_ref(ic - 1, (j << 1) - 2, k);
+            ch_ref(i - 1, k, jc) = cc_ref(i - 1, (j << 1) - 1, k) - cc_ref(ic - 1, (j << 1) - 2, k);
+            ch_ref(i, k, j) = cc_ref(i, (j << 1) - 1, k) - cc_ref(ic, (j << 1) - 2, k);
+            ch_ref(i, k, jc) = cc_ref(i, (j << 1) - 1, k) + cc_ref(ic, (j << 1) - 2, k);
+          }
+        }
+      }
+    } else {
+      for (j = 2; j <= ipph; ++j) {
+        jc = ipp2 - j;
+        for (i = 3; i <= ido; i += 2) {
+          ic = idp2 - i;
+          for (k = 1; k <= l1; ++k) {
+            ch_ref(i - 1, k, j) = cc_ref(i - 1, (j << 1) - 1, k) + cc_ref(ic - 1, (j << 1) - 2, k);
+            ch_ref(i - 1, k, jc) = cc_ref(i - 1, (j << 1) - 1, k) - cc_ref(ic - 1, (j << 1) - 2, k);
+            ch_ref(i, k, j) = cc_ref(i, (j << 1) - 1, k) - cc_ref(ic, (j << 1) - 2, k);
+            ch_ref(i, k, jc) = cc_ref(i, (j << 1) - 1, k) + cc_ref(ic, (j << 1) - 2, k);
+          }
+        }
+      }
+    }
+  }
+  ar1 = 1.f;
+  ai1 = 0.f;
+  for (l = 2; l <= ipph; ++l) {
+    lc = ipp2 - l;
+    ar1h = dcp * ar1 - dsp * ai1;
+    ai1 = dcp * ai1 + dsp * ar1;
+    ar1 = ar1h;
+    for (ik = 1; ik <= idl1; ++ik) {
+      c2_ref(ik, l) = ch2_ref(ik, 1) + ar1 * ch2_ref(ik, 2);
+      c2_ref(ik, lc) = ai1 * ch2_ref(ik, ip);
+    }
+    dc2 = ar1;
+    ds2 = ai1;
+    ar2 = ar1;
+    ai2 = ai1;
+    for (j = 3; j <= ipph; ++j) {
+      jc = ipp2 - j;
+      ar2h = dc2 * ar2 - ds2 * ai2;
+      ai2 = dc2 * ai2 + ds2 * ar2;
+      ar2 = ar2h;
+      for (ik = 1; ik <= idl1; ++ik) {
+        c2_ref(ik, l) = c2_ref(ik, l) + ar2 * ch2_ref(ik, j);
+        c2_ref(ik, lc) = c2_ref(ik, lc) + ai2 * ch2_ref(ik, jc);
+      }
+    }
+  }
+  for (j = 2; j <= ipph; ++j) {
+    for (ik = 1; ik <= idl1; ++ik) {
+      ch2_ref(ik, 1) = ch2_ref(ik, 1) + ch2_ref(ik, j);
+    }
+  }
+  for (j = 2; j <= ipph; ++j) {
+    jc = ipp2 - j;
+    for (k = 1; k <= l1; ++k) {
+      ch_ref(1, k, j) = c1_ref(1, k, j) - c1_ref(1, k, jc);
+      ch_ref(1, k, jc) = c1_ref(1, k, j) + c1_ref(1, k, jc);
+    }
+  }
+  if (ido != 1) {
+    if (nbd >= l1) {
+      for (j = 2; j <= ipph; ++j) {
+        jc = ipp2 - j;
+        for (k = 1; k <= l1; ++k) {
+          for (i = 3; i <= ido; i += 2) {
+            ch_ref(i - 1, k, j) = c1_ref(i - 1, k, j) - c1_ref(i, k, jc);
+            ch_ref(i - 1, k, jc) = c1_ref(i - 1, k, j) + c1_ref(i, k, jc);
+            ch_ref(i, k, j) = c1_ref(i, k, j) + c1_ref(i - 1, k, jc);
+            ch_ref(i, k, jc) = c1_ref(i, k, j) - c1_ref(i - 1, k, jc);
+          }
+        }
+      }
+    } else {
+      for (j = 2; j <= ipph; ++j) {
+        jc = ipp2 - j;
+        for (i = 3; i <= ido; i += 2) {
+          for (k = 1; k <= l1; ++k) {
+            ch_ref(i - 1, k, j) = c1_ref(i - 1, k, j) - c1_ref(i, k, jc);
+            ch_ref(i - 1, k, jc) = c1_ref(i - 1, k, j) + c1_ref(i, k, jc);
+            ch_ref(i, k, j) = c1_ref(i, k, j) + c1_ref(i - 1, k, jc);
+            ch_ref(i, k, jc) = c1_ref(i, k, j) - c1_ref(i - 1, k, jc);
+          }
+        }
+      }
+    }
+  }
+  if (ido == 1) {
+    return;
+  }
+  for (ik = 1; ik <= idl1; ++ik) {
+    c2_ref(ik, 1) = ch2_ref(ik, 1);
+  }
+  for (j = 2; j <= ip; ++j) {
+    for (k = 1; k <= l1; ++k) {
+      c1_ref(1, k, j) = ch_ref(1, k, j);
+    }
+  }
+  if (nbd <= l1) {
+    is = -(ido);
+    for (j = 2; j <= ip; ++j) {
+      is += ido;
+      idij = is;
+      for (i = 3; i <= ido; i += 2) {
+        idij += 2;
+        for (k = 1; k <= l1; ++k) {
+          c1_ref(i - 1, k, j) = wa[idij - 1] * ch_ref(i - 1, k, j) - wa[idij] * ch_ref(i, k, j);
+          c1_ref(i, k, j) = wa[idij - 1] * ch_ref(i, k, j) + wa[idij] * ch_ref(i - 1, k, j);
+        }
+      }
+    }
+  } else {
+    is = -(ido);
+    for (j = 2; j <= ip; ++j) {
+      is += ido;
+      for (k = 1; k <= l1; ++k) {
+        idij = is;
+        for (i = 3; i <= ido; i += 2) {
+          idij += 2;
+          c1_ref(i - 1, k, j) = wa[idij - 1] * ch_ref(i - 1, k, j) - wa[idij] * ch_ref(i, k, j);
+          c1_ref(i, k, j) = wa[idij - 1] * ch_ref(i, k, j) + wa[idij] * ch_ref(i - 1, k, j);
+        }
+      }
+    }
+  }
+} /* radbg */
+
+#undef ch2_ref
+#undef ch_ref
+#undef cc_ref
+#undef c2_ref
+#undef c1_ref
+
+
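+/* radix-2 pass of the real forward (analysis) transform */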
+static void radf2(integer ido, integer l1, const real *cc, real *ch, 
+                  const real *wa1)
+{
+  /* System generated locals */
+  integer ch_offset, cc_offset;
+
+  /* Local variables */
+  integer i, k, ic;
+  real ti2, tr2;
+  integer idp2;
+
+
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*l1 + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*2 + (a_2))*ido + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * 3;
+  ch -= ch_offset;
+  cc_offset = 1 + ido * (1 + l1);
+  cc -= cc_offset;
+  --wa1;
+
+  /* Function Body */
+  for (k = 1; k <= l1; ++k) {
+    ch_ref(1, 1, k) = cc_ref(1, k, 1) + cc_ref(1, k, 2);
+    ch_ref(ido, 2, k) = cc_ref(1, k, 1) - cc_ref(1, k, 2);
+  }
+  if (ido < 2) return;
+  if (ido != 2) {
+    idp2 = ido + 2;
+    for (k = 1; k <= l1; ++k) {
+      for (i = 3; i <= ido; i += 2) {
+        ic = idp2 - i;
+        tr2 = wa1[i - 2] * cc_ref(i - 1, k, 2) + wa1[i - 1] * cc_ref(i, k, 2);
+        ti2 = wa1[i - 2] * cc_ref(i, k, 2) - wa1[i - 1] * cc_ref(i - 1, k, 2);
+        ch_ref(i, 1, k) = cc_ref(i, k, 1) + ti2;
+        ch_ref(ic, 2, k) = ti2 - cc_ref(i, k, 1);
+        ch_ref(i - 1, 1, k) = cc_ref(i - 1, k, 1) + tr2;
+        ch_ref(ic - 1, 2, k) = cc_ref(i - 1, k, 1) - tr2;
+      }
+    }
+    if (ido % 2 == 1) {
+      return;
+    }
+  }
+  for (k = 1; k <= l1; ++k) {
+    ch_ref(1, 2, k) = -cc_ref(ido, k, 2);
+    ch_ref(ido, 1, k) = cc_ref(ido, k, 1);
+  }
+} /* radf2 */
+
+#undef ch_ref
+#undef cc_ref
+
+
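+/* radix-3 pass of the real forward (analysis) transform */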
+static void radf3(integer ido, integer l1, const real *cc, real *ch, 
+                  const real *wa1, const real *wa2)
+{
+  static const real taur = -.5f;
+  static const real taui = .866025403784439f;
+
+  /* System generated locals */
+  integer ch_offset, cc_offset;
+
+  /* Local variables */
+  integer i, k, ic;
+  real ci2, di2, di3, cr2, dr2, dr3, ti2, ti3, tr2, tr3;
+  integer idp2;
+
+
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*l1 + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*3 + (a_2))*ido + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + (ido << 2);
+  ch -= ch_offset;
+  cc_offset = 1 + ido * (1 + l1);
+  cc -= cc_offset;
+  --wa1;
+  --wa2;
+
+  /* Function Body */
+  for (k = 1; k <= l1; ++k) {
+    cr2 = cc_ref(1, k, 2) + cc_ref(1, k, 3);
+    ch_ref(1, 1, k) = cc_ref(1, k, 1) + cr2;
+    ch_ref(1, 3, k) = taui * (cc_ref(1, k, 3) - cc_ref(1, k, 2));
+    ch_ref(ido, 2, k) = cc_ref(1, k, 1) + taur * cr2;
+  }
+  if (ido == 1) {
+    return;
+  }
+  idp2 = ido + 2;
+  for (k = 1; k <= l1; ++k) {
+    for (i = 3; i <= ido; i += 2) {
+      ic = idp2 - i;
+      dr2 = wa1[i - 2] * cc_ref(i - 1, k, 2) + wa1[i - 1] * cc_ref(i, k, 2);
+      di2 = wa1[i - 2] * cc_ref(i, k, 2) - wa1[i - 1] * cc_ref(i - 1, k, 2);
+      dr3 = wa2[i - 2] * cc_ref(i - 1, k, 3) + wa2[i - 1] * cc_ref(i, k, 3);
+      di3 = wa2[i - 2] * cc_ref(i, k, 3) - wa2[i - 1] * cc_ref(i - 1, k, 3);
+      cr2 = dr2 + dr3;
+      ci2 = di2 + di3;
+      ch_ref(i - 1, 1, k) = cc_ref(i - 1, k, 1) + cr2;
+      ch_ref(i, 1, k) = cc_ref(i, k, 1) + ci2;
+      tr2 = cc_ref(i - 1, k, 1) + taur * cr2;
+      ti2 = cc_ref(i, k, 1) + taur * ci2;
+      tr3 = taui * (di2 - di3);
+      ti3 = taui * (dr3 - dr2);
+      ch_ref(i - 1, 3, k) = tr2 + tr3;
+      ch_ref(ic - 1, 2, k) = tr2 - tr3;
+      ch_ref(i, 3, k) = ti2 + ti3;
+      ch_ref(ic, 2, k) = ti3 - ti2;
+    }
+  }
+} /* radf3 */
+
+#undef ch_ref
+#undef cc_ref
+
+
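+/* radix-4 pass of the real forward (analysis) transform */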
+static void radf4(integer ido, integer l1, const real *cc, real *ch, 
+                  const real *wa1, const real *wa2, const real *wa3)
+{
+  /* Initialized data */
+
+  static const real hsqt2 = .7071067811865475f;
+
+  /* System generated locals */
+  integer cc_offset, ch_offset;
+
+  /* Local variables */
+  integer i, k, ic;
+  real ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
+  integer idp2;
+
+
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*l1 + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*4 + (a_2))*ido + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * 5;
+  ch -= ch_offset;
+  cc_offset = 1 + ido * (1 + l1);
+  cc -= cc_offset;
+  --wa1;
+  --wa2;
+  --wa3;
+
+  /* Function Body */
+  for (k = 1; k <= l1; ++k) {
+    tr1 = cc_ref(1, k, 2) + cc_ref(1, k, 4);
+    tr2 = cc_ref(1, k, 1) + cc_ref(1, k, 3);
+    ch_ref(1, 1, k) = tr1 + tr2;
+    ch_ref(ido, 4, k) = tr2 - tr1;
+    ch_ref(ido, 2, k) = cc_ref(1, k, 1) - cc_ref(1, k, 3);
+    ch_ref(1, 3, k) = cc_ref(1, k, 4) - cc_ref(1, k, 2);
+  }  
+  if (ido < 2) return;
+  if (ido != 2) {
+    idp2 = ido + 2;
+    for (k = 1; k <= l1; ++k) {
+      for (i = 3; i <= ido; i += 2) {
+        ic = idp2 - i;
+        cr2 = wa1[i - 2] * cc_ref(i - 1, k, 2) + wa1[i - 1] * cc_ref(i, k, 2);
+        ci2 = wa1[i - 2] * cc_ref(i, k, 2) - wa1[i - 1] * cc_ref(i - 1, k, 2);
+        cr3 = wa2[i - 2] * cc_ref(i - 1, k, 3) + wa2[i - 1] * cc_ref(i, k, 3);
+        ci3 = wa2[i - 2] * cc_ref(i, k, 3) - wa2[i - 1] * cc_ref(i - 1, k, 3);
+        cr4 = wa3[i - 2] * cc_ref(i - 1, k, 4) + wa3[i - 1] * cc_ref(i, k, 4);
+        ci4 = wa3[i - 2] * cc_ref(i, k, 4) - wa3[i - 1] * cc_ref(i - 1, k, 4);
+        tr1 = cr2 + cr4;
+        tr4 = cr4 - cr2;
+        ti1 = ci2 + ci4;
+        ti4 = ci2 - ci4;
+        ti2 = cc_ref(i, k, 1) + ci3;
+        ti3 = cc_ref(i, k, 1) - ci3;
+        tr2 = cc_ref(i - 1, k, 1) + cr3;
+        tr3 = cc_ref(i - 1, k, 1) - cr3;
+        ch_ref(i - 1, 1, k) = tr1 + tr2;
+        ch_ref(ic - 1, 4, k) = tr2 - tr1;
+        ch_ref(i, 1, k) = ti1 + ti2;
+        ch_ref(ic, 4, k) = ti1 - ti2;
+        ch_ref(i - 1, 3, k) = ti4 + tr3;
+        ch_ref(ic - 1, 2, k) = tr3 - ti4;
+        ch_ref(i, 3, k) = tr4 + ti3;
+        ch_ref(ic, 2, k) = tr4 - ti3;
+      }
+    }
+    if (ido % 2 == 1) {
+      return;
+    }
+  }
+  for (k = 1; k <= l1; ++k) {
+    ti1 = -hsqt2 * (cc_ref(ido, k, 2) + cc_ref(ido, k, 4));
+    tr1 = hsqt2 * (cc_ref(ido, k, 2) - cc_ref(ido, k, 4));
+    ch_ref(ido, 1, k) = tr1 + cc_ref(ido, k, 1);
+    ch_ref(ido, 3, k) = cc_ref(ido, k, 1) - tr1;
+    ch_ref(1, 2, k) = ti1 - cc_ref(ido, k, 3);
+    ch_ref(1, 4, k) = ti1 + cc_ref(ido, k, 3);
+  }
+} /* radf4 */
+
+#undef ch_ref
+#undef cc_ref
+
+
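+/* radix-5 pass of the real forward (analysis) transform */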
+static void radf5(integer ido, integer l1, const real *cc, real *ch, 
+                  const real *wa1, const real *wa2, const real *wa3, const real *wa4)
+{
+  /* Initialized data */
+
+  static const real tr11 = .309016994374947f;
+  static const real ti11 = .951056516295154f;
+  static const real tr12 = -.809016994374947f;
+  static const real ti12 = .587785252292473f;
+
+  /* System generated locals */
+  integer cc_offset, ch_offset;
+
+  /* Local variables */
+  integer i, k, ic;
+  real ci2, di2, ci4, ci5, di3, di4, di5, ci3, cr2, cr3, dr2, dr3, dr4, dr5,
+    cr5, cr4, ti2, ti3, ti5, ti4, tr2, tr3, tr4, tr5;
+  integer idp2;
+
+
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*l1 + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*5 + (a_2))*ido + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * 6;
+  ch -= ch_offset;
+  cc_offset = 1 + ido * (1 + l1);
+  cc -= cc_offset;
+  --wa1;
+  --wa2;
+  --wa3;
+  --wa4;
+
+  /* Function Body */
+  for (k = 1; k <= l1; ++k) {
+    cr2 = cc_ref(1, k, 5) + cc_ref(1, k, 2);
+    ci5 = cc_ref(1, k, 5) - cc_ref(1, k, 2);
+    cr3 = cc_ref(1, k, 4) + cc_ref(1, k, 3);
+    ci4 = cc_ref(1, k, 4) - cc_ref(1, k, 3);
+    ch_ref(1, 1, k) = cc_ref(1, k, 1) + cr2 + cr3;
+    ch_ref(ido, 2, k) = cc_ref(1, k, 1) + tr11 * cr2 + tr12 * cr3;
+    ch_ref(1, 3, k) = ti11 * ci5 + ti12 * ci4;
+    ch_ref(ido, 4, k) = cc_ref(1, k, 1) + tr12 * cr2 + tr11 * cr3;
+    ch_ref(1, 5, k) = ti12 * ci5 - ti11 * ci4;
+  }
+  if (ido == 1) {
+    return;
+  }
+  idp2 = ido + 2;
+  for (k = 1; k <= l1; ++k) {
+    for (i = 3; i <= ido; i += 2) {
+      ic = idp2 - i;
+      dr2 = wa1[i - 2] * cc_ref(i - 1, k, 2) + wa1[i - 1] * cc_ref(i, k, 2);
+      di2 = wa1[i - 2] * cc_ref(i, k, 2) - wa1[i - 1] * cc_ref(i - 1, k, 2);
+      dr3 = wa2[i - 2] * cc_ref(i - 1, k, 3) + wa2[i - 1] * cc_ref(i, k, 3);
+      di3 = wa2[i - 2] * cc_ref(i, k, 3) - wa2[i - 1] * cc_ref(i - 1, k, 3);
+      dr4 = wa3[i - 2] * cc_ref(i - 1, k, 4) + wa3[i - 1] * cc_ref(i, k, 4);
+      di4 = wa3[i - 2] * cc_ref(i, k, 4) - wa3[i - 1] * cc_ref(i - 1, k, 4);
+      dr5 = wa4[i - 2] * cc_ref(i - 1, k, 5) + wa4[i - 1] * cc_ref(i, k, 5);
+      di5 = wa4[i - 2] * cc_ref(i, k, 5) - wa4[i - 1] * cc_ref(i - 1, k, 5);
+      cr2 = dr2 + dr5;
+      ci5 = dr5 - dr2;
+      cr5 = di2 - di5;
+      ci2 = di2 + di5;
+      cr3 = dr3 + dr4;
+      ci4 = dr4 - dr3;
+      cr4 = di3 - di4;
+      ci3 = di3 + di4;
+      ch_ref(i - 1, 1, k) = cc_ref(i - 1, k, 1) + cr2 + cr3;
+      ch_ref(i, 1, k) = cc_ref(i, k, 1) + ci2 + ci3;
+      tr2 = cc_ref(i - 1, k, 1) + tr11 * cr2 + tr12 * cr3;
+      ti2 = cc_ref(i, k, 1) + tr11 * ci2 + tr12 * ci3;
+      tr3 = cc_ref(i - 1, k, 1) + tr12 * cr2 + tr11 * cr3;
+      ti3 = cc_ref(i, k, 1) + tr12 * ci2 + tr11 * ci3;
+      tr5 = ti11 * cr5 + ti12 * cr4;
+      ti5 = ti11 * ci5 + ti12 * ci4;
+      tr4 = ti12 * cr5 - ti11 * cr4;
+      ti4 = ti12 * ci5 - ti11 * ci4;
+      ch_ref(i - 1, 3, k) = tr2 + tr5;
+      ch_ref(ic - 1, 2, k) = tr2 - tr5;
+      ch_ref(i, 3, k) = ti2 + ti5;
+      ch_ref(ic, 2, k) = ti5 - ti2;
+      ch_ref(i - 1, 5, k) = tr3 + tr4;
+      ch_ref(ic - 1, 4, k) = tr3 - tr4;
+      ch_ref(i, 5, k) = ti3 + ti4;
+      ch_ref(ic, 4, k) = ti4 - ti3;
+    }
+  }
+} /* radf5 */
+
+#undef ch_ref
+#undef cc_ref
+
+
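+/* general-radix pass of the real forward (analysis) transform, used for factors other than 2, 3, 4 and 5 */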
+static void radfg(integer ido, integer ip, integer l1, integer idl1, 
+                  real *cc, real *c1, real *c2, real *ch, real *ch2, const real *wa)
+{
+  /* System generated locals */
+  integer ch_offset, cc_offset,
+    c1_offset, c2_offset, ch2_offset;
+
+  /* Local variables */
+  integer i, j, k, l, j2, ic, jc, lc, ik, is;
+  real dc2, ai1, ai2, ar1, ar2, ds2;
+  integer nbd;
+  real dcp, arg, dsp, ar1h, ar2h;
+  integer idp2, ipp2, idij, ipph;
+
+
+#define c1_ref(a_1,a_2,a_3) c1[((a_3)*l1 + (a_2))*ido + a_1]
+#define c2_ref(a_1,a_2) c2[(a_2)*idl1 + a_1]
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*ip + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*l1 + (a_2))*ido + a_1]
+#define ch2_ref(a_1,a_2) ch2[(a_2)*idl1 + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * (1 + l1);
+  ch -= ch_offset;
+  c1_offset = 1 + ido * (1 + l1);
+  c1 -= c1_offset;
+  cc_offset = 1 + ido * (1 + ip);
+  cc -= cc_offset;
+  ch2_offset = 1 + idl1;
+  ch2 -= ch2_offset;
+  c2_offset = 1 + idl1;
+  c2 -= c2_offset;
+  --wa;
+
+  /* Function Body */
+  arg = (2*M_PI) / (real) (ip);
+  dcp = cos(arg);
+  dsp = sin(arg);
+  ipph = (ip + 1) / 2;
+  ipp2 = ip + 2;
+  idp2 = ido + 2;
+  nbd = (ido - 1) / 2;
+  if (ido == 1) {
+    for (ik = 1; ik <= idl1; ++ik) {
+      c2_ref(ik, 1) = ch2_ref(ik, 1);
+    }
+  } else {
+    for (ik = 1; ik <= idl1; ++ik) {
+      ch2_ref(ik, 1) = c2_ref(ik, 1);
+    }
+    for (j = 2; j <= ip; ++j) {
+      for (k = 1; k <= l1; ++k) {
+        ch_ref(1, k, j) = c1_ref(1, k, j);
+      }
+    }
+    if (nbd <= l1) {
+      is = -(ido);
+      for (j = 2; j <= ip; ++j) {
+        is += ido;
+        idij = is;
+        for (i = 3; i <= ido; i += 2) {
+          idij += 2;
+          for (k = 1; k <= l1; ++k) {
+            ch_ref(i - 1, k, j) = wa[idij - 1] * c1_ref(i - 1, k, j) + wa[idij] * c1_ref(i, k, j);
+            ch_ref(i, k, j) = wa[idij - 1] * c1_ref(i, k, j) - wa[idij] * c1_ref(i - 1, k, j);
+          }
+        }
+      }
+    } else {
+      is = -(ido);
+      for (j = 2; j <= ip; ++j) {
+        is += ido;
+        for (k = 1; k <= l1; ++k) {
+          idij = is;
+          for (i = 3; i <= ido; i += 2) {
+            idij += 2;
+            ch_ref(i - 1, k, j) = wa[idij - 1] * c1_ref(i - 1, k, j) + wa[idij] * c1_ref(i, k, j);
+            ch_ref(i, k, j) = wa[idij - 1] * c1_ref(i, k, j) - wa[idij] * c1_ref(i - 1, k, j);
+          }
+        }
+      }
+    }
+    if (nbd >= l1) {
+      for (j = 2; j <= ipph; ++j) {
+        jc = ipp2 - j;
+        for (k = 1; k <= l1; ++k) {
+          for (i = 3; i <= ido; i += 2) {
+            c1_ref(i - 1, k, j) = ch_ref(i - 1, k, j) + ch_ref(i - 1, k, jc);
+            c1_ref(i - 1, k, jc) = ch_ref(i, k, j) - ch_ref(i, k, jc);
+            c1_ref(i, k, j) = ch_ref(i, k, j) + ch_ref(i, k, jc);
+            c1_ref(i, k, jc) = ch_ref(i - 1, k, jc) - ch_ref(i - 1, k, j);
+          }
+        }
+      }
+    } else {
+      for (j = 2; j <= ipph; ++j) {
+        jc = ipp2 - j;
+        for (i = 3; i <= ido; i += 2) {
+          for (k = 1; k <= l1; ++k) {
+            c1_ref(i - 1, k, j) = ch_ref(i - 1, k, j) + ch_ref(i - 1, k, jc);
+            c1_ref(i - 1, k, jc) = ch_ref(i, k, j) - ch_ref(i, k, jc);
+            c1_ref(i, k, j) = ch_ref(i, k, j) + ch_ref(i, k, jc);
+            c1_ref(i, k, jc) = ch_ref(i - 1, k, jc) - ch_ref(i - 1, k, j);
+          }
+        }
+      }
+    }
+  }
+  for (j = 2; j <= ipph; ++j) {
+    jc = ipp2 - j;
+    for (k = 1; k <= l1; ++k) {
+      c1_ref(1, k, j) = ch_ref(1, k, j) + ch_ref(1, k, jc);
+      c1_ref(1, k, jc) = ch_ref(1, k, jc) - ch_ref(1, k, j);
+    }
+  }
+
+  ar1 = 1.f;
+  ai1 = 0.f;
+  for (l = 2; l <= ipph; ++l) {
+    lc = ipp2 - l;
+    ar1h = dcp * ar1 - dsp * ai1;
+    ai1 = dcp * ai1 + dsp * ar1;
+    ar1 = ar1h;
+    for (ik = 1; ik <= idl1; ++ik) {
+      ch2_ref(ik, l) = c2_ref(ik, 1) + ar1 * c2_ref(ik, 2);
+      ch2_ref(ik, lc) = ai1 * c2_ref(ik, ip);
+    }
+    dc2 = ar1;
+    ds2 = ai1;
+    ar2 = ar1;
+    ai2 = ai1;
+    for (j = 3; j <= ipph; ++j) {
+      jc = ipp2 - j;
+      ar2h = dc2 * ar2 - ds2 * ai2;
+      ai2 = dc2 * ai2 + ds2 * ar2;
+      ar2 = ar2h;
+      for (ik = 1; ik <= idl1; ++ik) {
+        ch2_ref(ik, l) = ch2_ref(ik, l) + ar2 * c2_ref(ik, j);
+        ch2_ref(ik, lc) = ch2_ref(ik, lc) + ai2 * c2_ref(ik, jc);
+      }
+    }
+  }
+  for (j = 2; j <= ipph; ++j) {
+    for (ik = 1; ik <= idl1; ++ik) {
+      ch2_ref(ik, 1) = ch2_ref(ik, 1) + c2_ref(ik, j);
+    }
+  }
+
+  if (ido >= l1) {
+    for (k = 1; k <= l1; ++k) {
+      for (i = 1; i <= ido; ++i) {
+        cc_ref(i, 1, k) = ch_ref(i, k, 1);
+      }
+    }
+  } else {
+    for (i = 1; i <= ido; ++i) {
+      for (k = 1; k <= l1; ++k) {
+        cc_ref(i, 1, k) = ch_ref(i, k, 1);
+      }
+    }
+  }
+  for (j = 2; j <= ipph; ++j) {
+    jc = ipp2 - j;
+    j2 = j + j;
+    for (k = 1; k <= l1; ++k) {
+      cc_ref(ido, j2 - 2, k) = ch_ref(1, k, j);
+      cc_ref(1, j2 - 1, k) = ch_ref(1, k, jc);
+    }
+  }
+  if (ido == 1) {
+    return;
+  }
+  if (nbd >= l1) {
+    for (j = 2; j <= ipph; ++j) {
+      jc = ipp2 - j;
+      j2 = j + j;
+      for (k = 1; k <= l1; ++k) {
+        for (i = 3; i <= ido; i += 2) {
+          ic = idp2 - i;
+          cc_ref(i - 1, j2 - 1, k) = ch_ref(i - 1, k, j) + ch_ref(i - 1, k, jc);
+          cc_ref(ic - 1, j2 - 2, k) = ch_ref(i - 1, k, j) - ch_ref(i - 1, k, jc);
+          cc_ref(i, j2 - 1, k) = ch_ref(i, k, j) + ch_ref(i, k, jc);
+          cc_ref(ic, j2 - 2, k) = ch_ref(i, k, jc) - ch_ref(i, k, j);
+        }
+      }
+    }
+  } else {
+    for (j = 2; j <= ipph; ++j) {
+      jc = ipp2 - j;
+      j2 = j + j;
+      for (i = 3; i <= ido; i += 2) {
+        ic = idp2 - i;
+        for (k = 1; k <= l1; ++k) {
+          cc_ref(i - 1, j2 - 1, k) = ch_ref(i - 1, k, j) + ch_ref(i - 1, k, jc);
+          cc_ref(ic - 1, j2 - 2, k) = ch_ref(i - 1, k, j) - ch_ref(i - 1, k, jc);
+          cc_ref(i, j2 - 1, k) = ch_ref(i, k, j) + ch_ref(i, k, jc);
+          cc_ref(ic, j2 - 2, k) = ch_ref(i, k, jc) - ch_ref(i, k, j);
+        }
+      }
+    }
+  }
+} /* radfg */
+
+#undef ch2_ref
+#undef ch_ref
+#undef cc_ref
+#undef c2_ref
+#undef c1_ref
+
+
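+/* cfftb1/cfftf1 below implement the mixed-radix Cooley-Tukey driver:
+   each pass processes one factor ip of n, ping-ponging between the data
+   buffer c and the scratch buffer ch (the flag na tracks which buffer
+   currently holds the data), while iw walks through the twiddle-factor
+   table wa. Factors 2, 3, 4 and 5 get dedicated pass routines; any
+   other factor falls back to the generic passfb. */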
+static void cfftb1(integer n, real *c, real *ch, const real *wa, integer *ifac)
+{
+  integer i, k1, l1, l2, na, nf, ip, iw, ix2, ix3, ix4, nac, ido, 
+    idl1, idot;
+
+  /* Function Body */
+  nf = ifac[1];
+  na = 0;
+  l1 = 1;
+  iw = 0;
+  for (k1 = 1; k1 <= nf; ++k1) {
+    ip = ifac[k1 + 1];
+    l2 = ip * l1;
+    ido = n / l2;
+    idot = ido + ido;
+    idl1 = idot * l1;
+    switch (ip) {
+      case 4:
+        ix2 = iw + idot;
+        ix3 = ix2 + idot;
+        passb4(idot, l1, na?ch:c, na?c:ch, &wa[iw], &wa[ix2], &wa[ix3]);
+        na = 1 - na;
+        break;
+      case 2:
+        passb2(idot, l1, na?ch:c, na?c:ch, &wa[iw]);
+        na = 1 - na;
+        break;
+      case 3:
+        ix2 = iw + idot;
+        passb3(idot, l1, na?ch:c, na?c:ch, &wa[iw], &wa[ix2]);
+        na = 1 - na;
+        break;
+      case 5:
+        ix2 = iw + idot;
+        ix3 = ix2 + idot;
+        ix4 = ix3 + idot;
+        passfb5(idot, l1, na?ch:c, na?c:ch, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4], +1);
+        na = 1 - na;
+        break;
+      default:
+        if (na == 0) {
+          passfb(&nac, idot, ip, l1, idl1, c, c, c, ch, ch, &wa[iw], +1);
+        } else {
+          passfb(&nac, idot, ip, l1, idl1, ch, ch, ch, c, c, &wa[iw], +1);
+        }
+        if (nac != 0) {
+          na = 1 - na;
+        }
+        break;
+    }
+    l1 = l2;
+    iw += (ip - 1) * idot;
+  }
+  if (na == 0) {
+    return;
+  }
+  for (i = 0; i < 2*n; ++i) {
+    c[i] = ch[i];
+  }
+} /* cfftb1 */
+
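+/* wsave layout for the complex transforms (at least 4*n+15 reals, as
+   set up by cffti): wsave[0..2n-1] is scratch, wsave[2n..4n-1] holds
+   the twiddle factors, and the factorization table ifac is stored
+   starting at wsave[4n]. */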
+void cfftb(integer n, real *c, real *wsave)
+{
+  integer iw1, iw2;
+
+  /* Parameter adjustments */
+  --wsave;
+  --c;
+
+  /* Function Body */
+  if (n == 1) {
+    return;
+  }
+  iw1 = 2*n + 1;
+  iw2 = iw1 + 2*n;
+  cfftb1(n, &c[1], &wsave[1], &wsave[iw1], (int*)&wsave[iw2]);
+} /* cfftb */
+
+static void cfftf1(integer n, real *c, real *ch, const real *wa, integer *ifac)
+{
+  /* Local variables */
+  integer i, k1, l1, l2, na, nf, ip, iw, ix2, ix3, ix4, nac, ido, 
+    idl1, idot;
+
+  /* Function Body */
+  nf = ifac[1];
+  na = 0;
+  l1 = 1;
+  iw = 0;
+  for (k1 = 1; k1 <= nf; ++k1) {
+    ip = ifac[k1 + 1];
+    l2 = ip * l1;
+    ido = n / l2;
+    idot = ido + ido;
+    idl1 = idot * l1;
+    switch (ip) {
+      case 4:
+        ix2 = iw + idot;
+        ix3 = ix2 + idot;
+        passf4(idot, l1, na?ch:c, na?c:ch, &wa[iw], &wa[ix2], &wa[ix3]);
+        na = 1 - na;
+        break;
+      case 2:
+        passf2(idot, l1, na?ch:c, na?c:ch, &wa[iw]);
+        na = 1 - na;
+        break;
+      case 3:
+        ix2 = iw + idot;
+        passf3(idot, l1, na?ch:c, na?c:ch, &wa[iw], &wa[ix2]);
+        na = 1 - na;
+        break;
+      case 5:
+        ix2 = iw + idot;
+        ix3 = ix2 + idot;
+        ix4 = ix3 + idot;
+        passfb5(idot, l1, na?ch:c, na?c:ch, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4], -1);
+        na = 1 - na;
+        break;
+      default:
+        if (na == 0) {
+          passfb(&nac, idot, ip, l1, idl1, c, c, c, ch, ch, &wa[iw], -1);
+        } else {
+          passfb(&nac, idot, ip, l1, idl1, ch, ch, ch, c, c, &wa[iw], -1);
+        }
+        if (nac != 0) {
+          na = 1 - na;
+        }
+        break;
+    }
+    l1 = l2;
+    iw += (ip - 1)*idot;
+  }
+  if (na == 0) {
+    return;
+  }
+  for (i = 0; i < 2*n; ++i) {
+    c[i] = ch[i];
+  }
+} /* cfftf1 */
+
+void cfftf(integer n, real *c, real *wsave)
+{
+  integer iw1, iw2;
+
+  /* Parameter adjustments */
+  --wsave;
+  --c;
+
+  /* Function Body */
+  if (n == 1) {
+    return;
+  }
+  iw1 = 2*n + 1;
+  iw2 = iw1 + 2*n;
+  cfftf1(n, &c[1], &wsave[1], &wsave[iw1], (int*)&wsave[iw2]);
+} /* cfftf */
+
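+/* decompose factors n into ifac[3..]: it first tries the radices in
+   ntryh[] (the order differs between the complex and real setups),
+   then odd candidates 7, 9, 11, ...; a factor of 2 found late is moved
+   to the front of the list. On return, ifac[1] = n and ifac[2] = the
+   number of factors nf. */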
+static int decompose(integer n, integer *ifac, integer ntryh[4]) {  
+  integer ntry=0, nl = n, nf = 0, nq, nr, i, j = 0;
+  do {
+    if (j < 4) {
+      ntry = ntryh[j];
+    } else {
+      ntry += 2;
+    }
+    ++j;
+  L104:
+    nq = nl / ntry;
+    nr = nl - ntry * nq;
+    if (nr != 0) continue;
+    ++nf;
+    ifac[nf + 2] = ntry;
+    nl = nq;
+    if (ntry == 2 && nf != 1) {
+      for (i = 2; i <= nf; ++i) {
+        integer ib = nf - i + 2;
+        ifac[ib + 2] = ifac[ib + 1];
+      }
+      ifac[3] = 2;
+    }
+    if (nl != 1) {
+      goto L104;
+    }
+  } while (nl != 1);
+  ifac[1] = n;
+  ifac[2] = nf;  
+  return nf;
+}
+
+static void cffti1(integer n, real *wa, integer *ifac)
+{
+  static integer ntryh[4] = { 3,4,2,5 };
+
+  /* Local variables */
+  integer i, j, i1, k1, l1, l2;
+  real fi;
+  integer ld, ii, nf, ip;
+  real arg;
+  integer ido, ipm;
+  real argh;
+  integer idot;
+  real argld;
+
+  /* Parameter adjustments */
+  --ifac;
+  --wa;
+
+  nf = decompose(n, ifac, ntryh);
+
+  argh = (2*M_PI) / (real) (n);
+  i = 2;
+  l1 = 1;
+  for (k1 = 1; k1 <= nf; ++k1) {
+    ip = ifac[k1 + 2];
+    ld = 0;
+    l2 = l1 * ip;
+    ido = n / l2;
+    idot = ido + ido + 2;
+    ipm = ip - 1;
+    for (j = 1; j <= ipm; ++j) {
+      i1 = i;
+      wa[i - 1] = 1.f;
+      wa[i] = 0.f;
+      ld += l1;
+      fi = 0.f;
+      argld = (real) ld * argh;
+      for (ii = 4; ii <= idot; ii += 2) {
+        i += 2;
+        fi += 1.f;
+        arg = fi * argld;
+        wa[i - 1] = cos(arg);
+        wa[i] = sin(arg);
+      }
+      if (ip > 5) {
+        wa[i1 - 1] = wa[i - 1];
+        wa[i1] = wa[i];
+      }
+    }
+    l1 = l2;
+  }
+} /* cffti1 */
+
+void cffti(integer n, real *wsave)
+{
+  integer iw1, iw2;
+  /* Parameter adjustments */
+  --wsave;
+
+  /* Function Body */
+  if (n == 1) {
+    return;
+  }
+  iw1 = 2*n + 1;
+  iw2 = iw1 + 2*n;
+  cffti1(n, &wsave[iw1], (int*)&wsave[iw2]);
+  return;
+} /* cffti */
+
+static void rfftb1(integer n, real *c, real *ch, const real *wa, integer *ifac)
+{
+  /* Local variables */
+  integer i, k1, l1, l2, na, nf, ip, iw, ix2, ix3, ix4, ido, idl1;
+
+  /* Function Body */
+  nf = ifac[1];
+  na = 0;
+  l1 = 1;
+  iw = 0;
+  for (k1 = 1; k1 <= nf; ++k1) {
+    ip = ifac[k1 + 1];
+    l2 = ip * l1;
+    ido = n / l2;
+    idl1 = ido * l1;
+    switch (ip) {
+      case 4:
+        ix2 = iw + ido;
+        ix3 = ix2 + ido;
+        radb4(ido, l1, na?ch:c, na?c:ch, &wa[iw], &wa[ix2], &wa[ix3]);
+        na = 1 - na;
+        break;
+      case 2:
+        radb2(ido, l1, na?ch:c, na?c:ch, &wa[iw]);
+        na = 1 - na;
+        break;
+      case 3:
+        ix2 = iw + ido;
+        radb3(ido, l1, na?ch:c, na?c:ch, &wa[iw], &wa[ix2]);
+        na = 1 - na;
+        break;
+      case 5:
+        ix2 = iw + ido;
+        ix3 = ix2 + ido;
+        ix4 = ix3 + ido;
+        radb5(ido, l1, na?ch:c, na?c:ch, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4]);
+        na = 1 - na;
+        break;
+      default:
+        if (na == 0) {
+          radbg(ido, ip, l1, idl1, c, c, c, ch, ch, &wa[iw]);
+        } else {
+          radbg(ido, ip, l1, idl1, ch, ch, ch, c, c, &wa[iw]);
+        }
+        if (ido == 1) {
+          na = 1 - na;
+        }
+        break;
+    }
+    l1 = l2;
+    iw += (ip - 1) * ido;
+  }
+  if (na == 0) {
+    return;
+  }
+  for (i = 0; i < n; ++i) {
+    c[i] = ch[i];
+  }
+} /* rfftb1 */
+
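+/* Note: unlike rfftb1 above, rfftf1 applies the radf* passes with the
+   factors of n taken in reverse order (kh = nf - k1) and the twiddle
+   index iw stepping backwards through wa. */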
+static void rfftf1(integer n, real *c, real *ch, const real *wa, integer *ifac)
+{
+  /* Local variables */
+  integer i, k1, l1, l2, na, kh, nf, ip, iw, ix2, ix3, ix4, ido, idl1;
+
+  /* Function Body */
+  nf = ifac[1];
+  na = 1;
+  l2 = n;
+  iw = n-1;
+  for (k1 = 1; k1 <= nf; ++k1) {
+    kh = nf - k1;
+    ip = ifac[kh + 2];
+    l1 = l2 / ip;
+    ido = n / l2;
+    idl1 = ido * l1;
+    iw -= (ip - 1) * ido;
+    na = 1 - na;
+    switch (ip) {
+      case 4:
+        ix2 = iw + ido;
+        ix3 = ix2 + ido;
+        radf4(ido, l1, na ? ch : c, na ? c : ch, &wa[iw], &wa[ix2], &wa[ix3]);
+        break;
+      case 2:
+        radf2(ido, l1, na ? ch : c, na ? c : ch, &wa[iw]);
+        break;
+      case 3:        
+        ix2 = iw + ido;
+        radf3(ido, l1, na ? ch : c, na ? c : ch, &wa[iw], &wa[ix2]);
+        break;
+      case 5:
+        ix2 = iw + ido;
+        ix3 = ix2 + ido;
+        ix4 = ix3 + ido;
+        radf5(ido, l1, na ? ch : c, na ? c : ch, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4]);
+        break;
+      default:
+        if (ido == 1) {
+          na = 1 - na;
+        }
+        if (na == 0) {
+          radfg(ido, ip, l1, idl1, c, c, c, ch, ch, &wa[iw]);
+          na = 1;
+        } else {
+          radfg(ido, ip, l1, idl1, ch, ch, ch, c, c, &wa[iw]);
+          na = 0;
+        }
+        break;
+    }
+    l2 = l1;
+  }
+  if (na == 1) {
+    return;
+  }
+  for (i = 0; i < n; ++i) {
+    c[i] = ch[i];
+  }
+}
+
+void rfftb(integer n, real *r, real *wsave)
+{
+
+  /* Parameter adjustments */
+  --wsave;
+  --r;
+
+  /* Function Body */
+  if (n == 1) {
+    return;
+  }
+  rfftb1(n, &r[1], &wsave[1], &wsave[n + 1], (int*)&wsave[(n << 1) + 1]);
+} /* rfftb */
+
+static void rffti1(integer n, real *wa, integer *ifac)
+{
+  static integer ntryh[4] = { 4,2,3,5 };
+
+  /* Local variables */
+  integer i, j, k1, l1, l2;
+  real fi;
+  integer ld, ii, nf, ip, is;
+  real arg;
+  integer ido, ipm;
+  integer nfm1;
+  real argh;
+  real argld;
+
+  /* Parameter adjustments */
+  --ifac;
+  --wa;
+
+  nf = decompose(n, ifac, ntryh);
+
+  argh = (2*M_PI) / (real) (n);
+  is = 0;
+  nfm1 = nf - 1;
+  l1 = 1;
+  if (nfm1 == 0) {
+    return;
+  }
+  for (k1 = 1; k1 <= nfm1; ++k1) {
+    ip = ifac[k1 + 2];
+    ld = 0;
+    l2 = l1 * ip;
+    ido = n / l2;
+    ipm = ip - 1;
+    for (j = 1; j <= ipm; ++j) {
+      ld += l1;
+      i = is;
+      argld = (real) ld * argh;
+      fi = 0.f;
+      for (ii = 3; ii <= ido; ii += 2) {
+        i += 2;
+        fi += 1.f;
+        arg = fi * argld;
+        wa[i - 1] = cos(arg);
+        wa[i] = sin(arg);
+      }
+      is += ido;
+    }
+    l1 = l2;
+  }
+} /* rffti1 */
+
+void rfftf(integer n, real *r, real *wsave)
+{
+
+  /* Parameter adjustments */
+  --wsave;
+  --r;
+
+  /* Function Body */
+  if (n == 1) {
+    return;
+  }
+  rfftf1(n, &r[1], &wsave[1], &wsave[n + 1], (int*)&wsave[(n << 1) + 1]);
+} /* rfftf */
+
+void rffti(integer n, real *wsave)
+{
+  /* Parameter adjustments */
+  --wsave;
+
+  /* Function Body */
+  if (n == 1) {
+    return;
+  }
+  rffti1(n, &wsave[n + 1], (int*)&wsave[(n << 1) + 1]);
+  return;
+} /* rffti */
+
+static void cosqb1(integer n, real *x, real *w, real *xh)
+{
+  /* Local variables */
+  integer i, k, kc, np2, ns2;
+  real xim1;
+  integer modn;
+
+  /* Parameter adjustments */
+  --xh;
+  --w;
+  --x;
+
+  /* Function Body */
+  ns2 = (n + 1) / 2;
+  np2 = n + 2;
+  for (i = 3; i <= n; i += 2) {
+    xim1 = x[i - 1] + x[i];
+    x[i] -= x[i - 1];
+    x[i - 1] = xim1;
+  }
+  x[1] += x[1];
+  modn = n % 2;
+  if (modn == 0) {
+    x[n] += x[n];
+  }
+  rfftb(n, &x[1], &xh[1]);
+  for (k = 2; k <= ns2; ++k) {
+    kc = np2 - k;
+    xh[k] = w[k - 1] * x[kc] + w[kc - 1] * x[k];
+    xh[kc] = w[k - 1] * x[k] - w[kc - 1] * x[kc];
+  }
+  if (modn == 0) {
+    x[ns2 + 1] = w[ns2] * (x[ns2 + 1] + x[ns2 + 1]);
+  }
+  for (k = 2; k <= ns2; ++k) {
+    kc = np2 - k;
+    x[k] = xh[k] + xh[kc];
+    x[kc] = xh[k] - xh[kc];
+  }
+  x[1] += x[1];
+} /* cosqb1 */
+
+void cosqb(integer n, real *x, real *wsave)
+{
+  static const real tsqrt2 = 2.82842712474619f;
+
+  /* Local variables */
+  real x1;
+
+  /* Parameter adjustments */
+  --wsave;
+  --x;
+
+  if (n < 2) {
+    x[1] *= 4.f;
+  } else if (n == 2) {
+    x1 = (x[1] + x[2]) * 4.f;
+    x[2] = tsqrt2 * (x[1] - x[2]);
+    x[1] = x1;
+  } else {
+    cosqb1(n, &x[1], &wsave[1], &wsave[n + 1]);
+  }
+} /* cosqb */
+
+static void cosqf1(integer n, real *x, real *w, real *xh)
+{
+  /* Local variables */
+  integer i, k, kc, np2, ns2;
+  real xim1;
+  integer modn;
+
+  /* Parameter adjustments */
+  --xh;
+  --w;
+  --x;
+
+  /* Function Body */
+  ns2 = (n + 1) / 2;
+  np2 = n + 2;
+  for (k = 2; k <= ns2; ++k) {
+    kc = np2 - k;
+    xh[k] = x[k] + x[kc];
+    xh[kc] = x[k] - x[kc];
+  }
+  modn = n % 2;
+  if (modn == 0) {
+    xh[ns2 + 1] = x[ns2 + 1] + x[ns2 + 1];
+  }
+  for (k = 2; k <= ns2; ++k) {
+    kc = np2 - k;
+    x[k] = w[k - 1] * xh[kc] + w[kc - 1] * xh[k];
+    x[kc] = w[k - 1] * xh[k] - w[kc - 1] * xh[kc];
+  }
+  if (modn == 0) {
+    x[ns2 + 1] = w[ns2] * xh[ns2 + 1];
+  }
+  rfftf(n, &x[1], &xh[1]);
+  for (i = 3; i <= n; i += 2) {
+    xim1 = x[i - 1] - x[i];
+    x[i] = x[i - 1] + x[i];
+    x[i - 1] = xim1;
+  }
+} /* cosqf1 */
+
+void cosqf(integer n, real *x, real *wsave)
+{
+  static const real sqrt2 = 1.4142135623731f;
+
+  /* Local variables */
+  real tsqx;
+
+  /* Parameter adjustments */
+  --wsave;
+  --x;
+
+  if (n == 2) {
+    tsqx = sqrt2 * x[2];
+    x[2] = x[1] - tsqx;
+    x[1] += tsqx;
+  } else if (n > 2) {
+    cosqf1(n, &x[1], &wsave[1], &wsave[n + 1]);
+  }
+} /* cosqf */
+
+void cosqi(integer n, real *wsave)
+{
+  /* Local variables */
+  integer k;
+  real fk, dt;
+
+  /* Parameter adjustments */
+  --wsave;
+
+  dt = M_PI/2 / (real) (n);
+  fk = 0.f;
+  for (k = 1; k <= n; ++k) {
+    fk += 1.f;
+    wsave[k] = cos(fk * dt);
+  }
+  rffti(n, &wsave[n + 1]);
+} /* cosqi */
+
+void cost(integer n, real *x, real *wsave)
+{
+  /* Local variables */
+  integer i, k;
+  real c1, t1, t2;
+  integer kc;
+  real xi;
+  integer nm1, np1;
+  real x1h;
+  integer ns2;
+  real tx2, x1p3, xim2;
+  integer modn;
+
+  /* Parameter adjustments */
+  --wsave;
+  --x;
+
+  /* Function Body */
+  nm1 = n - 1;
+  np1 = n + 1;
+  ns2 = n / 2;
+  if (n < 2) {
+  } else if (n == 2) {
+    x1h = x[1] + x[2];
+    x[2] = x[1] - x[2];
+    x[1] = x1h;
+  } else if (n == 3) {
+    x1p3 = x[1] + x[3];
+    tx2 = x[2] + x[2];
+    x[2] = x[1] - x[3];
+    x[1] = x1p3 + tx2;
+    x[3] = x1p3 - tx2;
+  } else {
+    c1 = x[1] - x[n];
+    x[1] += x[n];
+    for (k = 2; k <= ns2; ++k) {
+      kc = np1 - k;
+      t1 = x[k] + x[kc];
+      t2 = x[k] - x[kc];
+      c1 += wsave[kc] * t2;
+      t2 = wsave[k] * t2;
+      x[k] = t1 - t2;
+      x[kc] = t1 + t2;
+    }
+    modn = n % 2;
+    if (modn != 0) {
+      x[ns2 + 1] += x[ns2 + 1];
+    }
+    rfftf(nm1, &x[1], &wsave[n + 1]);
+    xim2 = x[2];
+    x[2] = c1;
+    for (i = 4; i <= n; i += 2) {
+      xi = x[i];
+      x[i] = x[i - 2] - x[i - 1];
+      x[i - 1] = xim2;
+      xim2 = xi;
+    }
+    if (modn != 0) {
+      x[n] = xim2;
+    }
+  }
+} /* cost */
+
+void costi(integer n, real *wsave)
+{
+  /* Initialized data */
+
+  /* Local variables */
+  integer k, kc;
+  real fk, dt;
+  integer nm1, np1, ns2;
+
+  /* Parameter adjustments */
+  --wsave;
+
+  /* Function Body */
+  if (n <= 3) {
+    return;
+  }
+  nm1 = n - 1;
+  np1 = n + 1;
+  ns2 = n / 2;
+  dt = M_PI / (real) nm1;
+  fk = 0.f;
+  for (k = 2; k <= ns2; ++k) {
+    kc = np1 - k;
+    fk += 1.f;
+    wsave[k] = sin(fk * dt) * 2.f;
+    wsave[kc] = cos(fk * dt) * 2.f;
+  }
+  rffti(nm1, &wsave[n + 1]);
+} /* costi */
+
+void sinqb(integer n, real *x, real *wsave)
+{
+  /* Local variables */
+  integer k, kc, ns2;
+  real xhold;
+
+  /* Parameter adjustments */
+  --wsave;
+  --x;
+
+  /* Function Body */
+  if (n <= 1) {
+    x[1] *= 4.f;
+    return;
+  }
+  ns2 = n / 2;
+  for (k = 2; k <= n; k += 2) {
+    x[k] = -x[k];
+  }
+  cosqb(n, &x[1], &wsave[1]);
+  for (k = 1; k <= ns2; ++k) {
+    kc = n - k;
+    xhold = x[k];
+    x[k] = x[kc + 1];
+    x[kc + 1] = xhold;
+  }
+} /* sinqb */
+
+void sinqf(integer n, real *x, real *wsave)
+{
+  /* Local variables */
+  integer k, kc, ns2;
+  real xhold;
+
+  /* Parameter adjustments */
+  --wsave;
+  --x;
+
+  /* Function Body */
+  if (n == 1) {
+    return;
+  }
+  ns2 = n / 2;
+  for (k = 1; k <= ns2; ++k) {
+    kc = n - k;
+    xhold = x[k];
+    x[k] = x[kc + 1];
+    x[kc + 1] = xhold;
+  }
+  cosqf(n, &x[1], &wsave[1]);
+  for (k = 2; k <= n; k += 2) {
+    x[k] = -x[k];
+  }
+} /* sinqf */
+
+void sinqi(integer n, real *wsave)
+{
+
+  /* Parameter adjustments */
+  --wsave;
+
+  /* Function Body */
+  cosqi(n, &wsave[1]);
+} /* sinqi */
+
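+/* sint1 computes the odd-sequence sine transform by packing the data
+   into an auxiliary sequence of length n+1 and running the real
+   forward transform rfftf1 on it. */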
+static void sint1(integer n, real *war, real *was, real *xh, real *x, integer *ifac)
+{
+  /* Initialized data */
+
+  static const real sqrt3 = 1.73205080756888f;
+
+  /* Local variables */
+  integer i, k;
+  real t1, t2;
+  integer kc, np1, ns2, modn;
+  real xhold;
+
+  /* Parameter adjustments */
+  --ifac;
+  --x;
+  --xh;
+  --was;
+  --war;
+
+  /* Function Body */
+  for (i = 1; i <= n; ++i) {
+    xh[i] = war[i];
+    war[i] = x[i];
+  }
+  
+  if (n < 2) {
+    xh[1] += xh[1];
+  } else if (n == 2) {
+    xhold = sqrt3 * (xh[1] + xh[2]);
+    xh[2] = sqrt3 * (xh[1] - xh[2]);
+    xh[1] = xhold;
+  } else {
+    np1 = n + 1;
+    ns2 = n / 2;
+    x[1] = 0.f;
+    for (k = 1; k <= ns2; ++k) {
+      kc = np1 - k;
+      t1 = xh[k] - xh[kc];
+      t2 = was[k] * (xh[k] + xh[kc]);
+      x[k + 1] = t1 + t2;
+      x[kc + 1] = t2 - t1;
+    }
+    modn = n % 2;
+    if (modn != 0) {
+      x[ns2 + 2] = xh[ns2 + 1] * 4.f;
+    }
+    rfftf1(np1, &x[1], &xh[1], &war[1], &ifac[1]);
+    xh[1] = x[1] * .5f;
+    for (i = 3; i <= n; i += 2) {
+      xh[i - 1] = -x[i];
+      xh[i] = xh[i - 2] + x[i - 1];
+    }
+    if (modn == 0) {
+      xh[n] = -x[n + 1];
+    }
+  }
+  for (i = 1; i <= n; ++i) {
+    x[i] = war[i];
+    war[i] = xh[i];
+  }
+} /* sint1 */
+
+void sinti(integer n, real *wsave)
+{
+  /* Local variables */
+  integer k;
+  real dt;
+  integer np1, ns2;
+
+  /* Parameter adjustments */
+  --wsave;
+
+  /* Function Body */
+  if (n <= 1) {
+    return;
+  }
+  ns2 = n / 2;
+  np1 = n + 1;
+  dt = M_PI / (real) np1;
+  for (k = 1; k <= ns2; ++k) {
+    wsave[k] = sin(k * dt) * 2.f;
+  }
+  rffti(np1, &wsave[ns2 + 1]);
+} /* sinti */
+
+void sint(integer n, real *x, real *wsave)
+{
+  integer np1, iw1, iw2, iw3;
+
+  /* Parameter adjustments */
+  --wsave;
+  --x;
+
+  /* Function Body */
+  np1 = n + 1;
+  iw1 = n / 2 + 1;
+  iw2 = iw1 + np1;
+  iw3 = iw2 + np1;
+  sint1(n, &x[1], &wsave[1], &wsave[iw1], &wsave[iw2], (int*)&wsave[iw3]);
+} /* sint */
+
+#ifdef TESTING_FFTPACK
+#include <stdio.h>
+
+int main(void)
+{
+  static integer nd[] = { 120,91,54,49,32,28,24,8,4,3,2 };
+
+  /* System generated locals */
+  real r1, r2, r3;
+  f77complex q1, q2, q3;
+
+  /* Local variables */
+  integer i, j, k, n;
+  real w[2000], x[200], y[200], cf, fn, dt;
+  f77complex cx[200], cy[200];
+  real xh[200];
+  integer nz, nm1, np1, ns2;
+  real arg, tfn;
+  real sum, arg1, arg2;
+  real sum1, sum2, dcfb;
+  integer modn;
+  real rftb, rftf;
+  real sqrt2;
+  real rftfb;
+  real costt, sintt, dcfftb, dcfftf, cosqfb, costfb;
+  real sinqfb;
+  real sintfb;
+  real cosqbt, cosqft, sinqbt, sinqft;
+
+
+
+  /*     * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+  /*                       VERSION 4  APRIL 1985 */
+
+  /*                         A TEST DRIVER FOR */
+  /*          A PACKAGE OF FORTRAN SUBPROGRAMS FOR THE FAST FOURIER */
+  /*           TRANSFORM OF PERIODIC AND OTHER SYMMETRIC SEQUENCES */
+
+  /*                              BY */
+
+  /*                       PAUL N SWARZTRAUBER */
+
+  /*       NATIONAL CENTER FOR ATMOSPHERIC RESEARCH  BOULDER,COLORADO 80307 */
+
+  /*        WHICH IS SPONSORED BY THE NATIONAL SCIENCE FOUNDATION */
+
+  /*     * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+
+  /*             THIS PROGRAM TESTS THE PACKAGE OF FAST FOURIER */
+  /*     TRANSFORMS FOR BOTH COMPLEX AND REAL PERIODIC SEQUENCES AND */
+  /*     CERTAIN OTHER SYMMETRIC SEQUENCES THAT ARE LISTED BELOW. */
+
+  /*     1.   RFFTI     INITIALIZE  RFFTF AND RFFTB */
+  /*     2.   RFFTF     FORWARD TRANSFORM OF A REAL PERIODIC SEQUENCE */
+  /*     3.   RFFTB     BACKWARD TRANSFORM OF A REAL COEFFICIENT ARRAY */
+
+  /*     4.   EZFFTI    INITIALIZE EZFFTF AND EZFFTB */
+  /*     5.   EZFFTF    A SIMPLIFIED REAL PERIODIC FORWARD TRANSFORM */
+  /*     6.   EZFFTB    A SIMPLIFIED REAL PERIODIC BACKWARD TRANSFORM */
+
+  /*     7.   SINTI     INITIALIZE SINT */
+  /*     8.   SINT      SINE TRANSFORM OF A REAL ODD SEQUENCE */
+
+  /*     9.   COSTI     INITIALIZE COST */
+  /*     10.  COST      COSINE TRANSFORM OF A REAL EVEN SEQUENCE */
+
+  /*     11.  SINQI     INITIALIZE SINQF AND SINQB */
+  /*     12.  SINQF     FORWARD SINE TRANSFORM WITH ODD WAVE NUMBERS */
+  /*     13.  SINQB     UNNORMALIZED INVERSE OF SINQF */
+
+  /*     14.  COSQI     INITIALIZE COSQF AND COSQB */
+  /*     15.  COSQF     FORWARD COSINE TRANSFORM WITH ODD WAVE NUMBERS */
+  /*     16.  COSQB     UNNORMALIZED INVERSE OF COSQF */
+
+  /*     17.  CFFTI     INITIALIZE CFFTF AND CFFTB */
+  /*     18.  CFFTF     FORWARD TRANSFORM OF A COMPLEX PERIODIC SEQUENCE */
+  /*     19.  CFFTB     UNNORMALIZED INVERSE OF CFFTF */
+
+
+  sqrt2 = sqrt(2.f);
+  int all_ok = 1;
+  for (nz = 1; nz <= (int)(sizeof nd/sizeof nd[0]); ++nz) {
+    n = nd[nz - 1];
+    modn = n % 2;
+    fn = (real) n;
+    tfn = fn + fn;
+    np1 = n + 1;
+    nm1 = n - 1;
+    for (j = 1; j <= np1; ++j) {
+      x[j - 1] = sin((real) j * sqrt2);
+      y[j - 1] = x[j - 1];
+      xh[j - 1] = x[j - 1];
+    }
+
+    /*     TEST SUBROUTINES RFFTI,RFFTF AND RFFTB */
+
+    rffti(n, w);
+    dt = (2*M_PI) / fn;
+    ns2 = (n + 1) / 2;
+    if (ns2 < 2) {
+      goto L104;
+    }
+    for (k = 2; k <= ns2; ++k) {
+      sum1 = 0.f;
+      sum2 = 0.f;
+      arg = (real) (k - 1) * dt;
+      for (i = 1; i <= n; ++i) {
+        arg1 = (real) (i - 1) * arg;
+        sum1 += x[i - 1] * cos(arg1);
+        sum2 += x[i - 1] * sin(arg1);
+      }
+      y[(k << 1) - 3] = sum1;
+      y[(k << 1) - 2] = -sum2;
+    }
+  L104:
+    sum1 = 0.f;
+    sum2 = 0.f;
+    for (i = 1; i <= nm1; i += 2) {
+      sum1 += x[i - 1];
+      sum2 += x[i];
+    }
+    if (modn == 1) {
+      sum1 += x[n - 1];
+    }
+    y[0] = sum1 + sum2;
+    if (modn == 0) {
+      y[n - 1] = sum1 - sum2;
+    }
+    rfftf(n, x, w);
+    rftf = 0.f;
+    for (i = 1; i <= n; ++i) {
+      /* Computing MAX */
+      r2 = rftf, r3 = (r1 = x[i - 1] - y[i - 1], fabs(r1));
+      rftf = dmax(r2,r3);
+      x[i - 1] = xh[i - 1];
+    }
+    rftf /= fn;
+    for (i = 1; i <= n; ++i) {
+      sum = x[0] * .5f;
+      arg = (real) (i - 1) * dt;
+      if (ns2 < 2) {
+        goto L108;
+      }
+      for (k = 2; k <= ns2; ++k) {
+        arg1 = (real) (k - 1) * arg;
+        sum = sum + x[(k << 1) - 3] * cos(arg1) - x[(k << 1) - 2] * sin(arg1);
+      }
+    L108:
+      if (modn == 0) {
+        sum += (real)pow(-1, i-1) * .5f * x[n - 1];
+      }
+      y[i - 1] = sum + sum;
+    }
+    rfftb(n, x, w);
+    rftb = 0.f;
+    for (i = 1; i <= n; ++i) {
+      /* Computing MAX */
+      r2 = rftb, r3 = (r1 = x[i - 1] - y[i - 1], fabs(r1));
+      rftb = dmax(r2,r3);
+      x[i - 1] = xh[i - 1];
+      y[i - 1] = xh[i - 1];
+    }
+    rfftb(n, y, w);
+    rfftf(n, y, w);
+    cf = 1.f / fn;
+    rftfb = 0.f;
+    for (i = 1; i <= n; ++i) {
+      /* Computing MAX */
+      r2 = rftfb, r3 = (r1 = cf * y[i - 1] - x[i - 1], fabs(r1));
+      rftfb = dmax(r2,r3);
+    }
+
+    /*     TEST SUBROUTINES SINTI AND SINT */
+
+    dt = M_PI / fn;
+    for (i = 1; i <= nm1; ++i) {
+      x[i - 1] = xh[i - 1];
+    }
+    for (i = 1; i <= nm1; ++i) {
+      y[i - 1] = 0.f;
+      arg1 = (real) i * dt;
+      for (k = 1; k <= nm1; ++k) {
+        y[i - 1] += x[k - 1] * sin((real) k * arg1);
+      }
+      y[i - 1] += y[i - 1];
+    }
+    sinti(nm1, w);
+    sint(nm1, x, w);
+    cf = .5f / fn;
+    sintt = 0.f;
+    for (i = 1; i <= nm1; ++i) {
+      /* Computing MAX */
+      r2 = sintt, r3 = (r1 = x[i - 1] - y[i - 1], fabs(r1));
+      sintt = dmax(r2,r3);
+      x[i - 1] = xh[i - 1];
+      y[i - 1] = x[i - 1];
+    }
+    sintt = cf * sintt;
+    sint(nm1, x, w);
+    sint(nm1, x, w);
+    sintfb = 0.f;
+    for (i = 1; i <= nm1; ++i) {
+      /* Computing MAX */
+      r2 = sintfb, r3 = (r1 = cf * x[i - 1] - y[i - 1], fabs(r1));
+      sintfb = dmax(r2,r3);
+    }
+
+    /*     TEST SUBROUTINES COSTI AND COST */
+
+    for (i = 1; i <= np1; ++i) {
+      x[i - 1] = xh[i - 1];
+    }
+    for (i = 1; i <= np1; ++i) {
+      y[i - 1] = (x[0] + (real) pow(-1, i+1) * x[n]) * .5f;
+      arg = (real) (i - 1) * dt;
+      for (k = 2; k <= n; ++k) {
+        y[i - 1] += x[k - 1] * cos((real) (k - 1) * arg);
+      }
+      y[i - 1] += y[i - 1];
+    }
+    costi(np1, w);
+    cost(np1, x, w);
+    costt = 0.f;
+    for (i = 1; i <= np1; ++i) {
+      /* Computing MAX */
+      r2 = costt, r3 = (r1 = x[i - 1] - y[i - 1], fabs(r1));
+      costt = dmax(r2,r3);
+      x[i - 1] = xh[i - 1];
+      y[i - 1] = xh[i - 1];
+    }
+    costt = cf * costt;
+    cost(np1, x, w);
+    cost(np1, x, w);
+    costfb = 0.f;
+    for (i = 1; i <= np1; ++i) {
+      /* Computing MAX */
+      r2 = costfb, r3 = (r1 = cf * x[i - 1] - y[i - 1], fabs(r1));
+      costfb = dmax(r2,r3);
+    }
+
+    /*     TEST SUBROUTINES SINQI,SINQF AND SINQB */
+
+    cf = .25f / fn;
+    for (i = 1; i <= n; ++i) {
+      y[i - 1] = xh[i - 1];
+    }
+    dt = M_PI / (fn + fn);
+    for (i = 1; i <= n; ++i) {
+      x[i - 1] = 0.f;
+      arg = dt * (real) i;
+      for (k = 1; k <= n; ++k) {
+        x[i - 1] += y[k - 1] * sin((real) (k + k - 1) * arg);
+      }
+      x[i - 1] *= 4.f;
+    }
+    sinqi(n, w);
+    sinqb(n, y, w);
+    sinqbt = 0.f;
+    for (i = 1; i <= n; ++i) {
+      /* Computing MAX */
+      r2 = sinqbt, r3 = (r1 = y[i - 1] - x[i - 1], fabs(r1));
+      sinqbt = dmax(r2,r3);
+      x[i - 1] = xh[i - 1];
+    }
+    sinqbt = cf * sinqbt;
+    for (i = 1; i <= n; ++i) {
+      arg = (real) (i + i - 1) * dt;
+      y[i - 1] = (real) pow(-1, i+1) * .5f * x[n - 1];
+      for (k = 1; k <= nm1; ++k) {
+        y[i - 1] += x[k - 1] * sin((real) k * arg);
+      }
+      y[i - 1] += y[i - 1];
+    }
+    sinqf(n, x, w);
+    sinqft = 0.f;
+    for (i = 1; i <= n; ++i) {
+      /* Computing MAX */
+      r2 = sinqft, r3 = (r1 = x[i - 1] - y[i - 1], fabs(r1));
+      sinqft = dmax(r2,r3);
+      y[i - 1] = xh[i - 1];
+      x[i - 1] = xh[i - 1];
+    }
+    sinqf(n, y, w);
+    sinqb(n, y, w);
+    sinqfb = 0.f;
+    for (i = 1; i <= n; ++i) {
+      /* Computing MAX */
+      r2 = sinqfb, r3 = (r1 = cf * y[i - 1] - x[i - 1], fabs(r1));
+      sinqfb = dmax(r2,r3);
+    }
+
+    /*     TEST SUBROUTINES COSQI,COSQF AND COSQB */
+
+    for (i = 1; i <= n; ++i) {
+      y[i - 1] = xh[i - 1];
+    }
+    for (i = 1; i <= n; ++i) {
+      x[i - 1] = 0.f;
+      arg = (real) (i - 1) * dt;
+      for (k = 1; k <= n; ++k) {
+        x[i - 1] += y[k - 1] * cos((real) (k + k - 1) * arg);
+      }
+      x[i - 1] *= 4.f;
+    }
+    cosqi(n, w);
+    cosqb(n, y, w);
+    cosqbt = 0.f;
+    for (i = 1; i <= n; ++i) {
+      /* Computing MAX */
+      r2 = cosqbt, r3 = (r1 = x[i - 1] - y[i - 1], fabs(r1));
+      cosqbt = dmax(r2,r3);
+      x[i - 1] = xh[i - 1];
+    }
+    cosqbt = cf * cosqbt;
+    for (i = 1; i <= n; ++i) {
+      y[i - 1] = x[0] * .5f;
+      arg = (real) (i + i - 1) * dt;
+      for (k = 2; k <= n; ++k) {
+        y[i - 1] += x[k - 1] * cos((real) (k - 1) * arg);
+      }
+      y[i - 1] += y[i - 1];
+    }
+    cosqf(n, x, w);
+    cosqft = 0.f;
+    for (i = 1; i <= n; ++i) {
+      /* Computing MAX */
+      r2 = cosqft, r3 = (r1 = y[i - 1] - x[i - 1], fabs(r1));
+      cosqft = dmax(r2,r3);
+      x[i - 1] = xh[i - 1];
+      y[i - 1] = xh[i - 1];
+    }
+    cosqft = cf * cosqft;
+    cosqb(n, x, w);
+    cosqf(n, x, w);
+    cosqfb = 0.f;
+    for (i = 1; i <= n; ++i) {
+      /* Computing MAX */
+      r2 = cosqfb, r3 = (r1 = cf * x[i - 1] - y[i - 1], fabs(r1));
+      cosqfb = dmax(r2,r3);
+    }
+
+    /*     TEST  CFFTI,CFFTF,CFFTB */
+
+    for (i = 1; i <= n; ++i) {
+      r1 = cos(sqrt2 * (real) i);
+      r2 = sin(sqrt2 * (real) (i * i));
+      q1.r = r1, q1.i = r2;
+      cx[i-1].r = q1.r, cx[i-1].i = q1.i;
+    }
+    dt = (2*M_PI) / fn;
+    for (i = 1; i <= n; ++i) {
+      arg1 = -((real) (i - 1)) * dt;
+      cy[i-1].r = 0.f, cy[i-1].i = 0.f;
+      for (k = 1; k <= n; ++k) {
+        arg2 = (real) (k - 1) * arg1;
+        r1 = cos(arg2);
+        r2 = sin(arg2);
+        q3.r = r1, q3.i = r2;
+        q2.r = q3.r * cx[k-1].r - q3.i * cx[k-1].i;
+        q2.i = q3.r * cx[k-1].i + q3.i * cx[k-1].r;
+        q1.r = cy[i-1].r + q2.r, q1.i = cy[i-1].i + q2.i;
+        cy[i-1].r = q1.r, cy[i-1].i = q1.i;
+      }
+    }
+    cffti(n, w);
+    cfftf(n, (real*)cx, w);
+    dcfftf = 0.f;
+    for (i = 1; i <= n; ++i) {
+      /* Computing MAX */
+      q1.r = cx[i-1].r - cy[i-1].r, q1.i = cx[i-1].i - cy[i-1].i;
+      r1 = dcfftf, r2 = c_abs(&q1);
+      dcfftf = dmax(r1,r2);
+      q1.r = cx[i-1].r / fn, q1.i = cx[i-1].i / fn;
+      cx[i-1].r = q1.r, cx[i-1].i = q1.i;
+    }
+    dcfftf /= fn;
+    for (i = 1; i <= n; ++i) {
+      arg1 = (real) (i - 1) * dt;
+      cy[i-1].r = 0.f, cy[i-1].i = 0.f;
+      for (k = 1; k <= n; ++k) {
+        arg2 = (real) (k - 1) * arg1;
+        r1 = cos(arg2);
+        r2 = sin(arg2);
+        q3.r = r1, q3.i = r2;
+        q2.r = q3.r * cx[k-1].r - q3.i * cx[k-1].i;
+        q2.i = q3.r * cx[k-1].i + q3.i * cx[k-1].r;
+        q1.r = cy[i-1].r + q2.r, q1.i = cy[i-1].i + q2.i;
+        cy[i-1].r = q1.r, cy[i-1].i = q1.i;
+      }
+    }
+    cfftb(n, (real*)cx, w);
+    dcfftb = 0.f;
+    for (i = 1; i <= n; ++i) {
+      /* Computing MAX */
+      q1.r = cx[i-1].r - cy[i-1].r, q1.i = cx[i-1].i - cy[i-1].i;
+      r1 = dcfftb, r2 = c_abs(&q1);
+      dcfftb = dmax(r1,r2);
+      cx[i-1].r = cy[i-1].r, cx[i-1].i = cy[i-1].i;
+    }
+    cf = 1.f / fn;
+    cfftf(n, (real*)cx, w);
+    cfftb(n, (real*)cx, w);
+    dcfb = 0.f;
+    for (i = 1; i <= n; ++i) {
+      /* Computing MAX */
+      q2.r = cf * cx[i-1].r, q2.i = cf * cx[i-1].i;
+      q1.r = q2.r - cy[i-1].r, q1.i = q2.i - cy[i-1].i;
+      r1 = dcfb, r2 = c_abs(&q1);
+      dcfb = dmax(r1,r2);
+    }
+    printf("%d\tRFFTF  %10.3g\tRFFTB  %10.ge\tRFFTFB %10.3g", n, rftf, rftb, rftfb);
+    printf(  "\tSINT   %10.3g\tSINTFB %10.ge\tCOST   %10.3g\n", sintt, sintfb, costt);
+    printf(  "\tCOSTFB %10.3g\tSINQF  %10.ge\tSINQB  %10.3g", costfb, sinqft, sinqbt);
+    printf(  "\tSINQFB %10.3g\tCOSQF  %10.ge\tCOSQB  %10.3g\n", sinqfb, cosqft, cosqbt);
+    printf(  "\tCOSQFB %10.3g\t", cosqfb);
+    printf(  "\tCFFTF  %10.ge\tCFFTB  %10.3g\n", dcfftf, dcfftb);
+    printf(  "\tCFFTFB %10.3g\n", dcfb);
+
+#define CHECK(x) if (x > 1e-3) { printf(#x " failed: %g\n", x); all_ok = 0; }
+    CHECK(rftf); CHECK(rftb); CHECK(rftfb); CHECK(sintt); CHECK(sintfb); CHECK(costt);
+    CHECK(costfb); CHECK(sinqft); CHECK(sinqbt); CHECK(sinqfb); CHECK(cosqft); CHECK(cosqbt);
+    CHECK(cosqfb); CHECK(dcfftf); CHECK(dcfftb);
+  }
+
+  if (all_ok) printf("Everything looks fine.\n"); 
+  else printf("ERRORS WERE DETECTED.\n");
+  /*
+    expected:
+    120     RFFTF   2.786e-06       RFFTB   6.847e-04       RFFTFB  2.795e-07       SINT    1.312e-06       SINTFB  1.237e-06       COST    1.319e-06
+    COSTFB  4.355e-06       SINQF   3.281e-04       SINQB   1.876e-06       SINQFB  2.198e-07       COSQF   6.199e-07       COSQB   2.193e-06
+    COSQFB  2.300e-07       DEZF    5.573e-06       DEZB    1.363e-05       DEZFB   1.371e-06       CFFTF   5.590e-06       CFFTB   4.751e-05
+    CFFTFB  4.215e-07
+    54      RFFTF   4.708e-07       RFFTB   3.052e-05       RFFTFB  3.439e-07       SINT    3.532e-07       SINTFB  4.145e-07       COST    3.002e-07
+    COSTFB  6.343e-07       SINQF   4.959e-05       SINQB   4.415e-07       SINQFB  2.882e-07       COSQF   2.826e-07       COSQB   2.472e-07
+    COSQFB  3.439e-07       DEZF    9.388e-07       DEZB    5.066e-06       DEZFB   5.960e-07       CFFTF   1.426e-06       CFFTB   9.482e-06
+    CFFTFB  2.980e-07
+    49      RFFTF   4.476e-07       RFFTB   5.341e-05       RFFTFB  2.574e-07       SINT    9.196e-07       SINTFB  9.401e-07       COST    8.174e-07
+    COSTFB  1.331e-06       SINQF   4.005e-05       SINQB   9.342e-07       SINQFB  3.057e-07       COSQF   2.530e-07       COSQB   6.228e-07
+    COSQFB  4.826e-07       DEZF    9.071e-07       DEZB    4.590e-06       DEZFB   5.960e-07       CFFTF   2.095e-06       CFFTB   1.414e-05
+    CFFTFB  7.398e-07
+    32      RFFTF   4.619e-07       RFFTB   2.861e-05       RFFTFB  1.192e-07       SINT    3.874e-07       SINTFB  4.172e-07       COST    4.172e-07
+    COSTFB  1.699e-06       SINQF   2.551e-05       SINQB   6.407e-07       SINQFB  2.980e-07       COSQF   1.639e-07       COSQB   1.714e-07
+    COSQFB  2.384e-07       DEZF    1.013e-06       DEZB    2.339e-06       DEZFB   7.749e-07       CFFTF   1.127e-06       CFFTB   6.744e-06
+    CFFTFB  2.666e-07
+    4       RFFTF   1.490e-08       RFFTB   1.490e-07       RFFTFB  5.960e-08       SINT    7.451e-09       SINTFB  0.000e+00       COST    2.980e-08
+    COSTFB  1.192e-07       SINQF   4.768e-07       SINQB   2.980e-08       SINQFB  5.960e-08       COSQF   2.608e-08       COSQB   5.960e-08
+    COSQFB  1.192e-07       DEZF    2.980e-08       DEZB    5.960e-08       DEZFB   0.000e+00       CFFTF   6.664e-08       CFFTB   5.960e-08
+    CFFTFB  6.144e-08
+    3       RFFTF   3.974e-08       RFFTB   1.192e-07       RFFTFB  3.303e-08       SINT    1.987e-08       SINTFB  1.069e-08       COST    4.967e-08
+    COSTFB  5.721e-08       SINQF   8.941e-08       SINQB   2.980e-08       SINQFB  1.259e-07       COSQF   7.451e-09       COSQB   4.967e-08
+    COSQFB  7.029e-08       DEZF    1.192e-07       DEZB    5.960e-08       DEZFB   5.960e-08       CFFTF   7.947e-08       CFFTB   8.429e-08
+    CFFTFB  9.064e-08
+    2       RFFTF   0.000e+00       RFFTB   0.000e+00       RFFTFB  0.000e+00       SINT    0.000e+00       SINTFB  0.000e+00       COST    0.000e+00
+    COSTFB  0.000e+00       SINQF   1.192e-07       SINQB   2.980e-08       SINQFB  5.960e-08       COSQF   7.451e-09       COSQB   1.490e-08
+    COSQFB  0.000e+00       DEZF    0.000e+00       DEZB    0.000e+00       DEZFB   0.000e+00       CFFTF   0.000e+00       CFFTB   5.960e-08
+    CFFTFB  5.960e-08
+    Everything looks fine.
+
+  */
+
+  return all_ok ? 0 : 1;
+}
+#endif /* TESTING_FFTPACK */
diff --git a/fftpack.h b/fftpack.h
new file mode 100644
index 0000000..45cb742
--- /dev/null
+++ b/fftpack.h
@@ -0,0 +1,799 @@
+/*
+  Interface for the f2c translation of fftpack as found on http://www.netlib.org/fftpack/
+  
+   FFTPACK license:
+
+   http://www.cisl.ucar.edu/css/software/fftpack5/ftpk.html
+
+   Copyright (c) 2004 the University Corporation for Atmospheric
+   Research ("UCAR"). All rights reserved. Developed by NCAR's
+   Computational and Information Systems Laboratory, UCAR,
+   www.cisl.ucar.edu.
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of NCAR's Computational and Information Systems
+   Laboratory, the University Corporation for Atmospheric Research,
+   nor the names of its sponsors or contributors may be used to
+   endorse or promote products derived from this Software without
+   specific prior written permission.  
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+
+   ChangeLog:
+   2011/10/02: this is my first release of this file.
+*/
+
+#ifndef FFTPACK_H
+#define FFTPACK_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* just define FFTPACK_DOUBLE_PRECISION if you want to build it as a double precision fft */
+
+#ifndef FFTPACK_DOUBLE_PRECISION
+  typedef float fftpack_real;
+  typedef int   fftpack_int;
+#else
+  typedef double fftpack_real;
+  typedef int    fftpack_int;
+#endif
+
+  void cffti(fftpack_int n, fftpack_real *wsave);
+
+  void cfftf(fftpack_int n, fftpack_real *c, fftpack_real *wsave);
+
+  void cfftb(fftpack_int n, fftpack_real *c, fftpack_real *wsave);
+
+  void rffti(fftpack_int n, fftpack_real *wsave);
+  void rfftf(fftpack_int n, fftpack_real *r, fftpack_real *wsave);
+  void rfftb(fftpack_int n, fftpack_real *r, fftpack_real *wsave);
+
+  void cosqi(fftpack_int n, fftpack_real *wsave);
+  void cosqf(fftpack_int n, fftpack_real *x, fftpack_real *wsave);
+  void cosqb(fftpack_int n, fftpack_real *x, fftpack_real *wsave);
+
+  void costi(fftpack_int n, fftpack_real *wsave);
+  void cost(fftpack_int n, fftpack_real *x, fftpack_real *wsave);
+
+  void sinqi(fftpack_int n, fftpack_real *wsave);
+  void sinqb(fftpack_int n, fftpack_real *x, fftpack_real *wsave);
+  void sinqf(fftpack_int n, fftpack_real *x, fftpack_real *wsave);
+
+  void sinti(fftpack_int n, fftpack_real *wsave);
+  void sint(fftpack_int n, fftpack_real *x, fftpack_real *wsave);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FFTPACK_H */
+
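+/* A minimal round-trip sketch for the complex transform (the buffer
+   names and the length 64 are illustrative). Complex data is passed as
+   interleaved re/im pairs, so c holds 2*n reals; wsave must hold at
+   least 4*n+15 reals; cfftb is the unnormalized inverse of cfftf, so
+   the round trip returns the input scaled by n:
+
+       fftpack_real c[2*64], wsave[4*64 + 15];
+       fftpack_int i;
+       cffti(64, wsave);
+       cfftf(64, c, wsave);
+       cfftb(64, c, wsave);
+       for (i = 0; i < 2*64; ++i) c[i] /= 64;
+*/
+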
+/*
+
+                      FFTPACK
+
+* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+
+                  version 4  april 1985
+
+     a package of fortran subprograms for the fast fourier
+      transform of periodic and other symmetric sequences
+
+                         by
+
+                  paul n swarztrauber
+
+  national center for atmospheric research  boulder,colorado 80307
+
+   which is sponsored by the national science foundation
+
+* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+
+
+this package consists of programs which perform fast fourier
+transforms for both complex and real periodic sequences and
+certain other symmetric sequences that are listed below.
+
+1.   rffti     initialize  rfftf and rfftb
+2.   rfftf     forward transform of a real periodic sequence
+3.   rfftb     backward transform of a real coefficient array
+
+4.   ezffti    initialize ezfftf and ezfftb
+5.   ezfftf    a simplified real periodic forward transform
+6.   ezfftb    a simplified real periodic backward transform
+
+7.   sinti     initialize sint
+8.   sint      sine transform of a real odd sequence
+
+9.   costi     initialize cost
+10.  cost      cosine transform of a real even sequence
+
+11.  sinqi     initialize sinqf and sinqb
+12.  sinqf     forward sine transform with odd wave numbers
+13.  sinqb     unnormalized inverse of sinqf
+
+14.  cosqi     initialize cosqf and cosqb
+15.  cosqf     forward cosine transform with odd wave numbers
+16.  cosqb     unnormalized inverse of cosqf
+
+17.  cffti     initialize cfftf and cfftb
+18.  cfftf     forward transform of a complex periodic sequence
+19.  cfftb     unnormalized inverse of cfftf
+
+
+******************************************************************
+
+subroutine rffti(n,wsave)
+
+  ****************************************************************
+
+subroutine rffti initializes the array wsave which is used in
+both rfftf and rfftb. the prime factorization of n together with
+a tabulation of the trigonometric functions are computed and
+stored in wsave.
+
+input parameter
+
+n       the length of the sequence to be transformed.
+
+output parameter
+
+wsave   a work array which must be dimensioned at least 2*n+15.
+        the same work array can be used for both rfftf and rfftb
+        as long as n remains unchanged. different wsave arrays
+        are required for different values of n. the contents of
+        wsave must not be changed between calls of rfftf or rfftb.
+
+******************************************************************
+
+subroutine rfftf(n,r,wsave)
+
+******************************************************************
+
+subroutine rfftf computes the fourier coefficients of a real
+periodic sequence (fourier analysis). the transform is defined
+below at output parameter r.
+
+input parameters
+
+n       the length of the array r to be transformed.  the method
+        is most efficient when n is a product of small primes.
+        n may change so long as different work arrays are provided
+
+r       a real array of length n which contains the sequence
+        to be transformed
+
+wsave   a work array which must be dimensioned at least 2*n+15.
+        in the program that calls rfftf. the wsave array must be
+        initialized by calling subroutine rffti(n,wsave) and a
+        different wsave array must be used for each different
+        value of n. this initialization does not have to be
+        repeated so long as n remains unchanged thus subsequent
+        transforms can be obtained faster than the first.
+        the same wsave array can be used by rfftf and rfftb.
+
+
+output parameters
+
+r       r(1) = the sum from i=1 to i=n of r(i)
+
+        if n is even set l = n/2, if n is odd set l = (n+1)/2
+
+          then for k = 2,...,l
+
+             r(2*k-2) = the sum from i = 1 to i = n of
+
+                  r(i)*cos((k-1)*(i-1)*2*pi/n)
+
+             r(2*k-1) = the sum from i = 1 to i = n of
+
+                 -r(i)*sin((k-1)*(i-1)*2*pi/n)
+
+        if n is even
+
+             r(n) = the sum from i = 1 to i = n of
+
+                  (-1)**(i-1)*r(i)
+
+ *****  note
+             this transform is unnormalized since a call of rfftf
+             followed by a call of rfftb will multiply the input
+             sequence by n.
+
+wsave   contains results which must not be destroyed between
+        calls of rfftf or rfftb.
+
+
+******************************************************************
+
+subroutine rfftb(n,r,wsave)
+
+******************************************************************
+
+subroutine rfftb computes the real periodic sequence from its
+fourier coefficients (fourier synthesis). the transform is defined
+below at output parameter r.
+
+input parameters
+
+n       the length of the array r to be transformed.  the method
+        is most efficient when n is a product of small primes.
+        n may change so long as different work arrays are provided
+
+r       a real array of length n which contains the sequence
+        to be transformed
+
+wsave   a work array which must be dimensioned at least 2*n+15.
+        in the program that calls rfftb. the wsave array must be
+        initialized by calling subroutine rffti(n,wsave) and a
+        different wsave array must be used for each different
+        value of n. this initialization does not have to be
+        repeated so long as n remains unchanged thus subsequent
+        transforms can be obtained faster than the first.
+        the same wsave array can be used by rfftf and rfftb.
+
+
+output parameters
+
+r       for n even and for i = 1,...,n
+
+             r(i) = r(1)+(-1)**(i-1)*r(n)
+
+                  plus the sum from k=2 to k=n/2 of
+
+                   2.*r(2*k-2)*cos((k-1)*(i-1)*2*pi/n)
+
+                  -2.*r(2*k-1)*sin((k-1)*(i-1)*2*pi/n)
+
+        for n odd and for i = 1,...,n
+
+             r(i) = r(1) plus the sum from k=2 to k=(n+1)/2 of
+
+                  2.*r(2*k-2)*cos((k-1)*(i-1)*2*pi/n)
+
+                 -2.*r(2*k-1)*sin((k-1)*(i-1)*2*pi/n)
+
+ *****  note
+             this transform is unnormalized since a call of rfftf
+             followed by a call of rfftb will multiply the input
+             sequence by n.
+
+wsave   contains results which must not be destroyed between
+        calls of rfftb or rfftf.
+
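+a minimal round-trip sketch in c (buffer names and the length 256 are
+illustrative); since rfftf followed by rfftb scales the sequence by n,
+divide by n to recover the input:
+
+    fftpack_real r[256], wsave[2*256 + 15];
+    fftpack_int i;
+    rffti(256, wsave);
+    rfftf(256, r, wsave);
+    rfftb(256, r, wsave);
+    for (i = 0; i < 256; ++i) r[i] /= 256;
+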
+******************************************************************
+
+subroutine sinti(n,wsave)
+
+******************************************************************
+
+subroutine sinti initializes the array wsave which is used in
+subroutine sint. the prime factorization of n together with
+a tabulation of the trigonometric functions are computed and
+stored in wsave.
+
+input parameter
+
+n       the length of the sequence to be transformed.  the method
+        is most efficient when n+1 is a product of small primes.
+
+output parameter
+
+wsave   a work array with at least int(2.5*n+15) locations.
+        different wsave arrays are required for different values
+        of n. the contents of wsave must not be changed between
+        calls of sint.
+
+******************************************************************
+
+subroutine sint(n,x,wsave)
+
+******************************************************************
+
+subroutine sint computes the discrete fourier sine transform
+of an odd sequence x(i). the transform is defined below at
+output parameter x.
+
+sint is the unnormalized inverse of itself since a call of sint
+followed by another call of sint will multiply the input sequence
+x by 2*(n+1).
+
+the array wsave which is used by subroutine sint must be
+initialized by calling subroutine sinti(n,wsave).
+
+input parameters
+
+n       the length of the sequence to be transformed.  the method
+        is most efficient when n+1 is the product of small primes.
+
+x       an array which contains the sequence to be transformed
+
+
+wsave   a work array with dimension at least int(2.5*n+15)
+        in the program that calls sint. the wsave array must be
+        initialized by calling subroutine sinti(n,wsave) and a
+        different wsave array must be used for each different
+        value of n. this initialization does not have to be
+        repeated so long as n remains unchanged thus subsequent
+        transforms can be obtained faster than the first.
+
+output parameters
+
+x       for i=1,...,n
+
+             x(i)= the sum from k=1 to k=n
+
+                  2*x(k)*sin(k*i*pi/(n+1))
+
+             a call of sint followed by another call of
+             sint will multiply the sequence x by 2*(n+1).
+             hence sint is the unnormalized inverse
+             of itself.
+
+wsave   contains initialization calculations which must not be
+        destroyed between calls of sint.
+
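+a minimal sketch (the length 100 is illustrative): since sint is its
+own unnormalized inverse, two calls return the input scaled by 2*(n+1):
+
+    fftpack_real x[100], wsave[265];
+    fftpack_int i;
+    sinti(100, wsave);
+    sint(100, x, wsave);
+    sint(100, x, wsave);
+    for (i = 0; i < 100; ++i) x[i] /= 2 * 101;
+
+here 265 is the required int(2.5*n+15) for n = 100.
+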
+******************************************************************
+
+subroutine costi(n,wsave)
+
+******************************************************************
+
+subroutine costi initializes the array wsave which is used in
+subroutine cost. the prime factorization of n together with
+a tabulation of the trigonometric functions are computed and
+stored in wsave.
+
+input parameter
+
+n       the length of the sequence to be transformed.  the method
+        is most efficient when n-1 is a product of small primes.
+
+output parameter
+
+wsave   a work array which must be dimensioned at least 3*n+15.
+        different wsave arrays are required for different values
+        of n. the contents of wsave must not be changed between
+        calls of cost.
+
+******************************************************************
+
+subroutine cost(n,x,wsave)
+
+******************************************************************
+
+subroutine cost computes the discrete fourier cosine transform
+of an even sequence x(i). the transform is defined below at output
+parameter x.
+
+cost is the unnormalized inverse of itself since a call of cost
+followed by another call of cost will multiply the input sequence
+x by 2*(n-1).
+
+the array wsave which is used by subroutine cost must be
+initialized by calling subroutine costi(n,wsave).
+
+input parameters
+
+n       the length of the sequence x. n must be greater than 1.
+        the method is most efficient when n-1 is a product of
+        small primes.
+
+x       an array which contains the sequence to be transformed
+
+wsave   a work array which must be dimensioned at least 3*n+15
+        in the program that calls cost. the wsave array must be
+        initialized by calling subroutine costi(n,wsave) and a
+        different wsave array must be used for each different
+        value of n. this initialization does not have to be
+        repeated so long as n remains unchanged thus subsequent
+        transforms can be obtained faster than the first.
+
+output parameters
+
+x       for i=1,...,n
+
+            x(i) = x(1)+(-1)**(i-1)*x(n)
+
+             + the sum from k=2 to k=n-1
+
+                 2*x(k)*cos((k-1)*(i-1)*pi/(n-1))
+
+             a call of cost followed by another call of
+             cost will multiply the sequence x by 2*(n-1)
+             hence cost is the unnormalized inverse
+             of itself.
+
+wsave   contains initialization calculations which must not be
+        destroyed between calls of cost.
+
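+a minimal sketch (the length 100 is illustrative): two calls of cost
+return the input scaled by 2*(n-1):
+
+    fftpack_real x[100], wsave[3*100 + 15];
+    fftpack_int i;
+    costi(100, wsave);
+    cost(100, x, wsave);
+    cost(100, x, wsave);
+    for (i = 0; i < 100; ++i) x[i] /= 2 * 99;
+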
+******************************************************************
+
+subroutine sinqi(n,wsave)
+
+******************************************************************
+
+subroutine sinqi initializes the array wsave which is used in
+both sinqf and sinqb. the prime factorization of n together with
+a tabulation of the trigonometric functions are computed and
+stored in wsave.
+
+input parameter
+
+n       the length of the sequence to be transformed. the method
+        is most efficient when n is a product of small primes.
+
+output parameter
+
+wsave   a work array which must be dimensioned at least 3*n+15.
+        the same work array can be used for both sinqf and sinqb
+        as long as n remains unchanged. different wsave arrays
+        are required for different values of n. the contents of
+        wsave must not be changed between calls of sinqf or sinqb.
+
+******************************************************************
+
+subroutine sinqf(n,x,wsave)
+
+******************************************************************
+
+subroutine sinqf computes the fast fourier transform of quarter
+wave data. that is , sinqf computes the coefficients in a sine
+series representation with only odd wave numbers. the transform
+is defined below at output parameter x.
+
+sinqb is the unnormalized inverse of sinqf since a call of sinqf
+followed by a call of sinqb will multiply the input sequence x
+by 4*n.
+
+the array wsave which is used by subroutine sinqf must be
+initialized by calling subroutine sinqi(n,wsave).
+
+
+input parameters
+
+n       the length of the array x to be transformed.  the method
+        is most efficient when n is a product of small primes.
+
+x       an array which contains the sequence to be transformed
+
+wsave   a work array which must be dimensioned at least 3*n+15.
+        in the program that calls sinqf. the wsave array must be
+        initialized by calling subroutine sinqi(n,wsave) and a
+        different wsave array must be used for each different
+        value of n. this initialization does not have to be
+        repeated so long as n remains unchanged thus subsequent
+        transforms can be obtained faster than the first.
+
+output parameters
+
+x       for i=1,...,n
+
+             x(i) = (-1)**(i-1)*x(n)
+
+                + the sum from k=1 to k=n-1 of
+
+                2*x(k)*sin((2*i-1)*k*pi/(2*n))
+
+             a call of sinqf followed by a call of
+             sinqb will multiply the sequence x by 4*n.
+             therefore sinqb is the unnormalized inverse
+             of sinqf.
+
+wsave   contains initialization calculations which must not
+        be destroyed between calls of sinqf or sinqb.
+
+******************************************************************
+
+subroutine sinqb(n,x,wsave)
+
+******************************************************************
+
+subroutine sinqb computes the fast fourier transform of quarter
+wave data. that is , sinqb computes a sequence from its
+representation in terms of a sine series with odd wave numbers.
+the transform is defined below at output parameter x.
+
+sinqf is the unnormalized inverse of sinqb since a call of sinqb
+followed by a call of sinqf will multiply the input sequence x
+by 4*n.
+
+the array wsave which is used by subroutine sinqb must be
+initialized by calling subroutine sinqi(n,wsave).
+
+
+input parameters
+
+n       the length of the array x to be transformed.  the method
+        is most efficient when n is a product of small primes.
+
+x       an array which contains the sequence to be transformed
+
+wsave   a work array which must be dimensioned at least 3*n+15.
+        in the program that calls sinqb. the wsave array must be
+        initialized by calling subroutine sinqi(n,wsave) and a
+        different wsave array must be used for each different
+        value of n. this initialization does not have to be
+        repeated so long as n remains unchanged thus subsequent
+        transforms can be obtained faster than the first.
+
+output parameters
+
+x       for i=1,...,n
+
+             x(i)= the sum from k=1 to k=n of
+
+               4*x(k)*sin((2k-1)*i*pi/(2*n))
+
+             a call of sinqb followed by a call of
+             sinqf will multiply the sequence x by 4*n.
+             therefore sinqf is the unnormalized inverse
+             of sinqb.
+
+wsave   contains initialization calculations which must not
+        be destroyed between calls of sinqb or sinqf.
+
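+a minimal sketch for the quarter-wave pair (the length 64 is
+illustrative); sinqf followed by sinqb scales the input by 4*n, and
+cosqf/cosqb below behave the same way:
+
+    fftpack_real x[64], wsave[3*64 + 15];
+    fftpack_int i;
+    sinqi(64, wsave);
+    sinqf(64, x, wsave);
+    sinqb(64, x, wsave);
+    for (i = 0; i < 64; ++i) x[i] /= 4 * 64;
+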
+******************************************************************
+
+subroutine cosqi(n,wsave)
+
+******************************************************************
+
+subroutine cosqi initializes the array wsave which is used in
+both cosqf and cosqb. the prime factorization of n together with
+a tabulation of the trigonometric functions are computed and
+stored in wsave.
+
+input parameter
+
+n       the length of the array to be transformed.  the method
+        is most efficient when n is a product of small primes.
+
+output parameter
+
+wsave   a work array which must be dimensioned at least 3*n+15.
+        the same work array can be used for both cosqf and cosqb
+        as long as n remains unchanged. different wsave arrays
+        are required for different values of n. the contents of
+        wsave must not be changed between calls of cosqf or cosqb.
+
+******************************************************************
+
+subroutine cosqf(n,x,wsave)
+
+******************************************************************
+
+subroutine cosqf computes the fast fourier transform of quarter
+wave data. that is, cosqf computes the coefficients in a cosine
+series representation with only odd wave numbers. the transform
+is defined below at output parameter x.
+
+cosqf is the unnormalized inverse of cosqb since a call of cosqf
+followed by a call of cosqb will multiply the input sequence x
+by 4*n.
+
+the array wsave which is used by subroutine cosqf must be
+initialized by calling subroutine cosqi(n,wsave).
+
+
+input parameters
+
+n       the length of the array x to be transformed.  the method
+        is most efficient when n is a product of small primes.
+
+x       an array which contains the sequence to be transformed
+
+wsave   a work array which must be dimensioned at least 3*n+15
+        in the program that calls cosqf. the wsave array must be
+        initialized by calling subroutine cosqi(n,wsave) and a
+        different wsave array must be used for each different
+        value of n. this initialization does not have to be
+        repeated so long as n remains unchanged; thus subsequent
+        transforms can be obtained faster than the first.
+
+output parameters
+
+x       for i=1,...,n
+
+             x(i) = x(1) plus the sum from k=2 to k=n of
+
+                2*x(k)*cos((2*i-1)*(k-1)*pi/(2*n))
+
+             a call of cosqf followed by a call of
+             cosqb will multiply the sequence x by 4*n.
+             therefore cosqb is the unnormalized inverse
+             of cosqf.
+
+wsave   contains initialization calculations which must not
+        be destroyed between calls of cosqf or cosqb.
+
+******************************************************************
+
+subroutine cosqb(n,x,wsave)
+
+******************************************************************
+
+subroutine cosqb computes the fast fourier transform of quarter
+wave data. that is, cosqb computes a sequence from its
+representation in terms of a cosine series with odd wave numbers.
+the transform is defined below at output parameter x.
+
+cosqb is the unnormalized inverse of cosqf since a call of cosqb
+followed by a call of cosqf will multiply the input sequence x
+by 4*n.
+
+the array wsave which is used by subroutine cosqb must be
+initialized by calling subroutine cosqi(n,wsave).
+
+
+input parameters
+
+n       the length of the array x to be transformed.  the method
+        is most efficient when n is a product of small primes.
+
+x       an array which contains the sequence to be transformed
+
+wsave   a work array that must be dimensioned at least 3*n+15
+        in the program that calls cosqb. the wsave array must be
+        initialized by calling subroutine cosqi(n,wsave) and a
+        different wsave array must be used for each different
+        value of n. this initialization does not have to be
+        repeated so long as n remains unchanged; thus subsequent
+        transforms can be obtained faster than the first.
+
+output parameters
+
+x       for i=1,...,n
+
+             x(i)= the sum from k=1 to k=n of
+
+               4*x(k)*cos((2*k-1)*(i-1)*pi/(2*n))
+
+             a call of cosqb followed by a call of
+             cosqf will multiply the sequence x by 4*n.
+             therefore cosqf is the unnormalized inverse
+             of cosqb.
+
+wsave   contains initialization calculations which must not
+        be destroyed between calls of cosqb or cosqf.
+
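+for illustration only - this loop is not part of the original fftpack
+documentation - the cosqb definition above translates to the following
+naive C reference (0-based indexing; unnormalized, so a subsequent
+cosqf multiplies the sequence by 4*n):
+
+    for (i = 0; i < n; ++i) {
+        double acc = 0.0;
+        for (k = 0; k < n; ++k)
+            acc += 4.0 * x[k] * cos((2*k+1) * i * M_PI / (2.0*n));
+        y[i] = acc;
+    }
+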
+******************************************************************
+
+subroutine cffti(n,wsave)
+
+******************************************************************
+
+subroutine cffti initializes the array wsave which is used in
+both cfftf and cfftb. the prime factorization of n together with
+a tabulation of the trigonometric functions are computed and
+stored in wsave.
+
+input parameter
+
+n       the length of the sequence to be transformed
+
+output parameter
+
+wsave   a work array which must be dimensioned at least 4*n+15
+        the same work array can be used for both cfftf and cfftb
+        as long as n remains unchanged. different wsave arrays
+        are required for different values of n. the contents of
+        wsave must not be changed between calls of cfftf or cfftb.
+
+******************************************************************
+
+subroutine cfftf(n,c,wsave)
+
+******************************************************************
+
+subroutine cfftf computes the forward complex discrete fourier
+transform (the fourier analysis). equivalently, cfftf computes
+the fourier coefficients of a complex periodic sequence.
+the transform is defined below at output parameter c.
+
+the transform is not normalized. to obtain a normalized transform
+the output must be divided by n. otherwise a call of cfftf
+followed by a call of cfftb will multiply the sequence by n.
+
+the array wsave which is used by subroutine cfftf must be
+initialized by calling subroutine cffti(n,wsave).
+
+input parameters
+
+
+n      the length of the complex sequence c. the method is
+       more efficient when n is the product of small primes.
+
+c      a complex array of length n which contains the sequence
+
+wsave   a real work array which must be dimensioned at least 4*n+15
+        in the program that calls cfftf. the wsave array must be
+        initialized by calling subroutine cffti(n,wsave) and a
+        different wsave array must be used for each different
+        value of n. this initialization does not have to be
+        repeated so long as n remains unchanged; thus subsequent
+        transforms can be obtained faster than the first.
+        the same wsave array can be used by cfftf and cfftb.
+
+output parameters
+
+c      for j=1,...,n
+
+           c(j)=the sum from k=1,...,n of
+
+                 c(k)*exp(-i*(j-1)*(k-1)*2*pi/n)
+
+                       where i=sqrt(-1)
+
+wsave   contains initialization calculations which must not be
+        destroyed between calls of subroutine cfftf or cfftb
+
+******************************************************************
+
+subroutine cfftb(n,c,wsave)
+
+******************************************************************
+
+subroutine cfftb computes the backward complex discrete fourier
+transform (the fourier synthesis). equivalently, cfftb computes
+a complex periodic sequence from its fourier coefficients.
+the transform is defined below at output parameter c.
+
+a call of cfftf followed by a call of cfftb will multiply the
+sequence by n.
+
+the array wsave which is used by subroutine cfftb must be
+initialized by calling subroutine cffti(n,wsave).
+
+input parameters
+
+
+n      the length of the complex sequence c. the method is
+       more efficient when n is the product of small primes.
+
+c      a complex array of length n which contains the sequence
+
+wsave   a real work array which must be dimensioned at least 4*n+15
+        in the program that calls cfftb. the wsave array must be
+        initialized by calling subroutine cffti(n,wsave) and a
+        different wsave array must be used for each different
+        value of n. this initialization does not have to be
+        repeated so long as n remains unchanged; thus subsequent
+        transforms can be obtained faster than the first.
+        the same wsave array can be used by cfftf and cfftb.
+
+output parameters
+
+c      for j=1,...,n
+
+           c(j)=the sum from k=1,...,n of
+
+                 c(k)*exp(i*(j-1)*(k-1)*2*pi/n)
+
+                       where i=sqrt(-1)
+
+wsave   contains initialization calculations which must not be
+        destroyed between calls of subroutine cfftf or cfftb
+
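+for illustration only - this loop is not part of the original fftpack
+documentation - the cfftf definition above translates to the following
+naive C99 reference over complex values (0-based indexing; for cfftb,
+flip the sign in the exponent):
+
+    for (j = 0; j < n; ++j) {
+        double complex acc = 0.0;
+        for (k = 0; k < n; ++k)
+            acc += c[k] * cexp(-I * 2.0 * M_PI * j * k / n);
+        out[j] = acc;
+    }
+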
+*/
diff --git a/greenffts b/greenffts
new file mode 160000
index 0000000..0c1fc0e
--- /dev/null
+++ b/greenffts
@@ -0,0 +1 @@
+Subproject commit 0c1fc0eb4c254c256c07eaaa3b2d424bddbef864
diff --git a/kissfft b/kissfft
new file mode 160000
index 0000000..bf9eadc
--- /dev/null
+++ b/kissfft
@@ -0,0 +1 @@
+Subproject commit bf9eadcc6ae744a4347daf4ef72c0fca44a1713d
diff --git a/pffastconv.c b/pffastconv.c
new file mode 100644
index 0000000..8bb2a65
--- /dev/null
+++ b/pffastconv.c
@@ -0,0 +1,264 @@
+/*
+  Copyright (c) 2019  Hayati Ayguen ( h_ayguen@web.de )
+ */
+
+#include "pffastconv.h"
+#include "pffft.h"
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <math.h>
+#include <assert.h>
+#include <string.h>
+
+#define FASTCONV_DBG_OUT  0
+
+
+/* detect compiler flavour */
+#if defined(_MSC_VER)
+#  define RESTRICT __restrict
+#pragma warning( disable : 4244 4305 4204 4456 )
+#elif defined(__GNUC__)
+#  define RESTRICT __restrict
+#endif
+
+
+void *pffastconv_malloc(size_t nb_bytes)
+{
+  return pffft_aligned_malloc(nb_bytes);
+}
+
+void pffastconv_free(void *p)
+{
+  pffft_aligned_free(p);
+}
+
+int pffastconv_simd_size()
+{
+  return pffft_simd_size();
+}
+
+
+
+struct PFFASTCONV_Setup
+{
+  float * Xt;      /* input == x in time domain - copy for alignment */
+  float * Xf;      /* input == X in freq domain */
+  float * Hf;      /* filterCoeffs == H in freq domain */
+  float * Mf;      /* input * filterCoeffs in freq domain */
+  PFFFT_Setup *st;
+  int filterLen;   /* convolution length */
+  int Nfft;        /* FFT/block length */
+  int flags;
+  float scale;
+};
+
+
+PFFASTCONV_Setup * pffastconv_new_setup( const float * filterCoeffs, int filterLen, int * blockLen, int flags )
+{
+  PFFASTCONV_Setup * s = NULL;
+  const int cplxFactor = ( (flags & PFFASTCONV_CPLX_INP_OUT) && (flags & PFFASTCONV_CPLX_SINGLE_FFT) ) ? 2 : 1;
+  const int minFftLen = 2*pffft_simd_size()*pffft_simd_size();
+  int i, Nfft = 2 * pffft_next_power_of_two(filterLen -1);
+#if FASTCONV_DBG_OUT
+  const int iOldBlkLen = *blockLen;
+#endif
+
+  if ( Nfft < minFftLen )
+    Nfft = minFftLen;
+
+  if ( flags & PFFASTCONV_CPLX_FILTER )
+    return NULL;
+
+  s = pffastconv_malloc( sizeof(struct PFFASTCONV_Setup) );
+
+  if ( *blockLen > Nfft ) {
+    Nfft = *blockLen;
+    Nfft = pffft_next_power_of_two(Nfft);
+  }
+  *blockLen = Nfft;  /* this is in (complex) samples */
+
+  Nfft *= cplxFactor;
+
+  if ( (flags & PFFASTCONV_DIRECT_INP) && !(flags & PFFASTCONV_CPLX_INP_OUT) )
+    s->Xt = NULL;
+  else
+    s->Xt = pffastconv_malloc((unsigned)Nfft * sizeof(float));
+  s->Xf = pffastconv_malloc((unsigned)Nfft * sizeof(float));
+  s->Hf = pffastconv_malloc((unsigned)Nfft * sizeof(float));
+  s->Mf = pffastconv_malloc((unsigned)Nfft * sizeof(float));
+  s->st = pffft_new_setup(Nfft, PFFFT_REAL);  /* with complex: we do 2 x fft() */
+  s->filterLen = filterLen;        /* filterLen == convolution length == length of impulse response */
+  if ( cplxFactor == 2 )
+    s->filterLen = 2 * filterLen - 1;
+  s->Nfft = Nfft;  /* FFT/block length */
+  s->flags = flags;
+  s->scale = (float)( 1.0 / Nfft );
+
+  /* build the (flipped) filter in the time domain; fall back to Xf as
+     scratch when Xt was not allocated (PFFASTCONV_DIRECT_INP set
+     without PFFASTCONV_CPLX_INP_OUT) - s->Xt would be NULL here */
+  {
+    float * Ft = s->Xt ? s->Xt : s->Xf;
+    memset( Ft, 0, (unsigned)Nfft * sizeof(float) );
+    if ( flags & PFFASTCONV_CORRELATION ) {
+      for ( i = 0; i < filterLen; ++i )
+        Ft[ ( Nfft - cplxFactor * i ) & (Nfft -1) ] = filterCoeffs[ i ];
+    } else {
+      for ( i = 0; i < filterLen; ++i )
+        Ft[ ( Nfft - cplxFactor * i ) & (Nfft -1) ] = filterCoeffs[ filterLen - 1 - i ];
+    }
+    pffft_transform(s->st, Ft, s->Hf, /* tmp = */ s->Mf, PFFFT_FORWARD);
+  }
+
+#if FASTCONV_DBG_OUT
+  printf("\n  fastConvSetup(filterLen = %d, blockLen %d) --> blockLen %d, OutLen = %d\n"
+    , filterLen, iOldBlkLen, *blockLen, Nfft - filterLen +1 );
+#endif
+
+  return s;
+}
+
+
+void pffastconv_destroy_setup( PFFASTCONV_Setup * s )
+{
+  if (!s)
+    return;
+  pffft_destroy_setup(s->st);
+  pffastconv_free(s->Mf);
+  pffastconv_free(s->Hf);
+  pffastconv_free(s->Xf);
+  if ( s->Xt )
+    pffastconv_free(s->Xt);
+  pffastconv_free(s);
+}
+
+
+int pffastconv_apply(PFFASTCONV_Setup * s, const float *input_, int cplxInputLen, float *output_, int applyFlush)
+{
+  const float * RESTRICT X = input_;
+  float * RESTRICT Y = output_;
+  const int Nfft = s->Nfft;
+  const int filterLen = s->filterLen;
+  const int flags = s->flags;
+  const int cplxFactor = ( (flags & PFFASTCONV_CPLX_INP_OUT) && (flags & PFFASTCONV_CPLX_SINGLE_FFT) ) ? 2 : 1;
+  const int inputLen = cplxFactor * cplxInputLen;
+  int inpOff, procLen, numOut = 0, j, part, cplxOff;
+
+  /* applyFlush != 0:
+   *     inputLen - inpOff -filterLen + 1 > 0
+   * <=> inputLen -filterLen + 1 > inpOff
+   * <=> inpOff < inputLen -filterLen + 1
+   * 
+   * applyFlush == 0:
+   *     inputLen - inpOff >= Nfft
+   * <=> inputLen - Nfft >= inpOff
+   * <=> inpOff <= inputLen - Nfft
+   * <=> inpOff < inputLen - Nfft + 1
+   */
+
+  if ( cplxFactor == 2 )
+  {
+    const int maxOff = applyFlush ? (inputLen -filterLen + 1) : (inputLen - Nfft + 1);
+#if 0
+    printf( "*** inputLen %d, filterLen %d, Nfft %d => maxOff %d\n", inputLen, filterLen, Nfft, maxOff);
+#endif
+    for ( inpOff = 0; inpOff < maxOff; inpOff += numOut )
+    {
+      procLen = ( (inputLen - inpOff) >= Nfft ) ? Nfft : (inputLen - inpOff);
+      numOut = ( procLen - filterLen + 1 ) & ( ~1 );
+      if (!numOut)
+        break;
+#if 0
+      if (!inpOff)
+        printf("*** inpOff = %d, numOut = %d\n", inpOff, numOut);
+      if (inpOff + filterLen + 2 >= maxOff )
+        printf("*** inpOff = %d, inpOff + numOut = %d\n", inpOff, inpOff + numOut);
+#endif
+
+      if ( flags & PFFASTCONV_DIRECT_INP )
+      {
+        pffft_transform(s->st, X + inpOff, s->Xf, /* tmp = */ s->Mf, PFFFT_FORWARD);
+      }
+      else
+      {
+        memcpy( s->Xt, X + inpOff, (unsigned)procLen * sizeof(float) );
+        if ( procLen < Nfft )
+          memset( s->Xt + procLen, 0, (unsigned)(Nfft - procLen) * sizeof(float) );
+    
+        pffft_transform(s->st, s->Xt, s->Xf, /* tmp = */ s->Mf, PFFFT_FORWARD);
+      }
+
+      pffft_zconvolve_no_accu(s->st, s->Xf, s->Hf, /* tmp = */ s->Mf, s->scale);
+
+      if ( flags & PFFASTCONV_DIRECT_OUT )
+      {
+        pffft_transform(s->st, s->Mf, Y + inpOff, s->Xf, PFFFT_BACKWARD);
+      }
+      else
+      {
+        pffft_transform(s->st, s->Mf, s->Xf, /* tmp = */ s->Xt, PFFFT_BACKWARD);
+        memcpy( Y + inpOff, s->Xf, (unsigned)numOut * sizeof(float) );
+      }
+    }
+    return inpOff / cplxFactor;
+  }
+  else
+  {
+    const int maxOff = applyFlush ? (inputLen -filterLen + 1) : (inputLen - Nfft + 1);
+    const int numParts = (flags & PFFASTCONV_CPLX_INP_OUT) ? 2 : 1;
+
+    for ( inpOff = 0; inpOff < maxOff; inpOff += numOut )
+    {
+      procLen = ( (inputLen - inpOff) >= Nfft ) ? Nfft : (inputLen - inpOff);
+      numOut = procLen - filterLen + 1;
+
+      for ( part = 0; part < numParts; ++part )  /* iterate per real/imag component */
+      {
+
+        if ( flags & PFFASTCONV_CPLX_INP_OUT )
+        {
+          cplxOff = 2 * inpOff + part;
+          for ( j = 0; j < procLen; ++j )
+            s->Xt[j] = X[cplxOff + 2 * j];
+          if ( procLen < Nfft )
+            memset( s->Xt + procLen, 0, (unsigned)(Nfft - procLen) * sizeof(float) );
+
+          pffft_transform(s->st, s->Xt, s->Xf, /* tmp = */ s->Mf, PFFFT_FORWARD);
+        }
+        else if ( flags & PFFASTCONV_DIRECT_INP )
+        {
+          pffft_transform(s->st, X + inpOff, s->Xf, /* tmp = */ s->Mf, PFFFT_FORWARD);
+        }
+        else
+        {
+          memcpy( s->Xt, X + inpOff, (unsigned)procLen * sizeof(float) );
+          if ( procLen < Nfft )
+            memset( s->Xt + procLen, 0, (unsigned)(Nfft - procLen) * sizeof(float) );
+    
+          pffft_transform(s->st, s->Xt, s->Xf, /* tmp = */ s->Mf, PFFFT_FORWARD);
+        }
+
+        pffft_zconvolve_no_accu(s->st, s->Xf, s->Hf, /* tmp = */ s->Mf, s->scale);
+
+        if ( flags & PFFASTCONV_CPLX_INP_OUT )
+        {
+          pffft_transform(s->st, s->Mf, s->Xf, /* tmp = */ s->Xt, PFFFT_BACKWARD);
+    
+          cplxOff = 2 * inpOff + part;
+          for ( j = 0; j < numOut; ++j )
+            Y[ cplxOff + 2 * j ] = s->Xf[j];
+        }
+        else if ( flags & PFFASTCONV_DIRECT_OUT )
+        {
+          pffft_transform(s->st, s->Mf, Y + inpOff, s->Xf, PFFFT_BACKWARD);
+        }
+        else
+        {
+          pffft_transform(s->st, s->Mf, s->Xf, /* tmp = */ s->Xt, PFFFT_BACKWARD);
+          memcpy( Y + inpOff, s->Xf, (unsigned)numOut * sizeof(float) );
+        }
+
+      }
+    }
+
+    return inpOff;
+  }
+}
+
diff --git a/pffastconv.h b/pffastconv.h
new file mode 100644
index 0000000..6bc5e47
--- /dev/null
+++ b/pffastconv.h
@@ -0,0 +1,171 @@
+/* Copyright (c) 2019  Hayati Ayguen ( h_ayguen@web.de )
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of PFFFT, PFFASTCONV, nor the names of its
+   sponsors or contributors may be used to endorse or promote products
+   derived from this Software without specific prior written permission.  
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+*/
+
+/*
+   PFFASTCONV : a Pretty Fast Fast Convolution
+
+   This is basically the implementation of fast convolution,
+   utilizing the FFT (pffft).
+
+   Restrictions: 
+
+   - 1D transforms only, with 32-bit single precision.
+
+   - all (float*) pointers in the functions below are expected to
+   have a "simd-compatible" alignment, that is 16 bytes on x86 and
+   powerpc CPUs.
+  
+   You can allocate such buffers with the functions
+   pffft_aligned_malloc / pffft_aligned_free (or with stuff like
+   posix_memalign..)
+
+*/
+
+#ifndef PFFASTCONV_H
+#define PFFASTCONV_H
+
+#include <stddef.h> /* for size_t */
+#include "pffft.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+  /* opaque struct holding internal stuff
+     this struct can't be shared by many threads as it contains
+     temporary data, computed within the convolution
+  */
+  typedef struct PFFASTCONV_Setup PFFASTCONV_Setup;
+
+  typedef enum {
+    PFFASTCONV_CPLX_INP_OUT = 1,
+    /* set when input and output are complex,
+     * with real and imag part interleaved in both vectors.
+     * input[] has inputLen complex values: 2 * inputLen floats,
+     * output[] is also written with complex values.
+     * without this flag, the input is interpreted as real vector
+     */
+
+    PFFASTCONV_CPLX_FILTER = 2,
+    /* set when filterCoeffs is complex,
+     * with real and imag part interleaved.
+     * filterCoeffs[] has filterLen complex values: 2 * filterLen floats
+     * without this flag, the filter is interpreted as real vector
+     * ATTENTION: this is not implemented yet!
+     */
+
+    PFFASTCONV_DIRECT_INP = 4,
+    /* set PFFASTCONV_DIRECT_INP only when the following conditions are met:
+     * 1- the input vector X must be aligned
+     * 2- (all) inputLen <= output blockLen
+     * 3- X must have a minimum length of the output blockLen
+     * 4- the additional samples from inputLen .. blockLen-1
+     *    must contain valid small and non-NAN samples (ideally zero)
+     * 
+     * this option is ignored when PFFASTCONV_CPLX_INP_OUT is set
+     */
+
+    PFFASTCONV_DIRECT_OUT = 8,
+    /* set PFFASTCONV_DIRECT_OUT only when the following conditions are met:
+     * 1- the output vector Y must be aligned
+     * 2- (all) inputLen <= output blockLen
+     * 3- Y must have a minimum length of the output blockLen
+     * 
+     * this option is ignored when PFFASTCONV_CPLX_INP_OUT is set
+     */
+
+    PFFASTCONV_CPLX_SINGLE_FFT = 16,
+    /* hint to process complex data with one single FFT;
+     * default is to use 2 FFTs: one for real part, one for imag part
+     * */
+
+
+    PFFASTCONV_SYMMETRIC = 32,
+    /* informational only: the filter is symmetric .. and filterLen is a multiple of 8 */
+
+    PFFASTCONV_CORRELATION = 64,
+    /* filterCoeffs[] of pffastconv_new_setup are for correlation;
+     * thus, do not flip them for the internal fft calculation,
+     * as would be necessary for the fast convolution */
+
+  } pffastconv_flags_t;
+
+  /*
+    prepare for performing fast convolution(s) of a filter with 'filterLen'
+    coefficients on input blocks of 'blockLen' samples.
+    On return, 'blockLen' might be bigger, to allow the fast convolution.
+
+    'flags' is a bitmask over the 'pffastconv_flags_t' enum.
+
+    the PFFASTCONV_Setup structure can't be shared across multiple filters
+    or concurrent threads.
+  */
+  PFFASTCONV_Setup * pffastconv_new_setup( const float * filterCoeffs, int filterLen, int * blockLen, int flags );
+
+  void pffastconv_destroy_setup(PFFASTCONV_Setup *);
+
+  /* 
+     Perform the fast convolution.
+
+     'input' and 'output' don't need to be aligned - unless any of
+     PFFASTCONV_DIRECT_INP or PFFASTCONV_DIRECT_OUT is set in 'flags'.
+
+     inputLen > output 'blockLen' (from pffastconv_new_setup()) is allowed.
+     In this case, multiple FFTs are called internally to process the
+     input[].
+
+     'output' vector must have size >= (inputLen - filterLen + 1)
+
+     set the bool option 'applyFlush' to process the full input[].
+     with this option, the 'tail samples' of the input are also processed.
+     This might be inefficient, because the FFT is called to produce
+     fewer output samples than possible.
+     This option is useful to process the last samples of an input (file)
+     or to reduce latency.
+
+     the return value is the number of samples produced in output[].
+     the same number of samples is consumed from input[]. to continue
+     processing, the caller must save/move the remaining samples of
+     input[].
+
+  */
+  int pffastconv_apply(PFFASTCONV_Setup * s, const float *input, int inputLen, float *output, int applyFlush);
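+
+  /* Usage sketch - illustrative only; names and lengths are made up:
+
+       int blockLen = 1024;   // might get increased by the setup
+       PFFASTCONV_Setup *s = pffastconv_new_setup(coeffs, filterLen, &blockLen, 0);
+       int done = pffastconv_apply(s, input, inputLen, output, 1);
+       // 'done' samples were written to output[] and consumed from input[];
+       // for streaming, move input[done .. inputLen-1] to the front first
+       pffastconv_destroy_setup(s);
+  */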
+
+  void *pffastconv_malloc(size_t nb_bytes);
+  void pffastconv_free(void *);
+
+  /* returns 4 or 1, depending on whether SSE/Altivec support was enabled when building pffft.c */
+  int pffastconv_simd_size();
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PFFASTCONV_H */
diff --git a/pffft.c b/pffft.c
new file mode 100644
index 0000000..059f2d7
--- /dev/null
+++ b/pffft.c
@@ -0,0 +1,131 @@
+/* Copyright (c) 2013  Julien Pommier ( pommier@modartt.com )
+   Copyright (c) 2020  Hayati Ayguen ( h_ayguen@web.de )
+
+   Based on original fortran 77 code from FFTPACKv4 from NETLIB
+   (http://www.netlib.org/fftpack), authored by Dr Paul Swarztrauber
+   of NCAR, in 1985.
+
+   As confirmed by the NCAR fftpack software curators, the following
+   FFTPACKv5 license applies to FFTPACKv4 sources. My changes are
+   released under the same terms.
+
+   FFTPACK license:
+
+   http://www.cisl.ucar.edu/css/software/fftpack5/ftpk.html
+
+   Copyright (c) 2004 the University Corporation for Atmospheric
+   Research ("UCAR"). All rights reserved. Developed by NCAR's
+   Computational and Information Systems Laboratory, UCAR,
+   www.cisl.ucar.edu.
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of NCAR's Computational and Information Systems
+   Laboratory, the University Corporation for Atmospheric Research,
+   nor the names of its sponsors or contributors may be used to
+   endorse or promote products derived from this Software without
+   specific prior written permission.  
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+
+
+   PFFFT : a Pretty Fast FFT.
+
+   This file is largely based on the original FFTPACK implementation, modified in
+   order to take advantage of SIMD instructions of modern CPUs.
+*/
+
+/*
+  ChangeLog: 
+  - 2011/10/02, version 1: This is the very first release of this file.
+*/
+
+#include "pffft.h"
+
+/* detect compiler flavour */
+#if defined(_MSC_VER)
+#  define COMPILER_MSVC
+#elif defined(__GNUC__)
+#  define COMPILER_GCC
+#endif
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <math.h>
+#include <assert.h>
+
+#if defined(COMPILER_GCC)
+#  define ALWAYS_INLINE(return_type) inline return_type __attribute__ ((always_inline))
+#  define NEVER_INLINE(return_type) return_type __attribute__ ((noinline))
+#  define RESTRICT __restrict
+#  define VLA_ARRAY_ON_STACK(type__, varname__, size__) type__ varname__[size__];
+#elif defined(COMPILER_MSVC)
+#  define ALWAYS_INLINE(return_type) __forceinline return_type
+#  define NEVER_INLINE(return_type) __declspec(noinline) return_type
+#  define RESTRICT __restrict
+#  define VLA_ARRAY_ON_STACK(type__, varname__, size__) type__ *varname__ = (type__*)_alloca(size__ * sizeof(type__))
+#endif
+
+
+#ifdef COMPILER_MSVC
+#pragma warning( disable : 4244 4305 4204 4456 )
+#endif
+
+/* 
+   vector support macros: the rest of the code is independent of
+   SSE/Altivec/NEON -- adding support for other platforms with 4-element
+   vectors should be limited to these macros 
+*/
+#include "simd/pf_float.h"
+
+/* map the generic names used by the shared implementation (pffft_priv_impl.h)
+   to the single-precision API */
+#define SETUP_STRUCT               PFFFT_Setup
+#define FUNC_NEW_SETUP             pffft_new_setup
+#define FUNC_DESTROY               pffft_destroy_setup
+#define FUNC_TRANSFORM_UNORDRD     pffft_transform
+#define FUNC_TRANSFORM_ORDERED     pffft_transform_ordered
+#define FUNC_ZREORDER              pffft_zreorder
+#define FUNC_ZCONVOLVE_ACCUMULATE  pffft_zconvolve_accumulate
+#define FUNC_ZCONVOLVE_NO_ACCU     pffft_zconvolve_no_accu
+
+#define FUNC_ALIGNED_MALLOC        pffft_aligned_malloc
+#define FUNC_ALIGNED_FREE          pffft_aligned_free
+#define FUNC_SIMD_SIZE             pffft_simd_size
+#define FUNC_SIMD_ARCH             pffft_simd_arch
+#define FUNC_VALIDATE_SIMD_A       validate_pffft_simd
+#define FUNC_VALIDATE_SIMD_EX      validate_pffft_simd_ex
+
+#define FUNC_CPLX_FINALIZE         pffft_cplx_finalize
+#define FUNC_CPLX_PREPROCESS       pffft_cplx_preprocess
+#define FUNC_REAL_PREPROCESS_4X4   pffft_real_preprocess_4x4
+#define FUNC_REAL_PREPROCESS       pffft_real_preprocess
+#define FUNC_REAL_FINALIZE_4X4     pffft_real_finalize_4x4
+#define FUNC_REAL_FINALIZE         pffft_real_finalize
+#define FUNC_TRANSFORM_INTERNAL    pffft_transform_internal
+
+#define FUNC_COS  cosf
+#define FUNC_SIN  sinf
+
+
+#include "pffft_priv_impl.h"
+
+
diff --git a/pffft.h b/pffft.h
new file mode 100644
index 0000000..31bb731
--- /dev/null
+++ b/pffft.h
@@ -0,0 +1,216 @@
+/* Copyright (c) 2013  Julien Pommier ( pommier@modartt.com ) 
+
+   Based on original fortran 77 code from FFTPACKv4 from NETLIB,
+   authored by Dr Paul Swarztrauber of NCAR, in 1985.
+
+   As confirmed by the NCAR fftpack software curators, the following
+   FFTPACKv5 license applies to FFTPACKv4 sources. My changes are
+   released under the same terms.
+
+   FFTPACK license:
+
+   http://www.cisl.ucar.edu/css/software/fftpack5/ftpk.html
+
+   Copyright (c) 2004 the University Corporation for Atmospheric
+   Research ("UCAR"). All rights reserved. Developed by NCAR's
+   Computational and Information Systems Laboratory, UCAR,
+   www.cisl.ucar.edu.
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of NCAR's Computational and Information Systems
+   Laboratory, the University Corporation for Atmospheric Research,
+   nor the names of its sponsors or contributors may be used to
+   endorse or promote products derived from this Software without
+   specific prior written permission.  
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+*/
+   
+/*
+   PFFFT : a Pretty Fast FFT.
+
+   This is basically an adaptation of the single precision fftpack
+   (v4) as found on netlib taking advantage of SIMD instructions found
+   on cpus such as intel x86 (SSE1), powerpc (Altivec), and arm (NEON).
+   
+   For architectures where no SIMD instruction is available, the code
+   falls back to a scalar version.  
+
+   Restrictions: 
+
+   - 1D transforms only, with 32-bit single precision.
+
+   - supports only transforms for inputs of length N of the form
+   N=(2^a)*(3^b)*(5^c), a >= 5, b >=0, c >= 0 (32, 48, 64, 96, 128,
+   144, 160, etc are all acceptable lengths). Performance is best for
+   128<=N<=8192.
+
+   - all (float*) pointers in the functions below are expected to
+   have a "simd-compatible" alignment, that is 16 bytes on x86 and
+   powerpc CPUs.
+  
+   You can allocate such buffers with the functions
+   pffft_aligned_malloc / pffft_aligned_free (or with stuff like
+   posix_memalign..)
+
+*/
+
+#ifndef PFFFT_H
+#define PFFFT_H
+
+#include <stddef.h> /* for size_t */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+  /* opaque struct holding internal stuff (precomputed twiddle factors)
+     this struct can be shared by many threads as it contains only
+     read-only data.  
+  */
+  typedef struct PFFFT_Setup PFFFT_Setup;
+
+#ifndef PFFFT_COMMON_ENUMS
+#define PFFFT_COMMON_ENUMS
+
+  /* direction of the transform */
+  typedef enum { PFFFT_FORWARD, PFFFT_BACKWARD } pffft_direction_t;
+  
+  /* type of transform */
+  typedef enum { PFFFT_REAL, PFFFT_COMPLEX } pffft_transform_t;
+
+#endif
+
+  /*
+    prepare for performing transforms of size N -- the returned
+    PFFFT_Setup structure is read-only so it can safely be shared by
+    multiple concurrent threads. 
+  */
+  PFFFT_Setup *pffft_new_setup(int N, pffft_transform_t transform);
+  void pffft_destroy_setup(PFFFT_Setup *);
+  /* 
+     Perform a Fourier transform. The z-domain data is stored in the
+     most efficient order for transforming it back, or using it for
+     convolution. If you need to have its content sorted in the
+     "usual" way, that is as an array of interleaved complex numbers,
+     either use pffft_transform_ordered, or call pffft_zreorder after
+     the forward fft, and before the backward fft.
+
+     Transforms are not scaled: PFFFT_BACKWARD(PFFFT_FORWARD(x)) = N*x.
+     Typically you will want to scale the backward transform by 1/N.
+     
+     The 'work' pointer should point to an area of N (2*N for complex
+     fft) floats, properly aligned. If 'work' is NULL, then stack will
+     be used instead (this is probably the best strategy for small
+     FFTs, say for N < 16384). Threads usually have a small stack, that
+     there's no sufficient amount of memory, usually leading to a crash!
+     Use the heap with pffft_aligned_malloc() in this case.
+
+     input and output may alias.
+  */
+  void pffft_transform(PFFFT_Setup *setup, const float *input, float *output, float *work, pffft_direction_t direction);
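+
+  /* Usage sketch - illustrative only; buffer names are made up:
+
+       PFFFT_Setup *s = pffft_new_setup(1024, PFFFT_REAL);
+       float *x = (float*)pffft_aligned_malloc(1024 * sizeof(float));
+       float *X = (float*)pffft_aligned_malloc(1024 * sizeof(float));
+       // ... fill x ...
+       pffft_transform(s, x, X, NULL, PFFFT_FORWARD);   // work == NULL: use stack
+       pffft_transform(s, X, x, NULL, PFFFT_BACKWARD);  // x now holds 1024 * original
+       pffft_aligned_free(X); pffft_aligned_free(x);
+       pffft_destroy_setup(s);
+  */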
+
+  /* 
+     Similar to pffft_transform, but makes sure that the output is
+     ordered as expected (interleaved complex numbers).  This is
+     similar to calling pffft_transform and then pffft_zreorder.
+     
+     input and output may alias.
+  */
+  void pffft_transform_ordered(PFFFT_Setup *setup, const float *input, float *output, float *work, pffft_direction_t direction);
+
+  /* 
+     call pffft_zreorder(.., PFFFT_FORWARD) after pffft_transform(...,
+     PFFFT_FORWARD) if you want to have the frequency components in
+     the correct "canonical" order, as interleaved complex numbers.
+     
+     (for real transforms, both 0-frequency and half frequency
+     components, which are real, are assembled in the first entry as
+     F(0)+i*F(n/2+1). Note that the original fftpack did place
+     F(n/2+1) at the end of the arrays).
+     
+     input and output should not alias.
+  */
+  void pffft_zreorder(PFFFT_Setup *setup, const float *input, float *output, pffft_direction_t direction);
+
+  /* 
+     Perform a multiplication of the frequency components of dft_a and
+     dft_b and accumulate them into dft_ab. The arrays should have
+     been obtained with pffft_transform(.., PFFFT_FORWARD) and should
+     *not* have been reordered with pffft_zreorder (otherwise just
+     perform the operation yourself as the dft coefs are stored as
+     interleaved complex numbers).
+     
+     the operation performed is: dft_ab += (dft_a * dft_b)*scaling
+     
+     The dft_a, dft_b and dft_ab pointers may alias.
+  */
+  void pffft_zconvolve_accumulate(PFFFT_Setup *setup, const float *dft_a, const float *dft_b, float *dft_ab, float scaling);
+
+  /* 
+     Perform a multiplication of the frequency components of dft_a and
+     dft_b and put result in dft_ab. The arrays should have
+     been obtained with pffft_transform(.., PFFFT_FORWARD) and should
+     *not* have been reordered with pffft_zreorder (otherwise just
+     perform the operation yourself as the dft coefs are stored as
+     interleaved complex numbers).
+
+     the operation performed is: dft_ab = (dft_a * dft_b)*scaling
+
+     The dft_a, dft_b and dft_ab pointers may alias.
+  */
+  void pffft_zconvolve_no_accu(PFFFT_Setup *setup, const float *dft_a, const float *dft_b, float *dft_ab, float scaling);
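+
+  /* Fast-convolution sketch - illustrative only; 's' comes from
+     pffft_new_setup(N, ...), a/b/ab and the spectra A/B/AB are aligned
+     buffers, and the spectra stay in the internal (non-reordered) order:
+
+       pffft_transform(s, a, A, work, PFFFT_FORWARD);
+       pffft_transform(s, b, B, work, PFFFT_FORWARD);
+       pffft_zconvolve_no_accu(s, A, B, AB, 1.0f / N);   // fold the 1/N scaling in
+       pffft_transform(s, AB, ab, work, PFFFT_BACKWARD); // circular convolution of a and b
+  */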
+
+  /* returns 4 or 1, depending on whether SSE/NEON/Altivec support was enabled when building pffft.c */
+  int pffft_simd_size();
+
+  /* return string identifier of used architecture (SSE/NEON/Altivec/..) */
+  const char * pffft_simd_arch();
+
+
+  /* the following helpers are identical to their pffftd_ counterparts */
+
+  /* simple helper to get minimum possible fft size */
+  int pffft_min_fft_size(pffft_transform_t transform);
+
+  /* simple helper to determine next power of 2
+     - without inexact/rounding floating point operations
+  */
+  int pffft_next_power_of_two(int N);
+
+  /* simple helper to determine if power of 2 - returns bool */
+  int pffft_is_power_of_two(int N);
+
+  /*
+    the float buffers must have the correct alignment (16-byte boundary
+    on intel and powerpc). This function may be used to obtain such
+    correctly aligned buffers.  
+  */
+  void *pffft_aligned_malloc(size_t nb_bytes);
+  void pffft_aligned_free(void *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PFFFT_H */
+
diff --git a/pffft.hpp b/pffft.hpp
new file mode 100644
index 0000000..ce910f9
--- /dev/null
+++ b/pffft.hpp
@@ -0,0 +1,1001 @@
+/* Copyright (c) 2020  Dario Mambro ( dario.mambro@gmail.com )
+   Copyright (c) 2020  Hayati Ayguen ( h_ayguen@web.de )
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of PFFFT, nor the names of its
+   sponsors or contributors may be used to endorse or promote products
+   derived from this Software without specific prior written permission.
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+*/
+
+#pragma once
+
+#include <complex>
+#include <vector>
+#include <limits>
+
+namespace {
+#if defined(PFFFT_ENABLE_FLOAT) || ( !defined(PFFFT_ENABLE_FLOAT) && !defined(PFFFT_ENABLE_DOUBLE) )
+#include "pffft.h"
+#endif
+#if defined(PFFFT_ENABLE_DOUBLE)
+#include "pffft_double.h"
+#endif
+}
+
+namespace pffft {
+
+// enum { PFFFT_REAL, PFFFT_COMPLEX }
+typedef pffft_transform_t TransformType;
+
+// define 'Scalar' and 'Complex' (in namespace pffft) with template Types<>
+// and other type specific helper functions
+template<typename T> struct Types {};
+#if defined(PFFFT_ENABLE_FLOAT) || ( !defined(PFFFT_ENABLE_FLOAT) && !defined(PFFFT_ENABLE_DOUBLE) )
+template<> struct Types<float>  {
+  typedef float  Scalar;
+  typedef std::complex<Scalar> Complex;
+  static int simd_size() { return pffft_simd_size(); }
+  static const char * simd_arch() { return pffft_simd_arch(); }
+};
+template<> struct Types< std::complex<float> >  {
+  typedef float  Scalar;
+  typedef std::complex<float>  Complex;
+  static int simd_size() { return pffft_simd_size(); }
+  static const char * simd_arch() { return pffft_simd_arch(); }
+};
+#endif
+#if defined(PFFFT_ENABLE_DOUBLE)
+template<> struct Types<double> {
+  typedef double Scalar;
+  typedef std::complex<Scalar> Complex;
+  static int simd_size() { return pffftd_simd_size(); }
+  static const char * simd_arch() { return pffftd_simd_arch(); }
+};
+template<> struct Types< std::complex<double> > {
+  typedef double Scalar;
+  typedef std::complex<double> Complex;
+  static int simd_size() { return pffftd_simd_size(); }
+  static const char * simd_arch() { return pffftd_simd_arch(); }
+};
+#endif
+
+// Allocator
+template<typename T> class PFAlloc;
+
+namespace {
+  template<typename T> class Setup;
+}
+
+#if (__cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1900))
+
+// define AlignedVector<T> utilizing 'using' in C++11
+template<typename T>
+using AlignedVector = typename std::vector< T, PFAlloc<T> >;
+
+#else
+
+// define AlignedVector<T> by deriving from std::vector<> (pre-C++11)
+template <typename T>
+struct AlignedVector : public std::vector< T, PFAlloc<T> > {
+  AlignedVector() : std::vector< T, PFAlloc<T> >() { }
+  AlignedVector(int N) : std::vector< T, PFAlloc<T> >(N) { }
+};
+
+#endif
+
+
+// T can be float, double, std::complex<float> or std::complex<double>
+//   define PFFFT_ENABLE_DOUBLE before including this file for double and std::complex<double>
+template<typename T>
+class Fft
+{
+public:
+
+  // define types value_type, Scalar and Complex
+  typedef T value_type;
+  typedef typename Types<T>::Scalar  Scalar;
+  typedef typename Types<T>::Complex Complex;
+
+  // static introspection functions
+  static bool isComplexTransform()  { return sizeof(T) == sizeof(Complex); }
+  static bool isFloatScalar()  { return sizeof(Scalar) == sizeof(float); }
+  static bool isDoubleScalar() { return sizeof(Scalar) == sizeof(double); }
+
+  // simple helper to get minimum possible fft length
+  static int minFFtsize() { return pffft_min_fft_size( isComplexTransform() ? PFFFT_COMPLEX : PFFFT_REAL ); }
+
+  // simple helper to determine next power of 2 - without inexact/rounding floating point operations
+  static int nextPowerOfTwo(int N) { return pffft_next_power_of_two(N); }
+  static bool isPowerOfTwo(int N) { return pffft_is_power_of_two(N); }
+
+  static int simd_size() { return Types<T>::simd_size(); }
+  static const char * simd_arch() { return Types<T>::simd_arch(); }
+
+  //////////////////
+
+  /*
+   * Constructor, with transformation length, preparing transforms.
+   *
+   * For length <= stackThresholdLen, the stack is used for the internal
+   * work memory; for bigger lengths, the heap is used.
+   *
+   * Using the stack is probably the best strategy for small
+   * FFTs (say, for N <= 4096). Threads usually have a small stack, so
+   * there may not be a sufficient amount of memory, which usually
+   * leads to a crash!
+   */
+  Fft( int length, int stackThresholdLen = 4096 );
+
+  ~Fft();
+
+  /*
+   * prepare for transformation length 'newLength'.
+   * length is identical to forward()'s input vector's size,
+   * and also equals inverse()'s output vector size.
+   * this function is not a simple setter: it pre-calculates the twiddle factors.
+   */
+  void prepareLength(int newLength);
+
+  /*
+   * retrieve the transformation length.
+   */
+  int getLength() const { return length; }
+
+  /*
+   * retrieve size of complex spectrum vector,
+   * the output of forward()
+   */
+  int getSpectrumSize() const { return isComplexTransform() ? length : ( length / 2 ); }
+
+  /*
+   * retrieve size of spectrum vector - in internal layout;
+   * the output of forwardToInternalLayout()
+   */
+  int getInternalLayoutSize() const { return isComplexTransform() ? ( 2 * length ) : length; }
+
+
+  ////////////////////////////////////////////
+  ////
+  //// API 1, with std::vector<> based containers,
+  ////   which free the allocated memory themselves (RAII).
+  ////
+  //// uses an Allocator for the alignment of SIMD data.
+  ////
+  ////////////////////////////////////////////
+
+  // create suitably preallocated aligned vector for one FFT
+  AlignedVector<T>       valueVector() const;
+  AlignedVector<Complex> spectrumVector() const;
+  AlignedVector<Scalar>  internalLayoutVector() const;
+
+  ////////////////////////////////////////////
+  // although Vectors are used for output ..
+  // they need to have resize() applied beforehand!
+
+  // core API, having the spectrum in canonical order
+
+  /*
+   * Perform the forward Fourier transform.
+   *
+   * Transforms are not scaled: inverse(forward(x)) = N*x.
+   * Typically you will want to scale the backward transform by 1/N.
+   *
+   * The output 'spectrum' is canonically ordered - as expected.
+   *
+   * a) for complex input isComplexTransform() == true,
+   *    and transformation length N  the output array is complex:
+   *   index k in 0 .. N/2 -1  corresponds to frequency k * Samplerate / N
+   *   index k in N/2 .. N -1  corresponds to frequency (k -N) * Samplerate / N,
+   *     resulting in negative frequencies
+   *
+   * b) for real input isComplexTransform() == false,
+   *    and transformation length N  the output array is 'mostly' complex:
+   *   index k in 1 .. N/2 -1  corresponds to frequency k * Samplerate / N
+   *   index k == 0 is a special case:
+   *     the real() part contains the result for the DC frequency 0,
+   *     the imag() part contains the result for the Nyquist frequency Samplerate/2
+   *   both 0-frequency and half frequency components, which are real,
+   *   are assembled in the first entry as  F(0)+i*F(N/2).
+   *
+   * input and output may alias - if you do nasty type conversion.
+   * return is just the given output parameter 'spectrum'.
+   */
+  AlignedVector<Complex> & forward(const AlignedVector<T> & input, AlignedVector<Complex> & spectrum);
+
+  /*
+   * Perform the inverse Fourier transform, see forward().
+   * return is just the given output parameter 'output'.
+   */
+  AlignedVector<T> & inverse(const AlignedVector<Complex> & spectrum, AlignedVector<T> & output);
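+
+  /* Usage sketch - illustrative only (C++11 'auto' used for brevity):
+   *
+   *   pffft::Fft<float> fft(1024);
+   *   auto x = fft.valueVector();     // 1024 aligned floats
+   *   auto X = fft.spectrumVector();  // 512 aligned complex bins
+   *   // ... fill x ...
+   *   fft.forward(x, X);
+   *   fft.inverse(X, x);              // x now holds 1024 * the original input
+   */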
+
+
+  // provide additional functions with spectrum in some internal Layout.
+  // these are faster, because the implementation omits the reordering.
+  // these are useful in special applications, like fast convolution,
+  // where an inverse() follows anyway ..
+
+  /*
+   * Perform the forward Fourier transform - similar to forward(), BUT:
+   *
+   * The z-domain data is stored in the most efficient order
+   * for transforming it back, or using it for convolution.
+   * If you need to have its content sorted in the "usual" canonical order,
+   * either use forward(), or call reorderSpectrum() after calling
+   * forwardToInternalLayout(), and before the backward fft
+   *
+   * return is just the given output parameter 'spectrum_internal_layout'.
+   */
+  AlignedVector<Scalar> & forwardToInternalLayout(
+          const AlignedVector<T> & input,
+          AlignedVector<Scalar> & spectrum_internal_layout );
+
+  /*
+   * Perform the inverse Fourier transform, see forwardToInternalLayout()
+   *
+   * return is just the given output parameter 'output'.
+   */
+  AlignedVector<T> & inverseFromInternalLayout(
+          const AlignedVector<Scalar> & spectrum_internal_layout,
+          AlignedVector<T> & output );
+
+  /*
+   * Reorder the spectrum from internal layout to have the
+   * frequency components in the correct "canonical" order.
+   * see forward() for a description of the canonical order.
+   *
+   * input and output should not alias.
+   */
+  void reorderSpectrum(
+          const AlignedVector<Scalar> & input,
+          AlignedVector<Complex> & output );
+
+  /*
+   * Perform a multiplication of the frequency components of
+   * spectrum_internal_a and spectrum_internal_b
+   * into spectrum_internal_ab.
+   * The arrays should have been obtained with forwardToInternalLayout()
+   * and should *not* have been reordered with reorderSpectrum().
+   *
+   * the operation performed is:
+   *  spectrum_internal_ab = (spectrum_internal_a * spectrum_internal_b)*scaling
+   *
+   * The spectrum_internal_a/_b/_ab pointers may alias.
+   * return is just the given output parameter 'spectrum_internal_ab'.
+   */
+  AlignedVector<Scalar> & convolve(
+          const AlignedVector<Scalar> & spectrum_internal_a,
+          const AlignedVector<Scalar> & spectrum_internal_b,
+          AlignedVector<Scalar> & spectrum_internal_ab,
+          const Scalar scaling );
+
+  /*
+   * Perform a multiplication and accumulation of the frequency components
+   * - similar to convolve().
+   *
+   * the operation performed is:
+   *  spectrum_internal_ab += (spectrum_internal_a * spectrum_internal_b)*scaling
+   *
+   * The spectrum_internal_a/_b/_ab pointers may alias.
+   * return is just the given output parameter 'spectrum_internal_ab'.
+   */
+  AlignedVector<Scalar> & convolveAccumulate(
+          const AlignedVector<Scalar> & spectrum_internal_a,
+          const AlignedVector<Scalar> & spectrum_internal_b,
+          AlignedVector<Scalar> & spectrum_internal_ab,
+          const Scalar scaling );
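+
+  /* Fast-convolution sketch - illustrative only; a, b and ab are value
+   * vectors of the caller, and the result is their circular convolution:
+   *
+   *   auto A  = fft.internalLayoutVector();
+   *   auto B  = fft.internalLayoutVector();
+   *   auto AB = fft.internalLayoutVector();
+   *   fft.forwardToInternalLayout(a, A);
+   *   fft.forwardToInternalLayout(b, B);
+   *   fft.convolve(A, B, AB, 1.0f / fft.getLength());  // fold the 1/N scaling in
+   *   fft.inverseFromInternalLayout(AB, ab);
+   */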
+
+
+  ////////////////////////////////////////////
+  ////
+  //// API 2, dealing with raw pointers,
+  //// which need to be deallocated using alignedFree()
+  ////
+  //// the special allocation is required because SIMD
+  //// implementations require aligned memory
+  ////
+  //// Method descriptions match the methods above, which take
+  //// AlignedVector<T> parameters instead of raw pointers.
+  //// That is why the following methods have no documentation.
+  ////
+  ////////////////////////////////////////////
+
+  static void alignedFree(void* ptr);
+
+  static T * alignedAllocType(int length);
+  static Scalar* alignedAllocScalar(int length);
+  static Complex* alignedAllocComplex(int length);
+
+  // core API, having the spectrum in canonical order
+
+  Complex* forward(const T* input, Complex* spectrum);
+
+  T* inverse(const Complex* spectrum, T* output);
+
+
+  // provide additional functions with spectrum in some internal Layout.
+  // these are faster, because the implementation omits the reordering.
+  // these are useful in special applications, like fast convolution,
+  // where an inverse() follows anyway ..
+
+  Scalar* forwardToInternalLayout(const T* input,
+                                Scalar* spectrum_internal_layout);
+
+  T* inverseFromInternalLayout(const Scalar* spectrum_internal_layout, T* output);
+
+  void reorderSpectrum(const Scalar* input, Complex* output );
+
+  Scalar* convolve(const Scalar* spectrum_internal_a,
+                   const Scalar* spectrum_internal_b,
+                   Scalar* spectrum_internal_ab,
+                   const Scalar scaling);
+
+  Scalar* convolveAccumulate(const Scalar* spectrum_internal_a,
+                             const Scalar* spectrum_internal_b,
+                             Scalar* spectrum_internal_ab,
+                             const Scalar scaling);
+
+private:
+  Setup<T> setup;
+  Scalar* work;
+  int length;
+  int stackThresholdLen;
+};
+
+
+template<typename T>
+inline T* alignedAlloc(int length) {
+  return (T*)pffft_aligned_malloc( length * sizeof(T) );
+}
+
+inline void alignedFree(void *ptr) {
+  pffft_aligned_free(ptr);
+}
+
+
+// simple helper to get minimum possible fft length
+inline int minFFtsize(pffft_transform_t transform) {
+  return pffft_min_fft_size(transform);
+}
+
+// simple helper to determine next power of 2 - without inexact/rounding floating point operations
+inline int nextPowerOfTwo(int N) {
+  return pffft_next_power_of_two(N);
+}
+
+inline bool isPowerOfTwo(int N) {
+  return pffft_is_power_of_two(N);
+}
+
+
+
+////////////////////////////////////////////////////////////////////
+
+// implementation
+
+namespace {
+
+template<typename T>
+class Setup
+{};
+
+#if defined(PFFFT_ENABLE_FLOAT) || ( !defined(PFFFT_ENABLE_FLOAT) && !defined(PFFFT_ENABLE_DOUBLE) )
+
+template<>
+class Setup<float>
+{
+  PFFFT_Setup* self;
+
+public:
+  typedef float value_type;
+  typedef Types< value_type >::Scalar Scalar;
+
+  Setup()
+    : self(NULL)
+  {}
+
+  void prepareLength(int length)
+  {
+    if (self) {
+      pffft_destroy_setup(self);
+    }
+    self = pffft_new_setup(length, PFFFT_REAL);
+  }
+
+  ~Setup() { if (self) pffft_destroy_setup(self); }
+
+  void transform_ordered(const Scalar* input,
+                         Scalar* output,
+                         Scalar* work,
+                         pffft_direction_t direction)
+  {
+    pffft_transform_ordered(self, input, output, work, direction);
+  }
+
+  void transform(const Scalar* input,
+                 Scalar* output,
+                 Scalar* work,
+                 pffft_direction_t direction)
+  {
+    pffft_transform(self, input, output, work, direction);
+  }
+
+  void reorder(const Scalar* input, Scalar* output, pffft_direction_t direction)
+  {
+    pffft_zreorder(self, input, output, direction);
+  }
+
+  void convolveAccumulate(const Scalar* dft_a,
+                          const Scalar* dft_b,
+                          Scalar* dft_ab,
+                          const Scalar scaling)
+  {
+    pffft_zconvolve_accumulate(self, dft_a, dft_b, dft_ab, scaling);
+  }
+
+  void convolve(const Scalar* dft_a,
+                const Scalar* dft_b,
+                Scalar* dft_ab,
+                const Scalar scaling)
+  {
+    pffft_zconvolve_no_accu(self, dft_a, dft_b, dft_ab, scaling);
+  }
+};
+
+template<>
+class Setup< std::complex<float> >
+{
+  PFFFT_Setup* self;
+
+public:
+  typedef std::complex<float> value_type;
+  typedef Types< value_type >::Scalar Scalar;
+
+  Setup()
+    : self(NULL)
+  {}
+
+  ~Setup() { if (self) pffft_destroy_setup(self); }
+
+  void prepareLength(int length)
+  {
+    if (self) {
+      pffft_destroy_setup(self);
+    }
+    self = pffft_new_setup(length, PFFFT_COMPLEX);
+  }
+
+  void transform_ordered(const Scalar* input,
+                         Scalar* output,
+                         Scalar* work,
+                         pffft_direction_t direction)
+  {
+    pffft_transform_ordered(self, input, output, work, direction);
+  }
+
+  void transform(const Scalar* input,
+                 Scalar* output,
+                 Scalar* work,
+                 pffft_direction_t direction)
+  {
+    pffft_transform(self, input, output, work, direction);
+  }
+
+  void reorder(const Scalar* input, Scalar* output, pffft_direction_t direction)
+  {
+    pffft_zreorder(self, input, output, direction);
+  }
+
+  /* added for parity with Setup<float>: required by Fft<T>::convolveAccumulate() */
+  void convolveAccumulate(const Scalar* dft_a,
+                          const Scalar* dft_b,
+                          Scalar* dft_ab,
+                          const Scalar scaling)
+  {
+    pffft_zconvolve_accumulate(self, dft_a, dft_b, dft_ab, scaling);
+  }
+
+  void convolve(const Scalar* dft_a,
+                const Scalar* dft_b,
+                Scalar* dft_ab,
+                const Scalar scaling)
+  {
+    pffft_zconvolve_no_accu(self, dft_a, dft_b, dft_ab, scaling);
+  }
+
+#endif /* defined(PFFFT_ENABLE_FLOAT) || ( !defined(PFFFT_ENABLE_FLOAT) && !defined(PFFFT_ENABLE_DOUBLE) ) */
+
+
+#if defined(PFFFT_ENABLE_DOUBLE)
+
+template<>
+class Setup<double>
+{
+  PFFFTD_Setup* self;
+
+public:
+  typedef double value_type;
+  typedef Types< value_type >::Scalar Scalar;
+
+  Setup()
+    : self(NULL)
+  {}
+
+  ~Setup() { if (self) pffftd_destroy_setup(self); }
+
+  void prepareLength(int length)
+  {
+    if (self) {
+      pffftd_destroy_setup(self);
+      self = NULL;
+    }
+    if (length > 0) {
+      self = pffftd_new_setup(length, PFFFT_REAL);
+    }
+  }
+
+  void transform_ordered(const Scalar* input,
+                         Scalar* output,
+                         Scalar* work,
+                         pffft_direction_t direction)
+  {
+    pffftd_transform_ordered(self, input, output, work, direction);
+  }
+
+  void transform(const Scalar* input,
+                 Scalar* output,
+                 Scalar* work,
+                 pffft_direction_t direction)
+  {
+    pffftd_transform(self, input, output, work, direction);
+  }
+
+  void reorder(const Scalar* input, Scalar* output, pffft_direction_t direction)
+  {
+    pffftd_zreorder(self, input, output, direction);
+  }
+
+  void convolveAccumulate(const Scalar* dft_a,
+                          const Scalar* dft_b,
+                          Scalar* dft_ab,
+                          const Scalar scaling)
+  {
+    pffftd_zconvolve_accumulate(self, dft_a, dft_b, dft_ab, scaling);
+  }
+
+  void convolve(const Scalar* dft_a,
+                const Scalar* dft_b,
+                Scalar* dft_ab,
+                const Scalar scaling)
+  {
+    pffftd_zconvolve_no_accu(self, dft_a, dft_b, dft_ab, scaling);
+  }
+};
+
+template<>
+class Setup< std::complex<double> >
+{
+  PFFFTD_Setup* self;
+
+public:
+  typedef std::complex<double> value_type;
+  typedef Types< value_type >::Scalar Scalar;
+
+  Setup()
+    : self(NULL)
+  {}
+
+  ~Setup() { if (self) pffftd_destroy_setup(self); }
+
+  void prepareLength(int length)
+  {
+    if (self) {
+      pffftd_destroy_setup(self);
+    }
+    self = pffftd_new_setup(length, PFFFT_COMPLEX);
+  }
+
+  void transform_ordered(const Scalar* input,
+                         Scalar* output,
+                         Scalar* work,
+                         pffft_direction_t direction)
+  {
+    pffftd_transform_ordered(self, input, output, work, direction);
+  }
+
+  void transform(const Scalar* input,
+                 Scalar* output,
+                 Scalar* work,
+                 pffft_direction_t direction)
+  {
+    pffftd_transform(self, input, output, work, direction);
+  }
+
+  void reorder(const Scalar* input, Scalar* output, pffft_direction_t direction)
+  {
+    pffftd_zreorder(self, input, output, direction);
+  }
+
+  void convolveAccumulate(const Scalar* dft_a,
+                          const Scalar* dft_b,
+                          Scalar* dft_ab,
+                          const Scalar scaling)
+  {
+    pffftd_zconvolve_accumulate(self, dft_a, dft_b, dft_ab, scaling);
+  }
+
+  void convolve(const Scalar* dft_a,
+                const Scalar* dft_b,
+                Scalar* dft_ab,
+                const Scalar scaling)
+  {
+    pffftd_zconvolve_no_accu(self, dft_a, dft_b, dft_ab, scaling);
+  }
+};
+
+#endif /* defined(PFFFT_ENABLE_DOUBLE) */
+
+} // end of anonymous namespace for Setup<>
+
+
+template<typename T>
+inline Fft<T>::Fft(int length, int stackThresholdLen)
+  : length(0)
+  , stackThresholdLen(stackThresholdLen)
+  , work(NULL)
+{
+#if (__cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1900))
+  static_assert( sizeof(Complex) == 2 * sizeof(Scalar), "pffft requires sizeof(std::complex<>) == 2 * sizeof(Scalar)" );
+#elif defined(__GNUC__)
+  char static_assert_like[(sizeof(Complex) == 2 * sizeof(Scalar)) ? 1 : -1]; // pffft requires sizeof(std::complex<>) == 2 * sizeof(Scalar)
+#endif
+  prepareLength(length);
+}
+
+template<typename T>
+inline Fft<T>::~Fft()
+{
+  alignedFree(work);
+}
+
+template<typename T>
+inline void
+Fft<T>::prepareLength(int newLength)
+{
+  const bool wasOnHeap = ( work != NULL );
+
+  const bool useHeap = newLength > stackThresholdLen;
+
+  if (useHeap == wasOnHeap && newLength == length) {
+    return;
+  }
+
+  length = newLength;
+
+  setup.prepareLength(length);
+
+  if (work) {
+    alignedFree(work);
+    work = NULL;
+  }
+
+  if (useHeap) {
+    work = reinterpret_cast<Scalar*>( alignedAllocType(length) );
+  }
+}
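+
+/* note on prepareLength() (derived from the code above and the C API docs):
+   lengths up to stackThresholdLen keep 'work' at NULL, so the transforms use
+   the caller's stack; larger lengths allocate an aligned heap work area of
+   'length' elements of T -- for complex T that is 2*length scalars, matching
+   what the underlying C API requires. */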
+
+
+template<typename T>
+inline AlignedVector<T>
+Fft<T>::valueVector() const
+{
+  return AlignedVector<T>(length);
+}
+
+template<typename T>
+inline AlignedVector< typename Fft<T>::Complex >
+Fft<T>::spectrumVector() const
+{
+  return AlignedVector<Complex>( getSpectrumSize() );
+}
+
+template<typename T>
+inline AlignedVector< typename Fft<T>::Scalar >
+Fft<T>::internalLayoutVector() const
+{
+  return AlignedVector<Scalar>( getInternalLayoutSize() );
+}
+
+
+template<typename T>
+inline AlignedVector< typename Fft<T>::Complex > &
+Fft<T>::forward(const AlignedVector<T> & input, AlignedVector<Complex> & spectrum)
+{
+  forward( input.data(), spectrum.data() );
+  return spectrum;
+}
+
+template<typename T>
+inline AlignedVector<T> &
+Fft<T>::inverse(const AlignedVector<Complex> & spectrum, AlignedVector<T> & output)
+{
+  inverse( spectrum.data(), output.data() );
+  return output;
+}
+
+
+template<typename T>
+inline AlignedVector< typename Fft<T>::Scalar > &
+Fft<T>::forwardToInternalLayout(
+    const AlignedVector<T> & input,
+    AlignedVector<Scalar> & spectrum_internal_layout )
+{
+  forwardToInternalLayout( input.data(), spectrum_internal_layout.data() );
+  return spectrum_internal_layout;
+}
+
+template<typename T>
+inline AlignedVector<T> &
+Fft<T>::inverseFromInternalLayout(
+    const AlignedVector<Scalar> & spectrum_internal_layout,
+    AlignedVector<T> & output )
+{
+  inverseFromInternalLayout( spectrum_internal_layout.data(), output.data() );
+  return output;
+}
+
+template<typename T>
+inline void
+Fft<T>::reorderSpectrum(
+    const AlignedVector<Scalar> & input,
+    AlignedVector<Complex> & output )
+{
+  reorderSpectrum( input.data(), output.data() );
+}
+
+template<typename T>
+inline AlignedVector< typename Fft<T>::Scalar > &
+Fft<T>::convolveAccumulate(
+    const AlignedVector<Scalar> & spectrum_internal_a,
+    const AlignedVector<Scalar> & spectrum_internal_b,
+    AlignedVector<Scalar> & spectrum_internal_ab,
+    const Scalar scaling )
+{
+  convolveAccumulate( spectrum_internal_a.data(), spectrum_internal_b.data(),
+                      spectrum_internal_ab.data(), scaling );
+  return spectrum_internal_ab;
+}
+
+template<typename T>
+inline AlignedVector< typename Fft<T>::Scalar > &
+Fft<T>::convolve(
+    const AlignedVector<Scalar> & spectrum_internal_a,
+    const AlignedVector<Scalar> & spectrum_internal_b,
+    AlignedVector<Scalar> & spectrum_internal_ab,
+    const Scalar scaling )
+{
+  convolve( spectrum_internal_a.data(), spectrum_internal_b.data(),
+            spectrum_internal_ab.data(), scaling );
+  return spectrum_internal_ab;
+}
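+
+/* Illustrative usage sketch for the wrappers above (not part of the library);
+   assumes PFFFT_ENABLE_FLOAT and a length accepted by the underlying setup:
+
+     pffft::Fft<float> fft(1024);                        // real FFT of length N=1024
+     pffft::AlignedVector<float> a = fft.valueVector();  // time-domain inputs
+     pffft::AlignedVector<float> b = fft.valueVector();
+     pffft::AlignedVector<float> A = fft.internalLayoutVector();  // z-domain,
+     pffft::AlignedVector<float> B = fft.internalLayoutVector();  // internal order
+     pffft::AlignedVector<float> P = fft.internalLayoutVector();
+     pffft::AlignedVector<float> y = fft.valueVector();  // convolution result
+
+     // ... fill a and b ...
+     fft.forwardToInternalLayout(a, A);
+     fft.forwardToInternalLayout(b, B);
+     fft.convolve(A, B, P, 1.0f / 1024);   // scale by 1/N: transforms are unscaled
+     fft.inverseFromInternalLayout(P, y);
+*/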
+
+
+template<typename T>
+inline typename Fft<T>::Complex *
+Fft<T>::forward(const T* input, Complex * spectrum)
+{
+  setup.transform_ordered(reinterpret_cast<const Scalar*>(input),
+                          reinterpret_cast<Scalar*>(spectrum),
+                          work,
+                          PFFFT_FORWARD);
+  return spectrum;
+}
+
+template<typename T>
+inline T*
+Fft<T>::inverse(Complex const* spectrum, T* output)
+{
+  setup.transform_ordered(reinterpret_cast<const Scalar*>(spectrum),
+                          reinterpret_cast<Scalar*>(output),
+                          work,
+                          PFFFT_BACKWARD);
+  return output;
+}
+
+template<typename T>
+inline typename pffft::Fft<T>::Scalar*
+Fft<T>::forwardToInternalLayout(const T* input, Scalar* spectrum_internal_layout)
+{
+  setup.transform(reinterpret_cast<const Scalar*>(input),
+                  spectrum_internal_layout,
+                  work,
+                  PFFFT_FORWARD);
+  return spectrum_internal_layout;
+}
+
+template<typename T>
+inline T*
+Fft<T>::inverseFromInternalLayout(const Scalar* spectrum_internal_layout, T* output)
+{
+  setup.transform(spectrum_internal_layout,
+                  reinterpret_cast<Scalar*>(output),
+                  work,
+                  PFFFT_BACKWARD);
+  return output;
+}
+
+template<typename T>
+inline void
+Fft<T>::reorderSpectrum( const Scalar* input, Complex* output )
+{
+  setup.reorder(input, reinterpret_cast<Scalar*>(output), PFFFT_FORWARD);
+}
+
+template<typename T>
+inline typename pffft::Fft<T>::Scalar*
+Fft<T>::convolveAccumulate(const Scalar* dft_a,
+                           const Scalar* dft_b,
+                           Scalar* dft_ab,
+                           const Scalar scaling)
+{
+  setup.convolveAccumulate(dft_a, dft_b, dft_ab, scaling);
+  return dft_ab;
+}
+
+template<typename T>
+inline typename pffft::Fft<T>::Scalar*
+Fft<T>::convolve(const Scalar* dft_a,
+                 const Scalar* dft_b,
+                 Scalar* dft_ab,
+                 const Scalar scaling)
+{
+  setup.convolve(dft_a, dft_b, dft_ab, scaling);
+  return dft_ab;
+}
+
+template<typename T>
+inline void
+Fft<T>::alignedFree(void* ptr)
+{
+  pffft::alignedFree(ptr);
+}
+
+
+template<typename T>
+inline T*
+pffft::Fft<T>::alignedAllocType(int length)
+{
+  return alignedAlloc<T>(length);
+}
+
+template<typename T>
+inline typename pffft::Fft<T>::Scalar*
+pffft::Fft<T>::alignedAllocScalar(int length)
+{
+  return alignedAlloc<Scalar>(length);
+}
+
+template<typename T>
+inline typename Fft<T>::Complex *
+Fft<T>::alignedAllocComplex(int length)
+{
+  return alignedAlloc<Complex>(length);
+}
+
+
+
+////////////////////////////////////////////////////////////////////
+
+// Allocator - for std::vector<>:
+// origin: http://www.josuttis.com/cppcode/allocator.html
+// http://www.josuttis.com/cppcode/myalloc.hpp
+//
+// minor renaming was applied, and the pffft (de)allocation functions
+// are utilized in Josuttis' allocator
+
+/* The following code example is taken from the book
+ * "The C++ Standard Library - A Tutorial and Reference"
+ * by Nicolai M. Josuttis, Addison-Wesley, 1999
+ *
+ * (C) Copyright Nicolai M. Josuttis 1999.
+ * Permission to copy, use, modify, sell and distribute this software
+ * is granted provided this copyright notice appears in all copies.
+ * This software is provided "as is" without express or implied
+ * warranty, and with no claim as to its suitability for any purpose.
+ */
+
+template <class T>
+class PFAlloc {
+  public:
+    // type definitions
+    typedef T        value_type;
+    typedef T*       pointer;
+    typedef const T* const_pointer;
+    typedef T&       reference;
+    typedef const T& const_reference;
+    typedef std::size_t    size_type;
+    typedef std::ptrdiff_t difference_type;
+
+    // rebind allocator to type U
+    template <class U>
+    struct rebind {
+        typedef PFAlloc<U> other;
+    };
+
+    // return address of values
+    pointer address (reference value) const {
+        return &value;
+    }
+    const_pointer address (const_reference value) const {
+        return &value;
+    }
+
+    /* constructors and destructor
+     * - nothing to do because the allocator has no state
+     */
+    PFAlloc() throw() {
+    }
+    PFAlloc(const PFAlloc&) throw() {
+    }
+    template <class U>
+      PFAlloc (const PFAlloc<U>&) throw() {
+    }
+    ~PFAlloc() throw() {
+    }
+
+    // return maximum number of elements that can be allocated
+    size_type max_size () const throw() {
+        return std::numeric_limits<std::size_t>::max() / sizeof(T);
+    }
+
+    // allocate but don't initialize num elements of type T
+    pointer allocate (size_type num, const void* = 0) {
+        pointer ret = (pointer)( alignedAlloc<T>(num) );
+        return ret;
+    }
+
+    // initialize elements of allocated storage p with value value
+    void construct (pointer p, const T& value) {
+        // initialize memory with placement new
+        new((void*)p)T(value);
+    }
+
+    // destroy elements of initialized storage p
+    void destroy (pointer p) {
+        // destroy objects by calling their destructor
+        p->~T();
+    }
+
+    // deallocate storage p of deleted elements
+    void deallocate (pointer p, size_type num) {
+        // deallocate memory with pffft
+        alignedFree( (void*)p );
+    }
+};
+
+// declare that all specializations of this allocator are interchangeable
+template <class T1, class T2>
+bool operator== (const PFAlloc<T1>&,
+                 const PFAlloc<T2>&) throw() {
+    return true;
+}
+template <class T1, class T2>
+bool operator!= (const PFAlloc<T1>&,
+                 const PFAlloc<T2>&) throw() {
+    return false;
+}
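+
+/* Illustrative use (not part of the library): PFAlloc gives std::vector
+   SIMD-aligned storage via the pffft allocation functions, e.g.
+
+     std::vector< float, pffft::PFAlloc<float> > buf(1024);  // aligned buf.data()
+*/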
+
+
+} // namespace pffft
+
diff --git a/pffft_common.c b/pffft_common.c
new file mode 100644
index 0000000..1121ac7
--- /dev/null
+++ b/pffft_common.c
@@ -0,0 +1,68 @@
+
+#include "pffft.h"
+
+#include <stdlib.h>
+
+/* SSE and co like 16-byte aligned pointers
+ * with a 64-byte alignment, we are even aligned on L2 cache lines... */
+#define MALLOC_V4SF_ALIGNMENT 64
+
+static void * Valigned_malloc(size_t nb_bytes) {
+  void *p, *p0 = malloc(nb_bytes + MALLOC_V4SF_ALIGNMENT);
+  if (!p0) return (void *) 0;
+  p = (void *) (((size_t) p0 + MALLOC_V4SF_ALIGNMENT) & (~((size_t) (MALLOC_V4SF_ALIGNMENT-1))));
+  *((void **) p - 1) = p0;
+  return p;
+}
+
+static void Valigned_free(void *p) {
+  if (p) free(*((void **) p - 1));
+}
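+
+/* sketch of the bookkeeping above (illustrative only): the pointer returned
+   by malloc() is stashed in the slot just below the aligned pointer handed
+   out, so Valigned_free() can recover it:
+
+     void *p = Valigned_malloc(100);
+     assert( ((size_t)p % MALLOC_V4SF_ALIGNMENT) == 0 );  // 64-byte aligned
+     Valigned_free(p);  // frees via the stashed malloc() pointer
+*/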
+
+
+static int next_power_of_two(int N) {
+  /* https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 */
+  /* compute the next highest power of 2 of 32-bit v */
+  unsigned v = N;
+  v--;
+  v |= v >> 1;
+  v |= v >> 2;
+  v |= v >> 4;
+  v |= v >> 8;
+  v |= v >> 16;
+  v++;
+  return v;
+}
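+/* e.g. N=48: v=47 (0b101111), the shift/or cascade smears the top bit down
+   to 0b111111 (63), and v++ gives 64; an exact power of two is returned
+   unchanged thanks to the initial v--. */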
+
+static int is_power_of_two(int N) {
+  /* https://graphics.stanford.edu/~seander/bithacks.html#DetermineIfPowerOf2 */
+  int f = N && !(N & (N - 1));
+  return f;
+}
+
+static int min_fft_size(pffft_transform_t transform) {
+  /* unfortunately, the fft size must be a multiple of 16 for complex FFTs
+     and 32 for real FFTs -- a lot of stuff would need to be rewritten to
+     handle other cases (or maybe just switch to a scalar fft, I don't know..) */
+  int simdSz = pffft_simd_size();
+  if (transform == PFFFT_REAL)
+    return ( 2 * simdSz * simdSz );
+  else if (transform == PFFFT_COMPLEX)
+    return ( simdSz * simdSz );
+  else
+    return 1;
+}
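+/* e.g. with a SIMD size of 4 the minimum is 2*4*4 = 32 for real and
+   4*4 = 16 for complex transforms; a scalar build (SIMD size 1) yields
+   2 and 1 respectively. */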
+
+
+void *pffft_aligned_malloc(size_t nb_bytes) { return Valigned_malloc(nb_bytes); }
+void pffft_aligned_free(void *p) { Valigned_free(p); }
+int pffft_next_power_of_two(int N) { return next_power_of_two(N); }
+int pffft_is_power_of_two(int N) { return is_power_of_two(N); }
+int pffft_min_fft_size(pffft_transform_t transform) { return min_fft_size(transform); }
+
+void *pffftd_aligned_malloc(size_t nb_bytes) { return Valigned_malloc(nb_bytes); }
+void pffftd_aligned_free(void *p) { Valigned_free(p); }
+int pffftd_next_power_of_two(int N) { return next_power_of_two(N); }
+int pffftd_is_power_of_two(int N) { return is_power_of_two(N); }
+int pffftd_min_fft_size(pffft_transform_t transform) { return min_fft_size(transform); }
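+/* note: the pffftd_ variants are intentionally identical to the pffft_ ones;
+   these helpers are precision-independent, but both front-ends expose them. */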
+
diff --git a/pffft_double.c b/pffft_double.c
new file mode 100644
index 0000000..28c0832
--- /dev/null
+++ b/pffft_double.c
@@ -0,0 +1,142 @@
+/* Copyright (c) 2013  Julien Pommier ( pommier@modartt.com )
+   Copyright (c) 2020  Hayati Ayguen ( h_ayguen@web.de )
+   Copyright (c) 2020  Dario Mambro ( dario.mambro@gmail.com )
+
+   Based on original fortran 77 code from FFTPACKv4 from NETLIB
+   (http://www.netlib.org/fftpack), authored by Dr Paul Swarztrauber
+   of NCAR, in 1985.
+
+   As confirmed by the NCAR fftpack software curators, the following
+   FFTPACKv5 license applies to FFTPACKv4 sources. My changes are
+   released under the same terms.
+
+   FFTPACK license:
+
+   http://www.cisl.ucar.edu/css/software/fftpack5/ftpk.html
+
+   Copyright (c) 2004 the University Corporation for Atmospheric
+   Research ("UCAR"). All rights reserved. Developed by NCAR's
+   Computational and Information Systems Laboratory, UCAR,
+   www.cisl.ucar.edu.
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of NCAR's Computational and Information Systems
+   Laboratory, the University Corporation for Atmospheric Research,
+   nor the names of its sponsors or contributors may be used to
+   endorse or promote products derived from this Software without
+   specific prior written permission.  
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+
+
+   PFFFT : a Pretty Fast FFT.
+
+   This file is largely based on the original FFTPACK implementation, modified in
+   order to take advantage of SIMD instructions of modern CPUs.
+*/
+
+/*
+   NOTE: This file is adapted from Julien Pommier's original PFFFT,
+   which works on 32 bit floating point precision using SSE instructions,
+   to work with 64 bit floating point precision using AVX instructions.
+   Author: Dario Mambro @ https://github.com/unevens/pffft
+*/
+
+#include "pffft_double.h"
+
+/* detect compiler flavour */
+#if defined(_MSC_VER)
+#  define COMPILER_MSVC
+#elif defined(__GNUC__)
+#  define COMPILER_GCC
+#endif
+
+#ifdef COMPILER_MSVC
+#  define _USE_MATH_DEFINES
+#  include <malloc.h>
+#else
+#  include <alloca.h>
+#endif
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <math.h>
+#include <assert.h>
+
+#if defined(COMPILER_GCC)
+#  define ALWAYS_INLINE(return_type) inline return_type __attribute__ ((always_inline))
+#  define NEVER_INLINE(return_type) return_type __attribute__ ((noinline))
+#  define RESTRICT __restrict
+#  define VLA_ARRAY_ON_STACK(type__, varname__, size__) type__ varname__[size__];
+#elif defined(COMPILER_MSVC)
+#  define ALWAYS_INLINE(return_type) __forceinline return_type
+#  define NEVER_INLINE(return_type) __declspec(noinline) return_type
+#  define RESTRICT __restrict
+#  define VLA_ARRAY_ON_STACK(type__, varname__, size__) type__ *varname__ = (type__*)_alloca(size__ * sizeof(type__))
+#endif
+
+
+#ifdef COMPILER_MSVC
+#pragma warning( disable : 4244 4305 4204 4456 )
+#endif
+
+/* 
+   vector support macros: the rest of the code is independent of
+   AVX -- adding support for other platforms with 4-element
+   vectors should be limited to these macros 
+*/
+#include "simd/pf_double.h"
+
+/* keep the code comparable with the single-precision sources: 'float' below is an alias for 'double' */
+#define float double
+#define SETUP_STRUCT               PFFFTD_Setup
+#define FUNC_NEW_SETUP             pffftd_new_setup
+#define FUNC_DESTROY               pffftd_destroy_setup
+#define FUNC_TRANSFORM_UNORDRD     pffftd_transform
+#define FUNC_TRANSFORM_ORDERED     pffftd_transform_ordered
+#define FUNC_ZREORDER              pffftd_zreorder
+#define FUNC_ZCONVOLVE_ACCUMULATE  pffftd_zconvolve_accumulate
+#define FUNC_ZCONVOLVE_NO_ACCU     pffftd_zconvolve_no_accu
+
+#define FUNC_ALIGNED_MALLOC        pffftd_aligned_malloc
+#define FUNC_ALIGNED_FREE          pffftd_aligned_free
+#define FUNC_SIMD_SIZE             pffftd_simd_size
+#define FUNC_SIMD_ARCH             pffftd_simd_arch
+#define FUNC_VALIDATE_SIMD_A       validate_pffftd_simd
+#define FUNC_VALIDATE_SIMD_EX      validate_pffftd_simd_ex
+
+#define FUNC_CPLX_FINALIZE         pffftd_cplx_finalize
+#define FUNC_CPLX_PREPROCESS       pffftd_cplx_preprocess
+#define FUNC_REAL_PREPROCESS_4X4   pffftd_real_preprocess_4x4
+#define FUNC_REAL_PREPROCESS       pffftd_real_preprocess
+#define FUNC_REAL_FINALIZE_4X4     pffftd_real_finalize_4x4
+#define FUNC_REAL_FINALIZE         pffftd_real_finalize
+#define FUNC_TRANSFORM_INTERNAL    pffftd_transform_internal
+
+#define FUNC_COS  cos
+#define FUNC_SIN  sin
+
+
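+/* pffft_priv_impl.h acts as a poor-man's template: the same implementation is
+   compiled once per precision, with the FUNC_* macros above selecting the
+   exported symbol names and simd/pf_double.h providing the vector type. */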
+#include "pffft_priv_impl.h"
+
+
diff --git a/pffft_double.h b/pffft_double.h
new file mode 100644
index 0000000..d83c06d
--- /dev/null
+++ b/pffft_double.h
@@ -0,0 +1,221 @@
+/* Copyright (c) 2013  Julien Pommier ( pommier@modartt.com ) 
+
+   Based on original fortran 77 code from FFTPACKv4 from NETLIB,
+   authored by Dr Paul Swarztrauber of NCAR, in 1985.
+
+   As confirmed by the NCAR fftpack software curators, the following
+   FFTPACKv5 license applies to FFTPACKv4 sources. My changes are
+   released under the same terms.
+
+   FFTPACK license:
+
+   http://www.cisl.ucar.edu/css/software/fftpack5/ftpk.html
+
+   Copyright (c) 2004 the University Corporation for Atmospheric
+   Research ("UCAR"). All rights reserved. Developed by NCAR's
+   Computational and Information Systems Laboratory, UCAR,
+   www.cisl.ucar.edu.
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of NCAR's Computational and Information Systems
+   Laboratory, the University Corporation for Atmospheric Research,
+   nor the names of its sponsors or contributors may be used to
+   endorse or promote products derived from this Software without
+   specific prior written permission.  
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+*/
+/*
+   NOTE: This file is adapted from Julien Pommier's original PFFFT,
+   which works on 32 bit floating point precision using SSE instructions,
+   to work with 64 bit floating point precision using AVX instructions.
+   Author: Dario Mambro @ https://github.com/unevens/pffft
+*/
+/*
+   PFFFT : a Pretty Fast FFT.
+
+   This is basically an adaptation of the single precision fftpack
+   (v4) as found on netlib taking advantage of SIMD instruction found
+   on cpus such as intel x86 (SSE1), powerpc (Altivec), and arm (NEON).
+   
+   For architectures where no SIMD instruction is available, the code
+   falls back to a scalar version.  
+
+   Restrictions: 
+
+   - 1D transforms only, with 64-bit double precision.
+
+   - supports only transforms for inputs of length N of the form
+   N=(2^a)*(3^b)*(5^c), a >= 5, b >=0, c >= 0 (32, 48, 64, 96, 128,
+   144, 160, etc are all acceptable lengths). Performance is best for
+   128<=N<=8192.
+
+   - all (double*) pointers in the functions below are expected to
+   have an "simd-compatible" alignment, that is 32 bytes on x86 and
+   powerpc CPUs.
+  
+   You can allocate such buffers with the functions
+   pffftd_aligned_malloc / pffftd_aligned_free (or with stuff like
+   posix_memalign..)
+
+*/
+
+#ifndef PFFFT_DOUBLE_H
+#define PFFFT_DOUBLE_H
+
+#include <stddef.h> /* for size_t */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+  /* opaque struct holding internal stuff (precomputed twiddle factors)
+     this struct can be shared by many threads as it contains only
+     read-only data.  
+  */
+  typedef struct PFFFTD_Setup PFFFTD_Setup;
+
+#ifndef PFFFT_COMMON_ENUMS
+#define PFFFT_COMMON_ENUMS
+
+  /* direction of the transform */
+  typedef enum { PFFFT_FORWARD, PFFFT_BACKWARD } pffft_direction_t;
+  
+  /* type of transform */
+  typedef enum { PFFFT_REAL, PFFFT_COMPLEX } pffft_transform_t;
+
+#endif
+
+  /*
+    prepare for performing transforms of size N -- the returned
+    PFFFTD_Setup structure is read-only so it can safely be shared by
+    multiple concurrent threads. 
+  */
+  PFFFTD_Setup *pffftd_new_setup(int N, pffft_transform_t transform);
+  void pffftd_destroy_setup(PFFFTD_Setup *);
+  /* 
+     Perform a Fourier transform. The z-domain data is stored in the
+     most efficient order for transforming it back, or using it for
+     convolution. If you need to have its content sorted in the
+     "usual" way, that is as an array of interleaved complex numbers,
+     either use pffftd_transform_ordered, or call pffftd_zreorder after
+     the forward fft, and before the backward fft.
+
+     Transforms are not scaled: PFFFT_BACKWARD(PFFFT_FORWARD(x)) = N*x.
+     Typically you will want to scale the backward transform by 1/N.
+
+     The 'work' pointer should point to an area of N (2*N for complex
+     fft) doubles, properly aligned. If 'work' is NULL, then the stack
+     will be used instead (this is probably the best strategy for small
+     FFTs, say for N < 16384). Beware that threads usually have a small
+     stack, so there may not be enough memory, usually leading to a
+     crash! Use the heap with pffftd_aligned_malloc() in this case.
+
+     input and output may alias.
+  */
+  void pffftd_transform(PFFFTD_Setup *setup, const double *input, double *output, double *work, pffft_direction_t direction);
+
+  /* 
+     Similar to pffftd_transform, but makes sure that the output is
+     ordered as expected (interleaved complex numbers).  This is
+     similar to calling pffftd_transform and then pffftd_zreorder.
+     
+     input and output may alias.
+  */
+  void pffftd_transform_ordered(PFFFTD_Setup *setup, const double *input, double *output, double *work, pffft_direction_t direction);
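+
+  /* minimal usage sketch (illustrative only, error handling omitted); N must
+     satisfy the size restrictions described above:
+
+       int N = 1024;
+       PFFFTD_Setup *s = pffftd_new_setup(N, PFFFT_REAL);
+       double *x = (double*)pffftd_aligned_malloc(N * sizeof(double));
+       double *X = (double*)pffftd_aligned_malloc(N * sizeof(double));
+       // ... fill x ...
+       pffftd_transform_ordered(s, x, X, NULL, PFFFT_FORWARD);
+       pffftd_transform_ordered(s, X, x, NULL, PFFFT_BACKWARD);
+       // x now holds N times the original signal (transforms are unscaled)
+       pffftd_aligned_free(X);
+       pffftd_aligned_free(x);
+       pffftd_destroy_setup(s);
+  */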
+
+  /* 
+     call pffftd_zreorder(.., PFFFT_FORWARD) after pffftd_transform(...,
+     PFFFT_FORWARD) if you want to have the frequency components in
+     the correct "canonical" order, as interleaved complex numbers.
+     
+     (for real transforms, both 0-frequency and half frequency
+     components, which are real, are assembled in the first entry as
+     F(0)+i*F(n/2+1). Note that the original fftpack did place
+     F(n/2+1) at the end of the arrays).
+     
+     input and output should not alias.
+  */
+  void pffftd_zreorder(PFFFTD_Setup *setup, const double *input, double *output, pffft_direction_t direction);
+
+  /* 
+     Perform a multiplication of the frequency components of dft_a and
+     dft_b and accumulate them into dft_ab. The arrays should have
+     been obtained with pffftd_transform(.., PFFFT_FORWARD) and should
+     *not* have been reordered with pffftd_zreorder (otherwise just
+     perform the operation yourself as the dft coefs are stored as
+     interleaved complex numbers).
+
+     the operation performed is: dft_ab += (dft_a * dft_b)*scaling
+     
+     The dft_a, dft_b and dft_ab pointers may alias.
+  */
+  void pffftd_zconvolve_accumulate(PFFFTD_Setup *setup, const double *dft_a, const double *dft_b, double *dft_ab, double scaling);
+
+  /* 
+     Perform a multiplication of the frequency components of dft_a and
+     dft_b and put result in dft_ab. The arrays should have
+     been obtained with pffftd_transform(.., PFFFT_FORWARD) and should
+     *not* have been reordered with pffftd_zreorder (otherwise just
+     perform the operation yourself as the dft coefs are stored as
+     interleaved complex numbers).
+
+     the operation performed is: dft_ab = (dft_a * dft_b)*scaling
+
+     The dft_a, dft_b and dft_ab pointers may alias.
+  */
+  void pffftd_zconvolve_no_accu(PFFFTD_Setup *setup, const double *dft_a, const double *dft_b, double *dft_ab, double scaling);
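+
+  /* sketch of fast convolution with the unordered transform (illustrative):
+
+       pffftd_transform(s, a, A, work, PFFFT_FORWARD);    // A in internal z-order
+       pffftd_transform(s, b, B, work, PFFFT_FORWARD);
+       pffftd_zconvolve_no_accu(s, A, B, P, 1.0 / N);     // P = (A*B)/N
+       pffftd_transform(s, P, ab, work, PFFFT_BACKWARD);  // time-domain result
+  */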
+
+  /* return 4 or 1 depending on whether AVX instruction support was enabled when building pffft_double.c */
+  int pffftd_simd_size();
+
+  /* return string identifier of used architecture (AVX/..) */
+  const char * pffftd_simd_arch();
+
+
+  /* following functions are identical to the pffft_ functions */
+
+  /* simple helper to get minimum possible fft size */
+  int pffftd_min_fft_size(pffft_transform_t transform);
+
+  /* simple helper to determine next power of 2
+     - without inexact/rounding floating point operations
+  */
+  int pffftd_next_power_of_two(int N);
+
+  /* simple helper to determine if N is a power of 2 - returns a boolean (0/1) */
+  int pffftd_is_power_of_two(int N);
+
+  /*
+    the double buffers must have the correct alignment (32-byte boundary
+    on intel and powerpc). This function may be used to obtain such
+    correctly aligned buffers.  
+  */
+  void *pffftd_aligned_malloc(size_t nb_bytes);
+  void pffftd_aligned_free(void *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PFFFT_DOUBLE_H */
+
diff --git a/pffft_priv_impl.h b/pffft_priv_impl.h
new file mode 100644
index 0000000..36cae59
--- /dev/null
+++ b/pffft_priv_impl.h
@@ -0,0 +1,2189 @@
+/* Copyright (c) 2013  Julien Pommier ( pommier@modartt.com )
+   Copyright (c) 2020  Hayati Ayguen ( h_ayguen@web.de )
+   Copyright (c) 2020  Dario Mambro ( dario.mambro@gmail.com )
+
+   Based on original fortran 77 code from FFTPACKv4 from NETLIB
+   (http://www.netlib.org/fftpack), authored by Dr Paul Swarztrauber
+   of NCAR, in 1985.
+
+   As confirmed by the NCAR fftpack software curators, the following
+   FFTPACKv5 license applies to FFTPACKv4 sources. My changes are
+   released under the same terms.
+
+   FFTPACK license:
+
+   http://www.cisl.ucar.edu/css/software/fftpack5/ftpk.html
+
+   Copyright (c) 2004 the University Corporation for Atmospheric
+   Research ("UCAR"). All rights reserved. Developed by NCAR's
+   Computational and Information Systems Laboratory, UCAR,
+   www.cisl.ucar.edu.
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of NCAR's Computational and Information Systems
+   Laboratory, the University Corporation for Atmospheric Research,
+   nor the names of its sponsors or contributors may be used to
+   endorse or promote products derived from this Software without
+   specific prior written permission.  
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+
+
+   PFFFT : a Pretty Fast FFT.
+
+   This file is largely based on the original FFTPACK implementation, modified in
+   order to take advantage of SIMD instructions of modern CPUs.
+*/
+
+/* this file requires architecture specific preprocessor definitions
+ * it's only for library internal use
+ */
+
+
+/* define our own constants, required when g++ extensions are turned off */
+#ifndef M_PI
+  #define M_PI    3.14159265358979323846  /* pi */
+#endif
+
+#ifndef M_SQRT2
+  #define M_SQRT2 1.41421356237309504880  /* sqrt(2) */
+#endif
+
+
+int FUNC_SIMD_SIZE() { return SIMD_SZ; }
+
+const char * FUNC_SIMD_ARCH() { return VARCH; }
+
+
+/*
+  passf2 and passb2 have been merged here, fsign = -1 for passf2, +1 for passb2
+*/
+static NEVER_INLINE(void) passf2_ps(int ido, int l1, const v4sf *cc, v4sf *ch, const float *wa1, float fsign) {
+  int k, i;
+  int l1ido = l1*ido;
+  if (ido <= 2) {
+    for (k=0; k < l1ido; k += ido, ch += ido, cc+= 2*ido) {
+      ch[0]         = VADD(cc[0], cc[ido+0]);
+      ch[l1ido]     = VSUB(cc[0], cc[ido+0]);
+      ch[1]         = VADD(cc[1], cc[ido+1]);
+      ch[l1ido + 1] = VSUB(cc[1], cc[ido+1]);
+    }
+  } else {
+    for (k=0; k < l1ido; k += ido, ch += ido, cc += 2*ido) {
+      for (i=0; i<ido-1; i+=2) {
+        v4sf tr2 = VSUB(cc[i+0], cc[i+ido+0]);
+        v4sf ti2 = VSUB(cc[i+1], cc[i+ido+1]);
+        v4sf wr = LD_PS1(wa1[i]), wi = VMUL(LD_PS1(fsign), LD_PS1(wa1[i+1]));
+        ch[i]   = VADD(cc[i+0], cc[i+ido+0]);
+        ch[i+1] = VADD(cc[i+1], cc[i+ido+1]);
+        VCPLXMUL(tr2, ti2, wr, wi);
+        ch[i+l1ido]   = tr2;
+        ch[i+l1ido+1] = ti2;
+      }
+    }
+  }
+}
+
+/*
+  passf3 and passb3 have been merged here, fsign = -1 for passf3, +1 for passb3
+*/
+static NEVER_INLINE(void) passf3_ps(int ido, int l1, const v4sf *cc, v4sf *ch,
+                                    const float *wa1, const float *wa2, float fsign) {
+  static const float taur = -0.5f;
+  float taui = 0.866025403784439f*fsign;
+  int i, k;
+  v4sf tr2, ti2, cr2, ci2, cr3, ci3, dr2, di2, dr3, di3;
+  int l1ido = l1*ido;
+  float wr1, wi1, wr2, wi2;
+  assert(ido > 2);
+  for (k=0; k< l1ido; k += ido, cc+= 3*ido, ch +=ido) {
+    for (i=0; i<ido-1; i+=2) {
+      tr2 = VADD(cc[i+ido], cc[i+2*ido]);
+      cr2 = VADD(cc[i], SVMUL(taur,tr2));
+      ch[i]    = VADD(cc[i], tr2);
+      ti2 = VADD(cc[i+ido+1], cc[i+2*ido+1]);
+      ci2 = VADD(cc[i    +1], SVMUL(taur,ti2));
+      ch[i+1]  = VADD(cc[i+1], ti2);
+      cr3 = SVMUL(taui, VSUB(cc[i+ido], cc[i+2*ido]));
+      ci3 = SVMUL(taui, VSUB(cc[i+ido+1], cc[i+2*ido+1]));
+      dr2 = VSUB(cr2, ci3);
+      dr3 = VADD(cr2, ci3);
+      di2 = VADD(ci2, cr3);
+      di3 = VSUB(ci2, cr3);
+      wr1=wa1[i], wi1=fsign*wa1[i+1], wr2=wa2[i], wi2=fsign*wa2[i+1]; 
+      VCPLXMUL(dr2, di2, LD_PS1(wr1), LD_PS1(wi1));
+      ch[i+l1ido] = dr2; 
+      ch[i+l1ido + 1] = di2;
+      VCPLXMUL(dr3, di3, LD_PS1(wr2), LD_PS1(wi2));
+      ch[i+2*l1ido] = dr3;
+      ch[i+2*l1ido+1] = di3;
+    }
+  }
+} /* passf3 */
+
+static NEVER_INLINE(void) passf4_ps(int ido, int l1, const v4sf *cc, v4sf *ch,
+                                    const float *wa1, const float *wa2, const float *wa3, float fsign) {
+  /* fsign == -1 for forward transform and +1 for backward transform */
+
+  int i, k;
+  v4sf ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
+  int l1ido = l1*ido;
+  if (ido == 2) {
+    for (k=0; k < l1ido; k += ido, ch += ido, cc += 4*ido) {
+      tr1 = VSUB(cc[0], cc[2*ido + 0]);
+      tr2 = VADD(cc[0], cc[2*ido + 0]);
+      ti1 = VSUB(cc[1], cc[2*ido + 1]);
+      ti2 = VADD(cc[1], cc[2*ido + 1]);
+      ti4 = VMUL(VSUB(cc[1*ido + 0], cc[3*ido + 0]), LD_PS1(fsign));
+      tr4 = VMUL(VSUB(cc[3*ido + 1], cc[1*ido + 1]), LD_PS1(fsign));
+      tr3 = VADD(cc[ido + 0], cc[3*ido + 0]);
+      ti3 = VADD(cc[ido + 1], cc[3*ido + 1]);
+
+      ch[0*l1ido + 0] = VADD(tr2, tr3);
+      ch[0*l1ido + 1] = VADD(ti2, ti3);
+      ch[1*l1ido + 0] = VADD(tr1, tr4);
+      ch[1*l1ido + 1] = VADD(ti1, ti4);
+      ch[2*l1ido + 0] = VSUB(tr2, tr3);
+      ch[2*l1ido + 1] = VSUB(ti2, ti3);        
+      ch[3*l1ido + 0] = VSUB(tr1, tr4);
+      ch[3*l1ido + 1] = VSUB(ti1, ti4);
+    }
+  } else {
+    for (k=0; k < l1ido; k += ido, ch+=ido, cc += 4*ido) {
+      for (i=0; i<ido-1; i+=2) {
+        float wr1, wi1, wr2, wi2, wr3, wi3;
+        tr1 = VSUB(cc[i + 0], cc[i + 2*ido + 0]);
+        tr2 = VADD(cc[i + 0], cc[i + 2*ido + 0]);
+        ti1 = VSUB(cc[i + 1], cc[i + 2*ido + 1]);
+        ti2 = VADD(cc[i + 1], cc[i + 2*ido + 1]);
+        tr4 = VMUL(VSUB(cc[i + 3*ido + 1], cc[i + 1*ido + 1]), LD_PS1(fsign));
+        ti4 = VMUL(VSUB(cc[i + 1*ido + 0], cc[i + 3*ido + 0]), LD_PS1(fsign));
+        tr3 = VADD(cc[i + ido + 0], cc[i + 3*ido + 0]);
+        ti3 = VADD(cc[i + ido + 1], cc[i + 3*ido + 1]);
+
+        ch[i] = VADD(tr2, tr3);
+        cr3    = VSUB(tr2, tr3);
+        ch[i + 1] = VADD(ti2, ti3);
+        ci3 = VSUB(ti2, ti3);
+
+        cr2 = VADD(tr1, tr4);
+        cr4 = VSUB(tr1, tr4);
+        ci2 = VADD(ti1, ti4);
+        ci4 = VSUB(ti1, ti4);
+        wr1=wa1[i], wi1=fsign*wa1[i+1];
+        VCPLXMUL(cr2, ci2, LD_PS1(wr1), LD_PS1(wi1));
+        wr2=wa2[i], wi2=fsign*wa2[i+1]; 
+        ch[i + l1ido] = cr2;
+        ch[i + l1ido + 1] = ci2;
+
+        VCPLXMUL(cr3, ci3, LD_PS1(wr2), LD_PS1(wi2));
+        wr3=wa3[i], wi3=fsign*wa3[i+1]; 
+        ch[i + 2*l1ido] = cr3;
+        ch[i + 2*l1ido + 1] = ci3;
+
+        VCPLXMUL(cr4, ci4, LD_PS1(wr3), LD_PS1(wi3));
+        ch[i + 3*l1ido] = cr4;
+        ch[i + 3*l1ido + 1] = ci4;
+      }
+    }
+  }
+} /* passf4 */
+
+/*
+  passf5 and passb5 have been merged here, fsign = -1 for passf5, +1 for passb5
+*/
+static NEVER_INLINE(void) passf5_ps(int ido, int l1, const v4sf *cc, v4sf *ch,
+                                    const float *wa1, const float *wa2, 
+                                    const float *wa3, const float *wa4, float fsign) {  
+  static const float tr11 = .309016994374947f;
+  const float ti11 = .951056516295154f*fsign;
+  static const float tr12 = -.809016994374947f;
+  const float ti12 = .587785252292473f*fsign;
+
+  /* Local variables */
+  int i, k;
+  v4sf ci2, ci3, ci4, ci5, di3, di4, di5, di2, cr2, cr3, cr5, cr4, ti2, ti3,
+    ti4, ti5, dr3, dr4, dr5, dr2, tr2, tr3, tr4, tr5;
+
+  float wr1, wi1, wr2, wi2, wr3, wi3, wr4, wi4;
+
+#define cc_ref(a_1,a_2) cc[(a_2-1)*ido + a_1 + 1]
+#define ch_ref(a_1,a_3) ch[(a_3-1)*l1*ido + a_1 + 1]
+
+  assert(ido > 2);
+  for (k = 0; k < l1; ++k, cc += 5*ido, ch += ido) {
+    for (i = 0; i < ido-1; i += 2) {
+      ti5 = VSUB(cc_ref(i  , 2), cc_ref(i  , 5));
+      ti2 = VADD(cc_ref(i  , 2), cc_ref(i  , 5));
+      ti4 = VSUB(cc_ref(i  , 3), cc_ref(i  , 4));
+      ti3 = VADD(cc_ref(i  , 3), cc_ref(i  , 4));
+      tr5 = VSUB(cc_ref(i-1, 2), cc_ref(i-1, 5));
+      tr2 = VADD(cc_ref(i-1, 2), cc_ref(i-1, 5));
+      tr4 = VSUB(cc_ref(i-1, 3), cc_ref(i-1, 4));
+      tr3 = VADD(cc_ref(i-1, 3), cc_ref(i-1, 4));
+      ch_ref(i-1, 1) = VADD(cc_ref(i-1, 1), VADD(tr2, tr3));
+      ch_ref(i  , 1) = VADD(cc_ref(i  , 1), VADD(ti2, ti3));
+      cr2 = VADD(cc_ref(i-1, 1), VADD(SVMUL(tr11, tr2),SVMUL(tr12, tr3)));
+      ci2 = VADD(cc_ref(i  , 1), VADD(SVMUL(tr11, ti2),SVMUL(tr12, ti3)));
+      cr3 = VADD(cc_ref(i-1, 1), VADD(SVMUL(tr12, tr2),SVMUL(tr11, tr3)));
+      ci3 = VADD(cc_ref(i  , 1), VADD(SVMUL(tr12, ti2),SVMUL(tr11, ti3)));
+      cr5 = VADD(SVMUL(ti11, tr5), SVMUL(ti12, tr4));
+      ci5 = VADD(SVMUL(ti11, ti5), SVMUL(ti12, ti4));
+      cr4 = VSUB(SVMUL(ti12, tr5), SVMUL(ti11, tr4));
+      ci4 = VSUB(SVMUL(ti12, ti5), SVMUL(ti11, ti4));
+      dr3 = VSUB(cr3, ci4);
+      dr4 = VADD(cr3, ci4);
+      di3 = VADD(ci3, cr4);
+      di4 = VSUB(ci3, cr4);
+      dr5 = VADD(cr2, ci5);
+      dr2 = VSUB(cr2, ci5);
+      di5 = VSUB(ci2, cr5);
+      di2 = VADD(ci2, cr5);
+      wr1=wa1[i], wi1=fsign*wa1[i+1], wr2=wa2[i], wi2=fsign*wa2[i+1]; 
+      wr3=wa3[i], wi3=fsign*wa3[i+1], wr4=wa4[i], wi4=fsign*wa4[i+1]; 
+      VCPLXMUL(dr2, di2, LD_PS1(wr1), LD_PS1(wi1));
+      ch_ref(i - 1, 2) = dr2;
+      ch_ref(i, 2)     = di2;
+      VCPLXMUL(dr3, di3, LD_PS1(wr2), LD_PS1(wi2));
+      ch_ref(i - 1, 3) = dr3;
+      ch_ref(i, 3)     = di3;
+      VCPLXMUL(dr4, di4, LD_PS1(wr3), LD_PS1(wi3));
+      ch_ref(i - 1, 4) = dr4;
+      ch_ref(i, 4)     = di4;
+      VCPLXMUL(dr5, di5, LD_PS1(wr4), LD_PS1(wi4));
+      ch_ref(i - 1, 5) = dr5;
+      ch_ref(i, 5)     = di5;
+    }
+  }
+#undef ch_ref
+#undef cc_ref
+}
+
+static NEVER_INLINE(void) radf2_ps(int ido, int l1, const v4sf * RESTRICT cc, v4sf * RESTRICT ch, const float *wa1) {
+  static const float minus_one = -1.f;
+  int i, k, l1ido = l1*ido;
+  for (k=0; k < l1ido; k += ido) {
+    v4sf a = cc[k], b = cc[k + l1ido];
+    ch[2*k] = VADD(a, b);
+    ch[2*(k+ido)-1] = VSUB(a, b);
+  }
+  if (ido < 2) return;
+  if (ido != 2) {
+    for (k=0; k < l1ido; k += ido) {
+      for (i=2; i<ido; i+=2) {
+        v4sf tr2 = cc[i - 1 + k + l1ido], ti2 = cc[i + k + l1ido];
+        v4sf br = cc[i - 1 + k], bi = cc[i + k];
+        VCPLXMULCONJ(tr2, ti2, LD_PS1(wa1[i - 2]), LD_PS1(wa1[i - 1])); 
+        ch[i + 2*k] = VADD(bi, ti2);
+        ch[2*(k+ido) - i] = VSUB(ti2, bi);
+        ch[i - 1 + 2*k] = VADD(br, tr2);
+        ch[2*(k+ido) - i -1] = VSUB(br, tr2);
+      }
+    }
+    if (ido % 2 == 1) return;
+  }
+  for (k=0; k < l1ido; k += ido) {
+    ch[2*k + ido] = SVMUL(minus_one, cc[ido-1 + k + l1ido]);
+    ch[2*k + ido-1] = cc[k + ido-1];
+  }
+} /* radf2 */
+
+
+static NEVER_INLINE(void) radb2_ps(int ido, int l1, const v4sf *cc, v4sf *ch, const float *wa1) {
+  static const float minus_two=-2;
+  int i, k, l1ido = l1*ido;
+  v4sf a,b,c,d, tr2, ti2;
+  for (k=0; k < l1ido; k += ido) {
+    a = cc[2*k]; b = cc[2*(k+ido) - 1];
+    ch[k] = VADD(a, b);
+    ch[k + l1ido] =VSUB(a, b);
+  }
+  if (ido < 2) return;
+  if (ido != 2) {
+    for (k = 0; k < l1ido; k += ido) {
+      for (i = 2; i < ido; i += 2) {
+        a = cc[i-1 + 2*k]; b = cc[2*(k + ido) - i - 1];
+        c = cc[i+0 + 2*k]; d = cc[2*(k + ido) - i + 0];
+        ch[i-1 + k] = VADD(a, b);
+        tr2 = VSUB(a, b);
+        ch[i+0 + k] = VSUB(c, d);
+        ti2 = VADD(c, d);
+        VCPLXMUL(tr2, ti2, LD_PS1(wa1[i - 2]), LD_PS1(wa1[i - 1]));
+        ch[i-1 + k + l1ido] = tr2;
+        ch[i+0 + k + l1ido] = ti2;
+      }
+    }
+    if (ido % 2 == 1) return;
+  }
+  for (k = 0; k < l1ido; k += ido) {
+    a = cc[2*k + ido-1]; b = cc[2*k + ido];
+    ch[k + ido-1] = VADD(a,a);
+    ch[k + ido-1 + l1ido] = SVMUL(minus_two, b);
+  }
+} /* radb2 */
+
+static void radf3_ps(int ido, int l1, const v4sf * RESTRICT cc, v4sf * RESTRICT ch,
+                     const float *wa1, const float *wa2) {
+  static const float taur = -0.5f;
+  static const float taui = 0.866025403784439f;
+  int i, k, ic;
+  v4sf ci2, di2, di3, cr2, dr2, dr3, ti2, ti3, tr2, tr3, wr1, wi1, wr2, wi2;
+  for (k=0; k<l1; k++) {
+    cr2 = VADD(cc[(k + l1)*ido], cc[(k + 2*l1)*ido]);
+    ch[3*k*ido] = VADD(cc[k*ido], cr2);
+    ch[(3*k+2)*ido] = SVMUL(taui, VSUB(cc[(k + l1*2)*ido], cc[(k + l1)*ido]));
+    ch[ido-1 + (3*k + 1)*ido] = VADD(cc[k*ido], SVMUL(taur, cr2));
+  }
+  if (ido == 1) return;
+  for (k=0; k<l1; k++) {
+    for (i=2; i<ido; i+=2) {
+      ic = ido - i;
+      wr1 = LD_PS1(wa1[i - 2]); wi1 = LD_PS1(wa1[i - 1]);
+      dr2 = cc[i - 1 + (k + l1)*ido]; di2 = cc[i + (k + l1)*ido];
+      VCPLXMULCONJ(dr2, di2, wr1, wi1);
+
+      wr2 = LD_PS1(wa2[i - 2]); wi2 = LD_PS1(wa2[i - 1]);
+      dr3 = cc[i - 1 + (k + l1*2)*ido]; di3 = cc[i + (k + l1*2)*ido];
+      VCPLXMULCONJ(dr3, di3, wr2, wi2);
+        
+      cr2 = VADD(dr2, dr3);
+      ci2 = VADD(di2, di3);
+      ch[i - 1 + 3*k*ido] = VADD(cc[i - 1 + k*ido], cr2);
+      ch[i + 3*k*ido] = VADD(cc[i + k*ido], ci2);
+      tr2 = VADD(cc[i - 1 + k*ido], SVMUL(taur, cr2));
+      ti2 = VADD(cc[i + k*ido], SVMUL(taur, ci2));
+      tr3 = SVMUL(taui, VSUB(di2, di3));
+      ti3 = SVMUL(taui, VSUB(dr3, dr2));
+      ch[i - 1 + (3*k + 2)*ido] = VADD(tr2, tr3);
+      ch[ic - 1 + (3*k + 1)*ido] = VSUB(tr2, tr3);
+      ch[i + (3*k + 2)*ido] = VADD(ti2, ti3);
+      ch[ic + (3*k + 1)*ido] = VSUB(ti3, ti2);
+    }
+  }
+} /* radf3 */
+
+
+static void radb3_ps(int ido, int l1, const v4sf *RESTRICT cc, v4sf *RESTRICT ch,
+                     const float *wa1, const float *wa2)
+{
+  static const float taur = -0.5f;
+  static const float taui = 0.866025403784439f;
+  static const float taui_2 = 0.866025403784439f*2;
+  int i, k, ic;
+  v4sf ci2, ci3, di2, di3, cr2, cr3, dr2, dr3, ti2, tr2;
+  for (k=0; k<l1; k++) {
+    tr2 = cc[ido-1 + (3*k + 1)*ido]; tr2 = VADD(tr2,tr2);
+    cr2 = VMADD(LD_PS1(taur), tr2, cc[3*k*ido]);
+    ch[k*ido] = VADD(cc[3*k*ido], tr2);
+    ci3 = SVMUL(taui_2, cc[(3*k + 2)*ido]);
+    ch[(k + l1)*ido] = VSUB(cr2, ci3);
+    ch[(k + 2*l1)*ido] = VADD(cr2, ci3);
+  }
+  if (ido == 1) return;
+  for (k=0; k<l1; k++) {
+    for (i=2; i<ido; i+=2) {
+      ic = ido - i;
+      tr2 = VADD(cc[i - 1 + (3*k + 2)*ido], cc[ic - 1 + (3*k + 1)*ido]);
+      cr2 = VMADD(LD_PS1(taur), tr2, cc[i - 1 + 3*k*ido]);
+      ch[i - 1 + k*ido] = VADD(cc[i - 1 + 3*k*ido], tr2);
+      ti2 = VSUB(cc[i + (3*k + 2)*ido], cc[ic + (3*k + 1)*ido]);
+      ci2 = VMADD(LD_PS1(taur), ti2, cc[i + 3*k*ido]);
+      ch[i + k*ido] = VADD(cc[i + 3*k*ido], ti2);
+      cr3 = SVMUL(taui, VSUB(cc[i - 1 + (3*k + 2)*ido], cc[ic - 1 + (3*k + 1)*ido]));
+      ci3 = SVMUL(taui, VADD(cc[i + (3*k + 2)*ido], cc[ic + (3*k + 1)*ido]));
+      dr2 = VSUB(cr2, ci3);
+      dr3 = VADD(cr2, ci3);
+      di2 = VADD(ci2, cr3);
+      di3 = VSUB(ci2, cr3);
+      VCPLXMUL(dr2, di2, LD_PS1(wa1[i-2]), LD_PS1(wa1[i-1]));
+      ch[i - 1 + (k + l1)*ido] = dr2;
+      ch[i + (k + l1)*ido] = di2;
+      VCPLXMUL(dr3, di3, LD_PS1(wa2[i-2]), LD_PS1(wa2[i-1]));
+      ch[i - 1 + (k + 2*l1)*ido] = dr3;
+      ch[i + (k + 2*l1)*ido] = di3;
+    }
+  }
+} /* radb3 */
+
+static NEVER_INLINE(void) radf4_ps(int ido, int l1, const v4sf *RESTRICT cc, v4sf * RESTRICT ch,
+                                   const float * RESTRICT wa1, const float * RESTRICT wa2, const float * RESTRICT wa3)
+{
+  static const float minus_hsqt2 = (float)-0.7071067811865475;
+  int i, k, l1ido = l1*ido;
+  {
+    const v4sf *RESTRICT cc_ = cc, * RESTRICT cc_end = cc + l1ido; 
+    v4sf * RESTRICT ch_ = ch;
+    while (cc < cc_end) {
+      /* this loop represents between 25% and 40% of total radf4_ps cost ! */
+      v4sf a0 = cc[0], a1 = cc[l1ido];
+      v4sf a2 = cc[2*l1ido], a3 = cc[3*l1ido];
+      v4sf tr1 = VADD(a1, a3);
+      v4sf tr2 = VADD(a0, a2);
+      ch[2*ido-1] = VSUB(a0, a2);
+      ch[2*ido  ] = VSUB(a3, a1);
+      ch[0      ] = VADD(tr1, tr2);
+      ch[4*ido-1] = VSUB(tr2, tr1);
+      cc += ido; ch += 4*ido;
+    }
+    cc = cc_; ch = ch_;
+  }
+  if (ido < 2) return;
+  if (ido != 2) {
+    for (k = 0; k < l1ido; k += ido) {
+      const v4sf * RESTRICT pc = (v4sf*)(cc + 1 + k);
+      for (i=2; i<ido; i += 2, pc += 2) {
+        int ic = ido - i;
+        v4sf wr, wi, cr2, ci2, cr3, ci3, cr4, ci4;
+        v4sf tr1, ti1, tr2, ti2, tr3, ti3, tr4, ti4;
+
+        cr2 = pc[1*l1ido+0];
+        ci2 = pc[1*l1ido+1];
+        wr=LD_PS1(wa1[i - 2]);
+        wi=LD_PS1(wa1[i - 1]);
+        VCPLXMULCONJ(cr2,ci2,wr,wi);
+
+        cr3 = pc[2*l1ido+0];
+        ci3 = pc[2*l1ido+1];
+        wr = LD_PS1(wa2[i-2]); 
+        wi = LD_PS1(wa2[i-1]);
+        VCPLXMULCONJ(cr3, ci3, wr, wi);
+
+        cr4 = pc[3*l1ido];
+        ci4 = pc[3*l1ido+1];
+        wr = LD_PS1(wa3[i-2]); 
+        wi = LD_PS1(wa3[i-1]);
+        VCPLXMULCONJ(cr4, ci4, wr, wi);
+
+        /* at this point, on SSE, five of "cr2 cr3 cr4 ci2 ci3 ci4" should be loaded in registers */
+
+        tr1 = VADD(cr2,cr4);
+        tr4 = VSUB(cr4,cr2); 
+        tr2 = VADD(pc[0],cr3);
+        tr3 = VSUB(pc[0],cr3);
+        ch[i - 1 + 4*k] = VADD(tr1,tr2);
+        ch[ic - 1 + 4*k + 3*ido] = VSUB(tr2,tr1); /* at this point tr1 and tr2 can be disposed */
+        ti1 = VADD(ci2,ci4);
+        ti4 = VSUB(ci2,ci4);
+        ch[i - 1 + 4*k + 2*ido] = VADD(ti4,tr3);
+        ch[ic - 1 + 4*k + 1*ido] = VSUB(tr3,ti4); /* dispose tr3, ti4 */
+        ti2 = VADD(pc[1],ci3);
+        ti3 = VSUB(pc[1],ci3);
+        ch[i + 4*k] = VADD(ti1, ti2);
+        ch[ic + 4*k + 3*ido] = VSUB(ti1, ti2);
+        ch[i + 4*k + 2*ido] = VADD(tr4, ti3);
+        ch[ic + 4*k + 1*ido] = VSUB(tr4, ti3);
+      }
+    }
+    if (ido % 2 == 1) return;
+  }
+  for (k=0; k<l1ido; k += ido) {
+    v4sf a = cc[ido-1 + k + l1ido], b = cc[ido-1 + k + 3*l1ido];
+    v4sf c = cc[ido-1 + k], d = cc[ido-1 + k + 2*l1ido];
+    v4sf ti1 = SVMUL(minus_hsqt2, VADD(a, b));
+    v4sf tr1 = SVMUL(minus_hsqt2, VSUB(b, a));
+    ch[ido-1 + 4*k] = VADD(tr1, c);
+    ch[ido-1 + 4*k + 2*ido] = VSUB(c, tr1);
+    ch[4*k + 1*ido] = VSUB(ti1, d); 
+    ch[4*k + 3*ido] = VADD(ti1, d); 
+  }
+} /* radf4 */
+
+
+static NEVER_INLINE(void) radb4_ps(int ido, int l1, const v4sf * RESTRICT cc, v4sf * RESTRICT ch,
+                                   const float * RESTRICT wa1, const float * RESTRICT wa2, const float *RESTRICT wa3)
+{
+  static const float minus_sqrt2 = (float)-1.414213562373095;
+  static const float two = 2.f;
+  int i, k, l1ido = l1*ido;
+  v4sf ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
+  {
+    const v4sf *RESTRICT cc_ = cc, * RESTRICT ch_end = ch + l1ido; 
+    v4sf *ch_ = ch;
+    while (ch < ch_end) {
+      v4sf a = cc[0], b = cc[4*ido-1];
+      v4sf c = cc[2*ido], d = cc[2*ido-1];
+      tr3 = SVMUL(two,d);
+      tr2 = VADD(a,b);
+      tr1 = VSUB(a,b);
+      tr4 = SVMUL(two,c);
+      ch[0*l1ido] = VADD(tr2, tr3);
+      ch[2*l1ido] = VSUB(tr2, tr3);
+      ch[1*l1ido] = VSUB(tr1, tr4);
+      ch[3*l1ido] = VADD(tr1, tr4);
+      
+      cc += 4*ido; ch += ido;
+    }
+    cc = cc_; ch = ch_;
+  }
+  if (ido < 2) return;
+  if (ido != 2) {
+    for (k = 0; k < l1ido; k += ido) {
+      const v4sf * RESTRICT pc = (v4sf*)(cc - 1 + 4*k);
+      v4sf * RESTRICT ph = (v4sf*)(ch + k + 1);
+      for (i = 2; i < ido; i += 2) {
+
+        tr1 = VSUB(pc[i], pc[4*ido - i]);
+        tr2 = VADD(pc[i], pc[4*ido - i]);
+        ti4 = VSUB(pc[2*ido + i], pc[2*ido - i]);
+        tr3 = VADD(pc[2*ido + i], pc[2*ido - i]);
+        ph[0] = VADD(tr2, tr3);
+        cr3 = VSUB(tr2, tr3);
+
+        ti3 = VSUB(pc[2*ido + i + 1], pc[2*ido - i + 1]);
+        tr4 = VADD(pc[2*ido + i + 1], pc[2*ido - i + 1]);
+        cr2 = VSUB(tr1, tr4);
+        cr4 = VADD(tr1, tr4);
+
+        ti1 = VADD(pc[i + 1], pc[4*ido - i + 1]);
+        ti2 = VSUB(pc[i + 1], pc[4*ido - i + 1]);
+
+        ph[1] = VADD(ti2, ti3); ph += l1ido;
+        ci3 = VSUB(ti2, ti3);
+        ci2 = VADD(ti1, ti4);
+        ci4 = VSUB(ti1, ti4);
+        VCPLXMUL(cr2, ci2, LD_PS1(wa1[i-2]), LD_PS1(wa1[i-1]));
+        ph[0] = cr2;
+        ph[1] = ci2; ph += l1ido;
+        VCPLXMUL(cr3, ci3, LD_PS1(wa2[i-2]), LD_PS1(wa2[i-1]));
+        ph[0] = cr3;
+        ph[1] = ci3; ph += l1ido;
+        VCPLXMUL(cr4, ci4, LD_PS1(wa3[i-2]), LD_PS1(wa3[i-1]));
+        ph[0] = cr4;
+        ph[1] = ci4; ph = ph - 3*l1ido + 2;
+      }
+    }
+    if (ido % 2 == 1) return;
+  }
+  for (k=0; k < l1ido; k+=ido) {
+    int i0 = 4*k + ido;
+    v4sf c = cc[i0-1], d = cc[i0 + 2*ido-1];
+    v4sf a = cc[i0+0], b = cc[i0 + 2*ido+0];
+    tr1 = VSUB(c,d);
+    tr2 = VADD(c,d);
+    ti1 = VADD(b,a);
+    ti2 = VSUB(b,a);
+    ch[ido-1 + k + 0*l1ido] = VADD(tr2,tr2);
+    ch[ido-1 + k + 1*l1ido] = SVMUL(minus_sqrt2, VSUB(ti1, tr1));
+    ch[ido-1 + k + 2*l1ido] = VADD(ti2, ti2);
+    ch[ido-1 + k + 3*l1ido] = SVMUL(minus_sqrt2, VADD(ti1, tr1));
+  }
+} /* radb4 */
+
+static void radf5_ps(int ido, int l1, const v4sf * RESTRICT cc, v4sf * RESTRICT ch, 
+                     const float *wa1, const float *wa2, const float *wa3, const float *wa4)
+{
+  static const float tr11 = .309016994374947f;
+  static const float ti11 = .951056516295154f;
+  static const float tr12 = -.809016994374947f;
+  static const float ti12 = .587785252292473f;
+
+  /* System generated locals */
+  int cc_offset, ch_offset;
+
+  /* Local variables */
+  int i, k, ic;
+  v4sf ci2, di2, ci4, ci5, di3, di4, di5, ci3, cr2, cr3, dr2, dr3, dr4, dr5,
+    cr5, cr4, ti2, ti3, ti5, ti4, tr2, tr3, tr4, tr5;
+  int idp2;
+
+
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*l1 + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*5 + (a_2))*ido + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * 6;
+  ch -= ch_offset;
+  cc_offset = 1 + ido * (1 + l1);
+  cc -= cc_offset;
+
+  /* Function Body */
+  for (k = 1; k <= l1; ++k) {
+    cr2 = VADD(cc_ref(1, k, 5), cc_ref(1, k, 2));
+    ci5 = VSUB(cc_ref(1, k, 5), cc_ref(1, k, 2));
+    cr3 = VADD(cc_ref(1, k, 4), cc_ref(1, k, 3));
+    ci4 = VSUB(cc_ref(1, k, 4), cc_ref(1, k, 3));
+    ch_ref(1, 1, k) = VADD(cc_ref(1, k, 1), VADD(cr2, cr3));
+    ch_ref(ido, 2, k) = VADD(cc_ref(1, k, 1), VADD(SVMUL(tr11, cr2), SVMUL(tr12, cr3)));
+    ch_ref(1, 3, k) = VADD(SVMUL(ti11, ci5), SVMUL(ti12, ci4));
+    ch_ref(ido, 4, k) = VADD(cc_ref(1, k, 1), VADD(SVMUL(tr12, cr2), SVMUL(tr11, cr3)));
+    ch_ref(1, 5, k) = VSUB(SVMUL(ti12, ci5), SVMUL(ti11, ci4));
+    /* printf("pffft: radf5, k=%d ch_ref=%f, ci4=%f\n", k, ch_ref(1, 5, k), ci4); */
+  }
+  if (ido == 1) {
+    return;
+  }
+  idp2 = ido + 2;
+  for (k = 1; k <= l1; ++k) {
+    for (i = 3; i <= ido; i += 2) {
+      ic = idp2 - i;
+      dr2 = LD_PS1(wa1[i-3]); di2 = LD_PS1(wa1[i-2]);
+      dr3 = LD_PS1(wa2[i-3]); di3 = LD_PS1(wa2[i-2]);
+      dr4 = LD_PS1(wa3[i-3]); di4 = LD_PS1(wa3[i-2]);
+      dr5 = LD_PS1(wa4[i-3]); di5 = LD_PS1(wa4[i-2]);
+      VCPLXMULCONJ(dr2, di2, cc_ref(i-1, k, 2), cc_ref(i, k, 2));
+      VCPLXMULCONJ(dr3, di3, cc_ref(i-1, k, 3), cc_ref(i, k, 3));
+      VCPLXMULCONJ(dr4, di4, cc_ref(i-1, k, 4), cc_ref(i, k, 4));
+      VCPLXMULCONJ(dr5, di5, cc_ref(i-1, k, 5), cc_ref(i, k, 5));
+      cr2 = VADD(dr2, dr5);
+      ci5 = VSUB(dr5, dr2);
+      cr5 = VSUB(di2, di5);
+      ci2 = VADD(di2, di5);
+      cr3 = VADD(dr3, dr4);
+      ci4 = VSUB(dr4, dr3);
+      cr4 = VSUB(di3, di4);
+      ci3 = VADD(di3, di4);
+      ch_ref(i - 1, 1, k) = VADD(cc_ref(i - 1, k, 1), VADD(cr2, cr3));
+      ch_ref(i, 1, k) = VSUB(cc_ref(i, k, 1), VADD(ci2, ci3));
+      tr2 = VADD(cc_ref(i - 1, k, 1), VADD(SVMUL(tr11, cr2), SVMUL(tr12, cr3)));
+      ti2 = VSUB(cc_ref(i, k, 1), VADD(SVMUL(tr11, ci2), SVMUL(tr12, ci3)));
+      tr3 = VADD(cc_ref(i - 1, k, 1), VADD(SVMUL(tr12, cr2), SVMUL(tr11, cr3)));
+      ti3 = VSUB(cc_ref(i, k, 1), VADD(SVMUL(tr12, ci2), SVMUL(tr11, ci3)));
+      tr5 = VADD(SVMUL(ti11, cr5), SVMUL(ti12, cr4));
+      ti5 = VADD(SVMUL(ti11, ci5), SVMUL(ti12, ci4));
+      tr4 = VSUB(SVMUL(ti12, cr5), SVMUL(ti11, cr4));
+      ti4 = VSUB(SVMUL(ti12, ci5), SVMUL(ti11, ci4));
+      ch_ref(i - 1, 3, k) = VSUB(tr2, tr5);
+      ch_ref(ic - 1, 2, k) = VADD(tr2, tr5);
+      ch_ref(i, 3, k) = VADD(ti2, ti5);
+      ch_ref(ic, 2, k) = VSUB(ti5, ti2);
+      ch_ref(i - 1, 5, k) = VSUB(tr3, tr4);
+      ch_ref(ic - 1, 4, k) = VADD(tr3, tr4);
+      ch_ref(i, 5, k) = VADD(ti3, ti4);
+      ch_ref(ic, 4, k) = VSUB(ti4, ti3);
+    }
+  }
+#undef cc_ref
+#undef ch_ref
+} /* radf5 */
+
+static void radb5_ps(int ido, int l1, const v4sf *RESTRICT cc, v4sf *RESTRICT ch, 
+                  const float *wa1, const float *wa2, const float *wa3, const float *wa4)
+{
+  static const float tr11 = .309016994374947f;
+  static const float ti11 = .951056516295154f;
+  static const float tr12 = -.809016994374947f;
+  static const float ti12 = .587785252292473f;
+
+  int cc_offset, ch_offset;
+
+  /* Local variables */
+  int i, k, ic;
+  v4sf ci2, ci3, ci4, ci5, di3, di4, di5, di2, cr2, cr3, cr5, cr4, ti2, ti3,
+    ti4, ti5, dr3, dr4, dr5, dr2, tr2, tr3, tr4, tr5;
+  int idp2;
+
+#define cc_ref(a_1,a_2,a_3) cc[((a_3)*5 + (a_2))*ido + a_1]
+#define ch_ref(a_1,a_2,a_3) ch[((a_3)*l1 + (a_2))*ido + a_1]
+
+  /* Parameter adjustments */
+  ch_offset = 1 + ido * (1 + l1);
+  ch -= ch_offset;
+  cc_offset = 1 + ido * 6;
+  cc -= cc_offset;
+
+  /* Function Body */
+  for (k = 1; k <= l1; ++k) {
+    ti5 = VADD(cc_ref(1, 3, k), cc_ref(1, 3, k));
+    ti4 = VADD(cc_ref(1, 5, k), cc_ref(1, 5, k));
+    tr2 = VADD(cc_ref(ido, 2, k), cc_ref(ido, 2, k));
+    tr3 = VADD(cc_ref(ido, 4, k), cc_ref(ido, 4, k));
+    ch_ref(1, k, 1) = VADD(cc_ref(1, 1, k), VADD(tr2, tr3));
+    cr2 = VADD(cc_ref(1, 1, k), VADD(SVMUL(tr11, tr2), SVMUL(tr12, tr3)));
+    cr3 = VADD(cc_ref(1, 1, k), VADD(SVMUL(tr12, tr2), SVMUL(tr11, tr3)));
+    ci5 = VADD(SVMUL(ti11, ti5), SVMUL(ti12, ti4));
+    ci4 = VSUB(SVMUL(ti12, ti5), SVMUL(ti11, ti4));
+    ch_ref(1, k, 2) = VSUB(cr2, ci5);
+    ch_ref(1, k, 3) = VSUB(cr3, ci4);
+    ch_ref(1, k, 4) = VADD(cr3, ci4);
+    ch_ref(1, k, 5) = VADD(cr2, ci5);
+  }
+  if (ido == 1) {
+    return;
+  }
+  idp2 = ido + 2;
+  for (k = 1; k <= l1; ++k) {
+    for (i = 3; i <= ido; i += 2) {
+      ic = idp2 - i;
+      ti5 = VADD(cc_ref(i  , 3, k), cc_ref(ic  , 2, k));
+      ti2 = VSUB(cc_ref(i  , 3, k), cc_ref(ic  , 2, k));
+      ti4 = VADD(cc_ref(i  , 5, k), cc_ref(ic  , 4, k));
+      ti3 = VSUB(cc_ref(i  , 5, k), cc_ref(ic  , 4, k));
+      tr5 = VSUB(cc_ref(i-1, 3, k), cc_ref(ic-1, 2, k));
+      tr2 = VADD(cc_ref(i-1, 3, k), cc_ref(ic-1, 2, k));
+      tr4 = VSUB(cc_ref(i-1, 5, k), cc_ref(ic-1, 4, k));
+      tr3 = VADD(cc_ref(i-1, 5, k), cc_ref(ic-1, 4, k));
+      ch_ref(i - 1, k, 1) = VADD(cc_ref(i-1, 1, k), VADD(tr2, tr3));
+      ch_ref(i, k, 1) = VADD(cc_ref(i, 1, k), VADD(ti2, ti3));
+      cr2 = VADD(cc_ref(i-1, 1, k), VADD(SVMUL(tr11, tr2), SVMUL(tr12, tr3)));
+      ci2 = VADD(cc_ref(i  , 1, k), VADD(SVMUL(tr11, ti2), SVMUL(tr12, ti3)));
+      cr3 = VADD(cc_ref(i-1, 1, k), VADD(SVMUL(tr12, tr2), SVMUL(tr11, tr3)));
+      ci3 = VADD(cc_ref(i  , 1, k), VADD(SVMUL(tr12, ti2), SVMUL(tr11, ti3)));
+      cr5 = VADD(SVMUL(ti11, tr5), SVMUL(ti12, tr4));
+      ci5 = VADD(SVMUL(ti11, ti5), SVMUL(ti12, ti4));
+      cr4 = VSUB(SVMUL(ti12, tr5), SVMUL(ti11, tr4));
+      ci4 = VSUB(SVMUL(ti12, ti5), SVMUL(ti11, ti4));
+      dr3 = VSUB(cr3, ci4);
+      dr4 = VADD(cr3, ci4);
+      di3 = VADD(ci3, cr4);
+      di4 = VSUB(ci3, cr4);
+      dr5 = VADD(cr2, ci5);
+      dr2 = VSUB(cr2, ci5);
+      di5 = VSUB(ci2, cr5);
+      di2 = VADD(ci2, cr5);
+      VCPLXMUL(dr2, di2, LD_PS1(wa1[i-3]), LD_PS1(wa1[i-2]));
+      VCPLXMUL(dr3, di3, LD_PS1(wa2[i-3]), LD_PS1(wa2[i-2]));
+      VCPLXMUL(dr4, di4, LD_PS1(wa3[i-3]), LD_PS1(wa3[i-2]));
+      VCPLXMUL(dr5, di5, LD_PS1(wa4[i-3]), LD_PS1(wa4[i-2]));
+
+      ch_ref(i-1, k, 2) = dr2; ch_ref(i, k, 2) = di2;
+      ch_ref(i-1, k, 3) = dr3; ch_ref(i, k, 3) = di3;
+      ch_ref(i-1, k, 4) = dr4; ch_ref(i, k, 4) = di4;
+      ch_ref(i-1, k, 5) = dr5; ch_ref(i, k, 5) = di5;
+    }
+  }
+#undef cc_ref
+#undef ch_ref
+} /* radb5 */
+
+static NEVER_INLINE(v4sf *) rfftf1_ps(int n, const v4sf *input_readonly, v4sf *work1, v4sf *work2, 
+                                      const float *wa, const int *ifac) {  
+  v4sf *in  = (v4sf*)input_readonly;
+  v4sf *out = (in == work2 ? work1 : work2);
+  int nf = ifac[1], k1;
+  int l2 = n;
+  int iw = n-1;
+  assert(in != out && work1 != work2);
+  for (k1 = 1; k1 <= nf; ++k1) {
+    int kh = nf - k1;
+    int ip = ifac[kh + 2];
+    int l1 = l2 / ip;
+    int ido = n / l2;
+    iw -= (ip - 1)*ido;
+    switch (ip) {
+      case 5: {
+        int ix2 = iw + ido;
+        int ix3 = ix2 + ido;
+        int ix4 = ix3 + ido;
+        radf5_ps(ido, l1, in, out, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4]);
+      } break;
+      case 4: {
+        int ix2 = iw + ido;
+        int ix3 = ix2 + ido;
+        radf4_ps(ido, l1, in, out, &wa[iw], &wa[ix2], &wa[ix3]);
+      } break;
+      case 3: {
+        int ix2 = iw + ido;
+        radf3_ps(ido, l1, in, out, &wa[iw], &wa[ix2]);
+      } break;
+      case 2:
+        radf2_ps(ido, l1, in, out, &wa[iw]);
+        break;
+      default:
+        assert(0);
+        break;
+    }
+    l2 = l1;
+    if (out == work2) {
+      out = work1; in = work2;
+    } else {
+      out = work2; in = work1;
+    }
+  }
+  return in; /* 'in' now points to the buffer holding the final output */
+} /* rfftf1 */
+
+static NEVER_INLINE(v4sf *) rfftb1_ps(int n, const v4sf *input_readonly, v4sf *work1, v4sf *work2, 
+                                      const float *wa, const int *ifac) {  
+  v4sf *in  = (v4sf*)input_readonly;
+  v4sf *out = (in == work2 ? work1 : work2);
+  int nf = ifac[1], k1;
+  int l1 = 1;
+  int iw = 0;
+  assert(in != out);
+  for (k1=1; k1<=nf; k1++) {
+    int ip = ifac[k1 + 1];
+    int l2 = ip*l1;
+    int ido = n / l2;
+    switch (ip) {
+      case 5: {
+        int ix2 = iw + ido;
+        int ix3 = ix2 + ido;
+        int ix4 = ix3 + ido;
+        radb5_ps(ido, l1, in, out, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4]);
+      } break;
+      case 4: {
+        int ix2 = iw + ido;
+        int ix3 = ix2 + ido;
+        radb4_ps(ido, l1, in, out, &wa[iw], &wa[ix2], &wa[ix3]);
+      } break;
+      case 3: {
+        int ix2 = iw + ido;
+        radb3_ps(ido, l1, in, out, &wa[iw], &wa[ix2]);
+      } break;
+      case 2:
+        radb2_ps(ido, l1, in, out, &wa[iw]);
+        break;
+      default:
+        assert(0);
+        break;
+    }
+    l1 = l2;
+    iw += (ip - 1)*ido;
+
+    if (out == work2) {
+      out = work1; in = work2;
+    } else {
+      out = work2; in = work1;
+    }
+  }
+  return in; /* 'in' now points to the buffer holding the final output */
+}
+
+static int decompose(int n, int *ifac, const int *ntryh) {
+  int nl = n, nf = 0, i, j = 0;
+  for (j=0; ntryh[j]; ++j) {
+    int ntry = ntryh[j];
+    while (nl != 1) {
+      int nq = nl / ntry;
+      int nr = nl - ntry * nq;
+      if (nr == 0) {
+        ifac[2+nf++] = ntry;
+        nl = nq;
+        if (ntry == 2 && nf != 1) {
+          for (i = 2; i <= nf; ++i) {
+            int ib = nf - i + 2;
+            ifac[ib + 1] = ifac[ib];
+          }
+          ifac[2] = 2;
+        }
+      } else break;
+    }
+  }
+  ifac[0] = n;
+  ifac[1] = nf;  
+  return nf;
+}
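+/* Worked example (illustrative only): decompose(240, ifac, ntryh={4,2,3,5,0})
+ * peels the radices off in order and yields 240 = 4*4*3*5, i.e.
+ *   ifac = { 240, 4,  4, 4, 3, 5 }
+ * with ifac[0] = n, ifac[1] = the number of factors, ifac[2..] = the factors.
+ * The inner shift loop moves a factor 2 found after other factors to the
+ * front of the factor list, mirroring the original fftpack behaviour. */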
+
+
+
+static void rffti1_ps(int n, float *wa, int *ifac)
+{
+  static const int ntryh[] = { 4,2,3,5,0 };
+  int k1, j, ii;
+
+  int nf = decompose(n,ifac,ntryh);
+  float argh = (2*(float)M_PI) / n;
+  int is = 0;
+  int nfm1 = nf - 1;
+  int l1 = 1;
+  for (k1 = 1; k1 <= nfm1; k1++) {
+    int ip = ifac[k1 + 1];
+    int ld = 0;
+    int l2 = l1*ip;
+    int ido = n / l2;
+    int ipm = ip - 1;
+    for (j = 1; j <= ipm; ++j) {
+      float argld;
+      int i = is, fi=0;
+      ld += l1;
+      argld = ld*argh;
+      for (ii = 3; ii <= ido; ii += 2) {
+        i += 2;
+        fi += 1;
+        wa[i - 2] = FUNC_COS(fi*argld);
+        wa[i - 1] = FUNC_SIN(fi*argld);
+      }
+      is += ido;
+    }
+    l1 = l2;
+  }
+} /* rffti1 */
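+/* Layout note (derived from the loops above): for each factorization stage
+ * the real-FFT twiddle factors are stored as interleaved cosine/sine pairs,
+ *   wa[i-2] = cos(fi*ld*argh),  wa[i-1] = sin(fi*ld*argh),  argh = 2*pi/n,
+ * which rfftf1_ps/rfftb1_ps hand to the radfN/radbN kernels via &wa[iw]. */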
+
+static void cffti1_ps(int n, float *wa, int *ifac)
+{
+  static const int ntryh[] = { 5,3,4,2,0 };
+  int k1, j, ii;
+
+  int nf = decompose(n,ifac,ntryh);
+  float argh = (2*(float)M_PI) / n;
+  int i = 1;
+  int l1 = 1;
+  for (k1=1; k1<=nf; k1++) {
+    int ip = ifac[k1+1];
+    int ld = 0;
+    int l2 = l1*ip;
+    int ido = n / l2;
+    int idot = ido + ido + 2;
+    int ipm = ip - 1;
+    for (j=1; j<=ipm; j++) {
+      float argld;
+      int i1 = i, fi = 0;
+      wa[i-1] = 1;
+      wa[i] = 0;
+      ld += l1;
+      argld = ld*argh;
+      for (ii = 4; ii <= idot; ii += 2) {
+        i += 2;
+        fi += 1;
+        wa[i-1] = FUNC_COS(fi*argld);
+        wa[i] = FUNC_SIN(fi*argld);
+      }
+      if (ip > 5) {
+        wa[i1-1] = wa[i-1];
+        wa[i1] = wa[i];
+      }
+    }
+    l1 = l2;
+  }
+} /* cffti1 */
+
+
+static v4sf *cfftf1_ps(int n, const v4sf *input_readonly, v4sf *work1, v4sf *work2, const float *wa, const int *ifac, int isign) {
+  v4sf *in  = (v4sf*)input_readonly;
+  v4sf *out = (in == work2 ? work1 : work2); 
+  int nf = ifac[1], k1;
+  int l1 = 1;
+  int iw = 0;
+  assert(in != out && work1 != work2);
+  for (k1=2; k1<=nf+1; k1++) {
+    int ip = ifac[k1];
+    int l2 = ip*l1;
+    int ido = n / l2;
+    int idot = ido + ido;
+    switch (ip) {
+      case 5: {
+        int ix2 = iw + idot;
+        int ix3 = ix2 + idot;
+        int ix4 = ix3 + idot;
+        passf5_ps(idot, l1, in, out, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4], isign);
+      } break;
+      case 4: {
+        int ix2 = iw + idot;
+        int ix3 = ix2 + idot;
+        passf4_ps(idot, l1, in, out, &wa[iw], &wa[ix2], &wa[ix3], isign);
+      } break;
+      case 2: {
+        passf2_ps(idot, l1, in, out, &wa[iw], isign);
+      } break;
+      case 3: {
+        int ix2 = iw + idot;
+        passf3_ps(idot, l1, in, out, &wa[iw], &wa[ix2], isign);
+      } break;
+      default:
+        assert(0);
+    }
+    l1 = l2;
+    iw += (ip - 1)*idot;
+    if (out == work2) {
+      out = work1; in = work2;
+    } else {
+      out = work2; in = work1;
+    }
+  }
+
+  return in; /* 'in' now points to the buffer holding the final output */
+}
+
+
+struct SETUP_STRUCT {
+  int     N;
+  int     Ncvec;  /* nb of complex simd vectors (N/4 if PFFFT_COMPLEX, N/8 if PFFFT_REAL) */
+  int ifac[15];
+  pffft_transform_t transform;
+  v4sf *data;     /* allocated room for twiddle coefs */
+  float *e;       /* points into 'data', N/4*3 elements */
+  float *twiddle; /* points into 'data', N/4 elements */
+};
+
+SETUP_STRUCT *FUNC_NEW_SETUP(int N, pffft_transform_t transform) {
+  SETUP_STRUCT *s = (SETUP_STRUCT*)malloc(sizeof(SETUP_STRUCT));
+  int k, m;
+  /* unfortunately, the fft size must be a multiple of 16 for complex FFTs 
+     and 32 for real FFTs -- a lot of stuff would need to be rewritten to
+     handle other cases (or maybe just switch to a scalar fft, I don't know..) */
+  if (transform == PFFFT_REAL) { assert((N%(2*SIMD_SZ*SIMD_SZ))==0 && N>0); }
+  if (transform == PFFFT_COMPLEX) { assert((N%(SIMD_SZ*SIMD_SZ))==0 && N>0); }
+  /* assert((N % 32) == 0); */
+  s->N = N;
+  s->transform = transform;  
+  /* nb of complex simd vectors */
+  s->Ncvec = (transform == PFFFT_REAL ? N/2 : N)/SIMD_SZ;
+  s->data = (v4sf*)FUNC_ALIGNED_MALLOC(2*s->Ncvec * sizeof(v4sf));
+  s->e = (float*)s->data;
+  s->twiddle = (float*)(s->data + (2*s->Ncvec*(SIMD_SZ-1))/SIMD_SZ);  
+
+  if (transform == PFFFT_REAL) {
+    for (k=0; k < s->Ncvec; ++k) {
+      int i = k/SIMD_SZ;
+      int j = k%SIMD_SZ;
+      for (m=0; m < SIMD_SZ-1; ++m) {
+        float A = -2*(float)M_PI*(m+1)*k / N;
+        s->e[(2*(i*3 + m) + 0) * SIMD_SZ + j] = FUNC_COS(A);
+        s->e[(2*(i*3 + m) + 1) * SIMD_SZ + j] = FUNC_SIN(A);
+      }
+    }
+    rffti1_ps(N/SIMD_SZ, s->twiddle, s->ifac);
+  } else {
+    for (k=0; k < s->Ncvec; ++k) {
+      int i = k/SIMD_SZ;
+      int j = k%SIMD_SZ;
+      for (m=0; m < SIMD_SZ-1; ++m) {
+        float A = -2*(float)M_PI*(m+1)*k / N;
+        s->e[(2*(i*3 + m) + 0)*SIMD_SZ + j] = FUNC_COS(A);
+        s->e[(2*(i*3 + m) + 1)*SIMD_SZ + j] = FUNC_SIN(A);
+      }
+    }
+    cffti1_ps(N/SIMD_SZ, s->twiddle, s->ifac);
+  }
+
+  /* check that N is decomposable with allowed prime factors */
+  for (k=0, m=1; k < s->ifac[1]; ++k) { m *= s->ifac[2+k]; }
+  if (m != N/SIMD_SZ) {
+    FUNC_DESTROY(s); s = 0;
+  }
+
+  return s;
+}
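+/* Usage sketch (illustrative; in the single-precision build these entry
+ * points expand to the public pffft_new_setup/pffft_destroy_setup names):
+ *
+ *   SETUP_STRUCT *s = FUNC_NEW_SETUP(512, PFFFT_REAL); // 512 is a multiple of 32
+ *   if (s) {
+ *     // ... run transforms with s ...
+ *     FUNC_DESTROY(s);
+ *   }
+ *
+ * A NULL result means N could not be decomposed into factors 2, 3 and 5,
+ * so callers must always check the returned pointer. */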
+
+
+void FUNC_DESTROY(SETUP_STRUCT *s) {
+  FUNC_ALIGNED_FREE(s->data);
+  free(s);
+}
+
+#if ( SIMD_SZ == 4 )    /* !defined(PFFFT_SIMD_DISABLE) */
+
+/* [0 0 1 2 3 4 5 6 7 8] -> [0 8 7 6 5 4 3 2 1] */
+static void reversed_copy(int N, const v4sf *in, int in_stride, v4sf *out) {
+  v4sf g0, g1;
+  int k;
+  INTERLEAVE2(in[0], in[1], g0, g1); in += in_stride;
+  
+  *--out = VSWAPHL(g0, g1); /* [g0l, g0h], [g1l g1h] -> [g1l, g0h] */
+  for (k=1; k < N; ++k) {
+    v4sf h0, h1;
+    INTERLEAVE2(in[0], in[1], h0, h1); in += in_stride;
+    *--out = VSWAPHL(g1, h0);
+    *--out = VSWAPHL(h0, h1);
+    g1 = h1;
+  }
+  *--out = VSWAPHL(g1, g0);
+}
+
+static void unreversed_copy(int N, const v4sf *in, v4sf *out, int out_stride) {
+  v4sf g0, g1, h0, h1;
+  int k;
+  g0 = g1 = in[0]; ++in;
+  for (k=1; k < N; ++k) {
+    h0 = *in++; h1 = *in++;
+    g1 = VSWAPHL(g1, h0);
+    h0 = VSWAPHL(h0, h1);
+    UNINTERLEAVE2(h0, g1, out[0], out[1]); out += out_stride;
+    g1 = h1;
+  }
+  h0 = *in++; h1 = g0;
+  g1 = VSWAPHL(g1, h0);
+  h0 = VSWAPHL(h0, h1);
+  UNINTERLEAVE2(h0, g1, out[0], out[1]);
+}
+
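+/* Converts between the internal SIMD column layout and the canonical
+ * interleaved order. Sketch of the complex branch: vector k moves to slot
+ * (k/4) + (k%4)*(Ncvec/4) -- a 4 x Ncvec/4 transpose -- while INTERLEAVE2 and
+ * UNINTERLEAVE2 restore the real/imaginary pairing; the real branch also
+ * mirrors the upper half of the spectrum with reversed_copy/unreversed_copy. */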
+void FUNC_ZREORDER(SETUP_STRUCT *setup, const float *in, float *out, pffft_direction_t direction) {
+  int k, N = setup->N, Ncvec = setup->Ncvec;
+  const v4sf *vin = (const v4sf*)in;
+  v4sf *vout = (v4sf*)out;
+  assert(in != out);
+  if (setup->transform == PFFFT_REAL) {
+    int k, dk = N/32;
+    if (direction == PFFFT_FORWARD) {
+      for (k=0; k < dk; ++k) {
+        INTERLEAVE2(vin[k*8 + 0], vin[k*8 + 1], vout[2*(0*dk + k) + 0], vout[2*(0*dk + k) + 1]);
+        INTERLEAVE2(vin[k*8 + 4], vin[k*8 + 5], vout[2*(2*dk + k) + 0], vout[2*(2*dk + k) + 1]);
+      }
+      reversed_copy(dk, vin+2, 8, (v4sf*)(out + N/2));
+      reversed_copy(dk, vin+6, 8, (v4sf*)(out + N));
+    } else {
+      for (k=0; k < dk; ++k) {
+        UNINTERLEAVE2(vin[2*(0*dk + k) + 0], vin[2*(0*dk + k) + 1], vout[k*8 + 0], vout[k*8 + 1]);
+        UNINTERLEAVE2(vin[2*(2*dk + k) + 0], vin[2*(2*dk + k) + 1], vout[k*8 + 4], vout[k*8 + 5]);
+      }
+      unreversed_copy(dk, (v4sf*)(in + N/4), (v4sf*)(out + N - 6*SIMD_SZ), -8);
+      unreversed_copy(dk, (v4sf*)(in + 3*N/4), (v4sf*)(out + N - 2*SIMD_SZ), -8);
+    }
+  } else {
+    if (direction == PFFFT_FORWARD) {
+      for (k=0; k < Ncvec; ++k) { 
+        int kk = (k/4) + (k%4)*(Ncvec/4);
+        INTERLEAVE2(vin[k*2], vin[k*2+1], vout[kk*2], vout[kk*2+1]);
+      }
+    } else {
+      for (k=0; k < Ncvec; ++k) { 
+        int kk = (k/4) + (k%4)*(Ncvec/4);
+        UNINTERLEAVE2(vin[kk*2], vin[kk*2+1], vout[k*2], vout[k*2+1]);
+      }
+    }
+  }
+}
+
+void FUNC_CPLX_FINALIZE(int Ncvec, const v4sf *in, v4sf *out, const v4sf *e) {
+  int k, dk = Ncvec/SIMD_SZ; /* number of 4x4 matrix blocks */
+  v4sf r0, i0, r1, i1, r2, i2, r3, i3;
+  v4sf sr0, dr0, sr1, dr1, si0, di0, si1, di1;
+  assert(in != out);
+  for (k=0; k < dk; ++k) {    
+    r0 = in[8*k+0]; i0 = in[8*k+1];
+    r1 = in[8*k+2]; i1 = in[8*k+3];
+    r2 = in[8*k+4]; i2 = in[8*k+5];
+    r3 = in[8*k+6]; i3 = in[8*k+7];
+    VTRANSPOSE4(r0,r1,r2,r3);
+    VTRANSPOSE4(i0,i1,i2,i3);
+    VCPLXMUL(r1,i1,e[k*6+0],e[k*6+1]);
+    VCPLXMUL(r2,i2,e[k*6+2],e[k*6+3]);
+    VCPLXMUL(r3,i3,e[k*6+4],e[k*6+5]);
+
+    sr0 = VADD(r0,r2); dr0 = VSUB(r0, r2);
+    sr1 = VADD(r1,r3); dr1 = VSUB(r1, r3);
+    si0 = VADD(i0,i2); di0 = VSUB(i0, i2);
+    si1 = VADD(i1,i3); di1 = VSUB(i1, i3);
+
+    /*
+      transformation for each column is:
+      
+      [1   1   1   1   0   0   0   0]   [r0]
+      [1   0  -1   0   0  -1   0   1]   [r1]
+      [1  -1   1  -1   0   0   0   0]   [r2]
+      [1   0  -1   0   0   1   0  -1]   [r3]
+      [0   0   0   0   1   1   1   1] * [i0]
+      [0   1   0  -1   1   0  -1   0]   [i1]
+      [0   0   0   0   1  -1   1  -1]   [i2]
+      [0  -1   0   1   1   0  -1   0]   [i3]    
+    */
+    
+    r0 = VADD(sr0, sr1); i0 = VADD(si0, si1);
+    r1 = VADD(dr0, di1); i1 = VSUB(di0, dr1);
+    r2 = VSUB(sr0, sr1); i2 = VSUB(si0, si1);
+    r3 = VSUB(dr0, di1); i3 = VADD(di0, dr1);
+  
+    *out++ = r0; *out++ = i0; *out++ = r1; *out++ = i1;
+    *out++ = r2; *out++ = i2; *out++ = r3; *out++ = i3;
+  }
+}
+
+void FUNC_CPLX_PREPROCESS(int Ncvec, const v4sf *in, v4sf *out, const v4sf *e) {
+  int k, dk = Ncvec/SIMD_SZ; /* number of 4x4 matrix blocks */
+  v4sf r0, i0, r1, i1, r2, i2, r3, i3;
+  v4sf sr0, dr0, sr1, dr1, si0, di0, si1, di1;
+  assert(in != out);
+  for (k=0; k < dk; ++k) {    
+    r0 = in[8*k+0]; i0 = in[8*k+1];
+    r1 = in[8*k+2]; i1 = in[8*k+3];
+    r2 = in[8*k+4]; i2 = in[8*k+5];
+    r3 = in[8*k+6]; i3 = in[8*k+7];
+
+    sr0 = VADD(r0,r2); dr0 = VSUB(r0, r2);
+    sr1 = VADD(r1,r3); dr1 = VSUB(r1, r3);
+    si0 = VADD(i0,i2); di0 = VSUB(i0, i2);
+    si1 = VADD(i1,i3); di1 = VSUB(i1, i3);
+
+    r0 = VADD(sr0, sr1); i0 = VADD(si0, si1);
+    r1 = VSUB(dr0, di1); i1 = VADD(di0, dr1);
+    r2 = VSUB(sr0, sr1); i2 = VSUB(si0, si1);
+    r3 = VADD(dr0, di1); i3 = VSUB(di0, dr1);
+
+    VCPLXMULCONJ(r1,i1,e[k*6+0],e[k*6+1]);
+    VCPLXMULCONJ(r2,i2,e[k*6+2],e[k*6+3]);
+    VCPLXMULCONJ(r3,i3,e[k*6+4],e[k*6+5]);
+
+    VTRANSPOSE4(r0,r1,r2,r3);
+    VTRANSPOSE4(i0,i1,i2,i3);
+
+    *out++ = r0; *out++ = i0; *out++ = r1; *out++ = i1;
+    *out++ = r2; *out++ = i2; *out++ = r3; *out++ = i3;
+  }
+}
+
+
+static ALWAYS_INLINE(void) FUNC_REAL_FINALIZE_4X4(const v4sf *in0, const v4sf *in1, const v4sf *in,
+                            const v4sf *e, v4sf *out) {
+  v4sf r0, i0, r1, i1, r2, i2, r3, i3;
+  v4sf sr0, dr0, sr1, dr1, si0, di0, si1, di1;
+  r0 = *in0; i0 = *in1;
+  r1 = *in++; i1 = *in++; r2 = *in++; i2 = *in++; r3 = *in++; i3 = *in++;
+  VTRANSPOSE4(r0,r1,r2,r3);
+  VTRANSPOSE4(i0,i1,i2,i3);
+ 
+  /*
+    transformation for each column is:
+
+    [1   1   1   1   0   0   0   0]   [r0]
+    [1   0  -1   0   0  -1   0   1]   [r1]
+    [1   0  -1   0   0   1   0  -1]   [r2]
+    [1  -1   1  -1   0   0   0   0]   [r3]
+    [0   0   0   0   1   1   1   1] * [i0]
+    [0  -1   0   1  -1   0   1   0]   [i1]
+    [0  -1   0   1   1   0  -1   0]   [i2]
+    [0   0   0   0  -1   1  -1   1]   [i3]    
+  */
+  
+  /* cerr << "matrix initial, before e , REAL:\n 1: " << r0 << "\n 1: " << r1 << "\n 1: " << r2 << "\n 1: " << r3 << "\n"; */
+  /* cerr << "matrix initial, before e, IMAG :\n 1: " << i0 << "\n 1: " << i1 << "\n 1: " << i2 << "\n 1: " << i3 << "\n"; */
+
+  VCPLXMUL(r1,i1,e[0],e[1]);
+  VCPLXMUL(r2,i2,e[2],e[3]);
+  VCPLXMUL(r3,i3,e[4],e[5]);
+
+  /* cerr << "matrix initial, real part:\n 1: " << r0 << "\n 1: " << r1 << "\n 1: " << r2 << "\n 1: " << r3 << "\n"; */
+  /* cerr << "matrix initial, imag part:\n 1: " << i0 << "\n 1: " << i1 << "\n 1: " << i2 << "\n 1: " << i3 << "\n"; */
+
+  sr0 = VADD(r0,r2); dr0 = VSUB(r0,r2); 
+  sr1 = VADD(r1,r3); dr1 = VSUB(r3,r1);
+  si0 = VADD(i0,i2); di0 = VSUB(i0,i2); 
+  si1 = VADD(i1,i3); di1 = VSUB(i3,i1);
+
+  r0 = VADD(sr0, sr1);
+  r3 = VSUB(sr0, sr1);
+  i0 = VADD(si0, si1);
+  i3 = VSUB(si1, si0);
+  r1 = VADD(dr0, di1);
+  r2 = VSUB(dr0, di1);
+  i1 = VSUB(dr1, di0);
+  i2 = VADD(dr1, di0);
+
+  *out++ = r0;
+  *out++ = i0;
+  *out++ = r1;
+  *out++ = i1;
+  *out++ = r2;
+  *out++ = i2;
+  *out++ = r3;
+  *out++ = i3;
+
+}
+
+static NEVER_INLINE(void) FUNC_REAL_FINALIZE(int Ncvec, const v4sf *in, v4sf *out, const v4sf *e) {
+  int k, dk = Ncvec/SIMD_SZ; /* number of 4x4 matrix blocks */
+  /* fftpack order is f0r f1r f1i f2r f2i ... f(n-1)r f(n-1)i f(n)r */
+
+  v4sf_union cr, ci, *uout = (v4sf_union*)out;
+  v4sf save = in[7], zero=VZERO();
+  float xr0, xi0, xr1, xi1, xr2, xi2, xr3, xi3;
+  static const float s = (float)M_SQRT2/2;
+
+  cr.v = in[0]; ci.v = in[Ncvec*2-1];
+  assert(in != out);
+  FUNC_REAL_FINALIZE_4X4(&zero, &zero, in+1, e, out);
+
+  /*
+    [cr0 cr1 cr2 cr3 ci0 ci1 ci2 ci3]
+
+    [Xr(1)]  ] [1   1   1   1   0   0   0   0]
+    [Xr(N/4) ] [0   0   0   0   1   s   0  -s]
+    [Xr(N/2) ] [1   0  -1   0   0   0   0   0]
+    [Xr(3N/4)] [0   0   0   0   1  -s   0   s]
+    [Xi(1)   ] [1  -1   1  -1   0   0   0   0]
+    [Xi(N/4) ] [0   0   0   0   0  -s  -1  -s]
+    [Xi(N/2) ] [0  -1   0   1   0   0   0   0]
+    [Xi(3N/4)] [0   0   0   0   0  -s   1  -s]
+  */
+
+  xr0=(cr.f[0]+cr.f[2]) + (cr.f[1]+cr.f[3]); uout[0].f[0] = xr0;
+  xi0=(cr.f[0]+cr.f[2]) - (cr.f[1]+cr.f[3]); uout[1].f[0] = xi0;
+  xr2=(cr.f[0]-cr.f[2]);                     uout[4].f[0] = xr2;
+  xi2=(cr.f[3]-cr.f[1]);                     uout[5].f[0] = xi2;
+  xr1= ci.f[0] + s*(ci.f[1]-ci.f[3]);        uout[2].f[0] = xr1;
+  xi1=-ci.f[2] - s*(ci.f[1]+ci.f[3]);        uout[3].f[0] = xi1;
+  xr3= ci.f[0] - s*(ci.f[1]-ci.f[3]);        uout[6].f[0] = xr3;
+  xi3= ci.f[2] - s*(ci.f[1]+ci.f[3]);        uout[7].f[0] = xi3; 
+
+  for (k=1; k < dk; ++k) {
+    v4sf save_next = in[8*k+7];
+    FUNC_REAL_FINALIZE_4X4(&save, &in[8*k+0], in + 8*k+1,
+                           e + k*6, out + k*8);
+    save = save_next;
+  }
+
+}
+
+static ALWAYS_INLINE(void) FUNC_REAL_PREPROCESS_4X4(const v4sf *in, 
+                                             const v4sf *e, v4sf *out, int first) {
+  v4sf r0=in[0], i0=in[1], r1=in[2], i1=in[3], r2=in[4], i2=in[5], r3=in[6], i3=in[7];
+  /*
+    transformation for each column is:
+
+    [1   1   1   1   0   0   0   0]   [r0]
+    [1   0   0  -1   0  -1  -1   0]   [r1]
+    [1  -1  -1   1   0   0   0   0]   [r2]
+    [1   0   0  -1   0   1   1   0]   [r3]
+    [0   0   0   0   1  -1   1  -1] * [i0]
+    [0  -1   1   0   1   0   0   1]   [i1]
+    [0   0   0   0   1   1  -1  -1]   [i2]
+    [0   1  -1   0   1   0   0   1]   [i3]    
+  */
+
+  v4sf sr0 = VADD(r0,r3), dr0 = VSUB(r0,r3); 
+  v4sf sr1 = VADD(r1,r2), dr1 = VSUB(r1,r2);
+  v4sf si0 = VADD(i0,i3), di0 = VSUB(i0,i3); 
+  v4sf si1 = VADD(i1,i2), di1 = VSUB(i1,i2);
+
+  r0 = VADD(sr0, sr1);
+  r2 = VSUB(sr0, sr1);
+  r1 = VSUB(dr0, si1);
+  r3 = VADD(dr0, si1);
+  i0 = VSUB(di0, di1);
+  i2 = VADD(di0, di1);
+  i1 = VSUB(si0, dr1);
+  i3 = VADD(si0, dr1);
+
+  VCPLXMULCONJ(r1,i1,e[0],e[1]);
+  VCPLXMULCONJ(r2,i2,e[2],e[3]);
+  VCPLXMULCONJ(r3,i3,e[4],e[5]);
+
+  VTRANSPOSE4(r0,r1,r2,r3);
+  VTRANSPOSE4(i0,i1,i2,i3);
+
+  if (!first) {
+    *out++ = r0;
+    *out++ = i0;
+  }
+  *out++ = r1;
+  *out++ = i1;
+  *out++ = r2;
+  *out++ = i2;
+  *out++ = r3;
+  *out++ = i3;
+}
+
+static NEVER_INLINE(void) FUNC_REAL_PREPROCESS(int Ncvec, const v4sf *in, v4sf *out, const v4sf *e) {
+  int k, dk = Ncvec/SIMD_SZ; /* number of 4x4 matrix blocks */
+  /* fftpack order is f0r f1r f1i f2r f2i ... f(n-1)r f(n-1)i f(n)r */
+
+  v4sf_union Xr, Xi, *uout = (v4sf_union*)out;
+  float cr0, ci0, cr1, ci1, cr2, ci2, cr3, ci3;
+  static const float s = (float)M_SQRT2;
+  assert(in != out);
+  for (k=0; k < 4; ++k) {
+    Xr.f[k] = ((float*)in)[8*k];
+    Xi.f[k] = ((float*)in)[8*k+4];
+  }
+
+  FUNC_REAL_PREPROCESS_4X4(in, e, out+1, 1); /* will write only 6 values */
+
+  /*
+    [Xr0 Xr1 Xr2 Xr3 Xi0 Xi1 Xi2 Xi3]
+
+    [cr0] [1   0   2   0   1   0   0   0]
+    [cr1] [1   0   0   0  -1   0  -2   0]
+    [cr2] [1   0  -2   0   1   0   0   0]
+    [cr3] [1   0   0   0  -1   0   2   0]
+    [ci0] [0   2   0   2   0   0   0   0]
+    [ci1] [0   s   0  -s   0  -s   0  -s]
+    [ci2] [0   0   0   0   0  -2   0   2]
+    [ci3] [0  -s   0   s   0  -s   0  -s]
+  */
+  for (k=1; k < dk; ++k) {    
+    FUNC_REAL_PREPROCESS_4X4(in+8*k, e + k*6, out-1+k*8, 0);
+  }
+
+  cr0=(Xr.f[0]+Xi.f[0]) + 2*Xr.f[2]; uout[0].f[0] = cr0;
+  cr1=(Xr.f[0]-Xi.f[0]) - 2*Xi.f[2]; uout[0].f[1] = cr1;
+  cr2=(Xr.f[0]+Xi.f[0]) - 2*Xr.f[2]; uout[0].f[2] = cr2;
+  cr3=(Xr.f[0]-Xi.f[0]) + 2*Xi.f[2]; uout[0].f[3] = cr3;
+  ci0= 2*(Xr.f[1]+Xr.f[3]);                       uout[2*Ncvec-1].f[0] = ci0;
+  ci1= s*(Xr.f[1]-Xr.f[3]) - s*(Xi.f[1]+Xi.f[3]); uout[2*Ncvec-1].f[1] = ci1;
+  ci2= 2*(Xi.f[3]-Xi.f[1]);                       uout[2*Ncvec-1].f[2] = ci2;
+  ci3=-s*(Xr.f[1]-Xr.f[3]) - s*(Xi.f[1]+Xi.f[3]); uout[2*Ncvec-1].f[3] = ci3;
+}
+
+
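+/* Buffer ping-pong driver (sketch): buff[0] aliases the caller's output and
+ * buff[1] the scratch area, with 'ib' tracking which of the two holds the
+ * live data. Each fftpack pass and each finalize/preprocess/reorder step
+ * flips the buffers; ib is seeded from nf_odd ^ ordered so that the final
+ * write usually lands directly in voutput and the trailing copy is only
+ * needed when finput == foutput. */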
+void FUNC_TRANSFORM_INTERNAL(SETUP_STRUCT *setup, const float *finput, float *foutput, v4sf *scratch,
+                             pffft_direction_t direction, int ordered) {
+  int k, Ncvec   = setup->Ncvec;
+  int nf_odd = (setup->ifac[1] & 1);
+
+  /* temporary buffer is allocated on the stack if the scratch pointer is NULL */
+  int stack_allocate = (scratch == 0 ? Ncvec*2 : 1);
+  VLA_ARRAY_ON_STACK(v4sf, scratch_on_stack, stack_allocate);
+
+  const v4sf *vinput = (const v4sf*)finput;
+  v4sf *voutput      = (v4sf*)foutput;
+  v4sf *buff[2]      = { voutput, scratch ? scratch : scratch_on_stack };
+  int ib = (nf_odd ^ ordered ? 1 : 0);
+
+  assert(VALIGNED(finput) && VALIGNED(foutput));
+
+  /* assert(finput != foutput); */
+  if (direction == PFFFT_FORWARD) {
+    ib = !ib;
+    if (setup->transform == PFFFT_REAL) { 
+      ib = (rfftf1_ps(Ncvec*2, vinput, buff[ib], buff[!ib],
+                      setup->twiddle, &setup->ifac[0]) == buff[0] ? 0 : 1);      
+      FUNC_REAL_FINALIZE(Ncvec, buff[ib], buff[!ib], (v4sf*)setup->e);
+    } else {
+      v4sf *tmp = buff[ib];
+      for (k=0; k < Ncvec; ++k) {
+        UNINTERLEAVE2(vinput[k*2], vinput[k*2+1], tmp[k*2], tmp[k*2+1]);
+      }
+      ib = (cfftf1_ps(Ncvec, buff[ib], buff[!ib], buff[ib], 
+                      setup->twiddle, &setup->ifac[0], -1) == buff[0] ? 0 : 1);
+      FUNC_CPLX_FINALIZE(Ncvec, buff[ib], buff[!ib], (v4sf*)setup->e);
+    }
+    if (ordered) {
+      FUNC_ZREORDER(setup, (float*)buff[!ib], (float*)buff[ib], PFFFT_FORWARD);
+    } else ib = !ib;
+  } else {
+    if (vinput == buff[ib]) { 
+      ib = !ib; /* may happen when finput == foutput */
+    }
+    if (ordered) {
+      FUNC_ZREORDER(setup, (float*)vinput, (float*)buff[ib], PFFFT_BACKWARD);
+      vinput = buff[ib]; ib = !ib;
+    }
+    if (setup->transform == PFFFT_REAL) {
+      FUNC_REAL_PREPROCESS(Ncvec, vinput, buff[ib], (v4sf*)setup->e);
+      ib = (rfftb1_ps(Ncvec*2, buff[ib], buff[0], buff[1], 
+                      setup->twiddle, &setup->ifac[0]) == buff[0] ? 0 : 1);
+    } else {
+      FUNC_CPLX_PREPROCESS(Ncvec, vinput, buff[ib], (v4sf*)setup->e);
+      ib = (cfftf1_ps(Ncvec, buff[ib], buff[0], buff[1], 
+                      setup->twiddle, &setup->ifac[0], +1) == buff[0] ? 0 : 1);
+      for (k=0; k < Ncvec; ++k) {
+        INTERLEAVE2(buff[ib][k*2], buff[ib][k*2+1], buff[ib][k*2], buff[ib][k*2+1]);
+      }
+    }
+  }
+  
+  if (buff[ib] != voutput) {
+    /* extra copy required -- this situation should only happen when finput == foutput */
+    assert(finput==foutput);
+    for (k=0; k < Ncvec; ++k) {
+      v4sf a = buff[ib][2*k], b = buff[ib][2*k+1];
+      voutput[2*k] = a; voutput[2*k+1] = b;
+    }
+    ib = !ib;
+  }
+  assert(buff[ib] == voutput);
+}
+
+void FUNC_ZCONVOLVE_ACCUMULATE(SETUP_STRUCT *s, const float *a, const float *b, float *ab, float scaling) {
+  int Ncvec = s->Ncvec;
+  const v4sf * RESTRICT va = (const v4sf*)a;
+  const v4sf * RESTRICT vb = (const v4sf*)b;
+  v4sf * RESTRICT vab = (v4sf*)ab;
+
+#ifdef __arm__
+  __builtin_prefetch(va);
+  __builtin_prefetch(vb);
+  __builtin_prefetch(vab);
+  __builtin_prefetch(va+2);
+  __builtin_prefetch(vb+2);
+  __builtin_prefetch(vab+2);
+  __builtin_prefetch(va+4);
+  __builtin_prefetch(vb+4);
+  __builtin_prefetch(vab+4);
+  __builtin_prefetch(va+6);
+  __builtin_prefetch(vb+6);
+  __builtin_prefetch(vab+6);
+# ifndef __clang__
+#   define ZCONVOLVE_USING_INLINE_NEON_ASM
+# endif
+#endif
+
+  float ar, ai, br, bi, abr, abi;
+#ifndef ZCONVOLVE_USING_INLINE_ASM
+  v4sf vscal = LD_PS1(scaling);
+  int i;
+#endif
+
+  assert(VALIGNED(a) && VALIGNED(b) && VALIGNED(ab));
+  ar = ((v4sf_union*)va)[0].f[0];
+  ai = ((v4sf_union*)va)[1].f[0];
+  br = ((v4sf_union*)vb)[0].f[0];
+  bi = ((v4sf_union*)vb)[1].f[0];
+  abr = ((v4sf_union*)vab)[0].f[0];
+  abi = ((v4sf_union*)vab)[1].f[0];
+ 
+#ifdef ZCONVOLVE_USING_INLINE_ASM
+  /* inline asm version; unfortunately miscompiled by clang 3.2 (at least on
+   * Ubuntu), so it is restricted to gcc */
+  const float *a_ = a, *b_ = b; float *ab_ = ab;
+  int N = Ncvec;
+  asm volatile("mov         r8, %2                  \n"
+               "vdup.f32    q15, %4                 \n"
+               "1:                                  \n"
+               "pld         [%0,#64]                \n"
+               "pld         [%1,#64]                \n"
+               "pld         [%2,#64]                \n"
+               "pld         [%0,#96]                \n"
+               "pld         [%1,#96]                \n"
+               "pld         [%2,#96]                \n"
+               "vld1.f32    {q0,q1},   [%0,:128]!         \n"
+               "vld1.f32    {q4,q5},   [%1,:128]!         \n"
+               "vld1.f32    {q2,q3},   [%0,:128]!         \n"
+               "vld1.f32    {q6,q7},   [%1,:128]!         \n"
+               "vld1.f32    {q8,q9},   [r8,:128]!          \n"
+               
+               "vmul.f32    q10, q0, q4             \n"
+               "vmul.f32    q11, q0, q5             \n"
+               "vmul.f32    q12, q2, q6             \n" 
+               "vmul.f32    q13, q2, q7             \n"                 
+               "vmls.f32    q10, q1, q5             \n"
+               "vmla.f32    q11, q1, q4             \n"
+               "vld1.f32    {q0,q1}, [r8,:128]!     \n"
+               "vmls.f32    q12, q3, q7             \n"
+               "vmla.f32    q13, q3, q6             \n"
+               "vmla.f32    q8, q10, q15            \n"
+               "vmla.f32    q9, q11, q15            \n"
+               "vmla.f32    q0, q12, q15            \n"
+               "vmla.f32    q1, q13, q15            \n"
+               "vst1.f32    {q8,q9},[%2,:128]!    \n"
+               "vst1.f32    {q0,q1},[%2,:128]!    \n"
+               "subs        %3, #2                  \n"
+               "bne         1b                      \n"
+               : "+r"(a_), "+r"(b_), "+r"(ab_), "+r"(N) : "r"(scaling) : "r8", "q0","q1","q2","q3","q4","q5","q6","q7","q8","q9", "q10","q11","q12","q13","q15","memory");
+#else
+  /* default routine, works fine for non-arm cpus with current compilers */
+  for (i=0; i < Ncvec; i += 2) {
+    v4sf ar, ai, br, bi;
+    ar = va[2*i+0]; ai = va[2*i+1];
+    br = vb[2*i+0]; bi = vb[2*i+1];
+    VCPLXMUL(ar, ai, br, bi);
+    vab[2*i+0] = VMADD(ar, vscal, vab[2*i+0]);
+    vab[2*i+1] = VMADD(ai, vscal, vab[2*i+1]);
+    ar = va[2*i+2]; ai = va[2*i+3];
+    br = vb[2*i+2]; bi = vb[2*i+3];
+    VCPLXMUL(ar, ai, br, bi);
+    vab[2*i+2] = VMADD(ar, vscal, vab[2*i+2]);
+    vab[2*i+3] = VMADD(ai, vscal, vab[2*i+3]);
+  }
+#endif
+  if (s->transform == PFFFT_REAL) {
+    ((v4sf_union*)vab)[0].f[0] = abr + ar*br*scaling;
+    ((v4sf_union*)vab)[1].f[0] = abi + ai*bi*scaling;
+  }
+}
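+/* Reference semantics of the vector loop above, written out in scalar form:
+ * VCPLXMUL performs the complex product
+ *   re = ar*br - ai*bi;  im = ar*bi + ai*br;
+ * and the VMADD lines accumulate ab += scaling * (re + i*im) per bin. The two
+ * trailing f[0] fixups restore the DC and Nyquist terms, which the PFFFT_REAL
+ * layout packs as plain reals and which must not be complex-multiplied. */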
+
+void FUNC_ZCONVOLVE_NO_ACCU(SETUP_STRUCT *s, const float *a, const float *b, float *ab, float scaling) {
+  v4sf vscal = LD_PS1(scaling);
+  const v4sf * RESTRICT va = (const v4sf*)a;
+  const v4sf * RESTRICT vb = (const v4sf*)b;
+  v4sf * RESTRICT vab = (v4sf*)ab;
+  float sar, sai, sbr, sbi;
+  const int NcvecMulTwo = 2*s->Ncvec;  /* replaces the former 'int Ncvec = s->Ncvec' */
+  int k; /* loop index; replaces the old 'i', which was only ever used as '2*i' */
+
+#ifdef __arm__
+  __builtin_prefetch(va);
+  __builtin_prefetch(vb);
+  __builtin_prefetch(vab);
+  __builtin_prefetch(va+2);
+  __builtin_prefetch(vb+2);
+  __builtin_prefetch(vab+2);
+  __builtin_prefetch(va+4);
+  __builtin_prefetch(vb+4);
+  __builtin_prefetch(vab+4);
+  __builtin_prefetch(va+6);
+  __builtin_prefetch(vb+6);
+  __builtin_prefetch(vab+6);
+#endif
+
+  assert(VALIGNED(a) && VALIGNED(b) && VALIGNED(ab));
+  sar = ((v4sf_union*)va)[0].f[0];
+  sai = ((v4sf_union*)va)[1].f[0];
+  sbr = ((v4sf_union*)vb)[0].f[0];
+  sbi = ((v4sf_union*)vb)[1].f[0];
+
+  /* default routine, works fine for non-arm cpus with current compilers */
+  for (k=0; k < NcvecMulTwo; k += 4) {
+    v4sf var, vai, vbr, vbi;
+    var = va[k+0]; vai = va[k+1];
+    vbr = vb[k+0]; vbi = vb[k+1];
+    VCPLXMUL(var, vai, vbr, vbi);
+    vab[k+0] = VMUL(var, vscal);
+    vab[k+1] = VMUL(vai, vscal);
+    var = va[k+2]; vai = va[k+3];
+    vbr = vb[k+2]; vbi = vb[k+3];
+    VCPLXMUL(var, vai, vbr, vbi);
+    vab[k+2] = VMUL(var, vscal);
+    vab[k+3] = VMUL(vai, vscal);
+  }
+
+  if (s->transform == PFFFT_REAL) {
+    ((v4sf_union*)vab)[0].f[0] = sar*sbr*scaling;
+    ((v4sf_union*)vab)[1].f[0] = sai*sbi*scaling;
+  }
+}
+
+
+#else  /* #if ( SIMD_SZ == 4 )   * !defined(PFFFT_SIMD_DISABLE) */
+
+/* standard routine using scalar floats, without SIMD stuff. */
+
+#define pffft_zreorder_nosimd FUNC_ZREORDER
+void pffft_zreorder_nosimd(SETUP_STRUCT *setup, const float *in, float *out, pffft_direction_t direction) {
+  int k, N = setup->N;
+  if (setup->transform == PFFFT_COMPLEX) {
+    for (k=0; k < 2*N; ++k) out[k] = in[k];
+    return;
+  }
+  else if (direction == PFFFT_FORWARD) {
+    float x_N = in[N-1];
+    for (k=N-1; k > 1; --k) out[k] = in[k-1]; 
+    out[0] = in[0];
+    out[1] = x_N;
+  } else {
+    float x_N = in[1];
+    for (k=1; k < N-1; ++k) out[k] = in[k+1]; 
+    out[0] = in[0];
+    out[N-1] = x_N;
+  }
+}
+
+#define pffft_transform_internal_nosimd FUNC_TRANSFORM_INTERNAL
+void pffft_transform_internal_nosimd(SETUP_STRUCT *setup, const float *input, float *output, float *scratch,
+                                    pffft_direction_t direction, int ordered) {
+  int Ncvec   = setup->Ncvec;
+  int nf_odd = (setup->ifac[1] & 1);
+
+  /* temporary buffer is allocated on the stack if the scratch pointer is NULL */
+  int stack_allocate = (scratch == 0 ? Ncvec*2 : 1);
+  VLA_ARRAY_ON_STACK(v4sf, scratch_on_stack, stack_allocate);
+  float *buff[2];
+  int ib;
+  if (scratch == 0) scratch = scratch_on_stack;
+  buff[0] = output; buff[1] = scratch;
+
+  if (setup->transform == PFFFT_COMPLEX) ordered = 0; /* it is always ordered. */
+  ib = (nf_odd ^ ordered ? 1 : 0);
+
+  if (direction == PFFFT_FORWARD) {
+    if (setup->transform == PFFFT_REAL) {
+      ib = (rfftf1_ps(Ncvec*2, input, buff[ib], buff[!ib],
+                      setup->twiddle, &setup->ifac[0]) == buff[0] ? 0 : 1);      
+    } else {
+      ib = (cfftf1_ps(Ncvec, input, buff[ib], buff[!ib], 
+                      setup->twiddle, &setup->ifac[0], -1) == buff[0] ? 0 : 1);
+    }
+    if (ordered) {
+      FUNC_ZREORDER(setup, buff[ib], buff[!ib], PFFFT_FORWARD); ib = !ib;
+    }
+  } else {    
+    if (input == buff[ib]) { 
+      ib = !ib; /* may happen when finput == foutput */
+    }
+    if (ordered) {
+      FUNC_ZREORDER(setup, input, buff[!ib], PFFFT_BACKWARD); 
+      input = buff[!ib];
+    }
+    if (setup->transform == PFFFT_REAL) {
+      ib = (rfftb1_ps(Ncvec*2, input, buff[ib], buff[!ib], 
+                      setup->twiddle, &setup->ifac[0]) == buff[0] ? 0 : 1);
+    } else {
+      ib = (cfftf1_ps(Ncvec, input, buff[ib], buff[!ib], 
+                      setup->twiddle, &setup->ifac[0], +1) == buff[0] ? 0 : 1);
+    }
+  }
+  if (buff[ib] != output) {
+    int k;
+    /* extra copy required -- this situation should only happen when finput == foutput */
+    assert(input==output);
+    for (k=0; k < Ncvec; ++k) {
+      float a = buff[ib][2*k], b = buff[ib][2*k+1];
+      output[2*k] = a; output[2*k+1] = b;
+    }
+    ib = !ib;
+  }
+  assert(buff[ib] == output);
+}
+
+#define pffft_zconvolve_accumulate_nosimd FUNC_ZCONVOLVE_ACCUMULATE
+void pffft_zconvolve_accumulate_nosimd(SETUP_STRUCT *s, const float *a, const float *b,
+                                       float *ab, float scaling) {
+  int NcvecMulTwo = 2*s->Ncvec;  /* replaces the former 'int Ncvec = s->Ncvec' */
+  int k; /* loop index; replaces the old 'i', which was only ever used as '2*i' */
+
+  if (s->transform == PFFFT_REAL) {
+    /* take care of the fftpack ordering */
+    ab[0] += a[0]*b[0]*scaling;
+    ab[NcvecMulTwo-1] += a[NcvecMulTwo-1]*b[NcvecMulTwo-1]*scaling;
+    ++ab; ++a; ++b; NcvecMulTwo -= 2;
+  }
+  for (k=0; k < NcvecMulTwo; k += 2) {
+    float ar, ai, br, bi;
+    ar = a[k+0]; ai = a[k+1];
+    br = b[k+0]; bi = b[k+1];
+    VCPLXMUL(ar, ai, br, bi);
+    ab[k+0] += ar*scaling;
+    ab[k+1] += ai*scaling;
+  }
+}
+
+#define pffft_zconvolve_no_accu_nosimd FUNC_ZCONVOLVE_NO_ACCU
+void pffft_zconvolve_no_accu_nosimd(SETUP_STRUCT *s, const float *a, const float *b,
+                                    float *ab, float scaling) {
+  int NcvecMulTwo = 2*s->Ncvec;  /* replaces the former 'int Ncvec = s->Ncvec' */
+  int k; /* loop index; replaces the old 'i', which was only ever used as '2*i' */
+
+  if (s->transform == PFFFT_REAL) {
+    /* take care of the fftpack ordering */
+    ab[0] += a[0]*b[0]*scaling;
+    ab[NcvecMulTwo-1] += a[NcvecMulTwo-1]*b[NcvecMulTwo-1]*scaling;
+    ++ab; ++a; ++b; NcvecMulTwo -= 2;
+  }
+  for (k=0; k < NcvecMulTwo; k += 2) {
+    float ar, ai, br, bi;
+    ar = a[k+0]; ai = a[k+1];
+    br = b[k+0]; bi = b[k+1];
+    VCPLXMUL(ar, ai, br, bi);
+    ab[k+0] = ar*scaling;
+    ab[k+1] = ai*scaling;
+  }
+}
+
+
+#endif /* #if ( SIMD_SZ == 4 )    * !defined(PFFFT_SIMD_DISABLE) */
+
+
+void FUNC_TRANSFORM_UNORDRD(SETUP_STRUCT *setup, const float *input, float *output, float *work, pffft_direction_t direction) {
+  FUNC_TRANSFORM_INTERNAL(setup, input, output, (v4sf*)work, direction, 0);
+}
+
+void FUNC_TRANSFORM_ORDERED(SETUP_STRUCT *setup, const float *input, float *output, float *work, pffft_direction_t direction) {
+  FUNC_TRANSFORM_INTERNAL(setup, input, output, (v4sf*)work, direction, 1);
+}
+
+
+#if ( SIMD_SZ == 4 )
+
+#define assertv4(v,f0,f1,f2,f3) assert(v.f[0] == (f0) && v.f[1] == (f1) && v.f[2] == (f2) && v.f[3] == (f3))
+
+/* detect bugs with the vector support macros */
+void FUNC_VALIDATE_SIMD_A() {
+  float f[16] = { 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 };
+  v4sf_union a0, a1, a2, a3, t, u; 
+  memcpy(a0.f, f, 4*sizeof(float));
+  memcpy(a1.f, f+4, 4*sizeof(float));
+  memcpy(a2.f, f+8, 4*sizeof(float));
+  memcpy(a3.f, f+12, 4*sizeof(float));
+
+  t = a0; u = a1; t.v = VZERO();
+  printf("VZERO=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]); assertv4(t, 0, 0, 0, 0);
+  t.v = VADD(a1.v, a2.v);
+  printf("VADD(4:7,8:11)=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]); assertv4(t, 12, 14, 16, 18);
+  t.v = VMUL(a1.v, a2.v);
+  printf("VMUL(4:7,8:11)=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]); assertv4(t, 32, 45, 60, 77);
+  t.v = VMADD(a1.v, a2.v,a0.v);
+  printf("VMADD(4:7,8:11,0:3)=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]); assertv4(t, 32, 46, 62, 80);
+
+  INTERLEAVE2(a1.v,a2.v,t.v,u.v);
+  printf("INTERLEAVE2(4:7,8:11)=[%2g %2g %2g %2g] [%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3], u.f[0], u.f[1], u.f[2], u.f[3]);
+  assertv4(t, 4, 8, 5, 9); assertv4(u, 6, 10, 7, 11);
+  UNINTERLEAVE2(a1.v,a2.v,t.v,u.v);
+  printf("UNINTERLEAVE2(4:7,8:11)=[%2g %2g %2g %2g] [%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3], u.f[0], u.f[1], u.f[2], u.f[3]);
+  assertv4(t, 4, 6, 8, 10); assertv4(u, 5, 7, 9, 11);
+
+  t.v=LD_PS1(f[15]);
+  printf("LD_PS1(15)=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]);
+  assertv4(t, 15, 15, 15, 15);
+  t.v = VSWAPHL(a1.v, a2.v);
+  printf("VSWAPHL(4:7,8:11)=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]);
+  assertv4(t, 8, 9, 6, 7);
+  VTRANSPOSE4(a0.v, a1.v, a2.v, a3.v);
+  printf("VTRANSPOSE4(0:3,4:7,8:11,12:15)=[%2g %2g %2g %2g] [%2g %2g %2g %2g] [%2g %2g %2g %2g] [%2g %2g %2g %2g]\n", 
+         a0.f[0], a0.f[1], a0.f[2], a0.f[3], a1.f[0], a1.f[1], a1.f[2], a1.f[3], 
+         a2.f[0], a2.f[1], a2.f[2], a2.f[3], a3.f[0], a3.f[1], a3.f[2], a3.f[3]); 
+  assertv4(a0, 0, 4, 8, 12); assertv4(a1, 1, 5, 9, 13); assertv4(a2, 2, 6, 10, 14); assertv4(a3, 3, 7, 11, 15);
+}
+
+
+static void pffft_assert1( float result, float ref, const char * vartxt, const char * functxt, int * numErrs, const char * f, int lineNo )
+{
+  if ( !( fabsf( result - ref ) < 0.01F ) )
+  {
+    fprintf(stderr, "%s: assert for %s at %s(%d)\n  expected %f  value %f\n", functxt, vartxt, f, lineNo, ref, result);
+    ++(*numErrs);
+  }
+}
+
+static void pffft_assert4( const v4sf_union V, float a, float b, float c, float d, const char * functxt, int * numErrs, const char * f, int lineNo )
+{
+  pffft_assert1( V.f[0], a, "[0]", functxt, numErrs, f, lineNo );
+  pffft_assert1( V.f[1], b, "[1]", functxt, numErrs, f, lineNo );
+  pffft_assert1( V.f[2], c, "[2]", functxt, numErrs, f, lineNo );
+  pffft_assert1( V.f[3], d, "[3]", functxt, numErrs, f, lineNo );
+}
+
+#define PFFFT_ASSERT4( V, a, b, c, d, FUNCTXT )  pffft_assert4( V, a, b, c, d, FUNCTXT, &numErrs, __FILE__, __LINE__ )
+
+
+int FUNC_VALIDATE_SIMD_EX(FILE * DbgOut)
+{
+  int numErrs = 0;
+
+  {
+    v4sf_union C;
+    int k;
+    for ( k = 0; k < 4; ++k )  C.f[k] = 30 + k+1;
+
+    if (DbgOut) {
+      fprintf(DbgOut, "\ninput: { }\n" );
+    }
+    C.v = VZERO();
+    if (DbgOut) {
+      fprintf(DbgOut, "VZERO(a) => C) => {\n" );
+      fprintf(DbgOut, "  Out C:  %f, %f, %f, %f\n", C.f[0], C.f[1], C.f[2], C.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    PFFFT_ASSERT4( C, 0.0F, 0.0F, 0.0F, 0.0F, "VZERO() Out C" );
+  }
+
+  {
+    v4sf_union C;
+    float a = 42.0F;
+    int k;
+    for ( k = 0; k < 4; ++k )  C.f[k] = 30 + k+1;
+
+    if (DbgOut) {
+      fprintf(DbgOut, "\ninput: a = {\n" );
+      fprintf(DbgOut, "  Inp a:  %f\n", a );
+      fprintf(DbgOut, "}\n" );
+    }
+    C.v = LD_PS1(a);
+    if (DbgOut) {
+      fprintf(DbgOut, "LD_PS1(a) => C) => {\n" );
+      fprintf(DbgOut, "  Out C:  %f, %f, %f, %f\n", C.f[0], C.f[1], C.f[2], C.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    PFFFT_ASSERT4( C, 42.0F, 42.0F, 42.0F, 42.0F, "LD_PS1() Out C" );
+  }
+
+  {
+    v4sf_union C;
+    float a[16];
+    int numAligned = 0, numUnaligned = 0;
+    int k;
+    const char * pUn;
+    for ( k = 0; k < 16; ++k ) a[k] = k+1;
+
+    for ( k = 0; k + 3 < 16; ++k )
+    {
+      const float * ptr = &a[k];
+      if (DbgOut)
+        fprintf(DbgOut, "\ninput: a = [ %f, %f, %f, %f ]\n", ptr[0], ptr[1], ptr[2], ptr[3] );
+      if ( VALIGNED(ptr) )
+      {
+        C.v = VLOAD_ALIGNED( ptr );
+        pUn = "";
+        ++numAligned;
+      }
+      else
+      {
+        C.v = VLOAD_UNALIGNED( ptr );
+        pUn = "UN";
+        ++numUnaligned;
+      }
+      if (DbgOut) {
+        fprintf(DbgOut, "C = VLOAD_%sALIGNED(&a[%d]) => {\n", pUn, k );
+        fprintf(DbgOut, "  Out C:  %f, %f, %f, %f\n", C.f[0], C.f[1], C.f[2], C.f[3] );
+        fprintf(DbgOut, "}\n" );
+      }
+      
+      if ( numAligned >= 1 && numUnaligned >= 4 )
+        break;
+    }
+    if ( numAligned < 1 ) {
+      fprintf(stderr, "VALIGNED() should have found at least 1 occurence!");
+      ++numErrs;
+    }
+    if ( numUnaligned < 4 ) {
+      fprintf(stderr, "!VALIGNED() should have found at least 4 occurences!");
+      ++numErrs;
+    }
+  }
+
+  {
+    v4sf_union A, B, C;
+    int k;
+    for ( k = 0; k < 4; ++k )  A.f[k] = 10 + k+1;
+    for ( k = 0; k < 4; ++k )  B.f[k] = 20 + k+1;
+    for ( k = 0; k < 4; ++k )  C.f[k] = 30 + k+1;
+
+    if (DbgOut) {
+      fprintf(DbgOut, "\ninput: A,B = {\n" );
+      fprintf(DbgOut, "  Inp A:  %f, %f, %f, %f\n", A.f[0], A.f[1], A.f[2], A.f[3] );
+      fprintf(DbgOut, "  Inp B:  %f, %f, %f, %f\n", B.f[0], B.f[1], B.f[2], B.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    C.v = VADD(A.v, B.v);
+    if (DbgOut) {
+      fprintf(DbgOut, "C = VADD(A,B) => {\n" );
+      fprintf(DbgOut, "  Out C:  %f, %f, %f, %f\n", C.f[0], C.f[1], C.f[2], C.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    PFFFT_ASSERT4( A, 11.0F, 12.0F, 13.0F, 14.0F, "VADD(): Inp A" );
+    PFFFT_ASSERT4( B, 21.0F, 22.0F, 23.0F, 24.0F, "VADD(): Inp B" );
+    PFFFT_ASSERT4( C, 32.0F, 34.0F, 36.0F, 38.0F, "VADD(): Out C" );
+  }
+
+  {
+    v4sf_union A, B, C;
+    int k;
+    for ( k = 0; k < 4; ++k )  A.f[k] = 20 + 2*k+1;
+    for ( k = 0; k < 4; ++k )  B.f[k] = 10 + k+1;
+    for ( k = 0; k < 4; ++k )  C.f[k] = 30 + k+1;
+
+    if (DbgOut) {
+      fprintf(DbgOut, "\ninput: A,B = {\n" );
+      fprintf(DbgOut, "  Inp A:  %f, %f, %f, %f\n", A.f[0], A.f[1], A.f[2], A.f[3] );
+      fprintf(DbgOut, "  Inp B:  %f, %f, %f, %f\n", B.f[0], B.f[1], B.f[2], B.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    C.v = VSUB(A.v, B.v);
+    if (DbgOut) {
+      fprintf(DbgOut, "C = VSUB(A,B) => {\n" );
+      fprintf(DbgOut, "  Out C:  %f, %f, %f, %f\n", C.f[0], C.f[1], C.f[2], C.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    PFFFT_ASSERT4( A, 21.0F, 23.0F, 25.0F, 27.0F, "VSUB(): Inp A" );
+    PFFFT_ASSERT4( B, 11.0F, 12.0F, 13.0F, 14.0F, "VSUB(): Inp B" );
+    PFFFT_ASSERT4( C, 10.0F, 11.0F, 12.0F, 13.0F, "VSUB(): Out C" );
+  }
+
+  {
+    v4sf_union A, B, C;
+    int k;
+    for ( k = 0; k < 4; ++k )  A.f[k] = 10 + k+1;
+    for ( k = 0; k < 4; ++k )  B.f[k] = k+1;
+    for ( k = 0; k < 4; ++k )  C.f[k] = 30 + k+1;
+
+    if (DbgOut) {
+      fprintf(DbgOut, "\ninput: A,B = {\n" );
+      fprintf(DbgOut, "  Inp A:  %f, %f, %f, %f\n", A.f[0], A.f[1], A.f[2], A.f[3] );
+      fprintf(DbgOut, "  Inp B:  %f, %f, %f, %f\n", B.f[0], B.f[1], B.f[2], B.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    C.v = VMUL(A.v, B.v);
+    if (DbgOut) {
+      fprintf(DbgOut, "C = VMUL(A,B) => {\n" );
+      fprintf(DbgOut, "  Out C:  %f, %f, %f, %f\n", C.f[0], C.f[1], C.f[2], C.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    PFFFT_ASSERT4( A, 11.0F, 12.0F, 13.0F, 14.0F, "VMUL(): Inp A" );
+    PFFFT_ASSERT4( B,  1.0F,  2.0F,  3.0F,  4.0F, "VMUL(): Inp B" );
+    PFFFT_ASSERT4( C, 11.0F, 24.0F, 39.0F, 56.0F, "VMUL(): Out C" );
+  }
+
+  {
+    v4sf_union A, B, C, D;
+    int k;
+    for ( k = 0; k < 4; ++k )  A.f[k] = 10 + k+1;
+    for ( k = 0; k < 4; ++k )  B.f[k] = k+1;
+    for ( k = 0; k < 4; ++k )  C.f[k] = 10 + k;
+    for ( k = 0; k < 4; ++k )  D.f[k] = 40 + k+1;
+
+    if (DbgOut) {
+      fprintf(DbgOut, "\ninput: A,B,C = {\n" );
+      fprintf(DbgOut, "  Inp A:  %f, %f, %f, %f\n", A.f[0], A.f[1], A.f[2], A.f[3] );
+      fprintf(DbgOut, "  Inp B:  %f, %f, %f, %f\n", B.f[0], B.f[1], B.f[2], B.f[3] );
+      fprintf(DbgOut, "  Inp C:  %f, %f, %f, %f\n", C.f[0], C.f[1], C.f[2], C.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    D.v = VMADD(A.v, B.v, C.v);
+    if (DbgOut) {
+      fprintf(DbgOut, "D = VMADD(A,B,C) => {\n" );
+      fprintf(DbgOut, "  Out D:  %f, %f, %f, %f\n", D.f[0], D.f[1], D.f[2], D.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    PFFFT_ASSERT4( A, 11.0F, 12.0F, 13.0F, 14.0F, "VMADD(): Inp A" );
+    PFFFT_ASSERT4( B,  1.0F,  2.0F,  3.0F,  4.0F, "VMADD(): Inp B" );
+    PFFFT_ASSERT4( C, 10.0F, 11.0F, 12.0F, 13.0F, "VMADD(): Inp C" );
+    PFFFT_ASSERT4( D, 21.0F, 35.0F, 51.0F, 69.0F, "VMADD(): Out D" );
+  }
+
+  {
+    v4sf_union A, B, C, D;
+    int k;
+    for ( k = 0; k < 4; ++k )  A.f[k] = 10 + k+1;
+    for ( k = 0; k < 4; ++k )  B.f[k] = 20 + k+1;
+    for ( k = 0; k < 4; ++k )  C.f[k] = 30 + k+1;
+    for ( k = 0; k < 4; ++k )  D.f[k] = 40 + k+1;
+
+    if (DbgOut) {
+      fprintf(DbgOut, "\ninput: A,B = {\n" );
+      fprintf(DbgOut, "  Inp A:  %f, %f, %f, %f\n", A.f[0], A.f[1], A.f[2], A.f[3] );
+      fprintf(DbgOut, "  Inp B:  %f, %f, %f, %f\n", B.f[0], B.f[1], B.f[2], B.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    INTERLEAVE2(A.v, B.v, C.v, D.v);
+    if (DbgOut) {
+      fprintf(DbgOut, "INTERLEAVE2(A,B, => C,D) => {\n" );
+      fprintf(DbgOut, "  Out C:  %f, %f, %f, %f\n", C.f[0], C.f[1], C.f[2], C.f[3] );
+      fprintf(DbgOut, "  Out D:  %f, %f, %f, %f\n", D.f[0], D.f[1], D.f[2], D.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    PFFFT_ASSERT4( A, 11.0F, 12.0F, 13.0F, 14.0F, "INTERLEAVE2() Inp A" );
+    PFFFT_ASSERT4( B, 21.0F, 22.0F, 23.0F, 24.0F, "INTERLEAVE2() Inp B" );
+    PFFFT_ASSERT4( C, 11.0F, 21.0F, 12.0F, 22.0F, "INTERLEAVE2() Out C" );
+    PFFFT_ASSERT4( D, 13.0F, 23.0F, 14.0F, 24.0F, "INTERLEAVE2() Out D" );
+  }
+
+  {
+    v4sf_union A, B, C, D;
+    int k;
+    for ( k = 0; k < 4; ++k )  A.f[k] = 10 + k+1;
+    for ( k = 0; k < 4; ++k )  B.f[k] = 20 + k+1;
+    for ( k = 0; k < 4; ++k )  C.f[k] = 30 + k+1;
+    for ( k = 0; k < 4; ++k )  D.f[k] = 40 + k+1;
+
+    if (DbgOut) {
+      fprintf(DbgOut, "\ninput: A,B = {\n" );
+      fprintf(DbgOut, "  Inp A:  %f, %f, %f, %f\n", A.f[0], A.f[1], A.f[2], A.f[3] );
+      fprintf(DbgOut, "  Inp B:  %f, %f, %f, %f\n", B.f[0], B.f[1], B.f[2], B.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    UNINTERLEAVE2(A.v, B.v, C.v, D.v);
+    if (DbgOut) {
+      fprintf(DbgOut, "UNINTERLEAVE2(A,B, => C,D) => {\n" );
+      fprintf(DbgOut, "  Out C:  %f, %f, %f, %f\n", C.f[0], C.f[1], C.f[2], C.f[3] );
+      fprintf(DbgOut, "  Out D:  %f, %f, %f, %f\n", D.f[0], D.f[1], D.f[2], D.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    PFFFT_ASSERT4( A, 11.0F, 12.0F, 13.0F, 14.0F, "UNINTERLEAVE2() Inp A" );
+    PFFFT_ASSERT4( B, 21.0F, 22.0F, 23.0F, 24.0F, "UNINTERLEAVE2() Inp B" );
+    PFFFT_ASSERT4( C, 11.0F, 13.0F, 21.0F, 23.0F, "UNINTERLEAVE2() Out C" );
+    PFFFT_ASSERT4( D, 12.0F, 14.0F, 22.0F, 24.0F, "UNINTERLEAVE2() Out D" );
+  }
+
+  {
+    v4sf_union A, B, C, D;
+    int k;
+    for ( k = 0; k < 4; ++k )  A.f[k] = 10 + k+1;
+    for ( k = 0; k < 4; ++k )  B.f[k] = 20 + k+1;
+    for ( k = 0; k < 4; ++k )  C.f[k] = 30 + k+1;
+    for ( k = 0; k < 4; ++k )  D.f[k] = 40 + k+1;
+
+    if (DbgOut) {
+      fprintf(DbgOut, "\ninput: A,B,C,D = {\n" );
+      fprintf(DbgOut, "  Inp A:  %f, %f, %f, %f\n", A.f[0], A.f[1], A.f[2], A.f[3] );
+      fprintf(DbgOut, "  Inp B:  %f, %f, %f, %f\n", B.f[0], B.f[1], B.f[2], B.f[3] );
+      fprintf(DbgOut, "  Inp C:  %f, %f, %f, %f\n", C.f[0], C.f[1], C.f[2], C.f[3] );
+      fprintf(DbgOut, "  Inp D:  %f, %f, %f, %f\n", D.f[0], D.f[1], D.f[2], D.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    VTRANSPOSE4(A.v, B.v, C.v, D.v);
+    if (DbgOut) {
+      fprintf(DbgOut, "VTRANSPOSE4(A,B,C,D) => {\n" );
+      fprintf(DbgOut, "  Out A:  %f, %f, %f, %f\n", A.f[0], A.f[1], A.f[2], A.f[3] );
+      fprintf(DbgOut, "  Out B:  %f, %f, %f, %f\n", B.f[0], B.f[1], B.f[2], B.f[3] );
+      fprintf(DbgOut, "  Out C:  %f, %f, %f, %f\n", C.f[0], C.f[1], C.f[2], C.f[3] );
+      fprintf(DbgOut, "  Out D:  %f, %f, %f, %f\n", D.f[0], D.f[1], D.f[2], D.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    PFFFT_ASSERT4( A, 11.0F, 21.0F, 31.0F, 41.0F, "VTRANSPOSE4(): Out A" );
+    PFFFT_ASSERT4( B, 12.0F, 22.0F, 32.0F, 42.0F, "VTRANSPOSE4(): Out B" );
+    PFFFT_ASSERT4( C, 13.0F, 23.0F, 33.0F, 43.0F, "VTRANSPOSE4(): Out C" );
+    PFFFT_ASSERT4( D, 14.0F, 24.0F, 34.0F, 44.0F, "VTRANSPOSE4(): Out D" );
+  }
+
+  {
+    v4sf_union A, B, C;
+    int k;
+    for ( k = 0; k < 4; ++k )  A.f[k] = 10 + k+1;
+    for ( k = 0; k < 4; ++k )  B.f[k] = 20 + k+1;
+    for ( k = 0; k < 4; ++k )  C.f[k] = 30 + k+1;
+
+    if (DbgOut) {
+      fprintf(DbgOut, "\ninput: A,B = {\n" );
+      fprintf(DbgOut, "  Inp A:  %f, %f, %f, %f\n", A.f[0], A.f[1], A.f[2], A.f[3] );
+      fprintf(DbgOut, "  Inp B:  %f, %f, %f, %f\n", B.f[0], B.f[1], B.f[2], B.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    C.v = VSWAPHL(A.v, B.v);
+    if (DbgOut) {
+      fprintf(DbgOut, "C = VSWAPHL(A,B) => {\n" );
+      fprintf(DbgOut, "  Out C:  %f, %f, %f, %f\n", C.f[0], C.f[1], C.f[2], C.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    PFFFT_ASSERT4( A, 11.0F, 12.0F, 13.0F, 14.0F, "VSWAPHL(): Inp A" );
+    PFFFT_ASSERT4( B, 21.0F, 22.0F, 23.0F, 24.0F, "VSWAPHL(): Inp B" );
+    PFFFT_ASSERT4( C, 21.0F, 22.0F, 13.0F, 14.0F, "VSWAPHL(): Out C" );
+  }
+
+  {
+    v4sf_union A, C;
+    int k;
+    for ( k = 0; k < 4; ++k )  A.f[k] = 10 + k+1;
+    for ( k = 0; k < 4; ++k )  C.f[k] = 30 + k+1;
+
+    if (DbgOut) {
+      fprintf(DbgOut, "\ninput: A = {\n" );
+      fprintf(DbgOut, "  Inp A:  %f, %f, %f, %f\n", A.f[0], A.f[1], A.f[2], A.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    C.v = VREV_S(A.v);
+    if (DbgOut) {
+      fprintf(DbgOut, "C = VREV_S(A) => {\n" );
+      fprintf(DbgOut, "  Out C:  %f, %f, %f, %f\n", C.f[0], C.f[1], C.f[2], C.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    PFFFT_ASSERT4( A, 11.0F, 12.0F, 13.0F, 14.0F, "VREV_S(): Inp A" );
+    PFFFT_ASSERT4( C, 14.0F, 13.0F, 12.0F, 11.0F, "VREV_S(): Out C" );
+  }
+
+  {
+    v4sf_union A, C;
+    int k;
+    for ( k = 0; k < 4; ++k )  A.f[k] = 10 + k+1;
+
+    if (DbgOut) {
+      fprintf(DbgOut, "\ninput: A = {\n" );
+      fprintf(DbgOut, "  Inp A:  %f, %f, %f, %f\n", A.f[0], A.f[1], A.f[2], A.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    C.v = VREV_C(A.v);
+    if (DbgOut) {
+      fprintf(DbgOut, "C = VREV_C(A) => {\n" );
+      fprintf(DbgOut, "  Out C:  %f, %f, %f, %f\n", C.f[0], C.f[1], C.f[2], C.f[3] );
+      fprintf(DbgOut, "}\n" );
+    }
+    PFFFT_ASSERT4( A, 11.0F, 12.0F, 13.0F, 14.0F, "VREV_C(): Inp A" );
+    PFFFT_ASSERT4( C, 13.0F, 14.0F, 11.0F, 12.0F, "VREV_C(): Out C" );
+  }
+
+  return numErrs;
+}
+
+#else  /* if ( SIMD_SZ == 4 ) */
+
+void FUNC_VALIDATE_SIMD_A()
+{
+}
+
+int FUNC_VALIDATE_SIMD_EX(FILE * DbgOut)
+{
+  return -1;
+}
+
+#endif  /* end if ( SIMD_SZ == 4 ) */
+
diff --git a/plots.sh b/plots.sh
new file mode 100755
index 0000000..c09affe
--- /dev/null
+++ b/plots.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+OUTPNG="1"
+W="1024"
+H="768"
+PTS="20"
+LWS="20"
+
+for f in *-4-*.csv *-6-*.csv; do
+  b=$(basename "$f" ".csv")
+  #echo $b
+  LASTCOL="$(head -n 1 "$f" | sed 's/,/,\n/g' | grep -c ',')"
+  echo "${b}: last column is $LASTCOL"
+  if [ $(echo "$b" |grep -c -- "-1-") -gt 0 ]; then
+    YL="duration in ms; less is better"
+  elif [ $(echo "$b" |grep -c -- "-4-") -gt 0 ]; then
+    YL="duration relative to pffft; less is better"
+  else
+    YL=""
+  fi
+
+  E=""
+  if [ "${OUTPNG}" = "1" ]; then
+    E="set terminal png size $W,$H"
+    E="${E} ; set output '${b}.png'"
+  fi
+  if [ -z "${E}" ]; then
+    E="set key outside"
+  else
+    E="${E} ; set key outside"
+  fi
+  E="${E} ; set datafile separator ','"
+  E="${E} ; set title '${b}'"
+  E="${E} ; set xlabel 'fft order: fft size N = 2\\^order'"
+  if [ ! -z "${YL}" ]; then
+    #echo "  setting  Y label to ${YL}"
+    E="${E} ; set ylabel '${YL}'"
+  fi
+  # note: line styles set this way have no effect on 'plot for [col=...]',
+  # so the loop below stays disabled:
+  #for LNO in $(seq 1 ${LASTCOL}) ; do
+  #  E="${E} ; set style line ${LNO} ps ${PTS} lw ${LWS}"
+  #done
+  E="${E} ; plot for [col=3:${LASTCOL}] '${f}' using 2:col with lines title columnhead"
+
+  if [ "${OUTPNG}" = "1" ]; then
+    gnuplot -e "${E}"
+  else
+    gnuplot -e "${E}" --persist
+  fi
+done
diff --git a/simd/pf_altivec_float.h b/simd/pf_altivec_float.h
new file mode 100644
index 0000000..30e3f18
--- /dev/null
+++ b/simd/pf_altivec_float.h
@@ -0,0 +1,81 @@
+
+/* Copyright (c) 2013  Julien Pommier ( pommier@modartt.com )
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of NCAR's Computational and Information Systems
+   Laboratory, the University Corporation for Atmospheric Research,
+   nor the names of its sponsors or contributors may be used to
+   endorse or promote products derived from this Software without
+   specific prior written permission.
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+*/
+
+#ifndef PF_ALTIVEC_FLT_H
+#define PF_ALTIVEC_FLT_H
+
+/*
+   Altivec support macros
+*/
+#if !defined(PFFFT_SIMD_DISABLE) && (defined(__ppc__) || defined(__ppc64__))
+#pragma message __FILE__ ": ALTIVEC float macros are defined"
+typedef vector float v4sf;
+
+#  define SIMD_SZ 4
+
+typedef union v4sf_union {
+  v4sf  v;
+  float f[SIMD_SZ];
+} v4sf_union;
+
+#  define VREQUIRES_ALIGN 1  /* not sure if this is really required */
+#  define VARCH "ALTIVEC"
+#  define VZERO() ((vector float) vec_splat_u8(0))
+#  define VMUL(a,b) vec_madd(a,b, VZERO())
+#  define VADD(a,b) vec_add(a,b)
+#  define VMADD(a,b,c) vec_madd(a,b,c)
+#  define VSUB(a,b) vec_sub(a,b)
+inline v4sf ld_ps1(const float *p) { v4sf v=vec_lde(0,p); return vec_splat(vec_perm(v, v, vec_lvsl(0, p)), 0); }
+#  define LD_PS1(p) ld_ps1(&p)
+#  define INTERLEAVE2(in1, in2, out1, out2) { v4sf tmp__ = vec_mergeh(in1, in2); out2 = vec_mergel(in1, in2); out1 = tmp__; }
+#  define UNINTERLEAVE2(in1, in2, out1, out2) {                           \
+    vector unsigned char vperm1 =  (vector unsigned char)(0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27); \
+    vector unsigned char vperm2 =  (vector unsigned char)(4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31); \
+    v4sf tmp__ = vec_perm(in1, in2, vperm1); out2 = vec_perm(in1, in2, vperm2); out1 = tmp__; \
+  }
+#  define VTRANSPOSE4(x0,x1,x2,x3) {              \
+    v4sf y0 = vec_mergeh(x0, x2);               \
+    v4sf y1 = vec_mergel(x0, x2);               \
+    v4sf y2 = vec_mergeh(x1, x3);               \
+    v4sf y3 = vec_mergel(x1, x3);               \
+    x0 = vec_mergeh(y0, y2);                    \
+    x1 = vec_mergel(y0, y2);                    \
+    x2 = vec_mergeh(y1, y3);                    \
+    x3 = vec_mergel(y1, y3);                    \
+  }
+#  define VSWAPHL(a,b) vec_perm(a,b, (vector unsigned char)(16,17,18,19,20,21,22,23,8,9,10,11,12,13,14,15))
+#  define VALIGNED(ptr) ((((uintptr_t)(ptr)) & 0xF) == 0)
+
+#endif
+
+#endif /* PF_ALTIVEC_FLT_H */
+
diff --git a/simd/pf_avx_double.h b/simd/pf_avx_double.h
new file mode 100644
index 0000000..62c9123
--- /dev/null
+++ b/simd/pf_avx_double.h
@@ -0,0 +1,145 @@
+/*
+   Copyright (c) 2020  Dario Mambro ( dario.mambro@gmail.com )
+*/
+
+/* Copyright (c) 2013  Julien Pommier ( pommier@modartt.com )
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of NCAR's Computational and Information Systems
+   Laboratory, the University Corporation for Atmospheric Research,
+   nor the names of its sponsors or contributors may be used to
+   endorse or promote products derived from this Software without
+   specific prior written permission.
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+*/
+
+#ifndef PF_AVX_DBL_H
+#define PF_AVX_DBL_H
+
+/*
+   vector support macros: the rest of the code is independent of
+   AVX -- adding support for other platforms with 4-element
+   vectors should be limited to these macros
+*/
+
+
+/*
+  AVX support macros
+*/
+#if !defined(SIMD_SZ) && !defined(PFFFT_SIMD_DISABLE) && defined(__AVX__)
+#pragma message __FILE__ ": AVX macros are defined"
+
+#include <immintrin.h>
+typedef __m256d v4sf;
+
+/* 4 doubles by simd vector */
+#  define SIMD_SZ 4
+
+typedef union v4sf_union {
+  v4sf  v;
+  double f[SIMD_SZ];
+} v4sf_union;
+
+#  define VARCH "AVX"
+#  define VREQUIRES_ALIGN 1
+#  define VZERO() _mm256_setzero_pd()
+#  define VMUL(a,b) _mm256_mul_pd(a,b)
+#  define VADD(a,b) _mm256_add_pd(a,b)
+#  define VMADD(a,b,c) _mm256_add_pd(_mm256_mul_pd(a,b), c)
+#  define VSUB(a,b) _mm256_sub_pd(a,b)
+#  define LD_PS1(p) _mm256_set1_pd(p)
+#  define VLOAD_UNALIGNED(ptr)  _mm256_loadu_pd(ptr)
+#  define VLOAD_ALIGNED(ptr)    _mm256_load_pd(ptr)
+
+/* INTERLEAVE2 (in1, in2, out1, out2) pseudo code:
+out1 = [ in1[0], in2[0], in1[1], in2[1] ]
+out2 = [ in1[2], in2[2], in1[3], in2[3] ]
+*/
+#  define INTERLEAVE2(in1, in2, out1, out2) {							\
+	__m128d low1__ = _mm256_castpd256_pd128(in1);						\
+	__m128d low2__ = _mm256_castpd256_pd128(in2);						\
+	__m128d high1__ = _mm256_extractf128_pd(in1, 1);					\
+	__m128d high2__ = _mm256_extractf128_pd(in2, 1);					\
+	__m256d tmp__ = _mm256_insertf128_pd(								\
+		_mm256_castpd128_pd256(_mm_shuffle_pd(low1__, low2__, 0)),		\
+		_mm_shuffle_pd(low1__, low2__, 3),								\
+		1);																\
+	out2 = _mm256_insertf128_pd(										\
+		_mm256_castpd128_pd256(_mm_shuffle_pd(high1__, high2__, 0)),	\
+		_mm_shuffle_pd(high1__, high2__, 3),							\
+		1);																\
+	out1 = tmp__;														\
+}
+
+/*UNINTERLEAVE2(in1, in2, out1, out2) pseudo code:
+out1 = [ in1[0], in1[2], in2[0], in2[2] ]
+out2 = [ in1[1], in1[3], in2[1], in2[3] ]
+*/
+#  define UNINTERLEAVE2(in1, in2, out1, out2) {							\
+	__m128d low1__ = _mm256_castpd256_pd128(in1);						\
+	__m128d low2__ = _mm256_castpd256_pd128(in2);						\
+	__m128d high1__ = _mm256_extractf128_pd(in1, 1);					\
+	__m128d high2__ = _mm256_extractf128_pd(in2, 1); 					\
+	__m256d tmp__ = _mm256_insertf128_pd(								\
+		_mm256_castpd128_pd256(_mm_shuffle_pd(low1__, high1__, 0)),		\
+		_mm_shuffle_pd(low2__, high2__, 0),								\
+		1);																\
+	out2 = _mm256_insertf128_pd(										\
+		_mm256_castpd128_pd256(_mm_shuffle_pd(low1__, high1__, 3)),		\
+		_mm_shuffle_pd(low2__, high2__, 3),								\
+		1);																\
+	out1 = tmp__;														\
+}
+
+#  define VTRANSPOSE4(row0, row1, row2, row3) {				\
+        __m256d tmp3, tmp2, tmp1, tmp0;                     \
+                                                            \
+        tmp0 = _mm256_shuffle_pd((row0),(row1), 0x0);       \
+        tmp2 = _mm256_shuffle_pd((row0),(row1), 0xF);       \
+        tmp1 = _mm256_shuffle_pd((row2),(row3), 0x0);       \
+        tmp3 = _mm256_shuffle_pd((row2),(row3), 0xF);       \
+                                                            \
+        (row0) = _mm256_permute2f128_pd(tmp0, tmp1, 0x20);	\
+        (row1) = _mm256_permute2f128_pd(tmp2, tmp3, 0x20);  \
+        (row2) = _mm256_permute2f128_pd(tmp0, tmp1, 0x31);  \
+        (row3) = _mm256_permute2f128_pd(tmp2, tmp3, 0x31);  \
+    }
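+/* A minimal sketch of how INTERLEAVE2/UNINTERLEAVE2/VTRANSPOSE4 behave,
+   kept under '#if 0': UNINTERLEAVE2 undoes INTERLEAVE2, and VTRANSPOSE4
+   swaps rows and columns of a 4x4 matrix of doubles. Assumes compilation
+   with AVX enabled (e.g. gcc -mavx) so the macros above are active. */
+#if 0
+#include <stdio.h>
+static void avx_macro_sketch(void)
+{
+  v4sf_union a, b, c, d, e, f, r[4];
+  int i, j;
+  for (j = 0; j < 4; ++j) { a.f[j] = (double)j; b.f[j] = 10.0 + j; }
+  INTERLEAVE2(a.v, b.v, c.v, d.v);    /* c = {a0,b0,a1,b1}, d = {a2,b2,a3,b3} */
+  UNINTERLEAVE2(c.v, d.v, e.v, f.v);  /* round-trip: e == a, f == b */
+  for (j = 0; j < 4; ++j)
+    if (e.f[j] != a.f[j] || f.f[j] != b.f[j])
+      printf("round-trip mismatch at lane %d\n", j);
+  for (i = 0; i < 4; ++i)
+    for (j = 0; j < 4; ++j)
+      r[i].f[j] = 10.0 * i + j;       /* element (i,j) */
+  VTRANSPOSE4(r[0].v, r[1].v, r[2].v, r[3].v);
+  for (i = 0; i < 4; ++i)
+    for (j = 0; j < 4; ++j)
+      if (r[i].f[j] != 10.0 * j + i)  /* now element (j,i) */
+        printf("transpose mismatch at %d,%d\n", i, j);
+}
+#endif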
+
+/*VSWAPHL(a, b) pseudo code:
+return [ b[0], b[1], a[2], a[3] ]
+*/
+#  define VSWAPHL(a,b)	\
+   _mm256_insertf128_pd(_mm256_castpd128_pd256(_mm256_castpd256_pd128(b)), _mm256_extractf128_pd(a, 1), 1)
+
+/* reverse/flip all floats */
+#  define VREV_S(a)    _mm256_insertf128_pd(_mm256_castpd128_pd256(_mm_permute_pd(_mm256_extractf128_pd(a, 1),1)), _mm_permute_pd(_mm256_castpd256_pd128(a), 1), 1)
+
+/* reverse/flip complex floats */
+#  define VREV_C(a)    _mm256_insertf128_pd(_mm256_castpd128_pd256(_mm256_extractf128_pd(a, 1)), _mm256_castpd256_pd128(a), 1)
+
+#  define VALIGNED(ptr) ((((uintptr_t)(ptr)) & 0x1F) == 0)
+
+#endif
+
+#endif /* PF_AVX_DBL_H */
+
diff --git a/simd/pf_double.h b/simd/pf_double.h
new file mode 100644
index 0000000..c6bac31
--- /dev/null
+++ b/simd/pf_double.h
@@ -0,0 +1,82 @@
+
+/* Copyright (c) 2013  Julien Pommier ( pommier@modartt.com )
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of NCAR's Computational and Information Systems
+   Laboratory, the University Corporation for Atmospheric Research,
+   nor the names of its sponsors or contributors may be used to
+   endorse or promote products derived from this Software without
+   specific prior written permission.
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+*/
+
+#ifndef PF_DBL_H
+#define PF_DBL_H
+
+#include <assert.h>
+#include <string.h>
+#include <stdint.h>
+
+
+/*
+ *  SIMD reference material:
+ *
+ * general SIMD introduction:
+ * https://www.linuxjournal.com/content/introduction-gcc-compiler-intrinsics-vector-processing
+ *
+ * SSE 1:
+ * https://software.intel.com/sites/landingpage/IntrinsicsGuide/
+ *
+ * ARM NEON:
+ * https://developer.arm.com/architectures/instruction-sets/simd-isas/neon/intrinsics
+ *
+ * Altivec:
+ * https://www.nxp.com/docs/en/reference-manual/ALTIVECPIM.pdf
+ * https://gcc.gnu.org/onlinedocs/gcc-4.9.2/gcc/PowerPC-AltiVec_002fVSX-Built-in-Functions.html
+ * better one?
+ *
+ */
+
+typedef double vsfscalar;
+
+#include "pf_avx_double.h"
+
+#ifndef SIMD_SZ
+#  if !defined(PFFFT_SIMD_DISABLE)
+#    warning "building double with SIMD disabled!"
+#    define PFFFT_SIMD_DISABLE /* fallback to scalar code */
+#  endif
+#endif
+
+#include "pf_scalar_double.h"
+
+/* shortcuts for complex multiplications */
+#define VCPLXMUL(ar,ai,br,bi) { v4sf tmp; tmp=VMUL(ar,bi); ar=VMUL(ar,br); ar=VSUB(ar,VMUL(ai,bi)); ai=VMUL(ai,br); ai=VADD(ai,tmp); }
+#define VCPLXMULCONJ(ar,ai,br,bi) { v4sf tmp; tmp=VMUL(ar,bi); ar=VMUL(ar,br); ar=VADD(ar,VMUL(ai,bi)); ai=VMUL(ai,br); ai=VSUB(ai,tmp); }
+#ifndef SVMUL
+/* multiply a scalar with a vector */
+#define SVMUL(f,v) VMUL(LD_PS1(f),v)
+#endif
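+
+/* The complex helpers above follow directly from
+   (ar + i*ai)*(br + i*bi) = (ar*br - ai*bi) + i*(ai*br + ar*bi);
+   VCPLXMULCONJ conjugates b, flipping the signs of the bi terms.
+   A scalar trace of VCPLXMUL for one lane (a sketch, kept under '#if 0'): */
+#if 0
+static void cplx_mul_ref(double ar, double ai, double br, double bi,
+                         double *re, double *im)
+{
+  double tmp = ar * bi;     /* tmp = ar*bi  (saved before ar is overwritten) */
+  ar = ar * br;             /* ar  = ar*br                                   */
+  *re = ar - ai * bi;       /* re  = ar*br - ai*bi                           */
+  *im = ai * br + tmp;      /* im  = ai*br + ar*bi                           */
+}
+#endif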
+
+#endif /* PF_DBL_H */
+
diff --git a/simd/pf_float.h b/simd/pf_float.h
new file mode 100644
index 0000000..1798194
--- /dev/null
+++ b/simd/pf_float.h
@@ -0,0 +1,84 @@
+
+/* Copyright (c) 2013  Julien Pommier ( pommier@modartt.com )
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of NCAR's Computational and Information Systems
+   Laboratory, the University Corporation for Atmospheric Research,
+   nor the names of its sponsors or contributors may be used to
+   endorse or promote products derived from this Software without
+   specific prior written permission.
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+*/
+
+#ifndef PF_FLT_H
+#define PF_FLT_H
+
+#include <assert.h>
+#include <string.h>
+#include <stdint.h>
+
+
+/*
+ *  SIMD reference material:
+ *
+ * general SIMD introduction:
+ * https://www.linuxjournal.com/content/introduction-gcc-compiler-intrinsics-vector-processing
+ *
+ * SSE 1:
+ * https://software.intel.com/sites/landingpage/IntrinsicsGuide/
+ *
+ * ARM NEON:
+ * https://developer.arm.com/architectures/instruction-sets/simd-isas/neon/intrinsics
+ *
+ * Altivec:
+ * https://www.nxp.com/docs/en/reference-manual/ALTIVECPIM.pdf
+ * https://gcc.gnu.org/onlinedocs/gcc-4.9.2/gcc/PowerPC-AltiVec_002fVSX-Built-in-Functions.html
+ * better one?
+ *
+ */
+
+typedef float vsfscalar;
+
+#include "pf_sse1_float.h"
+#include "pf_neon_float.h"
+#include "pf_altivec_float.h"
+
+#ifndef SIMD_SZ
+#  if !defined(PFFFT_SIMD_DISABLE)
+#    warning "building float with SIMD disabled!"
+#    define PFFFT_SIMD_DISABLE /* fallback to scalar code */
+#  endif
+#endif
+
+#include "pf_scalar_float.h"
+
+/* shortcuts for complex multiplications */
+#define VCPLXMUL(ar,ai,br,bi) { v4sf tmp; tmp=VMUL(ar,bi); ar=VMUL(ar,br); ar=VSUB(ar,VMUL(ai,bi)); ai=VMUL(ai,br); ai=VADD(ai,tmp); }
+#define VCPLXMULCONJ(ar,ai,br,bi) { v4sf tmp; tmp=VMUL(ar,bi); ar=VMUL(ar,br); ar=VADD(ar,VMUL(ai,bi)); ai=VMUL(ai,br); ai=VSUB(ai,tmp); }
+#ifndef SVMUL
+/* multiply a scalar with a vector */
+#define SVMUL(f,v) VMUL(LD_PS1(f),v)
+#endif
+
+#endif /* PF_FLT_H */
+
diff --git a/simd/pf_neon_float.h b/simd/pf_neon_float.h
new file mode 100644
index 0000000..8c31fcd
--- /dev/null
+++ b/simd/pf_neon_float.h
@@ -0,0 +1,87 @@
+
+/* Copyright (c) 2013  Julien Pommier ( pommier@modartt.com )
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of NCAR's Computational and Information Systems
+   Laboratory, the University Corporation for Atmospheric Research,
+   nor the names of its sponsors or contributors may be used to
+   endorse or promote products derived from this Software without
+   specific prior written permission.
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+*/
+
+#ifndef PF_NEON_FLT_H
+#define PF_NEON_FLT_H
+
+/*
+  ARM NEON support macros
+*/
+#if !defined(PFFFT_SIMD_DISABLE) && defined(PFFFT_ENABLE_NEON) && (defined(__arm__) || defined(__aarch64__) || defined(__arm64__))
+#pragma message __FILE__ ": ARM NEON macros are defined"
+
+#  include <arm_neon.h>
+typedef float32x4_t v4sf;
+
+#  define SIMD_SZ 4
+
+typedef union v4sf_union {
+  v4sf  v;
+  float f[SIMD_SZ];
+} v4sf_union;
+
+#  define VARCH "NEON"
+#  define VREQUIRES_ALIGN 0  /* usually no alignment required */
+#  define VZERO() vdupq_n_f32(0)
+#  define VMUL(a,b) vmulq_f32(a,b)
+#  define VADD(a,b) vaddq_f32(a,b)
+#  define VMADD(a,b,c) vmlaq_f32(c,a,b)
+#  define VSUB(a,b) vsubq_f32(a,b)
+#  define LD_PS1(p) vld1q_dup_f32(&(p))
+#  define VLOAD_UNALIGNED(ptr)  (*((v4sf*)(ptr)))
+#  define VLOAD_ALIGNED(ptr)    (*((v4sf*)(ptr)))
+#  define INTERLEAVE2(in1, in2, out1, out2) { float32x4x2_t tmp__ = vzipq_f32(in1,in2); out1=tmp__.val[0]; out2=tmp__.val[1]; }
+#  define UNINTERLEAVE2(in1, in2, out1, out2) { float32x4x2_t tmp__ = vuzpq_f32(in1,in2); out1=tmp__.val[0]; out2=tmp__.val[1]; }
+#  define VTRANSPOSE4(x0,x1,x2,x3) {                                    \
+    float32x4x2_t t0_ = vzipq_f32(x0, x2);                              \
+    float32x4x2_t t1_ = vzipq_f32(x1, x3);                              \
+    float32x4x2_t u0_ = vzipq_f32(t0_.val[0], t1_.val[0]);              \
+    float32x4x2_t u1_ = vzipq_f32(t0_.val[1], t1_.val[1]);              \
+    x0 = u0_.val[0]; x1 = u0_.val[1]; x2 = u1_.val[0]; x3 = u1_.val[1]; \
+  }
+// marginally faster version
+//#  define VTRANSPOSE4(x0,x1,x2,x3) { asm("vtrn.32 %q0, %q1;\n vtrn.32 %q2,%q3\n vswp %f0,%e2\n vswp %f1,%e3" : "+w"(x0), "+w"(x1), "+w"(x2), "+w"(x3)::); }
+#  define VSWAPHL(a,b) vcombine_f32(vget_low_f32(b), vget_high_f32(a))
+
+/* reverse/flip all floats */
+#  define VREV_S(a)    vcombine_f32(vrev64_f32(vget_high_f32(a)), vrev64_f32(vget_low_f32(a)))
+/* reverse/flip complex floats: vextq_f32(a,a,2) rotates the vector by two lanes */
+#  define VREV_C(a)    vextq_f32(a, a, 2)
+
+#  define VALIGNED(ptr) ((((uintptr_t)(ptr)) & 0x3) == 0)
+
+#else
+/* #pragma message __FILE__ ": ARM NEON macros are not defined" */
+#endif
+
+#endif /* PF_NEON_FLT_H */
+
diff --git a/simd/pf_scalar_double.h b/simd/pf_scalar_double.h
new file mode 100644
index 0000000..8c88c05
--- /dev/null
+++ b/simd/pf_scalar_double.h
@@ -0,0 +1,185 @@
+
+/* Copyright (c) 2013  Julien Pommier ( pommier@modartt.com )
+   Copyright (c) 2020  Hayati Ayguen ( h_ayguen@web.de )
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of NCAR's Computational and Information Systems
+   Laboratory, the University Corporation for Atmospheric Research,
+   nor the names of its sponsors or contributors may be used to
+   endorse or promote products derived from this Software without
+   specific prior written permission.
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+*/
+
+#ifndef PF_SCAL_DBL_H
+#define PF_SCAL_DBL_H
+
+/*
+  fallback mode(s) for situations where SSE/AVX/NEON/Altivec are not available: use scalar mode instead
+*/
+
+#if !defined(SIMD_SZ) && defined(PFFFT_SCALVEC_ENABLED)
+#pragma message __FILE__ ": double SCALAR4 macros are defined"
+
+typedef struct {
+  vsfscalar a;
+  vsfscalar b;
+  vsfscalar c;
+  vsfscalar d;
+} v4sf;
+
+#  define SIMD_SZ 4
+
+typedef union v4sf_union {
+  v4sf  v;
+  vsfscalar f[SIMD_SZ];
+} v4sf_union;
+
+#  define VARCH "4xScalar"
+#  define VREQUIRES_ALIGN 0
+
+  static ALWAYS_INLINE(v4sf) VZERO() {
+    v4sf r = { 0.0, 0.0, 0.0, 0.0 };
+    return r;
+  }
+
+  static ALWAYS_INLINE(v4sf) VMUL(v4sf A, v4sf B) {
+    v4sf r = { A.a * B.a, A.b * B.b, A.c * B.c, A.d * B.d };
+    return r;
+  }
+
+  static ALWAYS_INLINE(v4sf) VADD(v4sf A, v4sf B) {
+    v4sf r = { A.a + B.a, A.b + B.b, A.c + B.c, A.d + B.d };
+    return r;
+  }
+
+  static ALWAYS_INLINE(v4sf) VMADD(v4sf A, v4sf B, v4sf C) {
+    v4sf r = { A.a * B.a + C.a, A.b * B.b + C.b, A.c * B.c + C.c, A.d * B.d + C.d };
+    return r;
+  }
+
+  static ALWAYS_INLINE(v4sf) VSUB(v4sf A, v4sf B) {
+    v4sf r = { A.a - B.a, A.b - B.b, A.c - B.c, A.d - B.d };
+    return r;
+  }
+
+  static ALWAYS_INLINE(v4sf) LD_PS1(vsfscalar v) {
+    v4sf r = { v, v, v, v };
+    return r;
+  }
+
+#  define VLOAD_UNALIGNED(ptr)  (*((v4sf*)(ptr)))
+
+#  define VLOAD_ALIGNED(ptr)    (*((v4sf*)(ptr)))
+
+#  define VALIGNED(ptr) ((((uintptr_t)(ptr)) & (sizeof(v4sf)-1) ) == 0)
+
+
+  /* INTERLEAVE2() */
+  #define INTERLEAVE2( A, B, C, D) \
+  do { \
+    v4sf Cr = { A.a, B.a, A.b, B.b }; \
+    v4sf Dr = { A.c, B.c, A.d, B.d }; \
+    C = Cr; \
+    D = Dr; \
+  } while (0)
+
+
+  /* UNINTERLEAVE2() */
+  #define UNINTERLEAVE2(A, B, C, D) \
+  do { \
+    v4sf Cr = { A.a, A.c, B.a, B.c }; \
+    v4sf Dr = { A.b, A.d, B.b, B.d }; \
+    C = Cr; \
+    D = Dr; \
+  } while (0)
+
+
+  /* VTRANSPOSE4() */
+  #define VTRANSPOSE4(A, B, C, D) \
+  do { \
+    v4sf Ar = { A.a, B.a, C.a, D.a }; \
+    v4sf Br = { A.b, B.b, C.b, D.b }; \
+    v4sf Cr = { A.c, B.c, C.c, D.c }; \
+    v4sf Dr = { A.d, B.d, C.d, D.d }; \
+    A = Ar; \
+    B = Br; \
+    C = Cr; \
+    D = Dr; \
+  } while (0)
+
+
+  /* VSWAPHL() */
+  static ALWAYS_INLINE(v4sf) VSWAPHL(v4sf A, v4sf B) {
+    v4sf r = { B.a, B.b, A.c, A.d };
+    return r;
+  }
+
+
+  /* reverse/flip all floats */
+  static ALWAYS_INLINE(v4sf) VREV_S(v4sf A) {
+    v4sf r = { A.d, A.c, A.b, A.a };
+    return r;
+  }
+
+  /* reverse/flip complex floats */
+  static ALWAYS_INLINE(v4sf) VREV_C(v4sf A) {
+    v4sf r = { A.c, A.d, A.a, A.b };
+    return r;
+  }
+
+#else
+/* #pragma message __FILE__ ": double SCALAR4 macros are not defined" */
+#endif
+
+
+#if !defined(SIMD_SZ)
+#pragma message __FILE__ ": double SCALAR1 macros are defined"
+typedef vsfscalar v4sf;
+
+#  define SIMD_SZ 1
+
+typedef union v4sf_union {
+  v4sf  v;
+  vsfscalar f[SIMD_SZ];
+} v4sf_union;
+
+#  define VARCH "Scalar"
+#  define VREQUIRES_ALIGN 0
+#  define VZERO() 0.0
+#  define VMUL(a,b) ((a)*(b))
+#  define VADD(a,b) ((a)+(b))
+#  define VMADD(a,b,c) ((a)*(b)+(c))
+#  define VSUB(a,b) ((a)-(b))
+#  define LD_PS1(p) (p)
+#  define VLOAD_UNALIGNED(ptr)  (*(ptr))
+#  define VLOAD_ALIGNED(ptr)    (*(ptr))
+#  define VALIGNED(ptr) ((((uintptr_t)(ptr)) & (sizeof(vsfscalar)-1) ) == 0)
+
+#else
+/* #pragma message __FILE__ ": double SCALAR1 macros are not defined" */
+#endif
+
+
+#endif /* PF_SCAL_DBL_H */
+
diff --git a/simd/pf_scalar_float.h b/simd/pf_scalar_float.h
new file mode 100644
index 0000000..7e5e894
--- /dev/null
+++ b/simd/pf_scalar_float.h
@@ -0,0 +1,185 @@
+
+/* Copyright (c) 2013  Julien Pommier ( pommier@modartt.com )
+   Copyright (c) 2020  Hayati Ayguen ( h_ayguen@web.de )
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of NCAR's Computational and Information Systems
+   Laboratory, the University Corporation for Atmospheric Research,
+   nor the names of its sponsors or contributors may be used to
+   endorse or promote products derived from this Software without
+   specific prior written permission.
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+*/
+
+#ifndef PF_SCAL_FLT_H
+#define PF_SCAL_FLT_H
+
+/*
+  fallback mode(s) for situations where SSE/AVX/NEON/Altivec are not available: use scalar mode instead
+*/
+
+#if !defined(SIMD_SZ) && defined(PFFFT_SCALVEC_ENABLED)
+#pragma message __FILE__ ": float SCALAR4 macros are defined"
+
+typedef struct {
+  vsfscalar a;
+  vsfscalar b;
+  vsfscalar c;
+  vsfscalar d;
+} v4sf;
+
+#  define SIMD_SZ 4
+
+typedef union v4sf_union {
+  v4sf  v;
+  vsfscalar f[SIMD_SZ];
+} v4sf_union;
+
+#  define VARCH "4xScalar"
+#  define VREQUIRES_ALIGN 0
+
+  static ALWAYS_INLINE(v4sf) VZERO() {
+    v4sf r = { 0.f, 0.f, 0.f, 0.f };
+    return r;
+  }
+
+  static ALWAYS_INLINE(v4sf) VMUL(v4sf A, v4sf B) {
+    v4sf r = { A.a * B.a, A.b * B.b, A.c * B.c, A.d * B.d };
+    return r;
+  }
+
+  static ALWAYS_INLINE(v4sf) VADD(v4sf A, v4sf B) {
+    v4sf r = { A.a + B.a, A.b + B.b, A.c + B.c, A.d + B.d };
+    return r;
+  }
+
+  static ALWAYS_INLINE(v4sf) VMADD(v4sf A, v4sf B, v4sf C) {
+    v4sf r = { A.a * B.a + C.a, A.b * B.b + C.b, A.c * B.c + C.c, A.d * B.d + C.d };
+    return r;
+  }
+
+  static ALWAYS_INLINE(v4sf) VSUB(v4sf A, v4sf B) {
+    v4sf r = { A.a - B.a, A.b - B.b, A.c - B.c, A.d - B.d };
+    return r;
+  }
+
+  static ALWAYS_INLINE(v4sf) LD_PS1(vsfscalar v) {
+    v4sf r = { v, v, v, v };
+    return r;
+  }
+
+#  define VLOAD_UNALIGNED(ptr)  (*((v4sf*)(ptr)))
+
+#  define VLOAD_ALIGNED(ptr)    (*((v4sf*)(ptr)))
+
+#  define VALIGNED(ptr) ((((uintptr_t)(ptr)) & (sizeof(v4sf)-1) ) == 0)
+
+
+  /* INTERLEAVE2() */
+  #define INTERLEAVE2( A, B, C, D) \
+  do { \
+    v4sf Cr = { A.a, B.a, A.b, B.b }; \
+    v4sf Dr = { A.c, B.c, A.d, B.d }; \
+    C = Cr; \
+    D = Dr; \
+  } while (0)
+
+
+  /* UNINTERLEAVE2() */
+  #define UNINTERLEAVE2(A, B, C, D) \
+  do { \
+    v4sf Cr = { A.a, A.c, B.a, B.c }; \
+    v4sf Dr = { A.b, A.d, B.b, B.d }; \
+    C = Cr; \
+    D = Dr; \
+  } while (0)
+
+
+  /* VTRANSPOSE4() */
+  #define VTRANSPOSE4(A, B, C, D) \
+  do { \
+    v4sf Ar = { A.a, B.a, C.a, D.a }; \
+    v4sf Br = { A.b, B.b, C.b, D.b }; \
+    v4sf Cr = { A.c, B.c, C.c, D.c }; \
+    v4sf Dr = { A.d, B.d, C.d, D.d }; \
+    A = Ar; \
+    B = Br; \
+    C = Cr; \
+    D = Dr; \
+  } while (0)
+
+
+  /* VSWAPHL() */
+  static ALWAYS_INLINE(v4sf) VSWAPHL(v4sf A, v4sf B) {
+    v4sf r = { B.a, B.b, A.c, A.d };
+    return r;
+  }
+
+
+  /* reverse/flip all floats */
+  static ALWAYS_INLINE(v4sf) VREV_S(v4sf A) {
+    v4sf r = { A.d, A.c, A.b, A.a };
+    return r;
+  }
+
+  /* reverse/flip complex floats */
+  static ALWAYS_INLINE(v4sf) VREV_C(v4sf A) {
+    v4sf r = { A.c, A.d, A.a, A.b };
+    return r;
+  }
+
+#else
+/* #pragma message __FILE__ ": float SCALAR4 macros are not defined" */
+#endif
+
+
+#if !defined(SIMD_SZ)
+#pragma message __FILE__ ": float SCALAR1 macros are defined"
+typedef vsfscalar v4sf;
+
+#  define SIMD_SZ 1
+
+typedef union v4sf_union {
+  v4sf  v;
+  vsfscalar f[SIMD_SZ];
+} v4sf_union;
+
+#  define VARCH "Scalar"
+#  define VREQUIRES_ALIGN 0
+#  define VZERO() 0.f
+#  define VMUL(a,b) ((a)*(b))
+#  define VADD(a,b) ((a)+(b))
+#  define VMADD(a,b,c) ((a)*(b)+(c))
+#  define VSUB(a,b) ((a)-(b))
+#  define LD_PS1(p) (p)
+#  define VLOAD_UNALIGNED(ptr)  (*(ptr))
+#  define VLOAD_ALIGNED(ptr)    (*(ptr))
+#  define VALIGNED(ptr) ((((uintptr_t)(ptr)) & (sizeof(vsfscalar)-1) ) == 0)
+
+#else
+/* #pragma message __FILE__ ": float SCALAR1 macros are not defined" */
+#endif
+
+
+#endif /* PF_SCAL_FLT_H */
+
diff --git a/simd/pf_sse1_float.h b/simd/pf_sse1_float.h
new file mode 100644
index 0000000..808350a
--- /dev/null
+++ b/simd/pf_sse1_float.h
@@ -0,0 +1,82 @@
+
+/* Copyright (c) 2013  Julien Pommier ( pommier@modartt.com )
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of NCAR's Computational and Information Systems
+   Laboratory, the University Corporation for Atmospheric Research,
+   nor the names of its sponsors or contributors may be used to
+   endorse or promote products derived from this Software without
+   specific prior written permission.
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+*/
+
+#ifndef PF_SSE1_FLT_H
+#define PF_SSE1_FLT_H
+
+/*
+  SSE1 support macros
+*/
+#if !defined(SIMD_SZ) && !defined(PFFFT_SIMD_DISABLE) && (defined(__x86_64__) || defined(_M_X64) || defined(i386) || defined(_M_IX86))
+#pragma message __FILE__ ": SSE1 float macros are defined"
+
+#include <xmmintrin.h>
+typedef __m128 v4sf;
+
+/* 4 floats by simd vector -- this is pretty much hardcoded in the preprocess/finalize functions
+ *  anyway, so expect some work if you want to enable AVX with its 256-bit vectors. */
+#  define SIMD_SZ 4
+
+typedef union v4sf_union {
+  v4sf  v;
+  float f[SIMD_SZ];
+} v4sf_union;
+
+#  define VARCH "SSE1"
+#  define VREQUIRES_ALIGN 1
+#  define VZERO() _mm_setzero_ps()
+#  define VMUL(a,b) _mm_mul_ps(a,b)
+#  define VADD(a,b) _mm_add_ps(a,b)
+#  define VMADD(a,b,c) _mm_add_ps(_mm_mul_ps(a,b), c)
+#  define VSUB(a,b) _mm_sub_ps(a,b)
+#  define LD_PS1(p) _mm_set1_ps(p)
+#  define VLOAD_UNALIGNED(ptr)  _mm_loadu_ps(ptr)
+#  define VLOAD_ALIGNED(ptr)    _mm_load_ps(ptr)
+
+#  define INTERLEAVE2(in1, in2, out1, out2) { v4sf tmp__ = _mm_unpacklo_ps(in1, in2); out2 = _mm_unpackhi_ps(in1, in2); out1 = tmp__; }
+#  define UNINTERLEAVE2(in1, in2, out1, out2) { v4sf tmp__ = _mm_shuffle_ps(in1, in2, _MM_SHUFFLE(2,0,2,0)); out2 = _mm_shuffle_ps(in1, in2, _MM_SHUFFLE(3,1,3,1)); out1 = tmp__; }
+#  define VTRANSPOSE4(x0,x1,x2,x3) _MM_TRANSPOSE4_PS(x0,x1,x2,x3)
+/* VSWAPHL(a, b) pseudo code: return [ b[0], b[1], a[2], a[3] ] */
+#  define VSWAPHL(a,b) _mm_shuffle_ps(b, a, _MM_SHUFFLE(3,2,1,0))
+
+/* reverse/flip all floats */
+#  define VREV_S(a)    _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,1,2,3))
+/* reverse/flip complex floats */
+#  define VREV_C(a)    _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,0,3,2))
+
+#  define VALIGNED(ptr) ((((uintptr_t)(ptr)) & 0xF) == 0)
+
+#else
+/* #pragma message __FILE__ ": SSE1 float macros are not defined" */
+#endif
+
+#endif /* PF_SSE1_FLT_H */
+
diff --git a/test_pffastconv.c b/test_pffastconv.c
new file mode 100644
index 0000000..90d36ca
--- /dev/null
+++ b/test_pffastconv.c
@@ -0,0 +1,915 @@
+/*
+  Copyright (c) 2013 Julien Pommier.
+  Copyright (c) 2019  Hayati Ayguen ( h_ayguen@web.de )
+ */
+
+#define _WANT_SNAN  1
+
+#include "pffft.h"
+#include "pffastconv.h"
+
+#include <math.h>
+#include <float.h>
+#include <limits.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <assert.h>
+#include <string.h>
+
+#ifdef HAVE_SYS_TIMES
+#  include <sys/times.h>
+#  include <unistd.h>
+#endif
+
+/* 
+   vector support macros: the rest of the code is independent of
+   SSE/Altivec/NEON -- adding support for other platforms with 4-element
+   vectors should be limited to these macros 
+*/
+#if 0
+#include "simd/pf_float.h"
+#endif
+
+#if defined(_MSC_VER)
+#  define RESTRICT __restrict
+#elif defined(__GNUC__)
+#  define RESTRICT __restrict
+#else
+#  define RESTRICT
+#endif
+
+
+#if defined(_MSC_VER)
+#pragma warning( disable : 4244 )
+#endif
+
+
+#ifdef SNANF
+  #define INVALID_FLOAT_VAL  SNANF
+#elif defined(SNAN)
+  #define INVALID_FLOAT_VAL  SNAN
+#elif defined(NAN)
+  #define INVALID_FLOAT_VAL  NAN
+#elif defined(INFINITY)
+  #define INVALID_FLOAT_VAL  INFINITY
+#else
+  #define INVALID_FLOAT_VAL  FLT_MAX
+#endif
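+
+/* INVALID_FLOAT_VAL is used as a sentinel: it is written just past the
+   filter taps and the input buffer, so that any out-of-bounds read poisons
+   the convolution output (NaN/Inf propagate through the arithmetic) and
+   shows up as a huge error against the reference implementation below. */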
+
+
+#if defined(HAVE_SYS_TIMES)
+  static inline double uclock_sec(void) {
+    static double ttclk = 0.;
+    struct tms t;
+    if (ttclk == 0.)
+      ttclk = sysconf(_SC_CLK_TCK);
+    times(&t);
+    /* use only the user time of this process - not realtime, which depends on the OS scheduler .. */
+    return ((double)t.tms_utime) / ttclk;
+  }
+#else
+  static double uclock_sec(void)
+  { return (double)clock() / (double)CLOCKS_PER_SEC; }
+#endif
+
+
+
+typedef int            (*pfnConvolution)  (void * setup, const float * X, int len, float *Y, const float *Yref, int applyFlush);
+typedef void*          (*pfnConvSetup)    (float *Hfwd, int Nf, int * BlkLen, int flags);
+typedef pfnConvolution (*pfnGetConvFnPtr) (void * setup);
+typedef void           (*pfnConvDestroy)  (void * setup);
+
+
+struct ConvSetup
+{
+  pfnConvolution pfn;
+  int N;
+  int B;
+  float * H;
+  int flags;
+};
+
+
+void * convSetupRev( float * H, int N, int * BlkLen, int flags )
+{
+  struct ConvSetup * s = pffastconv_malloc( sizeof(struct ConvSetup) );
+  int i, Nr = N;
+  if (flags & PFFASTCONV_CPLX_INP_OUT)
+    Nr *= 2;
+  Nr += 4;
+  s->pfn = NULL;
+  s->N = N;
+  s->B = *BlkLen;
+  s->H = pffastconv_malloc((unsigned)Nr * sizeof(float));
+  s->flags = flags;
+  memset(s->H, 0, (unsigned)Nr * sizeof(float));
+  if (flags & PFFASTCONV_CPLX_INP_OUT)
+  {
+    for ( i = 0; i < N; ++i ) {
+      s->H[2*(N-1 -i)  ] = H[i];
+      s->H[2*(N-1 -i)+1] = H[i];
+    }
+    /* simpler detection of overruns */
+    s->H[ 2*N    ] = INVALID_FLOAT_VAL;
+    s->H[ 2*N +1 ] = INVALID_FLOAT_VAL;
+    s->H[ 2*N +2 ] = INVALID_FLOAT_VAL;
+    s->H[ 2*N +3 ] = INVALID_FLOAT_VAL;
+  }
+  else
+  {
+    for ( i = 0; i < N; ++i )
+      s->H[ N-1 -i ] = H[i];
+    /* simpler detection of overruns */
+    s->H[ N    ] = INVALID_FLOAT_VAL;
+    s->H[ N +1 ] = INVALID_FLOAT_VAL;
+    s->H[ N +2 ] = INVALID_FLOAT_VAL;
+    s->H[ N +3 ] = INVALID_FLOAT_VAL;
+  }
+  return s;
+}
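+
+/* convSetupRev() stores the taps time-reversed, so the slow_conv_* loops can
+   run as a plain dot product  y[i] = sum_j X[i+j] * Hrev[j],  which with
+   Hrev[j] = H[N-1-j] equals the textbook FIR convolution. A scalar sketch
+   of this identity (real case), for reference only: */
+#if 0
+static float fir_ref(const float *X, const float *H, int N, int i)
+{
+  float sum = 0.f;
+  int k;
+  for (k = 0; k < N; ++k)
+    sum += H[k] * X[i + N - 1 - k];  /* == sum_j X[i+j] * Hrev[j] */
+  return sum;
+}
+#endif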
+
+void convDestroyRev( void * setup )
+{
+  struct ConvSetup * s = (struct ConvSetup*)setup;
+  pffastconv_free(s->H);
+  pffastconv_free(setup);
+}
+
+
+pfnConvolution ConvGetFnPtrRev( void * setup )
+{
+  struct ConvSetup * s = (struct ConvSetup*)setup;
+  if (!s)
+    return NULL;
+  return s->pfn;
+}
+
+
+void convSimdDestroy( void * setup )
+{
+  convDestroyRev(setup);
+}
+
+
+void * fastConvSetup( float * H, int N, int * BlkLen, int flags )
+{
+  void * p = pffastconv_new_setup( H, N, BlkLen, flags );
+  if (!p)
+    printf("fastConvSetup(N = %d, *BlkLen = %d, flags = %d) = NULL\n", N, *BlkLen, flags);
+  return p;
+}
+
+
+void fastConvDestroy( void * setup )
+{
+  pffastconv_destroy_setup( (PFFASTCONV_Setup*)setup );
+}
+
+
+
+int slow_conv_R(void * setup, const float * input, int len, float *output, const float *Yref, int applyFlush)
+{
+  struct ConvSetup * p = (struct ConvSetup*)setup;
+  const float * RESTRICT X = input;
+  const float * RESTRICT Hrev = p->H;
+  float * RESTRICT Y = output;
+  const int Nr = ((p->flags & PFFASTCONV_CPLX_INP_OUT) ? 2 : 1) * p->N;
+  const int lenNr = ((p->flags & PFFASTCONV_CPLX_INP_OUT) ? 2 : 1) * (len - p->N);
+  int i, j;
+  (void)Yref;
+  (void)applyFlush;
+
+  if (p->flags & PFFASTCONV_CPLX_INP_OUT)
+  {
+    for ( i = 0; i <= lenNr; i += 2 )
+    {
+      float sumRe = 0.0F, sumIm = 0.0F;
+      for ( j = 0; j < Nr; j += 2 )
+      {
+        sumRe += X[i+j  ] * Hrev[j];
+        sumIm += X[i+j+1] * Hrev[j+1];
+      }
+      Y[i  ] = sumRe;
+      Y[i+1] = sumIm;
+    }
+    return i/2;
+  }
+  else
+  {
+    for ( i = 0; i <= lenNr; ++i )
+    {
+      float sum = 0.0F;
+      for (j = 0; j < Nr; ++j )
+        sum += X[i+j]   * Hrev[j];
+      Y[i] = sum;
+    }
+    return i;
+  }
+}
+
+
+
+int slow_conv_A(void * setup, const float * input, int len, float *output, const float *Yref, int applyFlush)
+{
+  float sum[4];
+  struct ConvSetup * p = (struct ConvSetup*)setup;
+  const float * RESTRICT X = input;
+  const float * RESTRICT Hrev = p->H;
+  float * RESTRICT Y = output;
+  const int Nr = ((p->flags & PFFASTCONV_CPLX_INP_OUT) ? 2 : 1) * p->N;
+  const int lenNr = ((p->flags & PFFASTCONV_CPLX_INP_OUT) ? 2 : 1) * (len - p->N);
+  int i, j;
+  (void)Yref;
+  (void)applyFlush;
+
+  if (p->flags & PFFASTCONV_CPLX_INP_OUT)
+  {
+    if ( (Nr & 3) == 0 )
+    {
+      for ( i = 0; i <= lenNr; i += 2 )
+      {
+        sum[0] = sum[1] = sum[2] = sum[3] = 0.0F;
+        for (j = 0; j < Nr; j += 4 )
+        {
+          sum[0] += X[i+j]   * Hrev[j];
+          sum[1] += X[i+j+1] * Hrev[j+1];
+          sum[2] += X[i+j+2] * Hrev[j+2];
+          sum[3] += X[i+j+3] * Hrev[j+3];
+        }
+        Y[i  ] = sum[0] + sum[2];
+        Y[i+1] = sum[1] + sum[3];
+      }
+    }
+    else
+    {
+      const int M = Nr & (~3);
+      for ( i = 0; i <= lenNr; i += 2 )
+      {
+        float tailSumRe = 0.0F, tailSumIm = 0.0F;
+        sum[0] = sum[1] = sum[2] = sum[3] = 0.0F;
+        for (j = 0; j < M; j += 4 )
+        {
+          sum[0] += X[i+j  ] * Hrev[j  ];
+          sum[1] += X[i+j+1] * Hrev[j+1];
+          sum[2] += X[i+j+2] * Hrev[j+2];
+          sum[3] += X[i+j+3] * Hrev[j+3];
+        }
+        for ( ; j < Nr; j += 2 ) {
+          tailSumRe += X[i+j  ] * Hrev[j  ];
+          tailSumIm += X[i+j+1] * Hrev[j+1];
+        }
+        Y[i  ] = ( sum[0] + sum[2] ) + tailSumRe;
+        Y[i+1] = ( sum[1] + sum[3] ) + tailSumIm;
+      }
+    }
+    return i/2;
+  }
+  else
+  {
+    if ( (Nr & 3) == 0 )
+    {
+      for ( i = 0; i <= lenNr; ++i )
+      {
+        sum[0] = sum[1] = sum[2] = sum[3] = 0.0F;
+        for (j = 0; j < Nr; j += 4 )
+        {
+          sum[0] += X[i+j]   * Hrev[j];
+          sum[1] += X[i+j+1] * Hrev[j+1];
+          sum[2] += X[i+j+2] * Hrev[j+2];
+          sum[3] += X[i+j+3] * Hrev[j+3];
+        }
+        Y[i] = sum[0] + sum[1] + sum[2] + sum[3];
+      }
+      return i;
+    }
+    else
+    {
+      const int M = Nr & (~3);
+      /* printf("A: Nr = %d, M = %d, H[M] = %f, H[M+1] = %f, H[M+2] = %f, H[M+3] = %f\n", Nr, M, Hrev[M], Hrev[M+1], Hrev[M+2], Hrev[M+3] ); */
+      for ( i = 0; i <= lenNr; ++i )
+      {
+        float tailSum = 0.0;
+        sum[0] = sum[1] = sum[2] = sum[3] = 0.0F;
+        for (j = 0; j < M; j += 4 )
+        {
+          sum[0] += X[i+j]   * Hrev[j];
+          sum[1] += X[i+j+1] * Hrev[j+1];
+          sum[2] += X[i+j+2] * Hrev[j+2];
+          sum[3] += X[i+j+3] * Hrev[j+3];
+        }
+        for ( ; j < Nr; ++j )
+          tailSum += X[i+j] * Hrev[j];
+        Y[i] = (sum[0] + sum[1]) + (sum[2] + sum[3]) + tailSum;
+      }
+      return i;
+    }
+  }
+}
+
+
+int slow_conv_B(void * setup, const float * input, int len, float *output, const float *Yref, int applyFlush)
+{
+  float sum[4];
+  struct ConvSetup * p = (struct ConvSetup*)setup;
+  (void)Yref;
+  (void)applyFlush;
+  if (p->flags & PFFASTCONV_SYMMETRIC)
+  {
+    const float * RESTRICT X = input;
+    const float * RESTRICT Hrev = p->H;
+    float * RESTRICT Y = output;
+    const int Nr = ((p->flags & PFFASTCONV_CPLX_INP_OUT) ? 2 : 1) * p->N;
+    const int lenNr = ((p->flags & PFFASTCONV_CPLX_INP_OUT) ? 2 : 1) * (len - p->N);
+    const int h = Nr / 2 -4;
+    const int E = Nr -4;
+    int i, j;
+
+    if (p->flags & PFFASTCONV_CPLX_INP_OUT)
+    {
+      for ( i = 0; i <= lenNr; i += 2 )
+      {
+        const int k = i + E;
+        sum[0] = sum[1] = sum[2] = sum[3] = 0.0F;
+        for (j = 0; j <= h; j += 4 )
+        {
+          sum[0] += Hrev[j  ] * ( X[i+j  ] + X[k-j+2] );
+          sum[1] += Hrev[j+1] * ( X[i+j+1] + X[k-j+3] );
+          sum[2] += Hrev[j+2] * ( X[i+j+2] + X[k-j  ] );
+          sum[3] += Hrev[j+3] * ( X[i+j+3] + X[k-j+1] );
+        }
+        Y[i  ] = sum[0] + sum[2];
+        Y[i+1] = sum[1] + sum[3];
+      }
+      return i/2;
+    }
+    else
+    {
+      for ( i = 0; i <= lenNr; ++i )
+      {
+        const int k = i + E;
+        sum[0] = sum[1] = sum[2] = sum[3] = 0.0F;
+        for (j = 0; j <= h; j += 4 )
+        {
+          sum[0] += Hrev[j  ] * ( X[i+j  ] + X[k-j+3] );
+          sum[1] += Hrev[j+1] * ( X[i+j+1] + X[k-j+2] );
+          sum[2] += Hrev[j+2] * ( X[i+j+2] + X[k-j+1] );
+          sum[3] += Hrev[j+3] * ( X[i+j+3] + X[k-j  ] );
+        }
+        Y[i] = sum[0] + sum[1] + sum[2] + sum[3];
+      }
+      return i;
+    }
+  }
+  else
+  {
+    const float * RESTRICT X = input;
+    const float * RESTRICT Hrev = p->H;
+    float * RESTRICT Y = output;
+    const int Nr = ((p->flags & PFFASTCONV_CPLX_INP_OUT) ? 2 : 1) * p->N;
+    const int lenNr = ((p->flags & PFFASTCONV_CPLX_INP_OUT) ? 2 : 1) * (len - p->N);
+    int i, j;
+
+    if (p->flags & PFFASTCONV_CPLX_INP_OUT)
+    {
+      for ( i = 0; i <= lenNr; i += 2 )
+      {
+        sum[0] = sum[1] = sum[2] = sum[3] = 0.0F;
+        for (j = 0; j < Nr; j += 4 )
+        {
+          sum[0] += X[i+j]   * Hrev[j];
+          sum[1] += X[i+j+1] * Hrev[j+1];
+          sum[2] += X[i+j+2] * Hrev[j+2];
+          sum[3] += X[i+j+3] * Hrev[j+3];
+        }
+        Y[i  ] = sum[0] + sum[2];
+        Y[i+1] = sum[1] + sum[3];
+      }
+      return i/2;
+    }
+    else
+    {
+      if ( (Nr & 3) == 0 )
+      {
+        for ( i = 0; i <= lenNr; ++i )
+        {
+          sum[0] = sum[1] = sum[2] = sum[3] = 0.0F;
+          for (j = 0; j < Nr; j += 4 )
+          {
+            sum[0] += X[i+j]   * Hrev[j];
+            sum[1] += X[i+j+1] * Hrev[j+1];
+            sum[2] += X[i+j+2] * Hrev[j+2];
+            sum[3] += X[i+j+3] * Hrev[j+3];
+          }
+          Y[i] = (sum[0] + sum[1]) + (sum[2] + sum[3]);
+        }
+        return i;
+      }
+      else
+      {
+        const int M = Nr & (~3);
+        /* printf("B: Nr = %d\n", Nr ); */
+        for ( i = 0; i <= lenNr; ++i )
+        {
+          float tailSum = 0.0;
+          sum[0] = sum[1] = sum[2] = sum[3] = 0.0F;
+          for (j = 0; j < M; j += 4 )
+          {
+            sum[0] += X[i+j]   * Hrev[j];
+            sum[1] += X[i+j+1] * Hrev[j+1];
+            sum[2] += X[i+j+2] * Hrev[j+2];
+            sum[3] += X[i+j+3] * Hrev[j+3];
+          }
+          for ( ; j < Nr; ++j )
+            tailSum += X[i+j] * Hrev[j];
+          Y[i] = (sum[0] + sum[1]) + (sum[2] + sum[3]) + tailSum;
+        }
+        return i;
+      }
+    }
+  }
+
+}
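+
+/* The PFFASTCONV_SYMMETRIC branch above folds the filter: when
+   H[j] == H[N-1-j] (and hence Hrev is symmetric too), pairing taps gives
+   y[i] = sum_{j<N/2} Hrev[j] * (X[i+j] + X[i+N-1-j]), i.e. one
+   multiplication per pair of taps instead of two. A scalar sketch
+   (real case, even N assumed): */
+#if 0
+static float fir_sym_ref(const float *X, const float *Hrev, int N, int i)
+{
+  float sum = 0.f;
+  int j;
+  for (j = 0; j < N / 2; ++j)
+    sum += Hrev[j] * (X[i + j] + X[i + N - 1 - j]);
+  return sum;
+}
+#endif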
+
+
+int fast_conv(void * setup, const float * X, int len, float *Y, const float *Yref, int applyFlush)
+{
+  (void)Yref;
+  return pffastconv_apply( (PFFASTCONV_Setup*)setup, X, len, Y, applyFlush );
+}
+
+
+
+void printFirst( const float * V, const char * st, const int N, const int perLine )
+{
+  (void)V;  (void)st;  (void)N;  (void)perLine;
+  return;
+#if 0
+  int i;
+  for ( i = 0; i < N; ++i )
+  {
+    if ( (i % perLine) == 0 )
+      printf("\n%s[%d]", st, i);
+    printf("\t%.1f", V[i]);
+  }
+  printf("\n");
+#endif
+}
+
+
+
+#define NUMY       11
+
+
+int test(int FILTERLEN, int convFlags, const int testOutLen, int printDbg, int printSpeed) {
+  double t0, t1, tstop, td, tdref;
+  float *X, *H;
+  float *Y[NUMY];
+  int64_t outN[NUMY];
+  /* 256 KFloats or 16 MFloats data */
+#if 1
+  const int len = testOutLen ? (1 << 18) : (1 << 24);
+#elif 0
+  const int len = testOutLen ? (1 << 18) : (1 << 13);
+#else
+  const int len = testOutLen ? (1 << 18) : (1024);
+#endif
+  const int cplxFactor = ( convFlags & PFFASTCONV_CPLX_INP_OUT ) ? 2 : 1;
+  const int lenC = len / cplxFactor;
+
+  int yi, yc, posMaxErr;
+  float yRangeMin, yRangeMax, yErrLimit, maxErr = 0.0;
+  int i, j, numErrOverLimit, iter;
+  int retErr = 0;
+
+  /*                                  0               1               2               3                   4                   5                   6                   7                   8                      9  */
+  pfnConvSetup   aSetup[NUMY]     = { convSetupRev,   convSetupRev,   convSetupRev,   fastConvSetup,      fastConvSetup,      fastConvSetup,      fastConvSetup,      fastConvSetup,      fastConvSetup,         fastConvSetup   };
+  pfnConvDestroy aDestroy[NUMY]   = { convDestroyRev, convDestroyRev, convDestroyRev, fastConvDestroy,    fastConvDestroy,    fastConvDestroy,    fastConvDestroy,    fastConvDestroy,    fastConvDestroy,       fastConvDestroy };
+  pfnGetConvFnPtr aGetFnPtr[NUMY] = { NULL,           NULL,           NULL,           NULL,               NULL,               NULL,               NULL,               NULL,               NULL,                  NULL,           };
+  pfnConvolution aConv[NUMY]      = { slow_conv_R,    slow_conv_A,    slow_conv_B,    fast_conv,          fast_conv,          fast_conv,          fast_conv,          fast_conv,          fast_conv,             fast_conv       };
+  const char * convText[NUMY]     = { "R(non-simd)",  "A(non-simd)",  "B(non-simd)",  "fast_conv_64",     "fast_conv_128",    "fast_conv_256",    "fast_conv_512",    "fast_conv_1K",     "fast_conv_2K",        "fast_conv_4K"  };
+  int    aFastAlgo[NUMY]          = { 0,              0,              0,              1,                  1,                  1,                  1,                  1,                  1,                     1               };
+  void * aSetupCfg[NUMY]          = { NULL,           NULL,           NULL,           NULL,               NULL,               NULL,               NULL,               NULL,               NULL,                  NULL            };
+  int    aBlkLen[NUMY]            = { 1024,           1024,           1024,           64,                 128,                256,                512,                1024,               2048,                  4096            };
+#if 1
+  int    aRunAlgo[NUMY]           = { 1,              1,              1,              FILTERLEN<64,       FILTERLEN<128,      FILTERLEN<256,      FILTERLEN<512,      FILTERLEN<1024,     FILTERLEN<2048,        FILTERLEN<4096  };
+#elif 0
+  int    aRunAlgo[NUMY]           = { 1,              0,              0,              0 && FILTERLEN<64,  1 && FILTERLEN<128, 1 && FILTERLEN<256, 0 && FILTERLEN<512, 0 && FILTERLEN<1024, 0 && FILTERLEN<2048,  0 && FILTERLEN<4096  };
+#else
+  int    aRunAlgo[NUMY]           = { 1,              1,              1,              0 && FILTERLEN<64,  0 && FILTERLEN<128, 1 && FILTERLEN<256, 0 && FILTERLEN<512, 0 && FILTERLEN<1024, 0 && FILTERLEN<2048,  0 && FILTERLEN<4096  };
+#endif
+  double aSpeedFactor[NUMY], aDuration[NUMY], procSmpPerSec[NUMY];
+
+  X = pffastconv_malloc( (unsigned)(len+4) * sizeof(float) );
+  for ( i=0; i < NUMY; ++i)
+  {
+    if ( 1 || i < 2 )
+      Y[i] = pffastconv_malloc( (unsigned)len * sizeof(float) );
+    else
+      Y[i] = Y[1];
+
+    Y[i][0] = 123.F;  /* test for pffft_zconvolve_no_accu() */
+    aSpeedFactor[i] = -1.0;
+    aDuration[i] = -1.0;
+    procSmpPerSec[i] = -1.0;
+  }
+
+  H = pffastconv_malloc((unsigned)FILTERLEN * sizeof(float));
+
+  /* initialize input */
+  if ( convFlags & PFFASTCONV_CPLX_INP_OUT )
+  {
+    for ( i = 0; i < lenC; ++i )
+    {
+      X[2*i  ] = (float)(i % 4093);  /* 4093 is a prime number. see https://en.wikipedia.org/wiki/List_of_prime_numbers */
+      X[2*i+1] = (float)((i+2048) % 4093);
+    }
+  }
+  else
+  {
+    for ( i = 0; i < len; ++i )
+      X[i] = (float)(i % 4093);  /* 4093 is a prime number. see https://en.wikipedia.org/wiki/List_of_prime_numbers */
+  }
+  X[ len    ] = INVALID_FLOAT_VAL;
+  X[ len +1 ] = INVALID_FLOAT_VAL;
+  X[ len +2 ] = INVALID_FLOAT_VAL;
+  X[ len +3 ] = INVALID_FLOAT_VAL;
+
+  if (!testOutLen)
+    printFirst( X, "X", 64, 8 );
+
+  /* filter coeffs */
+  memset( H, 0, FILTERLEN * sizeof(float) );
+#if 1
+  if ( convFlags & PFFASTCONV_SYMMETRIC )
+  {
+    const int half = FILTERLEN / 2;
+    for ( j = 0; j < half; ++j ) {
+      switch (j % 3) {
+        case 0: H[j] = H[FILTERLEN-1-j] = -1.0F;  break;
+        case 1: H[j] = H[FILTERLEN-1-j] =  1.0F;  break;
+        case 2: H[j] = H[FILTERLEN-1-j] =  0.5F;  break;
+      }
+    }
+  }
+  else
+  {
+    for ( j = 0; j < FILTERLEN; ++j ) {
+      switch (j % 3) {
+        case 0: H[j] = -1.0F;  break;
+        case 1: H[j] = 1.0F;   break;
+        case 2: H[j] = 0.5F;   break;
+      }
+    }
+  }
+#else
+  H[0] = 1.0F;
+  H[FILTERLEN -1] = 1.0F;
+#endif
+  if (!testOutLen)
+    printFirst( H, "H", FILTERLEN, 8 );
+
+  printf("\n");
+  printf("filterLen = %d\t%s%s\t%s:\n", FILTERLEN,
+    ((convFlags & PFFASTCONV_CPLX_INP_OUT)?"cplx":"real"),
+    (convFlags & PFFASTCONV_CPLX_INP_OUT)?((convFlags & PFFASTCONV_CPLX_SINGLE_FFT)?" single":" 2x") : "",
+    ((convFlags & PFFASTCONV_SYMMETRIC)?"symmetric":"non-sym") );
+
+  while (1)
+  {
+
+    for ( yi = 0; yi < NUMY; ++yi )
+    {
+      if (!aRunAlgo[yi])
+        continue;
+
+      aSetupCfg[yi] = aSetup[yi]( H, FILTERLEN, &aBlkLen[yi], convFlags );
+
+      /* get effective apply function ptr */
+      if ( aSetupCfg[yi] && aGetFnPtr[yi] )
+        aConv[yi] = aGetFnPtr[yi]( aSetupCfg[yi] );
+
+      if ( aSetupCfg[yi] && aConv[yi] ) {
+        if (testOutLen)
+        {
+          t0 = uclock_sec();
+          outN[yi] = aConv[yi]( aSetupCfg[yi], X, lenC, Y[yi], Y[0], 1 /* applyFlush */ );
+          t1 = uclock_sec();
+          td = t1 - t0;
+        }
+        else
+        {
+          const int blkLen = 4096;  /* required for 'fast_conv_4K' */
+          int64_t offC = 0, offS, Nout;
+          int k;
+          iter = 0;
+          outN[yi] = 0;
+          t0 = uclock_sec();
+          tstop = t0 + 0.25;  /* benchmark duration: 250 ms */
+          do {
+            for ( k = 0; k < 128 && offC +blkLen < lenC; ++k )
+            {
+              offS = cplxFactor * offC;
+              Nout = aConv[yi]( aSetupCfg[yi], X +offS, blkLen, Y[yi] +offS, Y[0], (offC +blkLen >= lenC) /* applyFlush */ );
+              offC += Nout;
+              ++iter;
+              if ( !Nout )
+                break;
+              if ( offC +blkLen >= lenC )
+              {
+                outN[yi] += offC;
+                offC = 0;
+              }
+            }
+            t1 = uclock_sec();
+          } while ( t1 < tstop );
+          outN[yi] += offC;
+          td = t1 - t0;
+          procSmpPerSec[yi] = cplxFactor * (double)outN[yi] / td;
+        }
+      }
+      else
+      {
+        t0 = t1 = td = 0.0;
+        outN[yi] = 0;
+      }
+      aDuration[yi] = td;
+      if ( yi == 0 ) {
+        const float * Yvals = Y[0];
+        const int64_t refOutLen = cplxFactor * outN[0];
+        tdref = td;
+        if (printDbg) {
+          printf("convolution '%s' took: %f ms\n", convText[yi], td*1000.0);
+          printf("  convolution '%s' output size %" PRId64 " == (cplx) len %d + %" PRId64 "\n", convText[yi], outN[yi], len / cplxFactor, outN[yi] - len / cplxFactor);
+        }
+        aSpeedFactor[yi] = 1.0;
+        yRangeMin = FLT_MAX;
+        yRangeMax = -FLT_MAX;  /* FLT_MIN is the smallest positive float - unsuitable as an initial maximum */
+        for ( i = 0; i < refOutLen; ++i )
+        {
+          if ( yRangeMax < Yvals[i] )  yRangeMax = Yvals[i];
+          if ( yRangeMin > Yvals[i] )  yRangeMin = Yvals[i];
+        }
+        yErrLimit = fabsf(yRangeMax - yRangeMin) / ( 100.0F * 1000.0F );
+        /* yErrLimit = 0.01F; */
+        if (testOutLen) {
+          if (1) {
+            printf("reference output len = %" PRId64 " smp\n", outN[0]);
+            printf("reference output range |%.1f ..%.1f| = %.1f ==> err limit = %f\n", yRangeMin, yRangeMax, yRangeMax - yRangeMin, yErrLimit);
+          }
+          printFirst( Yvals, "Yref", 64, 8 );
+        }
+      }
+      else
+      {
+        aSpeedFactor[yi] = tdref / td;
+        if (printDbg) {
+          printf("\nconvolution '%s' took: %f ms == %f %% == %f X\n", convText[yi], td*1000.0, td * 100 / tdref, tdref / td);
+          printf("  convolution '%s' output size %" PRId64 " == (cplx) len %d + %" PRId64 "\n", convText[yi], outN[yi], len / cplxFactor, outN[yi] - len / cplxFactor);
+        }
+      }
+    }
+
+    int iMaxSpeedSlowAlgo = -1;
+    int iFirstFastAlgo = -1;
+    int iMaxSpeedFastAlgo = -1;
+    int iPrintedRefOutLen = 0;
+    {
+      for ( yc = 1; yc < NUMY; ++yc )
+      {
+        if (!aRunAlgo[yc])
+          continue;
+        if (aFastAlgo[yc]) {
+          if ( iMaxSpeedFastAlgo < 0 || aSpeedFactor[yc] > aSpeedFactor[iMaxSpeedFastAlgo] )
+            iMaxSpeedFastAlgo = yc;
+            
+          if (iFirstFastAlgo < 0)
+            iFirstFastAlgo = yc;
+        }
+        else
+        {
+          if ( iMaxSpeedSlowAlgo < 0 || aSpeedFactor[yc] > aSpeedFactor[iMaxSpeedSlowAlgo] )
+            iMaxSpeedSlowAlgo = yc;
+        }
+      }
+
+      if (printSpeed)
+      {
+        if (testOutLen)
+        {
+          if (iMaxSpeedSlowAlgo >= 0 )
+            printf("fastest slow algorithm is '%s' at speed %f X ; abs duration %f ms\n", convText[iMaxSpeedSlowAlgo], aSpeedFactor[iMaxSpeedSlowAlgo], 1000.0 * aDuration[iMaxSpeedSlowAlgo]);
+          if (0 != iMaxSpeedSlowAlgo && aRunAlgo[0])
+            printf("slow algorithm '%s' at speed %f X ; abs duration %f ms\n", convText[0], aSpeedFactor[0], 1000.0 * aDuration[0]);
+          if (1 != iMaxSpeedSlowAlgo && aRunAlgo[1])
+            printf("slow algorithm '%s' at speed %f X ; abs duration %f ms\n", convText[1], aSpeedFactor[1], 1000.0 * aDuration[1]);
+
+          if (iFirstFastAlgo >= 0 && iFirstFastAlgo != iMaxSpeedFastAlgo && aRunAlgo[iFirstFastAlgo])
+            printf("first   fast algorithm is '%s' at speed %f X ; abs duration %f ms\n", convText[iFirstFastAlgo],    aSpeedFactor[iFirstFastAlgo],    1000.0 * aDuration[iFirstFastAlgo]);
+          if (iFirstFastAlgo >= 0 && iFirstFastAlgo+1 != iMaxSpeedFastAlgo && iFirstFastAlgo+1 < NUMY && aRunAlgo[iFirstFastAlgo+1])
+            printf("2nd     fast algorithm is '%s' at speed %f X ; abs duration %f ms\n", convText[iFirstFastAlgo+1],  aSpeedFactor[iFirstFastAlgo+1],  1000.0 * aDuration[iFirstFastAlgo+1]);
+
+          if ( 0 <= iMaxSpeedFastAlgo && iMaxSpeedFastAlgo < NUMY && aRunAlgo[iMaxSpeedFastAlgo] )
+          {
+            printf("fastest fast algorithm is '%s' at speed %f X ; abs duration %f ms\n", convText[iMaxSpeedFastAlgo], aSpeedFactor[iMaxSpeedFastAlgo], 1000.0 * aDuration[iMaxSpeedFastAlgo]);
+            if ( 0 <= iMaxSpeedSlowAlgo && iMaxSpeedSlowAlgo < NUMY && aRunAlgo[iMaxSpeedSlowAlgo] )
+              printf("fast / slow ratio: %f X\n", aSpeedFactor[iMaxSpeedFastAlgo] / aSpeedFactor[iMaxSpeedSlowAlgo] );
+          }
+          printf("\n");
+        }
+        else
+        {
+          for ( yc = 0; yc < NUMY; ++yc )
+          {
+            if (!aRunAlgo[yc] || procSmpPerSec[yc] <= 0.0)
+              continue;
+            printf("algo '%s':\t%.2f MSmp\tin\t%.1f ms\t= %g kSmpPerSec\n",
+              convText[yc], (double)outN[yc]/(1000.0 * 1000.0), 1000.0 * aDuration[yc], procSmpPerSec[yc] * 0.001 );
+          }
+        }
+
+      }
+    }
+
+
+    for ( yc = 1; yc < NUMY; ++yc )
+    {
+      const float * Yref;
+      const float * Ycurr;
+      int outMin;
+
+      if (!aRunAlgo[yc])
+        continue;
+
+      if (printDbg)
+        printf("\n");
+
+      if ( outN[yc] == 0 )
+      {
+        printf("output size 0: '%s' not implemented\n", convText[yc]);
+      }
+      else if ( outN[0] != outN[yc] /* && aFastAlgo[yc] */ && testOutLen )
+      {
+        if (!iPrintedRefOutLen)
+        {
+          printf("reference output size = %" PRId64 ", delta to (cplx) input length = %" PRId64 " smp\n", outN[0], (len / cplxFactor) - outN[0]);
+          iPrintedRefOutLen = 1;
+        }
+        printf("output size doesn't match!: ref (FILTERLEN %d) returned %" PRId64 " smp, '%s' returned %" PRId64 " smp : delta = %" PRId64 " smp\n",
+          FILTERLEN, outN[0], convText[yc], outN[yc], outN[yc] - outN[0] );
+        retErr = 1;
+      }
+
+      posMaxErr = 0;
+      maxErr = -1.0;
+      Yref = Y[0];
+      Ycurr = Y[yc];
+      outMin = ( outN[yc] < outN[0] ) ? outN[yc] : outN[0];
+      numErrOverLimit = 0;
+      for ( i = 0; i < outMin; ++i )
+      {
+        const float err = fabsf(Ycurr[i] - Yref[i]);
+        if ( numErrOverLimit < 6 && err >= yErrLimit )
+        {
+          printf("algo '%s': at %d: ***ERROR*** = %f, errLimit = %f, ref = %f, actual = %f\n",
+            convText[yc], i, err, yErrLimit, Yref[i], Ycurr[i] );
+          ++numErrOverLimit;
+        }
+
+        if ( err > maxErr )
+        {
+          maxErr = err;
+          posMaxErr = i;
+        }
+      }
+
+      if ( printDbg || (iMaxSpeedSlowAlgo == yc) || (iMaxSpeedFastAlgo == yc) )
+        printf("max difference for '%s' is %g at sample idx %d of max inp 4093-1 == %f %%\n", convText[yc], maxErr, posMaxErr, maxErr * 100.0 / 4092.0 );
+    }
+
+    break;
+  }
+
+  pffastconv_free(X);
+  for ( i=0; i < NUMY; ++i)
+  {
+    if ( 1 || i < 2 )
+      pffastconv_free( Y[i] );
+    if (!aRunAlgo[i])
+      continue;
+    aDestroy[i]( aSetupCfg[i] );
+  }
+
+  pffastconv_free(H);
+
+  return retErr;
+}
+
+/* small functions inside pffft.c that will detect (compiler) bugs with respect to simd instructions */
+void validate_pffft_simd();
+int  validate_pffft_simd_ex(FILE * DbgOut);
+
+
+int main(int argc, char **argv)
+{
+  int result = 0;
+  int i, k, M, flagsA, flagsB, flagsC, testOutLen, printDbg, printSpeed;
+  int testOutLens = 1, benchConv = 1, quickTest = 0, slowTest = 0;
+  int testReal = 1, testCplx = 1, testSymmetric = 0;
+
+  for ( i = 1; i < argc; ++i ) {
+
+    if (!strcmp(argv[i], "--test-simd")) {
+      int numErrs = validate_pffft_simd_ex(stdout);
+      fprintf( ( numErrs != 0 ? stderr : stdout ), "validate_pffft_simd_ex() returned %d errors!\n", numErrs);
+      return ( numErrs > 0 ? 1 : 0 );
+    }
+
+    if (!strcmp(argv[i], "--no-len")) {
+      testOutLens = 0;
+    }
+    else if (!strcmp(argv[i], "--no-bench")) {
+      benchConv = 0;
+    }
+    else if (!strcmp(argv[i], "--quick")) {
+      quickTest = 1;
+    }
+    else if (!strcmp(argv[i], "--slow")) {
+      slowTest = 1;
+    }
+    else if (!strcmp(argv[i], "--real")) {
+      testCplx = 0;
+    }
+    else if (!strcmp(argv[i], "--cplx")) {
+      testReal = 0;
+    }
+    else if (!strcmp(argv[i], "--sym")) {
+      testSymmetric = 1;
+    }
+    else /* if (!strcmp(argv[i], "--help")) */ {
+      printf("usage: %s [--test-simd] [--no-len] [--no-bench] [--quick|--slow] [--real|--cplx] [--sym]\n", argv[0]);
+      exit(1);
+    }
+  }
+
+
+  if (testOutLens)
+  {
+    for ( k = 0; k < 3; ++k )
+    {
+      if ( (k == 0 && !testReal) || (k > 0 && !testCplx) )
+        continue;
+      printf("\n\n==========\n");
+      printf("testing %s %s output lengths ..\n", (k == 0 ? "real" : "cplx"), ( k == 0 ? "" : (k==1 ? "2x" : "single") ) );
+      printf("==========\n");
+      flagsA = (k == 0) ? 0 : PFFASTCONV_CPLX_INP_OUT;
+      flagsB = flagsA | ( testSymmetric ? PFFASTCONV_SYMMETRIC : 0 );
+      flagsC = flagsB | PFFASTCONV_CPLX_SINGLE_FFT;
+      testOutLen = 1;
+      printDbg = 0;
+      printSpeed = 0;
+      for ( M = 128 - 4; M <= (quickTest ? 128+16 : 256); ++M )
+      {
+        if ( (M % 16) != 0 && testSymmetric )
+          continue;
+        result |= test(M, flagsB, testOutLen, printDbg, printSpeed);
+      }
+    }
+  }
+
+  if (benchConv)
+  {
+    for ( k = 0; k < 3; ++k )
+    {
+      if ( (k == 0 && !testReal) || (k > 0 && !testCplx) )
+        continue;
+      printf("\n\n==========\n");
+      printf("starting %s %s benchmark against linear convolutions ..\n", (k == 0 ? "real" : "cplx"), ( k == 0 ? "" : (k==1 ? "2x" : "single") ) );
+      printf("==========\n");
+      flagsA = (k == 0) ? 0 : PFFASTCONV_CPLX_INP_OUT;
+      flagsB = flagsA | ( testSymmetric ? PFFASTCONV_SYMMETRIC : 0 );
+      flagsC = flagsB | ( k == 2 ? PFFASTCONV_CPLX_SINGLE_FFT : 0 );
+      testOutLen = 0;
+      printDbg = 0;
+      printSpeed = 1;
+      if (!slowTest) {
+        result |= test( 32,     flagsC, testOutLen, printDbg, printSpeed);
+        result |= test( 32+ 16, flagsC, testOutLen, printDbg, printSpeed);
+        result |= test( 64,     flagsC, testOutLen, printDbg, printSpeed);
+        result |= test( 64+ 32, flagsC, testOutLen, printDbg, printSpeed);
+        result |= test(128,     flagsC, testOutLen, printDbg, printSpeed);
+      }
+      if (!quickTest) {
+        result |= test(128+ 64, flagsC, testOutLen, printDbg, printSpeed);
+        result |= test(256,     flagsC, testOutLen, printDbg, printSpeed);
+        result |= test(256+128, flagsC, testOutLen, printDbg, printSpeed);
+        result |= test(512,     flagsC, testOutLen, printDbg, printSpeed);
+        result |= test(1024,    flagsC, testOutLen, printDbg, printSpeed);
+      }
+    }
+  }
+
+  return result;
+}
+
diff --git a/test_pffft.c b/test_pffft.c
new file mode 100644
index 0000000..2eb185a
--- /dev/null
+++ b/test_pffft.c
@@ -0,0 +1,371 @@
+/*
+  Copyright (c) 2013 Julien Pommier.
+
+  Small test & bench for PFFFT, comparing its performance with the scalar FFTPACK, FFTW, and Apple vDSP
+
+  How to build: 
+
+  on linux, with fftw3:
+  gcc -o test_pffft -DHAVE_FFTW -msse -mfpmath=sse -O3 -Wall -W pffft.c test_pffft.c fftpack.c -L/usr/local/lib -I/usr/local/include/ -lfftw3f -lm
+
+  on macos, without fftw3:
+  clang -o test_pffft -DHAVE_VECLIB -O3 -Wall -W pffft.c test_pffft.c fftpack.c -L/usr/local/lib -I/usr/local/include/ -framework Accelerate
+
+  on macos, with fftw3:
+  clang -o test_pffft -DHAVE_FFTW -DHAVE_VECLIB -O3 -Wall -W pffft.c test_pffft.c fftpack.c -L/usr/local/lib -I/usr/local/include/ -lfftw3f -framework Accelerate
+
+  alternatively, replace clang with gcc.
+
+  on windows, with visual c++:
+  cl /Ox -D_USE_MATH_DEFINES /arch:SSE test_pffft.c pffft.c fftpack.c
+  
+  build without SIMD instructions:
+  gcc -o test_pffft -DPFFFT_SIMD_DISABLE -O3 -Wall -W pffft.c test_pffft.c fftpack.c -lm
+
+ */
+
+#ifdef PFFFT_ENABLE_FLOAT
+#include "pffft.h"
+
+typedef float pffft_scalar;
+#else
+/*
+Note: this path builds the double precision variant, which has a wider dynamic range.
+*/
+#include "pffft_double.h"
+
+typedef double pffft_scalar;
+#endif
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <assert.h>
+#include <string.h>
+
+/* define our own constants - M_PI is missing when compiler extensions are turned off .. */
+#ifndef M_PI
+  #define M_PI    3.14159265358979323846  /* pi */
+#endif
+
+/* EXPECTED_DYN_RANGE in dB:
+ * single precision float has a 24 bit mantissa
+ * => 24 bits * 6 dB = 144 dB
+ * allow a few dB of tolerance (even 144 dB looks good on my PC)
+ */
+#ifdef PFFFT_ENABLE_FLOAT
+#define EXPECTED_DYN_RANGE  140.0
+#else
+#define EXPECTED_DYN_RANGE  215.0
+#endif
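+
+/* rough sanity check of these limits (documentation only):
+ * float:  20*log10(2^24) ~= 144.5 dB, matching the 140 dB limit plus tolerance;
+ * double: 20*log10(2^53) ~= 319.1 dB in theory - the 215 dB limit presumably
+ *         leaves headroom for rounding in the cos()/sin() generated test signal */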
+
+/* maximum allowed phase error in degree */
+#define DEG_ERR_LIMIT   1E-4
+
+/* maximum allowed magnitude error in amplitude (of 1.0 or 1.1) */
+#define MAG_ERR_LIMIT  1E-6
+
+
+#define PRINT_SPEC  0
+
+#define PWR2LOG(PWR)  ( (PWR) < 1E-30 ? 10.0*log10(1E-30) : 10.0*log10(PWR) )
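+
+/* PWR2LOG clamps the power at 1E-30 (== -300 dB) before taking the logarithm,
+   so empty spectrum bins never produce log10(0) == -inf */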
+
+
+
+int test(int N, int cplx, int useOrdered) {
+  int Nfloat = (cplx ? N*2 : N);
+#ifdef PFFFT_ENABLE_FLOAT
+  pffft_scalar *X = pffft_aligned_malloc((unsigned)Nfloat * sizeof(pffft_scalar));
+  pffft_scalar *Y = pffft_aligned_malloc((unsigned)Nfloat * sizeof(pffft_scalar));
+  pffft_scalar *R = pffft_aligned_malloc((unsigned)Nfloat * sizeof(pffft_scalar));
+  pffft_scalar *Z = pffft_aligned_malloc((unsigned)Nfloat * sizeof(pffft_scalar));
+  pffft_scalar *W = pffft_aligned_malloc((unsigned)Nfloat * sizeof(pffft_scalar));
+#else
+  pffft_scalar *X = pffftd_aligned_malloc((unsigned)Nfloat * sizeof(pffft_scalar));
+  pffft_scalar *Y = pffftd_aligned_malloc((unsigned)Nfloat * sizeof(pffft_scalar));
+  pffft_scalar *R = pffftd_aligned_malloc((unsigned)Nfloat * sizeof(pffft_scalar));
+  pffft_scalar *Z = pffftd_aligned_malloc((unsigned)Nfloat * sizeof(pffft_scalar));
+  pffft_scalar *W = pffftd_aligned_malloc((unsigned)Nfloat * sizeof(pffft_scalar));
+#endif
+  pffft_scalar amp = (pffft_scalar)1.0;
+  double freq, dPhi, phi, phi0;
+  double pwr, pwrCar, pwrOther, err, errSum, mag, expectedMag;
+  int k, j, m, iter, kmaxOther = -1, retError = 0;
+
+#ifdef PFFFT_ENABLE_FLOAT
+  assert( pffft_is_power_of_two(N) );
+  PFFFT_Setup *s = pffft_new_setup(N, cplx ? PFFFT_COMPLEX : PFFFT_REAL);
+#else
+  assert( pffftd_is_power_of_two(N) );
+  PFFFTD_Setup *s = pffftd_new_setup(N, cplx ? PFFFT_COMPLEX : PFFFT_REAL);
+#endif
+  assert(s);
+  if (!s) {
+    printf("Error setting up PFFFT!\n");
+    return 1;
+  }
+
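+  /* sweep the test carrier across the spectrum in steps of N/16: 16 bins for
+     complex input, 9 bins (0 .. N/2) for real input; amp toggles between
+     1.0 and 1.1 with m % 3, the start phase phi0 with m % 4 */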
+  for ( k = m = 0; k < (cplx? N : (1 + N/2) ); k += N/16, ++m )
+  {
+    amp = (pffft_scalar)( ( (m % 3) == 0 ) ? 1.0 : 1.1 );
+    freq = (k < N/2) ? ((double)k / N) : ((double)(k-N) / N);
+    dPhi = 2.0 * M_PI * freq;
+    if ( dPhi < 0.0 )
+      dPhi += 2.0 * M_PI;
+
+    iter = -1;
+    while (1)
+    {
+      ++iter;
+
+      if (iter)
+        printf("bin %d: dphi = %f for freq %f\n", k, dPhi, freq);
+
+      /* generate cosine carrier as time signal - start at defined phase phi0 */
+      phi = phi0 = (m % 4) * 0.125 * M_PI;  /* have phi0 < 90 deg to be normalized */
+      for ( j = 0; j < N; ++j )
+      {
+        if (cplx) {
+          X[2*j] = amp * (pffft_scalar)cos(phi);  /* real part */
+          X[2*j+1] = amp * (pffft_scalar)sin(phi);  /* imag part */
+        }
+        else
+          X[j] = amp * (pffft_scalar)cos(phi);  /* only real part */
+
+        /* phase increment .. keep phi normalized: cos()/sin() accuracy degrades for large arguments! */
+        phi += dPhi;
+        if ( phi >= M_PI )
+          phi -= 2.0 * M_PI;
+      }
+
+      /* forward transform from X --> Y  .. using work buffer W */
+#ifdef PFFFT_ENABLE_FLOAT
+      if ( useOrdered )
+        pffft_transform_ordered(s, X, Y, W, PFFFT_FORWARD );
+      else
+      {
+        pffft_transform(s, X, R, W, PFFFT_FORWARD );  /* use R for reordering */
+        pffft_zreorder(s, R, Y, PFFFT_FORWARD ); /* reorder into Y[] for power calculations */
+      }
+#else
+      if ( useOrdered )
+        pffftd_transform_ordered(s, X, Y, W, PFFFT_FORWARD );
+      else
+      {
+        pffftd_transform(s, X, R, W, PFFFT_FORWARD );  /* use R for reordering */
+        pffftd_zreorder(s, R, Y, PFFFT_FORWARD ); /* reorder into Y[] for power calculations */
+      }
+#endif
+
+      pwrOther = -1.0;
+      pwrCar = 0;
+
+
+      /* for positive frequencies: 0 to 0.5 * samplerate */
+      /* and also for negative frequencies: -0.5 * samplerate to 0 */
+      for ( j = 0; j < ( cplx ? N : (1 + N/2) ); ++j )
+      {
+        if (!cplx && !j)  /* special treatment for DC for real input */
+          pwr = Y[j]*Y[j];
+        else if (!cplx && j == N/2)  /* treat 0.5 * samplerate */
+          pwr = Y[1] * Y[1];  /* the Nyquist component is stored at index 1, not at j */
+        else
+          pwr = Y[2*j] * Y[2*j] + Y[2*j+1] * Y[2*j+1];
+        if (iter || PRINT_SPEC)
+          printf("%s fft %d:  pwr[j = %d] = %g == %f dB\n", (cplx ? "cplx":"real"), N, j, pwr, PWR2LOG(pwr) );
+        if (k == j)
+          pwrCar = pwr;
+        else if ( pwr > pwrOther ) {
+          pwrOther = pwr;
+          kmaxOther = j;
+        }
+      }
+
+      if ( PWR2LOG(pwrCar) - PWR2LOG(pwrOther) < EXPECTED_DYN_RANGE ) {
+        printf("%s fft %d amp %f iter %d:\n", (cplx ? "cplx":"real"), N, amp, iter);
+        printf("  carrier power  at bin %d: %g == %f dB\n", k, pwrCar, PWR2LOG(pwrCar) );
+        printf("  carrier mag || at bin %d: %g\n", k, sqrt(pwrCar) );
+        printf("  max other pwr  at bin %d: %g == %f dB\n", kmaxOther, pwrOther, PWR2LOG(pwrOther) );
+        printf("  dynamic range: %f dB\n\n", PWR2LOG(pwrCar) - PWR2LOG(pwrOther) );
+        retError = 1;
+        if ( iter == 0 )
+          continue;
+      }
+
+      if ( k > 0 && k != N/2 )
+      {
+        phi = atan2( Y[2*k+1], Y[2*k] );
+        if ( fabs( phi - phi0) > DEG_ERR_LIMIT * M_PI / 180.0 )
+        {
+          retError = 1;
+          printf("%s fft %d  bin %d amp %f : phase mismatch! phase = %f deg   expected = %f deg\n",
+              (cplx ? "cplx":"real"), N, k, amp, phi * 180.0 / M_PI, phi0 * 180.0 / M_PI );
+        }
+      }
+
+      expectedMag = cplx ? amp : ( (k == 0 || k == N/2) ? amp : (amp/2) );
+      mag = sqrt(pwrCar) / N;
+      if ( fabs(mag - expectedMag) > MAG_ERR_LIMIT )
+      {
+        retError = 1;
+        printf("%s fft %d  bin %d amp %f : mag = %g   expected = %g\n", (cplx ? "cplx":"real"), N, k, amp, mag, expextedMag );
+      }
+
+
+      /* now convert spectrum back */
+#ifdef PFFFT_ENABLE_FLOAT
+      if (useOrdered)
+        pffft_transform_ordered(s, Y, Z, W, PFFFT_BACKWARD);
+      else
+        pffft_transform(s, R, Z, W, PFFFT_BACKWARD);
+#else
+      if (useOrdered)
+        pffftd_transform_ordered(s, Y, Z, W, PFFFT_BACKWARD);
+      else
+        pffftd_transform(s, R, Z, W, PFFFT_BACKWARD);
+#endif
+
+      errSum = 0.0;
+      for ( j = 0; j < (cplx ? (2*N) : N); ++j )
+      {
+        /* scale back */
+        Z[j] /= N;
+        /* square sum errors over real (and imag parts) */
+        err = (X[j]-Z[j]) * (X[j]-Z[j]);
+        errSum += err;
+      }
+
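+      /* the round-trip tolerance scales with N: errSum accumulates one squared
+         error per sample, so a mean squared error up to 1E-7 is accepted */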
+      if ( errSum > N * 1E-7 )
+      {
+        retError = 1;
+        printf("%s fft %d  bin %d : inverse FFT doesn't match original signal! errSum = %g ; mean err = %g\n", (cplx ? "cplx":"real"), N, k, errSum, errSum / N);
+      }
+
+      break;
+    }
+
+  }
+#ifdef PFFFT_ENABLE_FLOAT
+  pffft_destroy_setup(s);
+  pffft_aligned_free(X);
+  pffft_aligned_free(Y);
+  pffft_aligned_free(Z);
+  pffft_aligned_free(R);
+  pffft_aligned_free(W);
+#else
+  pffftd_destroy_setup(s);
+  pffftd_aligned_free(X);
+  pffftd_aligned_free(Y);
+  pffftd_aligned_free(Z);
+  pffftd_aligned_free(R);
+  pffftd_aligned_free(W);
+#endif
+
+  return retError;
+}
+
+/* small functions inside pffft.c that detect (compiler) bugs with respect to SIMD instructions */
+void validate_pffft_simd(void);
+int  validate_pffft_simd_ex(FILE * DbgOut);
+void validate_pffftd_simd(void);
+int  validate_pffftd_simd_ex(FILE * DbgOut);
+
+
+
+int main(int argc, char **argv)
+{
+  int N, result, resN, resAll, i, k, resNextPw2, resIsPw2, resFFT;
+
+  int inp_power_of_two[] = { 1, 2, 3, 4, 5, 6, 7, 8,  9, 511, 512,  513 };
+  int ref_power_of_two[] = { 1, 2, 4, 4, 8, 8, 8, 8, 16, 512, 512, 1024 };
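+  /* each input is paired with the smallest power of two >= input, e.g. 513 -> 1024;
+     entries where input == reference (1, 2, 4, 8, 512) are exact powers of two */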
+
+  for ( i = 1; i < argc; ++i ) {
+
+    if (!strcmp(argv[i], "--test-simd")) {
+#ifdef PFFFT_ENABLE_FLOAT
+      int numErrs = validate_pffft_simd_ex(stdout);
+#else
+      int numErrs = validate_pffftd_simd_ex(stdout);
+#endif
+#ifdef PFFFT_ENABLE_FLOAT
+      fprintf( ( numErrs != 0 ? stderr : stdout ), "validate_pffft_simd_ex() returned %d errors!\n", numErrs);
+#else
+      fprintf( ( numErrs != 0 ? stderr : stdout ), "validate_pffftd_simd_ex() returned %d errors!\n", numErrs);
+#endif
+      return ( numErrs > 0 ? 1 : 0 );
+    }
+  }
+
+  resNextPw2 = 0;
+  resIsPw2 = 0;
+  for ( k = 0; k < (int)(sizeof(inp_power_of_two)/sizeof(inp_power_of_two[0])); ++k) {
+#ifdef PFFFT_ENABLE_FLOAT
+    N = pffft_next_power_of_two(inp_power_of_two[k]);
+#else
+    N = pffftd_next_power_of_two(inp_power_of_two[k]);
+#endif
+    if (N != ref_power_of_two[k]) {
+      resNextPw2 = 1;
+      printf("pffft_next_power_of_two(%d) does deliver %d, which is not reference result %d!\n",
+        inp_power_of_two[k], N, ref_power_of_two[k] );
+    }
+
+#ifdef PFFFT_ENABLE_FLOAT
+    result = pffft_is_power_of_two(inp_power_of_two[k]);
+#else
+    result = pffftd_is_power_of_two(inp_power_of_two[k]);
+#endif
+    if (inp_power_of_two[k] == ref_power_of_two[k]) {
+      if (!result) {
+        resIsPw2 = 1;
+        printf("pffft_is_power_of_two(%d) delivers false; expected true!\n", inp_power_of_two[k]);
+      }
+    } else {
+      if (result) {
+        resIsPw2 = 1;
+        printf("pffft_is_power_of_two(%d) delivers true; expected false!\n", inp_power_of_two[k]);
+      }
+    }
+  }
+  if (!resNextPw2)
+    printf("tests for pffft_next_power_of_two() succeeded.\n");
+  if (!resIsPw2)
+    printf("tests for pffft_is_power_of_two() succeeded.\n");
+
+  resFFT = 0;
+  for ( N = 32; N <= 65536; N *= 2 )
+  {
+    result = test(N, 1 /* cplx fft */, 1 /* useOrdered */);
+    resN = result;
+    resFFT |= result;
+
+    result = test(N, 0 /* real fft */, 1 /* useOrdered */);
+    resN |= result;
+    resFFT |= result;
+
+    result = test(N, 1 /* cplx fft */, 0 /* useOrdered */);
+    resN |= result;
+    resFFT |= result;
+
+    result = test(N, 0 /* real fft */, 0 /* useOrdered */);
+    resN |= result;
+    resFFT |= result;
+
+    if (!resN)
+      printf("tests for size %d succeeded successfully.\n", N);
+  }
+
+  if (!resFFT) {
+#ifdef PFFFT_ENABLE_FLOAT
+    printf("all pffft transform tests (FORWARD/BACKWARD, REAL/COMPLEX, float) succeeded successfully.\n");
+#else
+    printf("all pffft transform tests (FORWARD/BACKWARD, REAL/COMPLEX, double) succeeded successfully.\n");
+#endif
+  }
+
+  resAll = resNextPw2 | resIsPw2 | resFFT;
+  if (!resAll)
+    printf("all tests succeeded successfully.\n");
+  else
+    printf("there are failed tests!\n");
+
+  return resAll;
+}
+
diff --git a/test_pffft.cpp b/test_pffft.cpp
new file mode 100644
index 0000000..4104a1b
--- /dev/null
+++ b/test_pffft.cpp
@@ -0,0 +1,377 @@
+/*
+  Copyright (c) 2013  Julien Pommier ( pommier@modartt.com )
+  Copyright (c) 2020  Dario Mambro ( dario.mambro@gmail.com )
+  Copyright (c) 2020  Hayati Ayguen ( h_ayguen@web.de )
+
+  Small test & bench for PFFFT, comparing its performance with the scalar
+  FFTPACK, FFTW, and Apple vDSP
+
+  How to build:
+
+  on linux, with fftw3:
+  gcc -o test_pffft -DHAVE_FFTW -msse -mfpmath=sse -O3 -Wall -W pffft.c
+  test_pffft.c fftpack.c -L/usr/local/lib -I/usr/local/include/ -lfftw3f -lm
+
+  on macos, without fftw3:
+  clang -o test_pffft -DHAVE_VECLIB -O3 -Wall -W pffft.c test_pffft.c fftpack.c
+  -L/usr/local/lib -I/usr/local/include/ -framework Accelerate
+
+  on macos, with fftw3:
+  clang -o test_pffft -DHAVE_FFTW -DHAVE_VECLIB -O3 -Wall -W pffft.c
+  test_pffft.c fftpack.c -L/usr/local/lib -I/usr/local/include/ -lfftw3f
+  -framework Accelerate
+
+  alternatively, replace clang with gcc.
+
+  on windows, with visual c++:
+  cl /Ox -D_USE_MATH_DEFINES /arch:SSE test_pffft.c pffft.c fftpack.c
+
+  build without SIMD instructions:
+  gcc -o test_pffft -DPFFFT_SIMD_DISABLE -O3 -Wall -W pffft.c test_pffft.c
+  fftpack.c -lm
+
+ */
+
+#include "pffft.hpp"
+
+#include <assert.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+/* define our own constants - M_PI is missing when compiler extensions are turned off .. */
+#ifndef M_PI
+  #define M_PI    3.14159265358979323846  /* pi */
+#endif
+
+/* maximum allowed phase error in degree */
+#define DEG_ERR_LIMIT 1E-4
+
+/* maximum allowed magnitude error in amplitude (of 1.0 or 1.1) */
+#define MAG_ERR_LIMIT 1E-6
+
+#define PRINT_SPEC 0
+
+#define PWR2LOG(PWR) ((PWR) < 1E-30 ? 10.0 * log10(1E-30) : 10.0 * log10(PWR))
+
+template<typename T>
+bool
+Ttest(int N, bool useOrdered)
+{
+  typedef pffft::Fft<T> Fft;
+  typedef typename pffft::Fft<T>::Scalar  FftScalar;
+  typedef typename Fft::Complex FftComplex;
+
+  const bool cplx = pffft::Fft<T>::isComplexTransform();
+  const double EXPECTED_DYN_RANGE = Fft::isDoubleScalar() ? 215.0 : 140.0;
+
+  assert(Fft::isPowerOfTwo(N));
+
+  Fft fft = Fft(N);  // instantiate and prepareLength() for length N
+
+#if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1900)
+
+  // possible ways to declare/instantiate aligned vectors with C++11
+  //   some lines require the typedefs above
+  auto X = fft.valueVector();                    // for X = input vector
+  pffft::AlignedVector<typename Fft::Complex> Y = fft.spectrumVector();  // for Y = forward(X)
+  pffft::AlignedVector<FftScalar> R = fft.internalLayoutVector(); // for R = forwardInternalLayout(X)
+  pffft::AlignedVector<T> Z = fft.valueVector(); // for Z = inverse(Y) = inverse( forward(X) )
+                                                 //  or Z = inverseInternalLayout(R)
+#else
+
+  // possible ways to declare/instantiate aligned vectors with C++98
+  pffft::AlignedVector<T> X = fft.valueVector();     // for X = input vector
+  pffft::AlignedVector<FftComplex>   Y = fft.spectrumVector();  // for Y = forward(X)
+  pffft::AlignedVector<typename Fft::Scalar>  R = fft.internalLayoutVector(); // for R = forwardInternalLayout(X)
+  pffft::AlignedVector<T> Z = fft.valueVector();     // for Z = inverse(Y) = inverse( forward(X) )
+                                                     //  or Z = inverseInternalLayout(R)
+#endif
+
+  // work with complex - without the capabilities of a higher c++ standard
+  FftScalar* Xs = reinterpret_cast<FftScalar*>(X.data()); // for X = input vector
+  FftScalar* Ys = reinterpret_cast<FftScalar*>(Y.data()); // for Y = forward(X)
+  FftScalar* Zs = reinterpret_cast<FftScalar*>(Z.data()); // for Z = inverse(Y) = inverse( forward(X) )
+
+  int k, j, m, iter, kmaxOther = -1;  /* -1 until a non-carrier bin is seen */
+  bool retError = false;
+  double freq, dPhi, phi, phi0;
+  double pwr, pwrCar, pwrOther, err, errSum, mag, expectedMag;
+  double amp = 1.0;
+
+  for (k = m = 0; k < (cplx ? N : (1 + N / 2)); k += N / 16, ++m) {
+    amp = ((m % 3) == 0) ? 1.0 : 1.1;
+    freq = (k < N / 2) ? ((double)k / N) : ((double)(k - N) / N);
+    dPhi = 2.0 * M_PI * freq;
+    if (dPhi < 0.0)
+      dPhi += 2.0 * M_PI;
+
+    iter = -1;
+    while (1) {
+      ++iter;
+
+      if (iter)
+        printf("bin %d: dphi = %f for freq %f\n", k, dPhi, freq);
+
+      /* generate cosine carrier as time signal - start at defined phase phi0 */
+      phi = phi0 =
+        (m % 4) * 0.125 * M_PI; /* have phi0 < 90 deg to be normalized */
+      for (j = 0; j < N; ++j) {
+        if (cplx) {
+          Xs[2 * j] = amp * cos(phi);     /* real part */
+          Xs[2 * j + 1] = amp * sin(phi); /* imag part */
+        } else
+          Xs[j] = amp * cos(phi); /* only real part */
+
+        /* phase increment .. keep phi normalized: cos()/sin() accuracy degrades for large arguments! */
+        phi += dPhi;
+        if (phi >= M_PI)
+          phi -= 2.0 * M_PI;
+      }
+
+      /* forward transform from X --> Y  .. using work buffer W */
+      if (useOrdered)
+        fft.forward(X, Y);
+      else {
+        fft.forwardToInternalLayout(X, R); /* use R for reordering */
+        fft.reorderSpectrum(R, Y); /* have canonical order in Y[] for power calculations */
+      }
+
+      pwrOther = -1.0;
+      pwrCar = 0;
+
+      /* for positive frequencies: 0 to 0.5 * samplerate */
+      /* and also for negative frequencies: -0.5 * samplerate to 0 */
+      for (j = 0; j < (cplx ? N : (1 + N / 2)); ++j) {
+        if (!cplx && !j) /* special treatment for DC for real input */
+          pwr = Ys[j] * Ys[j];
+        else if (!cplx && j == N / 2) /* treat 0.5 * samplerate */
+          pwr = Ys[1] * Ys[1]; /* the Nyquist component is stored at index 1, not at j */
+        else
+          pwr = Ys[2 * j] * Ys[2 * j] + Ys[2 * j + 1] * Ys[2 * j + 1];
+        if (iter || PRINT_SPEC)
+          printf("%s fft %d:  pwr[j = %d] = %g == %f dB\n",
+                 (cplx ? "cplx" : "real"),
+                 N,
+                 j,
+                 pwr,
+                 PWR2LOG(pwr));
+        if (k == j)
+          pwrCar = pwr;
+        else if (pwr > pwrOther) {
+          pwrOther = pwr;
+          kmaxOther = j;
+        }
+      }
+
+      if (PWR2LOG(pwrCar) - PWR2LOG(pwrOther) < EXPECTED_DYN_RANGE) {
+        printf("%s fft %d amp %f iter %d:\n",
+               (cplx ? "cplx" : "real"),
+               N,
+               amp,
+               iter);
+        printf("  carrier power  at bin %d: %g == %f dB\n",
+               k,
+               pwrCar,
+               PWR2LOG(pwrCar));
+        printf("  carrier mag || at bin %d: %g\n", k, sqrt(pwrCar));
+        printf("  max other pwr  at bin %d: %g == %f dB\n",
+               kmaxOther,
+               pwrOther,
+               PWR2LOG(pwrOther));
+        printf("  dynamic range: %f dB\n\n",
+               PWR2LOG(pwrCar) - PWR2LOG(pwrOther));
+        retError = true;
+        if (iter == 0)
+          continue;
+      }
+
+      if (k > 0 && k != N / 2) {
+        phi = atan2(Ys[2 * k + 1], Ys[2 * k]);
+        if (fabs(phi - phi0) > DEG_ERR_LIMIT * M_PI / 180.0) {
+          retError = true;
+          printf("%s fft %d  bin %d amp %f : phase mismatch! phase = %f deg   "
+                 "expected = %f deg\n",
+                 (cplx ? "cplx" : "real"),
+                 N,
+                 k,
+                 amp,
+                 phi * 180.0 / M_PI,
+                 phi0 * 180.0 / M_PI);
+        }
+      }
+
+      expectedMag = cplx ? amp : ((k == 0 || k == N / 2) ? amp : (amp / 2));
+      mag = sqrt(pwrCar) / N;
+      if (fabs(mag - expectedMag) > MAG_ERR_LIMIT) {
+        retError = true;
+        printf("%s fft %d  bin %d amp %f : mag = %g   expected = %g\n",
+               (cplx ? "cplx" : "real"),
+               N,
+               k,
+               amp,
+               mag,
+               expectedMag);
+      }
+
+      /* now convert spectrum back */
+      if (useOrdered)
+        fft.inverse(Y, Z);
+      else
+        fft.inverseFromInternalLayout(R, Z); /* inverse() from internal Layout */
+
+      errSum = 0.0;
+      for (j = 0; j < (cplx ? (2 * N) : N); ++j) {
+        /* scale back */
+        Zs[j] /= N;
+        /* square sum errors over real (and imag parts) */
+        err = (Xs[j] - Zs[j]) * (Xs[j] - Zs[j]);
+        errSum += err;
+      }
+
+      if (errSum > N * 1E-7) {
+        retError = true;
+        printf("%s fft %d  bin %d : inverse FFT doesn't match original signal! "
+               "errSum = %g ; mean err = %g\n",
+               (cplx ? "cplx" : "real"),
+               N,
+               k,
+               errSum,
+               errSum / N);
+      }
+
+      break;
+    }
+  }
+
+  // X, Y, Z and R are std::vector based (RAII) - no explicit aligned free is needed
+
+  return retError;
+}
+
+bool
+test(int N, bool useComplex, bool useOrdered)
+{
+  if (useComplex) {
+    return
+#ifdef PFFFT_ENABLE_FLOAT
+           Ttest< std::complex<float> >(N, useOrdered)
+#endif
+#if defined(PFFFT_ENABLE_FLOAT) && defined(PFFFT_ENABLE_DOUBLE)
+        &&
+#endif
+#ifdef PFFFT_ENABLE_DOUBLE
+           Ttest< std::complex<double> >(N, useOrdered)
+#endif
+           ;
+  } else {
+    return
+#ifdef PFFFT_ENABLE_FLOAT
+           Ttest<float>(N, useOrdered)
+#endif
+#if defined(PFFFT_ENABLE_FLOAT) && defined(PFFFT_ENABLE_DOUBLE)
+        &&
+#endif
+#ifdef PFFFT_ENABLE_DOUBLE
+           Ttest<double>(N, useOrdered)
+#endif
+           ;
+  }
+}
+
+int
+main(int argc, char** argv)
+{
+  int N, result, resN, resAll, k, resNextPw2, resIsPw2, resFFT;
+
+  int inp_power_of_two[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 511, 512, 513 };
+  int ref_power_of_two[] = { 1, 2, 4, 4, 8, 8, 8, 8, 16, 512, 512, 1024 };
+
+  resNextPw2 = 0;
+  resIsPw2 = 0;
+  for (k = 0; k < (sizeof(inp_power_of_two) / sizeof(inp_power_of_two[0]));
+       ++k) {
+#ifdef PFFFT_ENABLE_FLOAT
+    N = pffft::Fft<float>::nextPowerOfTwo(inp_power_of_two[k]);
+#else
+    N = pffft::Fft<double>::nextPowerOfTwo(inp_power_of_two[k]);
+#endif
+    if (N != ref_power_of_two[k]) {
+      resNextPw2 = 1;
+      printf("pffft_next_power_of_two(%d) does deliver %d, which is not "
+             "reference result %d!\n",
+             inp_power_of_two[k],
+             N,
+             ref_power_of_two[k]);
+    }
+
+#ifdef PFFFT_ENABLE_FLOAT
+    result = pffft::Fft<float>::isPowerOfTwo(inp_power_of_two[k]);
+#else
+    result = pffft::Fft<double>::isPowerOfTwo(inp_power_of_two[k]);
+#endif
+    if (inp_power_of_two[k] == ref_power_of_two[k]) {
+      if (!result) {
+        resIsPw2 = 1;
+        printf("pffft_is_power_of_two(%d) delivers false; expected true!\n",
+               inp_power_of_two[k]);
+      }
+    } else {
+      if (result) {
+        resIsPw2 = 1;
+        printf("pffft_is_power_of_two(%d) delivers true; expected false!\n",
+               inp_power_of_two[k]);
+      }
+    }
+  }
+  if (!resNextPw2)
+    printf("tests for pffft_next_power_of_two() succeeded.\n");
+  if (!resIsPw2)
+    printf("tests for pffft_is_power_of_two() succeeded.\n");
+
+  resFFT = 0;
+  for (N = 32; N <= 65536; N *= 2) {
+    result = test(N, 1 /* cplx fft */, 1 /* useOrdered */);
+    resN = result;
+    resFFT |= result;
+
+    result = test(N, 0 /* real fft */, 1 /* useOrdered */);
+    resN |= result;
+    resFFT |= result;
+
+    result = test(N, 1 /* cplx fft */, 0 /* useOrdered */);
+    resN |= result;
+    resFFT |= result;
+
+    result = test(N, 0 /* real fft */, 0 /* useOrdered */);
+    resN |= result;
+    resFFT |= result;
+
+    if (!resN)
+      printf("tests for size %d succeeded successfully.\n", N);
+  }
+
+  if (!resFFT)
+    printf("all pffft transform tests (FORWARD/BACKWARD, REAL/COMPLEX, "
+#ifdef PFFFT_ENABLE_FLOAT
+           "float"
+#endif
+#if defined(PFFFT_ENABLE_FLOAT) && defined(PFFFT_ENABLE_DOUBLE)
+            "/"
+#endif
+#ifdef PFFFT_ENABLE_DOUBLE
+           "double"
+#endif
+           ") succeeded successfully.\n");
+
+  resAll = resNextPw2 | resIsPw2 | resFFT;
+  if (!resAll)
+    printf("all tests succeeded successfully.\n");
+  else
+    printf("there are failed tests!\n");
+
+  return resAll;
+}
diff --git a/use_gcc8.inc b/use_gcc8.inc
new file mode 100755
index 0000000..c4535f1
--- /dev/null
+++ b/use_gcc8.inc
@@ -0,0 +1,2 @@
+export GCC_WITH_CMAKE=$(which gcc-8)
+export GPP_WITH_CMAKE=$(which g++-8)
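+# source this file (". use_gcc8.inc") before building; the exported variables
+# are presumably consumed by the build scripts to hand gcc-8/g++-8 to cmake
+# (assumes gcc-8 and g++-8 are installed and on the PATH)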