buildenv: Split up SSE3 and SSE4.1 code

Currently, SSE3 and SSE4.1 code is mixed together with generic code in
one file. This introduces the risk that the compiler accidentally emits
SSE4.1 instructions into an SSE3 code path or, even worse, into a
generic one.

This commit splits the SSE3 and SSE4.1 code into separate files and
compiles each with its matching target option (-msse3 and -msse4.1,
respectively).

Change-Id: I846e190e92f1258cd412d1b2d79b539e204e04b3
Philipp Maier 2017-03-20 12:08:42 +01:00 committed by Tom Tsou
parent f5bf33b287
commit e8ae9fcf38
12 changed files with 893 additions and 661 deletions
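
The pattern this split enables: each SIMD variant lives in its own
translation unit compiled with exactly its own -msse flag, while the
generic code and the runtime selector are built with no SIMD flags at
all. A minimal sketch of such a selector, assuming GCC's
__builtin_cpu_supports() on x86; the function names here are
illustrative, not the ones used in this tree:

/* dispatch.c -- compiled WITHOUT any -msse* flag, so the compiler
 * cannot emit SIMD instructions into this translation unit. */
void convert_generic(float *out, const short *in, int len); /* plain C  */
void convert_sse3(float *out, const short *in, int len);    /* -msse3   */
void convert_sse41(float *out, const short *in, int len);   /* -msse4.1 */

static void (*convert_ptr)(float *, const short *, int) = convert_generic;

void convert_select_impl(void)
{
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
	__builtin_cpu_init();
	if (__builtin_cpu_supports("sse4.1"))
		convert_ptr = convert_sse41;
	else if (__builtin_cpu_supports("sse3"))
		convert_ptr = convert_sse3;
#endif
}

With everything in one -msse4.1 translation unit, nothing stops the
optimizer from auto-vectorizing even the generic loop with SSE4.1
instructions, which is exactly the failure mode described above.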

View File

@@ -1,7 +1,28 @@
if !ARCH_ARM
AM_CFLAGS = -Wall -std=gnu99 $(SIMD_FLAGS) -I${srcdir}/../common
AM_CFLAGS = -Wall -std=gnu99 -I${srcdir}/../common
noinst_LTLIBRARIES = libarch.la
noinst_LTLIBRARIES += libarch_sse_3.la
noinst_LTLIBRARIES += libarch_sse_4_1.la
libarch_la_LIBADD =
# SSE 3 specific code
if HAVE_SSE3
libarch_sse_3_la_SOURCES = \
convert_sse_3.c \
convolve_sse_3.c
libarch_sse_3_la_CFLAGS = $(AM_CFLAGS) -msse3
libarch_la_LIBADD += libarch_sse_3.la
endif
# SSE 4.1 specific code
if HAVE_SSE4_1
libarch_sse_4_1_la_SOURCES = \
convert_sse_4_1.c
libarch_sse_4_1_la_CFLAGS = $(AM_CFLAGS) -msse4.1
libarch_la_LIBADD += libarch_sse_4_1.la
endif
libarch_la_SOURCES = \
../common/convolve_base.c \

View File

@@ -20,6 +20,8 @@
#include <malloc.h>
#include <string.h>
#include "convert.h"
#include "convert_sse_3.h"
#include "convert_sse_4_1.h"
#ifdef HAVE_CONFIG_H
#include "config.h"
@@ -36,140 +38,6 @@ struct convert_cpu_context {
static struct convert_cpu_context c;
#ifdef HAVE_SSE3
#include <xmmintrin.h>
#include <emmintrin.h>
#ifdef HAVE_SSE4_1
#include <smmintrin.h>
/* 16*N 16-bit signed integer converted to single precision floats */
static void _sse_convert_si16_ps_16n(float *restrict out,
const short *restrict in,
int len)
{
__m128i m0, m1, m2, m3, m4, m5;
__m128 m6, m7, m8, m9;
for (int i = 0; i < len / 16; i++) {
/* Load (unaligned) packed 16-bit integers */
m0 = _mm_loadu_si128((__m128i *) &in[16 * i + 0]);
m1 = _mm_loadu_si128((__m128i *) &in[16 * i + 8]);
/* Unpack */
m2 = _mm_cvtepi16_epi32(m0);
m4 = _mm_cvtepi16_epi32(m1);
m0 = _mm_shuffle_epi32(m0, _MM_SHUFFLE(1, 0, 3, 2));
m1 = _mm_shuffle_epi32(m1, _MM_SHUFFLE(1, 0, 3, 2));
m3 = _mm_cvtepi16_epi32(m0);
m5 = _mm_cvtepi16_epi32(m1);
/* Convert */
m6 = _mm_cvtepi32_ps(m2);
m7 = _mm_cvtepi32_ps(m3);
m8 = _mm_cvtepi32_ps(m4);
m9 = _mm_cvtepi32_ps(m5);
/* Store */
_mm_storeu_ps(&out[16 * i + 0], m6);
_mm_storeu_ps(&out[16 * i + 4], m7);
_mm_storeu_ps(&out[16 * i + 8], m8);
_mm_storeu_ps(&out[16 * i + 12], m9);
}
}
/* 16*N 16-bit signed integer conversion with remainder */
static void _sse_convert_si16_ps(float *restrict out,
const short *restrict in,
int len)
{
int start = len / 16 * 16;
_sse_convert_si16_ps_16n(out, in, len);
for (int i = 0; i < len % 16; i++)
out[start + i] = in[start + i];
}
#endif /* HAVE_SSE4_1 */
/* 8*N single precision floats scaled and converted to 16-bit signed integer */
static void _sse_convert_scale_ps_si16_8n(short *restrict out,
const float *restrict in,
float scale, int len)
{
__m128 m0, m1, m2;
__m128i m4, m5;
for (int i = 0; i < len / 8; i++) {
/* Load (unaligned) packed floats */
m0 = _mm_loadu_ps(&in[8 * i + 0]);
m1 = _mm_loadu_ps(&in[8 * i + 4]);
m2 = _mm_load1_ps(&scale);
/* Scale */
m0 = _mm_mul_ps(m0, m2);
m1 = _mm_mul_ps(m1, m2);
/* Convert */
m4 = _mm_cvtps_epi32(m0);
m5 = _mm_cvtps_epi32(m1);
/* Pack and store */
m5 = _mm_packs_epi32(m4, m5);
_mm_storeu_si128((__m128i *) &out[8 * i], m5);
}
}
/* 8*N single precision floats scaled and converted with remainder */
static void _sse_convert_scale_ps_si16(short *restrict out,
const float *restrict in,
float scale, int len)
{
int start = len / 8 * 8;
_sse_convert_scale_ps_si16_8n(out, in, scale, len);
for (int i = 0; i < len % 8; i++)
out[start + i] = in[start + i] * scale;
}
/* 16*N single precision floats scaled and converted to 16-bit signed integer */
static void _sse_convert_scale_ps_si16_16n(short *restrict out,
const float *restrict in,
float scale, int len)
{
__m128 m0, m1, m2, m3, m4;
__m128i m5, m6, m7, m8;
for (int i = 0; i < len / 16; i++) {
/* Load (unaligned) packed floats */
m0 = _mm_loadu_ps(&in[16 * i + 0]);
m1 = _mm_loadu_ps(&in[16 * i + 4]);
m2 = _mm_loadu_ps(&in[16 * i + 8]);
m3 = _mm_loadu_ps(&in[16 * i + 12]);
m4 = _mm_load1_ps(&scale);
/* Scale */
m0 = _mm_mul_ps(m0, m4);
m1 = _mm_mul_ps(m1, m4);
m2 = _mm_mul_ps(m2, m4);
m3 = _mm_mul_ps(m3, m4);
/* Convert */
m5 = _mm_cvtps_epi32(m0);
m6 = _mm_cvtps_epi32(m1);
m7 = _mm_cvtps_epi32(m2);
m8 = _mm_cvtps_epi32(m3);
/* Pack and store */
m5 = _mm_packs_epi32(m5, m6);
m7 = _mm_packs_epi32(m7, m8);
_mm_storeu_si128((__m128i *) &out[16 * i + 0], m5);
_mm_storeu_si128((__m128i *) &out[16 * i + 8], m7);
}
}
#endif
void convert_init(void)
{
c.convert_scale_ps_si16_16n = base_convert_float_short;

View File

@@ -0,0 +1,107 @@
/*
* SSE type conversions
* Copyright (C) 2013 Thomas Tsou <tom@tsou.cc>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <malloc.h>
#include <string.h>
#include "convert_sse_3.h"
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAVE_SSE3
#include <xmmintrin.h>
#include <emmintrin.h>
/* 8*N single precision floats scaled and converted to 16-bit signed integer */
void _sse_convert_scale_ps_si16_8n(short *restrict out,
const float *restrict in,
float scale, int len)
{
__m128 m0, m1, m2;
__m128i m4, m5;
for (int i = 0; i < len / 8; i++) {
/* Load (unaligned) packed floats */
m0 = _mm_loadu_ps(&in[8 * i + 0]);
m1 = _mm_loadu_ps(&in[8 * i + 4]);
m2 = _mm_load1_ps(&scale);
/* Scale */
m0 = _mm_mul_ps(m0, m2);
m1 = _mm_mul_ps(m1, m2);
/* Convert */
m4 = _mm_cvtps_epi32(m0);
m5 = _mm_cvtps_epi32(m1);
/* Pack and store */
m5 = _mm_packs_epi32(m4, m5);
_mm_storeu_si128((__m128i *) &out[8 * i], m5);
}
}
/* 8*N single precision floats scaled and converted with remainder */
void _sse_convert_scale_ps_si16(short *restrict out,
const float *restrict in, float scale, int len)
{
int start = len / 8 * 8;
_sse_convert_scale_ps_si16_8n(out, in, scale, len);
for (int i = 0; i < len % 8; i++)
out[start + i] = in[start + i] * scale;
}
/* 16*N single precision floats scaled and converted to 16-bit signed integer */
void _sse_convert_scale_ps_si16_16n(short *restrict out,
const float *restrict in,
float scale, int len)
{
__m128 m0, m1, m2, m3, m4;
__m128i m5, m6, m7, m8;
for (int i = 0; i < len / 16; i++) {
/* Load (unaligned) packed floats */
m0 = _mm_loadu_ps(&in[16 * i + 0]);
m1 = _mm_loadu_ps(&in[16 * i + 4]);
m2 = _mm_loadu_ps(&in[16 * i + 8]);
m3 = _mm_loadu_ps(&in[16 * i + 12]);
m4 = _mm_load1_ps(&scale);
/* Scale */
m0 = _mm_mul_ps(m0, m4);
m1 = _mm_mul_ps(m1, m4);
m2 = _mm_mul_ps(m2, m4);
m3 = _mm_mul_ps(m3, m4);
/* Convert */
m5 = _mm_cvtps_epi32(m0);
m6 = _mm_cvtps_epi32(m1);
m7 = _mm_cvtps_epi32(m2);
m8 = _mm_cvtps_epi32(m3);
/* Pack and store */
m5 = _mm_packs_epi32(m5, m6);
m7 = _mm_packs_epi32(m7, m8);
_mm_storeu_si128((__m128i *) &out[16 * i + 0], m5);
_mm_storeu_si128((__m128i *) &out[16 * i + 8], m7);
}
}
#endif
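
For reference, a scalar sketch of what the vector path above computes:
_mm_cvtps_epi32 rounds to nearest (in the default rounding mode) and
_mm_packs_epi32 saturates to the int16 range, so the per-element
operation is scale, round, clamp. The function name is illustrative:

#include <math.h>

/* Scalar reference for the mul/cvtps/packs sequence above:
 * scale, round to nearest, saturate to [-32768, 32767]. */
static void scalar_convert_scale_ps_si16(short *out, const float *in,
					 float scale, int len)
{
	for (int i = 0; i < len; i++) {
		long v = lrintf(in[i] * scale); /* round, like _mm_cvtps_epi32 */
		if (v > 32767)
			v = 32767;              /* _mm_packs_epi32 saturates */
		else if (v < -32768)
			v = -32768;
		out[i] = (short)v;
	}
}

Note that the remainder loop in _sse_convert_scale_ps_si16() uses a
plain cast, which truncates toward zero rather than rounding, so the
tail elements can differ from the vector result by one LSB.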

View File

@@ -0,0 +1,34 @@
/*
* SSE type conversions
* Copyright (C) 2013 Thomas Tsou <tom@tsou.cc>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#pragma once
/* 8*N single precision floats scaled and converted to 16-bit signed integer */
void _sse_convert_scale_ps_si16_8n(short *restrict out,
const float *restrict in,
float scale, int len);
/* 8*N single precision floats scaled and converted with remainder */
void _sse_convert_scale_ps_si16(short *restrict out,
const float *restrict in, float scale, int len);
/* 16*N single precision floats scaled and converted to 16-bit signed integer */
void _sse_convert_scale_ps_si16_16n(short *restrict out,
const float *restrict in,
float scale, int len);

View File

@@ -0,0 +1,77 @@
/*
* SSE type conversions
* Copyright (C) 2013 Thomas Tsou <tom@tsou.cc>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <malloc.h>
#include <string.h>
#include "convert_sse_4_1.h"
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAVE_SSE4_1
#include <smmintrin.h>
/* 16*N 16-bit signed integer converted to single precision floats */
void _sse_convert_si16_ps_16n(float *restrict out,
const short *restrict in, int len)
{
__m128i m0, m1, m2, m3, m4, m5;
__m128 m6, m7, m8, m9;
for (int i = 0; i < len / 16; i++) {
/* Load (unaligned) packed 16-bit integers */
m0 = _mm_loadu_si128((__m128i *) &in[16 * i + 0]);
m1 = _mm_loadu_si128((__m128i *) &in[16 * i + 8]);
/* Unpack */
m2 = _mm_cvtepi16_epi32(m0);
m4 = _mm_cvtepi16_epi32(m1);
m0 = _mm_shuffle_epi32(m0, _MM_SHUFFLE(1, 0, 3, 2));
m1 = _mm_shuffle_epi32(m1, _MM_SHUFFLE(1, 0, 3, 2));
m3 = _mm_cvtepi16_epi32(m0);
m5 = _mm_cvtepi16_epi32(m1);
/* Convert */
m6 = _mm_cvtepi32_ps(m2);
m7 = _mm_cvtepi32_ps(m3);
m8 = _mm_cvtepi32_ps(m4);
m9 = _mm_cvtepi32_ps(m5);
/* Store */
_mm_storeu_ps(&out[16 * i + 0], m6);
_mm_storeu_ps(&out[16 * i + 4], m7);
_mm_storeu_ps(&out[16 * i + 8], m8);
_mm_storeu_ps(&out[16 * i + 12], m9);
}
}
/* 16*N 16-bit signed integer conversion with remainder */
void _sse_convert_si16_ps(float *restrict out,
const short *restrict in, int len)
{
int start = len / 16 * 16;
_sse_convert_si16_ps_16n(out, in, len);
for (int i = 0; i < len % 16; i++)
out[start + i] = in[start + i];
}
#endif
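
The scalar equivalent of this path is just a widening cast, since every
16-bit integer is exactly representable as a float; the shuffles above
exist only because _mm_cvtepi16_epi32 sign-extends the low four shorts
of a register, so each register is converted in two halves. A sketch
(illustrative name):

/* Scalar reference: int16 -> float is exact, one cast per element. */
static void scalar_convert_si16_ps(float *out, const short *in, int len)
{
	for (int i = 0; i < len; i++)
		out[i] = in[i];
}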

View File

@@ -0,0 +1,28 @@
/*
* SSE type conversions
* Copyright (C) 2013 Thomas Tsou <tom@tsou.cc>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#pragma once
/* 16*N 16-bit signed integer converted to single precision floats */
void _sse_convert_si16_ps_16n(float *restrict out,
const short *restrict in, int len);
/* 16*N 16-bit signed integer conversion with remainder */
void _sse_convert_si16_ps(float *restrict out,
const short *restrict in, int len);

View File

@@ -21,6 +21,7 @@
#include <string.h>
#include <stdio.h>
#include "convolve.h"
#include "convolve_sse_3.h"
#ifdef HAVE_CONFIG_H
#include "config.h"
@@ -67,529 +68,6 @@ int _base_convolve_complex(const float *x, int x_len,
int bounds_check(int x_len, int h_len, int y_len,
int start, int len, int step);
#ifdef HAVE_SSE3
#include <xmmintrin.h>
#include <pmmintrin.h>
/* 4-tap SSE complex-real convolution */
static void sse_conv_real4(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len,
int step, int offset)
{
/* NOTE: The parameter list of this function has to match the parameter
* list of _base_convolve_real() in convolve_base.c. This specific
* implementation ignores some of those parameters, namely x_len, y_len,
* offset and step. */
__m128 m0, m1, m2, m3, m4, m5, m6, m7;
const float *_x = &x[2 * (-(h_len - 1) + start)];
/* Load (aligned) filter taps */
m0 = _mm_load_ps(&h[0]);
m1 = _mm_load_ps(&h[4]);
m7 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
for (int i = 0; i < len; i++) {
/* Load (unaligned) input data */
m0 = _mm_loadu_ps(&_x[2 * i + 0]);
m1 = _mm_loadu_ps(&_x[2 * i + 4]);
m2 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m3 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
/* Quad multiply */
m4 = _mm_mul_ps(m2, m7);
m5 = _mm_mul_ps(m3, m7);
/* Sum and store */
m6 = _mm_hadd_ps(m4, m5);
m0 = _mm_hadd_ps(m6, m6);
_mm_store_ss(&y[2 * i + 0], m0);
m0 = _mm_shuffle_ps(m0, m0, _MM_SHUFFLE(0, 3, 2, 1));
_mm_store_ss(&y[2 * i + 1], m0);
}
}
/* 8-tap SSE complex-real convolution */
static void sse_conv_real8(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len,
int step, int offset)
{
/* See NOTE in sse_conv_real4() */
__m128 m0, m1, m2, m3, m4, m5, m6, m7, m8, m9;
const float *_x = &x[2 * (-(h_len - 1) + start)];
/* Load (aligned) filter taps */
m0 = _mm_load_ps(&h[0]);
m1 = _mm_load_ps(&h[4]);
m2 = _mm_load_ps(&h[8]);
m3 = _mm_load_ps(&h[12]);
m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m5 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
for (int i = 0; i < len; i++) {
/* Load (unaligned) input data */
m0 = _mm_loadu_ps(&_x[2 * i + 0]);
m1 = _mm_loadu_ps(&_x[2 * i + 4]);
m2 = _mm_loadu_ps(&_x[2 * i + 8]);
m3 = _mm_loadu_ps(&_x[2 * i + 12]);
m6 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m7 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
m8 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m9 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
/* Quad multiply */
m6 = _mm_mul_ps(m6, m4);
m7 = _mm_mul_ps(m7, m4);
m8 = _mm_mul_ps(m8, m5);
m9 = _mm_mul_ps(m9, m5);
/* Sum and store */
m6 = _mm_add_ps(m6, m8);
m7 = _mm_add_ps(m7, m9);
m6 = _mm_hadd_ps(m6, m7);
m6 = _mm_hadd_ps(m6, m6);
_mm_store_ss(&y[2 * i + 0], m6);
m6 = _mm_shuffle_ps(m6, m6, _MM_SHUFFLE(0, 3, 2, 1));
_mm_store_ss(&y[2 * i + 1], m6);
}
}
/* 12-tap SSE complex-real convolution */
static void sse_conv_real12(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len,
int step, int offset)
{
/* See NOTE in sse_conv_real4() */
__m128 m0, m1, m2, m3, m4, m5, m6, m7;
__m128 m8, m9, m10, m11, m12, m13, m14;
const float *_x = &x[2 * (-(h_len - 1) + start)];
/* Load (aligned) filter taps */
m0 = _mm_load_ps(&h[0]);
m1 = _mm_load_ps(&h[4]);
m2 = _mm_load_ps(&h[8]);
m3 = _mm_load_ps(&h[12]);
m4 = _mm_load_ps(&h[16]);
m5 = _mm_load_ps(&h[20]);
m12 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m13 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m14 = _mm_shuffle_ps(m4, m5, _MM_SHUFFLE(0, 2, 0, 2));
for (int i = 0; i < len; i++) {
/* Load (unaligned) input data */
m0 = _mm_loadu_ps(&_x[2 * i + 0]);
m1 = _mm_loadu_ps(&_x[2 * i + 4]);
m2 = _mm_loadu_ps(&_x[2 * i + 8]);
m3 = _mm_loadu_ps(&_x[2 * i + 12]);
m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
m6 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m7 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
m0 = _mm_loadu_ps(&_x[2 * i + 16]);
m1 = _mm_loadu_ps(&_x[2 * i + 20]);
m8 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m9 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
/* Quad multiply */
m0 = _mm_mul_ps(m4, m12);
m1 = _mm_mul_ps(m5, m12);
m2 = _mm_mul_ps(m6, m13);
m3 = _mm_mul_ps(m7, m13);
m4 = _mm_mul_ps(m8, m14);
m5 = _mm_mul_ps(m9, m14);
/* Sum and store */
m8 = _mm_add_ps(m0, m2);
m9 = _mm_add_ps(m1, m3);
m10 = _mm_add_ps(m8, m4);
m11 = _mm_add_ps(m9, m5);
m2 = _mm_hadd_ps(m10, m11);
m3 = _mm_hadd_ps(m2, m2);
_mm_store_ss(&y[2 * i + 0], m3);
m3 = _mm_shuffle_ps(m3, m3, _MM_SHUFFLE(0, 3, 2, 1));
_mm_store_ss(&y[2 * i + 1], m3);
}
}
/* 16-tap SSE complex-real convolution */
static void sse_conv_real16(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len,
int step, int offset)
{
/* See NOTE in sse_conv_real4() */
__m128 m0, m1, m2, m3, m4, m5, m6, m7;
__m128 m8, m9, m10, m11, m12, m13, m14, m15;
const float *_x = &x[2 * (-(h_len - 1) + start)];
/* Load (aligned) filter taps */
m0 = _mm_load_ps(&h[0]);
m1 = _mm_load_ps(&h[4]);
m2 = _mm_load_ps(&h[8]);
m3 = _mm_load_ps(&h[12]);
m4 = _mm_load_ps(&h[16]);
m5 = _mm_load_ps(&h[20]);
m6 = _mm_load_ps(&h[24]);
m7 = _mm_load_ps(&h[28]);
m12 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m13 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m14 = _mm_shuffle_ps(m4, m5, _MM_SHUFFLE(0, 2, 0, 2));
m15 = _mm_shuffle_ps(m6, m7, _MM_SHUFFLE(0, 2, 0, 2));
for (int i = 0; i < len; i++) {
/* Load (unaligned) input data */
m0 = _mm_loadu_ps(&_x[2 * i + 0]);
m1 = _mm_loadu_ps(&_x[2 * i + 4]);
m2 = _mm_loadu_ps(&_x[2 * i + 8]);
m3 = _mm_loadu_ps(&_x[2 * i + 12]);
m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
m6 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m7 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
m0 = _mm_loadu_ps(&_x[2 * i + 16]);
m1 = _mm_loadu_ps(&_x[2 * i + 20]);
m2 = _mm_loadu_ps(&_x[2 * i + 24]);
m3 = _mm_loadu_ps(&_x[2 * i + 28]);
m8 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m9 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
m10 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m11 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
/* Quad multiply */
m0 = _mm_mul_ps(m4, m12);
m1 = _mm_mul_ps(m5, m12);
m2 = _mm_mul_ps(m6, m13);
m3 = _mm_mul_ps(m7, m13);
m4 = _mm_mul_ps(m8, m14);
m5 = _mm_mul_ps(m9, m14);
m6 = _mm_mul_ps(m10, m15);
m7 = _mm_mul_ps(m11, m15);
/* Sum and store */
m8 = _mm_add_ps(m0, m2);
m9 = _mm_add_ps(m1, m3);
m10 = _mm_add_ps(m4, m6);
m11 = _mm_add_ps(m5, m7);
m0 = _mm_add_ps(m8, m10);
m1 = _mm_add_ps(m9, m11);
m2 = _mm_hadd_ps(m0, m1);
m3 = _mm_hadd_ps(m2, m2);
_mm_store_ss(&y[2 * i + 0], m3);
m3 = _mm_shuffle_ps(m3, m3, _MM_SHUFFLE(0, 3, 2, 1));
_mm_store_ss(&y[2 * i + 1], m3);
}
}
/* 20-tap SSE complex-real convolution */
static void sse_conv_real20(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len,
int step, int offset)
{
/* See NOTE in sse_conv_real4() */
__m128 m0, m1, m2, m3, m4, m5, m6, m7;
__m128 m8, m9, m11, m12, m13, m14, m15;
const float *_x = &x[2 * (-(h_len - 1) + start)];
/* Load (aligned) filter taps */
m0 = _mm_load_ps(&h[0]);
m1 = _mm_load_ps(&h[4]);
m2 = _mm_load_ps(&h[8]);
m3 = _mm_load_ps(&h[12]);
m4 = _mm_load_ps(&h[16]);
m5 = _mm_load_ps(&h[20]);
m6 = _mm_load_ps(&h[24]);
m7 = _mm_load_ps(&h[28]);
m8 = _mm_load_ps(&h[32]);
m9 = _mm_load_ps(&h[36]);
m11 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m12 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m13 = _mm_shuffle_ps(m4, m5, _MM_SHUFFLE(0, 2, 0, 2));
m14 = _mm_shuffle_ps(m6, m7, _MM_SHUFFLE(0, 2, 0, 2));
m15 = _mm_shuffle_ps(m8, m9, _MM_SHUFFLE(0, 2, 0, 2));
for (int i = 0; i < len; i++) {
/* Multiply-accumulate first 12 taps */
m0 = _mm_loadu_ps(&_x[2 * i + 0]);
m1 = _mm_loadu_ps(&_x[2 * i + 4]);
m2 = _mm_loadu_ps(&_x[2 * i + 8]);
m3 = _mm_loadu_ps(&_x[2 * i + 12]);
m4 = _mm_loadu_ps(&_x[2 * i + 16]);
m5 = _mm_loadu_ps(&_x[2 * i + 20]);
m6 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m7 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
m8 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m9 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
m0 = _mm_shuffle_ps(m4, m5, _MM_SHUFFLE(0, 2, 0, 2));
m1 = _mm_shuffle_ps(m4, m5, _MM_SHUFFLE(1, 3, 1, 3));
m2 = _mm_mul_ps(m6, m11);
m3 = _mm_mul_ps(m7, m11);
m4 = _mm_mul_ps(m8, m12);
m5 = _mm_mul_ps(m9, m12);
m6 = _mm_mul_ps(m0, m13);
m7 = _mm_mul_ps(m1, m13);
m0 = _mm_add_ps(m2, m4);
m1 = _mm_add_ps(m3, m5);
m8 = _mm_add_ps(m0, m6);
m9 = _mm_add_ps(m1, m7);
/* Multiply-accumulate last 8 taps */
m0 = _mm_loadu_ps(&_x[2 * i + 24]);
m1 = _mm_loadu_ps(&_x[2 * i + 28]);
m2 = _mm_loadu_ps(&_x[2 * i + 32]);
m3 = _mm_loadu_ps(&_x[2 * i + 36]);
m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
m6 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m7 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
m0 = _mm_mul_ps(m4, m14);
m1 = _mm_mul_ps(m5, m14);
m2 = _mm_mul_ps(m6, m15);
m3 = _mm_mul_ps(m7, m15);
m4 = _mm_add_ps(m0, m2);
m5 = _mm_add_ps(m1, m3);
/* Final sum and store */
m0 = _mm_add_ps(m8, m4);
m1 = _mm_add_ps(m9, m5);
m2 = _mm_hadd_ps(m0, m1);
m3 = _mm_hadd_ps(m2, m2);
_mm_store_ss(&y[2 * i + 0], m3);
m3 = _mm_shuffle_ps(m3, m3, _MM_SHUFFLE(0, 3, 2, 1));
_mm_store_ss(&y[2 * i + 1], m3);
}
}
/* 4*N-tap SSE complex-real convolution */
static void sse_conv_real4n(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len,
int step, int offset)
{
/* See NOTE in sse_conv_real4() */
__m128 m0, m1, m2, m4, m5, m6, m7;
const float *_x = &x[2 * (-(h_len - 1) + start)];
for (int i = 0; i < len; i++) {
/* Zero */
m6 = _mm_setzero_ps();
m7 = _mm_setzero_ps();
for (int n = 0; n < h_len / 4; n++) {
/* Load (aligned) filter taps */
m0 = _mm_load_ps(&h[8 * n + 0]);
m1 = _mm_load_ps(&h[8 * n + 4]);
m2 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
/* Load (unaligned) input data */
m0 = _mm_loadu_ps(&_x[2 * i + 8 * n + 0]);
m1 = _mm_loadu_ps(&_x[2 * i + 8 * n + 4]);
m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
/* Quad multiply */
m0 = _mm_mul_ps(m2, m4);
m1 = _mm_mul_ps(m2, m5);
/* Accumulate */
m6 = _mm_add_ps(m6, m0);
m7 = _mm_add_ps(m7, m1);
}
m0 = _mm_hadd_ps(m6, m7);
m0 = _mm_hadd_ps(m0, m0);
_mm_store_ss(&y[2 * i + 0], m0);
m0 = _mm_shuffle_ps(m0, m0, _MM_SHUFFLE(0, 3, 2, 1));
_mm_store_ss(&y[2 * i + 1], m0);
}
}
/* 4*N-tap SSE complex-complex convolution */
static void sse_conv_cmplx_4n(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len,
int step, int offset)
{
/* NOTE: The parameter list of this function has to match the parameter
* list of _base_convolve_complex() in convolve_base.c. This specific
* implementation ignores some of those parameters, namely x_len, y_len,
* offset and step. */
__m128 m0, m1, m2, m3, m4, m5, m6, m7;
const float *_x = &x[2 * (-(h_len - 1) + start)];
for (int i = 0; i < len; i++) {
/* Zero */
m6 = _mm_setzero_ps();
m7 = _mm_setzero_ps();
for (int n = 0; n < h_len / 4; n++) {
/* Load (aligned) filter taps */
m0 = _mm_load_ps(&h[8 * n + 0]);
m1 = _mm_load_ps(&h[8 * n + 4]);
m2 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m3 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
/* Load (unaligned) input data */
m0 = _mm_loadu_ps(&_x[2 * i + 8 * n + 0]);
m1 = _mm_loadu_ps(&_x[2 * i + 8 * n + 4]);
m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
/* Quad multiply */
m0 = _mm_mul_ps(m2, m4);
m1 = _mm_mul_ps(m3, m5);
m2 = _mm_mul_ps(m2, m5);
m3 = _mm_mul_ps(m3, m4);
/* Sum */
m0 = _mm_sub_ps(m0, m1);
m2 = _mm_add_ps(m2, m3);
/* Accumulate */
m6 = _mm_add_ps(m6, m0);
m7 = _mm_add_ps(m7, m2);
}
m0 = _mm_hadd_ps(m6, m7);
m0 = _mm_hadd_ps(m0, m0);
_mm_store_ss(&y[2 * i + 0], m0);
m0 = _mm_shuffle_ps(m0, m0, _MM_SHUFFLE(0, 3, 2, 1));
_mm_store_ss(&y[2 * i + 1], m0);
}
}
/* 8*N-tap SSE complex-complex convolution */
static void sse_conv_cmplx_8n(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len,
int step, int offset)
{
/* See NOTE in sse_conv_cmplx_4n() */
__m128 m0, m1, m2, m3, m4, m5, m6, m7;
__m128 m8, m9, m10, m11, m12, m13, m14, m15;
const float *_x = &x[2 * (-(h_len - 1) + start)];
for (int i = 0; i < len; i++) {
/* Zero */
m12 = _mm_setzero_ps();
m13 = _mm_setzero_ps();
m14 = _mm_setzero_ps();
m15 = _mm_setzero_ps();
for (int n = 0; n < h_len / 8; n++) {
/* Load (aligned) filter taps */
m0 = _mm_load_ps(&h[16 * n + 0]);
m1 = _mm_load_ps(&h[16 * n + 4]);
m2 = _mm_load_ps(&h[16 * n + 8]);
m3 = _mm_load_ps(&h[16 * n + 12]);
m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
m6 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m7 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
/* Load (unaligned) input data */
m0 = _mm_loadu_ps(&_x[2 * i + 16 * n + 0]);
m1 = _mm_loadu_ps(&_x[2 * i + 16 * n + 4]);
m2 = _mm_loadu_ps(&_x[2 * i + 16 * n + 8]);
m3 = _mm_loadu_ps(&_x[2 * i + 16 * n + 12]);
m8 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m9 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
m10 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m11 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
/* Quad multiply */
m0 = _mm_mul_ps(m4, m8);
m1 = _mm_mul_ps(m5, m9);
m2 = _mm_mul_ps(m6, m10);
m3 = _mm_mul_ps(m7, m11);
m4 = _mm_mul_ps(m4, m9);
m5 = _mm_mul_ps(m5, m8);
m6 = _mm_mul_ps(m6, m11);
m7 = _mm_mul_ps(m7, m10);
/* Sum */
m0 = _mm_sub_ps(m0, m1);
m2 = _mm_sub_ps(m2, m3);
m4 = _mm_add_ps(m4, m5);
m6 = _mm_add_ps(m6, m7);
/* Accumulate */
m12 = _mm_add_ps(m12, m0);
m13 = _mm_add_ps(m13, m2);
m14 = _mm_add_ps(m14, m4);
m15 = _mm_add_ps(m15, m6);
}
m0 = _mm_add_ps(m12, m13);
m1 = _mm_add_ps(m14, m15);
m2 = _mm_hadd_ps(m0, m1);
m2 = _mm_hadd_ps(m2, m2);
_mm_store_ss(&y[2 * i + 0], m2);
m2 = _mm_shuffle_ps(m2, m2, _MM_SHUFFLE(0, 3, 2, 1));
_mm_store_ss(&y[2 * i + 1], m2);
}
}
#endif
/* API: Initialize convolve module */
void convolve_init(void)
{

View File

@@ -0,0 +1,542 @@
/*
* SSE Convolution
* Copyright (C) 2012, 2013 Thomas Tsou <tom@tsou.cc>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <malloc.h>
#include <string.h>
#include <stdio.h>
#include "convolve_sse_3.h"
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAVE_SSE3
#include <xmmintrin.h>
#include <pmmintrin.h>
/* 4-tap SSE complex-real convolution */
void sse_conv_real4(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len, int step, int offset)
{
/* NOTE: The parameter list of this function has to match the parameter
* list of _base_convolve_real() in convolve_base.c. This specific
* implementation ignores some of those parameters, namely x_len, y_len,
* offset and step. */
__m128 m0, m1, m2, m3, m4, m5, m6, m7;
const float *_x = &x[2 * (-(h_len - 1) + start)];
/* Load (aligned) filter taps */
m0 = _mm_load_ps(&h[0]);
m1 = _mm_load_ps(&h[4]);
m7 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
for (int i = 0; i < len; i++) {
/* Load (unaligned) input data */
m0 = _mm_loadu_ps(&_x[2 * i + 0]);
m1 = _mm_loadu_ps(&_x[2 * i + 4]);
m2 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m3 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
/* Quad multiply */
m4 = _mm_mul_ps(m2, m7);
m5 = _mm_mul_ps(m3, m7);
/* Sum and store */
m6 = _mm_hadd_ps(m4, m5);
m0 = _mm_hadd_ps(m6, m6);
_mm_store_ss(&y[2 * i + 0], m0);
m0 = _mm_shuffle_ps(m0, m0, _MM_SHUFFLE(0, 3, 2, 1));
_mm_store_ss(&y[2 * i + 1], m0);
}
}
/* 8-tap SSE complex-real convolution */
void sse_conv_real8(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len, int step, int offset)
{
/* See NOTE in sse_conv_real4() */
__m128 m0, m1, m2, m3, m4, m5, m6, m7, m8, m9;
const float *_x = &x[2 * (-(h_len - 1) + start)];
/* Load (aligned) filter taps */
m0 = _mm_load_ps(&h[0]);
m1 = _mm_load_ps(&h[4]);
m2 = _mm_load_ps(&h[8]);
m3 = _mm_load_ps(&h[12]);
m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m5 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
for (int i = 0; i < len; i++) {
/* Load (unaligned) input data */
m0 = _mm_loadu_ps(&_x[2 * i + 0]);
m1 = _mm_loadu_ps(&_x[2 * i + 4]);
m2 = _mm_loadu_ps(&_x[2 * i + 8]);
m3 = _mm_loadu_ps(&_x[2 * i + 12]);
m6 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m7 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
m8 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m9 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
/* Quad multiply */
m6 = _mm_mul_ps(m6, m4);
m7 = _mm_mul_ps(m7, m4);
m8 = _mm_mul_ps(m8, m5);
m9 = _mm_mul_ps(m9, m5);
/* Sum and store */
m6 = _mm_add_ps(m6, m8);
m7 = _mm_add_ps(m7, m9);
m6 = _mm_hadd_ps(m6, m7);
m6 = _mm_hadd_ps(m6, m6);
_mm_store_ss(&y[2 * i + 0], m6);
m6 = _mm_shuffle_ps(m6, m6, _MM_SHUFFLE(0, 3, 2, 1));
_mm_store_ss(&y[2 * i + 1], m6);
}
}
/* 12-tap SSE complex-real convolution */
void sse_conv_real12(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len, int step, int offset)
{
/* See NOTE in sse_conv_real4() */
__m128 m0, m1, m2, m3, m4, m5, m6, m7;
__m128 m8, m9, m10, m11, m12, m13, m14;
const float *_x = &x[2 * (-(h_len - 1) + start)];
/* Load (aligned) filter taps */
m0 = _mm_load_ps(&h[0]);
m1 = _mm_load_ps(&h[4]);
m2 = _mm_load_ps(&h[8]);
m3 = _mm_load_ps(&h[12]);
m4 = _mm_load_ps(&h[16]);
m5 = _mm_load_ps(&h[20]);
m12 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m13 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m14 = _mm_shuffle_ps(m4, m5, _MM_SHUFFLE(0, 2, 0, 2));
for (int i = 0; i < len; i++) {
/* Load (unaligned) input data */
m0 = _mm_loadu_ps(&_x[2 * i + 0]);
m1 = _mm_loadu_ps(&_x[2 * i + 4]);
m2 = _mm_loadu_ps(&_x[2 * i + 8]);
m3 = _mm_loadu_ps(&_x[2 * i + 12]);
m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
m6 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m7 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
m0 = _mm_loadu_ps(&_x[2 * i + 16]);
m1 = _mm_loadu_ps(&_x[2 * i + 20]);
m8 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m9 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
/* Quad multiply */
m0 = _mm_mul_ps(m4, m12);
m1 = _mm_mul_ps(m5, m12);
m2 = _mm_mul_ps(m6, m13);
m3 = _mm_mul_ps(m7, m13);
m4 = _mm_mul_ps(m8, m14);
m5 = _mm_mul_ps(m9, m14);
/* Sum and store */
m8 = _mm_add_ps(m0, m2);
m9 = _mm_add_ps(m1, m3);
m10 = _mm_add_ps(m8, m4);
m11 = _mm_add_ps(m9, m5);
m2 = _mm_hadd_ps(m10, m11);
m3 = _mm_hadd_ps(m2, m2);
_mm_store_ss(&y[2 * i + 0], m3);
m3 = _mm_shuffle_ps(m3, m3, _MM_SHUFFLE(0, 3, 2, 1));
_mm_store_ss(&y[2 * i + 1], m3);
}
}
/* 16-tap SSE complex-real convolution */
void sse_conv_real16(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len, int step, int offset)
{
/* See NOTE in sse_conv_real4() */
__m128 m0, m1, m2, m3, m4, m5, m6, m7;
__m128 m8, m9, m10, m11, m12, m13, m14, m15;
const float *_x = &x[2 * (-(h_len - 1) + start)];
/* Load (aligned) filter taps */
m0 = _mm_load_ps(&h[0]);
m1 = _mm_load_ps(&h[4]);
m2 = _mm_load_ps(&h[8]);
m3 = _mm_load_ps(&h[12]);
m4 = _mm_load_ps(&h[16]);
m5 = _mm_load_ps(&h[20]);
m6 = _mm_load_ps(&h[24]);
m7 = _mm_load_ps(&h[28]);
m12 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m13 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m14 = _mm_shuffle_ps(m4, m5, _MM_SHUFFLE(0, 2, 0, 2));
m15 = _mm_shuffle_ps(m6, m7, _MM_SHUFFLE(0, 2, 0, 2));
for (int i = 0; i < len; i++) {
/* Load (unaligned) input data */
m0 = _mm_loadu_ps(&_x[2 * i + 0]);
m1 = _mm_loadu_ps(&_x[2 * i + 4]);
m2 = _mm_loadu_ps(&_x[2 * i + 8]);
m3 = _mm_loadu_ps(&_x[2 * i + 12]);
m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
m6 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m7 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
m0 = _mm_loadu_ps(&_x[2 * i + 16]);
m1 = _mm_loadu_ps(&_x[2 * i + 20]);
m2 = _mm_loadu_ps(&_x[2 * i + 24]);
m3 = _mm_loadu_ps(&_x[2 * i + 28]);
m8 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m9 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
m10 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m11 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
/* Quad multiply */
m0 = _mm_mul_ps(m4, m12);
m1 = _mm_mul_ps(m5, m12);
m2 = _mm_mul_ps(m6, m13);
m3 = _mm_mul_ps(m7, m13);
m4 = _mm_mul_ps(m8, m14);
m5 = _mm_mul_ps(m9, m14);
m6 = _mm_mul_ps(m10, m15);
m7 = _mm_mul_ps(m11, m15);
/* Sum and store */
m8 = _mm_add_ps(m0, m2);
m9 = _mm_add_ps(m1, m3);
m10 = _mm_add_ps(m4, m6);
m11 = _mm_add_ps(m5, m7);
m0 = _mm_add_ps(m8, m10);
m1 = _mm_add_ps(m9, m11);
m2 = _mm_hadd_ps(m0, m1);
m3 = _mm_hadd_ps(m2, m2);
_mm_store_ss(&y[2 * i + 0], m3);
m3 = _mm_shuffle_ps(m3, m3, _MM_SHUFFLE(0, 3, 2, 1));
_mm_store_ss(&y[2 * i + 1], m3);
}
}
/* 20-tap SSE complex-real convolution */
void sse_conv_real20(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len, int step, int offset)
{
/* See NOTE in sse_conv_real4() */
__m128 m0, m1, m2, m3, m4, m5, m6, m7;
__m128 m8, m9, m11, m12, m13, m14, m15;
const float *_x = &x[2 * (-(h_len - 1) + start)];
/* Load (aligned) filter taps */
m0 = _mm_load_ps(&h[0]);
m1 = _mm_load_ps(&h[4]);
m2 = _mm_load_ps(&h[8]);
m3 = _mm_load_ps(&h[12]);
m4 = _mm_load_ps(&h[16]);
m5 = _mm_load_ps(&h[20]);
m6 = _mm_load_ps(&h[24]);
m7 = _mm_load_ps(&h[28]);
m8 = _mm_load_ps(&h[32]);
m9 = _mm_load_ps(&h[36]);
m11 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m12 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m13 = _mm_shuffle_ps(m4, m5, _MM_SHUFFLE(0, 2, 0, 2));
m14 = _mm_shuffle_ps(m6, m7, _MM_SHUFFLE(0, 2, 0, 2));
m15 = _mm_shuffle_ps(m8, m9, _MM_SHUFFLE(0, 2, 0, 2));
for (int i = 0; i < len; i++) {
/* Multiply-accumulate first 12 taps */
m0 = _mm_loadu_ps(&_x[2 * i + 0]);
m1 = _mm_loadu_ps(&_x[2 * i + 4]);
m2 = _mm_loadu_ps(&_x[2 * i + 8]);
m3 = _mm_loadu_ps(&_x[2 * i + 12]);
m4 = _mm_loadu_ps(&_x[2 * i + 16]);
m5 = _mm_loadu_ps(&_x[2 * i + 20]);
m6 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m7 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
m8 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m9 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
m0 = _mm_shuffle_ps(m4, m5, _MM_SHUFFLE(0, 2, 0, 2));
m1 = _mm_shuffle_ps(m4, m5, _MM_SHUFFLE(1, 3, 1, 3));
m2 = _mm_mul_ps(m6, m11);
m3 = _mm_mul_ps(m7, m11);
m4 = _mm_mul_ps(m8, m12);
m5 = _mm_mul_ps(m9, m12);
m6 = _mm_mul_ps(m0, m13);
m7 = _mm_mul_ps(m1, m13);
m0 = _mm_add_ps(m2, m4);
m1 = _mm_add_ps(m3, m5);
m8 = _mm_add_ps(m0, m6);
m9 = _mm_add_ps(m1, m7);
/* Multiply-accumulate last 8 taps */
m0 = _mm_loadu_ps(&_x[2 * i + 24]);
m1 = _mm_loadu_ps(&_x[2 * i + 28]);
m2 = _mm_loadu_ps(&_x[2 * i + 32]);
m3 = _mm_loadu_ps(&_x[2 * i + 36]);
m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
m6 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m7 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
m0 = _mm_mul_ps(m4, m14);
m1 = _mm_mul_ps(m5, m14);
m2 = _mm_mul_ps(m6, m15);
m3 = _mm_mul_ps(m7, m15);
m4 = _mm_add_ps(m0, m2);
m5 = _mm_add_ps(m1, m3);
/* Final sum and store */
m0 = _mm_add_ps(m8, m4);
m1 = _mm_add_ps(m9, m5);
m2 = _mm_hadd_ps(m0, m1);
m3 = _mm_hadd_ps(m2, m2);
_mm_store_ss(&y[2 * i + 0], m3);
m3 = _mm_shuffle_ps(m3, m3, _MM_SHUFFLE(0, 3, 2, 1));
_mm_store_ss(&y[2 * i + 1], m3);
}
}
/* 4*N-tap SSE complex-real convolution */
void sse_conv_real4n(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len, int step, int offset)
{
/* See NOTE in sse_conv_real4() */
__m128 m0, m1, m2, m4, m5, m6, m7;
const float *_x = &x[2 * (-(h_len - 1) + start)];
for (int i = 0; i < len; i++) {
/* Zero */
m6 = _mm_setzero_ps();
m7 = _mm_setzero_ps();
for (int n = 0; n < h_len / 4; n++) {
/* Load (aligned) filter taps */
m0 = _mm_load_ps(&h[8 * n + 0]);
m1 = _mm_load_ps(&h[8 * n + 4]);
m2 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
/* Load (unaligned) input data */
m0 = _mm_loadu_ps(&_x[2 * i + 8 * n + 0]);
m1 = _mm_loadu_ps(&_x[2 * i + 8 * n + 4]);
m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
/* Quad multiply */
m0 = _mm_mul_ps(m2, m4);
m1 = _mm_mul_ps(m2, m5);
/* Accumulate */
m6 = _mm_add_ps(m6, m0);
m7 = _mm_add_ps(m7, m1);
}
m0 = _mm_hadd_ps(m6, m7);
m0 = _mm_hadd_ps(m0, m0);
_mm_store_ss(&y[2 * i + 0], m0);
m0 = _mm_shuffle_ps(m0, m0, _MM_SHUFFLE(0, 3, 2, 1));
_mm_store_ss(&y[2 * i + 1], m0);
}
}
/* 4*N-tap SSE complex-complex convolution */
void sse_conv_cmplx_4n(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len, int step, int offset)
{
/* NOTE: The parameter list of this function has to match the parameter
* list of _base_convolve_complex() in convolve_base.c. This specific
* implementation ignores some of those parameters, namely x_len, y_len,
* offset and step. */
__m128 m0, m1, m2, m3, m4, m5, m6, m7;
const float *_x = &x[2 * (-(h_len - 1) + start)];
for (int i = 0; i < len; i++) {
/* Zero */
m6 = _mm_setzero_ps();
m7 = _mm_setzero_ps();
for (int n = 0; n < h_len / 4; n++) {
/* Load (aligned) filter taps */
m0 = _mm_load_ps(&h[8 * n + 0]);
m1 = _mm_load_ps(&h[8 * n + 4]);
m2 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m3 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
/* Load (unaligned) input data */
m0 = _mm_loadu_ps(&_x[2 * i + 8 * n + 0]);
m1 = _mm_loadu_ps(&_x[2 * i + 8 * n + 4]);
m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
/* Quad multiply */
m0 = _mm_mul_ps(m2, m4);
m1 = _mm_mul_ps(m3, m5);
m2 = _mm_mul_ps(m2, m5);
m3 = _mm_mul_ps(m3, m4);
/* Sum */
m0 = _mm_sub_ps(m0, m1);
m2 = _mm_add_ps(m2, m3);
/* Accumulate */
m6 = _mm_add_ps(m6, m0);
m7 = _mm_add_ps(m7, m2);
}
m0 = _mm_hadd_ps(m6, m7);
m0 = _mm_hadd_ps(m0, m0);
_mm_store_ss(&y[2 * i + 0], m0);
m0 = _mm_shuffle_ps(m0, m0, _MM_SHUFFLE(0, 3, 2, 1));
_mm_store_ss(&y[2 * i + 1], m0);
}
}
/* 8*N-tap SSE complex-complex convolution */
void sse_conv_cmplx_8n(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len, int step, int offset)
{
/* See NOTE in sse_conv_cmplx_4n() */
__m128 m0, m1, m2, m3, m4, m5, m6, m7;
__m128 m8, m9, m10, m11, m12, m13, m14, m15;
const float *_x = &x[2 * (-(h_len - 1) + start)];
for (int i = 0; i < len; i++) {
/* Zero */
m12 = _mm_setzero_ps();
m13 = _mm_setzero_ps();
m14 = _mm_setzero_ps();
m15 = _mm_setzero_ps();
for (int n = 0; n < h_len / 8; n++) {
/* Load (aligned) filter taps */
m0 = _mm_load_ps(&h[16 * n + 0]);
m1 = _mm_load_ps(&h[16 * n + 4]);
m2 = _mm_load_ps(&h[16 * n + 8]);
m3 = _mm_load_ps(&h[16 * n + 12]);
m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
m6 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m7 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
/* Load (unaligned) input data */
m0 = _mm_loadu_ps(&_x[2 * i + 16 * n + 0]);
m1 = _mm_loadu_ps(&_x[2 * i + 16 * n + 4]);
m2 = _mm_loadu_ps(&_x[2 * i + 16 * n + 8]);
m3 = _mm_loadu_ps(&_x[2 * i + 16 * n + 12]);
m8 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
m9 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
m10 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
m11 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
/* Quad multiply */
m0 = _mm_mul_ps(m4, m8);
m1 = _mm_mul_ps(m5, m9);
m2 = _mm_mul_ps(m6, m10);
m3 = _mm_mul_ps(m7, m11);
m4 = _mm_mul_ps(m4, m9);
m5 = _mm_mul_ps(m5, m8);
m6 = _mm_mul_ps(m6, m11);
m7 = _mm_mul_ps(m7, m10);
/* Sum */
m0 = _mm_sub_ps(m0, m1);
m2 = _mm_sub_ps(m2, m3);
m4 = _mm_add_ps(m4, m5);
m6 = _mm_add_ps(m6, m7);
/* Accumulate */
m12 = _mm_add_ps(m12, m0);
m13 = _mm_add_ps(m13, m2);
m14 = _mm_add_ps(m14, m4);
m15 = _mm_add_ps(m15, m6);
}
m0 = _mm_add_ps(m12, m13);
m1 = _mm_add_ps(m14, m15);
m2 = _mm_hadd_ps(m0, m1);
m2 = _mm_hadd_ps(m2, m2);
_mm_store_ss(&y[2 * i + 0], m2);
m2 = _mm_shuffle_ps(m2, m2, _MM_SHUFFLE(0, 3, 2, 1));
_mm_store_ss(&y[2 * i + 1], m2);
}
}
#endif
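
As orientation for the vector kernels above, a scalar sketch of the
complex-complex inner product they compute: one complex
multiply-accumulate per tap, using (a+bi)(c+di) = (ac-bd) + (ad+bc)i,
which is exactly the mul/sub/add pattern in sse_conv_cmplx_4n(). It
assumes, as the forward tap indexing in the SSE code suggests, that the
caller passes the taps pre-reversed; the function name is illustrative:

/* Scalar reference: interleaved I/Q floats, h_len complex taps. */
static void scalar_conv_cmplx(const float *x, const float *h, int h_len,
			      float *y, int start, int len)
{
	const float *_x = &x[2 * (start - (h_len - 1))];

	for (int i = 0; i < len; i++) {
		float re = 0.0f, im = 0.0f;
		for (int n = 0; n < h_len; n++) {
			float xr = _x[2 * (i + n) + 0];
			float xi = _x[2 * (i + n) + 1];
			float hr = h[2 * n + 0];
			float hi = h[2 * n + 1];
			re += xr * hr - xi * hi; /* real: ac - bd */
			im += xr * hi + xi * hr; /* imag: ad + bc */
		}
		y[2 * i + 0] = re;
		y[2 * i + 1] = im;
	}
}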

View File

@@ -0,0 +1,68 @@
/*
* SSE Convolution
* Copyright (C) 2012, 2013 Thomas Tsou <tom@tsou.cc>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#pragma once
/* 4-tap SSE complex-real convolution */
void sse_conv_real4(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len, int step, int offset);
/* 8-tap SSE complex-real convolution */
void sse_conv_real8(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len, int step, int offset);
/* 12-tap SSE complex-real convolution */
void sse_conv_real12(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len, int step, int offset);
/* 16-tap SSE complex-real convolution */
void sse_conv_real16(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len, int step, int offset);
/* 20-tap SSE complex-real convolution */
void sse_conv_real20(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len, int step, int offset);
/* 4*N-tap SSE complex-real convolution */
void sse_conv_real4n(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len, int step, int offset);
/* 4*N-tap SSE complex-complex convolution */
void sse_conv_cmplx_4n(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len, int step, int offset);
/* 8*N-tap SSE complex-complex convolution */
void sse_conv_cmplx_8n(const float *x, int x_len,
const float *h, int h_len,
float *y, int y_len,
int start, int len, int step, int offset);
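
The uniform parameter lists above are deliberate: every kernel can then
sit behind a single function pointer, as the NOTE comments in the
sources point out. A sketch of the selection this enables; the typedef
and the helper are illustrative, not taken from convolve.c:

#include <stddef.h>

/* Any kernel above fits this one signature. */
typedef void (*conv_real_fn)(const float *x, int x_len,
			     const float *h, int h_len,
			     float *y, int y_len,
			     int start, int len, int step, int offset);

static conv_real_fn select_conv_real(int h_len)
{
	switch (h_len) {
	case 4:  return sse_conv_real4;
	case 8:  return sse_conv_real8;
	case 12: return sse_conv_real12;
	case 16: return sse_conv_real16;
	case 20: return sse_conv_real20;
	default: return h_len % 4 == 0 ? sse_conv_real4n : NULL;
	}
}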

View File

@@ -37,7 +37,7 @@
#serial 12
AC_DEFUN([AX_EXT],
AC_DEFUN([AX_SSE],
[
AC_REQUIRE([AC_CANONICAL_HOST])
@@ -53,16 +53,20 @@ AC_DEFUN([AX_EXT],
if test x"$ax_cv_support_sse3_ext" = x"yes"; then
SIMD_FLAGS="$SIMD_FLAGS -msse3"
AC_DEFINE(HAVE_SSE3,,[Support SSE3 (Streaming SIMD Extensions 3) instructions])
AM_CONDITIONAL(HAVE_SSE3, true)
else
AC_MSG_WARN([Your compiler does not support sse3 instructions, can you try another compiler?])
AM_CONDITIONAL(HAVE_SSE3, false)
fi
AX_CHECK_COMPILE_FLAG(-msse4.1, ax_cv_support_sse41_ext=yes, [])
if test x"$ax_cv_support_sse41_ext" = x"yes"; then
SIMD_FLAGS="$SIMD_FLAGS -msse4.1"
AC_DEFINE(HAVE_SSE4_1,,[Support SSE4.1 (Streaming SIMD Extensions 4.1) instructions])
AM_CONDITIONAL(HAVE_SSE4_1, true)
else
AC_MSG_WARN([Your compiler does not support sse4.1 instructions, can you try another compiler?])
AM_CONDITIONAL(HAVE_SSE4_1, false)
fi
;;
esac

View File

@@ -114,7 +114,10 @@ AS_IF([test "x$with_singledb" = "xyes"], [
# Find and define supported SIMD extensions
AS_IF([test "x$with_sse" != "xno"], [
AX_EXT
AX_SSE
], [
AM_CONDITIONAL(HAVE_SSE3, false)
AM_CONDITIONAL(HAVE_SSE4_1, false)
])
AM_CONDITIONAL(USRP1, [test "x$with_usrp1" = "xyes"])

View File

@@ -1,4 +1,4 @@
all: main.o convolve_base.o convolve.o
all: main.o convolve_base.o convolve.o convolve_sse_3.o
gcc -g -Wall ./*.o -o convtest -losmocore
clean:
@@ -14,3 +14,5 @@ convolve_base.o: ../../Transceiver52M/common/convolve_base.c
convolve.o: ../../Transceiver52M/x86/convolve.c
gcc -std=c99 -c ../../Transceiver52M/x86/convolve.c -I ../../Transceiver52M/common/ -msse3 -DHAVE_SSE3
convolve_sse_3.o: ../../Transceiver52M/x86/convolve_sse_3.c
gcc -std=c99 -c ../../Transceiver52M/x86/convolve_sse_3.c -I ../../Transceiver52M/common/ -msse3 -DHAVE_SSE3