/*
 * SIMD-optimized LPC functions
 * Copyright (c) 2007 Loren Merritt
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/lpc.h"

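/* 16-byte-aligned double pairs used as packed operands by the inline asm:
 * pd_1 supplies {1.0, 1.0} (and seeds the autocorrelation accumulators),
 * pd_2 supplies the {2.0, 2.0} step for the window argument. */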
DECLARE_ASM_CONST(16, double, pd_1)[2] = { 1.0, 1.0 };
DECLARE_ASM_CONST(16, double, pd_2)[2] = { 2.0, 2.0 };

#if HAVE_SSE2_INLINE

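/*
 * Commentary (added; not in the original source): the loop below walks the
 * buffer from both ends at once, converting two int32 samples per side per
 * iteration. xmm7 carries a pair of window arguments that start at
 * {c-1, c-2} and step down by 2 each pass; the weights are 1 - x*x, applied
 * to the front pair directly and to the mirrored back pair after a pshufd
 * lane swap. The odd/even length test selects between the two WELCH
 * expansions, which differ in the back-half offset (-1 vs. -2 samples) and
 * in needing an unaligned back store for odd lengths.
 */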
static void lpc_apply_welch_window_sse2(const int32_t *data, int len,
                                        double *w_data)
{
    double c = 2.0 / (len-1.0);
    int n2 = len>>1;
    x86_reg i = -n2*sizeof(int32_t);
    x86_reg j =  n2*sizeof(int32_t);
    __asm__ volatile(
        "movsd   %4,     %%xmm7                \n\t"
        "movapd  "MANGLE(pd_1)", %%xmm6        \n\t"
        "movapd  "MANGLE(pd_2)", %%xmm5        \n\t"
        "movlhps %%xmm7, %%xmm7                \n\t"
        "subpd   %%xmm5, %%xmm7                \n\t"
        "addsd   %%xmm6, %%xmm7                \n\t"
        "test    $1,     %5                    \n\t"
        "jz      2f                            \n\t"
#define WELCH(MOVPD, offset)\
        "1:                                    \n\t"\
        "movapd   %%xmm7,  %%xmm1              \n\t"\
        "mulpd    %%xmm1,  %%xmm1              \n\t"\
        "movapd   %%xmm6,  %%xmm0              \n\t"\
        "subpd    %%xmm1,  %%xmm0              \n\t"\
        "pshufd   $0x4e,   %%xmm0, %%xmm1      \n\t"\
        "cvtpi2pd (%3,%0), %%xmm2              \n\t"\
        "cvtpi2pd "#offset"*4(%3,%1), %%xmm3   \n\t"\
        "mulpd    %%xmm0,  %%xmm2              \n\t"\
        "mulpd    %%xmm1,  %%xmm3              \n\t"\
        "movapd   %%xmm2, (%2,%0,2)            \n\t"\
        MOVPD"    %%xmm3, "#offset"*8(%2,%1,2) \n\t"\
        "subpd    %%xmm5,  %%xmm7              \n\t"\
        "sub      $8,      %1                  \n\t"\
        "add      $8,      %0                  \n\t"\
        "jl 1b                                 \n\t"\

        WELCH("movupd", -1)
        "jmp 3f                                \n\t"
        "2:                                    \n\t"
        WELCH("movapd", -2)
        "3:                                    \n\t"
        :"+&r"(i), "+&r"(j)
        :"r"(w_data+n2), "r"(data+n2), "m"(c), "r"(len)
        XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
                          "%xmm5", "%xmm6", "%xmm7")
    );
#undef WELCH
}
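
/*
 * Plain-C model of the arithmetic above (an illustrative sketch added for
 * reference, not part of the original file; the canonical scalar
 * implementation lives in libavcodec/lpc.c). Each symmetric pair of samples
 * i and len-1-i is scaled by 1 - x*x with x = c - 1 - i, the same
 * c-1, c-2, c-3, ... argument sequence the asm steps through in xmm7. The
 * middle sample of an odd-length buffer is not handled here.
 */
static av_unused void lpc_apply_welch_window_ref(const int32_t *data, int len,
                                                 double *w_data)
{
    double c = 2.0 / (len - 1.0);
    int n2 = len >> 1;
    int i;

    for (i = 0; i < n2; i++) {
        double x = c - 1.0 - i;
        double w = 1.0 - x * x;     /* window weight for this pair */
        w_data[i]           = data[i]           * w;
        w_data[len - 1 - i] = data[len - 1 - i] * w;
    }
}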
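
/*
 * Commentary (added; not in the original source): each pass of the loop
 * below accumulates two autocorrelation lags at once (j and j+1), with a
 * third (j+2 == lag) folded into the final pass when lag is even, so all of
 * autoc[0..lag] gets written. The accumulators are seeded with 1.0 from
 * pd_1, and the initial data++ bumps a misaligned pointer up to a 16-byte
 * boundary (shifting the window by one sample) so the movapd loads are
 * legal.
 */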
static void lpc_compute_autocorr_sse2(const double *data, int len, int lag,
                                      double *autoc)
{
    int j;

    if((x86_reg)data & 15)
        data++;

    for(j=0; j<lag; j+=2){
        x86_reg i = -len*sizeof(double);
        if(j == lag-2) {
            __asm__ volatile(
                "movsd    "MANGLE(pd_1)", %%xmm0 \n\t"
                "movsd    "MANGLE(pd_1)", %%xmm1 \n\t"
                "movsd    "MANGLE(pd_1)", %%xmm2 \n\t"
                "1:                              \n\t"
                "movapd   (%2,%0), %%xmm3        \n\t"
                "movupd -8(%3,%0), %%xmm4        \n\t"
                "movapd   (%3,%0), %%xmm5        \n\t"
                "mulpd     %%xmm3, %%xmm4        \n\t"
                "mulpd     %%xmm3, %%xmm5        \n\t"
                "mulpd -16(%3,%0), %%xmm3        \n\t"
                "addpd     %%xmm4, %%xmm1        \n\t"
                "addpd     %%xmm5, %%xmm0        \n\t"
                "addpd     %%xmm3, %%xmm2        \n\t"
                "add       $16,    %0            \n\t"
                "jl 1b                           \n\t"
                "movhlps   %%xmm0, %%xmm3        \n\t"
                "movhlps   %%xmm1, %%xmm4        \n\t"
                "movhlps   %%xmm2, %%xmm5        \n\t"
                "addsd     %%xmm3, %%xmm0        \n\t"
                "addsd     %%xmm4, %%xmm1        \n\t"
                "addsd     %%xmm5, %%xmm2        \n\t"
                "movsd     %%xmm0,   (%1)        \n\t"
                "movsd     %%xmm1,  8(%1)        \n\t"
                "movsd     %%xmm2, 16(%1)        \n\t"
                :"+&r"(i)
                :"r"(autoc+j), "r"(data+len), "r"(data+len-j)
                :"memory"
            );
        } else {
            __asm__ volatile(
                "movsd    "MANGLE(pd_1)", %%xmm0 \n\t"
                "movsd    "MANGLE(pd_1)", %%xmm1 \n\t"
                "1:                              \n\t"
                "movapd   (%3,%0), %%xmm3        \n\t"
                "movupd -8(%4,%0), %%xmm4        \n\t"
                "mulpd     %%xmm3, %%xmm4        \n\t"
                "mulpd    (%4,%0), %%xmm3        \n\t"
                "addpd     %%xmm4, %%xmm1        \n\t"
                "addpd     %%xmm3, %%xmm0        \n\t"
                "add       $16,    %0            \n\t"
                "jl 1b                           \n\t"
                "movhlps   %%xmm0, %%xmm3        \n\t"
                "movhlps   %%xmm1, %%xmm4        \n\t"
                "addsd     %%xmm3, %%xmm0        \n\t"
                "addsd     %%xmm4, %%xmm1        \n\t"
                "movsd     %%xmm0, %1            \n\t"
                "movsd     %%xmm1, %2            \n\t"
                :"+&r"(i), "=m"(autoc[j]), "=m"(autoc[j+1])
                :"r"(data+len), "r"(data+len-j)
            );
        }
    }
}
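
/*
 * Plain-C model of the computation above (an illustrative sketch added for
 * reference, not part of the original file; the canonical scalar version
 * lives in libavcodec/lpc.c). autoc[j] is the inner product of the signal
 * with itself delayed by j samples, plus the 1.0 seed the asm loads from
 * pd_1. Like the asm, this assumes the caller provides readable, zeroed
 * samples just before data[0]; the generic LPC code arranges such padding
 * in front of its windowed buffer.
 */
static av_unused void lpc_compute_autocorr_ref(const double *data, int len,
                                               int lag, double *autoc)
{
    int i, j;

    for (j = 0; j <= lag; j++) {
        double sum = 1.0;                  /* matches the pd_1 seed */
        for (i = 0; i < len; i++)
            sum += data[i] * data[i - j];  /* data[-1..-lag] read as zero */
        autoc[j] = sum;
    }
}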

#endif /* HAVE_SSE2_INLINE */

av_cold void ff_lpc_init_x86(LPCContext *c)
{
#if HAVE_SSE2_INLINE
    int cpu_flags = av_get_cpu_flags();

    if (INLINE_SSE2(cpu_flags) && (cpu_flags & AV_CPU_FLAG_SSE2SLOW)) {
        c->lpc_apply_welch_window = lpc_apply_welch_window_sse2;
        c->lpc_compute_autocorr   = lpc_compute_autocorr_sse2;
    }
#endif /* HAVE_SSE2_INLINE */
}
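
/*
 * Usage sketch (illustrative, not part of the original file): callers do
 * not invoke ff_lpc_init_x86() directly; the generic ff_lpc_init() in
 * libavcodec/lpc.c installs the C function pointers and then calls it so
 * the SSE2 versions can take over. Signatures are as declared in lpc.h at
 * the time of writing.
 */
static av_unused int lpc_init_usage_sketch(int blocksize, int max_order)
{
    LPCContext lpc = { 0 };
    int ret = ff_lpc_init(&lpc, blocksize, max_order, FF_LPC_TYPE_LEVINSON);
    if (ret < 0)
        return ret;
    /* lpc.lpc_apply_welch_window / lpc.lpc_compute_autocorr now point at
     * the SSE2 code when the flag test in ff_lpc_init_x86() passed. */
    ff_lpc_end(&lpc);
    return 0;
}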