/* Copyright (c) 2007-2008 CSIRO
   Copyright (c) 2007-2010 Xiph.Org Foundation
   Copyright (c) 2008 Gregory Maxwell
   Written by Jean-Marc Valin and Gregory Maxwell */
/*
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef __CELT_MIPSR1_H__
#define __CELT_MIPSR1_H__

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#define CELT_C

#include "os_support.h"
#include "mdct.h"
#include <math.h>
#include "celt.h"
#include "pitch.h"
#include "bands.h"
#include "modes.h"
#include "entcode.h"
#include "quant_bands.h"
#include "rate.h"
#include "stack_alloc.h"
#include "mathops.h"
#include "float_cast.h"
#include <stdarg.h>
#include "celt_lpc.h"
#include "vq.h"

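/* MIPS32 DSP ASE version of comb_filter(): the inner products are computed
   with MULT/MADD on the $ac1 accumulator and extracted with EXTR.W, replacing
   the generic C implementation selected when OVERRIDE_comb_filter is not
   defined. */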
#define OVERRIDE_comb_filter
void comb_filter(opus_val32 *y, opus_val32 *x, int T0, int T1, int N,
      opus_val16 g0, opus_val16 g1, int tapset0, int tapset1,
      const opus_val16 *window, int overlap, int arch)
{
   int i;
   opus_val32 x0, x1, x2, x3, x4;
   opus_val16 g00, g01, g02, g10, g11, g12;
   static const opus_val16 gains[3][3] = {
         {QCONST16(0.3066406250f, 15), QCONST16(0.2170410156f, 15), QCONST16(0.1296386719f, 15)},
         {QCONST16(0.4638671875f, 15), QCONST16(0.2680664062f, 15), QCONST16(0.f, 15)},
         {QCONST16(0.7998046875f, 15), QCONST16(0.1000976562f, 15), QCONST16(0.f, 15)}};

   (void)arch;

   /* printf ("%d %d %f %f\n", T0, T1, g0, g1); */

   if (g0==0 && g1==0)
   {
      /* OPT: Happens to work without the OPUS_MOVE(), but only because the current encoder already copies x to y */
      if (x!=y)
         OPUS_MOVE(y, x, N);
      return;
   }

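   /* Scale the three prototype tap gains of each tapset by g0 (old filter)
      and g1 (new filter). */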
   g00 = MULT16_16_P15(g0, gains[tapset0][0]);
   g01 = MULT16_16_P15(g0, gains[tapset0][1]);
   g02 = MULT16_16_P15(g0, gains[tapset0][2]);
   g10 = MULT16_16_P15(g1, gains[tapset1][0]);
   g11 = MULT16_16_P15(g1, gains[tapset1][1]);
   g12 = MULT16_16_P15(g1, gains[tapset1][2]);
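   /* Pre-load the filter history around the new period T1 (the values needed
      for i == 0). */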
   x1 = x[-T1+1];
   x2 = x[-T1  ];
   x3 = x[-T1-1];
   x4 = x[-T1-2];
   /* If the filter didn't change, we don't need the overlap */
   if (g0==g1 && T0==T1 && tapset0==tapset1)
      overlap=0;

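   /* Overlap region: cross-fade from the old filter (T0, g00..g02) to the new
      one (T1, g10..g12), with fade weight f = window[i]^2 in Q15: the old taps
      are scaled by (Q15ONE-f) and the new taps by f. */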
   for (i=0;i<overlap;i++)
   {
      opus_val16 f;
      opus_val32 res;
      f = MULT16_16_Q15(window[i],window[i]);
      x0= x[i-T1+2];

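      /* MULT seeds the DSP accumulator $ac1 with the fade-weighted centre tap
         of the old filter applied to x[i-T0]; the MADDs accumulate the
         remaining symmetric tap pairs of both the old and the new filter. */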
      asm volatile("MULT $ac1, %0, %1" : : "r" ((int)MULT16_16_Q15((Q15ONE-f),g00)), "r" ((int)x[i-T0]));

      asm volatile("MADD $ac1, %0, %1" : : "r" ((int)MULT16_16_Q15((Q15ONE-f),g01)), "r" ((int)ADD32(x[i-T0-1],x[i-T0+1])));
      asm volatile("MADD $ac1, %0, %1" : : "r" ((int)MULT16_16_Q15((Q15ONE-f),g02)), "r" ((int)ADD32(x[i-T0-2],x[i-T0+2])));
      asm volatile("MADD $ac1, %0, %1" : : "r" ((int)MULT16_16_Q15(f,g10)), "r" ((int)x2));
      asm volatile("MADD $ac1, %0, %1" : : "r" ((int)MULT16_16_Q15(f,g11)), "r" ((int)ADD32(x3,x1)));
      asm volatile("MADD $ac1, %0, %1" : : "r" ((int)MULT16_16_Q15(f,g12)), "r" ((int)ADD32(x4,x0)));

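      /* EXTR.W shifts the 64-bit $ac1 accumulator right by 15 bits (undoing
         the Q15 gain scaling) and returns the low 32 bits of the result. */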
      asm volatile("EXTR.W %0,$ac1, %1" : "=r" (res): "i" (15));

      y[i] = x[i] + res;

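      /* Slide the T1 delay line by one sample for the next iteration. */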
      x4=x3;
      x3=x2;
      x2=x1;
      x1=x0;
   }

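   /* Re-seed the delay line at i == overlap before the steady-state loop. */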
   x4 = x[i-T1-2];
   x3 = x[i-T1-1];
   x2 = x[i-T1];
   x1 = x[i-T1+1];

   if (g1==0)
   {
      /* OPT: Happens to work without the OPUS_MOVE(), but only because the current encoder already copies x to y */
      if (x!=y)
         OPUS_MOVE(y+overlap, x+overlap, N-overlap);
      return;
   }

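   /* Steady state: apply only the new 5-tap symmetric comb filter around
      period T1, accumulating in $ac1 as in the overlap loop above. */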
   for (i=overlap;i<N;i++)
   {
      opus_val32 res;
      x0=x[i-T1+2];

      asm volatile("MULT $ac1, %0, %1" : : "r" ((int)g10), "r" ((int)x2));

      asm volatile("MADD $ac1, %0, %1" : : "r" ((int)g11), "r" ((int)ADD32(x3,x1)));
      asm volatile("MADD $ac1, %0, %1" : : "r" ((int)g12), "r" ((int)ADD32(x4,x0)));
      asm volatile("EXTR.W %0,$ac1, %1" : "=r" (res): "i" (15));
      y[i] = x[i] + res;
      x4=x3;
      x3=x2;
      x2=x1;
      x1=x0;
   }
}

#endif /* __CELT_MIPSR1_H__ */