1/*
2 ** Copyright 2003-2010, VisualOn, Inc.
3 **
4 ** Licensed under the Apache License, Version 2.0 (the "License");
5 ** you may not use this file except in compliance with the License.
6 ** You may obtain a copy of the License at
7 **
8 **     http://www.apache.org/licenses/LICENSE-2.0
9 **
10 ** Unless required by applicable law or agreed to in writing, software
11 ** distributed under the License is distributed on an "AS IS" BASIS,
12 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 ** See the License for the specific language governing permissions and
14 ** limitations under the License.
15 */
16
17/***********************************************************************
18*      File: voAMRWBEnc.c                                              *
19*                                                                      *
20*      Description: Performs the main encoder routine                  *
21*                   Fixed-point C simulation of AMR WB ACELP coding    *
22*		    algorithm with 20 ms speech frames for             *
23*		    wideband speech signals.                           *
24*                                                                      *
25************************************************************************/
26
27#include <stdio.h>
28#include <stdlib.h>
29#include "typedef.h"
30#include "basic_op.h"
31#include "oper_32b.h"
32#include "math_op.h"
33#include "cnst.h"
34#include "acelp.h"
35#include "cod_main.h"
36#include "bits.h"
37#include "main.h"
38#include "voAMRWB.h"
39#include "mem_align.h"
40#include "cmnMemory.h"
41
42#define UNUSED(x) (void)(x)
43
44#ifdef __cplusplus
45extern "C" {
46#endif
47
48/* LPC interpolation coef {0.45, 0.8, 0.96, 1.0}; in Q15 */
49static Word16 interpol_frac[NB_SUBFR] = {14746, 26214, 31457, 32767};
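/*
 * Illustrative note (not part of the encoder): Q15 values represent
 * value/32768, so 0.45 * 32768 = 14745.6 -> 14746, 0.8 -> 26214,
 * 0.96 -> 31457 and 1.0 saturates to 32767.  A minimal float-to-Q15
 * sketch, assuming |x| <= 1.0 (hypothetical helper, shown for clarity):
 *
 *     static Word16 float_to_q15(float x)
 *     {
 *         float v = x * 32768.0f;
 *         if (v >  32767.0f) v =  32767.0f;   // saturate: 1.0 -> 32767
 *         if (v < -32768.0f) v = -32768.0f;
 *         return (Word16)(v + (v >= 0.0f ? 0.5f : -0.5f));   // round to nearest
 *     }
 */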
50
51/* isp tables for initialization */
52static Word16 isp_init[M] =
53{
54	32138, 30274, 27246, 23170, 18205, 12540, 6393, 0,
55	-6393, -12540, -18205, -23170, -27246, -30274, -32138, 1475
56};
57
58static Word16 isf_init[M] =
59{
60	1024, 2048, 3072, 4096, 5120, 6144, 7168, 8192,
61	9216, 10240, 11264, 12288, 13312, 14336, 15360, 3840
62};
63
64/* High Band encoding */
65static const Word16 HP_gain[16] =
66{
67	3624, 4673, 5597, 6479, 7425, 8378, 9324, 10264,
68	11210, 12206, 13391, 14844, 16770, 19655, 24289, 32728
69};
70
71/* Private function declaration */
72static Word16 synthesis(
73			Word16 Aq[],                          /* A(z)  : quantized Az               */
74			Word16 exc[],                         /* (i)   : excitation at 12kHz        */
75			Word16 Q_new,                         /* (i)   : scaling performed on exc   */
76			Word16 synth16k[],                    /* (o)   : 16kHz synthesis signal     */
77			Coder_State * st                      /* (i/o) : State structure            */
78			);
79
80/* Codec parameters initialization */
81void Reset_encoder(void *st, Word16 reset_all)
82{
83	Word16 i;
84	Coder_State *cod_state;
85	cod_state = (Coder_State *) st;
86	Set_zero(cod_state->old_exc, PIT_MAX + L_INTERPOL);
87	Set_zero(cod_state->mem_syn, M);
88	Set_zero(cod_state->past_isfq, M);
89	cod_state->mem_w0 = 0;
90	cod_state->tilt_code = 0;
91	cod_state->first_frame = 1;
92	Init_gp_clip(cod_state->gp_clip);
93	cod_state->L_gc_thres = 0;
94	if (reset_all != 0)
95	{
96		/* Static vectors to zero */
97		Set_zero(cod_state->old_speech, L_TOTAL - L_FRAME);
98		Set_zero(cod_state->old_wsp, (PIT_MAX / OPL_DECIM));
99		Set_zero(cod_state->mem_decim2, 3);
100		/* routines initialization */
101		Init_Decim_12k8(cod_state->mem_decim);
102		Init_HP50_12k8(cod_state->mem_sig_in);
103		Init_Levinson(cod_state->mem_levinson);
104		Init_Q_gain2(cod_state->qua_gain);
105		Init_Hp_wsp(cod_state->hp_wsp_mem);
106		/* isp initialization */
107		Copy(isp_init, cod_state->ispold, M);
108		Copy(isp_init, cod_state->ispold_q, M);
109		/* variable initialization */
110		cod_state->mem_preemph = 0;
111		cod_state->mem_wsp = 0;
112		cod_state->Q_old = 15;
113		cod_state->Q_max[0] = 15;
114		cod_state->Q_max[1] = 15;
115		cod_state->old_wsp_max = 0;
116		cod_state->old_wsp_shift = 0;
117		/* pitch ol initialization */
118		cod_state->old_T0_med = 40;
119		cod_state->ol_gain = 0;
120		cod_state->ada_w = 0;
121		cod_state->ol_wght_flg = 0;
122		for (i = 0; i < 5; i++)
123		{
124			cod_state->old_ol_lag[i] = 40;
125		}
126		Set_zero(cod_state->old_hp_wsp, (L_FRAME / 2) / OPL_DECIM + (PIT_MAX / OPL_DECIM));
127		Set_zero(cod_state->mem_syn_hf, M);
128		Set_zero(cod_state->mem_syn_hi, M);
129		Set_zero(cod_state->mem_syn_lo, M);
130		Init_HP50_12k8(cod_state->mem_sig_out);
131		Init_Filt_6k_7k(cod_state->mem_hf);
132		Init_HP400_12k8(cod_state->mem_hp400);
133		Copy(isf_init, cod_state->isfold, M);
134		cod_state->mem_deemph = 0;
135		cod_state->seed2 = 21845;
136		Init_Filt_6k_7k(cod_state->mem_hf2);
137		cod_state->gain_alpha = 32767;
138		cod_state->vad_hist = 0;
139		wb_vad_reset(cod_state->vadSt);
140		dtx_enc_reset(cod_state->dtx_encSt, isf_init);
141	}
142	return;
143}
144
145/*-----------------------------------------------------------------*
146*   Function coder                                                *
147*            ~~~~~                                                *
148*   ->Main coder routine.                                         *
149*                                                                 *
150*-----------------------------------------------------------------*/
151void coder(
152		Word16 * mode,                        /* input :  used mode                             */
153		Word16 speech16k[],                   /* input :  320 new speech samples (at 16 kHz)    */
154		Word16 prms[],                        /* output:  output parameters                     */
155		Word16 * ser_size,                    /* output:  bit rate of the used mode             */
156		void *spe_state,                      /* i/o   :  State structure                       */
157		Word16 allow_dtx                      /* input :  DTX ON/OFF                            */
158	  )
159{
160	/* Coder states */
161	Coder_State *st;
162	/* Speech vector */
163	Word16 old_speech[L_TOTAL];
164	Word16 *new_speech, *speech, *p_window;
165
166	/* Weighted speech vector */
167	Word16 old_wsp[L_FRAME + (PIT_MAX / OPL_DECIM)];
168	Word16 *wsp;
169
170	/* Excitation vector */
171	Word16 old_exc[(L_FRAME + 1) + PIT_MAX + L_INTERPOL];
172	Word16 *exc;
173
174	/* LPC coefficients */
175	Word16 r_h[M + 1], r_l[M + 1];         /* Autocorrelations of windowed speech  */
176	Word16 rc[M];                          /* Reflection coefficients.             */
177	Word16 Ap[M + 1];                      /* A(z) with spectral expansion         */
178	Word16 ispnew[M];                      /* immittance spectral pairs at 4th sfr */
179	Word16 ispnew_q[M];                    /* quantized ISPs at 4th subframe       */
180	Word16 isf[M];                         /* ISF (frequency domain) at 4th sfr    */
181	Word16 *p_A, *p_Aq;                    /* ptr to A(z) for the 4 subframes      */
182	Word16 A[NB_SUBFR * (M + 1)];          /* A(z) unquantized for the 4 subframes */
183	Word16 Aq[NB_SUBFR * (M + 1)];         /* A(z)   quantized for the 4 subframes */
184
185	/* Other vectors */
186	Word16 xn[L_SUBFR];                    /* Target vector for pitch search     */
187	Word16 xn2[L_SUBFR];                   /* Target vector for codebook search  */
188	Word16 dn[L_SUBFR];                    /* Correlation between xn2 and h1     */
189	Word16 cn[L_SUBFR];                    /* Target vector in residual domain   */
190	Word16 h1[L_SUBFR];                    /* Impulse response vector            */
191	Word16 h2[L_SUBFR];                    /* Impulse response vector            */
192	Word16 code[L_SUBFR];                  /* Fixed codebook excitation          */
193	Word16 y1[L_SUBFR];                    /* Filtered adaptive excitation       */
194	Word16 y2[L_SUBFR];                    /* Filtered adaptive excitation       */
195	Word16 error[M + L_SUBFR];             /* error of quantization              */
196	Word16 synth[L_SUBFR];                 /* 12.8kHz synthesis vector           */
197	Word16 exc2[L_FRAME];                  /* excitation vector                  */
198	Word16 buf[L_FRAME];                   /* VAD buffer                         */
199
200	/* Scalars */
201	Word32 i, j, i_subfr, select, pit_flag, clip_gain, vad_flag;
202	Word16 codec_mode;
203	Word16 T_op, T_op2, T0, T0_min, T0_max, T0_frac, index;
204	Word16 gain_pit, gain_code, g_coeff[4], g_coeff2[4];
205	Word16 tmp, gain1, gain2, exp, Q_new, mu, shift, max;
206	Word16 voice_fac;
207	Word16 indice[8];
208	Word32 L_tmp, L_gain_code, L_max, L_tmp1;
209	Word16 code2[L_SUBFR];                         /* Fixed codebook excitation  */
210	Word16 stab_fac, fac, gain_code_lo;
211
212	Word16 corr_gain;
213	Word16 *vo_p0, *vo_p1, *vo_p2, *vo_p3;
214
215	st = (Coder_State *) spe_state;
216
217	*ser_size = nb_of_bits[*mode];
218	codec_mode = *mode;
219
220	/*--------------------------------------------------------------------------*
221	 *          Initialize pointers to speech vector.                           *
222	 *                                                                          *
223	 *                                                                          *
224	 *                    |-------|-------|-------|-------|-------|-------|     *
225	 *                     past sp   sf1     sf2     sf3     sf4    L_NEXT      *
226	 *                    <-------  Total speech buffer (L_TOTAL)   ------>     *
227	 *              old_speech                                                  *
228	 *                    <-------  LPC analysis window (L_WINDOW)  ------>     *
229	 *                    |       <-- present frame (L_FRAME) ---->             *
230	 *                   p_window |       <----- new speech (L_FRAME) ---->     *
231	 *                            |       |                                     *
232	 *                          speech    |                                     *
233	 *                                 new_speech                               *
234	 *--------------------------------------------------------------------------*/
235
236	new_speech = old_speech + L_TOTAL - L_FRAME - L_FILT;         /* New speech     */
237	speech = old_speech + L_TOTAL - L_FRAME - L_NEXT;             /* Present frame  */
238	p_window = old_speech + L_TOTAL - L_WINDOW;
239
240	exc = old_exc + PIT_MAX + L_INTERPOL;
241	wsp = old_wsp + (PIT_MAX / OPL_DECIM);
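	/*
	 * Worked example of the layout above, assuming the usual AMR-WB
	 * constants from cnst.h (L_TOTAL = 384, L_FRAME = 256, L_FILT = 12,
	 * L_NEXT = 64, L_WINDOW = 384 -- values assumed here):
	 *
	 *     p_window   = old_speech + 384 - 384       = old_speech +   0
	 *     speech     = old_speech + 384 - 256 - 64  = old_speech +  64
	 *     new_speech = old_speech + 384 - 256 - 12  = old_speech + 116
	 *
	 * i.e. the LPC window spans the whole buffer, the present frame starts
	 * 64 samples in, and the freshly decimated samples are written
	 * L_NEXT - L_FILT = 52 samples after the start of the present frame.
	 */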
242
243	/* copy coder memory state into working space */
244	Copy(st->old_speech, old_speech, L_TOTAL - L_FRAME);
245	Copy(st->old_wsp, old_wsp, PIT_MAX / OPL_DECIM);
246	Copy(st->old_exc, old_exc, PIT_MAX + L_INTERPOL);
247
248	/*---------------------------------------------------------------*
249	 * Down sampling signal from 16kHz to 12.8kHz                    *
250	 * -> The signal is extended by L_FILT samples (padded to zero)  *
251	 * to avoid additional delay (L_FILT samples) in the coder.      *
252	 * The last L_FILT samples are approximated after decimation and *
253	 * are used (and windowed) only in autocorrelations.             *
254	 *---------------------------------------------------------------*/
255
256	Decim_12k8(speech16k, L_FRAME16k, new_speech, st->mem_decim);
257
258	/* last L_FILT samples for autocorrelation window */
259	Copy(st->mem_decim, code, 2 * L_FILT16k);
260	Set_zero(error, L_FILT16k);            /* set next sample to zero */
261	Decim_12k8(error, L_FILT16k, new_speech + L_FRAME, code);
262
263	/*---------------------------------------------------------------*
264	 * Perform 50Hz HP filtering of input signal.                    *
265	 *---------------------------------------------------------------*/
266
267	HP50_12k8(new_speech, L_FRAME, st->mem_sig_in);
268
269	/* last L_FILT samples for autocorrelation window */
270	Copy(st->mem_sig_in, code, 6);
271	HP50_12k8(new_speech + L_FRAME, L_FILT, code);
272
273	/*---------------------------------------------------------------*
274	 * Perform fixed preemphasis through 1 - g z^-1                  *
275	 * Scale signal to get maximum of precision in filtering         *
276	 *---------------------------------------------------------------*/
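	/*
	 * Floating-point reference for the preemphasis below (illustrative
	 * sketch only; the actual code works in Q15/Q14 with block scaling):
	 *
	 *     y[n] = x[n] - g * x[n-1],   g = PREEMPH_FAC / 32768
	 *
	 *     void preemph_float(float *x, int n, float g, float *mem)
	 *     {
	 *         float prev = *mem, cur;
	 *         int i;
	 *         *mem = x[n - 1];              // keep last input for the next call
	 *         for (i = 0; i < n; i++) {
	 *             cur  = x[i];
	 *             x[i] = cur - g * prev;    // 1 - g z^-1
	 *             prev = cur;
	 *         }
	 *     }
	 */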
277
278	mu = PREEMPH_FAC >> 1;              /* Q15 --> Q14 */
279
280	/* get max of new preemphased samples (L_FRAME+L_FILT) */
281	L_tmp = new_speech[0] << 15;
282	L_tmp -= (st->mem_preemph * mu)<<1;
283	L_max = L_abs(L_tmp);
284
285	for (i = 1; i < L_FRAME + L_FILT; i++)
286	{
287		L_tmp = new_speech[i] << 15;
288		L_tmp -= (new_speech[i - 1] * mu)<<1;
289		L_tmp = L_abs(L_tmp);
290		if(L_tmp > L_max)
291		{
292			L_max = L_tmp;
293		}
294	}
295
296	/* get scaling factor for new and previous samples */
297	/* limit scaling to Q_MAX to keep dynamic for ringing in low signal */
298	/* limit scaling to Q_MAX also to avoid a[0]<1 in syn_filt_32 */
299	tmp = extract_h(L_max);
300	if (tmp == 0)
301	{
302		shift = Q_MAX;
303	} else
304	{
305		shift = norm_s(tmp) - 1;
306		if (shift < 0)
307		{
308			shift = 0;
309		}
310		if (shift > Q_MAX)
311		{
312			shift = Q_MAX;
313		}
314	}
315	Q_new = shift;
316	if (Q_new > st->Q_max[0])
317	{
318		Q_new = st->Q_max[0];
319	}
320	if (Q_new > st->Q_max[1])
321	{
322		Q_new = st->Q_max[1];
323	}
324	exp = (Q_new - st->Q_old);
325	st->Q_old = Q_new;
326	st->Q_max[1] = st->Q_max[0];
327	st->Q_max[0] = shift;
328
329	/* preemphasis with scaling (L_FRAME+L_FILT) */
330	tmp = new_speech[L_FRAME - 1];
331
332	for (i = L_FRAME + L_FILT - 1; i > 0; i--)
333	{
334		L_tmp = new_speech[i] << 15;
335		L_tmp -= (new_speech[i - 1] * mu)<<1;
336		L_tmp = (L_tmp << Q_new);
337		new_speech[i] = vo_round(L_tmp);
338	}
339
340	L_tmp = new_speech[0] << 15;
341	L_tmp -= (st->mem_preemph * mu)<<1;
342	L_tmp = (L_tmp << Q_new);
343	new_speech[0] = vo_round(L_tmp);
344
345	st->mem_preemph = tmp;
346
347	/* scale previous samples and memory */
348
349	Scale_sig(old_speech, L_TOTAL - L_FRAME - L_FILT, exp);
350	Scale_sig(old_exc, PIT_MAX + L_INTERPOL, exp);
351	Scale_sig(st->mem_syn, M, exp);
352	Scale_sig(st->mem_decim2, 3, exp);
353	Scale_sig(&(st->mem_wsp), 1, exp);
354	Scale_sig(&(st->mem_w0), 1, exp);
355
356	/*------------------------------------------------------------------------*
357	 *  Call VAD                                                              *
358	 *  Preemphasis scales down low frequencies and keeps the dynamic in HF.  *
359	 *  VAD works slightly ahead (new_speech = speech + L_NEXT - L_FILT).     *
360	 *------------------------------------------------------------------------*/
361	Copy(new_speech, buf, L_FRAME);
362
363#ifdef ASM_OPT        /* asm optimization branch */
364	Scale_sig_opt(buf, L_FRAME, 1 - Q_new);
365#else
366	Scale_sig(buf, L_FRAME, 1 - Q_new);
367#endif
368
369	vad_flag = wb_vad(st->vadSt, buf);          /* Voice Activity Detection */
370	if (vad_flag == 0)
371	{
372		st->vad_hist = (st->vad_hist + 1);
373	} else
374	{
375		st->vad_hist = 0;
376	}
377
378	/* DTX processing */
379	if (allow_dtx != 0)
380	{
381		/* Note that mode may change here */
382		tx_dtx_handler(st->dtx_encSt, vad_flag, mode);
383		*ser_size = nb_of_bits[*mode];
384	}
385
386	if(*mode != MRDTX)
387	{
388		Parm_serial(vad_flag, 1, &prms);
389	}
390	/*------------------------------------------------------------------------*
391	 *  Perform LPC analysis                                                  *
392	 *  ~~~~~~~~~~~~~~~~~~~~                                                  *
393	 *   - autocorrelation + lag windowing                                    *
394	 *   - Levinson-durbin algorithm to find a[]                              *
395	 *   - convert a[] to isp[]                                               *
396	 *   - convert isp[] to isf[] for quantization                            *
397	 *   - quantize and code the isf[]                                        *
398	 *   - convert isf[] to isp[] for interpolation                           *
399	 *   - find the interpolated ISPs and convert to a[] for the 4 subframes  *
400	 *------------------------------------------------------------------------*/
401
402	/* LP analysis centered at 4th subframe */
403	Autocorr(p_window, M, r_h, r_l);                        /* Autocorrelations */
404	Lag_window(r_h, r_l);                                   /* Lag windowing    */
405	Levinson(r_h, r_l, A, rc, st->mem_levinson);            /* Levinson Durbin  */
406	Az_isp(A, ispnew, st->ispold);                          /* From A(z) to ISP */
407
408	/* Find the interpolated ISPs and convert to a[] for all subframes */
409	Int_isp(st->ispold, ispnew, interpol_frac, A);
410
411	/* update ispold[] for the next frame */
412	Copy(ispnew, st->ispold, M);
413
414	/* Convert ISPs to frequency domain 0..6400 */
415	Isp_isf(ispnew, isf, M);
416
417	/* check resonance for pitch clipping algorithm */
418	Gp_clip_test_isf(isf, st->gp_clip);
419
420	/*----------------------------------------------------------------------*
421	 *  Perform PITCH_OL analysis                                           *
422	 *  ~~~~~~~~~~~~~~~~~~~~~~~~~                                           *
423	 * - Find the residual res[] for the whole speech frame                 *
424	 * - Find the weighted input speech wsp[] for the whole speech frame    *
425	 * - scale wsp[] to avoid overflow in pitch estimation                  *
426	 * - Find open loop pitch lag for whole speech frame                    *
427	 *----------------------------------------------------------------------*/
428	p_A = A;
429	for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
430	{
431		/* Weighting of LPC coefficients */
432		Weight_a(p_A, Ap, GAMMA1, M);
433
434#ifdef ASM_OPT                    /* asm optimization branch */
435		Residu_opt(Ap, &speech[i_subfr], &wsp[i_subfr], L_SUBFR);
436#else
437		Residu(Ap, &speech[i_subfr], &wsp[i_subfr], L_SUBFR);
438#endif
439
440		p_A += (M + 1);
441	}
442
443	Deemph2(wsp, TILT_FAC, L_FRAME, &(st->mem_wsp));
444
445	/* find maximum value on wsp[] for 12 bits scaling */
446	max = 0;
447	for (i = 0; i < L_FRAME; i++)
448	{
449		tmp = abs_s(wsp[i]);
450		if(tmp > max)
451		{
452			max = tmp;
453		}
454	}
455	tmp = st->old_wsp_max;
456	if(max > tmp)
457	{
458		tmp = max;                         /* tmp = max(wsp_max, old_wsp_max) */
459	}
460	st->old_wsp_max = max;
461
462	shift = norm_s(tmp) - 3;
463	if (shift > 0)
464	{
465		shift = 0;                         /* shift = 0..-3 */
466	}
467	/* decimation of wsp[] to search pitch in LF and to reduce complexity */
468	LP_Decim2(wsp, L_FRAME, st->mem_decim2);
469
470	/* scale wsp[] in 12 bits to avoid overflow */
471#ifdef  ASM_OPT                  /* asm optimization branch */
472	Scale_sig_opt(wsp, L_FRAME / OPL_DECIM, shift);
473#else
474	Scale_sig(wsp, L_FRAME / OPL_DECIM, shift);
475#endif
476	/* scale old_wsp (warning: exp must be Q_new-Q_old) */
477	exp = exp + (shift - st->old_wsp_shift);
478	st->old_wsp_shift = shift;
479
480	Scale_sig(old_wsp, PIT_MAX / OPL_DECIM, exp);
481	Scale_sig(st->old_hp_wsp, PIT_MAX / OPL_DECIM, exp);
482
483	scale_mem_Hp_wsp(st->hp_wsp_mem, exp);
484
485	/* Find open loop pitch lag for whole speech frame */
486
487	if(*ser_size == NBBITS_7k)
488	{
489		/* Find open loop pitch lag for whole speech frame */
490		T_op = Pitch_med_ol(wsp, st, L_FRAME / OPL_DECIM);
491	} else
492	{
493		/* Find open loop pitch lag for first 1/2 frame */
494		T_op = Pitch_med_ol(wsp, st, (L_FRAME/2) / OPL_DECIM);
495	}
496
497	if(st->ol_gain > 19661)       /* 0.6 in Q15 */
498	{
499		st->old_T0_med = Med_olag(T_op, st->old_ol_lag);
500		st->ada_w = 32767;
501	} else
502	{
503		st->ada_w = vo_mult(st->ada_w, 29491);
504	}
505
506	if(st->ada_w < 26214)
507		st->ol_wght_flg = 0;
508	else
509		st->ol_wght_flg = 1;
510
511	wb_vad_tone_detection(st->vadSt, st->ol_gain);
512	T_op *= OPL_DECIM;
513
514	if(*ser_size != NBBITS_7k)
515	{
516		/* Find open loop pitch lag for second 1/2 frame */
517		T_op2 = Pitch_med_ol(wsp + ((L_FRAME / 2) / OPL_DECIM), st, (L_FRAME/2) / OPL_DECIM);
518
519		if(st->ol_gain > 19661)   /* 0.6 in Q15 */
520		{
521			st->old_T0_med = Med_olag(T_op2, st->old_ol_lag);
522			st->ada_w = 32767;
523		} else
524		{
525			st->ada_w = mult(st->ada_w, 29491);
526		}
527
528		if(st->ada_w < 26214)
529			st->ol_wght_flg = 0;
530		else
531			st->ol_wght_flg = 1;
532
533		wb_vad_tone_detection(st->vadSt, st->ol_gain);
534
535		T_op2 *= OPL_DECIM;
536
537	} else
538	{
539		T_op2 = T_op;
540	}
541	/*----------------------------------------------------------------------*
542	 *                              DTX-CNG                                 *
543	 *----------------------------------------------------------------------*/
544	if(*mode == MRDTX)            /* CNG mode */
545	{
546		/* Buffer isf's and energy */
547#ifdef ASM_OPT                   /* asm optimization branch */
548		Residu_opt(&A[3 * (M + 1)], speech, exc, L_FRAME);
549#else
550		Residu(&A[3 * (M + 1)], speech, exc, L_FRAME);
551#endif
552
553		for (i = 0; i < L_FRAME; i++)
554		{
555			exc2[i] = shr(exc[i], Q_new);
556		}
557
558		L_tmp = 0;
559		for (i = 0; i < L_FRAME; i++)
560			L_tmp += (exc2[i] * exc2[i])<<1;
561
562		L_tmp >>= 1;
563
564		dtx_buffer(st->dtx_encSt, isf, L_tmp, codec_mode);
565
566		/* Quantize and code the ISFs */
567		dtx_enc(st->dtx_encSt, isf, exc2, &prms);
568
569		/* Convert ISFs to the cosine domain */
570		Isf_isp(isf, ispnew_q, M);
571		Isp_Az(ispnew_q, Aq, M, 0);
572
573		for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
574		{
575			corr_gain = synthesis(Aq, &exc2[i_subfr], 0, &speech16k[i_subfr * 5 / 4], st);
576		}
577		Copy(isf, st->isfold, M);
578
579		/* reset speech coder memories */
580		Reset_encoder(st, 0);
581
582		/*--------------------------------------------------*
583		 * Update signal for next frame.                    *
584		 * -> save past of speech[] and wsp[].              *
585		 *--------------------------------------------------*/
586
587		Copy(&old_speech[L_FRAME], st->old_speech, L_TOTAL - L_FRAME);
588		Copy(&old_wsp[L_FRAME / OPL_DECIM], st->old_wsp, PIT_MAX / OPL_DECIM);
589
590		return;
591	}
592	/*----------------------------------------------------------------------*
593	 *                               ACELP                                  *
594	 *----------------------------------------------------------------------*/
595
596	/* Quantize and code the ISFs */
597
598	if (*ser_size <= NBBITS_7k)
599	{
600		Qpisf_2s_36b(isf, isf, st->past_isfq, indice, 4);
601
602		Parm_serial(indice[0], 8, &prms);
603		Parm_serial(indice[1], 8, &prms);
604		Parm_serial(indice[2], 7, &prms);
605		Parm_serial(indice[3], 7, &prms);
606		Parm_serial(indice[4], 6, &prms);
607	} else
608	{
609		Qpisf_2s_46b(isf, isf, st->past_isfq, indice, 4);
610
611		Parm_serial(indice[0], 8, &prms);
612		Parm_serial(indice[1], 8, &prms);
613		Parm_serial(indice[2], 6, &prms);
614		Parm_serial(indice[3], 7, &prms);
615		Parm_serial(indice[4], 7, &prms);
616		Parm_serial(indice[5], 5, &prms);
617		Parm_serial(indice[6], 5, &prms);
618	}
619
620	/* Check stability on isf : distance between old isf and current isf */
621
622	L_tmp = 0;
623	for (i = 0; i < M - 1; i++)
624	{
625		tmp = vo_sub(isf[i], st->isfold[i]);
626		L_tmp += (tmp * tmp)<<1;
627	}
628
629	tmp = extract_h(L_shl2(L_tmp, 8));
630
631	tmp = vo_mult(tmp, 26214);                /* tmp = L_tmp*0.8/256 */
632	tmp = vo_sub(20480, tmp);                 /* 1.25 - tmp (in Q14) */
633
634	stab_fac = shl(tmp, 1);
635
636	if (stab_fac < 0)
637	{
638		stab_fac = 0;
639	}
640	Copy(isf, st->isfold, M);
641
642	/* Convert ISFs to the cosine domain */
643	Isf_isp(isf, ispnew_q, M);
644
645	if (st->first_frame != 0)
646	{
647		st->first_frame = 0;
648		Copy(ispnew_q, st->ispold_q, M);
649	}
650	/* Find the interpolated ISPs and convert to a[] for all subframes */
651
652	Int_isp(st->ispold_q, ispnew_q, interpol_frac, Aq);
653
654	/* update ispold[] for the next frame */
655	Copy(ispnew_q, st->ispold_q, M);
656
657	p_Aq = Aq;
658	for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
659	{
660#ifdef ASM_OPT               /* asm optimization branch */
661		Residu_opt(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
662#else
663		Residu(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
664#endif
665		p_Aq += (M + 1);
666	}
667
668	/* Buffer isf's and energy for dtx on non-speech frame */
669	if (vad_flag == 0)
670	{
671		for (i = 0; i < L_FRAME; i++)
672		{
673			exc2[i] = exc[i] >> Q_new;
674		}
675		L_tmp = 0;
676		for (i = 0; i < L_FRAME; i++)
677			L_tmp += (exc2[i] * exc2[i])<<1;
678		L_tmp >>= 1;
679
680		dtx_buffer(st->dtx_encSt, isf, L_tmp, codec_mode);
681	}
682	/* range for closed loop pitch search in 1st subframe */
683
684	T0_min = T_op - 8;
685	if (T0_min < PIT_MIN)
686	{
687		T0_min = PIT_MIN;
688	}
689	T0_max = (T0_min + 15);
690
691	if(T0_max > PIT_MAX)
692	{
693		T0_max = PIT_MAX;
694		T0_min = T0_max - 15;
695	}
696	/*------------------------------------------------------------------------*
697	 *          Loop for every subframe in the analysis frame                 *
698	 *------------------------------------------------------------------------*
699	 *  To find the pitch and innovation parameters. The subframe size is     *
700	 *  L_SUBFR and the loop is repeated L_FRAME/L_SUBFR times.               *
701	 *     - compute the target signal for pitch search                       *
702	 *     - compute impulse response of weighted synthesis filter (h1[])     *
703	 *     - find the closed-loop pitch parameters                            *
704	 *     - encode the pitch delay                                           *
705	 *     - find 2 LT predictions (with / without LP filter for LT pred)     *
706	 *     - find 2 pitch gains and choose the best lt prediction.            *
707	 *     - find target vector for codebook search                           *
708	 *     - update the impulse response h1[] for codebook search             *
709	 *     - correlation between target vector and impulse response           *
710	 *     - codebook search and encoding                                     *
711	 *     - VQ of pitch and codebook gains                                   *
712	 *     - find voicing factor and tilt of code for next subframe.          *
713	 *     - update states of weighting filter                                *
714	 *     - find excitation and synthesis speech                             *
715	 *------------------------------------------------------------------------*/
716	p_A = A;
717	p_Aq = Aq;
718	for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
719	{
720		pit_flag = i_subfr;
721		if ((i_subfr == 2 * L_SUBFR) && (*ser_size > NBBITS_7k))
722		{
723			pit_flag = 0;
724			/* range for closed loop pitch search in 3rd subframe */
725			T0_min = (T_op2 - 8);
726
727			if (T0_min < PIT_MIN)
728			{
729				T0_min = PIT_MIN;
730			}
731			T0_max = (T0_min + 15);
732			if (T0_max > PIT_MAX)
733			{
734				T0_max = PIT_MAX;
735				T0_min = (T0_max - 15);
736			}
737		}
738		/*-----------------------------------------------------------------------*
739		 *                                                                       *
740		 *        Find the target vector for pitch search:                       *
741		 *        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~                        *
742		 *                                                                       *
743		 *             |------|  res[n]                                          *
744		 * speech[n]---| A(z) |--------                                          *
745		 *             |------|       |   |--------| error[n]  |------|          *
746		 *                   zero -- (-)--| 1/A(z) |-----------| W(z) |-- target *
747		 *                   exc          |--------|           |------|          *
748		 *                                                                       *
749		 * Instead of subtracting the zero-input response of filters from        *
750		 * the weighted input speech, the above configuration is used to         *
751		 * compute the target vector.                                            *
752		 *                                                                       *
753		 *-----------------------------------------------------------------------*/
754
755		for (i = 0; i < M; i++)
756		{
757			error[i] = vo_sub(speech[i + i_subfr - M], st->mem_syn[i]);
758		}
759
760#ifdef ASM_OPT              /* asm optimization branch */
761		Residu_opt(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
762#else
763		Residu(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
764#endif
765		Syn_filt(p_Aq, &exc[i_subfr], error + M, L_SUBFR, error, 0);
766		Weight_a(p_A, Ap, GAMMA1, M);
767
768#ifdef ASM_OPT             /* asm optimization branch */
769		Residu_opt(Ap, error + M, xn, L_SUBFR);
770#else
771		Residu(Ap, error + M, xn, L_SUBFR);
772#endif
773		Deemph2(xn, TILT_FAC, L_SUBFR, &(st->mem_w0));
774
775		/*----------------------------------------------------------------------*
776		 * Find approx. target in residual domain "cn[]" for innovation search. *
777		 *----------------------------------------------------------------------*/
778		/* first half: xn[] --> cn[] */
779		Set_zero(code, M);
780		Copy(xn, code + M, L_SUBFR / 2);
781		tmp = 0;
782		Preemph2(code + M, TILT_FAC, L_SUBFR / 2, &tmp);
783		Weight_a(p_A, Ap, GAMMA1, M);
784		Syn_filt(Ap,code + M, code + M, L_SUBFR / 2, code, 0);
785
786#ifdef ASM_OPT                /* asm optimization branch */
787		Residu_opt(p_Aq,code + M, cn, L_SUBFR / 2);
788#else
789		Residu(p_Aq,code + M, cn, L_SUBFR / 2);
790#endif
791
792		/* second half: res[] --> cn[] (approximated and faster) */
793		Copy(&exc[i_subfr + (L_SUBFR / 2)], cn + (L_SUBFR / 2), L_SUBFR / 2);
794
795		/*---------------------------------------------------------------*
796		 * Compute impulse response, h1[], of weighted synthesis filter  *
797		 *---------------------------------------------------------------*/
798
799		Set_zero(error, M + L_SUBFR);
800		Weight_a(p_A, error + M, GAMMA1, M);
801
802		vo_p0 = error+M;
803		vo_p3 = h1;
804		for (i = 0; i < L_SUBFR; i++)
805		{
806			L_tmp = *vo_p0 << 14;        /* x4 (Q12 to Q14) */
807			vo_p1 = p_Aq + 1;
808			vo_p2 = vo_p0-1;
809			for (j = 1; j <= M/4; j++)
810			{
811				L_tmp -= *vo_p1++ * *vo_p2--;
812				L_tmp -= *vo_p1++ * *vo_p2--;
813				L_tmp -= *vo_p1++ * *vo_p2--;
814				L_tmp -= *vo_p1++ * *vo_p2--;
815			}
816			*vo_p3++ = *vo_p0++ = vo_round((L_tmp <<4));
817		}
818		/* deemph without division by 2 -> Q14 to Q15 */
819		tmp = 0;
820		Deemph2(h1, TILT_FAC, L_SUBFR, &tmp);   /* h1 in Q14 */
821
822		/* h2 in Q12 for codebook search */
823		Copy(h1, h2, L_SUBFR);
824
825		/*---------------------------------------------------------------*
826		 * scale xn[] and h1[] to avoid overflow in dot_product12()      *
827		 *---------------------------------------------------------------*/
828#ifdef  ASM_OPT                  /* asm optimization branch */
829		Scale_sig_opt(h2, L_SUBFR, -2);
830		Scale_sig_opt(xn, L_SUBFR, shift);     /* scaling of xn[] to limit dynamic at 12 bits */
831		Scale_sig_opt(h1, L_SUBFR, 1 + shift);  /* set h1[] in Q15 with scaling for convolution */
832#else
833		Scale_sig(h2, L_SUBFR, -2);
834		Scale_sig(xn, L_SUBFR, shift);     /* scaling of xn[] to limit dynamic at 12 bits */
835		Scale_sig(h1, L_SUBFR, 1 + shift);  /* set h1[] in Q15 with scaling for convolution */
836#endif
837		/*----------------------------------------------------------------------*
838		 *                 Closed-loop fractional pitch search                  *
839		 *----------------------------------------------------------------------*/
840		/* find closed loop fractional pitch  lag */
841		if(*ser_size <= NBBITS_9k)
842		{
843			T0 = Pitch_fr4(&exc[i_subfr], xn, h1, T0_min, T0_max, &T0_frac,
844					pit_flag, PIT_MIN, PIT_FR1_8b, L_SUBFR);
845
846			/* encode pitch lag */
847			if (pit_flag == 0)             /* if 1st/3rd subframe */
848			{
849				/*--------------------------------------------------------------*
850				 * The pitch range for the 1st/3rd subframe is encoded with     *
851				 * 8 bits and is divided as follows:                            *
852				 *   PIT_MIN to PIT_FR1-1  resolution 1/2 (frac = 0 or 2)       *
853				 *   PIT_FR1 to PIT_MAX    resolution 1   (frac = 0)            *
854				 *--------------------------------------------------------------*/
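				/*
				 * Worked example, assuming the usual AMR-WB pitch limits
				 * (PIT_MIN = 34, PIT_FR1_8b = 92, PIT_MAX = 231):
				 *   T0 = 60,  T0_frac = 2 : index = (60*2 + 1) - 68  = 53
				 *   T0 = 100  (>= 92)     : index = (100 - 92) + 116 = 124
				 * Range check: (92-34)*2 + (231-92+1) = 116 + 140 = 256 = 2^8.
				 */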
855				if (T0 < PIT_FR1_8b)
856				{
857					index = ((T0 << 1) + (T0_frac >> 1) - (PIT_MIN<<1));
858				} else
859				{
860					index = ((T0 - PIT_FR1_8b) + ((PIT_FR1_8b - PIT_MIN)*2));
861				}
862
863				Parm_serial(index, 8, &prms);
864
865				/* find T0_min and T0_max for subframe 2 and 4 */
866				T0_min = (T0 - 8);
867				if (T0_min < PIT_MIN)
868				{
869					T0_min = PIT_MIN;
870				}
871				T0_max = T0_min + 15;
872				if (T0_max > PIT_MAX)
873				{
874					T0_max = PIT_MAX;
875					T0_min = (T0_max - 15);
876				}
877			} else
878			{                              /* if subframe 2 or 4 */
879				/*--------------------------------------------------------------*
880				 * The pitch range for subframe 2 or 4 is encoded with 5 bits:  *
881				 *   T0_min  to T0_max     resolution 1/2 (frac = 0 or 2)       *
882				 *--------------------------------------------------------------*/
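				/*
				 * Example: with T0_min = 52, T0 = 57 and T0_frac = 2 the
				 * relative index is (5 << 1) + 1 = 11; 16 lags times 2
				 * fractions fit exactly in the 5 bits sent below.
				 */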
883				i = (T0 - T0_min);
884				index = (i << 1) + (T0_frac >> 1);
885
886				Parm_serial(index, 5, &prms);
887			}
888		} else
889		{
890			T0 = Pitch_fr4(&exc[i_subfr], xn, h1, T0_min, T0_max, &T0_frac,
891					pit_flag, PIT_FR2, PIT_FR1_9b, L_SUBFR);
892
893			/* encode pitch lag */
894			if (pit_flag == 0)             /* if 1st/3rd subframe */
895			{
896				/*--------------------------------------------------------------*
897				 * The pitch range for the 1st/3rd subframe is encoded with     *
898				 * 9 bits and is divided as follows:                            *
899				 *   PIT_MIN to PIT_FR2-1  resolution 1/4 (frac = 0,1,2 or 3)   *
900				 *   PIT_FR2 to PIT_FR1-1  resolution 1/2 (frac = 0 or 2)       *
901				 *   PIT_FR1 to PIT_MAX    resolution 1   (frac = 0)            *
902				 *--------------------------------------------------------------*/
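				/*
				 * Worked example, assuming the usual AMR-WB pitch limits
				 * (PIT_MIN = 34, PIT_FR2 = 128, PIT_FR1_9b = 160, PIT_MAX = 231):
				 *   T0 = 50,  T0_frac = 3 : index = (50*4 + 3) - 136       = 67
				 *   T0 = 140, T0_frac = 2 : index = (280 + 1 - 256) + 376  = 401
				 *   T0 = 200              : index = (200 - 160) + 376 + 64 = 480
				 * Range check: 94*4 + 32*2 + (231-160+1) = 376 + 64 + 72 = 512 = 2^9.
				 */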
903
904				if (T0 < PIT_FR2)
905				{
906					index = ((T0 << 2) + T0_frac) - (PIT_MIN << 2);
907				} else if(T0 < PIT_FR1_9b)
908				{
909					index = ((((T0 << 1) + (T0_frac >> 1)) - (PIT_FR2<<1)) + ((PIT_FR2 - PIT_MIN)<<2));
910				} else
911				{
912					index = (((T0 - PIT_FR1_9b) + ((PIT_FR2 - PIT_MIN)<<2)) + ((PIT_FR1_9b - PIT_FR2)<<1));
913				}
914
915				Parm_serial(index, 9, &prms);
916
917				/* find T0_min and T0_max for subframe 2 and 4 */
918
919				T0_min = (T0 - 8);
920				if (T0_min < PIT_MIN)
921				{
922					T0_min = PIT_MIN;
923				}
924				T0_max = T0_min + 15;
925
926				if (T0_max > PIT_MAX)
927				{
928					T0_max = PIT_MAX;
929					T0_min = (T0_max - 15);
930				}
931			} else
932			{                              /* if subframe 2 or 4 */
933				/*--------------------------------------------------------------*
934				 * The pitch range for subframe 2 or 4 is encoded with 6 bits:  *
935				 *   T0_min  to T0_max     resolution 1/4 (frac = 0,1,2 or 3)   *
936				 *--------------------------------------------------------------*/
937				i = (T0 - T0_min);
938				index = (i << 2) + T0_frac;
939				Parm_serial(index, 6, &prms);
940			}
941		}
942
943		/*-----------------------------------------------------------------*
944		 * Gain clipping test to avoid unstable synthesis on frame erasure *
945		 *-----------------------------------------------------------------*/
946
947		clip_gain = 0;
948		if((st->gp_clip[0] < 154) && (st->gp_clip[1] > 14746))
949			clip_gain = 1;
950
951		/*-----------------------------------------------------------------*
952		 * - find unity gain pitch excitation (adaptive codebook entry)    *
953		 *   with fractional interpolation.                                *
954		 * - find filtered pitch exc. y1[]=exc[] convolved with h1[])      *
955		 * - compute pitch gain1                                           *
956		 *-----------------------------------------------------------------*/
957		/* find pitch excitation */
958#ifdef ASM_OPT                  /* asm optimization branch */
959		pred_lt4_asm(&exc[i_subfr], T0, T0_frac, L_SUBFR + 1);
960#else
961		Pred_lt4(&exc[i_subfr], T0, T0_frac, L_SUBFR + 1);
962#endif
963		if (*ser_size > NBBITS_9k)
964		{
965#ifdef ASM_OPT                   /* asm optimization branch */
966			Convolve_asm(&exc[i_subfr], h1, y1, L_SUBFR);
967#else
968			Convolve(&exc[i_subfr], h1, y1, L_SUBFR);
969#endif
970			gain1 = G_pitch(xn, y1, g_coeff, L_SUBFR);
971			/* clip gain if necessary to avoid problem at decoder */
972			if ((clip_gain != 0) && (gain1 > GP_CLIP))
973			{
974				gain1 = GP_CLIP;
975			}
976			/* find energy of new target xn2[] */
977			Updt_tar(xn, dn, y1, gain1, L_SUBFR);       /* dn used temporarily */
978		} else
979		{
980			gain1 = 0;
981		}
982		/*-----------------------------------------------------------------*
983		 * - find pitch excitation filtered by 1st order LP filter.        *
984		 * - find filtered pitch exc. y2[]=exc[] convolved with h1[])      *
985		 * - compute pitch gain2                                           *
986		 *-----------------------------------------------------------------*/
987		/* find pitch excitation with lp filter */
988		vo_p0 = exc + i_subfr-1;
989		vo_p1 = code;
990		/* find pitch excitation with lp filter */
991		for (i = 0; i < L_SUBFR/2; i++)
992		{
993			L_tmp = 5898 * *vo_p0++;
994			L_tmp1 = 5898 * *vo_p0;
995			L_tmp += 20972 * *vo_p0++;
996			L_tmp1 += 20972 * *vo_p0++;
997			L_tmp1 += 5898 * *vo_p0--;
998			L_tmp += 5898 * *vo_p0;
999			*vo_p1++ = (L_tmp + 0x4000)>>15;
1000			*vo_p1++ = (L_tmp1 + 0x4000)>>15;
1001		}
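		/*
		 * The loop above applies the LTP low-pass filter in Q15:
		 *   code[n] = 0.18*exc[n-1] + 0.64*exc[n] + 0.18*exc[n+1]
		 * (5898 + 20972 + 5898 = 32768, i.e. unity DC gain), computing
		 * two output samples per iteration so the exc[] loads are shared.
		 */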
1002
1003#ifdef ASM_OPT                 /* asm optimization branch */
1004		Convolve_asm(code, h1, y2, L_SUBFR);
1005#else
1006		Convolve(code, h1, y2, L_SUBFR);
1007#endif
1008
1009		gain2 = G_pitch(xn, y2, g_coeff2, L_SUBFR);
1010
1011		/* clip gain if necessary to avoid problem at decoder */
1012		if ((clip_gain != 0) && (gain2 > GP_CLIP))
1013		{
1014			gain2 = GP_CLIP;
1015		}
1016		/* find energy of new target xn2[] */
1017		Updt_tar(xn, xn2, y2, gain2, L_SUBFR);
1018		/*-----------------------------------------------------------------*
1019		 * use the best prediction (minimise quadratic error).             *
1020		 *-----------------------------------------------------------------*/
1021		select = 0;
1022		if(*ser_size > NBBITS_9k)
1023		{
1024			L_tmp = 0L;
1025			vo_p0 = dn;
1026			vo_p1 = xn2;
1027			for (i = 0; i < L_SUBFR/2; i++)
1028			{
1029				L_tmp += *vo_p0 * *vo_p0;
1030				vo_p0++;
1031				L_tmp -= *vo_p1 * *vo_p1;
1032				vo_p1++;
1033				L_tmp += *vo_p0 * *vo_p0;
1034				vo_p0++;
1035				L_tmp -= *vo_p1 * *vo_p1;
1036				vo_p1++;
1037			}
1038
1039			if (L_tmp <= 0)
1040			{
1041				select = 1;
1042			}
1043			Parm_serial(select, 1, &prms);
1044		}
1045		if (select == 0)
1046		{
1047			/* use the lp filter for pitch excitation prediction */
1048			gain_pit = gain2;
1049			Copy(code, &exc[i_subfr], L_SUBFR);
1050			Copy(y2, y1, L_SUBFR);
1051			Copy(g_coeff2, g_coeff, 4);
1052		} else
1053		{
1054			/* no filter used for pitch excitation prediction */
1055			gain_pit = gain1;
1056			Copy(dn, xn2, L_SUBFR);        /* target vector for codebook search */
1057		}
1058		/*-----------------------------------------------------------------*
1059		 * - update cn[] for codebook search                               *
1060		 *-----------------------------------------------------------------*/
1061		Updt_tar(cn, cn, &exc[i_subfr], gain_pit, L_SUBFR);
1062
1063#ifdef  ASM_OPT                           /* asm optimization branch */
1064		Scale_sig_opt(cn, L_SUBFR, shift);     /* scaling of cn[] to limit dynamic at 12 bits */
1065#else
1066		Scale_sig(cn, L_SUBFR, shift);     /* scaling of cn[] to limit dynamic at 12 bits */
1067#endif
1068		/*-----------------------------------------------------------------*
1069		 * - include fixed-gain pitch contribution into impulse resp. h1[] *
1070		 *-----------------------------------------------------------------*/
1071		tmp = 0;
1072		Preemph(h2, st->tilt_code, L_SUBFR, &tmp);
1073
1074		if (T0_frac > 2)
1075			T0 = (T0 + 1);
1076		Pit_shrp(h2, T0, PIT_SHARP, L_SUBFR);
1077		/*-----------------------------------------------------------------*
1078		 * - Correlation between target xn2[] and impulse response h1[]    *
1079		 * - Innovative codebook search                                    *
1080		 *-----------------------------------------------------------------*/
1081		cor_h_x(h2, xn2, dn);
1082		if (*ser_size <= NBBITS_7k)
1083		{
1084			ACELP_2t64_fx(dn, cn, h2, code, y2, indice);
1085
1086			Parm_serial(indice[0], 12, &prms);
1087		} else if(*ser_size <= NBBITS_9k)
1088		{
1089			ACELP_4t64_fx(dn, cn, h2, code, y2, 20, *ser_size, indice);
1090
1091			Parm_serial(indice[0], 5, &prms);
1092			Parm_serial(indice[1], 5, &prms);
1093			Parm_serial(indice[2], 5, &prms);
1094			Parm_serial(indice[3], 5, &prms);
1095		} else if(*ser_size <= NBBITS_12k)
1096		{
1097			ACELP_4t64_fx(dn, cn, h2, code, y2, 36, *ser_size, indice);
1098
1099			Parm_serial(indice[0], 9, &prms);
1100			Parm_serial(indice[1], 9, &prms);
1101			Parm_serial(indice[2], 9, &prms);
1102			Parm_serial(indice[3], 9, &prms);
1103		} else if(*ser_size <= NBBITS_14k)
1104		{
1105			ACELP_4t64_fx(dn, cn, h2, code, y2, 44, *ser_size, indice);
1106
1107			Parm_serial(indice[0], 13, &prms);
1108			Parm_serial(indice[1], 13, &prms);
1109			Parm_serial(indice[2], 9, &prms);
1110			Parm_serial(indice[3], 9, &prms);
1111		} else if(*ser_size <= NBBITS_16k)
1112		{
1113			ACELP_4t64_fx(dn, cn, h2, code, y2, 52, *ser_size, indice);
1114
1115			Parm_serial(indice[0], 13, &prms);
1116			Parm_serial(indice[1], 13, &prms);
1117			Parm_serial(indice[2], 13, &prms);
1118			Parm_serial(indice[3], 13, &prms);
1119		} else if(*ser_size <= NBBITS_18k)
1120		{
1121			ACELP_4t64_fx(dn, cn, h2, code, y2, 64, *ser_size, indice);
1122
1123			Parm_serial(indice[0], 2, &prms);
1124			Parm_serial(indice[1], 2, &prms);
1125			Parm_serial(indice[2], 2, &prms);
1126			Parm_serial(indice[3], 2, &prms);
1127			Parm_serial(indice[4], 14, &prms);
1128			Parm_serial(indice[5], 14, &prms);
1129			Parm_serial(indice[6], 14, &prms);
1130			Parm_serial(indice[7], 14, &prms);
1131		} else if(*ser_size <= NBBITS_20k)
1132		{
1133			ACELP_4t64_fx(dn, cn, h2, code, y2, 72, *ser_size, indice);
1134
1135			Parm_serial(indice[0], 10, &prms);
1136			Parm_serial(indice[1], 10, &prms);
1137			Parm_serial(indice[2], 2, &prms);
1138			Parm_serial(indice[3], 2, &prms);
1139			Parm_serial(indice[4], 10, &prms);
1140			Parm_serial(indice[5], 10, &prms);
1141			Parm_serial(indice[6], 14, &prms);
1142			Parm_serial(indice[7], 14, &prms);
1143		} else
1144		{
1145			ACELP_4t64_fx(dn, cn, h2, code, y2, 88, *ser_size, indice);
1146
1147			Parm_serial(indice[0], 11, &prms);
1148			Parm_serial(indice[1], 11, &prms);
1149			Parm_serial(indice[2], 11, &prms);
1150			Parm_serial(indice[3], 11, &prms);
1151			Parm_serial(indice[4], 11, &prms);
1152			Parm_serial(indice[5], 11, &prms);
1153			Parm_serial(indice[6], 11, &prms);
1154			Parm_serial(indice[7], 11, &prms);
1155		}
1156		/*-------------------------------------------------------*
1157		 * - Add the fixed-gain pitch contribution to code[].    *
1158		 *-------------------------------------------------------*/
1159		tmp = 0;
1160		Preemph(code, st->tilt_code, L_SUBFR, &tmp);
1161		Pit_shrp(code, T0, PIT_SHARP, L_SUBFR);
1162		/*----------------------------------------------------------*
1163		 *  - Compute the fixed codebook gain                       *
1164		 *  - quantize fixed codebook gain                          *
1165		 *----------------------------------------------------------*/
1166		if(*ser_size <= NBBITS_9k)
1167		{
1168			index = Q_gain2(xn, y1, Q_new + shift, y2, code, g_coeff, L_SUBFR, 6,
1169					&gain_pit, &L_gain_code, clip_gain, st->qua_gain);
1170			Parm_serial(index, 6, &prms);
1171		} else
1172		{
1173			index = Q_gain2(xn, y1, Q_new + shift, y2, code, g_coeff, L_SUBFR, 7,
1174					&gain_pit, &L_gain_code, clip_gain, st->qua_gain);
1175			Parm_serial(index, 7, &prms);
1176		}
1177		/* test quantized gain of pitch for pitch clipping algorithm */
1178		Gp_clip_test_gain_pit(gain_pit, st->gp_clip);
1179
1180		L_tmp = L_shl(L_gain_code, Q_new);
1181		gain_code = extract_h(L_add(L_tmp, 0x8000));
1182
1183		/*----------------------------------------------------------*
1184		 * Update parameters for the next subframe.                 *
1185		 * - tilt of code: 0.0 (unvoiced) to 0.5 (voiced)           *
1186		 *----------------------------------------------------------*/
1187		/* find voice factor in Q15 (1=voiced, -1=unvoiced) */
1188		Copy(&exc[i_subfr], exc2, L_SUBFR);
1189
1190#ifdef ASM_OPT                           /* asm optimization branch */
1191		Scale_sig_opt(exc2, L_SUBFR, shift);
1192#else
1193		Scale_sig(exc2, L_SUBFR, shift);
1194#endif
1195		voice_fac = voice_factor(exc2, shift, gain_pit, code, gain_code, L_SUBFR);
1196		/* tilt of code for next subframe: 0.5=voiced, 0=unvoiced */
1197		st->tilt_code = ((voice_fac >> 2) + 8192);
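		/*
		 * In Q15 terms tilt_code = 0.25 + voice_fac/4 (8192/32768 = 0.25),
		 * so a fully voiced subframe (voice_fac = +1.0) gives 0.5 and a
		 * fully unvoiced one (voice_fac = -1.0) gives 0.0, matching the
		 * "0.5=voiced, 0=unvoiced" note above.
		 */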
1198		/*------------------------------------------------------*
1199		 * - Update filter's memory "mem_w0" for finding the    *
1200		 *   target vector in the next subframe.                *
1201		 * - Find the total excitation                          *
1202		 * - Find synthesis speech to update mem_syn[].         *
1203		 *------------------------------------------------------*/
1204
1205		/* y2 in Q9, gain_pit in Q14 */
1206		L_tmp = (gain_code * y2[L_SUBFR - 1])<<1;
1207		L_tmp = L_shl(L_tmp, (5 + shift));
1208		L_tmp = L_negate(L_tmp);
1209		L_tmp += (xn[L_SUBFR - 1] * 16384)<<1;
1210		L_tmp -= (y1[L_SUBFR - 1] * gain_pit)<<1;
1211		L_tmp = L_shl(L_tmp, (1 - shift));
1212		st->mem_w0 = extract_h(L_add(L_tmp, 0x8000));
1213
1214		if (*ser_size >= NBBITS_24k)
1215			Copy(&exc[i_subfr], exc2, L_SUBFR);
1216
1217		for (i = 0; i < L_SUBFR; i++)
1218		{
1219			/* code in Q9, gain_pit in Q14 */
1220			L_tmp = (gain_code * code[i])<<1;
1221			L_tmp = (L_tmp << 5);
1222			L_tmp += (exc[i + i_subfr] * gain_pit)<<1;
1223			L_tmp = L_shl2(L_tmp, 1);
1224			exc[i + i_subfr] = extract_h(L_add(L_tmp, 0x8000));
1225		}
1226
1227		Syn_filt(p_Aq,&exc[i_subfr], synth, L_SUBFR, st->mem_syn, 1);
1228
1229		if(*ser_size >= NBBITS_24k)
1230		{
1231			/*------------------------------------------------------------*
1232			 * phase dispersion to enhance noise in low bit rate          *
1233			 *------------------------------------------------------------*/
1234			/* L_gain_code in Q16 */
1235			VO_L_Extract(L_gain_code, &gain_code, &gain_code_lo);
1236
1237			/*------------------------------------------------------------*
1238			 * noise enhancer                                             *
1239			 * ~~~~~~~~~~~~~~                                             *
1240			 * - Enhance excitation on noise. (modify gain of code)       *
1241			 *   If signal is noisy and LPC filter is stable, move gain   *
1242			 *   of code 1.5 dB toward gain of code threshold.            *
1243			 *   This decreases the noise energy variation by 3 dB.       *
1244			 *------------------------------------------------------------*/
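			/*
			 * The two Q15 constants used below implement the 1.5 dB step:
			 *   6226/32768  ~ 0.19 ~ 10^(1.5/20) - 1   (x 1.19 : +1.5 dB)
			 *   27536/32768 ~ 0.84 ~ 10^(-1.5/20)      (x 0.84 : -1.5 dB)
			 * so L_gain_code moves at most 1.5 dB per subframe toward the
			 * threshold L_gc_thres.
			 */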
1245			tmp = (16384 - (voice_fac >> 1));        /* 1=unvoiced, 0=voiced */
1246			fac = vo_mult(stab_fac, tmp);
1247			L_tmp = L_gain_code;
1248			if(L_tmp < st->L_gc_thres)
1249			{
1250				L_tmp = vo_L_add(L_tmp, Mpy_32_16(gain_code, gain_code_lo, 6226));
1251				if(L_tmp > st->L_gc_thres)
1252				{
1253					L_tmp = st->L_gc_thres;
1254				}
1255			} else
1256			{
1257				L_tmp = Mpy_32_16(gain_code, gain_code_lo, 27536);
1258				if(L_tmp < st->L_gc_thres)
1259				{
1260					L_tmp = st->L_gc_thres;
1261				}
1262			}
1263			st->L_gc_thres = L_tmp;
1264
1265			L_gain_code = Mpy_32_16(gain_code, gain_code_lo, (32767 - fac));
1266			VO_L_Extract(L_tmp, &gain_code, &gain_code_lo);
1267			L_gain_code = vo_L_add(L_gain_code, Mpy_32_16(gain_code, gain_code_lo, fac));
1268
1269			/*------------------------------------------------------------*
1270			 * pitch enhancer                                             *
1271			 * ~~~~~~~~~~~~~~                                             *
1272			 * - Enhance excitation on voice. (HP filtering of code)      *
1273			 *   On voiced signal, filtering of code by a smooth fir HP   *
1274			 *   filter to decrease energy of code in low frequency.      *
1275			 *------------------------------------------------------------*/
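			/*
			 * The smoothing below is the 3-tap FIR  1 - c*z - c*z^-1  with
			 * c = (1 + voice_fac)/8 in [0, 0.25]; at c = 0.25 (fully voiced)
			 * it scales DC by 0.5 and the 6.4 kHz band edge by 1.5, which is
			 * the "decrease energy of code in low frequency" mentioned above.
			 */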
1276
1277			tmp = ((voice_fac >> 3) + 4096); /* 0.25=voiced, 0=unvoiced */
1278
1279			L_tmp = L_deposit_h(code[0]);
1280			L_tmp -= (code[1] * tmp)<<1;
1281			code2[0] = vo_round(L_tmp);
1282
1283			for (i = 1; i < L_SUBFR - 1; i++)
1284			{
1285				L_tmp = L_deposit_h(code[i]);
1286				L_tmp -= (code[i + 1] * tmp)<<1;
1287				L_tmp -= (code[i - 1] * tmp)<<1;
1288				code2[i] = vo_round(L_tmp);
1289			}
1290
1291			L_tmp = L_deposit_h(code[L_SUBFR - 1]);
1292			L_tmp -= (code[L_SUBFR - 2] * tmp)<<1;
1293			code2[L_SUBFR - 1] = vo_round(L_tmp);
1294
1295			/* build excitation */
1296			gain_code = vo_round(L_shl(L_gain_code, Q_new));
1297
1298			for (i = 0; i < L_SUBFR; i++)
1299			{
1300				L_tmp = (code2[i] * gain_code)<<1;
1301				L_tmp = (L_tmp << 5);
1302				L_tmp += (exc2[i] * gain_pit)<<1;
1303				L_tmp = (L_tmp << 1);
1304				exc2[i] = vo_round(L_tmp);
1305			}
1306
1307			corr_gain = synthesis(p_Aq, exc2, Q_new, &speech16k[i_subfr * 5 / 4], st);
1308			Parm_serial(corr_gain, 4, &prms);
1309		}
1310		p_A += (M + 1);
1311		p_Aq += (M + 1);
1312	}                                      /* end of subframe loop */
1313
1314	/*--------------------------------------------------*
1315	 * Update signal for next frame.                    *
1316	 * -> save past of speech[], wsp[] and exc[].       *
1317	 *--------------------------------------------------*/
1318	Copy(&old_speech[L_FRAME], st->old_speech, L_TOTAL - L_FRAME);
1319	Copy(&old_wsp[L_FRAME / OPL_DECIM], st->old_wsp, PIT_MAX / OPL_DECIM);
1320	Copy(&old_exc[L_FRAME], st->old_exc, PIT_MAX + L_INTERPOL);
1321	return;
1322}
1323
1324/*-----------------------------------------------------*
1325* Function synthesis()                                *
1326*                                                     *
1327* Synthesis of signal at 16kHz with HF extension.     *
1328*                                                     *
1329*-----------------------------------------------------*/
1330
1331static Word16 synthesis(
1332		Word16 Aq[],                          /* A(z)  : quantized Az               */
1333		Word16 exc[],                         /* (i)   : excitation at 12kHz        */
1334		Word16 Q_new,                         /* (i)   : scaling performed on exc   */
1335		Word16 synth16k[],                    /* (o)   : 16kHz synthesis signal     */
1336		Coder_State * st                      /* (i/o) : State structure            */
1337		)
1338{
1339	Word16 fac, tmp, exp;
1340	Word16 ener, exp_ener;
1341	Word32 L_tmp, i;
1342
1343	Word16 synth_hi[M + L_SUBFR], synth_lo[M + L_SUBFR];
1344	Word16 synth[L_SUBFR];
1345	Word16 HF[L_SUBFR16k];                 /* High Frequency vector      */
1346	Word16 Ap[M + 1];
1347
1348	Word16 HF_SP[L_SUBFR16k];              /* High Frequency vector (from original signal) */
1349
1350	Word16 HP_est_gain, HP_calc_gain, HP_corr_gain;
1351	Word16 dist_min, dist;
1352	Word16 HP_gain_ind = 0;
1353	Word16 gain1, gain2;
1354	Word16 weight1, weight2;
1355
1356	/*------------------------------------------------------------*
1357	 * speech synthesis                                           *
1358	 * ~~~~~~~~~~~~~~~~                                           *
1359	 * - Find synthesis speech corresponding to exc2[].           *
1360	 * - Perform fixed deemphasis and hp 50hz filtering.          *
1361	 * - Oversampling from 12.8kHz to 16kHz.                      *
1362	 *------------------------------------------------------------*/
1363	Copy(st->mem_syn_hi, synth_hi, M);
1364	Copy(st->mem_syn_lo, synth_lo, M);
1365
1366#ifdef ASM_OPT                 /* asm optimization branch */
1367	Syn_filt_32_asm(Aq, M, exc, Q_new, synth_hi + M, synth_lo + M, L_SUBFR);
1368#else
1369	Syn_filt_32(Aq, M, exc, Q_new, synth_hi + M, synth_lo + M, L_SUBFR);
1370#endif
1371
1372	Copy(synth_hi + L_SUBFR, st->mem_syn_hi, M);
1373	Copy(synth_lo + L_SUBFR, st->mem_syn_lo, M);
1374
1375#ifdef ASM_OPT                 /* asm optimization branch */
1376	Deemph_32_asm(synth_hi + M, synth_lo + M, synth, &(st->mem_deemph));
1377#else
1378	Deemph_32(synth_hi + M, synth_lo + M, synth, PREEMPH_FAC, L_SUBFR, &(st->mem_deemph));
1379#endif
1380
1381	HP50_12k8(synth, L_SUBFR, st->mem_sig_out);
1382
1383	/* Original speech signal as reference for high band gain quantisation */
1384	for (i = 0; i < L_SUBFR16k; i++)
1385	{
1386		HF_SP[i] = synth16k[i];
1387	}
1388
1389	/*------------------------------------------------------*
1390	 * HF noise synthesis                                   *
1391	 * ~~~~~~~~~~~~~~~~~~                                   *
1392	 * - Generate HF noise between 5.5 and 7.5 kHz.         *
1393	 * - Set energy of noise according to synthesis tilt.   *
1394	 *     tilt > 0.8 ==> - 14 dB (voiced)                  *
1395	 *     tilt   0.5 ==> - 6 dB  (voiced or noise)         *
1396	 *     tilt < 0.0 ==>   0 dB  (noise)                   *
1397	 *------------------------------------------------------*/
1398	/* generate white noise vector */
1399	for (i = 0; i < L_SUBFR16k; i++)
1400	{
1401		HF[i] = Random(&(st->seed2))>>3;
1402	}
1403	/* energy of excitation */
1404#ifdef ASM_OPT                    /* asm optimization branch */
1405	Scale_sig_opt(exc, L_SUBFR, -3);
1406	Q_new = Q_new - 3;
1407	ener = extract_h(Dot_product12_asm(exc, exc, L_SUBFR, &exp_ener));
1408#else
1409	Scale_sig(exc, L_SUBFR, -3);
1410	Q_new = Q_new - 3;
1411	ener = extract_h(Dot_product12(exc, exc, L_SUBFR, &exp_ener));
1412#endif
1413
1414	exp_ener = exp_ener - (Q_new + Q_new);
1415	/* set energy of white noise to energy of excitation */
1416#ifdef ASM_OPT              /* asm optimization branch */
1417	tmp = extract_h(Dot_product12_asm(HF, HF, L_SUBFR16k, &exp));
1418#else
1419	tmp = extract_h(Dot_product12(HF, HF, L_SUBFR16k, &exp));
1420#endif
1421
1422	if(tmp > ener)
1423	{
1424		tmp = (tmp >> 1);                 /* Be sure tmp < ener */
1425		exp = (exp + 1);
1426	}
1427	L_tmp = L_deposit_h(div_s(tmp, ener)); /* result is normalized */
1428	exp = (exp - exp_ener);
1429	Isqrt_n(&L_tmp, &exp);
1430	L_tmp = L_shl(L_tmp, (exp + 1));       /* L_tmp x 2, L_tmp in Q31 */
1431	tmp = extract_h(L_tmp);                /* tmp = 2 x sqrt(ener_exc/ener_hf) */
1432
1433	for (i = 0; i < L_SUBFR16k; i++)
1434	{
1435		HF[i] = vo_mult(HF[i], tmp);
1436	}
1437
1438	/* find tilt of synthesis speech (tilt: 1=voiced, -1=unvoiced) */
1439	HP400_12k8(synth, L_SUBFR, st->mem_hp400);
1440
1441	L_tmp = 1L;
1442	for (i = 0; i < L_SUBFR; i++)
1443		L_tmp += (synth[i] * synth[i])<<1;
1444
1445	exp = norm_l(L_tmp);
1446	ener = extract_h(L_tmp << exp);   /* ener = r[0] */
1447
1448	L_tmp = 1L;
1449	for (i = 1; i < L_SUBFR; i++)
1450		L_tmp +=(synth[i] * synth[i - 1])<<1;
1451
1452	tmp = extract_h(L_tmp << exp);    /* tmp = r[1] */
1453
1454	if (tmp > 0)
1455	{
1456		fac = div_s(tmp, ener);
1457	} else
1458	{
1459		fac = 0;
1460	}
1461
1462	/* modify energy of white noise according to synthesis tilt */
1463	gain1 = 32767 - fac;
1464	gain2 = vo_mult(gain1, 20480);
1465	gain2 = shl(gain2, 1);
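	/*
	 * With fac ~ r[1]/r[0] (the synthesis tilt): gain1 = 1 - tilt and
	 * gain2 = 1.25 * (1 - tilt) (20480/32768 = 0.625, doubled by the shift).
	 * E.g. tilt = 0.8 gives gain1 = 0.2 (about -14 dB) and tilt = 0.5 gives
	 * gain1 = 0.5 (-6 dB), matching the HF noise synthesis comment above.
	 */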

	if (st->vad_hist > 0)
	{
		weight1 = 0;
		weight2 = 32767;
	} else
	{
		weight1 = 32767;
		weight2 = 0;
	}
	tmp = vo_mult(weight1, gain1);
	tmp = add1(tmp, vo_mult(weight2, gain2));

	if (tmp != 0)
	{
		tmp = (tmp + 1);
	}
	HP_est_gain = tmp;

	if(HP_est_gain < 3277)
	{
		HP_est_gain = 3277;                /* 0.1 in Q15 */
	}
	/* synthesis of noise: 4.8kHz..5.6kHz --> 6kHz..7kHz */
	Weight_a(Aq, Ap, 19661, M);            /* fac=0.6 */

#ifdef ASM_OPT                /* asm optimization branch */
	Syn_filt_asm(Ap, HF, HF, st->mem_syn_hf);
	/* noise High Pass filtering (1ms of delay) */
	Filt_6k_7k_asm(HF, L_SUBFR16k, st->mem_hf);
	/* filtering of the original signal */
	Filt_6k_7k_asm(HF_SP, L_SUBFR16k, st->mem_hf2);

	/* check the gain difference */
	Scale_sig_opt(HF_SP, L_SUBFR16k, -1);
	ener = extract_h(Dot_product12_asm(HF_SP, HF_SP, L_SUBFR16k, &exp_ener));
	/* set energy of white noise to energy of excitation */
	tmp = extract_h(Dot_product12_asm(HF, HF, L_SUBFR16k, &exp));
#else
	Syn_filt(Ap, HF, HF, L_SUBFR16k, st->mem_syn_hf, 1);
	/* noise High Pass filtering (1ms of delay) */
	Filt_6k_7k(HF, L_SUBFR16k, st->mem_hf);
	/* filtering of the original signal */
	Filt_6k_7k(HF_SP, L_SUBFR16k, st->mem_hf2);
	/* check the gain difference */
	Scale_sig(HF_SP, L_SUBFR16k, -1);
	ener = extract_h(Dot_product12(HF_SP, HF_SP, L_SUBFR16k, &exp_ener));
	/* set energy of white noise to energy of excitation */
	tmp = extract_h(Dot_product12(HF, HF, L_SUBFR16k, &exp));
#endif

	if (tmp > ener)
	{
		tmp = (tmp >> 1);                 /* Be sure tmp < ener */
		exp = (exp + 1);
	}
	L_tmp = L_deposit_h(div_s(tmp, ener)); /* result is normalized */
	exp = vo_sub(exp, exp_ener);
	Isqrt_n(&L_tmp, &exp);
	L_tmp = L_shl(L_tmp, exp);             /* L_tmp in Q31 */
	HP_calc_gain = extract_h(L_tmp);       /* HP_calc_gain = sqrt(ener_input/ener_hf) */

	/* st->gain_alpha *= st->dtx_encSt->dtxHangoverCount/7 */
	L_tmp = (vo_L_mult(st->dtx_encSt->dtxHangoverCount, 4681) << 15);
	st->gain_alpha = vo_mult(st->gain_alpha, extract_h(L_tmp));

	if(st->dtx_encSt->dtxHangoverCount > 6)
		st->gain_alpha = 32767;
	HP_est_gain = HP_est_gain >> 1;     /* From Q15 to Q14 */
	HP_corr_gain = add1(vo_mult(HP_calc_gain, st->gain_alpha), vo_mult((32767 - st->gain_alpha), HP_est_gain));

	/* Quantise the correction gain */
	dist_min = 32767;
	for (i = 0; i < 16; i++)
	{
		dist = vo_mult((HP_corr_gain - HP_gain[i]), (HP_corr_gain - HP_gain[i]));
		if (dist_min > dist)
		{
			dist_min = dist;
			HP_gain_ind = i;
		}
	}
	HP_corr_gain = HP_gain[HP_gain_ind];
	/* return the quantised gain index when using the highest mode, otherwise zero */
	return (HP_gain_ind);
}

/*************************************************
*
* Brief: Codec main function
*
**************************************************/

int AMR_Enc_Encode(HAMRENC hCodec)
{
	Word32 i;
	Coder_State *gData = (Coder_State*)hCodec;
	Word16 *signal;
	Word16 packed_size = 0;
	Word16 prms[NB_BITS_MAX];
	Word16 coding_mode = 0, nb_bits, allow_dtx, mode, reset_flag;
	mode = gData->mode;
	coding_mode = gData->mode;
	nb_bits = nb_of_bits[mode];
	signal = (Word16 *)gData->inputStream;
	allow_dtx = gData->allow_dtx;

	/* check for homing frame */
	reset_flag = encoder_homing_frame_test(signal);

	for (i = 0; i < L_FRAME16k; i++)   /* Delete the 2 LSBs (14-bit input) */
	{
		*(signal + i) = (Word16) (*(signal + i) & 0xfffC);
	}

	coder(&coding_mode, signal, prms, &nb_bits, gData, allow_dtx);
	packed_size = PackBits(prms, coding_mode, mode, gData);
	if (reset_flag != 0)
	{
		Reset_encoder(gData, 1);
	}
	return packed_size;
}

/***************************************************************************
*
* Brief: Codec API function --- Initialize the codec and return a codec handle
*
***************************************************************************/

VO_U32 VO_API voAMRWB_Init(VO_HANDLE * phCodec,                   /* o: the audio codec handle */
						   VO_AUDIO_CODINGTYPE vType,             /* i: Codec Type ID */
						   VO_CODEC_INIT_USERDATA * pUserData     /* i: init Parameters */
						   )
{
	Coder_State *st;
	FrameStream *stream;
#ifdef USE_DEAULT_MEM
	VO_MEM_OPERATOR voMemoprator;
#endif
	VO_MEM_OPERATOR *pMemOP;
	int interMem = 0;

	UNUSED(vType);

	if(pUserData == NULL || pUserData->memflag != VO_IMF_USERMEMOPERATOR || pUserData->memData == NULL )
	{
#ifdef USE_DEAULT_MEM
		voMemoprator.Alloc = cmnMemAlloc;
		voMemoprator.Copy = cmnMemCopy;
		voMemoprator.Free = cmnMemFree;
		voMemoprator.Set = cmnMemSet;
		voMemoprator.Check = cmnMemCheck;
		interMem = 1;
		pMemOP = &voMemoprator;
#else
		*phCodec = NULL;
		return VO_ERR_INVALID_ARG;
#endif
	}
	else
	{
		pMemOP = (VO_MEM_OPERATOR *)pUserData->memData;
	}
	/*-------------------------------------------------------------------------*
	 * Memory allocation for coder state.                                      *
	 *-------------------------------------------------------------------------*/
	if ((st = (Coder_State *)mem_malloc(pMemOP, sizeof(Coder_State), 32, VO_INDEX_ENC_AMRWB)) == NULL)
	{
		return VO_ERR_OUTOF_MEMORY;
	}

	st->vadSt = NULL;
	st->dtx_encSt = NULL;
	st->sid_update_counter = 3;
	st->sid_handover_debt = 0;
	st->prev_ft = TX_SPEECH;
	st->inputStream = NULL;
	st->inputSize = 0;

	/* Default setting */
	st->mode = VOAMRWB_MD2385;                        /* bit rate 23.85kbps */
	st->frameType = VOAMRWB_RFC3267;                  /* frame type: RFC3267 */
	st->allow_dtx = 0;                                /* disable DTX mode */

	st->outputStream = NULL;
	st->outputSize = 0;

	st->stream = (FrameStream *)mem_malloc(pMemOP, sizeof(FrameStream), 32, VO_INDEX_ENC_AMRWB);
	if(st->stream == NULL)
	{
		mem_free(pMemOP, st, VO_INDEX_ENC_AMRWB);
		return VO_ERR_OUTOF_MEMORY;
	}

	st->stream->frame_ptr = (unsigned char *)mem_malloc(pMemOP, Frame_Maxsize, 32, VO_INDEX_ENC_AMRWB);
	if(st->stream->frame_ptr == NULL)
	{
		mem_free(pMemOP, st->stream, VO_INDEX_ENC_AMRWB);
		mem_free(pMemOP, st, VO_INDEX_ENC_AMRWB);
		return VO_ERR_OUTOF_MEMORY;
	}

	stream = st->stream;
	voAWB_InitFrameBuffer(stream);

	wb_vad_init(&(st->vadSt), pMemOP);
	dtx_enc_init(&(st->dtx_encSt), isf_init, pMemOP);

	Reset_encoder((void *) st, 1);

	if(interMem)
	{
		st->voMemoprator.Alloc = cmnMemAlloc;
		st->voMemoprator.Copy = cmnMemCopy;
		st->voMemoprator.Free = cmnMemFree;
		st->voMemoprator.Set = cmnMemSet;
		st->voMemoprator.Check = cmnMemCheck;
		pMemOP = &st->voMemoprator;
	}

	st->pvoMemop = pMemOP;

	*phCodec = (void *) st;

	return VO_ERR_NONE;
}
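
/* Illustrative usage sketch (compiled out): how a caller is expected to satisfy
 * the checks performed by voAMRWB_Init() above, i.e. pass a
 * VO_CODEC_INIT_USERDATA whose memData points to a filled VO_MEM_OPERATOR.
 * The cmnMem* helpers come from cmnMemory.h. VO_AUDIO_CodingAMRWB is an
 * assumed coding-type constant from the VO audio headers (the encoder ignores
 * vType); example_open_encoder is a hypothetical caller-side helper. */
#if 0
static VO_HANDLE example_open_encoder(void)
{
	static VO_MEM_OPERATOR   memOp;      /* stored in st->pvoMemop, must outlive the handle */
	VO_CODEC_INIT_USERDATA   userData;
	VO_HANDLE                hCodec = NULL;

	memOp.Alloc = cmnMemAlloc;
	memOp.Copy  = cmnMemCopy;
	memOp.Free  = cmnMemFree;
	memOp.Set   = cmnMemSet;
	memOp.Check = cmnMemCheck;

	userData.memflag = VO_IMF_USERMEMOPERATOR;
	userData.memData = (VO_PTR)&memOp;

	if (voAMRWB_Init(&hCodec, VO_AUDIO_CodingAMRWB, &userData) != VO_ERR_NONE)
		return NULL;
	return hCodec;                       /* release later with voAMRWB_Uninit() */
}
#endif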

/**********************************************************************************
*
* Brief: Codec API function: Input PCM data
*
***********************************************************************************/

VO_U32 VO_API voAMRWB_SetInputData(
		VO_HANDLE hCodec,                   /* i/o: The codec handle which was created by Init function */
		VO_CODECBUFFER * pInput             /*   i: The input buffer parameter  */
		)
{
	Coder_State  *gData;
	FrameStream  *stream;

	if(NULL == hCodec)
	{
		return VO_ERR_INVALID_ARG;
	}

	gData = (Coder_State *)hCodec;
	stream = gData->stream;

	if(NULL == pInput || NULL == pInput->Buffer)
	{
		return VO_ERR_INVALID_ARG;
	}

	stream->set_ptr    = pInput->Buffer;
	stream->set_len    = pInput->Length;
	stream->frame_ptr  = stream->frame_ptr_bk;
	stream->used_len   = 0;

	return VO_ERR_NONE;
}

/**************************************************************************************
*
* Brief: Codec API function: Get the compressed audio data frame by frame
*
***************************************************************************************/

VO_U32 VO_API voAMRWB_GetOutputData(
		VO_HANDLE hCodec,                    /* i: The Codec Handle which was created by Init function*/
		VO_CODECBUFFER * pOutput,            /* o: The output audio data */
		VO_AUDIO_OUTPUTINFO * pAudioFormat   /* o: Audio format info and amount of input consumed, filled by the encoder */
		)
{
	Coder_State* gData = (Coder_State*)hCodec;
	VO_MEM_OPERATOR  *pMemOP;
	FrameStream  *stream = (FrameStream *)gData->stream;
	pMemOP = (VO_MEM_OPERATOR  *)gData->pvoMemop;

	if(stream->framebuffer_len  < Frame_MaxByte)         /* check the work buffer len */
	{
		stream->frame_storelen = stream->framebuffer_len;
		if(stream->frame_storelen)
		{
			pMemOP->Copy(VO_INDEX_ENC_AMRWB, stream->frame_ptr_bk, stream->frame_ptr, stream->frame_storelen);
		}
		if(stream->set_len > 0)
		{
			voAWB_UpdateFrameBuffer(stream, pMemOP);
		}
		if(stream->framebuffer_len < Frame_MaxByte)
		{
			if(pAudioFormat)
				pAudioFormat->InputUsed = stream->used_len;
			return VO_ERR_INPUT_BUFFER_SMALL;
		}
	}

	gData->inputStream = stream->frame_ptr;
	gData->outputStream = (unsigned short*)pOutput->Buffer;

	gData->outputSize = AMR_Enc_Encode(gData);         /* encoder main function */

	pOutput->Length = gData->outputSize;               /* get the output buffer length */
	stream->frame_ptr += 640;                          /* update the work buffer ptr */
	stream->framebuffer_len  -= 640;

	if(pAudioFormat)                                   /* return output audio information */
	{
		pAudioFormat->Format.Channels = 1;
		pAudioFormat->Format.SampleRate = 16000;
		pAudioFormat->Format.SampleBits = 16;
		pAudioFormat->InputUsed = stream->used_len;
	}
	return VO_ERR_NONE;
}
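
/* Illustrative usage sketch (compiled out): the pull model implemented above.
 * The caller hands in a PCM buffer once with voAMRWB_SetInputData() and then
 * drains packed frames with voAMRWB_GetOutputData() until it returns
 * VO_ERR_INPUT_BUFFER_SMALL, i.e. less than one 640-byte frame (320 samples of
 * 16-bit, 16 kHz mono PCM, 20 ms) remains in the work buffer. OUT_BUF_SIZE and
 * example_encode_buffer are hypothetical caller-side names; OUT_BUF_SIZE only
 * needs to exceed the largest packed frame. */
#if 0
#define OUT_BUF_SIZE 128                           /* > largest packed frame */

static void example_encode_buffer(VO_HANDLE hCodec,
                                  unsigned char *pcm, int pcmBytes,
                                  FILE *bitstreamFile)
{
	VO_CODECBUFFER        inBuf, outBuf;
	VO_AUDIO_OUTPUTINFO   outInfo;
	unsigned char         outData[OUT_BUF_SIZE];

	inBuf.Buffer = pcm;
	inBuf.Length = pcmBytes;
	voAMRWB_SetInputData(hCodec, &inBuf);

	for (;;)
	{
		outBuf.Buffer = outData;
		outBuf.Length = OUT_BUF_SIZE;
		if (voAMRWB_GetOutputData(hCodec, &outBuf, &outInfo) != VO_ERR_NONE)
			break;                             /* needs more input (or error) */
		fwrite(outBuf.Buffer, 1, outBuf.Length, bitstreamFile);
	}
}
#endif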

/*************************************************************************
*
* Brief: Codec API function --- Set the parameter value specified by the parameter ID
*
*************************************************************************/

VO_U32 VO_API voAMRWB_SetParam(
		VO_HANDLE hCodec,   /* i/o: The Codec Handle which was created by Init function */
		VO_S32 uParamID,    /*   i: The param ID */
		VO_PTR pData        /*   i: The param value, depending on the ID */
		)
{
	Coder_State* gData = (Coder_State*)hCodec;
	FrameStream *stream;
	int *lValue = (int*)pData;

	if (gData == NULL)
		return VO_ERR_INVALID_ARG;
	stream = (FrameStream *)(gData->stream);

	switch(uParamID)
	{
		/* setting AMR-WB frame type */
		case VO_PID_AMRWB_FRAMETYPE:
			if(*lValue < VOAMRWB_DEFAULT || *lValue > VOAMRWB_RFC3267)
				return VO_ERR_WRONG_PARAM_ID;
			gData->frameType = *lValue;
			break;
		/* setting AMR-WB bit rate */
		case VO_PID_AMRWB_MODE:
			{
				if(*lValue < VOAMRWB_MD66 || *lValue > VOAMRWB_MD2385)
					return VO_ERR_WRONG_PARAM_ID;
				gData->mode = *lValue;
			}
			break;
		/* enable or disable DTX mode */
		case VO_PID_AMRWB_DTX:
			gData->allow_dtx = (Word16)(*lValue);
			break;

		case VO_PID_COMMON_HEADDATA:
			break;
		/* flush the work buffer */
		case VO_PID_COMMON_FLUSH:
			stream->set_ptr = NULL;
			stream->frame_storelen = 0;
			stream->framebuffer_len = 0;
			stream->set_len = 0;
			break;

		default:
			return VO_ERR_WRONG_PARAM_ID;
	}
	return VO_ERR_NONE;
}

/**************************************************************************
*
* Brief: Codec API function --- Get the parameter value specified by the parameter ID
*
***************************************************************************/

VO_U32 VO_API voAMRWB_GetParam(
		VO_HANDLE hCodec,      /* i: The Codec Handle which was created by Init function */
		VO_S32 uParamID,       /* i: The param ID */
		VO_PTR pData           /* o: The param value, depending on the ID */
		)
{
	int    temp;
	Coder_State* gData = (Coder_State*)hCodec;

	if (gData==NULL)
		return VO_ERR_INVALID_ARG;
	switch(uParamID)
	{
		/* output audio format */
		case VO_PID_AMRWB_FORMAT:
			{
				VO_AUDIO_FORMAT* fmt = (VO_AUDIO_FORMAT*)pData;
				fmt->Channels   = 1;
				fmt->SampleRate = 16000;
				fmt->SampleBits = 16;
				break;
			}
		/* output audio channel number */
		case VO_PID_AMRWB_CHANNELS:
			temp = 1;
			*(int *)pData = temp;
			break;
		/* output audio sample rate */
		case VO_PID_AMRWB_SAMPLERATE:
			temp = 16000;
			*(int *)pData = temp;
			break;
		/* output audio frame type */
		case VO_PID_AMRWB_FRAMETYPE:
			temp = gData->frameType;
			*(int *)pData = temp;
			break;
		/* output audio bit rate */
		case VO_PID_AMRWB_MODE:
			temp = gData->mode;
			*(int *)pData = temp;
			break;
		default:
			return VO_ERR_WRONG_PARAM_ID;
	}

	return VO_ERR_NONE;
}

/***********************************************************************************
*
* Brief: Codec API function --- Release the codec after all encoder operations are done
*
*************************************************************************************/

VO_U32 VO_API voAMRWB_Uninit(VO_HANDLE hCodec           /* i/o: Codec handle pointer */
							 )
{
	Coder_State* gData = (Coder_State*)hCodec;
	VO_MEM_OPERATOR *pMemOP;

	if(hCodec)
	{
		pMemOP = gData->pvoMemop;
		if(gData->stream)
		{
			if(gData->stream->frame_ptr_bk)
			{
				mem_free(pMemOP, gData->stream->frame_ptr_bk, VO_INDEX_ENC_AMRWB);
				gData->stream->frame_ptr_bk = NULL;
			}
			mem_free(pMemOP, gData->stream, VO_INDEX_ENC_AMRWB);
			gData->stream = NULL;
		}
		wb_vad_exit(&(gData->vadSt), pMemOP);
		dtx_enc_exit(&(gData->dtx_encSt), pMemOP);

		mem_free(pMemOP, hCodec, VO_INDEX_ENC_AMRWB);
		hCodec = NULL;
	}

	return VO_ERR_NONE;
}

/********************************************************************************
*
* Brief: voGetAMRWBEncAPI gets the API handle of the codec
*
********************************************************************************/

VO_S32 VO_API voGetAMRWBEncAPI(
							   VO_AUDIO_CODECAPI * pEncHandle      /* o: Pointer to the codec API structure to be filled */
							   )
{
	if(NULL == pEncHandle)
		return VO_ERR_INVALID_ARG;
	pEncHandle->Init = voAMRWB_Init;
	pEncHandle->SetInputData = voAMRWB_SetInputData;
	pEncHandle->GetOutputData = voAMRWB_GetOutputData;
	pEncHandle->SetParam = voAMRWB_SetParam;
	pEncHandle->GetParam = voAMRWB_GetParam;
	pEncHandle->Uninit = voAMRWB_Uninit;

	return VO_ERR_NONE;
}
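
/* Illustrative usage sketch (compiled out): end-to-end flow through the API
 * table filled in by voGetAMRWBEncAPI(). It reuses the two sketches above
 * (example_open_encoder and example_encode_buffer are hypothetical caller-side
 * helpers, not part of this file's build); PCM_CHUNK is an arbitrary read size
 * chosen as a multiple of the 640-byte frame. */
#if 0
#define PCM_CHUNK (640 * 16)

static int example_encode_file(FILE *pcmFile, FILE *bitstreamFile)
{
	VO_AUDIO_CODECAPI   api;
	VO_HANDLE           hCodec;
	VO_S32              mode = VOAMRWB_MD2385;      /* 23.85 kbps */
	unsigned char       pcm[PCM_CHUNK];
	size_t              n;

	if (voGetAMRWBEncAPI(&api) != VO_ERR_NONE)
		return -1;

	hCodec = example_open_encoder();                /* wraps api.Init(), see sketch above */
	if (hCodec == NULL)
		return -1;

	api.SetParam(hCodec, VO_PID_AMRWB_MODE, &mode); /* pick the bit rate */

	while ((n = fread(pcm, 1, PCM_CHUNK, pcmFile)) > 0)
		example_encode_buffer(hCodec, pcm, (int)n, bitstreamFile);

	api.Uninit(hCodec);
	return 0;
}
#endif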

#ifdef __cplusplus
}
#endif
