/* K=9 r=1/3 Viterbi decoder for PowerPC G4/G5 Altivec vector instructions
 * 8-bit offset-binary soft decision samples
 * Copyright Aug 2006, Phil Karn, KA9Q
 * May be used under the terms of the GNU Lesser General Public License (LGPL)
 */
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <limits.h>
#include "fec.h"

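/* A decision_t holds the 256 survivor decisions (one bit per state) for one
 * bit time, packed into two 16-byte vectors; a metric_t holds the 256
 * 16-bit path metrics as 32 vectors of 8 unsigned shorts.
 */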
typedef union { unsigned char c[2][16]; vector unsigned char v[2]; } decision_t;
typedef union { unsigned short s[256]; vector unsigned short v[32]; } metric_t;

/* Branch metric lookup tables, one per code polynomial; the 128 entries
 * cover the 128 butterflies. Filled in by set_viterbi39_polynomial_av().
 */
static union branchtab39 { unsigned short s[128]; vector unsigned short v[16]; } Branchtab39[3];
static int Init = 0;

/* State info for instance of Viterbi decoder */
struct v39 {
  metric_t metrics1; /* path metric buffer 1 */
  metric_t metrics2; /* path metric buffer 2 */
  void *dp;          /* Pointer to current decision */
  metric_t *old_metrics,*new_metrics; /* Pointers to path metrics, swapped on every bit */
  void *decisions;   /* Beginning of decisions for block */
};

/* Initialize Viterbi decoder for start of new frame */
int init_viterbi39_av(void *p,int starting_state){
  struct v39 *vp = p;
  int i;

  for(i=0;i<32;i++)
    vp->metrics1.v[i] = (vector unsigned short)(1000);

  vp->old_metrics = &vp->metrics1;
  vp->new_metrics = &vp->metrics2;
  vp->dp = vp->decisions;
  vp->old_metrics->s[starting_state & 255] = 0; /* Bias known start state */
  return 0;
}

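/* Load the branch metric tables for the three code polynomials. Each entry
 * is the ideal offset-binary channel symbol (0 or 255) that the polynomial
 * produces on the corresponding state transition; a negative polynomial
 * value selects an inverted encoder output.
 */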
void set_viterbi39_polynomial_av(int polys[3]){
  int state;

  for(state=0;state < 128;state++){
    Branchtab39[0].s[state] = (polys[0] < 0) ^ parity((2*state) & abs(polys[0])) ? 255 : 0;
    Branchtab39[1].s[state] = (polys[1] < 0) ^ parity((2*state) & abs(polys[1])) ? 255 : 0;
    Branchtab39[2].s[state] = (polys[2] < 0) ^ parity((2*state) & abs(polys[2])) ? 255 : 0;
  }
  Init++;
}

/* Create a new instance of a Viterbi decoder */
void *create_viterbi39_av(int len){
  struct v39 *vp;

  if(!Init){
    int polys[3] = { V39POLYA, V39POLYB, V39POLYC };

    set_viterbi39_polynomial_av(polys);
  }
  vp = (struct v39 *)malloc(sizeof(struct v39));
  if(vp == NULL)
    return NULL;
  vp->decisions = malloc(sizeof(decision_t)*(len+8));
  if(vp->decisions == NULL){
    free(vp);
    return NULL;
  }
  init_viterbi39_av(vp,0);
  return vp;
}
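
/* Typical use of this decoder, sketched here for illustration only (the
 * frame length and buffer sizes are arbitrary; the caller supplies
 * 3*(nbits+8) soft symbols, including the 8 tail bits that flush the
 * encoder back to state 0):
 *
 *   int nbits = 1024;                           // data bits per frame
 *   unsigned char syms[3*(1024+8)];             // received offset-binary soft symbols
 *   unsigned char data[1024/8];                 // decoded output
 *   void *vp = create_viterbi39_av(nbits);
 *
 *   init_viterbi39_av(vp,0);                    // known starting state 0
 *   update_viterbi39_blk_av(vp,syms,nbits+8);   // run the trellis over data + tail
 *   chainback_viterbi39_av(vp,data,nbits,0);    // trace back from ending state 0
 *   delete_viterbi39_av(vp);
 */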

/* Viterbi chainback */
int chainback_viterbi39_av(
      void *p,
      unsigned char *data, /* Decoded output data */
      unsigned int nbits, /* Number of data bits */
      unsigned int endstate){ /* Terminal encoder state */
  struct v39 *vp = p;
  decision_t *d = (decision_t *)vp->decisions;
  int path_metric;

  /* With K=9 the encoder register is exactly 8 bits wide, so a full byte of
   * decoded data accumulates directly in the state; just reduce endstate to
   * the 256-state space
   */
  endstate %= 256;

  path_metric = vp->old_metrics->s[endstate];

  /* The store into data[] only needs to be done every 8 bits.
   * But this avoids a conditional branch, and the writes will
   * combine in the cache anyway
   */
  d += 8; /* Look past tail */
  while(nbits-- != 0){
    int k;

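    /* Pull out the decision bit recorded for the current state at this bit
     * time; the [half][byte] and bit addressing undoes the interleaved
     * packing done in update_viterbi39_blk_av()
     */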
    k = (d[nbits].c[endstate >> 7][endstate & 15] & (0x80 >> ((endstate>>4)&7)) ) ? 1 : 0;
    endstate = (k << 7) | (endstate >> 1);
    data[nbits>>3] = endstate;
  }
  return path_metric;
}

/* Delete instance of a Viterbi decoder */
void delete_viterbi39_av(void *p){
  struct v39 *vp = p;

  if(vp != NULL){
    free(vp->decisions);
    free(vp);
  }
}

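/* Update the decoder with a block of nbits demodulated symbol triples,
 * recording one decision_t per bit time; returns the total metric offset
 * removed by renormalization
 */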
int update_viterbi39_blk_av(void *p,unsigned char *syms,int nbits){
  struct v39 *vp = p;
  decision_t *d = (decision_t *)vp->dp;
  int path_metric = 0;
  vector unsigned char decisions = (vector unsigned char)(0);
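  /* decisions collects one survivor decision per byte lane on each pass of
   * the inner butterfly loop; every 8 passes the 128 accumulated bits are
   * flushed to the decision memory
   */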

  while(nbits--){
    vector unsigned short symv,sym0v,sym1v,sym2v;
    vector unsigned char s;
    void *tmp;
    int i;

    /* Splat the 0th symbol across sym0v, the 1st symbol across sym1v, etc */
    s = (vector unsigned char)vec_perm(vec_ld(0,syms),vec_ld(5,syms),vec_lvsl(0,syms));
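    /* vec_ld() rounds its effective address down to a 16-byte boundary, so
     * the two loads plus the vec_lvsl() permute realign the (possibly
     * unaligned) symbol triple to elements 0-2 of s
     */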

    symv = (vector unsigned short)vec_mergeh((vector unsigned char)(0),s);    /* Unsigned byte->word unpack */
    sym0v = vec_splat(symv,0);
    sym1v = vec_splat(symv,1);
    sym2v = vec_splat(symv,2);
    syms += 3;

    for(i=0;i<16;i++){
      vector bool short decision0,decision1;
      vector unsigned short metric,m_metric,m0,m1,m2,m3,survivor0,survivor1;

      /* Form branch metrics
       * Because Branchtab takes on values 0 and 255, and the values of sym?v are offset binary in the range 0-255,
       * the XOR operations constitute conditional negation.
       * The metrics are in the range 0-765
       */
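      /* e.g. if the expected symbol is 255 and the received sample is x
       * (0-255), x ^ 255 = 255 - x, so each per-symbol metric is the
       * distance from the ideal value and the 3-symbol sum is 0-765
       */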
      m0 = vec_add(vec_xor(Branchtab39[0].v[i],sym0v),vec_xor(Branchtab39[1].v[i],sym1v));
      m1 = vec_xor(Branchtab39[2].v[i],sym2v);
      metric = vec_add(m0,m1);
      m_metric = vec_sub((vector unsigned short)(765),metric);

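      /* Butterfly structure (lane j of vector i corresponds to old state
       * s = 8*i + j): old states s and s+128 both feed new states 2s and
       * 2s+1; survivor0 below holds the winners for the even new states,
       * survivor1 for the odd ones
       */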
      /* Add branch metrics to path metrics */
      m0 = vec_adds(vp->old_metrics->v[i],metric);
      m3 = vec_adds(vp->old_metrics->v[16+i],metric);
      m1 = vec_adds(vp->old_metrics->v[16+i],m_metric);
      m2 = vec_adds(vp->old_metrics->v[i],m_metric);

      /* Compare and select */
      decision0 = vec_cmpgt(m0,m1);
      decision1 = vec_cmpgt(m2,m3);
      survivor0 = vec_min(m0,m1);
      survivor1 = vec_min(m2,m3);

      /* Store decisions and survivors.
       * Altivec lacks an equivalent of SSE2's handy PMOVMSKB instruction, so to save space
       * we pack and store the decisions in a funny interleaved fashion that we undo in the chainback function.
       */
      decisions = vec_add(decisions,decisions); /* Shift each byte 1 bit to the left */

      /* Booleans are either 0xff or 0x00. Subtracting 0x00 leaves the lsb zero; subtracting
       * 0xff is equivalent to adding 1, which sets the lsb.
       */
      decisions = vec_sub(decisions,(vector unsigned char)vec_pack(vec_mergeh(decision0,decision1),vec_mergel(decision0,decision1)));

      vp->new_metrics->v[2*i] = vec_mergeh(survivor0,survivor1);
      vp->new_metrics->v[2*i+1] = vec_mergel(survivor0,survivor1);

      if((i % 8) == 7){
        /* We've accumulated a total of 128 decisions, stash and start again */
        d->v[i>>3] = decisions; /* No need to clear, the new bits will replace the old */
      }
    }
#if 0
    /* Experimentally determine metric spread
     * The results are fixed for a given code and input symbol size
     */
    {
      int i;
      vector unsigned short min_metric;
      vector unsigned short max_metric;
      union { vector unsigned short v; unsigned short s[8];} t;
      int minimum,maximum;
      static int max_spread = 0;

      min_metric = max_metric = vp->new_metrics->v[0];
      for(i=1;i<32;i++){
        min_metric = vec_min(min_metric,vp->new_metrics->v[i]);
        max_metric = vec_max(max_metric,vp->new_metrics->v[i]);
      }
      min_metric = vec_min(min_metric,vec_sld(min_metric,min_metric,8));
      max_metric = vec_max(max_metric,vec_sld(max_metric,max_metric,8));
      min_metric = vec_min(min_metric,vec_sld(min_metric,min_metric,4));
      max_metric = vec_max(max_metric,vec_sld(max_metric,max_metric,4));
      min_metric = vec_min(min_metric,vec_sld(min_metric,min_metric,2));
      max_metric = vec_max(max_metric,vec_sld(max_metric,max_metric,2));

      t.v = min_metric;
      minimum = t.s[0];
      t.v = max_metric;
      maximum = t.s[0];
      if(maximum-minimum > max_spread){
        max_spread = maximum-minimum;
        printf("metric spread = %d\n",max_spread);
      }
    }
#endif

    /* Renormalize if necessary. This deserves some explanation.
     * The maximum possible spread, found by experiment, for 8 bit symbols is about 3825,
     * so by looking at one arbitrary metric we can tell if any of them could have saturated.
     * However, this is very conservative. Large spreads occur only at very high Eb/No, where
     * saturating a bad path metric doesn't do much to increase its chances of being erroneously chosen as a survivor.
     *
     * At more interesting (low) Eb/No ratios, the spreads are much smaller, so our chances of saturating a metric
     * by not normalizing when we should are extremely low. So either way, the risk to performance is small.
     *
     * All this is borne out by experiment.
     */
    if(vp->new_metrics->s[0] >= USHRT_MAX-5000){
      vector unsigned short scale;
      union { vector unsigned short v; unsigned short s[8];} t;

      /* Find smallest metric and splat */
      scale = vp->new_metrics->v[0];
      for(i=1;i<32;i++)
        scale = vec_min(scale,vp->new_metrics->v[i]);

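      /* Fold the 8 lanes together with 8-, 4- and 2-byte rotations so that
       * every lane ends up holding the overall minimum
       */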
      scale = vec_min(scale,vec_sld(scale,scale,8));
      scale = vec_min(scale,vec_sld(scale,scale,4));
      scale = vec_min(scale,vec_sld(scale,scale,2));

      /* Subtract it from all metrics
       * Work backwards to try to improve the cache hit ratio, assuming LRU
       */
      for(i=31;i>=0;i--)
        vp->new_metrics->v[i] = vec_subs(vp->new_metrics->v[i],scale);
      t.v = scale;
      path_metric += t.s[0];
    }
    d++;
    /* Swap pointers to old and new metrics */
    tmp = vp->old_metrics;
    vp->old_metrics = vp->new_metrics;
    vp->new_metrics = tmp;
  }
  vp->dp = d;
  return path_metric;
}