/*
 *  Licensed to the Apache Software Foundation (ASF) under one or more
 *  contributor license agreements.  See the NOTICE file distributed with
 *  this work for additional information regarding copyright ownership.
 *  The ASF licenses this file to You under the Apache License, Version 2.0
 *  (the "License"); you may not use this file except in compliance with
 *  the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#include <string.h>
#include "cbigint.h"

#if defined(__linux__) || defined(FREEBSD) || defined(ZOS) || defined(MACOSX) || defined(AIX)
#define USE_LL
#endif

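/* On big-endian targets, at(i) XOR-swaps adjacent 32-bit word indices so
 * that 32-bit views of the uint64_t arrays used below see the same word
 * order as on little-endian targets; halfAt(i) does the same for result
 * arrays that start on an odd 32-bit boundary. */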
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define at(i) (i)
#else
#define at(i) ((i)^1)
/* the sequence for halfAt is -1, 2, 1, 4, 3, 6, 5, 8... */
/* and it should correspond to 0, 1, 2, 3, 4, 5, 6, 7... */
#define halfAt(i) (-((-(i)) ^ 1))
#endif

#define HIGH_IN_U64(u64) ((u64) >> 32)
#if defined(USE_LL)
#define LOW_IN_U64(u64) ((u64) & 0x00000000FFFFFFFFLL)
#else
#if defined(USE_L)
#define LOW_IN_U64(u64) ((u64) & 0x00000000FFFFFFFFL)
#else
#define LOW_IN_U64(u64) ((u64) & 0x00000000FFFFFFFF)
#endif /* USE_L */
#endif /* USE_LL */

#if defined(USE_LL)
#define TEN_E1 (0xALL)
#define TEN_E2 (0x64LL)
#define TEN_E3 (0x3E8LL)
#define TEN_E4 (0x2710LL)
#define TEN_E5 (0x186A0LL)
#define TEN_E6 (0xF4240LL)
#define TEN_E7 (0x989680LL)
#define TEN_E8 (0x5F5E100LL)
#define TEN_E9 (0x3B9ACA00LL)
#define TEN_E19 (0x8AC7230489E80000LL)
#else
#if defined(USE_L)
#define TEN_E1 (0xAL)
#define TEN_E2 (0x64L)
#define TEN_E3 (0x3E8L)
#define TEN_E4 (0x2710L)
#define TEN_E5 (0x186A0L)
#define TEN_E6 (0xF4240L)
#define TEN_E7 (0x989680L)
#define TEN_E8 (0x5F5E100L)
#define TEN_E9 (0x3B9ACA00L)
#define TEN_E19 (0x8AC7230489E80000L)
#else
#define TEN_E1 (0xA)
#define TEN_E2 (0x64)
#define TEN_E3 (0x3E8)
#define TEN_E4 (0x2710)
#define TEN_E5 (0x186A0)
#define TEN_E6 (0xF4240)
#define TEN_E7 (0x989680)
#define TEN_E8 (0x5F5E100)
#define TEN_E9 (0x3B9ACA00)
#define TEN_E19 (0x8AC7230489E80000)
#endif /* USE_L */
#endif /* USE_LL */

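/* TIMES_TEN(x) computes 10 * x as 8 * x + 2 * x using only shifts and an
 * add; bitSection extracts a masked field from x and right-justifies it. */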
#define TIMES_TEN(x) (((x) << 3) + ((x) << 1))
#define bitSection(x, mask, shift) (((x) & (mask)) >> (shift))
#define CREATE_DOUBLE_BITS(normalizedM, e) (((normalizedM) & MANTISSA_MASK) | ((static_cast<uint64_t>((e) + E_OFFSET)) << 52))

#if defined(USE_LL)
#define MANTISSA_MASK (0x000FFFFFFFFFFFFFLL)
#define EXPONENT_MASK (0x7FF0000000000000LL)
#define NORMAL_MASK (0x0010000000000000LL)
#define SIGN_MASK (0x8000000000000000LL)
#else
#if defined(USE_L)
#define MANTISSA_MASK (0x000FFFFFFFFFFFFFL)
#define EXPONENT_MASK (0x7FF0000000000000L)
#define NORMAL_MASK (0x0010000000000000L)
#define SIGN_MASK (0x8000000000000000L)
#else
#define MANTISSA_MASK (0x000FFFFFFFFFFFFF)
#define EXPONENT_MASK (0x7FF0000000000000)
#define NORMAL_MASK (0x0010000000000000)
#define SIGN_MASK (0x8000000000000000)
#endif /* USE_L */
#endif /* USE_LL */

#define E_OFFSET (1075)

#define FLOAT_MANTISSA_MASK (0x007FFFFF)
#define FLOAT_EXPONENT_MASK (0x7F800000)
#define FLOAT_NORMAL_MASK   (0x00800000)
#define FLOAT_E_OFFSET (150)

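/* Adds arg2 to the length-word value at arg1, propagating the carry in
 * place.  Returns 1 if the addition carries out of the most significant
 * word, 0 otherwise. */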
int32_t
simpleAddHighPrecision (uint64_t * arg1, int32_t length, uint64_t arg2)
{
  /* assumes length > 0 */
  int32_t index = 1;

  *arg1 += arg2;
  if (arg2 <= *arg1)
    return 0;
  else if (length == 1)
    return 1;

  while (++arg1[index] == 0 && ++index < length);

  return index == length;
}

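/* Adds the length2-word value at arg2 into the length1-word value at arg1
 * in place; if arg2 is longer, only its low length1 words are added.
 * Returns 1 if the sum carries out of the most significant word of arg1,
 * 0 otherwise. */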
int32_t
addHighPrecision (uint64_t * arg1, int32_t length1, uint64_t * arg2, int32_t length2)
{
  /* addition is limited by the length of arg1, as this function
   * stores the result in arg1 */
  /* fix for cc (GCC) 3.2 20020903 (Red Hat Linux 8.0 3.2-7): code generated does not
   * do the temp1 + temp2 + carry addition correctly.  carry is 64 bit because gcc has
   * subtle issues when you mix 64 / 32 bit maths. */
  uint64_t temp1, temp2, temp3;     /* temporary variables to help the SH-4, and gcc */
  uint64_t carry;
  int32_t index;

  if (length1 == 0 || length2 == 0)
    {
      return 0;
    }
  else if (length1 < length2)
    {
      length2 = length1;
    }

  carry = 0;
  index = 0;
  do
    {
      temp1 = arg1[index];
      temp2 = arg2[index];
      temp3 = temp1 + temp2;
      arg1[index] = temp3 + carry;
      if (arg2[index] < arg1[index])
        carry = 0;
      else if (arg2[index] != arg1[index])
        carry = 1;
    }
  while (++index < length2);
  if (!carry)
    return 0;
  else if (index == length1)
    return 1;

  while (++arg1[index] == 0 && ++index < length1);

  return index == length1;
}

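/* Computes arg1 -= arg2 in place via two's complement: arg1 is negated
 * (~arg1 + 1), arg2 is added to give arg2 - arg1, and that sum is negated
 * back to yield arg1 - arg2. */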
void
subtractHighPrecision (uint64_t * arg1, int32_t length1, uint64_t * arg2, int32_t length2)
{
  /* assumes arg1 > arg2 */
  int32_t index;
  for (index = 0; index < length1; ++index)
    arg1[index] = ~arg1[index];
  simpleAddHighPrecision (arg1, length1, 1);

  while (length2 > 0 && arg2[length2 - 1] == 0)
    --length2;

  addHighPrecision (arg1, length1, arg2, length2);

  for (index = 0; index < length1; ++index)
    arg1[index] = ~arg1[index];
  simpleAddHighPrecision (arg1, length1, 1);
}

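/* Multiplies the length-word value at arg1 in place by arg2, which must
 * fit in 32 bits.  Returns the high 32 bits that overflow the most
 * significant word, or 0 if the product still fits in length words. */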
static uint32_t simpleMultiplyHighPrecision(uint64_t* arg1, int32_t length, uint64_t arg2) {
  /* assumes arg2 only holds 32 bits of information */
  uint64_t product;
  int32_t index;

  index = 0;
  product = 0;

  do
    {
      product =
        HIGH_IN_U64 (product) + arg2 * LOW_U32_FROM_PTR (arg1 + index);
      LOW_U32_FROM_PTR (arg1 + index) = LOW_U32_FROM_VAR (product);
      product =
        HIGH_IN_U64 (product) + arg2 * HIGH_U32_FROM_PTR (arg1 + index);
      HIGH_U32_FROM_PTR (arg1 + index) = LOW_U32_FROM_VAR (product);
    }
  while (++index < length);

  return HIGH_U32_FROM_VAR (product);
}

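/* Multiplies the length-word value at arg1 by the 32-bit value arg2 and
 * accumulates the product into the 32-bit words of result, propagating
 * any final carry upward. */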
static void
simpleMultiplyAddHighPrecision (uint64_t * arg1, int32_t length, uint64_t arg2,
                                uint32_t * result)
{
  /* Assumes result can hold the product and arg2 only holds 32 bits
     of information */
  uint64_t product;
  int32_t index, resultIndex;

  index = resultIndex = 0;
  product = 0;

  do
    {
      product =
        HIGH_IN_U64 (product) + result[at (resultIndex)] +
        arg2 * LOW_U32_FROM_PTR (arg1 + index);
      result[at (resultIndex)] = LOW_U32_FROM_VAR (product);
      ++resultIndex;
      product =
        HIGH_IN_U64 (product) + result[at (resultIndex)] +
        arg2 * HIGH_U32_FROM_PTR (arg1 + index);
      result[at (resultIndex)] = LOW_U32_FROM_VAR (product);
      ++resultIndex;
    }
  while (++index < length);

  result[at (resultIndex)] += HIGH_U32_FROM_VAR (product);
  if (result[at (resultIndex)] < HIGH_U32_FROM_VAR (product))
    {
      /* must be careful with ++ operator and macro expansion */
      ++resultIndex;
      while (++result[at (resultIndex)] == 0)
        ++resultIndex;
    }
}

#if __BYTE_ORDER != __LITTLE_ENDIAN
void simpleMultiplyAddHighPrecisionBigEndianFix(uint64_t* arg1, int32_t length, uint64_t arg2, uint32_t* result) {
    /* Assumes result can hold the product and arg2 only holds 32 bits of information */
    int32_t index = 0;
    int32_t resultIndex = 0;
    uint64_t product = 0;

    do {
        product = HIGH_IN_U64(product) + result[halfAt(resultIndex)] + arg2 * LOW_U32_FROM_PTR(arg1 + index);
        result[halfAt(resultIndex)] = LOW_U32_FROM_VAR(product);
        ++resultIndex;
        product = HIGH_IN_U64(product) + result[halfAt(resultIndex)] + arg2 * HIGH_U32_FROM_PTR(arg1 + index);
        result[halfAt(resultIndex)] = LOW_U32_FROM_VAR(product);
        ++resultIndex;
    } while (++index < length);

    result[halfAt(resultIndex)] += HIGH_U32_FROM_VAR(product);
    if (result[halfAt(resultIndex)] < HIGH_U32_FROM_VAR(product)) {
        /* must be careful with ++ operator and macro expansion */
        ++resultIndex;
        while (++result[halfAt(resultIndex)] == 0) ++resultIndex;
    }
}
#endif

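/* Full multiply: result = arg1 * arg2.  Assumes result can hold length
 * words.  The longer operand is scanned once for each 32-bit half of the
 * shorter operand. */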
void
multiplyHighPrecision (uint64_t * arg1, int32_t length1, uint64_t * arg2, int32_t length2,
                       uint64_t * result, int32_t length)
{
  /* assumes result is large enough to hold product */
  uint64_t* temp;
  uint32_t* resultIn32;
  int32_t count, index;

  if (length1 < length2)
    {
      temp = arg1;
      arg1 = arg2;
      arg2 = temp;
      count = length1;
      length1 = length2;
      length2 = count;
    }

  memset (result, 0, sizeof (uint64_t) * length);

  /* length1 >= length2 */
  resultIn32 = reinterpret_cast<uint32_t*>(result);
  index = -1;
  for (count = 0; count < length2; ++count)
    {
      simpleMultiplyAddHighPrecision (arg1, length1, LOW_IN_U64 (arg2[count]),
                                      resultIn32 + (++index));
#if __BYTE_ORDER == __LITTLE_ENDIAN
      simpleMultiplyAddHighPrecision(arg1, length1, HIGH_IN_U64(arg2[count]), resultIn32 + (++index));
#else
      simpleMultiplyAddHighPrecisionBigEndianFix(arg1, length1, HIGH_IN_U64(arg2[count]), resultIn32 + (++index));
#endif
    }
}

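/* Multiplies the length-word value at arg1 by 10 and adds digit, in
 * place; returns the carry out of the top word (0 if none).  For example
 * (a sketch; v must be wide enough for the result):
 *   uint64_t v[1] = { 1 };
 *   simpleAppendDecimalDigitHighPrecision (v, 1, 2);   // v[0] == 12
 *   simpleAppendDecimalDigitHighPrecision (v, 1, 3);   // v[0] == 123
 */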
uint32_t
simpleAppendDecimalDigitHighPrecision (uint64_t * arg1, int32_t length, uint64_t digit)
{
  /* assumes digit is less than 32 bits */
  uint64_t arg;
  int32_t index = 0;

  digit <<= 32;
  do
    {
      arg = LOW_IN_U64 (arg1[index]);
      digit = HIGH_IN_U64 (digit) + TIMES_TEN (arg);
      LOW_U32_FROM_PTR (arg1 + index) = LOW_U32_FROM_VAR (digit);

      arg = HIGH_IN_U64 (arg1[index]);
      digit = HIGH_IN_U64 (digit) + TIMES_TEN (arg);
      HIGH_U32_FROM_PTR (arg1 + index) = LOW_U32_FROM_VAR (digit);
    }
  while (++index < length);

  return HIGH_U32_FROM_VAR (digit);
}

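/* Shifts the length-word value at arg1 left by arg2 bits in place; bits
 * shifted out of the most significant word are discarded.  Whole-word
 * moves are done first, then the remaining 0..63 bit shift. */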
void
simpleShiftLeftHighPrecision (uint64_t * arg1, int32_t length, int32_t arg2)
{
  /* assumes length > 0 */
  int32_t index, offset;
  if (arg2 >= 64)
    {
      offset = arg2 >> 6;
      index = length;

      while (--index - offset >= 0)
        arg1[index] = arg1[index - offset];
      do
        {
          arg1[index] = 0;
        }
      while (--index >= 0);

      arg2 &= 0x3F;
    }

  if (arg2 == 0)
    return;
  while (--length > 0)
    {
      arg1[length] = arg1[length] << arg2 | arg1[length - 1] >> (64 - arg2);
    }
  *arg1 <<= arg2;
}

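/* Returns the 1-based position (1..64) of the highest set bit in *y, or
 * 0 if *y is zero, narrowing the search by halves. */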
int32_t
highestSetBit (uint64_t * y)
{
  uint32_t x;
  int32_t result;

  if (*y == 0)
    return 0;

#if defined(USE_LL)
  if (*y & 0xFFFFFFFF00000000LL)
    {
      x = HIGH_U32_FROM_PTR (y);
      result = 32;
    }
  else
    {
      x = LOW_U32_FROM_PTR (y);
      result = 0;
    }
#else
#if defined(USE_L)
  if (*y & 0xFFFFFFFF00000000L)
    {
      x = HIGH_U32_FROM_PTR (y);
      result = 32;
    }
  else
    {
      x = LOW_U32_FROM_PTR (y);
      result = 0;
    }
#else
  if (*y & 0xFFFFFFFF00000000)
    {
      x = HIGH_U32_FROM_PTR (y);
      result = 32;
    }
  else
    {
      x = LOW_U32_FROM_PTR (y);
      result = 0;
    }
#endif /* USE_L */
#endif /* USE_LL */

  if (x & 0xFFFF0000)
    {
      x = bitSection (x, 0xFFFF0000, 16);
      result += 16;
    }
  if (x & 0xFF00)
    {
      x = bitSection (x, 0xFF00, 8);
      result += 8;
    }
  if (x & 0xF0)
    {
      x = bitSection (x, 0xF0, 4);
      result += 4;
    }
  if (x > 0x7)
    return result + 4;
  else if (x > 0x3)
    return result + 3;
  else if (x > 0x1)
    return result + 2;
  else
    return result + 1;
}

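/* Returns the 1-based position (1..64) of the lowest set bit in *y, or
 * 0 if *y is zero. */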
int32_t
lowestSetBit (uint64_t * y)
{
  uint32_t x;
  int32_t result;

  if (*y == 0)
    return 0;

#if defined(USE_LL)
  if (*y & 0x00000000FFFFFFFFLL)
    {
      x = LOW_U32_FROM_PTR (y);
      result = 0;
    }
  else
    {
      x = HIGH_U32_FROM_PTR (y);
      result = 32;
    }
#else
#if defined(USE_L)
  if (*y & 0x00000000FFFFFFFFL)
    {
      x = LOW_U32_FROM_PTR (y);
      result = 0;
    }
  else
    {
      x = HIGH_U32_FROM_PTR (y);
      result = 32;
    }
#else
  if (*y & 0x00000000FFFFFFFF)
    {
      x = LOW_U32_FROM_PTR (y);
      result = 0;
    }
  else
    {
      x = HIGH_U32_FROM_PTR (y);
      result = 32;
    }
#endif /* USE_L */
#endif /* USE_LL */

  if (!(x & 0xFFFF))
    {
      x = bitSection (x, 0xFFFF0000, 16);
      result += 16;
    }
  if (!(x & 0xFF))
    {
      x = bitSection (x, 0xFF00, 8);
      result += 8;
    }
  if (!(x & 0xF))
    {
      x = bitSection (x, 0xF0, 4);
      result += 4;
    }

  if (x & 0x1)
    return result + 1;
  else if (x & 0x2)
    return result + 2;
  else if (x & 0x4)
    return result + 3;
  else
    return result + 4;
}

int32_t
highestSetBitHighPrecision (uint64_t * arg, int32_t length)
{
  int32_t highBit;

  while (--length >= 0)
    {
      highBit = highestSetBit (arg + length);
      if (highBit)
        return highBit + 64 * length;
    }

  return 0;
}

int32_t
lowestSetBitHighPrecision (uint64_t * arg, int32_t length)
{
  int32_t lowBit, index = -1;

  while (++index < length)
    {
      lowBit = lowestSetBit (arg + index);
      if (lowBit)
        return lowBit + 64 * index;
    }

  return 0;
}

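/* Compares two high-precision values after discarding leading zero words.
 * Returns 1 if arg1 > arg2, -1 if arg1 < arg2, and 0 if they are equal. */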
int32_t
compareHighPrecision (uint64_t * arg1, int32_t length1, uint64_t * arg2, int32_t length2)
{
  while (--length1 >= 0 && arg1[length1] == 0);
  while (--length2 >= 0 && arg2[length2] == 0);

  if (length1 > length2)
    return 1;
  else if (length1 < length2)
    return -1;
  else if (length1 > -1)
    {
      do
        {
          if (arg1[length1] > arg2[length1])
            return 1;
          else if (arg1[length1] < arg2[length1])
            return -1;
        }
      while (--length1 >= 0);
    }

  return 0;
}

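/* Converts the length-word value at arg to the nearest jdouble, rounding
 * half to even.  Values wider than 16 words (1024 bits) overflow to
 * infinity.  For example (a sketch):
 *   uint64_t x[2] = { 0, 1 };                  // represents 2^64
 *   jdouble d = toDoubleHighPrecision (x, 2);  // 1.8446744073709552e19
 */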
jdouble
toDoubleHighPrecision (uint64_t * arg, int32_t length)
{
  int32_t highBit;
  uint64_t mantissa, test64;
  uint32_t test;
  jdouble result;

  while (length > 0 && arg[length - 1] == 0)
    --length;

  if (length == 0)
    result = 0.0;
  else if (length > 16)
    {
      DOUBLE_TO_LONGBITS (result) = EXPONENT_MASK;
    }
  else if (length == 1)
    {
      highBit = highestSetBit (arg);
      if (highBit <= 53)
        {
          highBit = 53 - highBit;
          mantissa = *arg << highBit;
          DOUBLE_TO_LONGBITS (result) =
            CREATE_DOUBLE_BITS (mantissa, -highBit);
        }
      else
        {
          highBit -= 53;
          mantissa = *arg >> highBit;
          DOUBLE_TO_LONGBITS (result) =
            CREATE_DOUBLE_BITS (mantissa, highBit);

          /* perform rounding, round to even in case of tie */
          test = (LOW_U32_FROM_PTR (arg) << (11 - highBit)) & 0x7FF;
          if (test > 0x400 || ((test == 0x400) && (mantissa & 1)))
            DOUBLE_TO_LONGBITS (result) = DOUBLE_TO_LONGBITS (result) + 1;
        }
    }
  else
    {
      highBit = highestSetBit (arg + (--length));
      if (highBit <= 53)
        {
          highBit = 53 - highBit;
          if (highBit > 0)
            {
              mantissa =
                (arg[length] << highBit) | (arg[length - 1] >>
                                            (64 - highBit));
            }
          else
            {
              mantissa = arg[length];
            }
          DOUBLE_TO_LONGBITS (result) =
            CREATE_DOUBLE_BITS (mantissa, length * 64 - highBit);

          /* perform rounding, round to even in case of tie */
          test64 = arg[--length] << highBit;
          if (test64 > SIGN_MASK || ((test64 == SIGN_MASK) && (mantissa & 1)))
            DOUBLE_TO_LONGBITS (result) = DOUBLE_TO_LONGBITS (result) + 1;
          else if (test64 == SIGN_MASK)
            {
              while (--length >= 0)
                {
                  if (arg[length] != 0)
                    {
                      DOUBLE_TO_LONGBITS (result) =
                        DOUBLE_TO_LONGBITS (result) + 1;
                      break;
                    }
                }
            }
        }
      else
        {
          highBit -= 53;
          mantissa = arg[length] >> highBit;
          DOUBLE_TO_LONGBITS (result) =
            CREATE_DOUBLE_BITS (mantissa, length * 64 + highBit);

          /* perform rounding, round to even in case of tie */
          test = (LOW_U32_FROM_PTR (arg + length) << (11 - highBit)) & 0x7FF;
          if (test > 0x400 || ((test == 0x400) && (mantissa & 1)))
            DOUBLE_TO_LONGBITS (result) = DOUBLE_TO_LONGBITS (result) + 1;
          else if (test == 0x400)
            {
              do
                {
                  if (arg[--length] != 0)
                    {
                      DOUBLE_TO_LONGBITS (result) =
                        DOUBLE_TO_LONGBITS (result) + 1;
                      break;
                    }
                }
              while (length > 0);
            }
        }
    }

  return result;
}

static uint64_t simpleMultiplyHighPrecision64(uint64_t* arg1, int32_t length, uint64_t arg2);

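/* Multiplies the length-word value at result in place by 10^e.  Assumes
 * result is wide enough to hold the product.  Returns the possibly grown
 * length in words. */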
int32_t
timesTenToTheEHighPrecision (uint64_t * result, int32_t length, jint e)
{
  /* assumes result can hold value */
  uint64_t overflow;
  int exp10 = e;

  if (e == 0)
    return length;

  /* bad O(n) way of doing it, but simple */
  /*
     do {
     overflow = simpleAppendDecimalDigitHighPrecision(result, length, 0);
     if (overflow)
     result[length++] = overflow;
     } while (--e);
   */
  /* Replace the above implementation, which "multiplies" by 10 e times,
   * with actual multiplications.  10^19 is the largest power of ten that
   * fits in a 64-bit integer, and 10^9 is the largest that fits in 32
   * bits.  It is unclear where the break-even point between an actual
   * multiplication and simpleAppendDecimalDigitHighPrecision() lies, so
   * 10^3 is picked as that point for now.
   */
  while (exp10 >= 19)
    {
      overflow = simpleMultiplyHighPrecision64 (result, length, TEN_E19);
      if (overflow)
        result[length++] = overflow;
      exp10 -= 19;
    }
  while (exp10 >= 9)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E9);
      if (overflow)
        result[length++] = overflow;
      exp10 -= 9;
    }
  if (exp10 == 0)
    return length;
  else if (exp10 == 1)
    {
      overflow = simpleAppendDecimalDigitHighPrecision (result, length, 0);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 2)
    {
      overflow = simpleAppendDecimalDigitHighPrecision (result, length, 0);
      if (overflow)
        result[length++] = overflow;
      overflow = simpleAppendDecimalDigitHighPrecision (result, length, 0);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 3)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E3);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 4)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E4);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 5)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E5);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 6)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E6);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 7)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E7);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 8)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E8);
      if (overflow)
        result[length++] = overflow;
    }
  return length;
}

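/* doubleMantissa and doubleExponent decompose a positive jdouble z into
 * an integer mantissa m and exponent k with z == m * 2^k, restoring the
 * implicit leading bit for normal values; floatMantissa and floatExponent
 * do the same for jfloat. */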
uint64_t
doubleMantissa (jdouble z)
{
  uint64_t m = DOUBLE_TO_LONGBITS (z);

  if ((m & EXPONENT_MASK) != 0)
    m = (m & MANTISSA_MASK) | NORMAL_MASK;
  else
    m = (m & MANTISSA_MASK);

  return m;
}

int32_t
doubleExponent (jdouble z)
{
  /* assumes positive double */
  int32_t k = HIGH_U32_FROM_VAR (z) >> 20;

  if (k)
    k -= E_OFFSET;
  else
    k = 1 - E_OFFSET;

  return k;
}

uint32_t floatMantissa(jfloat z) {
  uint32_t m = FLOAT_TO_INTBITS (z);

  if ((m & FLOAT_EXPONENT_MASK) != 0)
    m = (m & FLOAT_MANTISSA_MASK) | FLOAT_NORMAL_MASK;
  else
    m = (m & FLOAT_MANTISSA_MASK);

  return m;
}

int32_t
floatExponent (jfloat z)
{
  /* assumes positive float */
  int32_t k = FLOAT_TO_INTBITS (z) >> 23;
  if (k)
    k -= FLOAT_E_OFFSET;
  else
    k = 1 - FLOAT_E_OFFSET;

  return k;
}

/* Like simpleMultiplyHighPrecision, but allows a full 64-bit value in
 * arg2.  Multiplies the length-word value at arg1 in place and returns
 * the 64-bit overflow out of the most significant word. */
uint64_t
simpleMultiplyHighPrecision64 (uint64_t * arg1, int32_t length, uint64_t arg2)
{
  uint64_t intermediate, carry1, carry2, prod1, prod2, sum;
  uint64_t* pArg1;
  int32_t index;
  uint32_t buf32;

  index = 0;
  intermediate = 0;
  pArg1 = arg1 + index;
  carry1 = carry2 = 0;

  do
    {
      if ((*pArg1 != 0) || (intermediate != 0))
        {
          prod1 =
            static_cast<uint64_t>(LOW_U32_FROM_VAR (arg2)) * static_cast<uint64_t>(LOW_U32_FROM_PTR (pArg1));
          sum = intermediate + prod1;
          if ((sum < prod1) || (sum < intermediate))
            {
              carry1 = 1;
            }
          else
            {
              carry1 = 0;
            }
          prod1 =
            static_cast<uint64_t>(LOW_U32_FROM_VAR (arg2)) * static_cast<uint64_t>(HIGH_U32_FROM_PTR (pArg1));
          prod2 =
            static_cast<uint64_t>(HIGH_U32_FROM_VAR (arg2)) * static_cast<uint64_t>(LOW_U32_FROM_PTR (pArg1));
          intermediate = carry2 + HIGH_IN_U64 (sum) + prod1 + prod2;
          if ((intermediate < prod1) || (intermediate < prod2))
            {
              carry2 = 1;
            }
          else
            {
              carry2 = 0;
            }
          LOW_U32_FROM_PTR (pArg1) = LOW_U32_FROM_VAR (sum);
          buf32 = HIGH_U32_FROM_PTR (pArg1);
          HIGH_U32_FROM_PTR (pArg1) = LOW_U32_FROM_VAR (intermediate);
          intermediate = carry1 + HIGH_IN_U64 (intermediate)
            + static_cast<uint64_t>(HIGH_U32_FROM_VAR (arg2)) * static_cast<uint64_t>(buf32);
        }
      pArg1++;
    }
  while (++index < length);
  return intermediate;
}