res_cache.c revision cfd8c45725d85f3e2bfccb6b14a9bff59fd5c4c7
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *  * Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 *  * Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *    the documentation and/or other materials provided with the
13 *    distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29#include "resolv_cache.h"
30#include <resolv.h>
31#include <stdlib.h>
32#include <string.h>
33#include <time.h>
34#include "pthread.h"
35
36#include <errno.h>
37#include <arpa/nameser.h>
38#include <sys/system_properties.h>
39#include <net/if.h>
40#include <netdb.h>
41#include <linux/if.h>
42
43#include <arpa/inet.h>
44#include "resolv_private.h"
45#include "resolv_netid.h"
46#include "res_private.h"
47
48/* This code implements a small and *simple* DNS resolver cache.
49 *
50 * It is only used to cache DNS answers for a time defined by the smallest TTL
51 * among the answer records in order to reduce DNS traffic. It is not supposed
52 * to be a full DNS cache, since we plan to implement that in the future in a
53 * dedicated process running on the system.
54 *
55 * Note that its design is kept simple very intentionally, i.e.:
56 *
57 *  - it takes raw DNS query packet data as input, and returns raw DNS
58 *    answer packet data as output
59 *
60 *    (this means that two similar queries that encode the DNS name
61 *     differently will be treated distinctly).
62 *
63 *    the smallest TTL value among the answer records is used as the time
64 *    to keep an answer in the cache.
65 *
66 *    this is bad, but we absolutely want to avoid parsing the answer packets
67 *    (and should be solved by the later full DNS cache process).
68 *
69 *  - the implementation is just a (query-data) => (answer-data) hash table
70 *    with a trivial least-recently-used expiration policy.
71 *
72 * Doing this keeps the code simple and avoids dealing with a lot of things
73 * that a full DNS cache is expected to do.
74 *
75 * The API is also very simple:
76 *
77 *   - the client calls __get_res_cache() to obtain a handle to the cache
78 *     for a given network. the result can be NULL if no cache exists for
79 *     that network (e.g. the cache is disabled).
80 *
81 *   - the client calls _resolv_cache_lookup() before performing a query
82 *
83 *     if the function returns RESOLV_CACHE_FOUND, the answer data has been
84 *     copied into the client-provided answer buffer.
85 *
86 *     if the function returns RESOLV_CACHE_NOTFOUND, the client should perform
87 *     a request normally, *then* call _resolv_cache_add() to add the received
88 *     answer to the cache.
89 *
90 *     if the function returns RESOLV_CACHE_UNSUPPORTED, the client should
91 *     perform a request normally, and *not* call _resolv_cache_add()
92 *
93 *     note that RESOLV_CACHE_UNSUPPORTED is also returned if the answer buffer
94 *     is too short to accommodate the cached result.
95 */
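/* Illustrative sketch (not part of the original file): how a resolver client
 * is expected to drive this API, using the functions defined below. The
 * send_query_to_server() helper is hypothetical and stands for the actual
 * network transaction performed by the resolver.
 *
 *    struct resolv_cache*  cache = __get_res_cache(netid);
 *    int                   anslen = 0;
 *
 *    if (cache != NULL) {
 *        switch (_resolv_cache_lookup(cache, query, querylen,
 *                                     answer, answersize, &anslen)) {
 *        case RESOLV_CACHE_FOUND:
 *            return anslen;                      // answer already copied
 *        case RESOLV_CACHE_NOTFOUND:
 *            anslen = send_query_to_server(query, querylen, answer, answersize);
 *            if (anslen > 0)
 *                _resolv_cache_add(cache, query, querylen, answer, anslen);
 *            else
 *                _resolv_cache_query_failed(cache, query, querylen);
 *            return anslen;
 *        case RESOLV_CACHE_UNSUPPORTED:
 *            return send_query_to_server(query, querylen, answer, answersize);
 *        }
 *    }
 */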
96
97/* the name of an environment variable that is checked the first time
98 * this code is called. if its value is "0", then the resolver cache is
99 * disabled.
100 */
101#define  CONFIG_ENV  "BIONIC_DNSCACHE"
102
103/* entries older than CONFIG_SECONDS seconds are always discarded.
104 */
105#define  CONFIG_SECONDS    (60*10)    /* 10 minutes */
106
107/* default number of entries kept in the cache. This value has been
108 * determined by browsing through various sites and counting the number
109 * of corresponding requests. Keep in mind that our framework is currently
110 * performing two requests per name lookup (one for IPv4, the other for IPv6)
111 *
112 *    www.google.com      4
113 *    www.ysearch.com     6
114 *    www.amazon.com      8
115 *    www.nytimes.com     22
116 *    www.espn.com        28
117 *    www.msn.com         28
118 *    www.lemonde.fr      35
119 *
120 * (determined on 2009-02-17 from Paris, France; results may vary depending
121 *  on location)
122 *
123 * most high-level websites use lots of media/ad servers with different names
124 * but these are generally reused when browsing through the site.
125 *
126 * As such, a value of 64 should be relatively comfortable at the moment.
127 *
128 * ******************************************
129 * * NOTE - this has changed.
130 * * 1) we've added IPv6 support so each dns query results in 2 responses
131 * * 2) we've made this a system-wide cache, so the cost is less (it's not
132 * *    duplicated in each process) and the need is greater (more processes
133 * *    making different requests).
134 * * Upping by 2x for IPv6
135 * * Upping by another 5x for the centralized nature
136 * *****************************************
137 */
138#define  CONFIG_MAX_ENTRIES    (64 * 2 * 5)
139/* name of the system property that can be used to set the cache size */
140
141/****************************************************************************/
142/****************************************************************************/
143/*****                                                                  *****/
144/*****                                                                  *****/
145/*****                                                                  *****/
146/****************************************************************************/
147/****************************************************************************/
148
149/* set to 1 to debug cache operations */
150#define  DEBUG       0
151
152/* set to 1 to debug query data */
153#define  DEBUG_DATA  0
154
155#undef XLOG
156#if DEBUG
157#  include "private/libc_logging.h"
158#  define XLOG(...)  __libc_format_log(ANDROID_LOG_DEBUG,"libc",__VA_ARGS__)
159
160#include <stdio.h>
161#include <stdarg.h>
162
163/** BOUNDED BUFFER FORMATTING
164 **/
165
166/* technical note:
167 *
168 *   the following debugging routines are used to append data to a bounded
169 *   buffer. they take two parameters:
170 *
171 *   - p : a pointer to the current cursor position in the buffer
172 *         this value is initially set to the buffer's address.
173 *
174 *   - end : the address of the buffer's limit, i.e. of the first byte
175 *           after the buffer. this address should never be touched.
176 *
177 *           IMPORTANT: it is assumed that end > buffer_address, i.e.
178 *                      that the buffer is at least one byte.
179 *
180 *   the _bprint_() functions return the new value of 'p' after the data
181 *   has been appended, and also ensure the following:
182 *
183 *   - the returned value will never be strictly greater than 'end'
184 *
185 *   - a return value equal to 'end' means that truncation occurred
186 *     (in which case, end[-1] will be set to 0)
187 *
188 *   - after returning from a _bprint_() function, the content of the buffer
189 *     is always 0-terminated, even in the event of truncation.
190 *
191 *  these conventions allow you to call _bprint_ functions multiple times and
192 *  only check for truncation at the end of the sequence, as in:
193 *
194 *     char  buff[1000], *p = buff, *end = p + sizeof(buff);
195 *
196 *     p = _bprint_c(p, end, '"');
197 *     p = _bprint_s(p, end, my_string);
198 *     p = _bprint_c(p, end, '"');
199 *
200 *     if (p >= end) {
201 *        // buffer was too small
202 *     }
203 *
204 *     printf( "%s", buff );
205 */
206
207/* add a char to a bounded buffer */
208static char*
209_bprint_c( char*  p, char*  end, int  c )
210{
211    if (p < end) {
212        if (p+1 == end)
213            *p++ = 0;
214        else {
215            *p++ = (char) c;
216            *p   = 0;
217        }
218    }
219    return p;
220}
221
222/* add a sequence of bytes to a bounded buffer */
223static char*
224_bprint_b( char*  p, char*  end, const char*  buf, int  len )
225{
226    int  avail = end - p;
227
228    if (avail <= 0 || len <= 0)
229        return p;
230
231    if (avail > len)
232        avail = len;
233
234    memcpy( p, buf, avail );
235    p += avail;
236
237    if (p < end)
238        p[0] = 0;
239    else
240        end[-1] = 0;
241
242    return p;
243}
244
245/* add a string to a bounded buffer */
246static char*
247_bprint_s( char*  p, char*  end, const char*  str )
248{
249    return _bprint_b(p, end, str, strlen(str));
250}
251
252/* add a formatted string to a bounded buffer */
253static char*
254_bprint( char*  p, char*  end, const char*  format, ... )
255{
256    int      avail, n;
257    va_list  args;
258
259    avail = end - p;
260
261    if (avail <= 0)
262        return p;
263
264    va_start(args, format);
265    n = vsnprintf( p, avail, format, args);
266    va_end(args);
267
268    /* certain C libraries return -1 in case of truncation */
269    if (n < 0 || n > avail)
270        n = avail;
271
272    p += n;
273    /* certain C libraries do not zero-terminate in case of truncation */
274    if (p == end)
275        p[-1] = 0;
276
277    return p;
278}
279
280/* add a hex value to a bounded buffer, up to 8 digits */
281static char*
282_bprint_hex( char*  p, char*  end, unsigned  value, int  numDigits )
283{
284    char   text[sizeof(unsigned)*2];
285    int    nn = 0;
286
287    while (numDigits-- > 0) {
288        text[nn++] = "0123456789abcdef"[(value >> (numDigits*4)) & 15];
289    }
290    return _bprint_b(p, end, text, nn);
291}
292
293/* add the hexadecimal dump of some memory area to a bounded buffer */
294static char*
295_bprint_hexdump( char*  p, char*  end, const uint8_t*  data, int  datalen )
296{
297    int   lineSize = 16;
298
299    while (datalen > 0) {
300        int  avail = datalen;
301        int  nn;
302
303        if (avail > lineSize)
304            avail = lineSize;
305
306        for (nn = 0; nn < avail; nn++) {
307            if (nn > 0)
308                p = _bprint_c(p, end, ' ');
309            p = _bprint_hex(p, end, data[nn], 2);
310        }
311        for ( ; nn < lineSize; nn++ ) {
312            p = _bprint_s(p, end, "   ");
313        }
314        p = _bprint_s(p, end, "  ");
315
316        for (nn = 0; nn < avail; nn++) {
317            int  c = data[nn];
318
319            if (c < 32 || c > 127)
320                c = '.';
321
322            p = _bprint_c(p, end, c);
323        }
324        p = _bprint_c(p, end, '\n');
325
326        data    += avail;
327        datalen -= avail;
328    }
329    return p;
330}
331
332/* dump the content of a query packet to the log */
333static void
334XLOG_BYTES( const void*  base, int  len )
335{
336    char  buff[1024];
337    char*  p = buff, *end = p + sizeof(buff);
338
339    p = _bprint_hexdump(p, end, base, len);
340    XLOG("%s",buff);
341}
342
343#else /* !DEBUG */
344#  define  XLOG(...)        ((void)0)
345#  define  XLOG_BYTES(a,b)  ((void)0)
346#endif
347
348static time_t
349_time_now( void )
350{
351    struct timeval  tv;
352
353    gettimeofday( &tv, NULL );
354    return tv.tv_sec;
355}
356
357/* reminder: the general format of a DNS packet is the following:
358 *
359 *    HEADER  (12 bytes)
360 *    QUESTION  (variable)
361 *    ANSWER (variable)
362 *    AUTHORITY (variable)
363 *    ADDITIONAL (variable)
364 *
365 * the HEADER is made of:
366 *
367 *   ID     : 16 : 16-bit unique query identification field
368 *
369 *   QR     :  1 : set to 0 for queries, and 1 for responses
370 *   Opcode :  4 : set to 0 for queries
371 *   AA     :  1 : set to 0 for queries
372 *   TC     :  1 : truncation flag, will be set to 0 in queries
373 *   RD     :  1 : recursion desired
374 *
375 *   RA     :  1 : recursion available (0 in queries)
376 *   Z      :  3 : three reserved zero bits
377 *   RCODE  :  4 : response code (always 0=NOERROR in queries)
378 *
379 *   QDCount: 16 : question count
380 *   ANCount: 16 : Answer count (0 in queries)
381 *   NSCount: 16 : Authority Record count (0 in queries)
382 *   ARCount: 16 : Additional Record count (0 in queries)
383 *
384 * the QUESTION is made of QDCount Question Records (QRs)
385 * the ANSWER is made of ANCount RRs
386 * the AUTHORITY is made of NSCount RRs
387 * the ADDITIONAL is made of ARCount RRs
388 *
389 * Each Question Record (QR) is made of:
390 *
391 *   QNAME   : variable : Query DNS NAME
392 *   TYPE    : 16       : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
393 *   CLASS   : 16       : class of query (IN=1)
394 *
395 * Each Resource Record (RR) is made of:
396 *
397 *   NAME    : variable : DNS NAME
398 *   TYPE    : 16       : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
399 *   CLASS   : 16       : class of query (IN=1)
400 *   TTL     : 32       : seconds to cache this RR (0=none)
401 *   RDLENGTH: 16       : size of RDDATA in bytes
402 *   RDDATA  : variable : RR data (depends on TYPE)
403 *
404 * Each QNAME contains a domain name encoded as a sequence of 'labels'
405 * terminated by a zero. Each label has the following format:
406 *
407 *    LEN  : 8     : length of label (MUST be < 64)
408 *    NAME : 8*LEN : label characters (must exclude dots)
409 *
410 * A value of 0 in the encoding is interpreted as the 'root' domain and
411 * terminates the encoding. So 'www.android.com' will be encoded as:
412 *
413 *   <3>www<7>android<3>com<0>
414 *
415 * Where <n> represents the byte with value 'n'
416 *
417 * Each NAME reflects the QNAME of the question, but has a slightly more
418 * complex encoding in order to provide message compression. This is achieved
419 * by using a 2-byte pointer, with format:
420 *
421 *    TYPE   : 2  : 0b11 to indicate a pointer, 0b01 and 0b10 are reserved
422 *    OFFSET : 14 : offset to another part of the DNS packet
423 *
424 * The offset is relative to the start of the DNS packet and must point to
425 * an earlier occurrence of a name in the message. A pointer terminates the encoding.
426 *
427 * The NAME can be encoded in one of the following formats:
428 *
429 *   - a sequence of simple labels terminated by 0 (like QNAMEs)
430 *   - a single pointer
431 *   - a sequence of simple labels terminated by a pointer
432 *
433 * A pointer shall always point to either a pointer or a sequence of
434 * labels (which can themselves be terminated by either a 0 or a pointer)
435 *
436 * The expanded length of a given domain name should not exceed 255 bytes.
437 *
438 * NOTE: we don't parse the answer packets, so don't need to deal with NAME
439 *       records, only QNAMEs.
440 */
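/* Worked example (illustrative only, not used by the code): a minimal query
 * for 'www.android.com', TYPE=A, CLASS=IN, with the RD bit set and an
 * arbitrary ID of 0x1234, is laid out as follows:
 *
 *    0x12 0x34                              ID
 *    0x01 0x00                              flags (only RD set)
 *    0x00 0x01                              QDCOUNT = 1
 *    0x00 0x00  0x00 0x00  0x00 0x00        ANCOUNT, NSCOUNT, ARCOUNT = 0
 *    0x03 'w' 'w' 'w'
 *    0x07 'a' 'n' 'd' 'r' 'o' 'i' 'd'
 *    0x03 'c' 'o' 'm'
 *    0x00                                   end of QNAME
 *    0x00 0x01                              TYPE  = A
 *    0x00 0x01                              CLASS = IN
 */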
441
442#define  DNS_HEADER_SIZE  12
443
444#define  DNS_TYPE_A   "\00\01"   /* big-endian decimal 1 */
445#define  DNS_TYPE_PTR "\00\014"  /* big-endian decimal 12 */
446#define  DNS_TYPE_MX  "\00\017"  /* big-endian decimal 15 */
447#define  DNS_TYPE_AAAA "\00\034" /* big-endian decimal 28 */
448#define  DNS_TYPE_ALL "\00\377"  /* big-endian decimal 255 */
449
450#define  DNS_CLASS_IN "\00\01"   /* big-endian decimal 1 */
451
452typedef struct {
453    const uint8_t*  base;
454    const uint8_t*  end;
455    const uint8_t*  cursor;
456} DnsPacket;
457
458static void
459_dnsPacket_init( DnsPacket*  packet, const uint8_t*  buff, int  bufflen )
460{
461    packet->base   = buff;
462    packet->end    = buff + bufflen;
463    packet->cursor = buff;
464}
465
466static void
467_dnsPacket_rewind( DnsPacket*  packet )
468{
469    packet->cursor = packet->base;
470}
471
472static void
473_dnsPacket_skip( DnsPacket*  packet, int  count )
474{
475    const uint8_t*  p = packet->cursor + count;
476
477    if (p > packet->end)
478        p = packet->end;
479
480    packet->cursor = p;
481}
482
483static int
484_dnsPacket_readInt16( DnsPacket*  packet )
485{
486    const uint8_t*  p = packet->cursor;
487
488    if (p+2 > packet->end)
489        return -1;
490
491    packet->cursor = p+2;
492    return (p[0]<< 8) | p[1];
493}
494
495/** QUERY CHECKING
496 **/
497
498/* check bytes in a dns packet. returns 1 on success, 0 on failure.
499 * the cursor is only advanced in the case of success
500 */
501static int
502_dnsPacket_checkBytes( DnsPacket*  packet, int  numBytes, const void*  bytes )
503{
504    const uint8_t*  p = packet->cursor;
505
506    if (p + numBytes > packet->end)
507        return 0;
508
509    if (memcmp(p, bytes, numBytes) != 0)
510        return 0;
511
512    packet->cursor = p + numBytes;
513    return 1;
514}
515
516/* parse and skip a given QNAME stored in a query packet,
517 * from the current cursor position. returns 1 on success,
518 * or 0 for malformed data.
519 */
520static int
521_dnsPacket_checkQName( DnsPacket*  packet )
522{
523    const uint8_t*  p   = packet->cursor;
524    const uint8_t*  end = packet->end;
525
526    for (;;) {
527        int  c;
528
529        if (p >= end)
530            break;
531
532        c = *p++;
533
534        if (c == 0) {
535            packet->cursor = p;
536            return 1;
537        }
538
539        /* we don't expect label compression in QNAMEs */
540        if (c >= 64)
541            break;
542
543        p += c;
544        /* we rely on the bound check at the start
545         * of the loop here */
546    }
547    /* malformed data */
548    XLOG("malformed QNAME");
549    return 0;
550}
551
552/* parse and skip a given QR stored in a packet.
553 * returns 1 on success, and 0 on failure
554 */
555static int
556_dnsPacket_checkQR( DnsPacket*  packet )
557{
558    if (!_dnsPacket_checkQName(packet))
559        return 0;
560
561    /* TYPE must be one of the things we support */
562    if (!_dnsPacket_checkBytes(packet, 2, DNS_TYPE_A) &&
563        !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_PTR) &&
564        !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_MX) &&
565        !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_AAAA) &&
566        !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_ALL))
567    {
568        XLOG("unsupported TYPE");
569        return 0;
570    }
571    /* CLASS must be IN */
572    if (!_dnsPacket_checkBytes(packet, 2, DNS_CLASS_IN)) {
573        XLOG("unsupported CLASS");
574        return 0;
575    }
576
577    return 1;
578}
579
580/* check the header of a DNS Query packet, return 1 if it is one
581 * type of query we can cache, or 0 otherwise
582 */
583static int
584_dnsPacket_checkQuery( DnsPacket*  packet )
585{
586    const uint8_t*  p = packet->base;
587    int             qdCount, anCount, dnCount, arCount;
588
589    if (p + DNS_HEADER_SIZE > packet->end) {
590        XLOG("query packet too small");
591        return 0;
592    }
593
594    /* QR must be set to 0, opcode must be 0 and AA must be 0 */
595    /* RA, Z, and RCODE must be 0 */
596    if ((p[2] & 0xFC) != 0 || p[3] != 0) {
597        XLOG("query packet flags unsupported");
598        return 0;
599    }
600
601    /* Note that we ignore the TC and RD bits here for the
602     * following reasons:
603     *
604     * - there is no point for a query packet sent to a server
605     *   to have the TC bit set, but the implementation might
606     *   set the bit in the query buffer for its own needs
607     *   between a _resolv_cache_lookup and a
608     *   _resolv_cache_add. We should not freak out if this
609     *   is the case.
610     *
611     * - we consider that the result from a RD=0 or a RD=1
612 *   query might be different, hence the RD bit
613 *   should be used to differentiate cached results.
614     *
615     *   this implies that RD is checked when hashing or
616     *   comparing query packets, but not TC
617     */
618
619    /* ANCOUNT, NSCOUNT and ARCOUNT must be 0 */
620    qdCount = (p[4] << 8) | p[5];
621    anCount = (p[6] << 8) | p[7];
622    dnCount = (p[8] << 8) | p[9];
623    arCount = (p[10]<< 8) | p[11];
624
625    if (anCount != 0 || dnCount != 0 || arCount != 0) {
626        XLOG("query packet contains non-query records");
627        return 0;
628    }
629
630    if (qdCount == 0) {
631        XLOG("query packet doesn't contain query record");
632        return 0;
633    }
634
635    /* Check QDCOUNT QRs */
636    packet->cursor = p + DNS_HEADER_SIZE;
637
638    for (;qdCount > 0; qdCount--)
639        if (!_dnsPacket_checkQR(packet))
640            return 0;
641
642    return 1;
643}
644
645/** QUERY DEBUGGING
646 **/
647#if DEBUG
648static char*
649_dnsPacket_bprintQName(DnsPacket*  packet, char*  bp, char*  bend)
650{
651    const uint8_t*  p   = packet->cursor;
652    const uint8_t*  end = packet->end;
653    int             first = 1;
654
655    for (;;) {
656        int  c;
657
658        if (p >= end)
659            break;
660
661        c = *p++;
662
663        if (c == 0) {
664            packet->cursor = p;
665            return bp;
666        }
667
668        /* we don't expect label compression in QNAMEs */
669        if (c >= 64)
670            break;
671
672        if (first)
673            first = 0;
674        else
675            bp = _bprint_c(bp, bend, '.');
676
677        bp = _bprint_b(bp, bend, (const char*)p, c);
678
679        p += c;
680        /* we rely on the bound check at the start
681         * of the loop here */
682    }
683    /* malformed data */
684    bp = _bprint_s(bp, bend, "<MALFORMED>");
685    return bp;
686}
687
688static char*
689_dnsPacket_bprintQR(DnsPacket*  packet, char*  p, char*  end)
690{
691#define  QQ(x)   { DNS_TYPE_##x, #x }
692    static const struct {
693        const char*  typeBytes;
694        const char*  typeString;
695    } qTypes[] =
696    {
697        QQ(A), QQ(PTR), QQ(MX), QQ(AAAA), QQ(ALL),
698        { NULL, NULL }
699    };
700    int          nn;
701    const char*  typeString = NULL;
702
703    /* dump QNAME */
704    p = _dnsPacket_bprintQName(packet, p, end);
705
706    /* dump TYPE */
707    p = _bprint_s(p, end, " (");
708
709    for (nn = 0; qTypes[nn].typeBytes != NULL; nn++) {
710        if (_dnsPacket_checkBytes(packet, 2, qTypes[nn].typeBytes)) {
711            typeString = qTypes[nn].typeString;
712            break;
713        }
714    }
715
716    if (typeString != NULL)
717        p = _bprint_s(p, end, typeString);
718    else {
719        int  typeCode = _dnsPacket_readInt16(packet);
720        p = _bprint(p, end, "UNKNOWN-%d", typeCode);
721    }
722
723    p = _bprint_c(p, end, ')');
724
725    /* skip CLASS */
726    _dnsPacket_skip(packet, 2);
727    return p;
728}
729
730/* this function assumes the packet has already been checked */
731static char*
732_dnsPacket_bprintQuery( DnsPacket*  packet, char*  p, char*  end )
733{
734    int   qdCount;
735
736    if (packet->base[2] & 0x1) {
737        p = _bprint_s(p, end, "RECURSIVE ");
738    }
739
740    _dnsPacket_skip(packet, 4);
741    qdCount = _dnsPacket_readInt16(packet);
742    _dnsPacket_skip(packet, 6);
743
744    for ( ; qdCount > 0; qdCount-- ) {
745        p = _dnsPacket_bprintQR(packet, p, end);
746    }
747    return p;
748}
749#endif
750
751
752/** QUERY HASHING SUPPORT
753 **
754 ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKET HAS ALREADY
755 ** BEEN SUCCESSFULLY CHECKED.
756 **/
757
758/* use 32-bit FNV hash function */
759#define  FNV_MULT   16777619U
760#define  FNV_BASIS  2166136261U
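/* Illustrative note: the functions below implement FNV-1, i.e. each input
 * byte is folded in as
 *
 *    hash = hash * FNV_MULT ^ byte;
 *
 * starting from FNV_BASIS, so for two bytes b0, b1 the result is
 * ((FNV_BASIS * FNV_MULT ^ b0) * FNV_MULT ^ b1).
 */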
761
762static unsigned
763_dnsPacket_hashBytes( DnsPacket*  packet, int  numBytes, unsigned  hash )
764{
765    const uint8_t*  p   = packet->cursor;
766    const uint8_t*  end = packet->end;
767
768    while (numBytes > 0 && p < end) {
769        hash = hash*FNV_MULT ^ *p++; numBytes -= 1;  /* consume at most numBytes bytes */
770    }
771    packet->cursor = p;
772    return hash;
773}
774
775
776static unsigned
777_dnsPacket_hashQName( DnsPacket*  packet, unsigned  hash )
778{
779    const uint8_t*  p   = packet->cursor;
780    const uint8_t*  end = packet->end;
781
782    for (;;) {
783        int  c;
784
785        if (p >= end) {  /* should not happen */
786            XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
787            break;
788        }
789
790        c = *p++;
791
792        if (c == 0)
793            break;
794
795        if (c >= 64) {
796            XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
797            break;
798        }
799        if (p + c >= end) {
800            XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
801                    __FUNCTION__);
802            break;
803        }
804        while (c > 0) {
805            hash = hash*FNV_MULT ^ *p++;
806            c   -= 1;
807        }
808    }
809    packet->cursor = p;
810    return hash;
811}
812
813static unsigned
814_dnsPacket_hashQR( DnsPacket*  packet, unsigned  hash )
815{
816    hash = _dnsPacket_hashQName(packet, hash);
817    hash = _dnsPacket_hashBytes(packet, 4, hash); /* TYPE and CLASS */
818    return hash;
819}
820
821static unsigned
822_dnsPacket_hashQuery( DnsPacket*  packet )
823{
824    unsigned  hash = FNV_BASIS;
825    int       count;
826    _dnsPacket_rewind(packet);
827
828    /* we ignore the TC bit for reasons explained in
829     * _dnsPacket_checkQuery().
830     *
831     * however we hash the RD bit to differentiate
832     * between answers for recursive and non-recursive
833     * queries.
834     */
835    hash = hash*FNV_MULT ^ (packet->base[2] & 1);
836
837    /* assume: other flags are 0 */
838    _dnsPacket_skip(packet, 4);
839
840    /* read QDCOUNT */
841    count = _dnsPacket_readInt16(packet);
842
843    /* assume: ANcount, NScount, ARcount are 0 */
844    _dnsPacket_skip(packet, 6);
845
846    /* hash QDCOUNT QRs */
847    for ( ; count > 0; count-- )
848        hash = _dnsPacket_hashQR(packet, hash);
849
850    return hash;
851}
852
853
854/** QUERY COMPARISON
855 **
856 ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKETS HAVE ALREADY
857 ** BEEN SUCCESFULLY CHECKED.
858 **/
859
860static int
861_dnsPacket_isEqualDomainName( DnsPacket*  pack1, DnsPacket*  pack2 )
862{
863    const uint8_t*  p1   = pack1->cursor;
864    const uint8_t*  end1 = pack1->end;
865    const uint8_t*  p2   = pack2->cursor;
866    const uint8_t*  end2 = pack2->end;
867
868    for (;;) {
869        int  c1, c2;
870
871        if (p1 >= end1 || p2 >= end2) {
872            XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
873            break;
874        }
875        c1 = *p1++;
876        c2 = *p2++;
877        if (c1 != c2)
878            break;
879
880        if (c1 == 0) {
881            pack1->cursor = p1;
882            pack2->cursor = p2;
883            return 1;
884        }
885        if (c1 >= 64) {
886            XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
887            break;
888        }
889        if ((p1+c1 > end1) || (p2+c1 > end2)) {
890            XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
891                    __FUNCTION__);
892            break;
893        }
894        if (memcmp(p1, p2, c1) != 0)
895            break;
896        p1 += c1;
897        p2 += c1;
898        /* we rely on the bound checks at the start of the loop */
899    }
900    /* not the same, or one is malformed */
901    XLOG("different DN");
902    return 0;
903}
904
905static int
906_dnsPacket_isEqualBytes( DnsPacket*  pack1, DnsPacket*  pack2, int  numBytes )
907{
908    const uint8_t*  p1 = pack1->cursor;
909    const uint8_t*  p2 = pack2->cursor;
910
911    if ( p1 + numBytes > pack1->end || p2 + numBytes > pack2->end )
912        return 0;
913
914    if ( memcmp(p1, p2, numBytes) != 0 )
915        return 0;
916
917    pack1->cursor += numBytes;
918    pack2->cursor += numBytes;
919    return 1;
920}
921
922static int
923_dnsPacket_isEqualQR( DnsPacket*  pack1, DnsPacket*  pack2 )
924{
925    /* compare domain name encoding + TYPE + CLASS */
926    if ( !_dnsPacket_isEqualDomainName(pack1, pack2) ||
927         !_dnsPacket_isEqualBytes(pack1, pack2, 2+2) )
928        return 0;
929
930    return 1;
931}
932
933static int
934_dnsPacket_isEqualQuery( DnsPacket*  pack1, DnsPacket*  pack2 )
935{
936    int  count1, count2;
937
938    /* compare the headers, ignore most fields */
939    _dnsPacket_rewind(pack1);
940    _dnsPacket_rewind(pack2);
941
942    /* compare RD, ignore TC, see comment in _dnsPacket_checkQuery */
943    if ((pack1->base[2] & 1) != (pack2->base[2] & 1)) {
944        XLOG("different RD");
945        return 0;
946    }
947
948    /* assume: other flags are all 0 */
949    _dnsPacket_skip(pack1, 4);
950    _dnsPacket_skip(pack2, 4);
951
952    /* compare QDCOUNT */
953    count1 = _dnsPacket_readInt16(pack1);
954    count2 = _dnsPacket_readInt16(pack2);
955    if (count1 != count2 || count1 < 0) {
956        XLOG("different QDCOUNT");
957        return 0;
958    }
959
960    /* assume: ANcount, NScount and ARcount are all 0 */
961    _dnsPacket_skip(pack1, 6);
962    _dnsPacket_skip(pack2, 6);
963
964    /* compare the QDCOUNT QRs */
965    for ( ; count1 > 0; count1-- ) {
966        if (!_dnsPacket_isEqualQR(pack1, pack2)) {
967            XLOG("different QR");
968            return 0;
969        }
970    }
971    return 1;
972}
973
974/****************************************************************************/
975/****************************************************************************/
976/*****                                                                  *****/
977/*****                                                                  *****/
978/*****                                                                  *****/
979/****************************************************************************/
980/****************************************************************************/
981
982/* cache entry. for simplicity, 'hash' and 'hlink' are inlined in this
983 * structure though they are conceptually part of the hash table.
984 *
985 * similarly, mru_next and mru_prev are part of the global MRU list
986 */
987typedef struct Entry {
988    unsigned int     hash;   /* hash value */
989    struct Entry*    hlink;  /* next in collision chain */
990    struct Entry*    mru_prev;
991    struct Entry*    mru_next;
992
993    const uint8_t*   query;
994    int              querylen;
995    const uint8_t*   answer;
996    int              answerlen;
997    time_t           expires;   /* time_t when the entry isn't valid any more */
998    int              id;        /* for debugging purpose */
999} Entry;
1000
1001/**
1002 * Find the TTL for a negative DNS result.  This is defined as the minimum
1003 * of the SOA record's TTL and the MINIMUM-TTL field (RFC-2308).
1004 *
1005 * Return 0 if not found.
1006 */
1007static u_long
1008answer_getNegativeTTL(ns_msg handle) {
1009    int n, nscount;
1010    u_long result = 0;
1011    ns_rr rr;
1012
1013    nscount = ns_msg_count(handle, ns_s_ns);
1014    for (n = 0; n < nscount; n++) {
1015        if ((ns_parserr(&handle, ns_s_ns, n, &rr) == 0) && (ns_rr_type(rr) == ns_t_soa)) {
1016            const u_char *rdata = ns_rr_rdata(rr); // find the data
1017            const u_char *edata = rdata + ns_rr_rdlen(rr); // add the len to find the end
1018            int len;
1019            u_long ttl, rec_result = ns_rr_ttl(rr);
1020
1021            // find the MINIMUM-TTL field from the blob of binary data for this record
1022            // skip the server name
1023            len = dn_skipname(rdata, edata);
1024            if (len == -1) continue; // error skipping
1025            rdata += len;
1026
1027            // skip the admin name
1028            len = dn_skipname(rdata, edata);
1029            if (len == -1) continue; // error skipping
1030            rdata += len;
1031
1032            if (edata - rdata != 5*NS_INT32SZ) continue;
1033            // skip: serial number + refresh interval + retry interval + expiry
1034            rdata += NS_INT32SZ * 4;
1035            // finally read the MINIMUM TTL
1036            ttl = ns_get32(rdata);
1037            if (ttl < rec_result) {
1038                rec_result = ttl;
1039            }
1040            // Now that the record is read successfully, apply the new min TTL
1041            if (result == 0 || rec_result < result) {
1042                result = rec_result;
1043            }
1044        }
1045    }
1046    return result;
1047}
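/* For reference (illustrative, per RFC-1035/RFC-2308): the SOA RDATA that the
 * function above walks through is laid out as
 *
 *    MNAME    : variable : primary server name   (skipped with dn_skipname)
 *    RNAME    : variable : responsible mailbox   (skipped with dn_skipname)
 *    SERIAL   : 32
 *    REFRESH  : 32
 *    RETRY    : 32
 *    EXPIRE   : 32
 *    MINIMUM  : 32       : the value read with ns_get32() above
 *
 * and the negative TTL is min(SOA record TTL, MINIMUM).
 */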
1048
1049/**
1050 * Parse the answer records and find the appropriate
1051 * smallest TTL among the records.  This might be from
1052 * the answer records if found or from the SOA record
1053 * if it's a negative result.
1054 *
1055 * The returned TTL is the number of seconds to
1056 * keep the answer in the cache.
1057 *
1058 * In case of parse error zero (0) is returned which
1059 * indicates that the answer shall not be cached.
1060 */
1061static u_long
1062answer_getTTL(const void* answer, int answerlen)
1063{
1064    ns_msg handle;
1065    int ancount, n;
1066    u_long result, ttl;
1067    ns_rr rr;
1068
1069    result = 0;
1070    if (ns_initparse(answer, answerlen, &handle) >= 0) {
1071        // get number of answer records
1072        ancount = ns_msg_count(handle, ns_s_an);
1073
1074        if (ancount == 0) {
1075            // a response with no answers?  Cache this negative result.
1076            result = answer_getNegativeTTL(handle);
1077        } else {
1078            for (n = 0; n < ancount; n++) {
1079                if (ns_parserr(&handle, ns_s_an, n, &rr) == 0) {
1080                    ttl = ns_rr_ttl(rr);
1081                    if (n == 0 || ttl < result) {
1082                        result = ttl;
1083                    }
1084                } else {
1085                    XLOG("ns_parserr failed ancount no = %d. errno = %s\n", n, strerror(errno));
1086                }
1087            }
1088        }
1089    } else {
1090        XLOG("ns_parserr failed. %s\n", strerror(errno));
1091    }
1092
1093    XLOG("TTL = %d\n", result);
1094
1095    return result;
1096}
1097
1098static void
1099entry_free( Entry*  e )
1100{
1101    /* everything is allocated in a single memory block */
1102    if (e) {
1103        free(e);
1104    }
1105}
1106
1107static __inline__ void
1108entry_mru_remove( Entry*  e )
1109{
1110    e->mru_prev->mru_next = e->mru_next;
1111    e->mru_next->mru_prev = e->mru_prev;
1112}
1113
1114static __inline__ void
1115entry_mru_add( Entry*  e, Entry*  list )
1116{
1117    Entry*  first = list->mru_next;
1118
1119    e->mru_next = first;
1120    e->mru_prev = list;
1121
1122    list->mru_next  = e;
1123    first->mru_prev = e;
1124}
1125
1126/* compute the hash of a given entry, this is a hash of most
1127 * data in the query (key) */
1128static unsigned
1129entry_hash( const Entry*  e )
1130{
1131    DnsPacket  pack[1];
1132
1133    _dnsPacket_init(pack, e->query, e->querylen);
1134    return _dnsPacket_hashQuery(pack);
1135}
1136
1137/* initialize an Entry as a search key, this also checks the input query packet
1138 * returns 1 on success, or 0 in case of unsupported/malformed data */
1139static int
1140entry_init_key( Entry*  e, const void*  query, int  querylen )
1141{
1142    DnsPacket  pack[1];
1143
1144    memset(e, 0, sizeof(*e));
1145
1146    e->query    = query;
1147    e->querylen = querylen;
1148    e->hash     = entry_hash(e);
1149
1150    _dnsPacket_init(pack, query, querylen);
1151
1152    return _dnsPacket_checkQuery(pack);
1153}
1154
1155/* allocate a new entry as a cache node */
1156static Entry*
1157entry_alloc( const Entry*  init, const void*  answer, int  answerlen )
1158{
1159    Entry*  e;
1160    int     size;
1161
1162    size = sizeof(*e) + init->querylen + answerlen;
1163    e    = calloc(size, 1);
1164    if (e == NULL)
1165        return e;
1166
1167    e->hash     = init->hash;
1168    e->query    = (const uint8_t*)(e+1);
1169    e->querylen = init->querylen;
1170
1171    memcpy( (char*)e->query, init->query, e->querylen );
1172
1173    e->answer    = e->query + e->querylen;
1174    e->answerlen = answerlen;
1175
1176    memcpy( (char*)e->answer, answer, e->answerlen );
1177
1178    return e;
1179}
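/* Illustrative note: entry_alloc() places everything in one calloc'ed block,
 * which is why entry_free() is a single free():
 *
 *    [ Entry header | query bytes (querylen) | answer bytes (answerlen) ]
 *      ^e             ^e->query                ^e->answer
 */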
1180
1181static int
1182entry_equals( const Entry*  e1, const Entry*  e2 )
1183{
1184    DnsPacket  pack1[1], pack2[1];
1185
1186    if (e1->querylen != e2->querylen) {
1187        return 0;
1188    }
1189    _dnsPacket_init(pack1, e1->query, e1->querylen);
1190    _dnsPacket_init(pack2, e2->query, e2->querylen);
1191
1192    return _dnsPacket_isEqualQuery(pack1, pack2);
1193}
1194
1195/****************************************************************************/
1196/****************************************************************************/
1197/*****                                                                  *****/
1198/*****                                                                  *****/
1199/*****                                                                  *****/
1200/****************************************************************************/
1201/****************************************************************************/
1202
1203/* We use a simple hash table with external collision lists
1204 * for simplicity, the hash-table fields 'hash' and 'hlink' are
1205 * inlined in the Entry structure.
1206 */
1207
1208/* Maximum time (in seconds) for a thread to wait for a pending request */
1209#define PENDING_REQUEST_TIMEOUT 20
1210
1211typedef struct pending_req_info {
1212    unsigned int                hash;
1213    pthread_cond_t              cond;
1214    struct pending_req_info*    next;
1215} PendingReqInfo;
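/* Illustrative sketch of how the pending-request list is used (an informal
 * summary of the functions that follow, not normative):
 *
 *   thread A: _resolv_cache_lookup() misses, no pending entry exists
 *             -> _cache_check_pending_request_locked() registers one and
 *                returns 0, so A performs the network query.
 *   thread B: _resolv_cache_lookup() misses the same query while A is busy
 *             -> _cache_check_pending_request_locked() finds A's entry and
 *                waits on its condition variable (up to
 *                PENDING_REQUEST_TIMEOUT seconds), then retries the lookup.
 *   thread A: _resolv_cache_add() (or _resolv_cache_query_failed()) calls
 *             _cache_notify_waiting_tid_locked(), which broadcasts to B and
 *             removes the pending entry.
 */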
1216
1217typedef struct resolv_cache {
1218    int              max_entries;
1219    int              num_entries;
1220    Entry            mru_list;
1221    pthread_mutex_t  lock;
1222    int              last_id;
1223    Entry*           entries;
1224    PendingReqInfo   pending_requests;
1225} Cache;
1226
1227struct resolv_cache_info {
1228    unsigned                    netid;
1229    Cache*                      cache;
1230    struct resolv_cache_info*   next;
1231    char*                       nameservers[MAXNS +1];
1232    struct addrinfo*            nsaddrinfo[MAXNS + 1];
1233    char                        defdname[256];
1234    int                         dnsrch_offset[MAXDNSRCH+1];  // offsets into defdname
1235};
1236
1237#define  HTABLE_VALID(x)  ((x) != NULL && (x) != HTABLE_DELETED)
1238
1239static void
1240_cache_flush_pending_requests_locked( struct resolv_cache* cache )
1241{
1242    struct pending_req_info *ri, *tmp;
1243    if (cache) {
1244        ri = cache->pending_requests.next;
1245
1246        while (ri) {
1247            tmp = ri;
1248            ri = ri->next;
1249            pthread_cond_broadcast(&tmp->cond);
1250
1251            pthread_cond_destroy(&tmp->cond);
1252            free(tmp);
1253        }
1254
1255        cache->pending_requests.next = NULL;
1256    }
1257}
1258
1259/* return 0 if no pending request is found matching the key.
1260 * if a matching request is found, the calling thread will wait
1261 * until it is released, then return 1 */
1262static int
1263_cache_check_pending_request_locked( struct resolv_cache* cache, Entry* key )
1264{
1265    struct pending_req_info *ri, *prev;
1266    int exist = 0;
1267
1268    if (cache && key) {
1269        ri = cache->pending_requests.next;
1270        prev = &cache->pending_requests;
1271        while (ri) {
1272            if (ri->hash == key->hash) {
1273                exist = 1;
1274                break;
1275            }
1276            prev = ri;
1277            ri = ri->next;
1278        }
1279
1280        if (!exist) {
1281            ri = calloc(1, sizeof(struct pending_req_info));
1282            if (ri) {
1283                ri->hash = key->hash;
1284                pthread_cond_init(&ri->cond, NULL);
1285                prev->next = ri;
1286            }
1287        } else {
1288            struct timespec ts = {0,0};
1289            XLOG("Waiting for previous request");
1290            ts.tv_sec = _time_now() + PENDING_REQUEST_TIMEOUT;
1291            pthread_cond_timedwait(&ri->cond, &cache->lock, &ts);
1292        }
1293    }
1294
1295    return exist;
1296}
1297
1298/* notify any thread waiting on a request matching the key that the
1299 * request has completed, then remove the pending-request entry */
1300static void
1301_cache_notify_waiting_tid_locked( struct resolv_cache* cache, Entry* key )
1302{
1303    struct pending_req_info *ri, *prev;
1304
1305    if (cache && key) {
1306        ri = cache->pending_requests.next;
1307        prev = &cache->pending_requests;
1308        while (ri) {
1309            if (ri->hash == key->hash) {
1310                pthread_cond_broadcast(&ri->cond);
1311                break;
1312            }
1313            prev = ri;
1314            ri = ri->next;
1315        }
1316
1317        // remove item from list and destroy
1318        if (ri) {
1319            prev->next = ri->next;
1320            pthread_cond_destroy(&ri->cond);
1321            free(ri);
1322        }
1323    }
1324}
1325
1326/* notify the cache that the query failed */
1327void
1328_resolv_cache_query_failed( struct resolv_cache* cache,
1329                   const void* query,
1330                   int         querylen)
1331{
1332    Entry    key[1];
1333
1334    if (cache && entry_init_key(key, query, querylen)) {
1335        pthread_mutex_lock(&cache->lock);
1336        _cache_notify_waiting_tid_locked(cache, key);
1337        pthread_mutex_unlock(&cache->lock);
1338    }
1339}
1340
1341static void
1342_cache_flush_locked( Cache*  cache )
1343{
1344    int     nn;
1345
1346    for (nn = 0; nn < cache->max_entries; nn++)
1347    {
1348        Entry**  pnode = (Entry**) &cache->entries[nn];
1349
1350        while (*pnode != NULL) {
1351            Entry*  node = *pnode;
1352            *pnode = node->hlink;
1353            entry_free(node);
1354        }
1355    }
1356
1357    // flush pending request
1358    _cache_flush_pending_requests_locked(cache);
1359
1360    cache->mru_list.mru_next = cache->mru_list.mru_prev = &cache->mru_list;
1361    cache->num_entries       = 0;
1362    cache->last_id           = 0;
1363
1364    XLOG("*************************\n"
1365         "*** DNS CACHE FLUSHED ***\n"
1366         "*************************");
1367}
1368
1369static int
1370_res_cache_get_max_entries( void )
1371{
1372    int cache_size = CONFIG_MAX_ENTRIES;
1373
1374    const char* cache_mode = getenv("ANDROID_DNS_MODE");
1375    if (cache_mode == NULL || strcmp(cache_mode, "local") != 0) {
1376        // Don't use the cache in local mode. This is used by the proxy itself.
1377        cache_size = 0;
1378    }
1379
1380    XLOG("cache size: %d", cache_size);
1381    return cache_size;
1382}
1383
1384static struct resolv_cache*
1385_resolv_cache_create( void )
1386{
1387    struct resolv_cache*  cache;
1388
1389    cache = calloc(sizeof(*cache), 1);
1390    if (cache) {
1391        cache->max_entries = _res_cache_get_max_entries();
1392        cache->entries = calloc(sizeof(*cache->entries), cache->max_entries);
1393        if (cache->entries) {
1394            pthread_mutex_init( &cache->lock, NULL );
1395            cache->mru_list.mru_prev = cache->mru_list.mru_next = &cache->mru_list;
1396            XLOG("%s: cache created\n", __FUNCTION__);
1397        } else {
1398            free(cache);
1399            cache = NULL;
1400        }
1401    }
1402    return cache;
1403}
1404
1405
1406#if DEBUG
1407static void
1408_dump_query( const uint8_t*  query, int  querylen )
1409{
1410    char       temp[256], *p=temp, *end=p+sizeof(temp);
1411    DnsPacket  pack[1];
1412
1413    _dnsPacket_init(pack, query, querylen);
1414    p = _dnsPacket_bprintQuery(pack, p, end);
1415    XLOG("QUERY: %s", temp);
1416}
1417
1418static void
1419_cache_dump_mru( Cache*  cache )
1420{
1421    char    temp[512], *p=temp, *end=p+sizeof(temp);
1422    Entry*  e;
1423
1424    p = _bprint(temp, end, "MRU LIST (%2d): ", cache->num_entries);
1425    for (e = cache->mru_list.mru_next; e != &cache->mru_list; e = e->mru_next)
1426        p = _bprint(p, end, " %d", e->id);
1427
1428    XLOG("%s", temp);
1429}
1430
1431static void
1432_dump_answer(const void* answer, int answerlen)
1433{
1434    res_state statep;
1435    FILE* fp;
1436    char* buf;
1437    int fileLen;
1438
1439    fp = fopen("/data/reslog.txt", "w+e");
1440    if (fp != NULL) {
1441        statep = __res_get_state();
1442
1443        res_pquery(statep, answer, answerlen, fp);
1444
1445        //Get file length
1446        fseek(fp, 0, SEEK_END);
1447        fileLen=ftell(fp);
1448        fseek(fp, 0, SEEK_SET);
1449        buf = (char *)malloc(fileLen+1);
1450        if (buf != NULL) {
1451            //Read file contents into buffer
1452            fread(buf, fileLen, 1, fp);
1453            XLOG("%s\n", buf);
1454            free(buf);
1455        }
1456        fclose(fp);
1457        remove("/data/reslog.txt");
1458    }
1459    else {
1460        errno = 0; // else debug is introducing error signals
1461        XLOG("%s: can't open file\n", __FUNCTION__);
1462    }
1463}
1464#endif
1465
1466#if DEBUG
1467#  define  XLOG_QUERY(q,len)   _dump_query((q), (len))
1468#  define  XLOG_ANSWER(a, len) _dump_answer((a), (len))
1469#else
1470#  define  XLOG_QUERY(q,len)   ((void)0)
1471#  define  XLOG_ANSWER(a,len)  ((void)0)
1472#endif
1473
1474/* This function tries to find a key within the hash table
1475 * In case of success, it will return a *pointer* to the matching entry slot.
1476 * In case of failure, it will return a *pointer* to a NULL slot.
1477 *
1478 * So, the caller must check '*result' to check for success/failure.
1479 *
1480 * The main idea is that the result can later be used directly in
1481 * calls to _resolv_cache_add or _resolv_cache_remove as the 'lookup'
1482 * parameter. This makes the code simpler and avoids re-searching
1483 * for the key position in the htable.
1484 *
1485 * The result of a lookup_p is only valid until you alter the hash
1486 * table.
1487 */
1488static Entry**
1489_cache_lookup_p( Cache*   cache,
1490                 Entry*   key )
1491{
1492    int      index = key->hash % cache->max_entries;
1493    Entry**  pnode = (Entry**) &cache->entries[ index ];
1494
1495    while (*pnode != NULL) {
1496        Entry*  node = *pnode;
1497
1498        if (node == NULL)
1499            break;
1500
1501        if (node->hash == key->hash && entry_equals(node, key))
1502            break;
1503
1504        pnode = &node->hlink;
1505    }
1506    return pnode;
1507}
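/* Illustrative sketch (assumption, mirroring what _resolv_cache_add() does
 * further below): how the slot pointer returned by _cache_lookup_p() is used:
 *
 *    Entry**  lookup = _cache_lookup_p(cache, key);
 *
 *    if (*lookup != NULL) {
 *        // hit: *lookup is the matching entry
 *    } else {
 *        // miss: the same slot can be passed directly to _cache_add_p()
 *        _cache_add_p(cache, lookup, new_entry);
 *    }
 */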
1508
1509/* Add a new entry to the hash table. 'lookup' must be the
1510 * result of an immediate previous failed _lookup_p() call
1511 * (i.e. with *lookup == NULL), and 'e' is the pointer to the
1512 * newly created entry
1513 */
1514static void
1515_cache_add_p( Cache*   cache,
1516              Entry**  lookup,
1517              Entry*   e )
1518{
1519    *lookup = e;
1520    e->id = ++cache->last_id;
1521    entry_mru_add(e, &cache->mru_list);
1522    cache->num_entries += 1;
1523
1524    XLOG("%s: entry %d added (count=%d)", __FUNCTION__,
1525         e->id, cache->num_entries);
1526}
1527
1528/* Remove an existing entry from the hash table,
1529 * 'lookup' must be the result of an immediate previous
1530 * and successful _lookup_p() call.
1531 */
1532static void
1533_cache_remove_p( Cache*   cache,
1534                 Entry**  lookup )
1535{
1536    Entry*  e  = *lookup;
1537
1538    XLOG("%s: entry %d removed (count=%d)", __FUNCTION__,
1539         e->id, cache->num_entries-1);
1540
1541    entry_mru_remove(e);
1542    *lookup = e->hlink;
1543    entry_free(e);
1544    cache->num_entries -= 1;
1545}
1546
1547/* Remove the oldest entry from the hash table.
1548 */
1549static void
1550_cache_remove_oldest( Cache*  cache )
1551{
1552    Entry*   oldest = cache->mru_list.mru_prev;
1553    Entry**  lookup = _cache_lookup_p(cache, oldest);
1554
1555    if (*lookup == NULL) { /* should not happen */
1556        XLOG("%s: OLDEST NOT IN HTABLE ?", __FUNCTION__);
1557        return;
1558    }
1559    if (DEBUG) {
1560        XLOG("Cache full - removing oldest");
1561        XLOG_QUERY(oldest->query, oldest->querylen);
1562    }
1563    _cache_remove_p(cache, lookup);
1564}
1565
1566/* Remove all expired entries from the hash table.
1567 */
1568static void _cache_remove_expired(Cache* cache) {
1569    Entry* e;
1570    time_t now = _time_now();
1571
1572    for (e = cache->mru_list.mru_next; e != &cache->mru_list;) {
1573        // Entry is old, remove
1574        if (now >= e->expires) {
1575            Entry** lookup = _cache_lookup_p(cache, e);
1576            if (*lookup == NULL) { /* should not happen */
1577                XLOG("%s: ENTRY NOT IN HTABLE ?", __FUNCTION__);
1578                return;
1579            }
1580            e = e->mru_next;
1581            _cache_remove_p(cache, lookup);
1582        } else {
1583            e = e->mru_next;
1584        }
1585    }
1586}
1587
1588ResolvCacheStatus
1589_resolv_cache_lookup( struct resolv_cache*  cache,
1590                      const void*           query,
1591                      int                   querylen,
1592                      void*                 answer,
1593                      int                   answersize,
1594                      int                  *answerlen )
1595{
1596    Entry      key[1];
1597    Entry**    lookup;
1598    Entry*     e;
1599    time_t     now;
1600
1601    ResolvCacheStatus  result = RESOLV_CACHE_NOTFOUND;
1602
1603    XLOG("%s: lookup", __FUNCTION__);
1604    XLOG_QUERY(query, querylen);
1605
1606    /* we don't cache malformed queries */
1607    if (!entry_init_key(key, query, querylen)) {
1608        XLOG("%s: unsupported query", __FUNCTION__);
1609        return RESOLV_CACHE_UNSUPPORTED;
1610    }
1611    /* lookup cache */
1612    pthread_mutex_lock( &cache->lock );
1613
1614    /* see the description of _lookup_p to understand this.
1615     * the function always returns a non-NULL pointer.
1616     */
1617    lookup = _cache_lookup_p(cache, key);
1618    e      = *lookup;
1619
1620    if (e == NULL) {
1621        XLOG( "NOT IN CACHE");
1622        // calling thread will wait if an outstanding request is found
1623        // that matches this query
1624        if (!_cache_check_pending_request_locked(cache, key)) {
1625            goto Exit;
1626        } else {
1627            lookup = _cache_lookup_p(cache, key);
1628            e = *lookup;
1629            if (e == NULL) {
1630                goto Exit;
1631            }
1632        }
1633    }
1634
1635    now = _time_now();
1636
1637    /* remove stale entries here */
1638    if (now >= e->expires) {
1639        XLOG( " NOT IN CACHE (STALE ENTRY %p DISCARDED)", *lookup );
1640        XLOG_QUERY(e->query, e->querylen);
1641        _cache_remove_p(cache, lookup);
1642        goto Exit;
1643    }
1644
1645    *answerlen = e->answerlen;
1646    if (e->answerlen > answersize) {
1647        /* NOTE: we return UNSUPPORTED if the answer buffer is too short */
1648        result = RESOLV_CACHE_UNSUPPORTED;
1649        XLOG(" ANSWER TOO LONG");
1650        goto Exit;
1651    }
1652
1653    memcpy( answer, e->answer, e->answerlen );
1654
1655    /* bump up this entry to the top of the MRU list */
1656    if (e != cache->mru_list.mru_next) {
1657        entry_mru_remove( e );
1658        entry_mru_add( e, &cache->mru_list );
1659    }
1660
1661    XLOG( "FOUND IN CACHE entry=%p", e );
1662    result = RESOLV_CACHE_FOUND;
1663
1664Exit:
1665    pthread_mutex_unlock( &cache->lock );
1666    return result;
1667}
1668
1669
1670void
1671_resolv_cache_add( struct resolv_cache*  cache,
1672                   const void*           query,
1673                   int                   querylen,
1674                   const void*           answer,
1675                   int                   answerlen )
1676{
1677    Entry    key[1];
1678    Entry*   e;
1679    Entry**  lookup;
1680    u_long   ttl;
1681
1682    /* don't assume that the query has already been cached
1683     */
1684    if (!entry_init_key( key, query, querylen )) {
1685        XLOG( "%s: passed invalid query ?", __FUNCTION__);
1686        return;
1687    }
1688
1689    pthread_mutex_lock( &cache->lock );
1690
1691    XLOG( "%s: query:", __FUNCTION__ );
1692    XLOG_QUERY(query,querylen);
1693    XLOG_ANSWER(answer, answerlen);
1694#if DEBUG_DATA
1695    XLOG( "answer:");
1696    XLOG_BYTES(answer,answerlen);
1697#endif
1698
1699    lookup = _cache_lookup_p(cache, key);
1700    e      = *lookup;
1701
1702    if (e != NULL) { /* should not happen */
1703        XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
1704             __FUNCTION__, e);
1705        goto Exit;
1706    }
1707
1708    if (cache->num_entries >= cache->max_entries) {
1709        _cache_remove_expired(cache);
1710        if (cache->num_entries >= cache->max_entries) {
1711            _cache_remove_oldest(cache);
1712        }
1713        /* need to lookup again */
1714        lookup = _cache_lookup_p(cache, key);
1715        e      = *lookup;
1716        if (e != NULL) {
1717            XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
1718                __FUNCTION__, e);
1719            goto Exit;
1720        }
1721    }
1722
1723    ttl = answer_getTTL(answer, answerlen);
1724    if (ttl > 0) {
1725        e = entry_alloc(key, answer, answerlen);
1726        if (e != NULL) {
1727            e->expires = ttl + _time_now();
1728            _cache_add_p(cache, lookup, e);
1729        }
1730    }
1731#if DEBUG
1732    _cache_dump_mru(cache);
1733#endif
1734Exit:
1735    _cache_notify_waiting_tid_locked(cache, key);
1736    pthread_mutex_unlock( &cache->lock );
1737}
1738
1739/****************************************************************************/
1740/****************************************************************************/
1741/*****                                                                  *****/
1742/*****                                                                  *****/
1743/*****                                                                  *****/
1744/****************************************************************************/
1745/****************************************************************************/
1746
1747static pthread_once_t        _res_cache_once = PTHREAD_ONCE_INIT;
1748
1749// Head of the list of caches.  Protected by _res_cache_list_lock.
1750static struct resolv_cache_info _res_cache_list;
1751
1752// lock protecting everything in the _resolve_cache_info structs (next ptr, etc)
1753static pthread_mutex_t _res_cache_list_lock;
1754
1755/* insert resolv_cache_info into the list of resolv_cache_infos */
1756static void _insert_cache_info_locked(struct resolv_cache_info* cache_info);
1757/* creates a resolv_cache_info */
1758static struct resolv_cache_info* _create_cache_info( void );
1759/* gets cache associated with a network, or NULL if none exists */
1760static struct resolv_cache* _find_named_cache_locked(unsigned netid);
1761/* gets a resolv_cache_info associated with a network, or NULL if not found */
1762static struct resolv_cache_info* _find_cache_info_locked(unsigned netid);
1763/* look up the named cache, and create one if needed */
1764static struct resolv_cache* _get_res_cache_for_net_locked(unsigned netid);
1765/* empty the named cache */
1766static void _flush_cache_for_net_locked(unsigned netid);
1767/* empty the nameservers set for the named cache */
1768static void _free_nameservers_locked(struct resolv_cache_info* cache_info);
1769/* return 1 if the provided list of name servers matches the list of name servers
1770 * currently attached to the provided cache_info, 0 otherwise */
1771static int _resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
1772        const char** servers, int numservers);
1773
1774static void
1775_res_cache_init(void)
1776{
1777    const char*  env = getenv(CONFIG_ENV);
1778
1779    if (env && atoi(env) == 0) {
1780        /* the cache is disabled */
1781        return;
1782    }
1783
1784    memset(&_res_cache_list, 0, sizeof(_res_cache_list));
1785    pthread_mutex_init(&_res_cache_list_lock, NULL);
1786}
1787
1788struct resolv_cache*
1789__get_res_cache(unsigned netid)
1790{
1791    struct resolv_cache *cache;
1792
1793    pthread_once(&_res_cache_once, _res_cache_init);
1794    pthread_mutex_lock(&_res_cache_list_lock);
1795
1796    /* Does NOT create a cache if it does not exist. */
1797    cache = _find_named_cache_locked(netid);
1798
1799    pthread_mutex_unlock(&_res_cache_list_lock);
1800    XLOG("%s: netid=%u, cache=%p\n", __FUNCTION__, netid, cache);
1801    return cache;
1802}
1803
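/* Looks up the cache for |netid|, lazily creating both the cache and its
 * resolv_cache_info on first use for that network; the cache_info is freed
 * again if the cache itself cannot be allocated.  Caller must hold
 * _res_cache_list_lock. */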
1804static struct resolv_cache*
1805_get_res_cache_for_net_locked(unsigned netid)
1806{
1807    struct resolv_cache* cache = _find_named_cache_locked(netid);
1808    if (!cache) {
1809        struct resolv_cache_info* cache_info = _create_cache_info();
1810        if (cache_info) {
1811            cache = _resolv_cache_create();
1812            if (cache) {
1813                cache_info->cache = cache;
1814                cache_info->netid = netid;
1815                _insert_cache_info_locked(cache_info);
1816            } else {
1817                free(cache_info);
1818            }
1819        }
1820    }
1821    return cache;
1822}
1823
1824void
1825_resolv_flush_cache_for_net(unsigned netid)
1826{
1827    pthread_once(&_res_cache_once, _res_cache_init);
1828    pthread_mutex_lock(&_res_cache_list_lock);
1829
1830    _flush_cache_for_net_locked(netid);
1831
1832    pthread_mutex_unlock(&_res_cache_list_lock);
1833}
1834
1835static void
1836_flush_cache_for_net_locked(unsigned netid)
1837{
1838    struct resolv_cache* cache = _find_named_cache_locked(netid);
1839    if (cache) {
1840        pthread_mutex_lock(&cache->lock);
1841        _cache_flush_locked(cache);
1842        pthread_mutex_unlock(&cache->lock);
1843    }
1844}
1845
1846static struct resolv_cache_info*
1847_create_cache_info(void)
1848{
1849    struct resolv_cache_info* cache_info;
1850
1851    cache_info = calloc(1, sizeof(*cache_info));
1852    return cache_info;
1853}
1854
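/* Appends |cache_info| at the tail of the singly-linked _res_cache_list.
 * Caller must hold _res_cache_list_lock. */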
1855static void
1856_insert_cache_info_locked(struct resolv_cache_info* cache_info)
1857{
1858    struct resolv_cache_info* last;
1859
1860    for (last = &_res_cache_list; last->next; last = last->next);
1861
1862    last->next = cache_info;
1863
1864}
1865
1866static struct resolv_cache*
1867_find_named_cache_locked(unsigned netid)
1868{
1869    struct resolv_cache_info* info = _find_cache_info_locked(netid);
1870
1871    if (info != NULL) return info->cache;
1872
1873    return NULL;
1874}
1875
1876static struct resolv_cache_info*
1877_find_cache_info_locked(unsigned netid)
1878{
1879    struct resolv_cache_info* cache_info = _res_cache_list.next;
1880
1881    while (cache_info) {
1882        if (cache_info->netid == netid) {
1883            break;
1884        }
1885
1886        cache_info = cache_info->next;
1887    }
1888    return cache_info;
1889}
1890
1891void
1892_resolv_set_nameservers_for_net(unsigned netid, const char** servers, int numservers,
1893        const char *domains)
1894{
1895    int i, rt, index;
1896    struct addrinfo hints;
1897    char sbuf[NI_MAXSERV];
1898    register char *cp;
1899    int *offset;
1900
1901    pthread_once(&_res_cache_once, _res_cache_init);
1902    pthread_mutex_lock(&_res_cache_list_lock);
1903
1904    // creates the cache if it does not already exist
1905    _get_res_cache_for_net_locked(netid);
1906
1907    struct resolv_cache_info* cache_info = _find_cache_info_locked(netid);
1908
1909    if (cache_info != NULL &&
1910            !_resolv_is_nameservers_equal_locked(cache_info, servers, numservers)) {
1911        // free current before adding new
1912        _free_nameservers_locked(cache_info);
1913
1914        memset(&hints, 0, sizeof(hints));
1915        hints.ai_family = PF_UNSPEC;
1916        hints.ai_socktype = SOCK_DGRAM; /*dummy*/
1917        hints.ai_flags = AI_NUMERICHOST;
1918        snprintf(sbuf, sizeof(sbuf), "%u", NAMESERVER_PORT);
1919
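        // Convert each server string into a sockaddr via getaddrinfo() with
        // AI_NUMERICHOST; only addresses that parse are kept, and |index|
        // counts the entries actually stored.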
1920        index = 0;
1921        for (i = 0; i < numservers && i < MAXNS; i++) {
1922            rt = getaddrinfo(servers[i], sbuf, &hints, &cache_info->nsaddrinfo[index]);
1923            if (rt == 0) {
1924                cache_info->nameservers[index] = strdup(servers[i]);
1925                index++;
1926                XLOG("%s: netid = %u, addr = %s\n", __FUNCTION__, netid, servers[i]);
1927            } else {
1928                cache_info->nsaddrinfo[index] = NULL;
1929            }
1930        }
1931
1932        // code moved from res_init.c, load_domain_search_list
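        // Split |domains| on spaces/tabs in place and record the offset of each
        // search domain within defdname, so the dnsrch pointer list can be
        // rebuilt cheaply in _resolv_populate_res_for_net().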
1933        strlcpy(cache_info->defdname, domains, sizeof(cache_info->defdname));
1934        if ((cp = strchr(cache_info->defdname, '\n')) != NULL)
1935            *cp = '\0';
1936        cp = cache_info->defdname;
1937        offset = cache_info->dnsrch_offset;
1938        while (offset < cache_info->dnsrch_offset + MAXDNSRCH) {
1939            while (*cp == ' ' || *cp == '\t') /* skip leading white space */
1940                cp++;
1941            if (*cp == '\0') /* stop if nothing more to do */
1942                break;
1943            *offset++ = cp - cache_info->defdname; /* record this search domain */
1944            while (*cp) { /* zero-terminate it */
1945                if (*cp == ' '|| *cp == '\t') {
1946                    *cp++ = '\0';
1947                    break;
1948                }
1949                cp++;
1950            }
1951        }
1952        *offset = -1; /* cache_info->dnsrch_offset has MAXDNSRCH+1 items */
1953
1954        // flush the cache since the settings changed
1955        _flush_cache_for_net_locked(netid);
1956
1957    }
1958
1959    pthread_mutex_unlock(&_res_cache_list_lock);
1960}
1961
1962static int
1963_resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
1964        const char** servers, int numservers)
1965{
1966    int i;
1967    char** ns;
1968    int currentservers;
1969    int equal = 1;
1970
1971    if (numservers > MAXNS) numservers = MAXNS;
1972
1973    // Find out how many nameservers we had before.
1974    currentservers = 0;
1975    for (ns = cache_info->nameservers; *ns; ns++)
1976        currentservers++;
1977
1978    if (currentservers != numservers)
1979        return 0;
1980
1981    // Compare each name server against current name servers.
1982    // TODO: this is incorrect if the list of current or previous nameservers
1983    // contains duplicates. This does not really matter because the framework
1984    // filters out duplicates, but we should probably fix it. It's also
1985    // insensitive to the order of the nameservers; we should probably fix that
1986    // too.
1987    for (i = 0; i < numservers && equal; i++) {
1988        ns = cache_info->nameservers;
1989        equal = 0;
1990        while(*ns) {
1991            if (strcmp(*ns, servers[i]) == 0) {
1992                equal = 1;
1993                break;
1994            }
1995            ns++;
1996        }
1997    }
1998
1999    return equal;
2000}
2001
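/* Frees every nameserver string and addrinfo chain held by |cache_info| and
 * resets the slots to NULL; the loop walks MAXNS + 1 entries, matching the
 * NULL-terminated nameservers array. */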
2002static void
2003_free_nameservers_locked(struct resolv_cache_info* cache_info)
2004{
2005    int i;
2006    for (i = 0; i <= MAXNS; i++) {
2007        free(cache_info->nameservers[i]);
2008        cache_info->nameservers[i] = NULL;
2009        if (cache_info->nsaddrinfo[i] != NULL) {
2010            freeaddrinfo(cache_info->nsaddrinfo[i]);
2011            cache_info->nsaddrinfo[i] = NULL;
2012        }
2013    }
2014}
2015
2016void
2017_resolv_populate_res_for_net(res_state statp)
2018{
2019    if (statp == NULL) {
2020        return;
2021    }
2022
2023    pthread_once(&_res_cache_once, _res_cache_init);
2024    pthread_mutex_lock(&_res_cache_list_lock);
2025
2026    struct resolv_cache_info* info = _find_cache_info_locked(statp->netid);
2027    if (info != NULL) {
2028        int nserv;
2029        struct addrinfo* ai;
2030        XLOG("%s: %u\n", __FUNCTION__, statp->netid);
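        // Copy each stored nameserver address into statp, preferring the
        // extended nsaddrs slots when they are present and falling back to the
        // legacy nsaddr_list only when the address fits there.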
2031        for (nserv = 0; nserv < MAXNS; nserv++) {
2032            ai = info->nsaddrinfo[nserv];
2033            if (ai == NULL) {
2034                break;
2035            }
2036
2037            if ((size_t) ai->ai_addrlen <= sizeof(statp->_u._ext.ext->nsaddrs[0])) {
2038                if (statp->_u._ext.ext != NULL) {
2039                    memcpy(&statp->_u._ext.ext->nsaddrs[nserv], ai->ai_addr, ai->ai_addrlen);
2040                    statp->nsaddr_list[nserv].sin_family = AF_UNSPEC;
2041                } else {
2042                    if ((size_t) ai->ai_addrlen
2043                            <= sizeof(statp->nsaddr_list[0])) {
2044                        memcpy(&statp->nsaddr_list[nserv], ai->ai_addr,
2045                                ai->ai_addrlen);
2046                    } else {
2047                        statp->nsaddr_list[nserv].sin_family = AF_UNSPEC;
2048                    }
2049                }
2050            } else {
2051                XLOG("%s: found too long addrlen", __FUNCTION__);
2052            }
2053        }
2054        statp->nscount = nserv;
2055        // Now copy the search domains.  Note that we cache the offsets because this
2056        // code runs a lot, while the offsets are only recomputed when the settings change.
2057        strlcpy(statp->defdname, info->defdname, sizeof(statp->defdname));
2058        register char **pp = statp->dnsrch;
2059        register int *p = info->dnsrch_offset;
2060        while (pp < statp->dnsrch + MAXDNSRCH && *p != -1) {
2061            *pp++ = &statp->defdname[0] + *p++;
2062        }
2063    }
2064    pthread_mutex_unlock(&_res_cache_list_lock);
2065}
2066