/*
 * Copyright (C) 2011 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/atomics.h>
#include <sys/system_properties.h>
#include <sys/mman.h>

#if HAVE_DLADDR
#include <dlfcn.h>
#endif
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include <unwind.h>
#include <unistd.h>

#include "logd.h"
#include "bionic_tls.h"

/*
 * ===========================================================================
 *      Deadlock prediction
 * ===========================================================================
 */
/*
The idea is to predict the possibility of deadlock by recording the order
in which locks are acquired.  If we see an attempt to acquire a lock
out of order, we can identify the locks and offending code.

To make this work, we need to keep track of the locks held by each thread,
and create history trees for each lock.  When a thread tries to acquire
a new lock, we walk through the "history children" of the lock, looking
for a match with locks the thread already holds.  If we find a match,
it means the thread has made a request that could result in a deadlock.

To support recursive locks, we always allow re-locking a currently-held
lock, and maintain a recursion depth count.

An ASCII-art example, where letters represent locks:

        A
       /|\
      / | \
     B  |  D
      \ |
       \|
        C

The above is the tree we'd have after handling lock synchronization
sequences "ABC", "AC", "AD".  A has three children, {B, C, D}.  C is also
a child of B.  (The lines represent pointers between parent and child.
Every node can have multiple parents and multiple children.)

If we hold AC, and want to lock B, we recursively search through B's
children to see if A or C appears.  It does (C is a child of B), so we
reject the attempt.  (A straightforward way to implement it: add a link
from C to B, then determine whether the graph starting at B contains a
cycle.)

If we hold AC and want to lock D, we would succeed, creating a new link
from C to D.

Updates to MutexInfo structs are only allowed for the thread that holds
the lock, so we actually do most of our deadlock prediction work after
the lock has been acquired.
*/
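
/*
 * For example (hypothetical user code), two threads that take the same
 * pair of mutexes in opposite orders establish both "AB" and "BA";
 * whichever order is recorded second closes a cycle in the graph and
 * triggers the "Illegal lock attempt" report below, even if the threads
 * never happen to deadlock in that particular run:
 *
 *     // thread 1                   // thread 2
 *     pthread_mutex_lock(&A);       pthread_mutex_lock(&B);
 *     pthread_mutex_lock(&B);       pthread_mutex_lock(&A);
 */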

// =============================================================================
// log functions
// =============================================================================

#define LOGD(format, ...)  \
    __libc_android_log_print(ANDROID_LOG_DEBUG, \
            "pthread_debug", (format), ##__VA_ARGS__ )

#define LOGW(format, ...)  \
    __libc_android_log_print(ANDROID_LOG_WARN, \
            "pthread_debug", (format), ##__VA_ARGS__ )

#define LOGE(format, ...)  \
    __libc_android_log_print(ANDROID_LOG_ERROR, \
            "pthread_debug", (format), ##__VA_ARGS__ )

#define LOGI(format, ...)  \
    __libc_android_log_print(ANDROID_LOG_INFO, \
            "pthread_debug", (format), ##__VA_ARGS__ )

static const char* const kStartBanner =
        "===============================================================";

static const char* const kEndBanner =
        "===============================================================";

extern char* __progname;

// =============================================================================
// map info functions
// =============================================================================

typedef struct mapinfo {
    struct mapinfo *next;
    unsigned start;
    unsigned end;
    char name[];
} mapinfo;

static mapinfo* sMapInfo = NULL;

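/* Parse one line of /proc/<pid>/maps, keeping only executable mappings.
 * Assumes the fixed-width 32-bit format
 * "start-end perms offset dev inode  name", with the 'x' permission bit
 * at offset 20 and the mapping name starting at offset 49.
 */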
static mapinfo *parse_maps_line(char *line)
{
    mapinfo *mi;
    int len = strlen(line);

    if(len < 1) return 0;
    line[--len] = 0;

    if(len < 50) return 0;
    if(line[20] != 'x') return 0;

    mi = malloc(sizeof(mapinfo) + (len - 47));
    if(mi == 0) return 0;

    mi->start = strtoul(line, 0, 16);
    mi->end = strtoul(line + 9, 0, 16);
    mi->next = 0;
    strcpy(mi->name, line + 49);

    return mi;
}

static mapinfo *init_mapinfo(int pid)
{
    struct mapinfo *milist = NULL;
    char data[1024];
    sprintf(data, "/proc/%d/maps", pid);
    FILE *fp = fopen(data, "r");
    if(fp) {
        while(fgets(data, sizeof(data), fp)) {
            mapinfo *mi = parse_maps_line(data);
            if(mi) {
                mi->next = milist;
                milist = mi;
            }
        }
        fclose(fp);
    }

    return milist;
}

static void deinit_mapinfo(mapinfo *mi)
{
   mapinfo *del;
   while(mi) {
       del = mi;
       mi = mi->next;
       free(del);
   }
}

/* Find the containing map info for the pc */
static const mapinfo *pc_to_mapinfo(mapinfo *mi, unsigned pc, unsigned *rel_pc)
{
    *rel_pc = pc;
    while(mi) {
        if((pc >= mi->start) && (pc < mi->end)){
            // Only calculate the relative offset for shared libraries
            if (strstr(mi->name, ".so")) {
                *rel_pc -= mi->start;
            }
            return mi;
        }
        mi = mi->next;
    }
    return NULL;
}

// =============================================================================
// stack trace functions
// =============================================================================

#define STACK_TRACE_DEPTH 16

typedef struct
{
    size_t count;
    intptr_t* addrs;
} stack_crawl_state_t;

/* depends on how the system includes define this */
#ifdef HAVE_UNWIND_CONTEXT_STRUCT
typedef struct _Unwind_Context __unwind_context;
#else
typedef _Unwind_Context __unwind_context;
#endif

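/* _Unwind_Backtrace callback: records one PC per frame until the buffer
 * is full or a zero PC is seen.
 */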
static _Unwind_Reason_Code trace_function(__unwind_context *context, void *arg)
{
    stack_crawl_state_t* state = (stack_crawl_state_t*)arg;
    if (state->count) {
        intptr_t ip = (intptr_t)_Unwind_GetIP(context);
        if (ip) {
            state->addrs[0] = ip;
            state->addrs++;
            state->count--;
            return _URC_NO_REASON;
        }
    }
    /*
     * Stop unwinding once we have run out of space to record addresses
     * or a zero PC has been seen.
     */
    return _URC_END_OF_STACK;
}

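/* Capture up to max_entries return addresses from the current call stack;
 * returns the number of entries actually recorded.
 */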
static inline
int get_backtrace(intptr_t* addrs, size_t max_entries)
{
    stack_crawl_state_t state;
    state.count = max_entries;
    state.addrs = (intptr_t*)addrs;
    _Unwind_Backtrace(trace_function, (void*)&state);
    return max_entries - state.count;
}

static void log_backtrace(intptr_t* addrs, size_t c)
{
    int index = 0;
    size_t i;
    for (i=0 ; i<c; i++) {
        unsigned int relpc;
        void* offset = 0;
        const char* symbol = NULL;

#if HAVE_DLADDR
        Dl_info info;
        if (dladdr((void*)addrs[i], &info)) {
            offset = info.dli_saddr;
            symbol = info.dli_sname;
        }
#endif

        if (symbol || index>0 || !HAVE_DLADDR) {
            /*
             * this test is a bit sketchy, but it allows us to skip the
             * stack trace entries due to this debugging code. it works
             * because those don't have a symbol (they're not exported)
             */
            mapinfo const* mi = pc_to_mapinfo(sMapInfo, addrs[i], &relpc);
            char const* soname = mi ? mi->name : NULL;
#if HAVE_DLADDR
            if (!soname)
                soname = info.dli_fname;
#endif
            if (!soname)
                soname = "unknown";

            if (symbol) {
                LOGW("          "
                     "#%02d  pc %08x  %s (%s+0x%x)",
                     index, relpc, soname, symbol,
                     addrs[i] - (intptr_t)offset);
            } else {
                LOGW("          "
                     "#%02d  pc %08x  %s",
                     index, relpc, soname);
            }
            index++;
        }
    }
}

/****************************************************************************/

/*
 * level <= 0 : deadlock prediction disabled
 * level    1 : deadlock prediction enabled, w/o call stacks
 * level    2 : deadlock prediction enabled w/ call stacks
 */
#define CAPTURE_CALLSTACK 2
static int sPthreadDebugLevel = 0;
static pid_t sPthreadDebugDisabledThread = -1;
static pthread_mutex_t sDbgLock = PTHREAD_MUTEX_INITIALIZER;

/****************************************************************************/

/* some simple/lame malloc replacement
 * NOT thread-safe and leaks everything
 */

#define DBG_ALLOC_BLOCK_SIZE PAGESIZE
static size_t sDbgAllocOffset = DBG_ALLOC_BLOCK_SIZE;
static char* sDbgAllocPtr = NULL;

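/* Bump allocator: carves objects out of anonymous pages one block at a
 * time.  Caller must hold sDbgLock (hence the "Locked" suffix); memory is
 * never freed.
 */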
static void* DbgAllocLocked(size_t size) {
    if ((sDbgAllocOffset + size) > DBG_ALLOC_BLOCK_SIZE) {
        sDbgAllocOffset = 0;
        sDbgAllocPtr = mmap(NULL, DBG_ALLOC_BLOCK_SIZE, PROT_READ|PROT_WRITE,
                MAP_ANON | MAP_PRIVATE, 0, 0);
        if (sDbgAllocPtr == MAP_FAILED) {
            return NULL;
        }
    }
    void* addr = sDbgAllocPtr + sDbgAllocOffset;
    sDbgAllocOffset += size;
    return addr;
}

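/* mmap-based "realloc" for GrowingList storage: maps a fresh region,
 * copies the old contents, and unmaps the old region.
 */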
static void* debug_realloc(void *ptr, size_t size, size_t old_size) {
    void* addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, 0, 0);
    if (addr != MAP_FAILED) {
        if (ptr) {
            memcpy(addr, ptr, old_size);
            munmap(ptr, old_size);
        }
    } else {
        addr = NULL;
    }
    return addr;
}

/*****************************************************************************/

struct MutexInfo;

typedef struct CallStack {
    intptr_t    depth;
    intptr_t*   addrs;
} CallStack;

typedef struct MutexInfo* MutexInfoListEntry;
typedef struct CallStack  CallStackListEntry;

typedef struct GrowingList {
    int alloc;
    int count;
    union {
        void*               data;
        MutexInfoListEntry* list;
        CallStackListEntry* stack;
    };
} GrowingList;

typedef GrowingList MutexInfoList;
typedef GrowingList CallStackList;

typedef struct MutexInfo {
    // thread currently holding the lock or 0
    pid_t               owner;

    // most-recently-locked doubly-linked list
    struct MutexInfo*   prev;
    struct MutexInfo*   next;

    // for reentrant locks
    int                 lockCount;
    // when looking for loops in the graph, marks visited nodes
    int                 historyMark;
    // the actual mutex
    pthread_mutex_t*    mutex;
    // list of locks directly acquired AFTER this one in the same thread
    MutexInfoList       children;
    // list of locks directly acquired BEFORE this one in the same thread
    MutexInfoList       parents;
    // list of call stacks captured when a new link to this lock is
    // established from its parent
    CallStackList       stacks;
    // call stack when this lock was acquired last
    int                 stackDepth;
    intptr_t            stackTrace[STACK_TRACE_DEPTH];
} MutexInfo;

static void growingListInit(GrowingList* list) {
    list->alloc = 0;
    list->count = 0;
    list->data = NULL;
}

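/* Make room for one more entry, growing the backing store in page-sized
 * chunks; on return the new slot is list[count - 1].
 */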
static void growingListAdd(GrowingList* pList, size_t objSize) {
    if (pList->count == pList->alloc) {
        size_t oldsize = pList->alloc * objSize;
        pList->alloc += PAGESIZE / objSize;
        size_t size = pList->alloc * objSize;
        pList->data = debug_realloc(pList->data, size, oldsize);
    }
    pList->count++;
}

static void initMutexInfo(MutexInfo* object, pthread_mutex_t* mutex) {
    object->owner = 0;
    object->prev = 0;
    object->next = 0;
    object->lockCount = 0;
    object->historyMark = 0;
    object->mutex = mutex;
    growingListInit(&object->children);
    growingListInit(&object->parents);
    growingListInit(&object->stacks);
    object->stackDepth = 0;
}

typedef struct ThreadInfo {
    pid_t       pid;
    MutexInfo*  mrl;
} ThreadInfo;

static void initThreadInfo(ThreadInfo* object, pid_t pid) {
    object->pid = pid;
    object->mrl = NULL;
}

/****************************************************************************/

static MutexInfo* get_mutex_info(pthread_mutex_t *mutex);
static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object);
static void mutex_unlock_checked(MutexInfo* object);

/****************************************************************************/

extern int pthread_mutex_lock_impl(pthread_mutex_t *mutex);
extern int pthread_mutex_unlock_impl(pthread_mutex_t *mutex);

static int pthread_mutex_lock_unchecked(pthread_mutex_t *mutex) {
    return pthread_mutex_lock_impl(mutex);
}

static int pthread_mutex_unlock_unchecked(pthread_mutex_t *mutex) {
    return pthread_mutex_unlock_impl(mutex);
}

/****************************************************************************/

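/* Copy a transient backtrace into debug-heap storage so it can be kept
 * with the lock-order graph.
 */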
static void dup_backtrace(CallStack* stack, int count, intptr_t const* addrs) {
    stack->depth = count;
    stack->addrs = DbgAllocLocked(count * sizeof(intptr_t));
    memcpy(stack->addrs, addrs, count * sizeof(intptr_t));
}

/****************************************************************************/

static int historyListHas(
        const MutexInfoList* list, MutexInfo const * obj) {
    int i;
    for (i=0; i<list->count; i++) {
        if (list->list[i] == obj) {
            return i;
        }
    }
    return -1;
}

static void historyListAdd(MutexInfoList* pList, MutexInfo* obj) {
    growingListAdd(pList, sizeof(MutexInfoListEntry));
    pList->list[pList->count - 1] = obj;
}

static int historyListRemove(MutexInfoList* pList, MutexInfo* obj) {
    int i;
    for (i = pList->count-1; i >= 0; i--) {
        if (pList->list[i] == obj) {
            break;
        }
    }
    if (i < 0) {
        // not found!
        return 0;
    }

    if (i != pList->count-1) {
        // copy the last entry to the new free slot
        pList->list[i] = pList->list[pList->count-1];
    }
    pList->count--;
    memset(&pList->list[pList->count], 0, sizeof(MutexInfoListEntry));
    return 1;
}

static void linkParentToChild(MutexInfo* parent, MutexInfo* child) {
    historyListAdd(&parent->children, child);
    historyListAdd(&child->parents, parent);
}

static void unlinkParentFromChild(MutexInfo* parent, MutexInfo* child) {
    historyListRemove(&parent->children, child);
    historyListRemove(&child->parents, parent);
}

/****************************************************************************/

static void callstackListAdd(CallStackList* pList,
        int count, intptr_t const* addrs) {
    growingListAdd(pList, sizeof(CallStackListEntry));
    dup_backtrace(&pList->stack[pList->count - 1], count, addrs);
}


/****************************************************************************/

/*
 * Recursively traverse the object hierarchy starting at "obj".  We mark
 * ourselves on entry and clear the mark on exit.  If we ever encounter
 * a marked object, we have a cycle.
 *
 * Returns "true" if all is well, "false" if we found a cycle.
 */

static int traverseTree(MutexInfo* obj, MutexInfo const* objParent)
{
    /*
     * Have we been here before?
     */
    if (obj->historyMark) {
        int stackDepth;
        intptr_t addrs[STACK_TRACE_DEPTH];

        /* Turn off prediction temporarily in this thread while logging */
        sPthreadDebugDisabledThread = gettid();

        if (sMapInfo == NULL) {
            // note: we're protected by sDbgLock
            sMapInfo = init_mapinfo(getpid());
        }

        LOGW("%s\n", kStartBanner);
        LOGW("pid: %d, tid: %d >>> %s <<<", getpid(), gettid(), __progname);
        LOGW("Illegal lock attempt:\n");
        LOGW("--- pthread_mutex_t at %p\n", obj->mutex);
        stackDepth = get_backtrace(addrs, STACK_TRACE_DEPTH);
        log_backtrace(addrs, stackDepth);

        LOGW("+++ Currently held locks in this thread (in reverse order):");
        MutexInfo* cur = obj;
        pid_t ourtid = gettid();
        int i;
        for (i=0 ; i<cur->parents.count ; i++) {
            MutexInfo* parent = cur->parents.list[i];
            if (parent->owner == ourtid) {
                LOGW("--- pthread_mutex_t at %p\n", parent->mutex);
                if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
                    log_backtrace(parent->stackTrace, parent->stackDepth);
                }
                // note: the break below exits after reporting the first
                // parent held by this thread, not the whole chain
                cur = parent;
                break;
            }
        }

        LOGW("+++ Earlier, the following lock order (from last to first) was established\n");
        return 0;
    }

    obj->historyMark = 1;

    MutexInfoList* pList = &obj->children;
    int result = 1;
    int i;
    for (i = pList->count-1; i >= 0; i--) {
        MutexInfo* child = pList->list[i];
        if (!traverseTree(child, obj)) {
            LOGW("--- pthread_mutex_t at %p\n", obj->mutex);
            if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
                int index = historyListHas(&obj->parents, objParent);
                if ((size_t)index < (size_t)obj->stacks.count) {
                    log_backtrace(
                            obj->stacks.stack[index].addrs,
                            obj->stacks.stack[index].depth);
                } else {
                    log_backtrace(
                            obj->stackTrace,
                            obj->stackDepth);
                }
            }
            result = 0;
            break;
        }
    }

    obj->historyMark = 0;
    return result;
}

/****************************************************************************/

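/* Called with the lock already held by this thread: bump the recursion
 * count for a re-lock, otherwise record ownership and, if "mrl" (the most
 * recently locked mutex in this thread) is set, add the edge mrl -> object
 * to the history graph and check it for cycles.
 */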
static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object)
{
    pid_t tid = gettid();
    if (object->owner == tid) {
        object->lockCount++;
        return;
    }

    object->owner = tid;
    object->lockCount = 0;

    if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
        // always record the call stack when acquiring a lock.
        // it's not efficient, but is useful during diagnostics
        object->stackDepth = get_backtrace(object->stackTrace, STACK_TRACE_DEPTH);
    }

    // no other locks held in this thread -- no deadlock possible!
    if (mrl == NULL)
        return;

    // check if the lock we're trying to acquire is a direct descendant of
    // the most recently locked mutex in this thread, in which case we're
    // in a good situation -- no deadlock possible
    if (historyListHas(&mrl->children, object) >= 0)
        return;

    pthread_mutex_lock_unchecked(&sDbgLock);

    linkParentToChild(mrl, object);
    if (!traverseTree(object, mrl)) {
        deinit_mapinfo(sMapInfo);
        sMapInfo = NULL;
        LOGW("%s\n", kEndBanner);
        unlinkParentFromChild(mrl, object);
        // reenable pthread debugging for this thread
        sPthreadDebugDisabledThread = -1;
    } else {
        // record the call stack for this link
        // NOTE: the call stack is added at the same index
        // as mrl in object->parents[]
        // ie: object->parents.count == object->stacks.count, which is
        // also the index.
        if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
            callstackListAdd(&object->stacks,
                    object->stackDepth, object->stackTrace);
        }
    }

    pthread_mutex_unlock_unchecked(&sDbgLock);
}

static void mutex_unlock_checked(MutexInfo* object)
{
    pid_t tid = gettid();
    if (object->owner == tid) {
        if (object->lockCount == 0) {
            object->owner = 0;
        } else {
            object->lockCount--;
        }
    }
}


// =============================================================================
// Hash Table functions
// =============================================================================

/****************************************************************************/

#define HASHTABLE_SIZE      256

typedef struct HashEntry HashEntry;
struct HashEntry {
    size_t slot;
    HashEntry* prev;
    HashEntry* next;
    void* data;
};

typedef struct HashTable HashTable;
struct HashTable {
    HashEntry* slots[HASHTABLE_SIZE];
};

static HashTable sMutexMap;
static HashTable sThreadMap;

/****************************************************************************/

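/* Simple byte-wise "times 31" hash over the key, seeded with its size. */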
static uint32_t get_hashcode(void const * key, size_t keySize)
{
    uint32_t h = keySize;
    char const* data = (char const*)key;
    size_t i;
    for (i = 0; i < keySize; i++) {
        h = h * 31 + *data;
        data++;
    }
    return (uint32_t)h;
}

static size_t get_index(uint32_t h)
{
    // We apply this secondary hashing discovered by Doug Lea to defend
    // against bad hashes.
    h += ~(h << 9);
    h ^= (((unsigned int) h) >> 14);
    h += (h << 4);
    h ^= (((unsigned int) h) >> 10);
    return (size_t)h & (HASHTABLE_SIZE - 1);
}

/****************************************************************************/

static void hashmap_init(HashTable* table) {
    memset(table, 0, sizeof(HashTable));
}

static void hashmap_removeEntry(HashTable* table, HashEntry* entry)
{
    HashEntry* prev = entry->prev;
    HashEntry* next = entry->next;
    if (prev != NULL) entry->prev->next = next;
    if (next != NULL) entry->next->prev = prev;
    if (prev == NULL) {
        // we are the head of the list. set the head to be next
        table->slots[entry->slot] = entry->next;
    }
}

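/* Look up the entry for "key", creating an empty one at the head of its
 * chain if none exists; a newly created entry is returned with
 * data == NULL so the caller can populate it.
 */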
static HashEntry* hashmap_lookup(HashTable* table,
        void const* key, size_t ksize,
        int (*equals)(void const* data, void const* key))
{
    const uint32_t hash = get_hashcode(key, ksize);
    const size_t slot = get_index(hash);

    HashEntry* entry = table->slots[slot];
    while (entry) {
        if (equals(entry->data, key)) {
            break;
        }
        entry = entry->next;
    }

    if (entry == NULL) {
        // create a new entry
        entry = (HashEntry*)DbgAllocLocked(sizeof(HashEntry));
        entry->data = NULL;
        entry->slot = slot;
        entry->prev = NULL;
        entry->next = table->slots[slot];
        if (entry->next != NULL) {
            entry->next->prev = entry;
        }
        table->slots[slot] = entry;
    }
    return entry;
}

/****************************************************************************/

static int MutexInfo_equals(void const* data, void const* key) {
    return ((MutexInfo const *)data)->mutex == *(pthread_mutex_t **)key;
}

static MutexInfo* get_mutex_info(pthread_mutex_t *mutex)
{
    pthread_mutex_lock_unchecked(&sDbgLock);

    HashEntry* entry = hashmap_lookup(&sMutexMap,
            &mutex, sizeof(mutex),
            &MutexInfo_equals);
    if (entry->data == NULL) {
        entry->data = (MutexInfo*)DbgAllocLocked(sizeof(MutexInfo));
        initMutexInfo(entry->data, mutex);
    }

    pthread_mutex_unlock_unchecked(&sDbgLock);

    return (MutexInfo *)entry->data;
}

/****************************************************************************/

static int ThreadInfo_equals(void const* data, void const* key) {
    return ((ThreadInfo const *)data)->pid == *(pid_t *)key;
}

static ThreadInfo* get_thread_info(pid_t pid)
{
    pthread_mutex_lock_unchecked(&sDbgLock);

    HashEntry* entry = hashmap_lookup(&sThreadMap,
            &pid, sizeof(pid),
            &ThreadInfo_equals);
    if (entry->data == NULL) {
        entry->data = (ThreadInfo*)DbgAllocLocked(sizeof(ThreadInfo));
        initThreadInfo(entry->data, pid);
    }

    pthread_mutex_unlock_unchecked(&sDbgLock);

    return (ThreadInfo *)entry->data;
}

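/* Maintain the per-thread list of currently held locks: ThreadInfo.mrl
 * points at the most recently locked mutex, and each lock's prev points
 * at the one locked before it.
 */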
static void push_most_recently_locked(MutexInfo* mrl) {
    ThreadInfo* tinfo = get_thread_info(gettid());
    mrl->next = NULL;
    mrl->prev = tinfo->mrl;
    tinfo->mrl = mrl;
}

static void remove_most_recently_locked(MutexInfo* mrl) {
    ThreadInfo* tinfo = get_thread_info(gettid());
    if (mrl->next) {
        (mrl->next)->prev = mrl->prev;
    }
    if (mrl->prev) {
        (mrl->prev)->next = mrl->next;
    }
    if (tinfo->mrl == mrl) {
        tinfo->mrl = mrl->next;
    }
}

static MutexInfo* get_most_recently_locked() {
    ThreadInfo* tinfo = get_thread_info(gettid());
    return tinfo->mrl;
}

/****************************************************************************/

/* pthread_debug_init() is called from libc_init_dynamic() just
 * after system properties have been initialized
 */

__LIBC_HIDDEN__
void pthread_debug_init(void) {
    char env[PROP_VALUE_MAX];
    if (__system_property_get("debug.libc.pthread", env)) {
        int level = atoi(env);
        if (level) {
            LOGI("pthread deadlock detection level %d enabled for pid %d (%s)",
                    level, getpid(), __progname);
            hashmap_init(&sMutexMap);
            sPthreadDebugLevel = level;
        }
    }
}

/*
 * See if we were allowed to grab the lock at this time.  We do it
 * *after* acquiring the lock, rather than before, so that we can
 * freely update the MutexInfo struct.  This seems counter-intuitive,
 * but our goal is deadlock *prediction* not deadlock *prevention*.
 * (If we actually deadlock, the situation is easy to diagnose from
 * a thread dump, so there's no point making a special effort to do
 * the checks before the lock is held.)
 */

__LIBC_HIDDEN__
void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex)
{
    if (sPthreadDebugLevel == 0) return;
    // prediction disabled for this thread
    if (sPthreadDebugDisabledThread == gettid())
        return;
    MutexInfo* object = get_mutex_info(mutex);
    MutexInfo* mrl = get_most_recently_locked();
    mutex_lock_checked(mrl, object);
    push_most_recently_locked(object);
}

/*
 * pthread_debug_mutex_unlock_check() must be called with the mutex
 * still held (ie: before calling the real unlock)
 */

__LIBC_HIDDEN__
void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex)
{
    if (sPthreadDebugLevel == 0) return;
    // prediction disabled for this thread
    if (sPthreadDebugDisabledThread == gettid())
        return;
    MutexInfo* object = get_mutex_info(mutex);
    remove_most_recently_locked(object);
    mutex_unlock_checked(object);
}