/* libunwind - a platform-independent unwind library
   Copyright (C) 2014 The Android Open Source Project

This file is part of libunwind.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.  */

#define UNW_LOCAL_ONLY
#include <libunwind.h>
#include "libunwind_i.h"

/* Globals holding the cached map list for all local unwinds and the
   lock that protects it. */
extern struct map_info *local_map_list;
extern lock_rdwr_var (local_rdwr_lock);

static pthread_once_t local_rdwr_lock_init = PTHREAD_ONCE_INIT;

static void
map_local_init_once (void)
{
  lock_rdwr_init (&local_rdwr_lock);
}

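/* Initialize the read/write lock protecting local_map_list exactly once,
   even if multiple threads call map_local_init concurrently. */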
HIDDEN void
map_local_init (void)
{
  pthread_once (&local_rdwr_lock_init, map_local_init_once);
}

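/* Carry any cached ELF data from entries of old_list over to the matching
   entries (same start and end) of new_list, so that a rebuild of the map
   list does not throw away data that has already been read. */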
static void
move_cached_elf_data (struct map_info *old_list, struct map_info *new_list)
{
  while (old_list)
    {
      if (!old_list->ei.valid)
        {
          old_list = old_list->next;
          continue;
        }
      /* Both lists are in order, so it's not necessary to scan through
         from the beginning of new_list each time looking for a match to
         the current map. As we progress, simply start from the last element
         in new_list we checked. */
      while (new_list && old_list->start <= new_list->start)
        {
          if (old_list->start == new_list->start
              && old_list->end == new_list->end)
            {
              /* No need to take any lock; the entire local_map_list is
                 locked at this point. */
              new_list->ei = old_list->ei;
              /* If it was mapped before, make sure to mark it unmapped now. */
              old_list->ei.mapped = false;
              /* Clear the old mini debug info so we do not try to free it
                 twice. */
              old_list->ei.mini_debug_info_data = NULL;
              old_list->ei.mini_debug_info_size = 0;
              /* Don't bother breaking out of the loop; the next while check
                 is guaranteed to fail, causing us to break out of the loop
                 after advancing to the next map element. */
            }
          new_list = new_list->next;
        }
      old_list = old_list->next;
    }
}

/* In order to cache as much as possible while unwinding the local process,
   we gather a map of the process before starting. If the cached list is
   missing a map, or a map exists but doesn't have the "expected_flags" set,
   check whether the cache needs to be regenerated.
   While regenerating the list, grab a write lock to prevent any readers from
   using the list while it's being modified. */
static int
rebuild_if_necessary (unw_word_t addr, int expected_flags, size_t bytes)
{
  struct map_info *map;
  struct map_info *new_list;
  int ret_value = -1;
  intrmask_t saved_mask;

  new_list = map_create_list (UNW_MAP_CREATE_LOCAL, getpid());
  map = map_find_from_addr (new_list, addr);
  if (map && (map->end - addr >= bytes) && (expected_flags == 0 || (map->flags & expected_flags)))
    {
      /* Get a write lock on local_map_list since it's going to be modified. */
      lock_rdwr_wr_acquire (&local_rdwr_lock, saved_mask);

      /* Just in case another thread rebuilt the map, check to see if the
         ip with expected_flags is in local_map_list. If not, the assumption
         is that new_list is newer than local_map_list because the process
         map only gains new maps or new permissions over time. If this is
         not true, then it would be necessary to regenerate the list one
         more time. */
      ret_value = 0;
      map = map_find_from_addr (local_map_list, addr);
      if (!map || (map->end - addr < bytes) || (expected_flags != 0 && !(map->flags & expected_flags)))
        {
          /* Move any cached items to the new list. */
          move_cached_elf_data (local_map_list, new_list);
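          /* Swap the lists: local_map_list now points at the freshly built
             list, while new_list points at the old one, which is destroyed
             below after the lock has been released. */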
          map = local_map_list;
          local_map_list = new_list;
          new_list = map;
        }

      lock_rdwr_release (&local_rdwr_lock, saved_mask);
    }

  map_destroy_list (new_list);

  return ret_value;
}

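/* Return non-zero if the map containing 'addr' has 'flag' set and covers at
   least 'bytes' bytes starting at 'addr'. If the flag does not appear to be
   set, the cached map list is rebuilt in case the mappings have changed
   since the list was created. */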
static int
is_flag_set (unw_word_t addr, int flag, size_t bytes)
{
  struct map_info *map;
  int ret = 0;
  intrmask_t saved_mask;

  lock_rdwr_rd_acquire (&local_rdwr_lock, saved_mask);
  map = map_find_from_addr (local_map_list, addr);
  if (map != NULL)
    {
      /* Never treat device memory as readable or writable. */
      if (map->flags & MAP_FLAGS_DEVICE_MEM)
        {
          lock_rdwr_release (&local_rdwr_lock, saved_mask);
          return 0;
        }
      /* Do not bother checking if the next map is readable and right at
       * the end of this map. All of the reads/writes are of small values
       * that should never span a map.
       */
      if (map->end - addr < bytes)
        ret = 0;
      else
        ret = map->flags & flag;
    }
  lock_rdwr_release (&local_rdwr_lock, saved_mask);

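  /* The flag was not set, or the map was missing entirely; rebuild the map
     list and report success if the rebuilt list satisfies the request. */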
  if (!ret && rebuild_if_necessary (addr, flag, bytes) == 0)
    {
      return 1;
    }
  return ret;
}

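/* Return non-zero if 'read_bytes' (or 'write_bytes') bytes starting at
   'addr' are readable (or writable) in the local process. */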
PROTECTED int
map_local_is_readable (unw_word_t addr, size_t read_bytes)
{
  return is_flag_set (addr, PROT_READ, read_bytes);
}

PROTECTED int
map_local_is_writable (unw_word_t addr, size_t write_bytes)
{
  return is_flag_set (addr, PROT_WRITE, write_bytes);
}

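/* Look up the ELF image containing 'ip' in the local process. On success,
   fill in 'ei' with a copy of the cached ELF data, set 'segbase' to the
   start of the map and 'mapoff' to its file offset (or zero for in-memory
   ELF data), and, if 'path' is non-NULL, return a strdup'ed copy of the
   map's path (or NULL if it has none). Returns 0 on success or -UNW_ENOINFO
   if no usable map is found. */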
PROTECTED int
local_get_elf_image (unw_addr_space_t as, struct elf_image *ei, unw_word_t ip,
                     unsigned long *segbase, unsigned long *mapoff, char **path, void *as_arg)
{
  struct map_info *map;
  intrmask_t saved_mask;
  int return_value = -UNW_ENOINFO;

  lock_rdwr_rd_acquire (&local_rdwr_lock, saved_mask);
  map = map_find_from_addr (local_map_list, ip);
  if (!map)
    {
      lock_rdwr_release (&local_rdwr_lock, saved_mask);
      if (rebuild_if_necessary (ip, 0, sizeof(unw_word_t)) < 0)
        return -UNW_ENOINFO;

      lock_rdwr_rd_acquire (&local_rdwr_lock, saved_mask);
      map = map_find_from_addr (local_map_list, ip);
    }

  if (map && elf_map_cached_image (as, as_arg, map, ip, true))
    {
      /* It is absolutely necessary that the elf structure is a copy of
       * the map data. The map list could be rebuilt, in which case the
       * old ei data would be modified and thrown away.
       */
      *ei = map->ei;
      *segbase = map->start;
      if (ei->mapped)
        *mapoff = map->offset;
      else
        /* Always use zero as the map offset for in-memory maps. A
         * dlopen of a shared library from an APK results in a non-zero
         * offset, which would not match the elf data and would cause
         * unwinds to fail. Currently, only in-memory unwinds of an APK
         * are possible, so only modify this path.
         */
        *mapoff = 0;
      if (path != NULL)
        {
          if (map->path)
            *path = strdup(map->path);
          else
            *path = NULL;
        }
      return_value = 0;
    }
  lock_rdwr_release (&local_rdwr_lock, saved_mask);

  return return_value;
}

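/* Return a strdup'ed copy of the path of the map containing 'ip', or NULL
   if there is no such map or it has no path. The caller owns the returned
   string and must free it. */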
PROTECTED char *
map_local_get_image_name (unw_word_t ip)
{
  struct map_info *map;
  intrmask_t saved_mask;
  char *image_name = NULL;

  lock_rdwr_rd_acquire (&local_rdwr_lock, saved_mask);
  map = map_find_from_addr (local_map_list, ip);
  if (!map)
    {
      lock_rdwr_release (&local_rdwr_lock, saved_mask);
      if (rebuild_if_necessary (ip, 0, sizeof(unw_word_t)) < 0)
        return NULL;

      lock_rdwr_rd_acquire (&local_rdwr_lock, saved_mask);
      map = map_find_from_addr (local_map_list, ip);
    }
  /* Guard against maps without a path so strdup is never handed NULL. */
  if (map && map->path)
    image_name = strdup (map->path);
  lock_rdwr_release (&local_rdwr_lock, saved_mask);

  return image_name;
}