/* libunwind - a platform-independent unwind library
   Copyright (C) 2010, 2011 by FERMI NATIONAL ACCELERATOR LABORATORY

This file is part of libunwind.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.  */

#include "unwind_i.h"
#include "ucontext_i.h"
#include <signal.h>
#include <limits.h>

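/* These pthread symbols are weak references: they resolve to NULL when
   the program is not linked against libpthread, in which case we fall
   back to the single-threaded code paths below.  */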
#pragma weak pthread_once
#pragma weak pthread_key_create
#pragma weak pthread_getspecific
#pragma weak pthread_setspecific

/* Initial hash table size. Table expands by 2 bits (times four). */
#define HASH_MIN_BITS 14

typedef struct
{
  unw_tdep_frame_t *frames;
  size_t log_size;
  size_t used;
  size_t dtor_count;  /* Counts how many times our destructor has already
                         been called. */
} unw_trace_cache_t;

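/* Sentinel for unused hash buckets: a virtual_address of 0 marks a
   slot as free, and trace_lookup reuses any slot whose address is 0.  */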
static const unw_tdep_frame_t empty_frame = { 0, UNW_X86_64_FRAME_OTHER, -1, -1, 0, -1, -1 };
static define_lock (trace_init_lock);
static pthread_once_t trace_cache_once = PTHREAD_ONCE_INIT;
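/* Set once trace_cache_init_once has actually run.  pthread_once may be
   a non-functional stub (e.g. pthread stubs provided by the C library)
   that returns without calling the init routine; trace_cache_get checks
   this flag and falls back to the unthreaded cache in that case.  */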
static sig_atomic_t trace_cache_once_happen;
static pthread_key_t trace_cache_key;
static struct mempool trace_cache_pool;
static __thread unw_trace_cache_t *tls_cache;
static __thread int tls_cache_destroyed;

/* Free memory for a thread's trace cache. */
static void
trace_cache_free (void *arg)
{
  unw_trace_cache_t *cache = arg;
  if (++cache->dtor_count < PTHREAD_DESTRUCTOR_ITERATIONS)
  {
    /* Not yet our turn to get destroyed. Re-install ourselves into the key. */
    pthread_setspecific(trace_cache_key, cache);
    Debug(5, "delayed freeing cache %p (%zx to go)\n", cache,
          PTHREAD_DESTRUCTOR_ITERATIONS - cache->dtor_count);
    return;
  }
  tls_cache_destroyed = 1;
  tls_cache = NULL;
  munmap (cache->frames, (1u << cache->log_size) * sizeof(unw_tdep_frame_t));
  mempool_free (&trace_cache_pool, cache);
  Debug(5, "freed cache %p\n", cache);
}

/* Initialise frame tracing for threaded use. */
static void
trace_cache_init_once (void)
{
  pthread_key_create (&trace_cache_key, &trace_cache_free);
  mempool_init (&trace_cache_pool, sizeof (unw_trace_cache_t), 0);
  trace_cache_once_happen = 1;
}

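/* Allocate N hash buckets and initialise every bucket to EMPTY_FRAME.
   Returns NULL if memory allocation fails.  */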
static unw_tdep_frame_t *
trace_cache_buckets (size_t n)
{
  unw_tdep_frame_t *frames;
  size_t i;

  GET_MEMORY(frames, n * sizeof (unw_tdep_frame_t));
  if (likely(frames != NULL))
    for (i = 0; i < n; ++i)
      frames[i] = empty_frame;

  return frames;
}

/* Allocate and initialise hash table for frame cache lookups.
   Returns the cache initialised with (1u << HASH_MIN_BITS) hash
   buckets, or NULL if there was a memory allocation problem. */
static unw_trace_cache_t *
trace_cache_create (void)
{
  unw_trace_cache_t *cache;

  if (tls_cache_destroyed)
  {
    /* The current thread is in the process of exiting. Don't recreate
       cache, as we wouldn't have another chance to free it. */
    Debug(5, "refusing to reallocate cache: "
             "thread-locals are being deallocated\n");
    return NULL;
  }

  if (! (cache = mempool_alloc(&trace_cache_pool)))
  {
    Debug(5, "failed to allocate cache\n");
    return NULL;
  }

  if (! (cache->frames = trace_cache_buckets(1u << HASH_MIN_BITS)))
  {
    Debug(5, "failed to allocate buckets\n");
    mempool_free(&trace_cache_pool, cache);
    return NULL;
  }

  cache->log_size = HASH_MIN_BITS;
  cache->used = 0;
  cache->dtor_count = 0;
  tls_cache_destroyed = 0;  /* Paranoia: should already be 0. */
  Debug(5, "allocated cache %p\n", cache);
  return cache;
}

/* Expand the hash table in the frame cache if possible. This always
   quadruples the hash size, and clears all previous frame entries. */
static int
trace_cache_expand (unw_trace_cache_t *cache)
{
  size_t old_size = (1u << cache->log_size);
  size_t new_log_size = cache->log_size + 2;
  unw_tdep_frame_t *new_frames = trace_cache_buckets (1u << new_log_size);

  if (unlikely(! new_frames))
  {
    Debug(5, "failed to expand cache to 2^%zu buckets\n", new_log_size);
    return -UNW_ENOMEM;
  }

  Debug(5, "expanded cache from 2^%zu to 2^%zu buckets\n", cache->log_size, new_log_size);
  munmap(cache->frames, old_size * sizeof(unw_tdep_frame_t));
  cache->frames = new_frames;
  cache->log_size = new_log_size;
  cache->used = 0;
  return 0;
}

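/* Get a process-global frame cache, used when the program is not linked
   against libpthread (or when pthread_once is non-functional).  Creation
   of the cache is serialised with trace_init_lock.  */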
static unw_trace_cache_t *
trace_cache_get_unthreaded (void)
{
  unw_trace_cache_t *cache;
  intrmask_t saved_mask;
  static unw_trace_cache_t *global_cache = NULL;
  lock_acquire (&trace_init_lock, saved_mask);
  if (! global_cache)
  {
    mempool_init (&trace_cache_pool, sizeof (unw_trace_cache_t), 0);
    global_cache = trace_cache_create ();
  }
  cache = global_cache;
  lock_release (&trace_init_lock, saved_mask);
  Debug(5, "using cache %p\n", cache);
  return cache;
}

/* Get the frame cache for the current thread. Create it if there is none. */
static unw_trace_cache_t *
trace_cache_get (void)
{
  unw_trace_cache_t *cache;
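  /* pthread_once is a weak reference (see the #pragma weak above), so it
     is non-NULL only when the program is linked against libpthread. */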
  if (likely (pthread_once != NULL))
  {
    pthread_once(&trace_cache_once, &trace_cache_init_once);
    if (!trace_cache_once_happen)
    {
      return trace_cache_get_unthreaded();
    }
    if (! (cache = tls_cache))
    {
      cache = trace_cache_create();
      pthread_setspecific(trace_cache_key, cache);
      tls_cache = cache;
    }
    Debug(5, "using cache %p\n", cache);
    return cache;
  }
  else
  {
    return trace_cache_get_unthreaded();
  }
}

/* Initialise frame properties for address cache slot F at address
   RIP using current CFA, RBP and RSP values.  Modifies CURSOR to
   that location, performs one unw_step(), and fills F with what
   was discovered about the location.  Returns F.

   FIXME: This probably should tell DWARF handling to never evaluate
   or use registers other than RBP, RSP and RIP in case there is
   highly unusual unwind info which uses these creatively. */
static unw_tdep_frame_t *
trace_init_addr (unw_tdep_frame_t *f,
                 unw_cursor_t *cursor,
                 unw_word_t cfa,
                 unw_word_t rip,
                 unw_word_t rbp,
                 unw_word_t rsp)
{
  struct cursor *c = (struct cursor *) cursor;
  struct dwarf_cursor *d = &c->dwarf;
  int ret = -UNW_EINVAL;

  /* Initialise frame properties: unknown, not last. */
  f->virtual_address = rip;
  f->frame_type = UNW_X86_64_FRAME_OTHER;
  f->last_frame = 0;
  f->cfa_reg_rsp = -1;
  f->cfa_reg_offset = 0;
  f->rbp_cfa_offset = -1;
  f->rsp_cfa_offset = -1;

  /* Reinitialise cursor to this instruction - but undo next/prev RIP
     adjustment because unw_step will redo it - and force RIP, RBP and
     RSP into register locations (=~ ucontext we keep), then set
     their desired values. Then perform the step. */
  d->ip = rip + d->use_prev_instr;
  d->cfa = cfa;
  d->loc[UNW_X86_64_RIP] = DWARF_REG_LOC (d, UNW_X86_64_RIP);
  d->loc[UNW_X86_64_RBP] = DWARF_REG_LOC (d, UNW_X86_64_RBP);
  d->loc[UNW_X86_64_RSP] = DWARF_REG_LOC (d, UNW_X86_64_RSP);
  c->frame_info = *f;

  if (likely(dwarf_put (d, d->loc[UNW_X86_64_RIP], rip) >= 0)
      && likely(dwarf_put (d, d->loc[UNW_X86_64_RBP], rbp) >= 0)
      && likely(dwarf_put (d, d->loc[UNW_X86_64_RSP], rsp) >= 0)
      && likely((ret = unw_step (cursor)) >= 0))
    *f = c->frame_info;

  /* If unw_step() stopped voluntarily, remember that, even if it
     otherwise could not determine anything useful.  This avoids
     failing the trace if we hit frames without unwind info, which is
     common for the outermost frame (CRT stuff) on many systems.
     Falling back to an unw_step() loop in these very common
     circumstances would not produce any better result. */
  if (ret == 0)
    f->last_frame = -1;

  Debug (3, "frame va %lx type %d last %d cfa %s+%d rbp @ cfa%+d rsp @ cfa%+d\n",
         f->virtual_address, f->frame_type, f->last_frame,
         f->cfa_reg_rsp ? "rsp" : "rbp", f->cfa_reg_offset,
         f->rbp_cfa_offset, f->rsp_cfa_offset);

  return f;
}

/* Look up and if necessary fill in frame attributes for address RIP
   in CACHE using current CFA, RBP and RSP values.  Uses CURSOR to
   perform any unwind steps necessary to fill the cache.  Returns the
   frame cache slot which describes RIP. */
static unw_tdep_frame_t *
trace_lookup (unw_cursor_t *cursor,
              unw_trace_cache_t *cache,
              unw_word_t cfa,
              unw_word_t rip,
              unw_word_t rbp,
              unw_word_t rsp)
{
  /* First look for previously cached information, using the cache as
     a linear-probing hash table with a probe step of 1.  The majority
     of lookups should complete within a few steps, but it is very
     important that the hash table does not fill up, or performance
     falls off a cliff. */
  uint64_t i, addr;
  uint64_t cache_size = 1u << cache->log_size;
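  /* Multiplicative hash: the constant is roughly 2^64 divided by the
     golden ratio, and the shift keeps the topmost, best-mixed bits.
     The mask then reduces the slot index to the current table size. */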
  uint64_t slot = ((rip * 0x9e3779b97f4a7c16) >> 43) & (cache_size-1);
  unw_tdep_frame_t *frame;

  for (i = 0; i < 16; ++i)
  {
    frame = &cache->frames[slot];
    addr = frame->virtual_address;

    /* Return if we found the address. */
    if (likely(addr == rip))
    {
      Debug (4, "found address after %lu steps\n", i);
      return frame;
    }

    /* If slot is empty, reuse it. */
    if (likely(! addr))
      break;

    /* Linear probe to next slot candidate, step = 1. */
    if (++slot >= cache_size)
      slot -= cache_size;
  }

  /* If we collided after 16 steps, or if the hash is more than half
     full, force the hash to expand. Fill the selected slot, whether
     it's free or collides. Note that hash expansion drops previous
     contents; further lookups will refill the hash. */
  Debug (4, "updating slot %lu after %lu steps, replacing 0x%lx\n", slot, i, addr);
  if (unlikely(addr || cache->used >= cache_size / 2))
  {
    if (unlikely(trace_cache_expand (cache) < 0))
      return NULL;

    cache_size = 1u << cache->log_size;
    slot = ((rip * 0x9e3779b97f4a7c16) >> 43) & (cache_size-1);
    frame = &cache->frames[slot];
    addr = frame->virtual_address;
  }

  if (! addr)
    ++cache->used;

  return trace_init_addr (frame, cursor, cfa, rip, rbp, rsp);
}

/* Fast stack backtrace for x86-64.

   This is used by the backtrace() implementation to accelerate
   frequent queries for the current stack, without any desire to
   unwind. It fills BUFFER with the call tree from CURSOR upwards for
   at most SIZE stack levels. The first frame, backtrace itself, is
   omitted. When called, SIZE should give the maximum number of
   entries that can be stored into BUFFER. Uses an internal
   thread-specific cache to accelerate queries.

   The caller should fall back to an unw_step() loop if this function
   fails by returning -UNW_ESTOPUNWIND, meaning the routine hit a
   stack frame that is too complex to be traced in the fast path.

   This function is tuned for clients which only need to walk the
   stack to get the call tree as fast as possible but without any
   other details, for example profilers sampling the stack thousands
   to millions of times per second.  The routine handles the most
   common x86-64 ABI stack layouts: CFA is RBP or RSP plus/minus a
   constant offset, the return address is at CFA-8, and RBP and RSP
   are either unchanged or saved on the stack at a constant offset
   from the CFA; the signal return frame; and frames without unwind
   info, provided they are at the outermost (final) frame or can
   conservatively be assumed to be frame-pointer based.
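
   For reference, a standard traceable frame looks roughly like this
   (a sketch; rbp_cfa_offset is typically -16 when the callee pushes
   RBP on entry, and the new RSP after the frame is simply the CFA):

     CFA      -> start of caller's frame
     CFA - 8  -> return address (becomes the next RIP)
     CFA - 16 -> saved RBP (when rbp_cfa_offset == -16)
     ...
     RSP      -> callee locals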

   Any other stack layout will cause the routine to give up. There
   are only a handful of relatively rarely used functions which do
   not have a stack frame in the standard form: vfork, longjmp,
   setcontext and _dl_runtime_profile on common Linux systems, for
   example.

   On success BUFFER and *SIZE reflect the trace progress up to *SIZE
   stack levels or the outermost frame, whichever is less.  It may
   stop short of the outermost frame if an unw_step() loop would also
   do so, e.g. if there is no more unwind information; this is not
   reported as an error.

   The function returns a negative value for errors, in particular
   -UNW_ESTOPUNWIND if tracing stopped because of unusual frame
   unwind info.  BUFFER and *SIZE reflect the tracing progress up to
   the error frame.

   Callers of this function would normally look like this:

     unw_cursor_t     cur;
     unw_context_t    ctx;
     void            *addrs[128];
     int              depth = 128;
     int              ret;

     unw_getcontext(&ctx);
     unw_init_local(&cur, &ctx);
     if ((ret = unw_tdep_trace(&cur, addrs, &depth)) < 0)
     {
       depth = 0;
       unw_getcontext(&ctx);
       unw_init_local(&cur, &ctx);
       while ((ret = unw_step(&cur)) > 0 && depth < 128)
       {
         unw_word_t ip;
         unw_get_reg(&cur, UNW_REG_IP, &ip);
         addrs[depth++] = (void *) ip;
       }
     }
*/
HIDDEN int
tdep_trace (unw_cursor_t *cursor, void **buffer, int *size)
{
  struct cursor *c = (struct cursor *) cursor;
  struct dwarf_cursor *d = &c->dwarf;
  unw_trace_cache_t *cache;
  unw_word_t rbp, rsp, rip, cfa;
  int maxdepth = 0;
  int depth = 0;
  int ret;

  /* Check input parameters. */
  if (unlikely(! cursor || ! buffer || ! size || (maxdepth = *size) <= 0))
    return -UNW_EINVAL;

  Debug (1, "begin ip 0x%lx cfa 0x%lx\n", d->ip, d->cfa);

  /* Tell core dwarf routines to call back to us. */
  d->stash_frames = 1;

  /* Determine initial register values. These are safe to access
     directly because we know they come from the initial machine
     context. */
  rip = d->ip;
  rsp = cfa = d->cfa;
  ACCESS_MEM_FAST(ret, 0, d, DWARF_GET_LOC(d->loc[UNW_X86_64_RBP]), rbp);
  assert(ret == 0);

  /* Get frame cache. */
  if (unlikely(! (cache = trace_cache_get())))
  {
    Debug (1, "returning %d, cannot get trace cache\n", -UNW_ENOMEM);
    *size = 0;
    d->stash_frames = 0;
    return -UNW_ENOMEM;
  }

  /* Trace the stack upwards, starting from current RIP.  Adjust
     the RIP address for previous/next instruction as the main
     unwinding logic would also do.  We undo this before calling
     back into unw_step(). */
  while (depth < maxdepth)
  {
    rip -= d->use_prev_instr;
    Debug (2, "depth %d cfa 0x%lx rip 0x%lx rsp 0x%lx rbp 0x%lx\n",
           depth, cfa, rip, rsp, rbp);

    /* See if we have this address cached.  If not, evaluate enough of
       the dwarf unwind information to fill the cache line data, or to
       decide this frame cannot be handled in fast trace mode.  We
       cache negative results too to prevent unnecessary dwarf parsing
       for common failures. */
    unw_tdep_frame_t *f = trace_lookup (cursor, cache, cfa, rip, rbp, rsp);

    /* If we don't have information for this frame, give up. */
    if (unlikely(! f))
    {
      ret = -UNW_ENOINFO;
      break;
    }

    Debug (3, "frame va %lx type %d last %d cfa %s+%d rbp @ cfa%+d rsp @ cfa%+d\n",
           f->virtual_address, f->frame_type, f->last_frame,
           f->cfa_reg_rsp ? "rsp" : "rbp", f->cfa_reg_offset,
           f->rbp_cfa_offset, f->rsp_cfa_offset);

    assert (f->virtual_address == rip);

    /* Stop if this was the last frame.  In particular don't evaluate
       new register values as it may not be safe - we don't normally
       run with full validation on, and do not want to - and there's
       enough bad unwind info floating around that we need to trust
       what unw_step() previously said, in potentially bogus frames. */
    if (f->last_frame)
      break;

    /* Evaluate CFA and registers for the next frame. */
    switch (f->frame_type)
    {
    case UNW_X86_64_FRAME_GUESSED:
      /* Fall thru to standard processing after forcing validation. */
      c->validate = 1;

    case UNW_X86_64_FRAME_STANDARD:
      /* Advance standard traceable frame. */
      cfa = (f->cfa_reg_rsp ? rsp : rbp) + f->cfa_reg_offset;
      ACCESS_MEM_FAST(ret, c->validate, d, cfa - 8, rip);
      if (likely(ret >= 0) && likely(f->rbp_cfa_offset != -1))
        ACCESS_MEM_FAST(ret, c->validate, d, cfa + f->rbp_cfa_offset, rbp);

      /* Don't bother reading RSP from DWARF, CFA becomes new RSP. */
      rsp = cfa;

      /* Next frame needs to back up for unwind info lookup. */
      d->use_prev_instr = 1;
      break;

    case UNW_X86_64_FRAME_SIGRETURN:
      cfa = cfa + f->cfa_reg_offset; /* cfa now points to ucontext_t.  */

      ACCESS_MEM_FAST(ret, c->validate, d, cfa + UC_MCONTEXT_GREGS_RIP, rip);
      if (likely(ret >= 0))
        ACCESS_MEM_FAST(ret, c->validate, d, cfa + UC_MCONTEXT_GREGS_RBP, rbp);
      if (likely(ret >= 0))
        ACCESS_MEM_FAST(ret, c->validate, d, cfa + UC_MCONTEXT_GREGS_RSP, rsp);

      /* Resume stack at signal restoration point. The stack is not
         necessarily continuous here, especially with sigaltstack(). */
      cfa = rsp;

      /* Next frame should not back up. */
      d->use_prev_instr = 0;
      break;

    default:
      /* We cannot trace through this frame, give up and tell the
         caller we had to stop.  Data collected so far may still be
         useful to the caller, so let it know how far we got.  */
      ret = -UNW_ESTOPUNWIND;
      break;
    }

    Debug (4, "new cfa 0x%lx rip 0x%lx rsp 0x%lx rbp 0x%lx\n",
           cfa, rip, rsp, rbp);

    /* If we failed, or ended up somewhere bogus (return addresses
       below 0x4000 land in pages that are normally never mapped),
       stop. */
    if (unlikely(ret < 0 || rip < 0x4000))
      break;

    /* Record this address in the stack trace; the starting frame
       (backtrace itself) was deliberately skipped. */
    buffer[depth++] = (void *) (rip - d->use_prev_instr);
  }

#if UNW_DEBUG
  Debug (1, "returning %d, depth %d\n", ret, depth);
#endif
  *size = depth;
  return ret;
}