aspacemgr-linux.c revision 686b8cad58180b009f8df91509d0a5da2191167a
1/* -*- mode: C; c-basic-offset: 3; -*- */
2
3/*--------------------------------------------------------------------*/
4/*--- The address space manager: segment initialisation and        ---*/
5/*--- tracking, stack operations                                   ---*/
6/*---                                                              ---*/
7/*--- Implementation for Linux (and Darwin!)   m_aspacemgr-linux.c ---*/
8/*--------------------------------------------------------------------*/
9
10/*
11   This file is part of Valgrind, a dynamic binary instrumentation
12   framework.
13
14   Copyright (C) 2000-2013 Julian Seward
15      jseward@acm.org
16
17   This program is free software; you can redistribute it and/or
18   modify it under the terms of the GNU General Public License as
19   published by the Free Software Foundation; either version 2 of the
20   License, or (at your option) any later version.
21
22   This program is distributed in the hope that it will be useful, but
23   WITHOUT ANY WARRANTY; without even the implied warranty of
24   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
25   General Public License for more details.
26
27   You should have received a copy of the GNU General Public License
28   along with this program; if not, write to the Free Software
29   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
30   02111-1307, USA.
31
32   The GNU General Public License is contained in the file COPYING.
33*/
34
35#if defined(VGO_linux) || defined(VGO_darwin)
36
37/* *************************************************************
38   DO NOT INCLUDE ANY OTHER FILES HERE.
39   ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
40   AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
41   ************************************************************* */
42
43#include "priv_aspacemgr.h"
44#include "config.h"
45
46
47/* Note: many of the exported functions implemented below are
48   described more fully in comments in pub_core_aspacemgr.h.
49*/
50
51
52/*-----------------------------------------------------------------*/
53/*---                                                           ---*/
54/*--- Overview.                                                 ---*/
55/*---                                                           ---*/
56/*-----------------------------------------------------------------*/
57
58/* Purpose
59   ~~~~~~~
60   The purpose of the address space manager (aspacem) is:
61
62   (1) to record the disposition of all parts of the process' address
63       space at all times.
64
65   (2) to the extent that it can, influence layout in ways favourable
66       to our purposes.
67
68   It is important to appreciate that whilst it can and does attempt
69   to influence layout, and usually succeeds, it isn't possible to
70   impose absolute control: in the end, the kernel is the final
71   arbiter, and can always bounce our requests.
72
73   Strategy
74   ~~~~~~~~
75   The strategy is therefore as follows:
76
77   * Track ownership of mappings.  Each one can belong either to
78     Valgrind or to the client.
79
80   * Try to place the client's fixed and hinted mappings at the
81     requested addresses.  Fixed mappings are allowed anywhere except
82     in areas reserved by Valgrind; the client can trash its own
83     mappings if it wants.  Hinted mappings are allowed providing they
84     fall entirely in free areas; if not, they will be placed by
85     aspacem in a free area.
86
87   * Anonymous mappings are allocated so as to keep Valgrind and
88     client areas widely separated when possible.  If address space
89     runs low, then they may become intermingled: aspacem will attempt
90     to use all possible space.  But under most circumstances lack of
91     address space is not a problem and so the areas will remain far
92     apart.
93
94     Searches for client space start at aspacem_cStart and will wrap
95     around the end of the available space if needed.  Searches for
96     Valgrind space start at aspacem_vStart and will also wrap around.
97     Because aspacem_cStart is approximately at the start of the
98     available space and aspacem_vStart is approximately in the
99     middle, for the most part the client anonymous mappings will be
100     clustered towards the start of available space, and Valgrind ones
101     in the middle.
102
103     The available space is delimited by aspacem_minAddr and
104     aspacem_maxAddr.  aspacem is flexible and can operate with these
105     at any (sane) setting.  For 32-bit Linux, aspacem_minAddr is set
106     to some low-ish value at startup (64M) and aspacem_maxAddr is
107     derived from the stack pointer at system startup.  This seems a
108     reliable way to establish the initial boundaries.
109     A command line option allows the user to change the value of
110     aspacem_minAddr, so that memory-hungry applications can use the
111     lowest part of the memory.
112
113     64-bit Linux is similar except for the important detail that the
114     upper boundary is set to 64G.  The reason is so that all
115     anonymous mappings (basically all client data areas) are kept
116     below 64G, since that is the maximum range that memcheck can
117     track shadow memory using a fast 2-level sparse array.  It can go
118     beyond that but runs much more slowly.  The 64G limit is
119     arbitrary and is trivially changed.  So, with the current
120     settings, programs on 64-bit Linux will appear to run out of
121     address space and presumably fail at the 64G limit.  Given the
122     considerable space overhead of Memcheck, that means you should be
123     able to memcheckify programs that use up to about 32G natively.
124
125   Note that the aspacem_minAddr/aspacem_maxAddr limits apply only to
126   anonymous mappings.  The client can still do fixed and hinted maps
127   at any addresses provided they do not overlap Valgrind's segments.
128   This makes Valgrind able to load prelinked .so's at their requested
129   addresses on 64-bit platforms, even if they are very high (eg,
130   112TB).
131
132   At startup, aspacem establishes the usable limits, and advises
133   m_main to place the client stack at the top of the range, which on
134   a 32-bit machine will be just below the real initial stack.  One
135   effect of this is that self-hosting sort-of works, because an inner
136   valgrind will then place its client's stack just below its own
137   initial stack.
138
139   The segment array and segment kinds
140   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
141   The central data structure is the segment array (segments[0
142   .. nsegments_used-1]).  This covers the entire address space in
143   order, giving account of every byte of it.  Free spaces are
144   represented explicitly as this makes many operations simpler.
145   Mergeable adjacent segments are aggressively merged so as to create
146   a "normalised" representation (preen_nsegments).
147
148   There are 7 (mutually-exclusive) segment kinds, the meaning of
149   which is important:
150
151   SkFree: a free space, which may be allocated either to Valgrind (V)
152      or the client (C).
153
154   SkAnonC: an anonymous mapping belonging to C.  For these, aspacem
155      tracks a boolean indicating whether or not it is part of the
156      client's heap area (can't remember why).
157
158   SkFileC: a file mapping belonging to C.
159
160   SkShmC: a shared memory segment belonging to C.
161
162   SkAnonV: an anonymous mapping belonging to V.  These cover all V's
163      dynamic memory needs, including non-client malloc/free areas,
164      shadow memory, and the translation cache.
165
166   SkFileV: a file mapping belonging to V.  As far as I know these are
167      only created transiently for the purposes of reading debug info.
168
169   SkResvn: a reservation segment.
170
171   These are mostly straightforward.  Reservation segments have some
172   subtlety, however.
173
174   A reservation segment is unmapped from the kernel's point of view,
175   but is an area in which aspacem will not create anonymous maps
176   (either Vs or Cs).  The idea is that we will try to keep it clear
177   when the choice to do so is ours.  Reservation segments are
178   'invisible' from the client's point of view: it may choose to park
179   a fixed mapping in the middle of one, and that's just tough -- we
180   can't do anything about that.  From the client's perspective
181   reservations are semantically equivalent to (although
182   distinguishable from, if it makes enquiries) free areas.
183
184   Reservations are a primitive mechanism provided for whatever
185   purposes the rest of the system wants.  Currently they are used to
186   reserve the expansion space into which a growdown stack is
187   expanded, and into which the data segment is extended.  Note,
188   though, those uses are entirely external to this module, which only
189   supplies the primitives.
190
191   Reservations may be shrunk in order that an adjoining anonymous
192   mapping may be extended.  This makes dataseg/stack expansion work.
193   A reservation may not be shrunk below one page.
194
195   The advise/notify concept
196   ~~~~~~~~~~~~~~~~~~~~~~~~~
197   All mmap-related calls must be routed via aspacem.  Calling
198   sys_mmap directly from the rest of the system is very dangerous
199   because aspacem's data structures will become out of date.
200
201   The fundamental mode of operation of aspacem is to support client
202   mmaps.  Here's what happens (in ML_(generic_PRE_sys_mmap)):
203
204   * m_syswrap intercepts the mmap call.  It examines the parameters
205     and identifies the requested placement constraints.  There are
206     three possibilities: no constraint (MAny), hinted (MHint, "I
207     prefer X but will accept anything"), and fixed (MFixed, "X or
208     nothing").
209
210   * This request is passed to VG_(am_get_advisory).  This decides on
211     a placement as described in detail in Strategy above.  It may
212     also indicate that the map should fail, because it would trash
213     one of Valgrind's areas, which would probably kill the system.
214
215   * Control returns to the wrapper.  If VG_(am_get_advisory) has
216     declared that the map should fail, then it must be made to do so.
217     Usually, though, the request is considered acceptable, in which
218     case an "advised" address is supplied.  The advised address
219     replaces the original address supplied by the client, and
220     MAP_FIXED is set.
221
222     Note at this point that although aspacem has been asked for
223     advice on where to place the mapping, no commitment has yet been
224     made by either it or the kernel.
225
226   * The adjusted request is handed off to the kernel.
227
228   * The kernel's result is examined.  If the map succeeded, aspacem
229     is told of the outcome (VG_(am_notify_client_mmap)), so it can
230     update its records accordingly.
231
232  This then is the central advise-notify idiom for handling client
233  mmap/munmap/mprotect/shmat:
234
235  * ask aspacem for an advised placement (or a veto)
236
237  * if not vetoed, hand request to kernel, using the advised placement
238
239  * examine result, and if successful, notify aspacem of the result.
240
241  There are also many convenience functions, eg
242  VG_(am_mmap_anon_fixed_client), which do both phases entirely within
243  aspacem.
244
245  To debug all this, a sync-checker is provided.  It reads
246  /proc/self/maps, compares what it sees with aspacem's records, and
247  complains if there is a difference.  --sanity-level=3 runs it before
248  and after each syscall, which is a powerful, if slow, way of finding
249  buggy syscall wrappers.
250
251  Loss of pointercheck
252  ~~~~~~~~~~~~~~~~~~~~
253  Up to and including Valgrind 2.4.1, x86 segmentation was used to
254  enforce separation of V and C, so that wild writes by C could not
255  trash V.  This got called "pointercheck".  Unfortunately, the new
256  more flexible memory layout, plus the need to be portable across
257  different architectures, means doing this in hardware is no longer
258  viable, and doing it in software is expensive.  So at the moment we
259  don't do it at all.
260*/
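
/* Illustrative sketch of the advise/notify idiom described above, as a
   client mmap would be handled.  This is not part of aspacem -- the real
   logic lives in m_syswrap (ML_(generic_PRE_sys_mmap)) -- and the
   MapRequest fields and function signatures used here are paraphrased
   from the headers; 'do_the_mmap_syscall' is a stand-in for however the
   caller actually issues the syscall. */
#if 0
static SysRes example_client_mmap_flow ( Addr start, SizeT length,
                                         UInt prot, UInt flags,
                                         Int fd, Off64T offset )
{
   MapRequest mreq;
   Bool       mreq_ok;
   Addr       advised;
   SysRes     sres;

   /* 1: ask aspacem for an advised placement (or a veto). */
   mreq.rkind = (flags & VKI_MAP_FIXED) ? MFixed
                                        : (start != 0 ? MHint : MAny);
   mreq.start = start;
   mreq.len   = length;
   advised = VG_(am_get_advisory)( &mreq, True/*forClient*/, &mreq_ok );
   if (!mreq_ok)
      return VG_(mk_SysRes_Error)( VKI_EINVAL );  /* vetoed */

   /* 2: if not vetoed, hand the request to the kernel, using the advised
      placement and MAP_FIXED so it lands exactly there. */
   sres = do_the_mmap_syscall( advised, length, prot,   /* stand-in, see
                               flags | VKI_MAP_FIXED,      comment above */
                               fd, offset );

   /* 3: if the kernel accepted it, notify aspacem so its records stay
      in sync with reality. */
   if (!sr_isError(sres))
      VG_(am_notify_client_mmap)( sr_Res(sres), length, prot,
                                  flags | VKI_MAP_FIXED, fd, offset );
   return sres;
}
#endif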
261
262
263/*-----------------------------------------------------------------*/
264/*---                                                           ---*/
265/*--- The Address Space Manager's state.                        ---*/
266/*---                                                           ---*/
267/*-----------------------------------------------------------------*/
268
269/* ------ start of STATE for the address-space manager ------ */
270
271/* Max number of segments we can track.  On Android, virtual address
272   space is limited, so keep a low limit -- 5000 x sizeof(NSegment) is
273   360KB. */
274#if defined(VGPV_arm_linux_android) \
275    || defined(VGPV_x86_linux_android) \
276    || defined(VGPV_mips32_linux_android) \
277    || defined(VGPV_arm64_linux_android)
278# define VG_N_SEGMENTS 5000
279#else
280# define VG_N_SEGMENTS 30000
281#endif
282
283/* Max number of segment file names we can track.  These are big (up to
284   VG_MAX_SEGNAMELEN bytes each) so on Android limit space usage to ~1MB. */
285#if defined(VGPV_arm_linux_android) \
286    || defined(VGPV_x86_linux_android) \
287    || defined(VGPV_mips32_linux_android) \
288    || defined(VGPV_arm64_linux_android)
289# define VG_N_SEGNAMES 1000
290#else
291# define VG_N_SEGNAMES 6000
292#endif
293
294/* Max length of a segment file name. FIXME: to be removed */
295#define VG_MAX_SEGNAMELEN 1000
296
297/* String table for segment names */
298
299/* FIXME: This is just for backward compatibility for now. To be adjusted. */
300static HChar segnames[VG_N_SEGNAMES * VG_MAX_SEGNAMELEN];
301static SizeT segnames_used = 0;  /* number of characters used */
302static UInt  num_segnames = 0;   /* number of names in string table */
303
304/* Array [0 .. nsegments_used-1] of all mappings. */
305/* Sorted by .addr field. */
306/* I: len may not be zero. */
307/* I: overlapping segments are not allowed. */
308/* I: the segments cover the entire address space precisely. */
309/* Each segment can optionally hold an index into the filename table. */
310
311static NSegment nsegments[VG_N_SEGMENTS];
312static Int      nsegments_used = 0;
313
314#define Addr_MIN ((Addr)0)
315#define Addr_MAX ((Addr)(-1ULL))
316
317/* Limits etc */
318
319
320Addr VG_(clo_aspacem_minAddr)
321#if defined(VGO_darwin)
322# if VG_WORDSIZE == 4
323   = (Addr) 0x00001000;
324# else
325   = (Addr) 0x100000000;  // 4GB page zero
326# endif
327#else
328   = (Addr) 0x04000000; // 64M
329#endif
330
331
332// The smallest address that aspacem will try to allocate
333static Addr aspacem_minAddr = 0;
334
335// The largest address that aspacem will try to allocate
336static Addr aspacem_maxAddr = 0;
337
338// Where aspacem will start looking for client space
339static Addr aspacem_cStart = 0;
340
341// Where aspacem will start looking for Valgrind space
342static Addr aspacem_vStart = 0;
343
344
345#define AM_SANITY_CHECK                                      \
346   do {                                                      \
347      if (VG_(clo_sanity_level >= 3))                        \
348         aspacem_assert(VG_(am_do_sync_check)                \
349            (__PRETTY_FUNCTION__,__FILE__,__LINE__));        \
350   } while (0)
351
352/* ------ end of STATE for the address-space manager ------ */
353
354/* ------ Forwards decls ------ */
355inline
356static Int  find_nsegment_idx ( Addr a );
357
358static void parse_procselfmaps (
359      void (*record_mapping)( Addr addr, SizeT len, UInt prot,
360                              ULong dev, ULong ino, Off64T offset,
361                              const HChar* filename ),
362      void (*record_gap)( Addr addr, SizeT len )
363   );
364
365/* ----- Hacks to do with the "commpage" on arm-linux ----- */
366/* Not that I have anything against the commpage per se.  It's just
367   that it's not listed in /proc/self/maps, which is a royal PITA --
368   we have to fake it up, in parse_procselfmaps.
369
370   But note also bug 254556 comment #2: this is now fixed in newer
371   kernels -- it is listed as a "[vectors]" entry.  Presumably the
372   fake entry made here duplicates the [vectors] entry, and so, if at
373   some point in the future, we can stop supporting buggy kernels,
374   then this kludge can be removed entirely, since the procmap parser
375   below will read that entry in the normal way. */
376#if defined(VGP_arm_linux)
377#  define ARM_LINUX_FAKE_COMMPAGE_START 0xFFFF0000
378#  define ARM_LINUX_FAKE_COMMPAGE_END1  0xFFFF1000
379#endif
380
381
382/*-----------------------------------------------------------------*/
383/*---                                                           ---*/
384/*--- Segment name management.                                  ---*/
385/*---                                                           ---*/
386/*-----------------------------------------------------------------*/
387
388/* Searches the string table to find an index for the given name.
389   If none is found, an index is allocated and the name stored.
390   If there is not enough room left in the string table, return -1.
391*/
392static Int allocate_segname ( const HChar* name )
393{
394   SizeT len, l, ix;
395
396   aspacem_assert(name);
397
398   if (0) VG_(debugLog)(0,"aspacem","allocate_segname %s\n", name);
399
400   len = VG_(strlen)(name);
401
402   /* first see if we already have the name. */
403   for (ix = 0; ix < segnames_used; ix += l + 1) {
404      l = VG_(strlen)(segnames + ix);
405      if (l == len && VG_(strcmp)(name, segnames + ix) == 0) return ix;
406   }
407
408   /* Is there enough room in the string table? */
409   if (len + 1 > (sizeof segnames) - segnames_used) {
410      return -1;
411   }
412
413   ++num_segnames;
414
415   /* copy it in */
416   ix = segnames_used;
417
418   VG_(strcpy)(segnames + segnames_used, name);
419   segnames_used += len + 1;
420
421   return ix;
422}
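
/* Illustrative example (not real data): after
      allocate_segname("/bin/dash")        -> returns 0
      allocate_segname("/lib/ld-2.19.so")  -> returns 10
   the table holds the two names NUL-separated,
      segnames = "/bin/dash\0/lib/ld-2.19.so\0"
   with segnames_used == 26 and num_segnames == 2.  The value stored in an
   NSegment's fnIdx is therefore a byte offset into segnames, not a
   0..N-1 slot number; fnIdx_seqnr (below) converts it to one for display
   purposes. */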
423
424
425/*-----------------------------------------------------------------*/
426/*---                                                           ---*/
427/*--- Displaying the segment array.                             ---*/
428/*---                                                           ---*/
429/*-----------------------------------------------------------------*/
430
431static const HChar* show_SegKind ( SegKind sk )
432{
433   switch (sk) {
434      case SkFree:  return "    ";
435      case SkAnonC: return "anon";
436      case SkAnonV: return "ANON";
437      case SkFileC: return "file";
438      case SkFileV: return "FILE";
439      case SkShmC:  return "shm ";
440      case SkResvn: return "RSVN";
441      default:      return "????";
442   }
443}
444
445static const HChar* show_ShrinkMode ( ShrinkMode sm )
446{
447   switch (sm) {
448      case SmLower: return "SmLower";
449      case SmUpper: return "SmUpper";
450      case SmFixed: return "SmFixed";
451      default: return "Sm?????";
452   }
453}
454
455static void show_len_concisely ( /*OUT*/HChar* buf, Addr start, Addr end )
456{
457   const HChar* fmt;
458   ULong len = ((ULong)end) - ((ULong)start) + 1;
459
460   if (len < 10*1000*1000ULL) {
461      fmt = "%7llu";
462   }
463   else if (len < 999999ULL * (1ULL<<20)) {
464      fmt = "%6llum";
465      len >>= 20;
466   }
467   else if (len < 999999ULL * (1ULL<<30)) {
468      fmt = "%6llug";
469      len >>= 30;
470   }
471   else if (len < 999999ULL * (1ULL<<40)) {
472      fmt = "%6llut";
473      len >>= 40;
474   }
475   else {
476      fmt = "%6llue";
477      len >>= 50;
478   }
479   ML_(am_sprintf)(buf, fmt, len);
480}
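
/* Examples (illustrative): a single page (4096 bytes) is printed as
   "   4096", a 64MB range as "    64m" and a 2GB range as "  2048m";
   the g/t/e suffixes only appear once the figure would otherwise exceed
   six digits in the smaller unit. */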
481
482/* Returns a sequence number for the fnIdx position in segnames.
483   Used in aspacemgr debug output to associate a segment with
484   the list of segment names output at the beginning. */
485static Int fnIdx_seqnr(Int fnIdx)
486{
487   SizeT ix;
488   Int seqnr = -1;
489
490   for (ix = 0; ix < segnames_used; ix += VG_(strlen)(segnames + ix) + 1) {
491      seqnr++;
492      if (ix == fnIdx)
493         return seqnr;
494   }
495
496   return -1;
497}
498
499/* Show full details of an NSegment */
500
501static void show_nsegment_full ( Int logLevel, Int segNo, const NSegment* seg )
502{
503   HChar len_buf[20];
504   const HChar* name = "(none)";
505
506   if (seg->fnIdx >= 0 && seg->fnIdx < segnames_used
507                       && segnames[seg->fnIdx] != 0)
508      name = segnames + seg->fnIdx;
509
510   show_len_concisely(len_buf, seg->start, seg->end);
511
512   VG_(debugLog)(
513      logLevel, "aspacem",
514      "%3d: %s %010llx-%010llx %s %c%c%c%c%c %s "
515      "d=0x%03llx i=%-7lld o=%-7lld (%d,%d) %s\n",
516      segNo, show_SegKind(seg->kind),
517      (ULong)seg->start, (ULong)seg->end, len_buf,
518      seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
519      seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
520      seg->isCH ? 'H' : '-',
521      show_ShrinkMode(seg->smode),
522      seg->dev, seg->ino, seg->offset,
523      fnIdx_seqnr(seg->fnIdx), seg->fnIdx,
524      name
525   );
526}
527
528
529/* Show an NSegment in a user-friendly-ish way. */
530
531static void show_nsegment ( Int logLevel, Int segNo, const NSegment* seg )
532{
533   HChar len_buf[20];
534   show_len_concisely(len_buf, seg->start, seg->end);
535
536   switch (seg->kind) {
537
538      case SkFree:
539         VG_(debugLog)(
540            logLevel, "aspacem",
541            "%3d: %s %010llx-%010llx %s\n",
542            segNo, show_SegKind(seg->kind),
543            (ULong)seg->start, (ULong)seg->end, len_buf
544         );
545         break;
546
547      case SkAnonC: case SkAnonV: case SkShmC:
548         VG_(debugLog)(
549            logLevel, "aspacem",
550            "%3d: %s %010llx-%010llx %s %c%c%c%c%c\n",
551            segNo, show_SegKind(seg->kind),
552            (ULong)seg->start, (ULong)seg->end, len_buf,
553            seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
554            seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
555            seg->isCH ? 'H' : '-'
556         );
557         break;
558
559      case SkFileC: case SkFileV:
560         VG_(debugLog)(
561            logLevel, "aspacem",
562            "%3d: %s %010llx-%010llx %s %c%c%c%c%c d=0x%03llx "
563            "i=%-7lld o=%-7lld (%d,%d)\n",
564            segNo, show_SegKind(seg->kind),
565            (ULong)seg->start, (ULong)seg->end, len_buf,
566            seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
567            seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
568            seg->isCH ? 'H' : '-',
569            seg->dev, seg->ino, seg->offset,
570            fnIdx_seqnr(seg->fnIdx), seg->fnIdx
571         );
572         break;
573
574      case SkResvn:
575         VG_(debugLog)(
576            logLevel, "aspacem",
577            "%3d: %s %010llx-%010llx %s %c%c%c%c%c %s\n",
578            segNo, show_SegKind(seg->kind),
579            (ULong)seg->start, (ULong)seg->end, len_buf,
580            seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
581            seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
582            seg->isCH ? 'H' : '-',
583            show_ShrinkMode(seg->smode)
584         );
585         break;
586
587      default:
588         VG_(debugLog)(
589            logLevel, "aspacem",
590            "%3d: ???? UNKNOWN SEGMENT KIND\n",
591            segNo
592         );
593         break;
594   }
595}
596
597/* Print out the segment array (debugging only!). */
598void VG_(am_show_nsegments) ( Int logLevel, const HChar* who )
599{
600   Int i;
601   SizeT ix;
602   VG_(debugLog)(logLevel, "aspacem",
603                 "<<< SHOW_SEGMENTS: %s (%d segments, %u segnames)\n",
604                 who, nsegments_used, num_segnames);
605   i = 0;
606   for (ix = 0; ix < segnames_used; ix += VG_(strlen)(segnames + ix) + 1) {
607      VG_(debugLog)(logLevel, "aspacem",
608                    "(%d,%lu) %s\n", i++, ix, segnames + ix);
609   }
610   for (i = 0; i < nsegments_used; i++)
611     show_nsegment( logLevel, i, &nsegments[i] );
612   VG_(debugLog)(logLevel, "aspacem",
613                 ">>>\n");
614}
615
616
617/* Get the filename corresponding to this segment, if known and if it
618   has one. */
619const HChar* VG_(am_get_filename)( NSegment const * seg )
620{
621   Int i;
622   aspacem_assert(seg);
623   i = seg->fnIdx;
624   return (i < 0) ? NULL : segnames + i;
625}
626
627/* Collect up the start addresses of all non-free, non-resvn segments.
628   The interface is a bit strange in order to avoid potential
629   segment-creation races caused by dynamic allocation of the result
630   buffer *starts.
631
632   The function first computes how many entries in the result
633   buffer *starts will be needed.  If this number <= nStarts,
634   they are placed in starts[0..], and the number is returned.
635   If nStarts is not large enough, nothing is written to
636   starts[0..], and the negation of the size is returned.
637
638   Correct use of this function may mean calling it multiple times in
639   order to establish a suitably-sized buffer. */
640
641Int VG_(am_get_segment_starts)( Addr* starts, Int nStarts )
642{
643   Int i, j, nSegs;
644
645   /* don't pass dumbass arguments */
646   aspacem_assert(nStarts > 0);
647
648   nSegs = 0;
649   for (i = 0; i < nsegments_used; i++) {
650      if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
651         continue;
652      nSegs++;
653   }
654
655   if (nSegs > nStarts) {
656      /* The buffer isn't big enough.  Tell the caller how big it needs
657         to be. */
658      return -nSegs;
659   }
660
661   /* There's enough space.  So write into the result buffer. */
662   aspacem_assert(nSegs <= nStarts);
663
664   j = 0;
665   for (i = 0; i < nsegments_used; i++) {
666      if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
667         continue;
668      starts[j] = nsegments[i].start;
669      j++;
670   }
671
672   aspacem_assert(j == nSegs); /* this should not fail */
673   return nSegs;
674}
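
/* Illustrative two-phase use of VG_(am_get_segment_starts) (hypothetical
   caller; a real caller would typically retry with a suitably enlarged
   buffer when the negative "buffer too small" result is returned): */
#if 0
static void example_walk_segments ( void )
{
   Addr starts[64];
   Int  k, n = VG_(am_get_segment_starts)( starts, 64 );
   if (n < 0) {
      /* Buffer too small: -n entries are needed.  Obtain a buffer of at
         least -n Addrs and call again. */
      return;
   }
   for (k = 0; k < n; k++) {
      const NSegment* seg = VG_(am_find_nsegment)( starts[k] );
      (void)seg; /* ... inspect seg ... */
   }
}
#endif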
675
676
677/*-----------------------------------------------------------------*/
678/*---                                                           ---*/
679/*--- Sanity checking and preening of the segment array.        ---*/
680/*---                                                           ---*/
681/*-----------------------------------------------------------------*/
682
683/* Check representational invariants for NSegments. */
684
685static Bool sane_NSegment ( const NSegment* s )
686{
687   if (s == NULL) return False;
688
689   /* No zero sized segments and no wraparounds. */
690   if (s->start >= s->end) return False;
691
692   /* require page alignment */
693   if (!VG_IS_PAGE_ALIGNED(s->start)) return False;
694   if (!VG_IS_PAGE_ALIGNED(s->end+1)) return False;
695
696   switch (s->kind) {
697
698      case SkFree:
699         return
700            s->smode == SmFixed
701            && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
702            && !s->hasR && !s->hasW && !s->hasX && !s->hasT
703            && !s->isCH;
704
705      case SkAnonC: case SkAnonV: case SkShmC:
706         return
707            s->smode == SmFixed
708            && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
709            && (s->kind==SkAnonC ? True : !s->isCH);
710
711      case SkFileC: case SkFileV:
712         return
713            s->smode == SmFixed
714            && (s->fnIdx == -1 ||
715                (s->fnIdx >= 0 && s->fnIdx < segnames_used))
716            && !s->isCH;
717
718      case SkResvn:
719         return
720            s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
721            && !s->hasR && !s->hasW && !s->hasX && !s->hasT
722            && !s->isCH;
723
724      default:
725         return False;
726   }
727}
728
729
730/* Try merging s2 into s1, if possible.  If successful, s1 is
731   modified, and True is returned.  Otherwise s1 is unchanged and
732   False is returned. */
733
734static Bool maybe_merge_nsegments ( NSegment* s1, const NSegment* s2 )
735{
736   if (s1->kind != s2->kind)
737      return False;
738
739   if (s1->end+1 != s2->start)
740      return False;
741
742   /* reject cases which would cause wraparound */
743   if (s1->start > s2->end)
744      return False;
745
746   switch (s1->kind) {
747
748      case SkFree:
749         s1->end = s2->end;
750         return True;
751
752      case SkAnonC: case SkAnonV:
753         if (s1->hasR == s2->hasR && s1->hasW == s2->hasW
754             && s1->hasX == s2->hasX && s1->isCH == s2->isCH) {
755            s1->end = s2->end;
756            s1->hasT |= s2->hasT;
757            return True;
758         }
759         break;
760
761      case SkFileC: case SkFileV:
762         if (s1->hasR == s2->hasR
763             && s1->hasW == s2->hasW && s1->hasX == s2->hasX
764             && s1->dev == s2->dev && s1->ino == s2->ino
765             && s2->offset == s1->offset
766                              + ((ULong)s2->start) - ((ULong)s1->start) ) {
767            s1->end = s2->end;
768            s1->hasT |= s2->hasT;
769            return True;
770         }
771         break;
772
773      case SkShmC:
774         return False;
775
776      case SkResvn:
777         if (s1->smode == SmFixed && s2->smode == SmFixed) {
778            s1->end = s2->end;
779            return True;
780         }
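         /* otherwise fall through to 'default' and return False */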
781
782      default:
783         break;
784
785   }
786
787   return False;
788}
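
/* Worked example of the SkFile merge condition above (illustrative
   addresses): s1 = [0x8000, 0x8fff] at file offset 0 and
   s2 = [0x9000, 0x9fff] at file offset 0x1000 merge, because s2->offset
   equals s1->offset + (s2->start - s1->start) = 0 + 0x1000.  Any other
   offset would mean the two mappings do not view the file contiguously,
   so they must stay separate. */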
789
790
791/* Sanity-check and canonicalise the segment array (merge mergeable
792   segments).  Returns True if any segments were merged. */
793
794static Bool preen_nsegments ( void )
795{
796   Int i, r, w, nsegments_used_old = nsegments_used;
797
798   /* Pass 1: check the segment array covers the entire address space
799      exactly once, and also that each segment is sane. */
800   aspacem_assert(nsegments_used > 0);
801   aspacem_assert(nsegments[0].start == Addr_MIN);
802   aspacem_assert(nsegments[nsegments_used-1].end == Addr_MAX);
803
804   aspacem_assert(sane_NSegment(&nsegments[0]));
805   for (i = 1; i < nsegments_used; i++) {
806      aspacem_assert(sane_NSegment(&nsegments[i]));
807      aspacem_assert(nsegments[i-1].end+1 == nsegments[i].start);
808   }
809
810   /* Pass 2: merge as much as possible, using
811      maybe_merge_nsegments. */
812   w = 0;
813   for (r = 1; r < nsegments_used; r++) {
814      if (maybe_merge_nsegments(&nsegments[w], &nsegments[r])) {
815         /* nothing */
816      } else {
817         w++;
818         if (w != r)
819            nsegments[w] = nsegments[r];
820      }
821   }
822   w++;
823   aspacem_assert(w > 0 && w <= nsegments_used);
824   nsegments_used = w;
825
826   return nsegments_used != nsegments_used_old;
827}
828
829
830/* Check the segment array corresponds with the kernel's view of
831   memory layout.  sync_check_ok returns True if no anomalies were
832   found, else False.  In the latter case the mismatching segments are
833   displayed.
834
835   The general idea is: we get the kernel to show us all its segments
836   and also the gaps in between.  For each such interval, try and find
837   a sequence of appropriate intervals in our segment array which
838   cover or more than cover the kernel's interval, and which all have
839   suitable kinds/permissions etc.
840
841   Although any specific kernel interval is not matched exactly to a
842   valgrind interval or sequence thereof, eventually any disagreement
843   on mapping boundaries will be detected.  This is because, if for
844   example valgrind's intervals cover a greater range than the current
845   kernel interval, it must be the case that a neighbouring free-space
846   interval belonging to valgrind cannot cover the neighbouring
847   free-space interval belonging to the kernel.  So the disagreement
848   is detected.
849
850   In other words, we examine each kernel interval in turn, and check
851   we do not disagree over the range of that interval.  Because all of
852   the address space is examined, any disagreements must eventually be
853   detected.
854*/
855
856static Bool sync_check_ok = False;
857
858static void sync_check_mapping_callback ( Addr addr, SizeT len, UInt prot,
859                                          ULong dev, ULong ino, Off64T offset,
860                                          const HChar* filename )
861{
862   Int  iLo, iHi, i;
863   Bool sloppyXcheck;
864
865   /* If a problem has already been detected, don't continue comparing
866      segments, so as to avoid flooding the output with error
867      messages. */
868#if !defined(VGO_darwin)
869   /* GrP fixme not */
870   if (!sync_check_ok)
871      return;
872#endif
873   if (len == 0)
874      return;
875
876   /* The kernel should not give us wraparounds. */
877   aspacem_assert(addr <= addr + len - 1);
878
879   iLo = find_nsegment_idx( addr );
880   iHi = find_nsegment_idx( addr + len - 1 );
881
882   /* These 5 should be guaranteed by find_nsegment_idx. */
883   aspacem_assert(0 <= iLo && iLo < nsegments_used);
884   aspacem_assert(0 <= iHi && iHi < nsegments_used);
885   aspacem_assert(iLo <= iHi);
886   aspacem_assert(nsegments[iLo].start <= addr );
887   aspacem_assert(nsegments[iHi].end   >= addr + len - 1 );
888
889   /* x86 doesn't differentiate 'x' and 'r' (at least, all except the
890      most recent NX-bit enabled CPUs) and so recent kernels attempt
891      to provide execute protection by placing all executable mappings
892      low down in the address space and then reducing the size of the
893      code segment to prevent code at higher addresses being executed.
894
895      These kernels report which mappings are really executable in
896      the /proc/self/maps output rather than mirroring what was asked
897      for when each mapping was created. In order to cope with this we
898      have a sloppyXcheck mode which we enable on x86 and s390 - in this
899      mode we allow the kernel to report execute permission when we weren't
900      expecting it but not vice versa. */
901#  if defined(VGA_x86) || defined (VGA_s390x)
902   sloppyXcheck = True;
903#  else
904   sloppyXcheck = False;
905#  endif
906
907   /* NSegments iLo .. iHi inclusive should agree with the presented
908      data. */
909   for (i = iLo; i <= iHi; i++) {
910
911      Bool same, cmp_offsets, cmp_devino;
912      UInt seg_prot;
913
914      /* compare the kernel's offering against ours. */
915      same = nsegments[i].kind == SkAnonC
916             || nsegments[i].kind == SkAnonV
917             || nsegments[i].kind == SkFileC
918             || nsegments[i].kind == SkFileV
919             || nsegments[i].kind == SkShmC;
920
921      seg_prot = 0;
922      if (nsegments[i].hasR) seg_prot |= VKI_PROT_READ;
923      if (nsegments[i].hasW) seg_prot |= VKI_PROT_WRITE;
924      if (nsegments[i].hasX) seg_prot |= VKI_PROT_EXEC;
925
926      cmp_offsets
927         = nsegments[i].kind == SkFileC || nsegments[i].kind == SkFileV;
928
929      cmp_devino
930         = nsegments[i].dev != 0 || nsegments[i].ino != 0;
931
932      /* Consider other reasons to not compare dev/inode */
933#if defined(VGO_linux)
934      /* bproc does some godawful hack on /dev/zero at process
935         migration, which changes the name of it, and its dev & ino */
936      if (filename && 0==VG_(strcmp)(filename, "/dev/zero (deleted)"))
937         cmp_devino = False;
938
939      /* hack apparently needed on MontaVista Linux */
940      if (filename && VG_(strstr)(filename, "/.lib-ro/"))
941         cmp_devino = False;
942#endif
943
944#if defined(VGO_darwin)
945      // GrP fixme kernel info doesn't have dev/inode
946      cmp_devino = False;
947
948      // GrP fixme V and kernel don't agree on offsets
949      cmp_offsets = False;
950#endif
951
952      /* If we are doing sloppy execute permission checks then we
953         allow segment to have X permission when we weren't expecting
954         it (but not vice versa) so if the kernel reported execute
955         permission then pretend that this segment has it regardless
956         of what we were expecting. */
957      if (sloppyXcheck && (prot & VKI_PROT_EXEC) != 0) {
958         seg_prot |= VKI_PROT_EXEC;
959      }
960
961      same = same
962             && seg_prot == prot
963             && (cmp_devino
964                   ? (nsegments[i].dev == dev && nsegments[i].ino == ino)
965                   : True)
966             && (cmp_offsets
967                   ? nsegments[i].start-nsegments[i].offset == addr-offset
968                   : True);
969      if (!same) {
970         Addr start = addr;
971         Addr end = start + len - 1;
972         HChar len_buf[20];
973         show_len_concisely(len_buf, start, end);
974
975         sync_check_ok = False;
976
977         VG_(debugLog)(
978            0,"aspacem",
979              "segment mismatch: V's seg 1st, kernel's 2nd:\n");
980         show_nsegment_full( 0, i, &nsegments[i] );
981         VG_(debugLog)(0,"aspacem",
982            "...: .... %010llx-%010llx %s %c%c%c.. ....... "
983            "d=0x%03llx i=%-7lld o=%-7lld (.) m=. %s\n",
984            (ULong)start, (ULong)end, len_buf,
985            prot & VKI_PROT_READ  ? 'r' : '-',
986            prot & VKI_PROT_WRITE ? 'w' : '-',
987            prot & VKI_PROT_EXEC  ? 'x' : '-',
988            dev, ino, offset, filename ? filename : "(none)" );
989
990         return;
991      }
992   }
993
994   /* Looks harmless.  Keep going. */
995   return;
996}
997
998static void sync_check_gap_callback ( Addr addr, SizeT len )
999{
1000   Int iLo, iHi, i;
1001
1002   /* If a problem has already been detected, don't continue comparing
1003      segments, so as to avoid flooding the output with error
1004      messages. */
1005#if !defined(VGO_darwin)
1006   /* GrP fixme not */
1007   if (!sync_check_ok)
1008      return;
1009#endif
1010   if (len == 0)
1011      return;
1012
1013   /* The kernel should not give us wraparounds. */
1014   aspacem_assert(addr <= addr + len - 1);
1015
1016   iLo = find_nsegment_idx( addr );
1017   iHi = find_nsegment_idx( addr + len - 1 );
1018
1019   /* These 5 should be guaranteed by find_nsegment_idx. */
1020   aspacem_assert(0 <= iLo && iLo < nsegments_used);
1021   aspacem_assert(0 <= iHi && iHi < nsegments_used);
1022   aspacem_assert(iLo <= iHi);
1023   aspacem_assert(nsegments[iLo].start <= addr );
1024   aspacem_assert(nsegments[iHi].end   >= addr + len - 1 );
1025
1026   /* NSegments iLo .. iHi inclusive should agree with the presented
1027      data. */
1028   for (i = iLo; i <= iHi; i++) {
1029
1030      Bool same;
1031
1032      /* compare the kernel's offering against ours. */
1033      same = nsegments[i].kind == SkFree
1034             || nsegments[i].kind == SkResvn;
1035
1036      if (!same) {
1037         Addr start = addr;
1038         Addr end = start + len - 1;
1039         HChar len_buf[20];
1040         show_len_concisely(len_buf, start, end);
1041
1042         sync_check_ok = False;
1043
1044         VG_(debugLog)(
1045            0,"aspacem",
1046              "segment mismatch: V's gap 1st, kernel's 2nd:\n");
1047         show_nsegment_full( 0, i, &nsegments[i] );
1048         VG_(debugLog)(0,"aspacem",
1049            "   : .... %010llx-%010llx %s\n",
1050            (ULong)start, (ULong)end, len_buf);
1051         return;
1052      }
1053   }
1054
1055   /* Looks harmless.  Keep going. */
1056   return;
1057}
1058
1059
1060/* Sanity check: check that Valgrind and the kernel agree on the
1061   address space layout.  Prints offending segments and call point if
1062   a discrepancy is detected, but does not abort the system.  Returned
1063   Bool is False if a discrepancy was found. */
1064
1065Bool VG_(am_do_sync_check) ( const HChar* fn,
1066                             const HChar* file, Int line )
1067{
1068   sync_check_ok = True;
1069   if (0)
1070      VG_(debugLog)(0,"aspacem", "do_sync_check %s:%d\n", file,line);
1071   parse_procselfmaps( sync_check_mapping_callback,
1072                       sync_check_gap_callback );
1073   if (!sync_check_ok) {
1074      VG_(debugLog)(0,"aspacem",
1075                      "sync check at %s:%d (%s): FAILED\n",
1076                      file, line, fn);
1077      VG_(debugLog)(0,"aspacem", "\n");
1078
1079#     if 0
1080      {
1081         HChar buf[100];   // large enough
1082         VG_(am_show_nsegments)(0,"post syncheck failure");
1083         VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
1084         VG_(system)(buf);
1085      }
1086#     endif
1087
1088   }
1089   return sync_check_ok;
1090}
1091
1092/* Hook to allow sanity checks to be done from aspacemgr-common.c. */
1093void ML_(am_do_sanity_check)( void )
1094{
1095   AM_SANITY_CHECK;
1096}
1097
1098
1099/*-----------------------------------------------------------------*/
1100/*---                                                           ---*/
1101/*--- Low level access / modification of the segment array.     ---*/
1102/*---                                                           ---*/
1103/*-----------------------------------------------------------------*/
1104
1105/* Binary search the interval array for a given address.  Since the
1106   array covers the entire address space the search cannot fail.  The
1107   _WRK function does the real work.  Its caller (just below) caches
1108   the results thereof, to save time.  With an N_CACHE of 63 we measured
1109   a hit rate exceeding 90% when running OpenOffice (N_CACHE is now 131).
1110
1111   Re ">> 12", it doesn't matter that the page size of some targets
1112   might not be 2^12.  Really "(a >> 12) % N_CACHE" is merely
1113   a hash function, and the actual cache entry is always validated
1114   correctly against the selected cache entry before use.
1115*/
1116/* Don't call find_nsegment_idx_WRK; use find_nsegment_idx instead. */
1117__attribute__((noinline))
1118static Int find_nsegment_idx_WRK ( Addr a )
1119{
1120   Addr a_mid_lo, a_mid_hi;
1121   Int  mid,
1122        lo = 0,
1123        hi = nsegments_used-1;
1124   while (True) {
1125      /* current unsearched space is from lo to hi, inclusive. */
1126      if (lo > hi) {
1127         /* Not found.  This can't happen. */
1128         ML_(am_barf)("find_nsegment_idx: not found");
1129      }
1130      mid      = (lo + hi) / 2;
1131      a_mid_lo = nsegments[mid].start;
1132      a_mid_hi = nsegments[mid].end;
1133
1134      if (a < a_mid_lo) { hi = mid-1; continue; }
1135      if (a > a_mid_hi) { lo = mid+1; continue; }
1136      aspacem_assert(a >= a_mid_lo && a <= a_mid_hi);
1137      aspacem_assert(0 <= mid && mid < nsegments_used);
1138      return mid;
1139   }
1140}
1141
1142inline static Int find_nsegment_idx ( Addr a )
1143{
1144#  define N_CACHE 131 /*prime*/
1145   static Addr cache_pageno[N_CACHE];
1146   static Int  cache_segidx[N_CACHE];
1147   static Bool cache_inited = False;
1148
1149   static UWord n_q = 0;
1150   static UWord n_m = 0;
1151
1152   UWord ix;
1153
1154   if (LIKELY(cache_inited)) {
1155      /* do nothing */
1156   } else {
1157      for (ix = 0; ix < N_CACHE; ix++) {
1158         cache_pageno[ix] = 0;
1159         cache_segidx[ix] = -1;
1160      }
1161      cache_inited = True;
1162   }
1163
1164   ix = (a >> 12) % N_CACHE;
1165
1166   n_q++;
1167   if (0 && 0 == (n_q & 0xFFFF))
1168      VG_(debugLog)(0,"xxx","find_nsegment_idx: %lu %lu\n", n_q, n_m);
1169
1170   if ((a >> 12) == cache_pageno[ix]
1171       && cache_segidx[ix] >= 0
1172       && cache_segidx[ix] < nsegments_used
1173       && nsegments[cache_segidx[ix]].start <= a
1174       && a <= nsegments[cache_segidx[ix]].end) {
1175      /* hit */
1176      /* aspacem_assert( cache_segidx[ix] == find_nsegment_idx_WRK(a) ); */
1177      return cache_segidx[ix];
1178   }
1179   /* miss */
1180   n_m++;
1181   cache_segidx[ix] = find_nsegment_idx_WRK(a);
1182   cache_pageno[ix] = a >> 12;
1183   return cache_segidx[ix];
1184#  undef N_CACHE
1185}
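
/* Example of the cache indexing (illustrative address): for
   a = 0x08048123, a >> 12 = 0x8048 = 32840, so ix = 32840 % 131 = 90.
   Any other address on the same 4KB-aligned "page" hashes to the same
   slot, and the cached entry is only trusted after checking that the
   remembered segment really does contain 'a'. */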
1186
1187
1188
1189/* Finds the segment containing 'a'.  Only returns file/anon/resvn
1190   segments.  This returns a 'NSegment const *' - a pointer to
1191   readonly data. */
1192NSegment const * VG_(am_find_nsegment) ( Addr a )
1193{
1194   Int i = find_nsegment_idx(a);
1195   aspacem_assert(i >= 0 && i < nsegments_used);
1196   aspacem_assert(nsegments[i].start <= a);
1197   aspacem_assert(a <= nsegments[i].end);
1198   if (nsegments[i].kind == SkFree)
1199      return NULL;
1200   else
1201      return &nsegments[i];
1202}
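
/* Illustrative use of the lookup (hypothetical helper, not part of
   aspacem): decide whether 'a' lies inside a readable client file
   mapping. */
#if 0
static Bool example_is_readable_client_file ( Addr a )
{
   const NSegment* seg = VG_(am_find_nsegment)( a );
   if (seg == NULL)
      return False;            /* 'a' lies in an SkFree gap */
   return seg->kind == SkFileC && seg->hasR;
}
#endif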
1203
1204
1205/* Map segment pointer to segment index. */
1206static Int segAddr_to_index ( const NSegment* seg )
1207{
1208   aspacem_assert(seg >= &nsegments[0] && seg < &nsegments[nsegments_used]);
1209
1210   return seg - &nsegments[0];
1211}
1212
1213
1214/* Find the next segment along from 'here', if it is a file/anon/resvn
1215   segment. */
1216NSegment const * VG_(am_next_nsegment) ( const NSegment* here, Bool fwds )
1217{
1218   Int i = segAddr_to_index(here);
1219
1220   if (fwds) {
1221      i++;
1222      if (i >= nsegments_used)
1223         return NULL;
1224   } else {
1225      i--;
1226      if (i < 0)
1227         return NULL;
1228   }
1229   switch (nsegments[i].kind) {
1230      case SkFileC: case SkFileV: case SkShmC:
1231      case SkAnonC: case SkAnonV: case SkResvn:
1232         return &nsegments[i];
1233      default:
1234         break;
1235   }
1236   return NULL;
1237}
1238
1239
1240/* Trivial fn: return the total amount of space in anonymous mappings,
1241   both for V and the client.  Is used for printing stats in
1242   out-of-memory messages. */
1243ULong VG_(am_get_anonsize_total)( void )
1244{
1245   Int   i;
1246   ULong total = 0;
1247   for (i = 0; i < nsegments_used; i++) {
1248      if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkAnonV) {
1249         total += (ULong)nsegments[i].end
1250                  - (ULong)nsegments[i].start + 1ULL;
1251      }
1252   }
1253   return total;
1254}
1255
1256
1257/* Test if a piece of memory is addressable by client or by valgrind with at
1258   least the "prot" protection permissions by examining the underlying
1259   segments.  If client && freeOk is True then SkFree areas are also allowed.
1260*/
1261static
1262Bool is_valid_for( Bool client, Addr start, SizeT len, UInt prot, Bool freeOk )
1263{
1264   Int  i, iLo, iHi;
1265   Bool needR, needW, needX;
1266
1267   if (len == 0)
1268      return True; /* somewhat dubious case */
1269   if (start + len < start)
1270      return False; /* reject wraparounds */
1271
1272   needR = toBool(prot & VKI_PROT_READ);
1273   needW = toBool(prot & VKI_PROT_WRITE);
1274   needX = toBool(prot & VKI_PROT_EXEC);
1275
1276   iLo = find_nsegment_idx(start);
1277   aspacem_assert(start >= nsegments[iLo].start);
1278
1279   if (start+len-1 <= nsegments[iLo].end) {
1280      /* This is a speedup hack which avoids calling find_nsegment_idx
1281         a second time when possible.  It is always correct to just
1282         use the "else" clause below, but is_valid_for is called a lot
1283         by the leak checker, so avoiding pointless calls
1284         to find_nsegment_idx, which can be expensive, is helpful. */
1285      iHi = iLo;
1286   } else {
1287      iHi = find_nsegment_idx(start + len - 1);
1288   }
1289
1290   if (client) {
1291      for (i = iLo; i <= iHi; i++) {
1292         if ( (nsegments[i].kind == SkFileC
1293               || nsegments[i].kind == SkAnonC
1294               || nsegments[i].kind == SkShmC
1295               || (nsegments[i].kind == SkFree  && freeOk)
1296               || (nsegments[i].kind == SkResvn && freeOk))
1297              && (needR ? nsegments[i].hasR : True)
1298              && (needW ? nsegments[i].hasW : True)
1299              && (needX ? nsegments[i].hasX : True) ) {
1300            /* ok */
1301         } else {
1302            return False;
1303         }
1304      }
1305   } else {
1306      for (i = iLo; i <= iHi; i++) {
1307         if ( (nsegments[i].kind == SkFileV
1308               || nsegments[i].kind == SkAnonV)
1309              && (needR ? nsegments[i].hasR : True)
1310              && (needW ? nsegments[i].hasW : True)
1311              && (needX ? nsegments[i].hasX : True) ) {
1312            /* ok */
1313         } else {
1314            return False;
1315         }
1316      }
1317   }
1318   return True;
1319}
1320
1321/* Test if a piece of memory is addressable by the client with at
1322   least the "prot" protection permissions by examining the underlying
1323   segments. */
1324Bool VG_(am_is_valid_for_client)( Addr start, SizeT len,
1325                                  UInt prot )
1326{
1327   return is_valid_for(/* client */ True,
1328                       start, len, prot, False/*free not OK*/ );
1329}
1330
1331/* Variant of VG_(am_is_valid_for_client) which allows free areas to
1332   be considered part of the client's addressable space.  It also
1333   considers reservations to be allowable, since from the client's
1334   point of view they don't exist. */
1335Bool VG_(am_is_valid_for_client_or_free_or_resvn)
1336   ( Addr start, SizeT len, UInt prot )
1337{
1338   return is_valid_for(/* client */ True,
1339                        start, len, prot, True/*free is OK*/ );
1340}
1341
1342
1343Bool VG_(am_is_valid_for_valgrind) ( Addr start, SizeT len, UInt prot )
1344{
1345   return is_valid_for(/* client */ False,
1346                        start, len, prot, False/*irrelevant*/ );
1347}
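
/* Illustrative use of the addressability checks (hypothetical helper;
   real callers include the syscall wrappers and the leak checker):
   verify that a client-supplied buffer may be read before copying
   'len' bytes out of it. */
#if 0
static Bool example_client_buffer_is_readable ( Addr buf, SizeT len )
{
   return VG_(am_is_valid_for_client)( buf, len, VKI_PROT_READ );
}
#endif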
1348
1349
1350/* Returns True if any part of the address range is marked as having
1351   translations made from it.  This is used to determine when to
1352   discard code, so if in doubt return True. */
1353
1354static Bool any_Ts_in_range ( Addr start, SizeT len )
1355{
1356   Int iLo, iHi, i;
1357   aspacem_assert(len > 0);
1358   aspacem_assert(start + len > start);
1359   iLo = find_nsegment_idx(start);
1360   iHi = find_nsegment_idx(start + len - 1);
1361   for (i = iLo; i <= iHi; i++) {
1362      if (nsegments[i].hasT)
1363         return True;
1364   }
1365   return False;
1366}
1367
1368
1369/*-----------------------------------------------------------------*/
1370/*---                                                           ---*/
1371/*--- Modifying the segment array, and constructing segments.   ---*/
1372/*---                                                           ---*/
1373/*-----------------------------------------------------------------*/
1374
1375/* Split the segment containing 'a' into two, so that 'a' is
1376   guaranteed to be the start of a new segment.  If 'a' is already the
1377   start of a segment, do nothing. */
1378
1379static void split_nsegment_at ( Addr a )
1380{
1381   Int i, j;
1382
1383   aspacem_assert(a > 0);
1384   aspacem_assert(VG_IS_PAGE_ALIGNED(a));
1385
1386   i = find_nsegment_idx(a);
1387   aspacem_assert(i >= 0 && i < nsegments_used);
1388
1389   if (nsegments[i].start == a)
1390      /* 'a' is already the start point of a segment, so nothing to be
1391         done. */
1392      return;
1393
1394   /* else we have to slide the segments upwards to make a hole */
1395   if (nsegments_used >= VG_N_SEGMENTS)
1396      ML_(am_barf_toolow)("VG_N_SEGMENTS");
1397   for (j = nsegments_used-1; j > i; j--)
1398      nsegments[j+1] = nsegments[j];
1399   nsegments_used++;
1400
1401   nsegments[i+1]       = nsegments[i];
1402   nsegments[i+1].start = a;
1403   nsegments[i].end     = a-1;
1404
1405   if (nsegments[i].kind == SkFileV || nsegments[i].kind == SkFileC)
1406      nsegments[i+1].offset
1407         += ((ULong)nsegments[i+1].start) - ((ULong)nsegments[i].start);
1408
1409   aspacem_assert(sane_NSegment(&nsegments[i]));
1410   aspacem_assert(sane_NSegment(&nsegments[i+1]));
1411}
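
/* Worked example (illustrative addresses): if segment i spans
   [0x10000, 0x1ffff] and split_nsegment_at(0x18000) is called, the array
   gains one entry: segment i becomes [0x10000, 0x17fff] and the new
   segment i+1 becomes [0x18000, 0x1ffff], with i+1's file offset bumped
   by 0x8000 if it is a file mapping. */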
1412
1413
1414/* Do the minimum amount of segment splitting necessary to ensure that
1415   sLo is the first address denoted by some segment and sHi is the
1416   highest address denoted by some segment.  Returns the indices
1417   of the lowest and highest segments in the range. */
1418
1419static
1420void split_nsegments_lo_and_hi ( Addr sLo, Addr sHi,
1421                                 /*OUT*/Int* iLo,
1422                                 /*OUT*/Int* iHi )
1423{
1424   aspacem_assert(sLo < sHi);
1425   aspacem_assert(VG_IS_PAGE_ALIGNED(sLo));
1426   aspacem_assert(VG_IS_PAGE_ALIGNED(sHi+1));
1427
1428   if (sLo > 0)
1429      split_nsegment_at(sLo);
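   /* The 'sHi < sHi+1' test avoids splitting past the top of the address
      space: if sHi is Addr_MAX, sHi+1 wraps to zero and no split is
      needed. */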
1430   if (sHi < sHi+1)
1431      split_nsegment_at(sHi+1);
1432
1433   *iLo = find_nsegment_idx(sLo);
1434   *iHi = find_nsegment_idx(sHi);
1435   aspacem_assert(0 <= *iLo && *iLo < nsegments_used);
1436   aspacem_assert(0 <= *iHi && *iHi < nsegments_used);
1437   aspacem_assert(*iLo <= *iHi);
1438   aspacem_assert(nsegments[*iLo].start == sLo);
1439   aspacem_assert(nsegments[*iHi].end == sHi);
1440   /* Not that I'm overly paranoid or anything, definitely not :-) */
1441}
1442
1443
1444/* Add SEG to the collection, deleting/truncating any it overlaps.
1445   This deals with all the tricky cases of splitting up segments as
1446   needed. */
1447
1448static void add_segment ( const NSegment* seg )
1449{
1450   Int  i, iLo, iHi, delta;
1451   Bool segment_is_sane;
1452
1453   Addr sStart = seg->start;
1454   Addr sEnd   = seg->end;
1455
1456   aspacem_assert(sStart <= sEnd);
1457   aspacem_assert(VG_IS_PAGE_ALIGNED(sStart));
1458   aspacem_assert(VG_IS_PAGE_ALIGNED(sEnd+1));
1459
1460   segment_is_sane = sane_NSegment(seg);
1461   if (!segment_is_sane) show_nsegment_full(0,-1,seg);
1462   aspacem_assert(segment_is_sane);
1463
1464   split_nsegments_lo_and_hi( sStart, sEnd, &iLo, &iHi );
1465
1466   /* Now iLo .. iHi inclusive is the range of segment indices which
1467      seg will replace.  If we're replacing more than one segment,
1468      slide those above the range down to fill the hole. */
1469   delta = iHi - iLo;
1470   aspacem_assert(delta >= 0);
1471   if (delta > 0) {
1472      for (i = iLo; i < nsegments_used-delta; i++)
1473         nsegments[i] = nsegments[i+delta];
1474      nsegments_used -= delta;
1475   }
1476
1477   nsegments[iLo] = *seg;
1478
1479   (void)preen_nsegments();
1480   if (0) VG_(am_show_nsegments)(0,"AFTER preen (add_segment)");
1481}
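
/* Worked example (illustrative addresses): suppose the range
   [0x0, 0x2ffff] is currently covered by one free segment and add_segment
   is given an SkAnonC segment [0x10000, 0x1ffff].  The free segment is
   split at 0x10000 and 0x20000, the middle piece is overwritten with the
   new segment, and preen_nsegments then re-merges anything mergeable,
   leaving free / anon / free over that range. */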
1482
1483
1484/* Clear out an NSegment record. */
1485
1486static void init_nsegment ( /*OUT*/NSegment* seg )
1487{
1488   seg->kind     = SkFree;
1489   seg->start    = 0;
1490   seg->end      = 0;
1491   seg->smode    = SmFixed;
1492   seg->dev      = 0;
1493   seg->ino      = 0;
1494   seg->mode     = 0;
1495   seg->offset   = 0;
1496   seg->fnIdx    = -1;
1497   seg->hasR = seg->hasW = seg->hasX = seg->hasT = seg->isCH = False;
1498}
1499
1500/* Make an NSegment which holds a reservation. */
1501
1502static void init_resvn ( /*OUT*/NSegment* seg, Addr start, Addr end )
1503{
1504   aspacem_assert(start < end);
1505   aspacem_assert(VG_IS_PAGE_ALIGNED(start));
1506   aspacem_assert(VG_IS_PAGE_ALIGNED(end+1));
1507   init_nsegment(seg);
1508   seg->kind  = SkResvn;
1509   seg->start = start;
1510   seg->end   = end;
1511}
1512
1513
1514/*-----------------------------------------------------------------*/
1515/*---                                                           ---*/
1516/*--- Startup, including reading /proc/self/maps.               ---*/
1517/*---                                                           ---*/
1518/*-----------------------------------------------------------------*/
1519
1520static void read_maps_callback ( Addr addr, SizeT len, UInt prot,
1521                                 ULong dev, ULong ino, Off64T offset,
1522                                 const HChar* filename )
1523{
1524   NSegment seg;
1525   init_nsegment( &seg );
1526   seg.start  = addr;
1527   seg.end    = addr+len-1;
1528   seg.dev    = dev;
1529   seg.ino    = ino;
1530   seg.offset = offset;
1531   seg.hasR   = toBool(prot & VKI_PROT_READ);
1532   seg.hasW   = toBool(prot & VKI_PROT_WRITE);
1533   seg.hasX   = toBool(prot & VKI_PROT_EXEC);
1534   seg.hasT   = False;
1535
1536   /* Don't use the presence of a filename in the initial
1537      /proc/self/maps to decide whether the segment is an AnonV or
1538      FileV segment, as some systems don't report the filename.  Use
1539      the device and inode numbers instead.  Fixes bug #124528. */
1540   seg.kind = SkAnonV;
1541   if (dev != 0 && ino != 0)
1542      seg.kind = SkFileV;
1543
1544#  if defined(VGO_darwin)
1545   // GrP fixme no dev/ino on darwin
1546   if (offset != 0)
1547      seg.kind = SkFileV;
1548#  endif // defined(VGO_darwin)
1549
1550#  if defined(VGP_arm_linux)
1551   /* The standard handling of entries read from /proc/self/maps will
1552      cause the faked up commpage segment to have type SkAnonV, which
1553      is a problem because it contains code we want the client to
1554      execute, and so later m_translate will segfault the client when
1555      it tries to go in there.  Hence change the ownership of it here
1556      to the client (SkAnonC).  The least-worst kludge I could think
1557      of. */
1558   if (addr == ARM_LINUX_FAKE_COMMPAGE_START
1559       && addr + len == ARM_LINUX_FAKE_COMMPAGE_END1
1560       && seg.kind == SkAnonV)
1561      seg.kind = SkAnonC;
1562#  endif // defined(VGP_arm_linux)
1563
1564   if (filename)
1565      seg.fnIdx = allocate_segname( filename );
1566
1567   if (0) show_nsegment( 2,0, &seg );
1568   add_segment( &seg );
1569}
1570
1571/* See description in pub_core_aspacemgr.h */
1572Addr VG_(am_startup) ( Addr sp_at_startup )
1573{
1574   NSegment seg;
1575   Addr     suggested_clstack_end;
1576
1577   aspacem_assert(sizeof(Word)   == sizeof(void*));
1578   aspacem_assert(sizeof(Addr)   == sizeof(void*));
1579   aspacem_assert(sizeof(SizeT)  == sizeof(void*));
1580   aspacem_assert(sizeof(SSizeT) == sizeof(void*));
1581
1582   /* Check that we can store the largest imaginable dev, ino and
1583      offset numbers in an NSegment. */
1584   aspacem_assert(sizeof(seg.dev)    == 8);
1585   aspacem_assert(sizeof(seg.ino)    == 8);
1586   aspacem_assert(sizeof(seg.offset) == 8);
1587   aspacem_assert(sizeof(seg.mode)   == 4);
1588
1589   /* Add a single interval covering the entire address space. */
1590   init_nsegment(&seg);
1591   seg.kind        = SkFree;
1592   seg.start       = Addr_MIN;
1593   seg.end         = Addr_MAX;
1594   nsegments[0]    = seg;
1595   nsegments_used  = 1;
1596
1597   aspacem_minAddr = VG_(clo_aspacem_minAddr);
1598
1599#if defined(VGO_darwin)
1600
1601# if VG_WORDSIZE == 4
1602   aspacem_maxAddr = (Addr) 0xffffffff;
1603
1604   aspacem_cStart = aspacem_minAddr;
1605   aspacem_vStart = 0xf0000000;  // 0xc0000000..0xf0000000 available
1606# else
1607   aspacem_maxAddr = (Addr) 0x7fffffffffff;
1608
1609   aspacem_cStart = aspacem_minAddr;
1610   aspacem_vStart = 0x700000000000; // 0x7000:00000000..0x7fff:5c000000 avail
1611   // 0x7fff:5c000000..0x7fff:ffe00000? is stack, dyld, shared cache
1612# endif
1613
1614   suggested_clstack_end = -1; // ignored; Mach-O specifies its stack
1615
1616#else /* !defined(VGO_darwin) */
1617
1618   /* Establish address limits and block out unusable parts
1619      accordingly. */
1620
1621   VG_(debugLog)(2, "aspacem",
1622                    "        sp_at_startup = 0x%010llx (supplied)\n",
1623                    (ULong)sp_at_startup );
1624
1625#  if VG_WORDSIZE == 8
1626     aspacem_maxAddr = (Addr)0x1000000000ULL - 1; // 64G
1627#    ifdef ENABLE_INNER
1628     { Addr cse = VG_PGROUNDDN( sp_at_startup ) - 1;
1629       if (aspacem_maxAddr > cse)
1630          aspacem_maxAddr = cse;
1631     }
1632#    endif
1633#  else
1634     aspacem_maxAddr = VG_PGROUNDDN( sp_at_startup ) - 1;
1635#  endif
1636
1637   aspacem_cStart = aspacem_minAddr;
1638   aspacem_vStart = VG_PGROUNDUP(aspacem_minAddr
1639                                 + (aspacem_maxAddr - aspacem_minAddr + 1) / 2);
1640#  ifdef ENABLE_INNER
1641   aspacem_vStart -= 0x10000000; // 256M
1642#  endif
1643
1644   suggested_clstack_end = aspacem_maxAddr - 16*1024*1024ULL
1645                                           + VKI_PAGE_SIZE;
1646
1647#endif /* #else of 'defined(VGO_darwin)' */
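
   /* Worked example (illustrative numbers only, ignoring ENABLE_INNER):
      on a 32-bit Linux system with VG_(clo_aspacem_minAddr) = 0x04000000
      and sp_at_startup rounding down to 0xbe000000, the above gives

         aspacem_maxAddr = 0xbdffffff
         aspacem_cStart  = 0x04000000
         aspacem_vStart  = VG_PGROUNDUP(0x04000000 + 0xba000000/2)
                         = 0x61000000

      (0xba000000 being maxAddr - minAddr + 1), so the client gets
      roughly the lower half of the usable range and Valgrind's own
      mappings start roughly half way up. */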
1648
1649   aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_minAddr));
1650   aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_maxAddr + 1));
1651   aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_cStart));
1652   aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_vStart));
1653   aspacem_assert(VG_IS_PAGE_ALIGNED(suggested_clstack_end + 1));
1654
1655   VG_(debugLog)(2, "aspacem",
1656                    "              minAddr = 0x%010llx (computed)\n",
1657                    (ULong)aspacem_minAddr);
1658   VG_(debugLog)(2, "aspacem",
1659                    "              maxAddr = 0x%010llx (computed)\n",
1660                    (ULong)aspacem_maxAddr);
1661   VG_(debugLog)(2, "aspacem",
1662                    "               cStart = 0x%010llx (computed)\n",
1663                    (ULong)aspacem_cStart);
1664   VG_(debugLog)(2, "aspacem",
1665                    "               vStart = 0x%010llx (computed)\n",
1666                    (ULong)aspacem_vStart);
1667   VG_(debugLog)(2, "aspacem",
1668                    "suggested_clstack_end = 0x%010llx (computed)\n",
1669                    (ULong)suggested_clstack_end);
1670
1671   if (aspacem_cStart > Addr_MIN) {
1672      init_resvn(&seg, Addr_MIN, aspacem_cStart-1);
1673      add_segment(&seg);
1674   }
1675   if (aspacem_maxAddr < Addr_MAX) {
1676      init_resvn(&seg, aspacem_maxAddr+1, Addr_MAX);
1677      add_segment(&seg);
1678   }
1679
1680   /* Create a 1-page reservation at the notional initial
1681      client/valgrind boundary.  This isn't strictly necessary, but
1682      because the advisor does first-fit and starts its searches for
1683      valgrind allocations at the boundary, it is needed in practice
1684      to get those allocations started in the right place. */
1685   init_resvn(&seg, aspacem_vStart,  aspacem_vStart + VKI_PAGE_SIZE - 1);
1686   add_segment(&seg);
1687
1688   VG_(am_show_nsegments)(2, "Initial layout");
1689
1690   VG_(debugLog)(2, "aspacem", "Reading /proc/self/maps\n");
1691   parse_procselfmaps( read_maps_callback, NULL );
1692   /* NB: on arm-linux, parse_procselfmaps automagically kludges up
1693      (iow, hands to its callbacks) a description of the ARM Commpage,
1694      since that's not listed in /proc/self/maps (kernel bug IMO).  We
1695      have to fake up its existence in parse_procselfmaps and not
1696      merely add it here as an extra segment, because doing the latter
1697      causes sync checking to fail: we see we have an extra segment in
1698      the segments array, which isn't listed in /proc/self/maps.
1699      Hence we must make it appear that /proc/self/maps contained this
1700      segment all along.  Sigh. */
1701
1702   VG_(am_show_nsegments)(2, "With contents of /proc/self/maps");
1703
1704   AM_SANITY_CHECK;
1705   return suggested_clstack_end;
1706}
1707
1708
1709/*-----------------------------------------------------------------*/
1710/*---                                                           ---*/
1711/*--- The core query-notify mechanism.                          ---*/
1712/*---                                                           ---*/
1713/*-----------------------------------------------------------------*/
1714
1715/* Query aspacem to ask where a mapping should go. */
1716
1717Addr VG_(am_get_advisory) ( const MapRequest*  req,
1718                            Bool  forClient,
1719                            /*OUT*/Bool* ok )
1720{
1721   /* This function implements allocation policy.
1722
1723      The nature of the allocation request is determined by req, which
1724      specifies the start and length of the request and indicates
1725      whether the start address is mandatory, a hint, or irrelevant,
1726      and by forClient, which says whether this is for the client or
1727      for V.
1728
1729      Return values: the request can be vetoed (*ok is set to False),
1730      in which case the caller should not attempt to proceed with
1731      making the mapping.  Otherwise, *ok is set to True, the caller
1732      may proceed, and the preferred address at which the mapping
1733      should happen is returned.
1734
1735      Note that this is an advisory system only: the kernel can in
1736      fact do whatever it likes as far as placement goes, and we have
1737      no absolute control over it.
1738
1739      Allocations will never be granted in a reserved area.
1740
1741      The Default Policy is:
1742
1743        Search the address space for two free intervals: one of them
1744        big enough to contain the request without regard to the
1745        specified address (viz, as if it was a floating request) and
1746        the other being able to contain the request at the specified
1747        address (viz, as if were a fixed request).  Then, depending on
1748        the outcome of the search and the kind of request made, decide
1749        whether the request is allowable and what address to advise.
1750
1751      The Default Policy is overridden by Policy Exception #1:
1752
1753        If the request is for a fixed client map, we are prepared to
1754        grant it providing all areas inside the request are either
1755        free, reservations, or mappings belonging to the client.  In
1756        other words we are prepared to let the client trash its own
1757        mappings if it wants to.
1758
1759      The Default Policy is overridden by Policy Exception #2:
1760
1761        If the request is for a hinted client map, we are prepared to
1762        grant it providing all areas inside the request are either
1763        free or reservations.  In other words we are prepared to let
1764        the client have a hinted mapping anywhere it likes provided
1765        it does not trash either any of its own mappings or any of
1766        valgrind's mappings.
1767   */
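
   /* For instance (an illustrative walk-through only, not a definitive
      trace): for a Valgrind-side MAny request of 0x2000 bytes, the
      search below starts at the segment containing aspacem_vStart and
      scans circularly through the array; the first SkFree segment
      whose length is at least 0x2000 becomes floatIdx, and its start
      address is what gets advised. */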
1768   Int  i, j;
1769   Addr holeStart, holeEnd, holeLen;
1770   Bool fixed_not_required;
1771
1772   Addr startPoint = forClient ? aspacem_cStart : aspacem_vStart;
1773
1774   Addr reqStart = req->rkind==MAny ? 0 : req->start;
1775   Addr reqEnd   = reqStart + req->len - 1;
1776   Addr reqLen   = req->len;
1777
1778   /* These hold indices for segments found during search, or -1 if not
1779      found. */
1780   Int floatIdx = -1;
1781   Int fixedIdx = -1;
1782
1783   aspacem_assert(nsegments_used > 0);
1784
1785   if (0) {
1786      VG_(am_show_nsegments)(0,"getAdvisory");
1787      VG_(debugLog)(0,"aspacem", "getAdvisory 0x%llx %llu\n",
1788                      (ULong)req->start, (ULong)req->len);
1789   }
1790
1791   /* Reject zero-length requests */
1792   if (req->len == 0) {
1793      *ok = False;
1794      return 0;
1795   }
1796
1797   /* Reject wraparounds */
1798   if ((req->rkind==MFixed || req->rkind==MHint)
1799       && req->start + req->len < req->start) {
1800      *ok = False;
1801      return 0;
1802   }
1803
1804   /* ------ Implement Policy Exception #1 ------ */
1805
1806   if (forClient && req->rkind == MFixed) {
1807      Int  iLo   = find_nsegment_idx(reqStart);
1808      Int  iHi   = find_nsegment_idx(reqEnd);
1809      Bool allow = True;
1810      for (i = iLo; i <= iHi; i++) {
1811         if (nsegments[i].kind == SkFree
1812             || nsegments[i].kind == SkFileC
1813             || nsegments[i].kind == SkAnonC
1814             || nsegments[i].kind == SkShmC
1815             || nsegments[i].kind == SkResvn) {
1816            /* ok */
1817         } else {
1818            allow = False;
1819            break;
1820         }
1821      }
1822      if (allow) {
1823         /* Acceptable.  Granted. */
1824         *ok = True;
1825         return reqStart;
1826      }
1827      /* Not acceptable.  Fail. */
1828      *ok = False;
1829      return 0;
1830   }
1831
1832   /* ------ Implement Policy Exception #2 ------ */
1833
1834   if (forClient && req->rkind == MHint) {
1835      Int  iLo   = find_nsegment_idx(reqStart);
1836      Int  iHi   = find_nsegment_idx(reqEnd);
1837      Bool allow = True;
1838      for (i = iLo; i <= iHi; i++) {
1839         if (nsegments[i].kind == SkFree
1840             || nsegments[i].kind == SkResvn) {
1841            /* ok */
1842         } else {
1843            allow = False;
1844            break;
1845         }
1846      }
1847      if (allow) {
1848         /* Acceptable.  Granted. */
1849         *ok = True;
1850         return reqStart;
1851      }
1852      /* Not acceptable.  Fall through to the default policy. */
1853   }
1854
1855   /* ------ Implement the Default Policy ------ */
1856
1857   /* Don't waste time looking for a fixed match if not requested to. */
1858   fixed_not_required = req->rkind == MAny;
1859
1860   i = find_nsegment_idx(startPoint);
1861
1862   /* Examine holes from index i back round to i-1.  Record the
1863      index of the first fixed hole and of the first floating hole
1864      which would satisfy the request. */
1865   for (j = 0; j < nsegments_used; j++) {
1866
1867      if (nsegments[i].kind != SkFree) {
1868         i++;
1869         if (i >= nsegments_used) i = 0;
1870         continue;
1871      }
1872
1873      holeStart = nsegments[i].start;
1874      holeEnd   = nsegments[i].end;
1875
1876      /* Stay sane .. */
1877      aspacem_assert(holeStart <= holeEnd);
1878      aspacem_assert(aspacem_minAddr <= holeStart);
1879      aspacem_assert(holeEnd <= aspacem_maxAddr);
1880
1881      /* See if it's any use to us. */
1882      holeLen = holeEnd - holeStart + 1;
1883
1884      if (fixedIdx == -1 && holeStart <= reqStart && reqEnd <= holeEnd)
1885         fixedIdx = i;
1886
1887      if (floatIdx == -1 && holeLen >= reqLen)
1888         floatIdx = i;
1889
1890      /* Don't waste time searching once we've found what we wanted. */
1891      if ((fixed_not_required || fixedIdx >= 0) && floatIdx >= 0)
1892         break;
1893
1894      i++;
1895      if (i >= nsegments_used) i = 0;
1896   }
1897
1898   aspacem_assert(fixedIdx >= -1 && fixedIdx < nsegments_used);
1899   if (fixedIdx >= 0)
1900      aspacem_assert(nsegments[fixedIdx].kind == SkFree);
1901
1902   aspacem_assert(floatIdx >= -1 && floatIdx < nsegments_used);
1903   if (floatIdx >= 0)
1904      aspacem_assert(nsegments[floatIdx].kind == SkFree);
1905
1906   AM_SANITY_CHECK;
1907
1908   /* Now see if we found anything which can satisfy the request. */
1909   switch (req->rkind) {
1910      case MFixed:
1911         if (fixedIdx >= 0) {
1912            *ok = True;
1913            return req->start;
1914         } else {
1915            *ok = False;
1916            return 0;
1917         }
1918         break;
1919      case MHint:
1920         if (fixedIdx >= 0) {
1921            *ok = True;
1922            return req->start;
1923         }
1924         if (floatIdx >= 0) {
1925            *ok = True;
1926            return nsegments[floatIdx].start;
1927         }
1928         *ok = False;
1929         return 0;
1930      case MAny:
1931         if (floatIdx >= 0) {
1932            *ok = True;
1933            return nsegments[floatIdx].start;
1934         }
1935         *ok = False;
1936         return 0;
1937      default:
1938         break;
1939   }
1940
1941   /*NOTREACHED*/
1942   ML_(am_barf)("getAdvisory: unknown request kind");
1943   *ok = False;
1944   return 0;
1945}
1946
1947/* Convenience wrapper for VG_(am_get_advisory) for client floating or
1948   fixed requests.  If start is zero, a floating request is issued; if
1949   nonzero, a fixed request at that address is issued.  Same comments
1950   about return values apply. */
1951
1952Addr VG_(am_get_advisory_client_simple) ( Addr start, SizeT len,
1953                                          /*OUT*/Bool* ok )
1954{
1955   MapRequest mreq;
1956   mreq.rkind = start==0 ? MAny : MFixed;
1957   mreq.start = start;
1958   mreq.len   = len;
1959   return VG_(am_get_advisory)( &mreq, True/*forClient*/, ok );
1960}
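
/* Example (an illustrative sketch only, not code lifted from the
   syscall wrappers; 'len' and 'prot' stand for whatever the caller was
   asked for): a caller placing a floating anonymous client mapping
   would typically combine the advisory and notify calls like this:

      Bool ok, needDiscard;
      Addr advised = VG_(am_get_advisory_client_simple)( 0, len, &ok );
      if (ok) {
         SysRes sres = VG_(am_do_mmap_NO_NOTIFY)(
                          advised, len, prot,
                          VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
                          0, 0 );
         if (!sr_isError(sres)) {
            needDiscard = VG_(am_notify_client_mmap)(
                             sr_Res(sres), len, prot,
                             VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
                             0, 0 );
            // if needDiscard, the caller must throw away any cached
            // translations overlapping [sr_Res(sres), sr_Res(sres)+len)
         }
      }

   (Compare VG_(am_mmap_anon_float_client) below, which packages up a
   similar sequence, recording the new segment directly.) */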
1961
1962/* Similar to VG_(am_find_nsegment) but only returns free segments. */
1963static NSegment const * VG_(am_find_free_nsegment) ( Addr a )
1964{
1965   Int i = find_nsegment_idx(a);
1966   aspacem_assert(i >= 0 && i < nsegments_used);
1967   aspacem_assert(nsegments[i].start <= a);
1968   aspacem_assert(a <= nsegments[i].end);
1969   if (nsegments[i].kind == SkFree)
1970      return &nsegments[i];
1971   else
1972      return NULL;
1973}
1974
1975Bool VG_(am_covered_by_single_free_segment)
1976   ( Addr start, SizeT len)
1977{
1978   NSegment const* segLo = VG_(am_find_free_nsegment)( start );
1979   NSegment const* segHi = VG_(am_find_free_nsegment)( start + len - 1 );
1980
1981   return segLo != NULL && segHi != NULL && segLo == segHi;
1982}
1983
1984
1985/* Notifies aspacem that the client completed an mmap successfully.
1986   The segment array is updated accordingly.  If the returned Bool is
1987   True, the caller should immediately discard translations from the
1988   specified address range. */
1989
1990Bool
1991VG_(am_notify_client_mmap)( Addr a, SizeT len, UInt prot, UInt flags,
1992                            Int fd, Off64T offset )
1993{
1994   HChar    buf[VKI_PATH_MAX];
1995   ULong    dev, ino;
1996   UInt     mode;
1997   NSegment seg;
1998   Bool     needDiscard;
1999
2000   aspacem_assert(len > 0);
2001   aspacem_assert(VG_IS_PAGE_ALIGNED(a));
2002   aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2003   aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
2004
2005   /* Discard is needed if any of the just-trashed range had T. */
2006   needDiscard = any_Ts_in_range( a, len );
2007
2008   init_nsegment( &seg );
2009   seg.kind   = (flags & VKI_MAP_ANONYMOUS) ? SkAnonC : SkFileC;
2010   seg.start  = a;
2011   seg.end    = a + len - 1;
2012   seg.hasR   = toBool(prot & VKI_PROT_READ);
2013   seg.hasW   = toBool(prot & VKI_PROT_WRITE);
2014   seg.hasX   = toBool(prot & VKI_PROT_EXEC);
2015   if (!(flags & VKI_MAP_ANONYMOUS)) {
2016      // Nb: We ignore offset requests in anonymous mmaps (see bug #126722)
2017      seg.offset = offset;
2018      if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
2019         seg.dev = dev;
2020         seg.ino = ino;
2021         seg.mode = mode;
2022      }
2023      if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
2024         seg.fnIdx = allocate_segname( buf );
2025      }
2026   }
2027   add_segment( &seg );
2028   AM_SANITY_CHECK;
2029   return needDiscard;
2030}
2031
2032/* Notifies aspacem that the client completed a shmat successfully.
2033   The segment array is updated accordingly.  If the returned Bool is
2034   True, the caller should immediately discard translations from the
2035   specified address range. */
2036
2037Bool
2038VG_(am_notify_client_shmat)( Addr a, SizeT len, UInt prot )
2039{
2040   NSegment seg;
2041   Bool     needDiscard;
2042
2043   aspacem_assert(len > 0);
2044   aspacem_assert(VG_IS_PAGE_ALIGNED(a));
2045   aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2046
2047   /* Discard is needed if any of the just-trashed range had T. */
2048   needDiscard = any_Ts_in_range( a, len );
2049
2050   init_nsegment( &seg );
2051   seg.kind   = SkShmC;
2052   seg.start  = a;
2053   seg.end    = a + len - 1;
2054   seg.offset = 0;
2055   seg.hasR   = toBool(prot & VKI_PROT_READ);
2056   seg.hasW   = toBool(prot & VKI_PROT_WRITE);
2057   seg.hasX   = toBool(prot & VKI_PROT_EXEC);
2058   add_segment( &seg );
2059   AM_SANITY_CHECK;
2060   return needDiscard;
2061}
2062
2063/* Notifies aspacem that an mprotect was completed successfully.  The
2064   segment array is updated accordingly.  Note, as with
2065   VG_(am_notify_munmap), it is not the job of this function to reject
2066   stupid mprotects, for example the client doing mprotect of
2067   non-client areas.  Such requests should be intercepted earlier, by
2068   the syscall wrapper for mprotect.  This function merely records
2069   whatever it is told.  If the returned Bool is True, the caller
2070   should immediately discard translations from the specified address
2071   range. */
2072
2073Bool VG_(am_notify_mprotect)( Addr start, SizeT len, UInt prot )
2074{
2075   Int  i, iLo, iHi;
2076   Bool newR, newW, newX, needDiscard;
2077
2078   aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2079   aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2080
2081   if (len == 0)
2082      return False;
2083
2084   newR = toBool(prot & VKI_PROT_READ);
2085   newW = toBool(prot & VKI_PROT_WRITE);
2086   newX = toBool(prot & VKI_PROT_EXEC);
2087
2088   /* Discard is needed if we're dumping X permission */
2089   needDiscard = any_Ts_in_range( start, len ) && !newX;
2090
2091   split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
2092
2093   iLo = find_nsegment_idx(start);
2094   iHi = find_nsegment_idx(start + len - 1);
2095
2096   for (i = iLo; i <= iHi; i++) {
2097      /* Apply the permissions to all relevant segments. */
2098      switch (nsegments[i].kind) {
2099         case SkAnonC: case SkAnonV: case SkFileC: case SkFileV: case SkShmC:
2100            nsegments[i].hasR = newR;
2101            nsegments[i].hasW = newW;
2102            nsegments[i].hasX = newX;
2103            aspacem_assert(sane_NSegment(&nsegments[i]));
2104            break;
2105         default:
2106            break;
2107      }
2108   }
2109
2110   /* Changing permissions could have made previously un-mergeable
2111      segments mergeable.  Therefore we have to re-preen them. */
2112   (void)preen_nsegments();
2113   AM_SANITY_CHECK;
2114   return needDiscard;
2115}
2116
2117
2118/* Notifies aspacem that an munmap completed successfully.  The
2119   segment array is updated accordingly.  As with
2120   VG_(am_notify_mprotect), we merely record the given info, and don't
2121   check it for sensibleness.  If the returned Bool is True, the
2122   caller should immediately discard translations from the specified
2123   address range. */
2124
2125Bool VG_(am_notify_munmap)( Addr start, SizeT len )
2126{
2127   NSegment seg;
2128   Bool     needDiscard;
2129   aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2130   aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2131
2132   if (len == 0)
2133      return False;
2134
2135   needDiscard = any_Ts_in_range( start, len );
2136
2137   init_nsegment( &seg );
2138   seg.start = start;
2139   seg.end   = start + len - 1;
2140
2141   /* The segment becomes unused (free).  Segments from above
2142      aspacem_maxAddr were originally SkResvn and so we make them so
2143      again.  Note, this isn't really right when the segment straddles
2144      the aspacem_maxAddr boundary - then really it should be split in
2145      two, the lower part marked as SkFree and the upper part as
2146      SkResvn.  Ah well. */
2147   if (start > aspacem_maxAddr
2148       && /* check previous comparison is meaningful */
2149          aspacem_maxAddr < Addr_MAX)
2150      seg.kind = SkResvn;
2151   else
2152   /* Ditto for segments from below aspacem_minAddr. */
2153   if (seg.end < aspacem_minAddr && aspacem_minAddr > 0)
2154      seg.kind = SkResvn;
2155   else
2156      seg.kind = SkFree;
2157
2158   add_segment( &seg );
2159
2160   /* Unmapping could create two adjacent free segments, so a preen is
2161      needed.  add_segment() will do that, so no need to do it here. */
2162   AM_SANITY_CHECK;
2163   return needDiscard;
2164}
2165
2166
2167/*-----------------------------------------------------------------*/
2168/*---                                                           ---*/
2169/*--- Handling mappings which do not arise directly from the    ---*/
2170/*--- simulation of the client.                                 ---*/
2171/*---                                                           ---*/
2172/*-----------------------------------------------------------------*/
2173
2174/* --- --- --- map, unmap, protect  --- --- --- */
2175
2176/* Map a file at a fixed address for the client, and update the
2177   segment array accordingly. */
2178
2179SysRes VG_(am_mmap_file_fixed_client)
2180     ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset )
2181{
2182   return VG_(am_mmap_named_file_fixed_client)(start, length, prot, fd, offset, NULL);
2183}
2184
2185SysRes VG_(am_mmap_named_file_fixed_client)
2186     ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset, const HChar *name )
2187{
2188   SysRes     sres;
2189   NSegment   seg;
2190   Addr       advised;
2191   Bool       ok;
2192   MapRequest req;
2193   ULong      dev, ino;
2194   UInt       mode;
2195   HChar      buf[VKI_PATH_MAX];
2196
2197   /* Not allowable. */
2198   if (length == 0
2199       || !VG_IS_PAGE_ALIGNED(start)
2200       || !VG_IS_PAGE_ALIGNED(offset))
2201      return VG_(mk_SysRes_Error)( VKI_EINVAL );
2202
2203   /* Ask for an advisory.  If it's negative, fail immediately. */
2204   req.rkind = MFixed;
2205   req.start = start;
2206   req.len   = length;
2207   advised = VG_(am_get_advisory)( &req, True/*forClient*/, &ok );
2208   if (!ok || advised != start)
2209      return VG_(mk_SysRes_Error)( VKI_EINVAL );
2210
2211   /* We have been advised that the mapping is allowable at the
2212      specified address.  So hand it off to the kernel, and propagate
2213      any resulting failure immediately. */
2214   // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2215   sres = VG_(am_do_mmap_NO_NOTIFY)(
2216             start, length, prot,
2217             VKI_MAP_FIXED|VKI_MAP_PRIVATE,
2218             fd, offset
2219          );
2220   if (sr_isError(sres))
2221      return sres;
2222
2223   if (sr_Res(sres) != start) {
2224      /* I don't think this can happen.  It means the kernel made a
2225         fixed map succeed but not at the requested location.  Try to
2226         repair the damage, then return saying the mapping failed. */
2227      (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2228      return VG_(mk_SysRes_Error)( VKI_EINVAL );
2229   }
2230
2231   /* Ok, the mapping succeeded.  Now notify the interval map. */
2232   init_nsegment( &seg );
2233   seg.kind   = SkFileC;
2234   seg.start  = start;
2235   seg.end    = seg.start + VG_PGROUNDUP(length) - 1;
2236   seg.offset = offset;
2237   seg.hasR   = toBool(prot & VKI_PROT_READ);
2238   seg.hasW   = toBool(prot & VKI_PROT_WRITE);
2239   seg.hasX   = toBool(prot & VKI_PROT_EXEC);
2240   if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
2241      seg.dev = dev;
2242      seg.ino = ino;
2243      seg.mode = mode;
2244   }
2245   if (name) {
2246      seg.fnIdx = allocate_segname( name );
2247   } else if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
2248      seg.fnIdx = allocate_segname( buf );
2249   }
2250   add_segment( &seg );
2251
2252   AM_SANITY_CHECK;
2253   return sres;
2254}
2255
2256
2257/* Map anonymously at a fixed address for the client, and update
2258   the segment array accordingly. */
2259
2260SysRes VG_(am_mmap_anon_fixed_client) ( Addr start, SizeT length, UInt prot )
2261{
2262   SysRes     sres;
2263   NSegment   seg;
2264   Addr       advised;
2265   Bool       ok;
2266   MapRequest req;
2267
2268   /* Not allowable. */
2269   if (length == 0 || !VG_IS_PAGE_ALIGNED(start))
2270      return VG_(mk_SysRes_Error)( VKI_EINVAL );
2271
2272   /* Ask for an advisory.  If it's negative, fail immediately. */
2273   req.rkind = MFixed;
2274   req.start = start;
2275   req.len   = length;
2276   advised = VG_(am_get_advisory)( &req, True/*forClient*/, &ok );
2277   if (!ok || advised != start)
2278      return VG_(mk_SysRes_Error)( VKI_EINVAL );
2279
2280   /* We have been advised that the mapping is allowable at the
2281      specified address.  So hand it off to the kernel, and propagate
2282      any resulting failure immediately. */
2283   // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2284   sres = VG_(am_do_mmap_NO_NOTIFY)(
2285             start, length, prot,
2286             VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2287             0, 0
2288          );
2289   if (sr_isError(sres))
2290      return sres;
2291
2292   if (sr_Res(sres) != start) {
2293      /* I don't think this can happen.  It means the kernel made a
2294         fixed map succeed but not at the requested location.  Try to
2295         repair the damage, then return saying the mapping failed. */
2296      (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2297      return VG_(mk_SysRes_Error)( VKI_EINVAL );
2298   }
2299
2300   /* Ok, the mapping succeeded.  Now notify the interval map. */
2301   init_nsegment( &seg );
2302   seg.kind  = SkAnonC;
2303   seg.start = start;
2304   seg.end   = seg.start + VG_PGROUNDUP(length) - 1;
2305   seg.hasR  = toBool(prot & VKI_PROT_READ);
2306   seg.hasW  = toBool(prot & VKI_PROT_WRITE);
2307   seg.hasX  = toBool(prot & VKI_PROT_EXEC);
2308   add_segment( &seg );
2309
2310   AM_SANITY_CHECK;
2311   return sres;
2312}
2313
2314
2315/* Map anonymously at an unconstrained address for the client, and
2316   update the segment array accordingly.  */
2317
2318SysRes VG_(am_mmap_anon_float_client) ( SizeT length, Int prot )
2319{
2320   SysRes     sres;
2321   NSegment   seg;
2322   Addr       advised;
2323   Bool       ok;
2324   MapRequest req;
2325
2326   /* Not allowable. */
2327   if (length == 0)
2328      return VG_(mk_SysRes_Error)( VKI_EINVAL );
2329
2330   /* Ask for an advisory.  If it's negative, fail immediately. */
2331   req.rkind = MAny;
2332   req.start = 0;
2333   req.len   = length;
2334   advised = VG_(am_get_advisory)( &req, True/*forClient*/, &ok );
2335   if (!ok)
2336      return VG_(mk_SysRes_Error)( VKI_EINVAL );
2337
2338   /* We have been advised that the mapping is allowable at the
2339      advised address.  So hand it off to the kernel, and propagate
2340      any resulting failure immediately. */
2341   // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2342   sres = VG_(am_do_mmap_NO_NOTIFY)(
2343             advised, length, prot,
2344             VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2345             0, 0
2346          );
2347   if (sr_isError(sres))
2348      return sres;
2349
2350   if (sr_Res(sres) != advised) {
2351      /* I don't think this can happen.  It means the kernel made a
2352         fixed map succeed but not at the requested location.  Try to
2353         repair the damage, then return saying the mapping failed. */
2354      (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2355      return VG_(mk_SysRes_Error)( VKI_EINVAL );
2356   }
2357
2358   /* Ok, the mapping succeeded.  Now notify the interval map. */
2359   init_nsegment( &seg );
2360   seg.kind  = SkAnonC;
2361   seg.start = advised;
2362   seg.end   = seg.start + VG_PGROUNDUP(length) - 1;
2363   seg.hasR  = toBool(prot & VKI_PROT_READ);
2364   seg.hasW  = toBool(prot & VKI_PROT_WRITE);
2365   seg.hasX  = toBool(prot & VKI_PROT_EXEC);
2366   add_segment( &seg );
2367
2368   AM_SANITY_CHECK;
2369   return sres;
2370}
2371
2372
2373/* Map anonymously at an unconstrained address for V, and update the
2374   segment array accordingly.  This is fundamentally how V allocates
2375   itself more address space when needed. */
2376
2377SysRes VG_(am_mmap_anon_float_valgrind)( SizeT length )
2378{
2379   SysRes     sres;
2380   NSegment   seg;
2381   Addr       advised;
2382   Bool       ok;
2383   MapRequest req;
2384
2385   /* Not allowable. */
2386   if (length == 0)
2387      return VG_(mk_SysRes_Error)( VKI_EINVAL );
2388
2389   /* Ask for an advisory.  If it's negative, fail immediately. */
2390   req.rkind = MAny;
2391   req.start = 0;
2392   req.len   = length;
2393   advised = VG_(am_get_advisory)( &req, False/*forClient*/, &ok );
2394   if (!ok)
2395      return VG_(mk_SysRes_Error)( VKI_EINVAL );
2396
2397// On Darwin, for anonymous maps you can pass in a tag which is used by
2398// programs like vmmap for statistical purposes.
2399#ifndef VM_TAG_VALGRIND
2400#  define VM_TAG_VALGRIND 0
2401#endif
2402
2403   /* We have been advised that the mapping is allowable at the
2404      specified address.  So hand it off to the kernel, and propagate
2405      any resulting failure immediately. */
2406   /* GrP fixme darwin: use advisory as a hint only, otherwise syscall in
2407      another thread can pre-empt our spot.  [At one point on the DARWIN
2408      branch the VKI_MAP_FIXED was commented out;  unclear if this is
2409      necessary or not given the second Darwin-only call that immediately
2410      follows if this one fails.  --njn]
2411      Also, an inner valgrind cannot observe the mmap syscalls done by
2412      the outer valgrind. The outer Valgrind might make the mmap
2413      fail here, as the inner valgrind believes that a segment is free,
2414      while it is in fact used by the outer valgrind.
2415      So, for an inner valgrind, similarly to DARWIN, if the fixed mmap
2416      fails, retry the mmap without map fixed.
2417      This is a kludge which on linux is only activated for the inner.
2418      The state of the inner aspacemgr is not made correct by this kludge
2419      and so, among other things, VG_(am_do_sync_check) could fail.
2420      A proper solution implies a better collaboration between the
2421      inner and the outer (e.g. inner VG_(am_get_advisory) should do
2422      a client request to call the outer VG_(am_get_advisory). */
2423   sres = VG_(am_do_mmap_NO_NOTIFY)(
2424             advised, length,
2425             VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
2426             VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2427             VM_TAG_VALGRIND, 0
2428          );
2429#if defined(VGO_darwin) || defined(ENABLE_INNER)
2430   /* Kludge on Darwin and inner linux if the fixed mmap failed. */
2431   if (sr_isError(sres)) {
2432       /* try again, ignoring the advisory */
2433       sres = VG_(am_do_mmap_NO_NOTIFY)(
2434             0, length,
2435             VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
2436             /*VKI_MAP_FIXED|*/VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2437             VM_TAG_VALGRIND, 0
2438          );
2439   }
2440#endif
2441   if (sr_isError(sres))
2442      return sres;
2443
2444#if defined(VGO_linux) && !defined(ENABLE_INNER)
2445   /* Do the check only on Linux and only for a non-inner build, as
2446      the check below can fail when the kludge above has been used. */
2447   if (sr_Res(sres) != advised) {
2448      /* I don't think this can happen.  It means the kernel made a
2449         fixed map succeed but not at the requested location.  Try to
2450         repair the damage, then return saying the mapping failed. */
2451      (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2452      return VG_(mk_SysRes_Error)( VKI_EINVAL );
2453   }
2454#endif
2455
2456   /* Ok, the mapping succeeded.  Now notify the interval map. */
2457   init_nsegment( &seg );
2458   seg.kind  = SkAnonV;
2459   seg.start = sr_Res(sres);
2460   seg.end   = seg.start + VG_PGROUNDUP(length) - 1;
2461   seg.hasR  = True;
2462   seg.hasW  = True;
2463   seg.hasX  = True;
2464   add_segment( &seg );
2465
2466   AM_SANITY_CHECK;
2467   return sres;
2468}
2469
2470/* Really just a wrapper around VG_(am_mmap_anon_float_valgrind). */
2471
2472void* VG_(am_shadow_alloc)(SizeT size)
2473{
2474   SysRes sres = VG_(am_mmap_anon_float_valgrind)( size );
2475   return sr_isError(sres) ? NULL : (void*)sr_Res(sres);
2476}
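
/* Example (illustrative only): a tool wanting a megabyte of shadow
   storage might write

      void* shadow = VG_(am_shadow_alloc)( 1024 * 1024 );
      if (shadow == NULL)
         ML_(am_barf)("out of memory allocating shadow storage");

   The memory comes back readable, writable and executable, and is
   recorded as owned by V (an SkAnonV segment) rather than by the
   client. */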
2477
2478/* Map a file at an unconstrained address for V, and update the
2479   segment array accordingly.  Use the provided flags. */
2480
2481static SysRes VG_(am_mmap_file_float_valgrind_flags) ( SizeT length, UInt prot,
2482                                                       UInt flags,
2483                                                       Int fd, Off64T offset )
2484{
2485   SysRes     sres;
2486   NSegment   seg;
2487   Addr       advised;
2488   Bool       ok;
2489   MapRequest req;
2490   ULong      dev, ino;
2491   UInt       mode;
2492   HChar      buf[VKI_PATH_MAX];
2493
2494   /* Not allowable. */
2495   if (length == 0 || !VG_IS_PAGE_ALIGNED(offset))
2496      return VG_(mk_SysRes_Error)( VKI_EINVAL );
2497
2498   /* Ask for an advisory.  If it's negative, fail immediately. */
2499   req.rkind = MAny;
2500   req.start = 0;
2501   #if defined(VGA_arm) || defined(VGA_arm64) \
2502      || defined(VGA_mips32) || defined(VGA_mips64)
2503   aspacem_assert(VKI_SHMLBA >= VKI_PAGE_SIZE);
2504   #else
2505   aspacem_assert(VKI_SHMLBA == VKI_PAGE_SIZE);
2506   #endif
2507   if ((VKI_SHMLBA > VKI_PAGE_SIZE) && (VKI_MAP_SHARED & flags)) {
2508      /* arm-linux only. See ML_(generic_PRE_sys_shmat) and bug 290974 */
2509      req.len = length + VKI_SHMLBA - VKI_PAGE_SIZE;
2510   } else {
2511      req.len = length;
2512   }
2513   advised = VG_(am_get_advisory)( &req, False/*forClient*/, &ok );
2514   if (!ok)
2515      return VG_(mk_SysRes_Error)( VKI_EINVAL );
2516   if ((VKI_SHMLBA > VKI_PAGE_SIZE) && (VKI_MAP_SHARED & flags))
2517      advised = VG_ROUNDUP(advised, VKI_SHMLBA);
2518
2519   /* We have been advised that the mapping is allowable at the
2520      specified address.  So hand it off to the kernel, and propagate
2521      any resulting failure immediately. */
2522   sres = VG_(am_do_mmap_NO_NOTIFY)(
2523             advised, length, prot,
2524             flags,
2525             fd, offset
2526          );
2527   if (sr_isError(sres))
2528      return sres;
2529
2530   if (sr_Res(sres) != advised) {
2531      /* I don't think this can happen.  It means the kernel made a
2532         fixed map succeed but not at the requested location.  Try to
2533         repair the damage, then return saying the mapping failed. */
2534      (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2535      return VG_(mk_SysRes_Error)( VKI_EINVAL );
2536   }
2537
2538   /* Ok, the mapping succeeded.  Now notify the interval map. */
2539   init_nsegment( &seg );
2540   seg.kind   = SkFileV;
2541   seg.start  = sr_Res(sres);
2542   seg.end    = seg.start + VG_PGROUNDUP(length) - 1;
2543   seg.offset = offset;
2544   seg.hasR   = toBool(prot & VKI_PROT_READ);
2545   seg.hasW   = toBool(prot & VKI_PROT_WRITE);
2546   seg.hasX   = toBool(prot & VKI_PROT_EXEC);
2547   if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
2548      seg.dev  = dev;
2549      seg.ino  = ino;
2550      seg.mode = mode;
2551   }
2552   if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
2553      seg.fnIdx = allocate_segname( buf );
2554   }
2555   add_segment( &seg );
2556
2557   AM_SANITY_CHECK;
2558   return sres;
2559}
2560/* Map privately a file at an unconstrained address for V, and update the
2561   segment array accordingly.  This is used by V for transiently
2562   mapping in object files to read their debug info.  */
2563
2564SysRes VG_(am_mmap_file_float_valgrind) ( SizeT length, UInt prot,
2565                                          Int fd, Off64T offset )
2566{
2567   return VG_(am_mmap_file_float_valgrind_flags) (length, prot,
2568                                                  VKI_MAP_FIXED|VKI_MAP_PRIVATE,
2569                                                  fd, offset );
2570}
2571
2572SysRes VG_(am_shared_mmap_file_float_valgrind)
2573   ( SizeT length, UInt prot, Int fd, Off64T offset )
2574{
2575   return VG_(am_mmap_file_float_valgrind_flags) (length, prot,
2576                                                  VKI_MAP_FIXED|VKI_MAP_SHARED,
2577                                                  fd, offset );
2578}
2579
2580/* --- --- munmap helper --- --- */
2581
2582static
2583SysRes am_munmap_both_wrk ( /*OUT*/Bool* need_discard,
2584                            Addr start, SizeT len, Bool forClient )
2585{
2586   Bool   d;
2587   SysRes sres;
2588
2589   if (!VG_IS_PAGE_ALIGNED(start))
2590      goto eINVAL;
2591
2592   if (len == 0) {
2593      *need_discard = False;
2594      return VG_(mk_SysRes_Success)( 0 );
2595   }
2596
2597   if (start + len < len)
2598      goto eINVAL;
2599
2600   len = VG_PGROUNDUP(len);
2601   aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2602   aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2603
2604   if (forClient) {
2605      if (!VG_(am_is_valid_for_client_or_free_or_resvn)
2606            ( start, len, VKI_PROT_NONE ))
2607         goto eINVAL;
2608   } else {
2609      if (!VG_(am_is_valid_for_valgrind)
2610            ( start, len, VKI_PROT_NONE ))
2611         goto eINVAL;
2612   }
2613
2614   d = any_Ts_in_range( start, len );
2615
2616   sres = ML_(am_do_munmap_NO_NOTIFY)( start, len );
2617   if (sr_isError(sres))
2618      return sres;
2619
2620   VG_(am_notify_munmap)( start, len );
2621   AM_SANITY_CHECK;
2622   *need_discard = d;
2623   return sres;
2624
2625  eINVAL:
2626   return VG_(mk_SysRes_Error)( VKI_EINVAL );
2627}
2628
2629/* Unmap the given address range and update the segment array
2630   accordingly.  This fails if the range isn't valid for the client.
2631   If *need_discard is True after a successful return, the caller
2632   should immediately discard translations from the specified address
2633   range. */
2634
2635SysRes VG_(am_munmap_client)( /*OUT*/Bool* need_discard,
2636                              Addr start, SizeT len )
2637{
2638   return am_munmap_both_wrk( need_discard, start, len, True/*client*/ );
2639}
2640
2641/* Unmap the given address range and update the segment array
2642   accordingly.  This fails if the range isn't valid for valgrind. */
2643
2644SysRes VG_(am_munmap_valgrind)( Addr start, SizeT len )
2645{
2646   Bool need_discard;
2647   SysRes r = am_munmap_both_wrk( &need_discard,
2648                                  start, len, False/*valgrind*/ );
2649   /* If this assertion fails, it means we allowed translations to be
2650      made from a V-owned section.  Which shouldn't happen. */
2651   if (!sr_isError(r))
2652      aspacem_assert(!need_discard);
2653   return r;
2654}
2655
2656/* Let (start,len) denote an area within a single Valgrind-owned
2657   segment (anon or file).  Change the ownership of [start, start+len)
2658   to the client instead.  Fails if (start,len) does not denote a
2659   suitable segment. */
2660
2661Bool VG_(am_change_ownership_v_to_c)( Addr start, SizeT len )
2662{
2663   Int i, iLo, iHi;
2664
2665   if (len == 0)
2666      return True;
2667   if (start + len < start)
2668      return False;
2669   if (!VG_IS_PAGE_ALIGNED(start) || !VG_IS_PAGE_ALIGNED(len))
2670      return False;
2671
2672   i = find_nsegment_idx(start);
2673   if (nsegments[i].kind != SkFileV && nsegments[i].kind != SkAnonV)
2674      return False;
2675   if (start+len-1 > nsegments[i].end)
2676      return False;
2677
2678   aspacem_assert(start >= nsegments[i].start);
2679   aspacem_assert(start+len-1 <= nsegments[i].end);
2680
2681   /* This scheme is like how mprotect works: split the to-be-changed
2682      range into its own segment(s), then mess with them (it).  There
2683      should be only one. */
2684   split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
2685   aspacem_assert(iLo == iHi);
2686   switch (nsegments[iLo].kind) {
2687      case SkFileV: nsegments[iLo].kind = SkFileC; break;
2688      case SkAnonV: nsegments[iLo].kind = SkAnonC; break;
2689      default: aspacem_assert(0); /* can't happen - guarded above */
2690   }
2691
2692   preen_nsegments();
2693   return True;
2694}
2695
2696/* 'seg' must have been obtained from VG_(am_find_nsegment), and must
2697   still be valid.  If it denotes a SkAnonC (anonymous client mapping)
2698   area, set the .isCH (is-client-heap) flag for that area.  Otherwise
2699   do nothing.  (Bizarre interface so that the same code works for both
2700   Linux and AIX without imposing inefficiencies on the Linux version.) */
2701void VG_(am_set_segment_isCH_if_SkAnonC)( const NSegment* seg )
2702{
2703   aspacem_assert(seg != NULL);
2704   Int i = segAddr_to_index( seg );
2705   if (nsegments[i].kind == SkAnonC) {
2706      nsegments[i].isCH = True;
2707   } else {
2708      aspacem_assert(nsegments[i].isCH == False);
2709   }
2710}
2711
2712/* Same idea as VG_(am_set_segment_isCH_if_SkAnonC), except set the
2713   segment's hasT bit (has-cached-code) if this is SkFileC or SkAnonC
2714   segment. */
2715void VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( const NSegment* seg )
2716{
2717   aspacem_assert(seg != NULL);
2718   Int i = segAddr_to_index( seg );
2719   if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkFileC) {
2720      nsegments[i].hasT = True;
2721   }
2722}
2723
2724
2725/* --- --- --- reservations --- --- --- */
2726
2727/* Create a reservation from START .. START+LENGTH-1, with the given
2728   ShrinkMode.  When checking whether the reservation can be created,
2729   also ensure that at least abs(EXTRA) extra free bytes will remain
2730   above (> 0) or below (< 0) the reservation.
2731
2732   The reservation will only be created if it, plus the extra-zone,
2733   falls entirely within a single free segment.  The returned Bool
2734   indicates whether the creation succeeded. */
2735
2736Bool VG_(am_create_reservation) ( Addr start, SizeT length,
2737                                  ShrinkMode smode, SSizeT extra )
2738{
2739   Int      startI, endI;
2740   NSegment seg;
2741
2742   /* start and end, not taking into account the extra space. */
2743   Addr start1 = start;
2744   Addr end1   = start + length - 1;
2745
2746   /* start and end, taking into account the extra space. */
2747   Addr start2 = start1;
2748   Addr end2   = end1;
2749
2750   if (extra < 0) start2 += extra; // this moves it down :-)
2751   if (extra > 0) end2 += extra;
2752
2753   aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2754   aspacem_assert(VG_IS_PAGE_ALIGNED(start+length));
2755   aspacem_assert(VG_IS_PAGE_ALIGNED(start2));
2756   aspacem_assert(VG_IS_PAGE_ALIGNED(end2+1));
2757
2758   startI = find_nsegment_idx( start2 );
2759   endI = find_nsegment_idx( end2 );
2760
2761   /* If the start and end points don't fall within the same (free)
2762      segment, we're hosed.  This does rely on the assumption that all
2763      mergeable adjacent segments can be merged, but add_segment()
2764      should ensure that. */
2765   if (startI != endI)
2766      return False;
2767
2768   if (nsegments[startI].kind != SkFree)
2769      return False;
2770
2771   /* Looks good - make the reservation. */
2772   aspacem_assert(nsegments[startI].start <= start2);
2773   aspacem_assert(end2 <= nsegments[startI].end);
2774
2775   init_nsegment( &seg );
2776   seg.kind  = SkResvn;
2777   seg.start = start1;  /* NB: extra space is not included in the
2778                           reservation. */
2779   seg.end   = end1;
2780   seg.smode = smode;
2781   add_segment( &seg );
2782
2783   AM_SANITY_CHECK;
2784   return True;
2785}
2786
2787
2788/* Let SEG be an anonymous client mapping.  This fn extends the
2789   mapping by DELTA bytes, taking the space from a reservation section
2790   which must be adjacent.  If DELTA is positive, the segment is
2791   extended forwards in the address space, and the reservation must be
2792   the next one along.  If DELTA is negative, the segment is extended
2793   backwards in the address space and the reservation must be the
2794   previous one.  DELTA must be page aligned.  abs(DELTA) must not
2795   exceed the size of the reservation segment minus one page, that is,
2796   the reservation segment after the operation must be at least one
2797   page long. */
2798
2799Bool VG_(am_extend_into_adjacent_reservation_client) ( const NSegment* seg,
2800                                                       SSizeT    delta )
2801{
2802   Int    segA, segR;
2803   UInt   prot;
2804   SysRes sres;
2805
2806   /* Find the segment array index for SEG.  If the assertion fails it
2807      probably means you passed in a bogus SEG. */
2808   aspacem_assert(seg != NULL);
2809   segA = segAddr_to_index( seg );
2810
2811   if (nsegments[segA].kind != SkAnonC)
2812      return False;
2813
2814   if (delta == 0)
2815      return True;
2816
2817   prot =   (nsegments[segA].hasR ? VKI_PROT_READ : 0)
2818          | (nsegments[segA].hasW ? VKI_PROT_WRITE : 0)
2819          | (nsegments[segA].hasX ? VKI_PROT_EXEC : 0);
2820
2821   aspacem_assert(VG_IS_PAGE_ALIGNED(delta<0 ? -delta : delta));
2822
2823   if (delta > 0) {
2824
2825      /* Extending the segment forwards. */
2826      segR = segA+1;
2827      if (segR >= nsegments_used
2828          || nsegments[segR].kind != SkResvn
2829          || nsegments[segR].smode != SmLower
2830          || nsegments[segR].start != nsegments[segA].end + 1
2831          || delta + VKI_PAGE_SIZE
2832                > (nsegments[segR].end - nsegments[segR].start + 1))
2833        return False;
2834
2835      /* Extend the kernel's mapping. */
2836      // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2837      sres = VG_(am_do_mmap_NO_NOTIFY)(
2838                nsegments[segR].start, delta,
2839                prot,
2840                VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2841                0, 0
2842             );
2843      if (sr_isError(sres))
2844         return False; /* kernel bug if this happens? */
2845      if (sr_Res(sres) != nsegments[segR].start) {
2846         /* kernel bug if this happens? */
2847        (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), delta );
2848        return False;
2849      }
2850
2851      /* Ok, success with the kernel.  Update our structures. */
2852      nsegments[segR].start += delta;
2853      nsegments[segA].end += delta;
2854      aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
2855
2856   } else {
2857
2858      /* Extending the segment backwards. */
2859      delta = -delta;
2860      aspacem_assert(delta > 0);
2861
2862      segR = segA-1;
2863      if (segR < 0
2864          || nsegments[segR].kind != SkResvn
2865          || nsegments[segR].smode != SmUpper
2866          || nsegments[segR].end + 1 != nsegments[segA].start
2867          || delta + VKI_PAGE_SIZE
2868                > (nsegments[segR].end - nsegments[segR].start + 1))
2869        return False;
2870
2871      /* Extend the kernel's mapping. */
2872      // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2873      sres = VG_(am_do_mmap_NO_NOTIFY)(
2874                nsegments[segA].start-delta, delta,
2875                prot,
2876                VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2877                0, 0
2878             );
2879      if (sr_isError(sres))
2880         return False; /* kernel bug if this happens? */
2881      if (sr_Res(sres) != nsegments[segA].start-delta) {
2882         /* kernel bug if this happens? */
2883        (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), delta );
2884        return False;
2885      }
2886
2887      /* Ok, success with the kernel.  Update our structures. */
2888      nsegments[segR].end -= delta;
2889      nsegments[segA].start -= delta;
2890      aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
2891
2892   }
2893
2894   AM_SANITY_CHECK;
2895   return True;
2896}
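
/* Taken together, VG_(am_create_reservation) and
   VG_(am_extend_into_adjacent_reservation_client) support a growable
   mapping.  An illustrative sketch (addresses and sizes are made up,
   error checking omitted):

      // one page mapped now, 15 further pages reserved above it
      VG_(am_mmap_anon_fixed_client)( 0x50000000, VKI_PAGE_SIZE,
                                      VKI_PROT_READ|VKI_PROT_WRITE );
      VG_(am_create_reservation)( 0x50000000 + VKI_PAGE_SIZE,
                                  15 * VKI_PAGE_SIZE, SmLower, 0 );

      // later: grow the anonymous mapping one page into the reservation
      const NSegment* anon = VG_(am_find_nsegment)( 0x50000000 );
      VG_(am_extend_into_adjacent_reservation_client)( anon,
                                                       VKI_PAGE_SIZE );

   SmLower says the reservation gives up space at its lower end, which
   is what forward extension of the mapping below it requires. */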
2897
2898
2899/* --- --- --- resizing/move a mapping --- --- --- */
2900
2901#if HAVE_MREMAP
2902
2903/* Let SEG be a client mapping (anonymous or file).  This fn extends
2904   the mapping forwards only by DELTA bytes, and trashes whatever was
2905   in the new area.  Fails if SEG is not a single client mapping or if
2906   the new area is not accessible to the client.  Fails if DELTA is
2907   not page aligned.  *seg is invalid after a successful return.  If
2908   *need_discard is True after a successful return, the caller should
2909   immediately discard translations from the new area. */
2910
2911Bool VG_(am_extend_map_client)( /*OUT*/Bool* need_discard,
2912                                const NSegment* seg, SizeT delta )
2913{
2914   Addr     xStart;
2915   SysRes   sres;
2916   NSegment seg_copy = *seg;
2917   SizeT    seg_old_len = seg->end + 1 - seg->start;
2918
2919   if (0)
2920      VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) BEFORE");
2921
2922   if (seg->kind != SkFileC && seg->kind != SkAnonC)
2923      return False;
2924
2925   if (delta == 0 || !VG_IS_PAGE_ALIGNED(delta))
2926      return False;
2927
2928   xStart = seg->end+1;
2929   if (xStart + delta < delta)
2930      return False;
2931
2932   if (!VG_(am_is_valid_for_client_or_free_or_resvn)( xStart, delta,
2933                                                      VKI_PROT_NONE ))
2934      return False;
2935
2936   AM_SANITY_CHECK;
2937   sres = ML_(am_do_extend_mapping_NO_NOTIFY)( seg->start,
2938                                               seg_old_len,
2939                                               seg_old_len + delta );
2940   if (sr_isError(sres)) {
2941      AM_SANITY_CHECK;
2942      return False;
2943   } else {
2944      /* the area must not have moved */
2945      aspacem_assert(sr_Res(sres) == seg->start);
2946   }
2947
2948   *need_discard = any_Ts_in_range( seg_copy.end+1, delta );
2949
2950   seg_copy.end += delta;
2951   add_segment( &seg_copy );
2952
2953   if (0)
2954      VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) AFTER");
2955
2956   AM_SANITY_CHECK;
2957   return True;
2958}
2959
2960
2961/* Remap the old address range to the new address range.  Fails if any
2962   parameter is not page aligned, if either size is zero, if any
2963   wraparound is implied, if the old address range does not fall
2964   entirely within a single segment, if the new address range overlaps
2965   with the old one, or if the old address range is not a valid client
2966   mapping.  If *need_discard is True after a successful return, the
2967   caller should immediately discard translations from both specified
2968   address ranges.  */
2969
2970Bool VG_(am_relocate_nooverlap_client)( /*OUT*/Bool* need_discard,
2971                                        Addr old_addr, SizeT old_len,
2972                                        Addr new_addr, SizeT new_len )
2973{
2974   Int      iLo, iHi;
2975   SysRes   sres;
2976   NSegment seg;
2977
2978   if (old_len == 0 || new_len == 0)
2979      return False;
2980
2981   if (!VG_IS_PAGE_ALIGNED(old_addr) || !VG_IS_PAGE_ALIGNED(old_len)
2982       || !VG_IS_PAGE_ALIGNED(new_addr) || !VG_IS_PAGE_ALIGNED(new_len))
2983      return False;
2984
2985   if (old_addr + old_len < old_addr
2986       || new_addr + new_len < new_addr)
2987      return False;
2988
2989   if (old_addr + old_len - 1 < new_addr
2990       || new_addr + new_len - 1 < old_addr) {
2991      /* no overlap */
2992   } else
2993      return False;
2994
2995   iLo = find_nsegment_idx( old_addr );
2996   iHi = find_nsegment_idx( old_addr + old_len - 1 );
2997   if (iLo != iHi)
2998      return False;
2999
3000   if (nsegments[iLo].kind != SkFileC && nsegments[iLo].kind != SkAnonC)
3001      return False;
3002
3003   sres = ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)
3004             ( old_addr, old_len, new_addr, new_len );
3005   if (sr_isError(sres)) {
3006      AM_SANITY_CHECK;
3007      return False;
3008   } else {
3009      aspacem_assert(sr_Res(sres) == new_addr);
3010   }
3011
3012   *need_discard = any_Ts_in_range( old_addr, old_len )
3013                   || any_Ts_in_range( new_addr, new_len );
3014
3015   seg = nsegments[iLo];
3016
3017   /* Mark the new area based on the old seg. */
3018   if (seg.kind == SkFileC) {
3019      seg.offset += ((ULong)old_addr) - ((ULong)seg.start);
3020   } else {
3021      aspacem_assert(seg.kind == SkAnonC);
3022      aspacem_assert(seg.offset == 0);
3023   }
3024   seg.start = new_addr;
3025   seg.end   = new_addr + new_len - 1;
3026   add_segment( &seg );
3027
3028   /* Create a free hole in the old location. */
3029   init_nsegment( &seg );
3030   seg.start = old_addr;
3031   seg.end   = old_addr + old_len - 1;
3032   /* See comments in VG_(am_notify_munmap) about this SkResvn vs
3033      SkFree thing. */
3034   if (old_addr > aspacem_maxAddr
3035       && /* check previous comparison is meaningful */
3036          aspacem_maxAddr < Addr_MAX)
3037      seg.kind = SkResvn;
3038   else
3039      seg.kind = SkFree;
3040
3041   add_segment( &seg );
3042
3043   AM_SANITY_CHECK;
3044   return True;
3045}
3046
3047#endif // HAVE_MREMAP
3048
3049
3050#if defined(VGO_linux)
3051
3052/*-----------------------------------------------------------------*/
3053/*---                                                           ---*/
3054/*--- A simple parser for /proc/self/maps on Linux 2.4.X/2.6.X. ---*/
3055/*--- Almost completely independent of the stuff above.  The    ---*/
3056/*--- only function it 'exports' to the code above this comment ---*/
3057/*--- is parse_procselfmaps.                                    ---*/
3058/*---                                                           ---*/
3059/*-----------------------------------------------------------------*/
3060
3061/*------BEGIN-procmaps-parser-for-Linux--------------------------*/
3062
3063/* Size of a smallish table used to read /proc/self/maps entries. */
3064#define M_PROCMAP_BUF 100000
3065
3066/* static ... to keep it out of the stack frame. */
3067static HChar procmap_buf[M_PROCMAP_BUF];
3068
3069/* Records length of /proc/self/maps read into procmap_buf. */
3070static Int  buf_n_tot;
3071
3072/* Helper fns. */
3073
3074static Int hexdigit ( HChar c )
3075{
3076   if (c >= '0' && c <= '9') return (Int)(c - '0');
3077   if (c >= 'a' && c <= 'f') return 10 + (Int)(c - 'a');
3078   if (c >= 'A' && c <= 'F') return 10 + (Int)(c - 'A');
3079   return -1;
3080}
3081
3082static Int decdigit ( HChar c )
3083{
3084   if (c >= '0' && c <= '9') return (Int)(c - '0');
3085   return -1;
3086}
3087
3088static Int readchar ( const HChar* buf, HChar* ch )
3089{
3090   if (*buf == 0) return 0;
3091   *ch = *buf;
3092   return 1;
3093}
3094
3095static Int readhex ( const HChar* buf, UWord* val )
3096{
3097   /* Read a word-sized hex number. */
3098   Int n = 0;
3099   *val = 0;
3100   while (hexdigit(*buf) >= 0) {
3101      *val = (*val << 4) + hexdigit(*buf);
3102      n++; buf++;
3103   }
3104   return n;
3105}
3106
3107static Int readhex64 ( const HChar* buf, ULong* val )
3108{
3109   /* Read a potentially 64-bit hex number. */
3110   Int n = 0;
3111   *val = 0;
3112   while (hexdigit(*buf) >= 0) {
3113      *val = (*val << 4) + hexdigit(*buf);
3114      n++; buf++;
3115   }
3116   return n;
3117}
3118
3119static Int readdec64 ( const HChar* buf, ULong* val )
3120{
3121   Int n = 0;
3122   *val = 0;
3123   while (decdigit(*buf) >= 0) {
3124      *val = (*val * 10) + decdigit(*buf);
3125      n++; buf++;
3126   }
3127   return n;
3128}
3129
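/* For illustration: given buf pointing at "bf8e6000-bf8e7000 ...",
   readhex(buf, &val) consumes the eight leading hex digits, sets
   val to 0xbf8e6000 and returns 8; the parser below then advances
   its index by that return value and expects to see the '-' next. */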
3130
3131/* Get the contents of /proc/self/maps into a static buffer.  If
3132   there's a syntax error, the contents don't fit, or any other
3133   failure occurs, just abort. */
3134
3135static void read_procselfmaps_into_buf ( void )
3136{
3137   Int    n_chunk;
3138   SysRes fd;
3139
3140   /* Read the initial memory mapping from the /proc filesystem. */
3141   fd = ML_(am_open)( "/proc/self/maps", VKI_O_RDONLY, 0 );
3142   if (sr_isError(fd))
3143      ML_(am_barf)("can't open /proc/self/maps");
3144
3145   buf_n_tot = 0;
3146   do {
3147      n_chunk = ML_(am_read)( sr_Res(fd), &procmap_buf[buf_n_tot],
3148                              M_PROCMAP_BUF - buf_n_tot );
3149      if (n_chunk >= 0)
3150         buf_n_tot += n_chunk;
3151   } while ( n_chunk > 0 && buf_n_tot < M_PROCMAP_BUF );
3152
3153   ML_(am_close)(sr_Res(fd));
3154
3155   if (buf_n_tot >= M_PROCMAP_BUF-5)
3156      ML_(am_barf_toolow)("M_PROCMAP_BUF");
3157   if (buf_n_tot == 0)
3158      ML_(am_barf)("I/O error on /proc/self/maps");
3159
3160   procmap_buf[buf_n_tot] = 0;
3161}
3162
3163/* Parse /proc/self/maps.  For each map entry, call
3164   record_mapping, passing it, in this order:
3165
3166      start address in memory
3167      length
3168      page protections (using the VKI_PROT_* flags)
3169      mapped file device and inode
3170      offset in file, or zero if no file
3171      filename, zero terminated, or NULL if no file
3172
3173   So the signature of the called fn is
3174
3175      void (*record_mapping)( Addr addr, SizeT len, UInt prot,
3176                              ULong dev, ULong ino, Off64T offset,
3177                              const HChar* filename )
3178
3179   Note that the supplied filename is transiently stored; record_mapping
3180   should make a copy if it wants to keep it.
3181
3182   Nb: it is important that this function does not alter the contents of
3183       procmap_buf!
3184*/
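
/* For illustration only (the path is made up): a typical
   /proc/self/maps line has the form

      00400000-0040b000 r-xp 00000000 08:02 173521  /usr/bin/foo

   i.e. start-end addresses, the rwxp permission/share flags, the
   file offset, major:minor device numbers, the inode, and finally
   the file name (if any), which may itself contain spaces.  Gaps
   between mappings, and the final gap up to the top of the address
   space, are reported through the second callback, record_gap. */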
3185static void parse_procselfmaps (
3186      void (*record_mapping)( Addr addr, SizeT len, UInt prot,
3187                              ULong dev, ULong ino, Off64T offset,
3188                              const HChar* filename ),
3189      void (*record_gap)( Addr addr, SizeT len )
3190   )
3191{
3192   Int    i, j, i_eol;
3193   Addr   start, endPlusOne, gapStart;
3194   HChar* filename;
3195   HChar  rr, ww, xx, pp, ch, tmp;
3196   UInt   prot;
3197   UWord  maj, min;
3198   ULong  foffset, dev, ino;
3199
3200   foffset = ino = 0; /* keep gcc-4.1.0 happy */
3201
3202   read_procselfmaps_into_buf();
3203
3204   aspacem_assert('\0' != procmap_buf[0] && 0 != buf_n_tot);
3205
3206   if (0)
3207      VG_(debugLog)(0, "procselfmaps", "raw:\n%s\n", procmap_buf);
3208
3209   /* Ok, it's safely aboard.  Parse the entries. */
3210   i = 0;
3211   gapStart = Addr_MIN;
3212   while (True) {
3213      if (i >= buf_n_tot) break;
3214
3215      /* Read (without fscanf :) the pattern %16x-%16x %c%c%c%c %16x %2x:%2x %d */
3216      j = readhex(&procmap_buf[i], &start);
3217      if (j > 0) i += j; else goto syntaxerror;
3218      j = readchar(&procmap_buf[i], &ch);
3219      if (j == 1 && ch == '-') i += j; else goto syntaxerror;
3220      j = readhex(&procmap_buf[i], &endPlusOne);
3221      if (j > 0) i += j; else goto syntaxerror;
3222
3223      j = readchar(&procmap_buf[i], &ch);
3224      if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3225
3226      j = readchar(&procmap_buf[i], &rr);
3227      if (j == 1 && (rr == 'r' || rr == '-')) i += j; else goto syntaxerror;
3228      j = readchar(&procmap_buf[i], &ww);
3229      if (j == 1 && (ww == 'w' || ww == '-')) i += j; else goto syntaxerror;
3230      j = readchar(&procmap_buf[i], &xx);
3231      if (j == 1 && (xx == 'x' || xx == '-')) i += j; else goto syntaxerror;
3232      /* This field is the shared/private flag */
3233      j = readchar(&procmap_buf[i], &pp);
3234      if (j == 1 && (pp == 'p' || pp == '-' || pp == 's'))
3235                                              i += j; else goto syntaxerror;
3236
3237      j = readchar(&procmap_buf[i], &ch);
3238      if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3239
3240      j = readhex64(&procmap_buf[i], &foffset);
3241      if (j > 0) i += j; else goto syntaxerror;
3242
3243      j = readchar(&procmap_buf[i], &ch);
3244      if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3245
3246      j = readhex(&procmap_buf[i], &maj);
3247      if (j > 0) i += j; else goto syntaxerror;
3248      j = readchar(&procmap_buf[i], &ch);
3249      if (j == 1 && ch == ':') i += j; else goto syntaxerror;
3250      j = readhex(&procmap_buf[i], &min);
3251      if (j > 0) i += j; else goto syntaxerror;
3252
3253      j = readchar(&procmap_buf[i], &ch);
3254      if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3255
3256      j = readdec64(&procmap_buf[i], &ino);
3257      if (j > 0) i += j; else goto syntaxerror;
3258
3259      goto read_line_ok;
3260
3261    syntaxerror:
3262      VG_(debugLog)(0, "Valgrind:",
3263                       "FATAL: syntax error reading /proc/self/maps\n");
3264      { Int k, m;
3265        HChar buf50[51];
3266        m = 0;
3267        buf50[m] = 0;
3268        k = i - 50;
3269        if (k < 0) k = 0;
3270        for (; k <= i; k++) {
3271           buf50[m] = procmap_buf[k];
3272           buf50[m+1] = 0;
3273           if (m < 50-1) m++;
3274        }
3275        VG_(debugLog)(0, "procselfmaps", "Last 50 chars: '%s'\n", buf50);
3276      }
3277      ML_(am_exit)(1);
3278
3279    read_line_ok:
3280
3281      aspacem_assert(i < buf_n_tot);
3282
3283      /* Try and find the name of the file mapped to this segment, if
3284         it exists.  Note that file names can contain spaces. */
3285
3286      // Move i to the next non-space char, which should be either a '/',
3287      // a '[', or a newline.
3288      while (procmap_buf[i] == ' ') i++;
3289
3290      // Move i_eol to the end of the line.
3291      i_eol = i;
3292      while (procmap_buf[i_eol] != '\n') i_eol++;
3293
3294      // If there's a filename...
3295      if (procmap_buf[i] == '/') {
3296         /* Minor hack: put a '\0' at the filename end for the call to
3297            'record_mapping', then restore the old char with 'tmp'. */
3298         filename = &procmap_buf[i];
3299         tmp = filename[i_eol - i];
3300         filename[i_eol - i] = '\0';
3301      } else {
3302         tmp = 0;
3303         filename = NULL;
3304         foffset = 0;
3305      }
3306
3307      prot = 0;
3308      if (rr == 'r') prot |= VKI_PROT_READ;
3309      if (ww == 'w') prot |= VKI_PROT_WRITE;
3310      if (xx == 'x') prot |= VKI_PROT_EXEC;
3311
3312      /* Linux has two ways to encode a device number when it
3313         is exposed to user space (via fstat etc). The old way
3314         is the traditional unix scheme that produces a 16 bit
3315         device number with the top 8 being the major number and
3316         the bottom 8 the minor number.
3317
3318         The new scheme allows for a 12 bit major number and
3319         a 20 bit minor number by using a 32 bit device number
3320         and putting the top 12 bits of the minor number into
3321         the top 12 bits of the device number thus leaving an
3322         extra 4 bits for the major number.
3323
3324         If the major and minor numbers are both single-byte
3325         values then both schemes give the same result.  We
3326         therefore use the new scheme here, in case either
3327         number is outside the 0-255 range, and then use fstat64
3328         when available (or fstat on 64-bit systems), so that we
3329         should always have a new-style device number and
3330         everything should match. */
3331      dev = (min & 0xff) | (maj << 8) | ((min & ~0xff) << 12);
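      /* Worked example (illustrative values only): maj = 0x103 and
         min = 0x12345 give dev = 0x12300000 | 0x10300 | 0x45
         = 0x12310345, i.e. minor bits 19..8 land in dev bits 31..20,
         the major in dev bits 19..8, and minor bits 7..0 in dev
         bits 7..0. */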
3332
3333      if (record_gap && gapStart < start)
3334         (*record_gap) ( gapStart, start-gapStart );
3335
3336      if (record_mapping && start < endPlusOne)
3337         (*record_mapping) ( start, endPlusOne-start,
3338                             prot, dev, ino,
3339                             foffset, filename );
3340
3341      if ('\0' != tmp) {
3342         filename[i_eol - i] = tmp;
3343      }
3344
3345      i = i_eol + 1;
3346      gapStart = endPlusOne;
3347   }
3348
3349#  if defined(VGP_arm_linux)
3350   /* ARM puts code at the end of memory that contains processor-
3351      specific stuff (cmpxchg, getting the thread local storage, etc.).
3352      This isn't listed in /proc/self/maps, so do it here.  This
3353      kludgery causes the view of memory, as presented to
3354      record_gap/record_mapping, to actually reflect reality.  IMO
3355      (JRS, 2010-Jan-03) the fact that /proc/.../maps does not list
3356      the commpage should be regarded as a bug in the kernel. */
3357   { const Addr commpage_start = ARM_LINUX_FAKE_COMMPAGE_START;
3358     const Addr commpage_end1  = ARM_LINUX_FAKE_COMMPAGE_END1;
3359     if (gapStart < commpage_start) {
3360        if (record_gap)
3361           (*record_gap)( gapStart, commpage_start - gapStart );
3362        if (record_mapping)
3363           (*record_mapping)( commpage_start, commpage_end1 - commpage_start,
3364                              VKI_PROT_READ|VKI_PROT_EXEC,
3365                              0/*dev*/, 0/*ino*/, 0/*foffset*/,
3366                              NULL);
3367        gapStart = commpage_end1;
3368     }
3369   }
3370#  endif
3371
3372   if (record_gap && gapStart < Addr_MAX)
3373      (*record_gap) ( gapStart, Addr_MAX - gapStart + 1 );
3374}
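
/* A minimal sketch of the callback contract, using hypothetical names
   (count_mapping, count_gap, n_maps, n_gaps and count_all_mappings are
   illustrative only and not part of this file).  Either callback may
   be NULL if the caller is not interested in mappings or gaps:

      static Int n_maps = 0, n_gaps = 0;

      static void count_mapping ( Addr a, SizeT len, UInt prot,
                                  ULong dev, ULong ino, Off64T off,
                                  const HChar* fnm )
      { n_maps++; }

      static void count_gap ( Addr a, SizeT len )
      { n_gaps++; }

      static void count_all_mappings ( void )
      { parse_procselfmaps( &count_mapping, &count_gap ); }

   Mappings and gaps are reported in address order and together cover
   the whole range from Addr_MIN up to Addr_MAX. */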
3375
3376/*------END-procmaps-parser-for-Linux----------------------------*/
3377
3378/*------BEGIN-procmaps-parser-for-Darwin-------------------------*/
3379
3380#elif defined(VGO_darwin)
3381#include <mach/mach.h>
3382#include <mach/mach_vm.h>
3383
3384static unsigned int mach2vki(unsigned int vm_prot)
3385{
3386   return
3387      ((vm_prot & VM_PROT_READ)    ? VKI_PROT_READ    : 0) |
3388      ((vm_prot & VM_PROT_WRITE)   ? VKI_PROT_WRITE   : 0) |
3389      ((vm_prot & VM_PROT_EXECUTE) ? VKI_PROT_EXEC    : 0) ;
3390}
3391
3392static UInt stats_machcalls = 0;
3393
3394static void parse_procselfmaps (
3395      void (*record_mapping)( Addr addr, SizeT len, UInt prot,
3396                              ULong dev, ULong ino, Off64T offset,
3397                              const HChar* filename ),
3398      void (*record_gap)( Addr addr, SizeT len )
3399   )
3400{
3401   vm_address_t iter;
3402   unsigned int depth;
3403   vm_address_t last;
3404
3405   iter = 0;
3406   depth = 0;
3407   last = 0;
3408   while (1) {
3409      mach_vm_address_t addr = iter;
3410      mach_vm_size_t size;
3411      vm_region_submap_short_info_data_64_t info;
3412      kern_return_t kr;
3413
3414      while (1) {
3415         mach_msg_type_number_t info_count
3416            = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
3417         stats_machcalls++;
3418         kr = mach_vm_region_recurse(mach_task_self(), &addr, &size, &depth,
3419                                     (vm_region_info_t)&info, &info_count);
3420         if (kr)
3421            return;
3422         if (info.is_submap) {
3423            depth++;
3424            continue;
3425         }
3426         break;
3427      }
3428      iter = addr + size;
3429
3430      if (addr > last  &&  record_gap) {
3431         (*record_gap)(last, addr - last);
3432      }
3433      if (record_mapping) {
3434         (*record_mapping)(addr, size, mach2vki(info.protection),
3435                           0, 0, info.offset, NULL);
3436      }
3437      last = addr + size;
3438   }
3439
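   /* Note: the while-loop above is only ever left via the 'return'
      taken when mach_vm_region_recurse fails (typically once the whole
      address space has been walked), so the final-gap report below
      appears to be unreachable as the code stands. */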
3440   if ((Addr)-1 > last  &&  record_gap)
3441      (*record_gap)(last, (Addr)-1 - last);
3442}
3443
3444// Urr.  So much for thread safety.
3445static Bool        css_overflowed;
3446static ChangedSeg* css_local;
3447static Int         css_size_local;
3448static Int         css_used_local;
3449
3450static Addr Addr__max ( Addr a, Addr b ) { return a > b ? a : b; }
3451static Addr Addr__min ( Addr a, Addr b ) { return a < b ? a : b; }
3452
3453static void add_mapping_callback(Addr addr, SizeT len, UInt prot,
3454                                 ULong dev, ULong ino, Off64T offset,
3455                                 const HChar *filename)
3456{
3457   // derived from sync_check_mapping_callback()
3458
3459   /* JRS 2012-Mar-07: this all seems very dubious to me.  It would be
3460      safer to see if we can find, in V's segment collection, one
3461      single segment that completely covers the range [addr, +len)
3462      (and possibly more), and that has the exact same other
3463      properties (prot, dev, ino, offset, etc) as the data presented
3464      here.  If found, we just skip.  Otherwise add the data presented
3465      here into css_local[]. */
3466
3467   Int iLo, iHi, i;
3468
3469   if (len == 0) return;
3470
3471   /* The kernel should not give us wraparounds. */
3472   aspacem_assert(addr <= addr + len - 1);
3473
3474   iLo = find_nsegment_idx( addr );
3475   iHi = find_nsegment_idx( addr + len - 1 );
3476
3477   /* NSegments iLo .. iHi inclusive should agree with the presented
3478      data. */
3479   for (i = iLo; i <= iHi; i++) {
3480
3481      UInt seg_prot;
3482
3483      if (nsegments[i].kind == SkAnonV  ||  nsegments[i].kind == SkFileV) {
3484         /* Ignore V regions */
3485         continue;
3486      }
3487      else if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn) {
3488         /* Add mapping for SkFree / SkResvn regions */
3489         ChangedSeg* cs = &css_local[css_used_local];
3490         if (css_used_local < css_size_local) {
3491            cs->is_added = True;
3492            cs->start    = addr;
3493            cs->end      = addr + len - 1;
3494            cs->prot     = prot;
3495            cs->offset   = offset;
3496            css_used_local++;
3497         } else {
3498            css_overflowed = True;
3499         }
3500         return;
3501
3502      }
3503      else if (nsegments[i].kind == SkAnonC ||
3504               nsegments[i].kind == SkFileC ||
3505               nsegments[i].kind == SkShmC)
3506      {
3507         /* Check permissions on client regions */
3508         // GrP fixme
3509         seg_prot = 0;
3510         if (nsegments[i].hasR) seg_prot |= VKI_PROT_READ;
3511         if (nsegments[i].hasW) seg_prot |= VKI_PROT_WRITE;
3512#        if defined(VGA_x86)
3513         // GrP fixme sloppyXcheck
3514         // darwin: kernel X ignored and spuriously changes? (vm_copy)
3515         seg_prot |= (prot & VKI_PROT_EXEC);
3516#        else
3517         if (nsegments[i].hasX) seg_prot |= VKI_PROT_EXEC;
3518#        endif
3519         if (seg_prot != prot) {
3520             if (VG_(clo_trace_syscalls))
3521                 VG_(debugLog)(0,"aspacem","region %p..%p permission "
3522                                 "mismatch (kernel %x, V %x)\n",
3523                                 (void*)nsegments[i].start,
3524                                 (void*)(nsegments[i].end+1), prot, seg_prot);
3525            /* Add mapping for regions with protection changes */
3526            ChangedSeg* cs = &css_local[css_used_local];
3527            if (css_used_local < css_size_local) {
3528               cs->is_added = True;
3529               cs->start    = addr;
3530               cs->end      = addr + len - 1;
3531               cs->prot     = prot;
3532               cs->offset   = offset;
3533               css_used_local++;
3534            } else {
3535               css_overflowed = True;
3536            }
3537            return;
3538
3539         }
3540
3541      } else {
3542         aspacem_assert(0);
3543      }
3544   }
3545}
3546
3547static void remove_mapping_callback(Addr addr, SizeT len)
3548{
3549   // derived from sync_check_gap_callback()
3550
3551   Int iLo, iHi, i;
3552
3553   if (len == 0)
3554      return;
3555
3556   /* The kernel should not give us wraparounds. */
3557   aspacem_assert(addr <= addr + len - 1);
3558
3559   iLo = find_nsegment_idx( addr );
3560   iHi = find_nsegment_idx( addr + len - 1 );
3561
3562   /* NSegments iLo .. iHi inclusive should agree with the presented data. */
3563   for (i = iLo; i <= iHi; i++) {
3564      if (nsegments[i].kind != SkFree && nsegments[i].kind != SkResvn) {
3565         /* V has a mapping, kernel doesn't.  Add to css_local[],
3566            directives to chop off the part of the V mapping that
3567            falls within the gap that the kernel tells us is
3568            present. */
3569         ChangedSeg* cs = &css_local[css_used_local];
3570         if (css_used_local < css_size_local) {
3571            cs->is_added = False;
3572            cs->start    = Addr__max(nsegments[i].start, addr);
3573            cs->end      = Addr__min(nsegments[i].end,   addr + len - 1);
3574            aspacem_assert(VG_IS_PAGE_ALIGNED(cs->start));
3575            aspacem_assert(VG_IS_PAGE_ALIGNED(cs->end+1));
3576            /* I don't think the following should fail.  But if it
3577               does, just omit the css_used_local++ in the cases where
3578               it doesn't hold. */
3579            aspacem_assert(cs->start < cs->end);
3580            cs->prot     = 0;
3581            cs->offset   = 0;
3582            css_used_local++;
3583         } else {
3584            css_overflowed = True;
3585         }
3586      }
3587   }
3588}
3589
3590
3591// Returns False if 'css' wasn't big enough.
3592Bool VG_(get_changed_segments)(
3593      const HChar* when, const HChar* where, /*OUT*/ChangedSeg* css,
3594      Int css_size, /*OUT*/Int* css_used)
3595{
3596   static UInt stats_synccalls = 1;
3597   aspacem_assert(when && where);
3598
3599   if (0)
3600      VG_(debugLog)(0,"aspacem",
3601         "[%u,%u] VG_(get_changed_segments)(%s, %s)\n",
3602         stats_synccalls++, stats_machcalls, when, where
3603      );
3604
3605   css_overflowed = False;
3606   css_local = css;
3607   css_size_local = css_size;
3608   css_used_local = 0;
3609
3610   // Get the list of segs that need to be added/removed.
3611   parse_procselfmaps(&add_mapping_callback, &remove_mapping_callback);
3612
3613   *css_used = css_used_local;
3614
3615   if (css_overflowed) {
3616      aspacem_assert(css_used_local == css_size_local);
3617   }
3618
3619   return !css_overflowed;
3620}
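
/* A minimal sketch of how a caller might drive the function above; the
   names css, n_css and apply_changes_sketch are hypothetical and not
   part of this file:

      static void apply_changes_sketch ( void )
      {
         ChangedSeg css[100];
         Int        i, n_css = 0;
         if (!VG_(get_changed_segments)("sketch", "illustration",
                                        css, 100, &n_css))
            return; // css[] was too small; the caller must cope
         for (i = 0; i < n_css; i++) {
            if (css[i].is_added) {
               // css[i].start .. css[i].end was newly mapped by the
               // kernel, with protection css[i].prot and file offset
               // css[i].offset
            } else {
               // css[i].start .. css[i].end has been unmapped
            }
         }
      }
*/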
3621
3622#endif // defined(VGO_darwin)
3623
3624/*------END-procmaps-parser-for-Darwin---------------------------*/
3625
3626#endif // defined(VGO_linux) || defined(VGO_darwin)
3627
3628/*--------------------------------------------------------------------*/
3629/*--- end                                                          ---*/
3630/*--------------------------------------------------------------------*/
3631