IndirectRefTable.h revision 375fb116bcb817b37509ab579dbd55cdbb765cbf
/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef DALVIK_INDIRECTREFTABLE_H_
#define DALVIK_INDIRECTREFTABLE_H_

/*
 * Maintain a table of indirect references.  Used for local/global JNI
 * references.
 *
 * The table contains object references that are part of the GC root set.
 * When an object is added we return an IndirectRef that is not a valid
 * pointer but can be used to find the original value in O(1) time.
 * Conversions to and from indirect refs are performed on JNI method calls
 * in and out of the VM, so they need to be very fast.
 *
 * To be efficient for JNI local variable storage, we need to provide
 * operations that allow us to operate on segments of the table, where
 * segments are pushed and popped as if on a stack.  For example, deletion
 * of an entry should only succeed if it appears in the current segment,
 * and we want to be able to strip off the current segment quickly when
 * a method returns.  Additions to the table must be made in the current
 * segment even if space is available in an earlier area.
 *
 * A new segment is created when we call into native code from interpreted
 * code, or when we handle the JNI PushLocalFrame function.
 *
 * The GC must be able to scan the entire table quickly.
 *
 * In summary, these must be very fast:
 *  - adding or removing a segment
 *  - adding references to a new segment
 *  - converting an indirect reference back to an Object
 * These can be a little slower, but must still be pretty quick:
 *  - adding references to a "mature" segment
 *  - removing individual references
 *  - scanning the entire table straight through
 *
 * If there's more than one segment, we don't guarantee that the table
 * will fill completely before we fail due to lack of space.  We do ensure
 * that the current segment will pack tightly, which should satisfy JNI
 * requirements (e.g. EnsureLocalCapacity).
 *
 * To make everything fit nicely in 32-bit integers, the maximum size of
 * the table is capped at 64K.
 *
 * None of the table functions are synchronized.
 */
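
/*
 * Illustrative sketch (not part of the original header): the intended
 * segment lifecycle around a call into native code, using the functions
 * declared below.  "self->jniLocalRefTable", "argObj" and "argRef" are
 * hypothetical names standing in for wherever the thread keeps its local
 * ref table and the objects being passed; the real interpreter stores the
 * cookie in the interpreted stack frame and does the push/pop inline.
 *
 *   IndirectRefTable* pRef = &self->jniLocalRefTable;
 *   u4 cookie = dvmPushIndirectRefTableSegment(pRef);    // new segment
 *   IndirectRef argRef = dvmAppendToIndirectRefTable(pRef, cookie, argObj);
 *   // ... invoke the native method, passing argRef as the jobject ...
 *   dvmPopIndirectRefTableSegment(pRef, cookie);         // discard locals
 */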

/*
 * Indirect reference definition.  This must be interchangeable with JNI's
 * jobject, and it's convenient to let null be null, so we use void*.
 *
 * We need a 16-bit table index and a 2-bit reference type (global, local,
 * weak global).  Real object pointers will have zeroes in the low 2 or 3
 * bits (4- or 8-byte alignment), so it's useful to put the ref type
 * in the low bits and reserve zero as an invalid value.
 *
 * The remaining 14 bits can be used to detect stale indirect references.
 * For example, if objects don't move, we can use a hash of the original
 * Object* to make sure the entry hasn't been re-used.  (If the Object*
 * we find there doesn't match because of heap movement, we could do a
 * secondary check on the preserved hash value; this implies that creating
 * a global/local ref queries the hash value and forces it to be saved.)
 * This is only done when CheckJNI is enabled.
 *
 * A more rigorous approach would be to put a serial number in the extra
 * bits, and keep a copy of the serial number in a parallel table.  This is
 * easier when objects can move, but requires 2x the memory and additional
 * memory accesses on add/get.  It will catch additional problems, e.g.:
 * create iref1 for obj, delete iref1, create iref2 for same obj, lookup
 * iref1.  A pattern based on object bits will miss this.
 */
typedef void* IndirectRef;
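
/*
 * Worked example (a sketch based on the layout described above, as encoded
 * by dvmObjectToIndirectRef() later in this file): an indirect ref packs
 * the slot serial number, the table index, and the kind into one 32-bit
 * value, which can be pulled apart with plain shifts and masks.
 *
 *   u4 uref = (u4) iref;
 *   u4 kind   = uref & 0x03;           // low 2 bits: IndirectRefKind
 *   u4 index  = (uref >> 2) & 0xffff;  // next 16 bits: table index
 *   u4 serial = uref >> 20;            // remaining bits: slot serial
 *
 * So, for instance, serial 5 in slot 3 of the local table yields
 * (5 << 20) | (3 << 2) | kIndirectKindLocal == 0x0050000d.
 */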

/*
 * Indirect reference kind, used as the two low bits of IndirectRef.
 *
 * For convenience these match up with enum jobjectRefType from jni.h.
 */
enum IndirectRefKind {
    kIndirectKindInvalid    = 0,
    kIndirectKindLocal      = 1,
    kIndirectKindGlobal     = 2,
    kIndirectKindWeakGlobal = 3
};

/*
 * Extended debugging structure.  We keep a parallel array of these, one
 * per slot in the table.
 */
#define kIRTPrevCount   4
struct IndirectRefSlot {
    u4          serial;         /* slot serial */
    Object*     previous[kIRTPrevCount];
};
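
/*
 * Sketch of how this is meant to be used (the actual bookkeeping lives in
 * IndirectRefTable.cpp; names here are illustrative): when a slot receives
 * a new object, the serial is advanced and the object is remembered, so an
 * IndirectRef minted under an old serial no longer matches, and the
 * "previous" ring helps diagnose which objects the slot used to hold.
 *
 *   IndirectRefSlot* slot = &pRef->slotData[index];
 *   slot->serial++;                                  // invalidate old irefs
 *   slot->previous[slot->serial % kIRTPrevCount] = obj;
 */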

/*
 * Table definition.
 *
 * For the global reference table, the expected common operations are
 * adding a new entry and removing a recently-added entry (usually the
 * most-recently-added entry).  For JNI local references, the common
 * operations are adding a new entry and removing an entire table segment.
 *
 * If "allocEntries" is not equal to "maxEntries", the table may expand
 * when entries are added, which means the memory may move.  If you want
 * to keep pointers into "table" rather than offsets, you must use a
 * fixed-size table.
 *
 * If we delete entries from the middle of the list, we will be left with
 * "holes".  We track the number of holes so that, when adding new elements,
 * we can quickly decide to do a trivial append or go slot-hunting.
 *
 * When the top-most entry is removed, any holes immediately below it are
 * also removed.  Thus, deletion of an entry may reduce "topIndex" by more
 * than one.
 *
 * To get the desired behavior for JNI locals, we need to know the bottom
 * and top of the current "segment".  The top is managed internally, and
 * the bottom is passed in as a function argument (the VM keeps it in a
 * slot in the interpreted stack frame).  When we call a native method or
 * push a local frame, the current top index gets pushed on, and serves
 * as the new bottom.  When we pop a frame off, the value from the stack
 * becomes the new top index, and the value stored in the previous frame
 * becomes the new bottom.
 *
 * To avoid having to re-scan the table after a pop, we want to push the
 * number of holes in the table onto the stack.  Because of our 64K-entry
 * cap, we can combine the two into a single unsigned 32-bit value.
 * Instead of a "bottom" argument we take a "cookie", which includes the
 * bottom index and the count of holes below the bottom.
 *
 * We need to minimize method call/return overhead.  If we store the
 * "cookie" externally, on the interpreted call stack, the VM can handle
 * pushes and pops with a single 4-byte load and store.  (We could also
 * store it internally in a public structure, but the local JNI refs are
 * logically tied to interpreted stack frames anyway.)
 *
 * Common alternative implementation: make IndirectRef a pointer to the
 * actual reference slot.  Instead of getting a table and doing a lookup,
 * the lookup can be done instantly.  Operations like determining the
 * type and deleting the reference are more expensive because the table
 * must be hunted for (i.e. you have to do a pointer comparison to see
 * which table it's in), you can't move the table when expanding it (so
 * realloc() is out), and tricks like serial number checking to detect
 * stale references aren't possible (though we may be able to get similar
 * benefits with other approaches).
 *
 * TODO: consider a "lastDeleteIndex" for quick hole-filling when an
 * add immediately follows a delete; must invalidate after segment pop
 * (which could increase the cost/complexity of method call/return).
 * Might be worth only using it for JNI globals.
 *
 * TODO: may want completely different add/remove algorithms for global
 * and local refs to improve performance.  A large circular buffer might
 * reduce the amortized cost of adding global references.
 *
 * TODO: if we can guarantee that the underlying storage doesn't move,
 * e.g. by using oversized mmap regions to handle expanding tables, we may
 * be able to avoid having to synchronize lookups.  Might make sense to
 * add a "synchronized lookup" call that takes the mutex as an argument,
 * and either locks or doesn't lock based on internal details.
 */
union IRTSegmentState {
    u4          all;
    struct {
        u4      topIndex:16;            /* index of first unused entry */
        u4      numHoles:16;            /* #of holes in entire table */
    } parts;
};
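
/*
 * Example (sketch): the "cookie" handed back by a segment push is just the
 * segmentState word, so its two halves can be recovered with the union.
 *
 *   IRTSegmentState state;
 *   state.all = cookie;
 *   u4 bottomIndex = state.parts.topIndex;   // becomes the segment bottom
 *   u4 holesBelow  = state.parts.numHoles;   // holes below the new bottom
 *
 * On the little-endian targets the VM runs on this is equivalent to
 * bottomIndex == (cookie & 0xffff) and holesBelow == (cookie >> 16).
 */
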
struct IndirectRefTable {
    /* semi-public - read/write by interpreter in native call handler */
    IRTSegmentState segmentState;

    /* semi-public - read-only during GC scan; pointer must not be kept */
    Object**        table;              /* bottom of the stack */

    /* private */
    IndirectRefSlot* slotData;          /* extended debugging info */
    int             allocEntries;       /* #of entries we have space for */
    int             maxEntries;         /* max #of entries allowed */
    IndirectRefKind kind;               /* bit mask, ORed into all irefs */

    // TODO: want hole-filling stats (#of holes filled, total entries scanned)
    //       for performance evaluation.
};

/* use as initial value for "cookie", and when table has only one segment */
#define IRT_FIRST_SEGMENT   0

/*
 * (This is PRIVATE, but we want it inside other inlines in this header.)
 *
 * Indirectify the object.
 *
 * The object pointer itself is subject to relocation in some GC
 * implementations, so we shouldn't really be using it here.
 */
INLINE IndirectRef dvmObjectToIndirectRef(IndirectRefTable* pRef,
    Object* obj, u4 tableIndex, IndirectRefKind kind)
{
    assert(tableIndex < 65536);
    //u4 objChunk = (((u4) obj >> 3) ^ ((u4) obj >> 19)) & 0x3fff;
    //u4 uref = objChunk << 18 | (tableIndex << 2) | kind;
    u4 serialChunk = pRef->slotData[tableIndex].serial;
    u4 uref = serialChunk << 20 | (tableIndex << 2) | kind;
    return (IndirectRef) uref;
}

/*
 * (This is PRIVATE, but we want it inside other inlines in this header.)
 *
 * Extract the table index from an indirect reference.
 */
INLINE u4 dvmIndirectRefToIndex(IndirectRef iref)
{
    u4 uref = (u4) iref;
    return (uref >> 2) & 0xffff;
}

/*
 * Determine what kind of indirect reference this is.
 */
INLINE IndirectRefKind dvmGetIndirectRefType(IndirectRef iref)
{
    return (IndirectRefKind)((u4) iref & 0x03);
}

/*
 * Return a string constant describing the indirect ref type.
 */
const char* dvmIndirectRefTypeName(IndirectRef iref);

/*
 * Initialize an IndirectRefTable.
 *
 * If "initialCount" != "maxCount", the table will expand as required.
 *
 * "kind" should be Local or Global.  The Global table may also hold
 * WeakGlobal refs.
 *
 * Returns "false" if table allocation fails.
 */
bool dvmInitIndirectRefTable(IndirectRefTable* pRef, int initialCount,
    int maxCount, IndirectRefKind kind);
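
/*
 * Typical setup/teardown (sketch; the sizes are arbitrary examples):
 *
 *   IndirectRefTable table;
 *   if (!dvmInitIndirectRefTable(&table, 512, 51200, kIndirectKindGlobal)) {
 *       return false;                        // allocation failed
 *   }
 *   // ... add/get/remove entries ...
 *   dvmClearIndirectRefTable(&table);        // declared below; frees storage
 */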

/*
 * Clear out the contents, freeing allocated storage.  Does not free "pRef".
 *
 * You must call dvmInitIndirectRefTable() before you can re-use this table.
 */
void dvmClearIndirectRefTable(IndirectRefTable* pRef);

/*
 * Start a new segment at the top of the table.
 *
 * Returns an opaque 32-bit value that must be provided when the segment
 * is to be removed.
 *
 * IMPORTANT: this is implemented as a single instruction in mterp, rather
 * than a call here.  You can add debugging aids for the C-language
 * interpreters, but the basic implementation may not change.
 */
INLINE u4 dvmPushIndirectRefTableSegment(IndirectRefTable* pRef)
{
    return pRef->segmentState.all;
}

/* extra debugging checks */
bool dvmPopIndirectRefTableSegmentCheck(IndirectRefTable* pRef, u4 cookie);

/*
 * Remove one or more segments from the top.  The table entry identified
 * by "cookie" becomes the new top-most entry.
 *
 * IMPORTANT: this is implemented as a single instruction in mterp, rather
 * than a call here.  You can add debugging aids for the C-language
 * interpreters, but the basic implementation must not change.
 */
INLINE void dvmPopIndirectRefTableSegment(IndirectRefTable* pRef, u4 cookie)
{
    dvmPopIndirectRefTableSegmentCheck(pRef, cookie);
    pRef->segmentState.all = cookie;
}

/*
 * Return the #of entries in the entire table.  This includes holes, and
 * so may be larger than the actual number of "live" entries.
 */
INLINE size_t dvmIndirectRefTableEntries(const IndirectRefTable* pRef)
{
    return pRef->segmentState.parts.topIndex;
}

/*
 * Returns "true" if the table is full.  The table is considered full if
 * we would need to expand it to add another entry to the current segment.
 */
INLINE size_t dvmIsIndirectRefTableFull(const IndirectRefTable* pRef)
{
    return dvmIndirectRefTableEntries(pRef) == (size_t)pRef->allocEntries;
}

/*
 * Add a new entry.  "obj" must be a valid non-NULL object reference
 * (though it's okay if it's not fully-formed, e.g. the result from
 * dvmMalloc doesn't have obj->clazz set).
 *
 * Returns NULL if the table is full (max entries reached, or alloc
 * failed during expansion).
 */
IndirectRef dvmAddToIndirectRefTable(IndirectRefTable* pRef, u4 cookie,
    Object* obj);

/*
 * Add a new entry at the end.  Similar to Add but does not usually attempt
 * to fill in holes.  This is only appropriate to use right after a new
 * segment has been pushed.
 *
 * (This is intended for use when calling into a native JNI method, so
 * performance is critical.)
 */
INLINE IndirectRef dvmAppendToIndirectRefTable(IndirectRefTable* pRef,
    u4 cookie, Object* obj)
{
    int topIndex = pRef->segmentState.parts.topIndex;
    if (topIndex == pRef->allocEntries) {
        /* up against alloc or max limit, call the fancy version */
        return dvmAddToIndirectRefTable(pRef, cookie, obj);
    } else {
        IndirectRef result = dvmObjectToIndirectRef(pRef, obj, topIndex,
            pRef->kind);
        pRef->table[topIndex++] = obj;
        pRef->segmentState.parts.topIndex = topIndex;
        return result;
    }
}

/* extra debugging checks */
bool dvmGetFromIndirectRefTableCheck(IndirectRefTable* pRef, IndirectRef iref);

/* magic failure value; must not pass dvmIsValidObject() */
#define kInvalidIndirectRefObject ((Object*)0xdead4321)

/*
 * Given an IndirectRef in the table, return the Object it refers to.
 *
 * Returns kInvalidIndirectRefObject if iref is invalid.
 */
INLINE Object* dvmGetFromIndirectRefTable(IndirectRefTable* pRef,
    IndirectRef iref)
{
    if (!dvmGetFromIndirectRefTableCheck(pRef, iref))
        return kInvalidIndirectRefObject;

    int idx = dvmIndirectRefToIndex(iref);
    return pRef->table[idx];
}

/*
 * Remove an existing entry.
 *
 * If the entry is not between the current top index and the bottom index
 * specified by the cookie, we don't remove anything.  This is the behavior
 * required by JNI's DeleteLocalRef function.
 *
 * Returns "false" if nothing was removed.
 */
bool dvmRemoveFromIndirectRefTable(IndirectRefTable* pRef, u4 cookie,
    IndirectRef iref);
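
/*
 * End-to-end sketch (illustrative only) of the add / get / remove cycle for
 * a single reference within the current segment:
 *
 *   IndirectRef iref = dvmAddToIndirectRefTable(pRef, cookie, obj);
 *   if (iref == NULL) {
 *       // table full, or expansion failed
 *   }
 *   Object* check = dvmGetFromIndirectRefTable(pRef, iref);
 *   assert(check != kInvalidIndirectRefObject && check == obj);
 *   if (!dvmRemoveFromIndirectRefTable(pRef, cookie, iref)) {
 *       // iref was stale or outside the segment identified by "cookie"
 *   }
 */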

/*
 * Dump the contents of a reference table to the log file.
 *
 * The caller should lock any external sync before calling.
 */
void dvmDumpIndirectRefTable(const IndirectRefTable* pRef, const char* descr);

#endif  // DALVIK_INDIRECTREFTABLE_H_