Parcel.cpp revision e93390b0bd336cd1b69abebf6eeffd18881f531b
1/*
2 * Copyright (C) 2005 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "Parcel"
18//#define LOG_NDEBUG 0
19
20#include <binder/Parcel.h>
21
22#include <binder/IPCThreadState.h>
23#include <binder/Binder.h>
24#include <binder/BpBinder.h>
25#include <binder/ProcessState.h>
26#include <binder/Status.h>
27#include <binder/TextOutput.h>
28
29#include <errno.h>
30#include <utils/Debug.h>
31#include <utils/Log.h>
32#include <utils/String8.h>
33#include <utils/String16.h>
34#include <utils/misc.h>
35#include <utils/Flattenable.h>
36#include <cutils/ashmem.h>
37
38#include <private/binder/binder_module.h>
39#include <private/binder/Static.h>
40
41#include <inttypes.h>
42#include <stdio.h>
43#include <stdlib.h>
44#include <stdint.h>
45#include <sys/mman.h>
46
47#ifndef INT32_MAX
48#define INT32_MAX ((int32_t)(2147483647))
49#endif
50
51#define LOG_REFS(...)
52//#define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
53#define LOG_ALLOC(...)
54//#define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
55
56// ---------------------------------------------------------------------------
57
58// This macro should never be used at runtime, as too large a value
59// of s could cause an integer overflow. Instead, always use the
60// wrapper function pad_size().
61#define PAD_SIZE_UNSAFE(s) (((s)+3)&~3)
62
63static size_t pad_size(size_t s) {
64    if (s > (SIZE_T_MAX - 3)) {
65        abort();
66    }
67    return PAD_SIZE_UNSAFE(s);
68}
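// For example, the padding arithmetic above gives:
//   pad_size(0) == 0, pad_size(1) == 4, pad_size(4) == 4, pad_size(5) == 8.
// Every item written into a Parcel is rounded up this way so the data buffer
// stays 4-byte aligned; pad_size() aborts rather than silently wrapping when
// s is within 3 bytes of SIZE_T_MAX.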
69
70// Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
71#define STRICT_MODE_PENALTY_GATHER (0x40 << 16)
72
73// XXX This can be made public if we want to provide
74// support for typed data.
75struct small_flat_data
76{
77    uint32_t type;
78    uint32_t data;
79};
80
81namespace android {
82
83static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
84static size_t gParcelGlobalAllocSize = 0;
85static size_t gParcelGlobalAllocCount = 0;
86
87// Maximum size of a blob to transfer in-place.
88static const size_t BLOB_INPLACE_LIMIT = 16 * 1024;
89
90enum {
91    BLOB_INPLACE = 0,
92    BLOB_ASHMEM_IMMUTABLE = 1,
93    BLOB_ASHMEM_MUTABLE = 2,
94};
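// Blob wire format, as produced by writeBlob() and consumed by readBlob()
// below: an int32 type tag, followed either by the padded payload bytes
// themselves (BLOB_INPLACE) or by a file descriptor for an ashmem region
// holding the payload (BLOB_ASHMEM_IMMUTABLE / BLOB_ASHMEM_MUTABLE).  A
// payload larger than BLOB_INPLACE_LIMIT goes out of line only when the
// Parcel is currently allowed to carry file descriptors.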
95
96void acquire_object(const sp<ProcessState>& proc,
97    const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
98{
99    switch (obj.type) {
100        case BINDER_TYPE_BINDER:
101            if (obj.binder) {
102                LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie);
103                reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
104            }
105            return;
106        case BINDER_TYPE_WEAK_BINDER:
107            if (obj.binder)
108                reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who);
109            return;
110        case BINDER_TYPE_HANDLE: {
111            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
112            if (b != NULL) {
113                LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
114                b->incStrong(who);
115            }
116            return;
117        }
118        case BINDER_TYPE_WEAK_HANDLE: {
119            const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
120            if (b != NULL) b.get_refs()->incWeak(who);
121            return;
122        }
123        case BINDER_TYPE_FD: {
124            if (obj.cookie != 0) {
125                if (outAshmemSize != NULL) {
126                    // If we own an ashmem fd, keep track of how much memory it refers to.
127                    int size = ashmem_get_size_region(obj.handle);
128                    if (size > 0) {
129                        *outAshmemSize += size;
130                    }
131                }
132            }
133            return;
134        }
135    }
136
137    ALOGD("Invalid object type 0x%08x", obj.type);
138}
139
140void acquire_object(const sp<ProcessState>& proc,
141    const flat_binder_object& obj, const void* who)
142{
143    acquire_object(proc, obj, who, NULL);
144}
145
146static void release_object(const sp<ProcessState>& proc,
147    const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
148{
149    switch (obj.type) {
150        case BINDER_TYPE_BINDER:
151            if (obj.binder) {
152                LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie);
153                reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
154            }
155            return;
156        case BINDER_TYPE_WEAK_BINDER:
157            if (obj.binder)
158                reinterpret_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who);
159            return;
160        case BINDER_TYPE_HANDLE: {
161            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
162            if (b != NULL) {
163                LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
164                b->decStrong(who);
165            }
166            return;
167        }
168        case BINDER_TYPE_WEAK_HANDLE: {
169            const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
170            if (b != NULL) b.get_refs()->decWeak(who);
171            return;
172        }
173        case BINDER_TYPE_FD: {
174            if (obj.cookie != 0) {
175                if (outAshmemSize != NULL) {
176                    int size = ashmem_get_size_region(obj.handle);
177                    if (size > 0) {
178                        *outAshmemSize -= size;
179                    }
180                }
181                // Close the fd whether or not ashmem accounting was requested.
182                close(obj.handle);
183            }
184            return;
185        }
186    }
187
188    ALOGE("Invalid object type 0x%08x", obj.type);
189}
190
191void release_object(const sp<ProcessState>& proc,
192    const flat_binder_object& obj, const void* who)
193{
194    release_object(proc, obj, who, NULL);
195}
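// acquire_object() and release_object() must stay symmetric: every
// flat_binder_object recorded in a Parcel's object table is acquired once
// when it is written (see writeObject() and appendFrom()) and released once
// when the Parcel gives up its buffer (the destructor path through
// freeDataNoInit()).  The optional outAshmemSize argument lets the owning
// Parcel keep a running total of how much ashmem its BINDER_TYPE_FD entries
// pin in memory.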
196
197inline static status_t finish_flatten_binder(
198    const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
199{
200    return out->writeObject(flat, false);
201}
202
203status_t flatten_binder(const sp<ProcessState>& /*proc*/,
204    const sp<IBinder>& binder, Parcel* out)
205{
206    flat_binder_object obj;
207
208    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
209    if (binder != NULL) {
210        IBinder *local = binder->localBinder();
211        if (!local) {
212            BpBinder *proxy = binder->remoteBinder();
213            if (proxy == NULL) {
214                ALOGE("null proxy");
215            }
216            const int32_t handle = proxy ? proxy->handle() : 0;
217            obj.type = BINDER_TYPE_HANDLE;
218            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
219            obj.handle = handle;
220            obj.cookie = 0;
221        } else {
222            obj.type = BINDER_TYPE_BINDER;
223            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
224            obj.cookie = reinterpret_cast<uintptr_t>(local);
225        }
226    } else {
227        obj.type = BINDER_TYPE_BINDER;
228        obj.binder = 0;
229        obj.cookie = 0;
230    }
231
232    return finish_flatten_binder(binder, obj, out);
233}
234
235status_t flatten_binder(const sp<ProcessState>& /*proc*/,
236    const wp<IBinder>& binder, Parcel* out)
237{
238    flat_binder_object obj;
239
240    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
241    if (binder != NULL) {
242        sp<IBinder> real = binder.promote();
243        if (real != NULL) {
244            IBinder *local = real->localBinder();
245            if (!local) {
246                BpBinder *proxy = real->remoteBinder();
247                if (proxy == NULL) {
248                    ALOGE("null proxy");
249                }
250                const int32_t handle = proxy ? proxy->handle() : 0;
251                obj.type = BINDER_TYPE_WEAK_HANDLE;
252                obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
253                obj.handle = handle;
254                obj.cookie = 0;
255            } else {
256                obj.type = BINDER_TYPE_WEAK_BINDER;
257                obj.binder = reinterpret_cast<uintptr_t>(binder.get_refs());
258                obj.cookie = reinterpret_cast<uintptr_t>(binder.unsafe_get());
259            }
260            return finish_flatten_binder(real, obj, out);
261        }
262
263        // XXX How to deal?  In order to flatten the given binder,
264        // we need to probe it for information, which requires a primary
265        // reference...  but we don't have one.
266        //
267        // The OpenBinder implementation uses a dynamic_cast<> here,
268        // but we can't do that with the different reference counting
269        // implementation we are using.
270        ALOGE("Unable to flatten Binder weak reference!");
271        obj.type = BINDER_TYPE_BINDER;
272        obj.binder = 0;
273        obj.cookie = 0;
274        return finish_flatten_binder(NULL, obj, out);
275
276    } else {
277        obj.type = BINDER_TYPE_BINDER;
278        obj.binder = 0;
279        obj.cookie = 0;
280        return finish_flatten_binder(NULL, obj, out);
281    }
282}
283
284inline static status_t finish_unflatten_binder(
285    BpBinder* /*proxy*/, const flat_binder_object& /*flat*/,
286    const Parcel& /*in*/)
287{
288    return NO_ERROR;
289}
290
291status_t unflatten_binder(const sp<ProcessState>& proc,
292    const Parcel& in, sp<IBinder>* out)
293{
294    const flat_binder_object* flat = in.readObject(false);
295
296    if (flat) {
297        switch (flat->type) {
298            case BINDER_TYPE_BINDER:
299                *out = reinterpret_cast<IBinder*>(flat->cookie);
300                return finish_unflatten_binder(NULL, *flat, in);
301            case BINDER_TYPE_HANDLE:
302                *out = proc->getStrongProxyForHandle(flat->handle);
303                return finish_unflatten_binder(
304                    static_cast<BpBinder*>(out->get()), *flat, in);
305        }
306    }
307    return BAD_TYPE;
308}
309
310status_t unflatten_binder(const sp<ProcessState>& proc,
311    const Parcel& in, wp<IBinder>* out)
312{
313    const flat_binder_object* flat = in.readObject(false);
314
315    if (flat) {
316        switch (flat->type) {
317            case BINDER_TYPE_BINDER:
318                *out = reinterpret_cast<IBinder*>(flat->cookie);
319                return finish_unflatten_binder(NULL, *flat, in);
320            case BINDER_TYPE_WEAK_BINDER:
321                if (flat->binder != 0) {
322                    out->set_object_and_refs(
323                        reinterpret_cast<IBinder*>(flat->cookie),
324                        reinterpret_cast<RefBase::weakref_type*>(flat->binder));
325                } else {
326                    *out = NULL;
327                }
328                return finish_unflatten_binder(NULL, *flat, in);
329            case BINDER_TYPE_HANDLE:
330            case BINDER_TYPE_WEAK_HANDLE:
331                *out = proc->getWeakProxyForHandle(flat->handle);
332                return finish_unflatten_binder(
333                    static_cast<BpBinder*>(out->unsafe_get()), *flat, in);
334        }
335    }
336    return BAD_TYPE;
337}
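// flatten_binder() and unflatten_binder() are the two halves of the binder
// wire format: a local IBinder leaves this process as BINDER_TYPE_BINDER
// (carrying its weakref and object pointers), the kernel driver rewrites it
// into a BINDER_TYPE_HANDLE with a handle scoped to the receiving process,
// and getStrongProxyForHandle() turns that handle back into a BpBinder proxy
// on the far side.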
338
339// ---------------------------------------------------------------------------
340
341Parcel::Parcel()
342{
343    LOG_ALLOC("Parcel %p: constructing", this);
344    initState();
345}
346
347Parcel::~Parcel()
348{
349    freeDataNoInit();
350    LOG_ALLOC("Parcel %p: destroyed", this);
351}
352
353size_t Parcel::getGlobalAllocSize() {
354    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
355    size_t size = gParcelGlobalAllocSize;
356    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
357    return size;
358}
359
360size_t Parcel::getGlobalAllocCount() {
361    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
362    size_t count = gParcelGlobalAllocCount;
363    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
364    return count;
365}
366
367const uint8_t* Parcel::data() const
368{
369    return mData;
370}
371
372size_t Parcel::dataSize() const
373{
374    return (mDataSize > mDataPos ? mDataSize : mDataPos);
375}
376
377size_t Parcel::dataAvail() const
378{
379    // TODO: decide what to do about the possibility that this can
380    // report an available-data size that exceeds a Java int's max
381    // positive value, causing havoc.  Fortunately this will only
382    // happen if someone constructs a Parcel containing more than two
383    // gigabytes of data, which on typical phone hardware is simply
384    // not possible.
385    return dataSize() - dataPosition();
386}
387
388size_t Parcel::dataPosition() const
389{
390    return mDataPos;
391}
392
393size_t Parcel::dataCapacity() const
394{
395    return mDataCapacity;
396}
397
398status_t Parcel::setDataSize(size_t size)
399{
400    if (size > INT32_MAX) {
401        // don't accept size_t values which may have come from an
402        // inadvertent conversion from a negative int.
403        return BAD_VALUE;
404    }
405
406    status_t err;
407    err = continueWrite(size);
408    if (err == NO_ERROR) {
409        mDataSize = size;
410        ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
411    }
412    return err;
413}
414
415void Parcel::setDataPosition(size_t pos) const
416{
417    if (pos > INT32_MAX) {
418        // don't accept size_t values which may have come from an
419        // inadvertent conversion from a negative int.
420        abort();
421    }
422
423    mDataPos = pos;
424    mNextObjectHint = 0;
425}
426
427status_t Parcel::setDataCapacity(size_t size)
428{
429    if (size > INT32_MAX) {
430        // don't accept size_t values which may have come from an
431        // inadvertent conversion from a negative int.
432        return BAD_VALUE;
433    }
434
435    if (size > mDataCapacity) return continueWrite(size);
436    return NO_ERROR;
437}
438
439status_t Parcel::setData(const uint8_t* buffer, size_t len)
440{
441    if (len > INT32_MAX) {
442        // don't accept size_t values which may have come from an
443        // inadvertent conversion from a negative int.
444        return BAD_VALUE;
445    }
446
447    status_t err = restartWrite(len);
448    if (err == NO_ERROR) {
449        memcpy(const_cast<uint8_t*>(data()), buffer, len);
450        mDataSize = len;
451        mFdsKnown = false;
452    }
453    return err;
454}
455
456status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len)
457{
458    const sp<ProcessState> proc(ProcessState::self());
459    status_t err;
460    const uint8_t *data = parcel->mData;
461    const binder_size_t *objects = parcel->mObjects;
462    size_t size = parcel->mObjectsSize;
463    int startPos = mDataPos;
464    int firstIndex = -1, lastIndex = -2;
465
466    if (len == 0) {
467        return NO_ERROR;
468    }
469
470    if (len > INT32_MAX) {
471        // don't accept size_t values which may have come from an
472        // inadvertent conversion from a negative int.
473        return BAD_VALUE;
474    }
475
476    // range checks against the source parcel size
477    if ((offset > parcel->mDataSize)
478            || (len > parcel->mDataSize)
479            || (offset + len > parcel->mDataSize)) {
480        return BAD_VALUE;
481    }
482
483    // Count objects in range
484    for (int i = 0; i < (int) size; i++) {
485        size_t off = objects[i];
486        if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) {
487            if (firstIndex == -1) {
488                firstIndex = i;
489            }
490            lastIndex = i;
491        }
492    }
493    int numObjects = lastIndex - firstIndex + 1;
494
495    if ((mDataSize+len) > mDataCapacity) {
496        // grow data
497        err = growData(len);
498        if (err != NO_ERROR) {
499            return err;
500        }
501    }
502
503    // append data
504    memcpy(mData + mDataPos, data + offset, len);
505    mDataPos += len;
506    mDataSize += len;
507
508    err = NO_ERROR;
509
510    if (numObjects > 0) {
511        // grow objects
512        if (mObjectsCapacity < mObjectsSize + numObjects) {
513            size_t newSize = ((mObjectsSize + numObjects)*3)/2;
514            if (newSize < mObjectsSize) return NO_MEMORY;   // overflow
515            binder_size_t *objects =
516                (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
517            if (objects == (binder_size_t*)0) {
518                return NO_MEMORY;
519            }
520            mObjects = objects;
521            mObjectsCapacity = newSize;
522        }
523
524        // append and acquire objects
525        int idx = mObjectsSize;
526        for (int i = firstIndex; i <= lastIndex; i++) {
527            size_t off = objects[i] - offset + startPos;
528            mObjects[idx++] = off;
529            mObjectsSize++;
530
531            flat_binder_object* flat
532                = reinterpret_cast<flat_binder_object*>(mData + off);
533            acquire_object(proc, *flat, this, &mOpenAshmemSize);
534
535            if (flat->type == BINDER_TYPE_FD) {
536                // If this is a file descriptor, we need to dup it so the
537                // new Parcel now owns its own fd, and can declare that we
538                // officially know we have fds.
539                flat->handle = dup(flat->handle);
540                flat->cookie = 1;
541                mHasFds = mFdsKnown = true;
542                if (!mAllowFds) {
543                    err = FDS_NOT_ALLOWED;
544                }
545            }
546        }
547    }
548
549    return err;
550}
551
552bool Parcel::allowFds() const
553{
554    return mAllowFds;
555}
556
557bool Parcel::pushAllowFds(bool allowFds)
558{
559    const bool origValue = mAllowFds;
560    if (!allowFds) {
561        mAllowFds = false;
562    }
563    return origValue;
564}
565
566void Parcel::restoreAllowFds(bool lastValue)
567{
568    mAllowFds = lastValue;
569}
570
571bool Parcel::hasFileDescriptors() const
572{
573    if (!mFdsKnown) {
574        scanForFds();
575    }
576    return mHasFds;
577}
578
579// Write RPC headers.  (previously just the interface token)
580status_t Parcel::writeInterfaceToken(const String16& interface)
581{
582    writeInt32(IPCThreadState::self()->getStrictModePolicy() |
583               STRICT_MODE_PENALTY_GATHER);
584    // currently the interface identification token is just its name as a string
585    return writeString16(interface);
586}
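// The header written above, and consumed by enforceInterface() below, is:
//   int32    caller's StrictMode policy OR'd with STRICT_MODE_PENALTY_GATHER
//   String16 interface descriptor
//
// Caller-side sketch (the proxy `remote` and the descriptor are placeholders):
//   Parcel data, reply;
//   data.writeInterfaceToken(String16("com.example.IFoo"));
//   data.writeInt32(42);
//   remote->transact(IBinder::FIRST_CALL_TRANSACTION, data, &reply);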
587
588bool Parcel::checkInterface(IBinder* binder) const
589{
590    return enforceInterface(binder->getInterfaceDescriptor());
591}
592
593bool Parcel::enforceInterface(const String16& interface,
594                              IPCThreadState* threadState) const
595{
596    int32_t strictPolicy = readInt32();
597    if (threadState == NULL) {
598        threadState = IPCThreadState::self();
599    }
600    if ((threadState->getLastTransactionBinderFlags() &
601         IBinder::FLAG_ONEWAY) != 0) {
602      // For one-way calls, the callee is running entirely
603      // disconnected from the caller, so disable StrictMode entirely.
604      // Not only does disk/network usage not impact the caller, but
605      // there's no way to communicate back any violations anyway.
606      threadState->setStrictModePolicy(0);
607    } else {
608      threadState->setStrictModePolicy(strictPolicy);
609    }
610    const String16 str(readString16());
611    if (str == interface) {
612        return true;
613    } else {
614        ALOGW("**** enforceInterface() expected '%s' but read '%s'",
615                String8(interface).string(), String8(str).string());
616        return false;
617    }
618}
619
620const binder_size_t* Parcel::objects() const
621{
622    return mObjects;
623}
624
625size_t Parcel::objectsCount() const
626{
627    return mObjectsSize;
628}
629
630status_t Parcel::errorCheck() const
631{
632    return mError;
633}
634
635void Parcel::setError(status_t err)
636{
637    mError = err;
638}
639
640status_t Parcel::finishWrite(size_t len)
641{
642    if (len > INT32_MAX) {
643        // don't accept size_t values which may have come from an
644        // inadvertent conversion from a negative int.
645        return BAD_VALUE;
646    }
647
648    //printf("Finish write of %d\n", len);
649    mDataPos += len;
650    ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
651    if (mDataPos > mDataSize) {
652        mDataSize = mDataPos;
653        ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
654    }
655    //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
656    return NO_ERROR;
657}
658
659status_t Parcel::writeUnpadded(const void* data, size_t len)
660{
661    if (len > INT32_MAX) {
662        // don't accept size_t values which may have come from an
663        // inadvertent conversion from a negative int.
664        return BAD_VALUE;
665    }
666
667    size_t end = mDataPos + len;
668    if (end < mDataPos) {
669        // integer overflow
670        return BAD_VALUE;
671    }
672
673    if (end <= mDataCapacity) {
674restart_write:
675        memcpy(mData+mDataPos, data, len);
676        return finishWrite(len);
677    }
678
679    status_t err = growData(len);
680    if (err == NO_ERROR) goto restart_write;
681    return err;
682}
683
684status_t Parcel::write(const void* data, size_t len)
685{
686    if (len > INT32_MAX) {
687        // don't accept size_t values which may have come from an
688        // inadvertent conversion from a negative int.
689        return BAD_VALUE;
690    }
691
692    void* const d = writeInplace(len);
693    if (d) {
694        memcpy(d, data, len);
695        return NO_ERROR;
696    }
697    return mError;
698}
699
700void* Parcel::writeInplace(size_t len)
701{
702    if (len > INT32_MAX) {
703        // don't accept size_t values which may have come from an
704        // inadvertent conversion from a negative int.
705        return NULL;
706    }
707
708    const size_t padded = pad_size(len);
709
710    // sanity check for integer overflow
711    if (mDataPos+padded < mDataPos) {
712        return NULL;
713    }
714
715    if ((mDataPos+padded) <= mDataCapacity) {
716restart_write:
717        //printf("Writing %ld bytes, padded to %ld\n", len, padded);
718        uint8_t* const data = mData+mDataPos;
719
720        // Need to pad at end?
721        if (padded != len) {
722#if BYTE_ORDER == BIG_ENDIAN
723            static const uint32_t mask[4] = {
724                0x00000000, 0xffffff00, 0xffff0000, 0xff000000
725            };
726#endif
727#if BYTE_ORDER == LITTLE_ENDIAN
728            static const uint32_t mask[4] = {
729                0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
730            };
731#endif
732            //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len],
733            //    *reinterpret_cast<void**>(data+padded-4));
734            *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
735        }
736
737        finishWrite(padded);
738        return data;
739    }
740
741    status_t err = growData(padded);
742    if (err == NO_ERROR) goto restart_write;
743    return NULL;
744}
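// The mask above zeroes the pad bytes of the final word before returning the
// pointer: for example writeInplace(5) reserves 8 bytes and clears the
// trailing 3, so uninitialized heap contents never cross the IPC boundary.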
745
746status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<int8_t>>& val)
747{
748    if (!val) {
749        return writeInt32(-1);
750    }
751
752    return writeByteVector(*val);
753}
754
755status_t Parcel::writeByteVector(const std::vector<int8_t>& val)
756{
757    status_t status;
758    if (val.size() > std::numeric_limits<int32_t>::max()) {
759        status = BAD_VALUE;
760        return status;
761    }
762
763    status = writeInt32(val.size());
764    if (status != OK) {
765        return status;
766    }
767
768    void* data = writeInplace(val.size());
769    if (!data) {
770        status = BAD_VALUE;
771        return status;
772    }
773
774    memcpy(data, val.data(), val.size());
775    return status;
776}
777
778status_t Parcel::writeInt32Vector(const std::vector<int32_t>& val)
779{
780    return writeTypedVector(val, &Parcel::writeInt32);
781}
782
783status_t Parcel::writeInt32Vector(const std::unique_ptr<std::vector<int32_t>>& val)
784{
785    return writeNullableTypedVector(val, &Parcel::writeInt32);
786}
787
788status_t Parcel::writeInt64Vector(const std::vector<int64_t>& val)
789{
790    return writeTypedVector(val, &Parcel::writeInt64);
791}
792
793status_t Parcel::writeInt64Vector(const std::unique_ptr<std::vector<int64_t>>& val)
794{
795    return writeNullableTypedVector(val, &Parcel::writeInt64);
796}
797
798status_t Parcel::writeFloatVector(const std::vector<float>& val)
799{
800    return writeTypedVector(val, &Parcel::writeFloat);
801}
802
803status_t Parcel::writeFloatVector(const std::unique_ptr<std::vector<float>>& val)
804{
805    return writeNullableTypedVector(val, &Parcel::writeFloat);
806}
807
808status_t Parcel::writeDoubleVector(const std::vector<double>& val)
809{
810    return writeTypedVector(val, &Parcel::writeDouble);
811}
812
813status_t Parcel::writeDoubleVector(const std::unique_ptr<std::vector<double>>& val)
814{
815    return writeNullableTypedVector(val, &Parcel::writeDouble);
816}
817
818status_t Parcel::writeBoolVector(const std::vector<bool>& val)
819{
820    return writeTypedVector(val, &Parcel::writeBool);
821}
822
823status_t Parcel::writeBoolVector(const std::unique_ptr<std::vector<bool>>& val)
824{
825    return writeNullableTypedVector(val, &Parcel::writeBool);
826}
827
828status_t Parcel::writeCharVector(const std::vector<char16_t>& val)
829{
830    return writeTypedVector(val, &Parcel::writeChar);
831}
832
833status_t Parcel::writeCharVector(const std::unique_ptr<std::vector<char16_t>>& val)
834{
835    return writeNullableTypedVector(val, &Parcel::writeChar);
836}
837
838status_t Parcel::writeString16Vector(const std::vector<String16>& val)
839{
840    return writeTypedVector(val, &Parcel::writeString16);
841}
842
843status_t Parcel::writeString16Vector(
844        const std::unique_ptr<std::vector<std::unique_ptr<String16>>>& val)
845{
846    return writeNullableTypedVector(val, &Parcel::writeString16);
847}
848
849status_t Parcel::writeInt32(int32_t val)
850{
851    return writeAligned(val);
852}
853
854status_t Parcel::writeUint32(uint32_t val)
855{
856    return writeAligned(val);
857}
858
859status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
860    if (len > INT32_MAX) {
861        // don't accept size_t values which may have come from an
862        // inadvertent conversion from a negative int.
863        return BAD_VALUE;
864    }
865
866    if (!val) {
867        return writeInt32(-1);
868    }
869    status_t ret = writeInt32(static_cast<uint32_t>(len));
870    if (ret == NO_ERROR) {
871        ret = write(val, len * sizeof(*val));
872    }
873    return ret;
874}
875status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
876    if (len > INT32_MAX) {
877        // don't accept size_t values which may have come from an
878        // inadvertent conversion from a negative int.
879        return BAD_VALUE;
880    }
881
882    if (!val) {
883        return writeInt32(-1);
884    }
885    status_t ret = writeInt32(static_cast<uint32_t>(len));
886    if (ret == NO_ERROR) {
887        ret = write(val, len * sizeof(*val));
888    }
889    return ret;
890}
891
892status_t Parcel::writeBool(bool val)
893{
894    return writeInt32(int32_t(val));
895}
896
897status_t Parcel::writeChar(char16_t val)
898{
899    return writeInt32(int32_t(val));
900}
901
902status_t Parcel::writeByte(int8_t val)
903{
904    return writeInt32(int32_t(val));
905}
906
907status_t Parcel::writeInt64(int64_t val)
908{
909    return writeAligned(val);
910}
911
912status_t Parcel::writeUint64(uint64_t val)
913{
914    return writeAligned(val);
915}
916
917status_t Parcel::writePointer(uintptr_t val)
918{
919    return writeAligned<binder_uintptr_t>(val);
920}
921
922status_t Parcel::writeFloat(float val)
923{
924    return writeAligned(val);
925}
926
927#if defined(__mips__) && defined(__mips_hard_float)
928
929status_t Parcel::writeDouble(double val)
930{
931    union {
932        double d;
933        unsigned long long ll;
934    } u;
935    u.d = val;
936    return writeAligned(u.ll);
937}
938
939#else
940
941status_t Parcel::writeDouble(double val)
942{
943    return writeAligned(val);
944}
945
946#endif
947
948status_t Parcel::writeCString(const char* str)
949{
950    return write(str, strlen(str)+1);
951}
952
953status_t Parcel::writeString8(const String8& str)
954{
955    status_t err = writeInt32(str.bytes());
956    // Only write the string if its length is more than zero characters,
957    // as readString8 will only read if the length field is non-zero.
958    // This is slightly different from how writeString16 works.
959    if (str.bytes() > 0 && err == NO_ERROR) {
960        err = write(str.string(), str.bytes()+1);
961    }
962    return err;
963}
964
965status_t Parcel::writeString16(const std::unique_ptr<String16>& str)
966{
967    if (!str) {
968        return writeInt32(-1);
969    }
970
971    return writeString16(*str);
972}
973
974status_t Parcel::writeString16(const String16& str)
975{
976    return writeString16(str.string(), str.size());
977}
978
979status_t Parcel::writeString16(const char16_t* str, size_t len)
980{
981    if (str == NULL) return writeInt32(-1);
982
983    status_t err = writeInt32(len);
984    if (err == NO_ERROR) {
985        len *= sizeof(char16_t);
986        uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
987        if (data) {
988            memcpy(data, str, len);
989            *reinterpret_cast<char16_t*>(data+len) = 0;
990            return NO_ERROR;
991        }
992        err = mError;
993    }
994    return err;
995}
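// String16 wire format, as written above and read back by
// readString16Inplace(): an int32 character count, the UTF-16 code units,
// then an explicit char16_t NUL terminator, with the whole payload padded to
// a 4-byte boundary by writeInplace().  A NULL string is encoded as the
// single length value -1.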
996
997status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
998{
999    return flatten_binder(ProcessState::self(), val, this);
1000}
1001
1002status_t Parcel::writeStrongBinderVector(const std::vector<sp<IBinder>>& val)
1003{
1004    return writeTypedVector(val, &Parcel::writeStrongBinder);
1005}
1006
1007status_t Parcel::writeStrongBinderVector(const std::unique_ptr<std::vector<sp<IBinder>>>& val)
1008{
1009    return writeNullableTypedVector(val, &Parcel::writeStrongBinder);
1010}
1011
1012status_t Parcel::readStrongBinderVector(std::unique_ptr<std::vector<sp<IBinder>>>* val) const {
1013    return readNullableTypedVector(val, &Parcel::readStrongBinder);
1014}
1015
1016status_t Parcel::readStrongBinderVector(std::vector<sp<IBinder>>* val) const {
1017    return readTypedVector(val, &Parcel::readStrongBinder);
1018}
1019
1020status_t Parcel::writeWeakBinder(const wp<IBinder>& val)
1021{
1022    return flatten_binder(ProcessState::self(), val, this);
1023}
1024
1025status_t Parcel::writeRawNullableParcelable(const Parcelable* parcelable) {
1026    if (!parcelable) {
1027        return writeInt32(0);
1028    }
1029
1030    return writeParcelable(*parcelable);
1031}
1032
1033status_t Parcel::writeParcelable(const Parcelable& parcelable) {
1034    status_t status = writeInt32(1);  // parcelable is not null.
1035    if (status != OK) {
1036        return status;
1037    }
1038    return parcelable.writeToParcel(this);
1039}
1040
1041status_t Parcel::writeNativeHandle(const native_handle* handle)
1042{
1043    if (!handle || handle->version != sizeof(native_handle))
1044        return BAD_TYPE;
1045
1046    status_t err;
1047    err = writeInt32(handle->numFds);
1048    if (err != NO_ERROR) return err;
1049
1050    err = writeInt32(handle->numInts);
1051    if (err != NO_ERROR) return err;
1052
1053    for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
1054        err = writeDupFileDescriptor(handle->data[i]);
1055
1056    if (err != NO_ERROR) {
1057        ALOGD("write native handle, write dup fd failed");
1058        return err;
1059    }
1060    err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
1061    return err;
1062}
1063
1064status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership)
1065{
1066    flat_binder_object obj;
1067    obj.type = BINDER_TYPE_FD;
1068    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
1069    obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
1070    obj.handle = fd;
1071    obj.cookie = takeOwnership ? 1 : 0;
1072    return writeObject(obj, true);
1073}
1074
1075status_t Parcel::writeDupFileDescriptor(int fd)
1076{
1077    int dupFd = dup(fd);
1078    if (dupFd < 0) {
1079        return -errno;
1080    }
1081    status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
1082    if (err != OK) {
1083        close(dupFd);
1084    }
1085    return err;
1086}
1087
1088status_t Parcel::writeUniqueFileDescriptor(const ScopedFd& fd) {
1089    return writeDupFileDescriptor(fd.get());
1090}
1091
1092status_t Parcel::writeUniqueFileDescriptorVector(const std::vector<ScopedFd>& val) {
1093    return writeTypedVector(val, &Parcel::writeUniqueFileDescriptor);
1094}
1095
1096status_t Parcel::writeUniqueFileDescriptorVector(const std::unique_ptr<std::vector<ScopedFd>>& val) {
1097    return writeNullableTypedVector(val, &Parcel::writeUniqueFileDescriptor);
1098}
1099
1100status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob)
1101{
1102    if (len > INT32_MAX) {
1103        // don't accept size_t values which may have come from an
1104        // inadvertent conversion from a negative int.
1105        return BAD_VALUE;
1106    }
1107
1108    status_t status;
1109    if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) {
1110        ALOGV("writeBlob: write in place");
1111        status = writeInt32(BLOB_INPLACE);
1112        if (status) return status;
1113
1114        void* ptr = writeInplace(len);
1115        if (!ptr) return NO_MEMORY;
1116
1117        outBlob->init(-1, ptr, len, false);
1118        return NO_ERROR;
1119    }
1120
1121    ALOGV("writeBlob: write to ashmem");
1122    int fd = ashmem_create_region("Parcel Blob", len);
1123    if (fd < 0) return NO_MEMORY;
1124
1125    int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
1126    if (result < 0) {
1127        status = result;
1128    } else {
1129        void* ptr = ::mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1130        if (ptr == MAP_FAILED) {
1131            status = -errno;
1132        } else {
1133            if (!mutableCopy) {
1134                result = ashmem_set_prot_region(fd, PROT_READ);
1135            }
1136            if (result < 0) {
1137                status = result;
1138            } else {
1139                status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE);
1140                if (!status) {
1141                    status = writeFileDescriptor(fd, true /*takeOwnership*/);
1142                    if (!status) {
1143                        outBlob->init(fd, ptr, len, mutableCopy);
1144                        return NO_ERROR;
1145                    }
1146                }
1147            }
1148        }
1149        ::munmap(ptr, len);
1150    }
1151    ::close(fd);
1152    return status;
1153}
1154
1155status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd)
1156{
1157    // Must match up with what's done in writeBlob.
1158    if (!mAllowFds) return FDS_NOT_ALLOWED;
1159    status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE);
1160    if (status) return status;
1161    return writeDupFileDescriptor(fd);
1162}
1163
1164status_t Parcel::write(const FlattenableHelperInterface& val)
1165{
1166    status_t err;
1167
1168    // sizes needed to flatten val
1169    const size_t len = val.getFlattenedSize();
1170    const size_t fd_count = val.getFdCount();
1171
1172    if ((len > INT32_MAX) || (fd_count > INT32_MAX)) {
1173        // don't accept size_t values which may have come from an
1174        // inadvertent conversion from a negative int.
1175        return BAD_VALUE;
1176    }
1177
1178    err = this->writeInt32(len);
1179    if (err) return err;
1180
1181    err = this->writeInt32(fd_count);
1182    if (err) return err;
1183
1184    // payload
1185    void* const buf = this->writeInplace(pad_size(len));
1186    if (buf == NULL)
1187        return BAD_VALUE;
1188
1189    int* fds = NULL;
1190    if (fd_count) {
1191        fds = new int[fd_count];
1192    }
1193
1194    err = val.flatten(buf, len, fds, fd_count);
1195    for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
1196        err = this->writeDupFileDescriptor( fds[i] );
1197    }
1198
1199    if (fd_count) {
1200        delete [] fds;
1201    }
1202
1203    return err;
1204}
1205
1206status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
1207{
1208    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
1209    const bool enoughObjects = mObjectsSize < mObjectsCapacity;
1210    if (enoughData && enoughObjects) {
1211restart_write:
1212        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;
1213
1214        // remember if it's a file descriptor
1215        if (val.type == BINDER_TYPE_FD) {
1216            if (!mAllowFds) {
1217                // fail before modifying our object index
1218                return FDS_NOT_ALLOWED;
1219            }
1220            mHasFds = mFdsKnown = true;
1221        }
1222
1223        // Need to write meta-data?
1224        if (nullMetaData || val.binder != 0) {
1225            mObjects[mObjectsSize] = mDataPos;
1226            acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize);
1227            mObjectsSize++;
1228        }
1229
1230        return finishWrite(sizeof(flat_binder_object));
1231    }
1232
1233    if (!enoughData) {
1234        const status_t err = growData(sizeof(val));
1235        if (err != NO_ERROR) return err;
1236    }
1237    if (!enoughObjects) {
1238        size_t newSize = ((mObjectsSize+2)*3)/2;
1239        if (newSize < mObjectsSize) return NO_MEMORY;   // overflow
1240        binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
1241        if (objects == NULL) return NO_MEMORY;
1242        mObjects = objects;
1243        mObjectsCapacity = newSize;
1244    }
1245
1246    goto restart_write;
1247}
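// mObjects is the table of byte offsets within mData at which
// flat_binder_object entries live.  The kernel driver walks this table when
// the buffer is transmitted (see ipcObjects()/ipcObjectsCount()) to translate
// binder handles and file descriptors for the receiving process, which is why
// writeObject() grows it in step with the data buffer.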
1248
1249status_t Parcel::writeNoException()
1250{
1251    binder::Status status;
1252    return status.writeToParcel(this);
1253}
1254
1255void Parcel::remove(size_t /*start*/, size_t /*amt*/)
1256{
1257    LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
1258}
1259
1260status_t Parcel::read(void* outData, size_t len) const
1261{
1262    if (len > INT32_MAX) {
1263        // don't accept size_t values which may have come from an
1264        // inadvertent conversion from a negative int.
1265        return BAD_VALUE;
1266    }
1267
1268    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1269            && len <= pad_size(len)) {
1270        memcpy(outData, mData+mDataPos, len);
1271        mDataPos += pad_size(len);
1272        ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
1273        return NO_ERROR;
1274    }
1275    return NOT_ENOUGH_DATA;
1276}
1277
1278const void* Parcel::readInplace(size_t len) const
1279{
1280    if (len > INT32_MAX) {
1281        // don't accept size_t values which may have come from an
1282        // inadvertent conversion from a negative int.
1283        return NULL;
1284    }
1285
1286    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1287            && len <= pad_size(len)) {
1288        const void* data = mData+mDataPos;
1289        mDataPos += pad_size(len);
1290        ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
1291        return data;
1292    }
1293    return NULL;
1294}
1295
1296template<class T>
1297status_t Parcel::readAligned(T *pArg) const {
1298    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1299
1300    if ((mDataPos+sizeof(T)) <= mDataSize) {
1301        const void* data = mData+mDataPos;
1302        mDataPos += sizeof(T);
1303        *pArg =  *reinterpret_cast<const T*>(data);
1304        return NO_ERROR;
1305    } else {
1306        return NOT_ENOUGH_DATA;
1307    }
1308}
1309
1310template<class T>
1311T Parcel::readAligned() const {
1312    T result;
1313    if (readAligned(&result) != NO_ERROR) {
1314        result = 0;
1315    }
1316
1317    return result;
1318}
1319
1320template<class T>
1321status_t Parcel::writeAligned(T val) {
1322    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1323
1324    if ((mDataPos+sizeof(val)) <= mDataCapacity) {
1325restart_write:
1326        *reinterpret_cast<T*>(mData+mDataPos) = val;
1327        return finishWrite(sizeof(val));
1328    }
1329
1330    status_t err = growData(sizeof(val));
1331    if (err == NO_ERROR) goto restart_write;
1332    return err;
1333}
1334
1335status_t Parcel::readByteVector(std::vector<int8_t>* val) const {
1336    val->clear();
1337
1338    int32_t size;
1339    status_t status = readInt32(&size);
1340
1341    if (status != OK) {
1342        return status;
1343    }
1344
1345    if (size < 0) {
1346        status = UNEXPECTED_NULL;
1347        return status;
1348    }
1349    if (size_t(size) > dataAvail()) {
1350        status = BAD_VALUE;
1351        return status;
1352    }
1353
1354    const void* data = readInplace(size);
1355    if (!data) {
1356        status = BAD_VALUE;
1357        return status;
1358    }
1359    val->resize(size);
1360    memcpy(val->data(), data, size);
1361
1362    return status;
1363}
1364
1365status_t Parcel::readByteVector(std::unique_ptr<std::vector<int8_t>>* val) const {
1366    const int32_t start = dataPosition();
1367    int32_t size;
1368    status_t status = readInt32(&size);
1369    val->reset();
1370
1371    if (status != OK || size < 0) {
1372        return status;
1373    }
1374
1375    setDataPosition(start);
1376    val->reset(new std::vector<int8_t>());
1377
1378    status = readByteVector(val->get());
1379
1380    if (status != OK) {
1381        val->reset();
1382    }
1383
1384    return status;
1385}
1386
1387status_t Parcel::readInt32Vector(std::unique_ptr<std::vector<int32_t>>* val) const {
1388    return readNullableTypedVector(val, &Parcel::readInt32);
1389}
1390
1391status_t Parcel::readInt32Vector(std::vector<int32_t>* val) const {
1392    return readTypedVector(val, &Parcel::readInt32);
1393}
1394
1395status_t Parcel::readInt64Vector(std::unique_ptr<std::vector<int64_t>>* val) const {
1396    return readNullableTypedVector(val, &Parcel::readInt64);
1397}
1398
1399status_t Parcel::readInt64Vector(std::vector<int64_t>* val) const {
1400    return readTypedVector(val, &Parcel::readInt64);
1401}
1402
1403status_t Parcel::readFloatVector(std::unique_ptr<std::vector<float>>* val) const {
1404    return readNullableTypedVector(val, &Parcel::readFloat);
1405}
1406
1407status_t Parcel::readFloatVector(std::vector<float>* val) const {
1408    return readTypedVector(val, &Parcel::readFloat);
1409}
1410
1411status_t Parcel::readDoubleVector(std::unique_ptr<std::vector<double>>* val) const {
1412    return readNullableTypedVector(val, &Parcel::readDouble);
1413}
1414
1415status_t Parcel::readDoubleVector(std::vector<double>* val) const {
1416    return readTypedVector(val, &Parcel::readDouble);
1417}
1418
1419status_t Parcel::readBoolVector(std::unique_ptr<std::vector<bool>>* val) const {
1420    const int32_t start = dataPosition();
1421    int32_t size;
1422    status_t status = readInt32(&size);
1423    val->reset();
1424
1425    if (status != OK || size < 0) {
1426        return status;
1427    }
1428
1429    setDataPosition(start);
1430    val->reset(new std::vector<bool>());
1431
1432    status = readBoolVector(val->get());
1433
1434    if (status != OK) {
1435        val->reset();
1436    }
1437
1438    return status;
1439}
1440
1441status_t Parcel::readBoolVector(std::vector<bool>* val) const {
1442    int32_t size;
1443    status_t status = readInt32(&size);
1444
1445    if (status != OK) {
1446        return status;
1447    }
1448
1449    if (size < 0) {
1450        return UNEXPECTED_NULL;
1451    }
1452
1453    val->resize(size);
1454
1455    /* C++ bool handling means a vector of bools isn't necessarily addressable
1456     * (we might use individual bits)
1457     */
1458    bool data;
1459    for (int32_t i = 0; i < size; ++i) {
1460        status = readBool(&data);
1461        (*val)[i] = data;
1462
1463        if (status != OK) {
1464            return status;
1465        }
1466    }
1467
1468    return OK;
1469}
1470
1471status_t Parcel::readCharVector(std::unique_ptr<std::vector<char16_t>>* val) const {
1472    return readNullableTypedVector(val, &Parcel::readChar);
1473}
1474
1475status_t Parcel::readCharVector(std::vector<char16_t>* val) const {
1476    return readTypedVector(val, &Parcel::readChar);
1477}
1478
1479status_t Parcel::readString16Vector(
1480        std::unique_ptr<std::vector<std::unique_ptr<String16>>>* val) const {
1481    return readNullableTypedVector(val, &Parcel::readString16);
1482}
1483
1484status_t Parcel::readString16Vector(std::vector<String16>* val) const {
1485    return readTypedVector(val, &Parcel::readString16);
1486}
1487
1488
1489status_t Parcel::readInt32(int32_t *pArg) const
1490{
1491    return readAligned(pArg);
1492}
1493
1494int32_t Parcel::readInt32() const
1495{
1496    return readAligned<int32_t>();
1497}
1498
1499status_t Parcel::readUint32(uint32_t *pArg) const
1500{
1501    return readAligned(pArg);
1502}
1503
1504uint32_t Parcel::readUint32() const
1505{
1506    return readAligned<uint32_t>();
1507}
1508
1509status_t Parcel::readInt64(int64_t *pArg) const
1510{
1511    return readAligned(pArg);
1512}
1513
1514
1515int64_t Parcel::readInt64() const
1516{
1517    return readAligned<int64_t>();
1518}
1519
1520status_t Parcel::readUint64(uint64_t *pArg) const
1521{
1522    return readAligned(pArg);
1523}
1524
1525uint64_t Parcel::readUint64() const
1526{
1527    return readAligned<uint64_t>();
1528}
1529
1530status_t Parcel::readPointer(uintptr_t *pArg) const
1531{
1532    status_t ret;
1533    binder_uintptr_t ptr;
1534    ret = readAligned(&ptr);
1535    if (!ret)
1536        *pArg = ptr;
1537    return ret;
1538}
1539
1540uintptr_t Parcel::readPointer() const
1541{
1542    return readAligned<binder_uintptr_t>();
1543}
1544
1545
1546status_t Parcel::readFloat(float *pArg) const
1547{
1548    return readAligned(pArg);
1549}
1550
1551
1552float Parcel::readFloat() const
1553{
1554    return readAligned<float>();
1555}
1556
1557#if defined(__mips__) && defined(__mips_hard_float)
1558
1559status_t Parcel::readDouble(double *pArg) const
1560{
1561    union {
1562      double d;
1563      unsigned long long ll;
1564    } u;
1565    u.d = 0;
1566    status_t status;
1567    status = readAligned(&u.ll);
1568    *pArg = u.d;
1569    return status;
1570}
1571
1572double Parcel::readDouble() const
1573{
1574    union {
1575      double d;
1576      unsigned long long ll;
1577    } u;
1578    u.ll = readAligned<unsigned long long>();
1579    return u.d;
1580}
1581
1582#else
1583
1584status_t Parcel::readDouble(double *pArg) const
1585{
1586    return readAligned(pArg);
1587}
1588
1589double Parcel::readDouble() const
1590{
1591    return readAligned<double>();
1592}
1593
1594#endif
1595
1596status_t Parcel::readIntPtr(intptr_t *pArg) const
1597{
1598    return readAligned(pArg);
1599}
1600
1601
1602intptr_t Parcel::readIntPtr() const
1603{
1604    return readAligned<intptr_t>();
1605}
1606
1607status_t Parcel::readBool(bool *pArg) const
1608{
1609    int32_t tmp;
1610    status_t ret = readInt32(&tmp);
1611    *pArg = (tmp != 0);
1612    return ret;
1613}
1614
1615bool Parcel::readBool() const
1616{
1617    return readInt32() != 0;
1618}
1619
1620status_t Parcel::readChar(char16_t *pArg) const
1621{
1622    int32_t tmp;
1623    status_t ret = readInt32(&tmp);
1624    *pArg = char16_t(tmp);
1625    return ret;
1626}
1627
1628char16_t Parcel::readChar() const
1629{
1630    return char16_t(readInt32());
1631}
1632
1633status_t Parcel::readByte(int8_t *pArg) const
1634{
1635    int32_t tmp;
1636    status_t ret = readInt32(&tmp);
1637    *pArg = int8_t(tmp);
1638    return ret;
1639}
1640
1641int8_t Parcel::readByte() const
1642{
1643    return int8_t(readInt32());
1644}
1645
1646const char* Parcel::readCString() const
1647{
1648    const size_t avail = mDataSize-mDataPos;
1649    if (avail > 0) {
1650        const char* str = reinterpret_cast<const char*>(mData+mDataPos);
1651        // is the string's trailing NUL within the parcel's valid bounds?
1652        const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
1653        if (eos) {
1654            const size_t len = eos - str;
1655            mDataPos += pad_size(len+1);
1656            ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
1657            return str;
1658        }
1659    }
1660    return NULL;
1661}
1662
1663String8 Parcel::readString8() const
1664{
1665    int32_t size = readInt32();
1666    // watch for potential int overflow adding 1 for trailing NUL
1667    if (size > 0 && size < INT32_MAX) {
1668        const char* str = (const char*)readInplace(size+1);
1669        if (str) return String8(str, size);
1670    }
1671    return String8();
1672}
1673
1674String16 Parcel::readString16() const
1675{
1676    size_t len;
1677    const char16_t* str = readString16Inplace(&len);
1678    if (str) return String16(str, len);
1679    ALOGE("Reading a NULL string not supported here.");
1680    return String16();
1681}
1682
1683status_t Parcel::readString16(std::unique_ptr<String16>* pArg) const
1684{
1685    const int32_t start = dataPosition();
1686    int32_t size;
1687    status_t status = readInt32(&size);
1688    pArg->reset();
1689
1690    if (status != OK || size < 0) {
1691        return status;
1692    }
1693
1694    setDataPosition(start);
1695    pArg->reset(new String16());
1696
1697    status = readString16(pArg->get());
1698
1699    if (status != OK) {
1700        pArg->reset();
1701    }
1702
1703    return status;
1704}
1705
1706status_t Parcel::readString16(String16* pArg) const
1707{
1708    size_t len;
1709    const char16_t* str = readString16Inplace(&len);
1710    if (str) {
1711        pArg->setTo(str, len);
1712        return 0;
1713    } else {
1714        *pArg = String16();
1715        return UNEXPECTED_NULL;
1716    }
1717}
1718
1719const char16_t* Parcel::readString16Inplace(size_t* outLen) const
1720{
1721    int32_t size = readInt32();
1722    // watch for potential int overflow from size+1
1723    if (size >= 0 && size < INT32_MAX) {
1724        *outLen = size;
1725        const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
1726        if (str != NULL) {
1727            return str;
1728        }
1729    }
1730    *outLen = 0;
1731    return NULL;
1732}
1733
1734status_t Parcel::readStrongBinder(sp<IBinder>* val) const
1735{
1736    return unflatten_binder(ProcessState::self(), *this, val);
1737}
1738
1739sp<IBinder> Parcel::readStrongBinder() const
1740{
1741    sp<IBinder> val;
1742    readStrongBinder(&val);
1743    return val;
1744}
1745
1746wp<IBinder> Parcel::readWeakBinder() const
1747{
1748    wp<IBinder> val;
1749    unflatten_binder(ProcessState::self(), *this, &val);
1750    return val;
1751}
1752
1753status_t Parcel::readParcelable(Parcelable* parcelable) const {
1754    int32_t have_parcelable = 0;
1755    status_t status = readInt32(&have_parcelable);
1756    if (status != OK) {
1757        return status;
1758    }
1759    if (!have_parcelable) {
1760        return UNEXPECTED_NULL;
1761    }
1762    return parcelable->readFromParcel(this);
1763}
1764
1765int32_t Parcel::readExceptionCode() const
1766{
1767    binder::Status status;
1768    status.readFromParcel(*this);
1769    return status.exceptionCode();
1770}
1771
1772native_handle* Parcel::readNativeHandle() const
1773{
1774    int numFds, numInts;
1775    status_t err;
1776    err = readInt32(&numFds);
1777    if (err != NO_ERROR) return 0;
1778    err = readInt32(&numInts);
1779    if (err != NO_ERROR) return 0;
1780
1781    native_handle* h = native_handle_create(numFds, numInts);
1782    if (!h) {
1783        return 0;
1784    }
1785
1786    for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
1787        h->data[i] = dup(readFileDescriptor());
1788        if (h->data[i] < 0) err = BAD_VALUE;
1789    }
1790    err = read(h->data + numFds, sizeof(int)*numInts);
1791    if (err != NO_ERROR) {
1792        native_handle_close(h);
1793        native_handle_delete(h);
1794        h = 0;
1795    }
1796    return h;
1797}
1798
1799
1800int Parcel::readFileDescriptor() const
1801{
1802    const flat_binder_object* flat = readObject(true);
1803
1804    if (flat && flat->type == BINDER_TYPE_FD) {
1805        return flat->handle;
1806    }
1807
1808    return BAD_TYPE;
1809}
1810
1811status_t Parcel::readUniqueFileDescriptor(ScopedFd* val) const
1812{
1813    int got = readFileDescriptor();
1814
1815    if (got == BAD_TYPE) {
1816        return BAD_TYPE;
1817    }
1818
1819    val->reset(dup(got));
1820
1821    if (val->get() < 0) {
1822        return BAD_VALUE;
1823    }
1824
1825    return OK;
1826}
1827
1828
1829status_t Parcel::readUniqueFileDescriptorVector(std::unique_ptr<std::vector<ScopedFd>>* val) const {
1830    return readNullableTypedVector(val, &Parcel::readUniqueFileDescriptor);
1831}
1832
1833status_t Parcel::readUniqueFileDescriptorVector(std::vector<ScopedFd>* val) const {
1834    return readTypedVector(val, &Parcel::readUniqueFileDescriptor);
1835}
1836
1837status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
1838{
1839    int32_t blobType;
1840    status_t status = readInt32(&blobType);
1841    if (status) return status;
1842
1843    if (blobType == BLOB_INPLACE) {
1844        ALOGV("readBlob: read in place");
1845        const void* ptr = readInplace(len);
1846        if (!ptr) return BAD_VALUE;
1847
1848        outBlob->init(-1, const_cast<void*>(ptr), len, false);
1849        return NO_ERROR;
1850    }
1851
1852    ALOGV("readBlob: read from ashmem");
1853    bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE);
1854    int fd = readFileDescriptor();
1855    if (fd == int(BAD_TYPE)) return BAD_VALUE;
1856
1857    void* ptr = ::mmap(NULL, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ,
1858            MAP_SHARED, fd, 0);
1859    if (ptr == MAP_FAILED) return NO_MEMORY;
1860
1861    outBlob->init(fd, ptr, len, isMutable);
1862    return NO_ERROR;
1863}
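// Round-trip sketch for the blob helpers above (src, dst and n are
// placeholders; the length itself must still be communicated, e.g. as an
// int32 written alongside the blob):
//   Parcel::WritableBlob wblob;
//   if (p.writeBlob(n, false /*mutableCopy*/, &wblob) == NO_ERROR) {
//       memcpy(wblob.data(), src, n);
//       wblob.release();
//   }
//   ...
//   Parcel::ReadableBlob rblob;
//   if (p.readBlob(n, &rblob) == NO_ERROR) {
//       memcpy(dst, rblob.data(), n);
//       rblob.release();
//   }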
1864
1865status_t Parcel::read(FlattenableHelperInterface& val) const
1866{
1867    // size
1868    const size_t len = this->readInt32();
1869    const size_t fd_count = this->readInt32();
1870
1871    if (len > INT32_MAX) {
1872        // don't accept size_t values which may have come from an
1873        // inadvertent conversion from a negative int.
1874        return BAD_VALUE;
1875    }
1876
1877    // payload
1878    void const* const buf = this->readInplace(pad_size(len));
1879    if (buf == NULL)
1880        return BAD_VALUE;
1881
1882    int* fds = NULL;
1883    if (fd_count) {
1884        fds = new int[fd_count];
1885    }
1886
1887    status_t err = NO_ERROR;
1888    for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
1889        fds[i] = dup(this->readFileDescriptor());
1890        if (fds[i] < 0) {
1891            err = BAD_VALUE;
1892            ALOGE("dup() failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s",
1893                i, fds[i], fd_count, strerror(errno));
1894        }
1895    }
1896
1897    if (err == NO_ERROR) {
1898        err = val.unflatten(buf, len, fds, fd_count);
1899    }
1900
1901    if (fd_count) {
1902        delete [] fds;
1903    }
1904
1905    return err;
1906}
1907const flat_binder_object* Parcel::readObject(bool nullMetaData) const
1908{
1909    const size_t DPOS = mDataPos;
1910    if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
1911        const flat_binder_object* obj
1912                = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
1913        mDataPos = DPOS + sizeof(flat_binder_object);
1914        if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
1915            // When transferring a NULL object, we don't write it into
1916            // the object list, so we don't want to check for it when
1917            // reading.
1918            ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
1919            return obj;
1920        }
1921
1922        // Ensure that this object is valid...
1923        binder_size_t* const OBJS = mObjects;
1924        const size_t N = mObjectsSize;
1925        size_t opos = mNextObjectHint;
1926
1927        if (N > 0) {
1928            ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
1929                 this, DPOS, opos);
1930
1931            // Start at the current hint position, looking for an object at
1932            // the current data position.
1933            if (opos < N) {
1934                while (opos < (N-1) && OBJS[opos] < DPOS) {
1935                    opos++;
1936                }
1937            } else {
1938                opos = N-1;
1939            }
1940            if (OBJS[opos] == DPOS) {
1941                // Found it!
1942                ALOGV("Parcel %p found obj %zu at index %zu with forward search",
1943                     this, DPOS, opos);
1944                mNextObjectHint = opos+1;
1945                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
1946                return obj;
1947            }
1948
1949            // Look backwards for it...
1950            while (opos > 0 && OBJS[opos] > DPOS) {
1951                opos--;
1952            }
1953            if (OBJS[opos] == DPOS) {
1954                // Found it!
1955                ALOGV("Parcel %p found obj %zu at index %zu with backward search",
1956                     this, DPOS, opos);
1957                mNextObjectHint = opos+1;
1958                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
1959                return obj;
1960            }
1961        }
1962        ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
1963             this, DPOS);
1964    }
1965    return NULL;
1966}
1967
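// Closes every BINDER_TYPE_FD object currently flattened into the Parcel's
// data. The flattened objects themselves are left in place.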
1968void Parcel::closeFileDescriptors()
1969{
1970    size_t i = mObjectsSize;
1971    if (i > 0) {
1972        //ALOGI("Closing file descriptors for %zu objects...", i);
1973    }
1974    while (i > 0) {
1975        i--;
1976        const flat_binder_object* flat
1977            = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
1978        if (flat->type == BINDER_TYPE_FD) {
1979            //ALOGI("Closing fd: %ld", flat->handle);
1980            close(flat->handle);
1981        }
1982    }
1983}
1984
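// The ipc*() accessors below expose the raw data buffer and object-offset
// array so the IPC layer can pass them to the binder driver without copying.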
1985uintptr_t Parcel::ipcData() const
1986{
1987    return reinterpret_cast<uintptr_t>(mData);
1988}
1989
1990size_t Parcel::ipcDataSize() const
1991{
1992    return (mDataSize > mDataPos ? mDataSize : mDataPos);
1993}
1994
1995uintptr_t Parcel::ipcObjects() const
1996{
1997    return reinterpret_cast<uintptr_t>(mObjects);
1998}
1999
2000size_t Parcel::ipcObjectsCount() const
2001{
2002    return mObjectsSize;
2003}
2004
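// Adopts an externally owned buffer (typically a kernel transaction buffer)
// instead of copying it; relFunc/relCookie are invoked later to release it.
// Object offsets must be strictly increasing, each by at least the size of a
// flat_binder_object; otherwise the object list is discarded.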
2005void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
2006    const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
2007{
2008    binder_size_t minOffset = 0;
2009    freeDataNoInit();
2010    mError = NO_ERROR;
2011    mData = const_cast<uint8_t*>(data);
2012    mDataSize = mDataCapacity = dataSize;
2013    //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
2014    mDataPos = 0;
2015    ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
2016    mObjects = const_cast<binder_size_t*>(objects);
2017    mObjectsSize = mObjectsCapacity = objectsCount;
2018    mNextObjectHint = 0;
2019    mOwner = relFunc;
2020    mOwnerCookie = relCookie;
2021    for (size_t i = 0; i < mObjectsSize; i++) {
2022        binder_size_t offset = mObjects[i];
2023        if (offset < minOffset) {
2024            ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
2025                  __func__, (uint64_t)offset, (uint64_t)minOffset);
2026            mObjectsSize = 0;
2027            break;
2028        }
2029        minOffset = offset + sizeof(flat_binder_object);
2030    }
2031    scanForFds();
2032}
2033
2034void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
2035{
2036    to << "Parcel(";
2037
2038    if (errorCheck() != NO_ERROR) {
2039        const status_t err = errorCheck();
2040        to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
2041    } else if (dataSize() > 0) {
2042        const uint8_t* DATA = data();
2043        to << indent << HexDump(DATA, dataSize()) << dedent;
2044        const binder_size_t* OBJS = objects();
2045        const size_t N = objectsCount();
2046        for (size_t i=0; i<N; i++) {
2047            const flat_binder_object* flat
2048                = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
2049            to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
2050                << TypeCode(flat->type & 0x7f7f7f00)
2051                << " = " << flat->binder;
2052        }
2053    } else {
2054        to << "NULL";
2055    }
2056
2057    to << ")";
2058}
2059
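// Releases the reference held on every flattened object in the parcel;
// the inverse of acquireObjects().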
2060void Parcel::releaseObjects()
2061{
2062    const sp<ProcessState> proc(ProcessState::self());
2063    size_t i = mObjectsSize;
2064    uint8_t* const data = mData;
2065    binder_size_t* const objects = mObjects;
2066    while (i > 0) {
2067        i--;
2068        const flat_binder_object* flat
2069            = reinterpret_cast<flat_binder_object*>(data+objects[i]);
2070        release_object(proc, *flat, this, &mOpenAshmemSize);
2071    }
2072}
2073
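// Acquires a reference on every flattened object in the parcel so the
// referenced binders stay alive while the parcel holds them.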
2074void Parcel::acquireObjects()
2075{
2076    const sp<ProcessState> proc(ProcessState::self());
2077    size_t i = mObjectsSize;
2078    uint8_t* const data = mData;
2079    binder_size_t* const objects = mObjects;
2080    while (i > 0) {
2081        i--;
2082        const flat_binder_object* flat
2083            = reinterpret_cast<flat_binder_object*>(data+objects[i]);
2084        acquire_object(proc, *flat, this, &mOpenAshmemSize);
2085    }
2086}
2087
2088void Parcel::freeData()
2089{
2090    freeDataNoInit();
2091    initState();
2092}
2093
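// Variant of freeData() that releases the storage but leaves the member
// fields untouched, for callers that are about to overwrite them anyway.
// Externally owned buffers are handed back through the owner's release
// callback; buffers we allocated release their objects and return the
// memory to the heap, updating the global allocation counters.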
2094void Parcel::freeDataNoInit()
2095{
2096    if (mOwner) {
2097        LOG_ALLOC("Parcel %p: freeing other owner data", this);
2098        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
2099        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
2100    } else {
2101        LOG_ALLOC("Parcel %p: freeing allocated data", this);
2102        releaseObjects();
2103        if (mData) {
2104            LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
2105            pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2106            if (mDataCapacity <= gParcelGlobalAllocSize) {
2107              gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity;
2108            } else {
2109              gParcelGlobalAllocSize = 0;
2110            }
2111            if (gParcelGlobalAllocCount > 0) {
2112              gParcelGlobalAllocCount--;
2113            }
2114            pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2115            free(mData);
2116        }
2117        if (mObjects) free(mObjects);
2118    }
2119}
2120
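// Grows the backing store to 1.5x the space needed for the pending write;
// a wrapped (not larger) result is reported as NO_MEMORY.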
2121status_t Parcel::growData(size_t len)
2122{
2123    if (len > INT32_MAX) {
2124        // don't accept size_t values which may have come from an
2125        // inadvertent conversion from a negative int.
2126        return BAD_VALUE;
2127    }
2128
2129    size_t newSize = ((mDataSize+len)*3)/2;
2130    return (newSize <= mDataSize)
2131            ? (status_t) NO_MEMORY
2132            : continueWrite(newSize);
2133}
2134
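// Discards the parcel's contents and restarts writing with the requested
// capacity. A buffer owned by someone else is released and reallocated via
// continueWrite(); a buffer we own is realloc()ed in place after its objects
// are released.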
2135status_t Parcel::restartWrite(size_t desired)
2136{
2137    if (desired > INT32_MAX) {
2138        // don't accept size_t values which may have come from an
2139        // inadvertent conversion from a negative int.
2140        return BAD_VALUE;
2141    }
2142
2143    if (mOwner) {
2144        freeData();
2145        return continueWrite(desired);
2146    }
2147
2148    uint8_t* data = (uint8_t*)realloc(mData, desired);
2149    if (!data && desired > mDataCapacity) {
2150        mError = NO_MEMORY;
2151        return NO_MEMORY;
2152    }
2153
2154    releaseObjects();
2155
2156    if (data) {
2157        LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
2158        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2159        gParcelGlobalAllocSize += desired;
2160        gParcelGlobalAllocSize -= mDataCapacity;
2161        if (!mData) {
2162            gParcelGlobalAllocCount++;
2163        }
2164        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2165        mData = data;
2166        mDataCapacity = desired;
2167    }
2168
2169    mDataSize = mDataPos = 0;
2170    ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
2171    ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);
2172
2173    free(mObjects);
2174    mObjects = NULL;
2175    mObjectsSize = mObjectsCapacity = 0;
2176    mNextObjectHint = 0;
2177    mHasFds = false;
2178    mFdsKnown = true;
2179    mAllowFds = true;
2180
2181    return NO_ERROR;
2182}
2183
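// Resizes the parcel for further writing. Three cases: data owned by someone
// else is copied into a fresh allocation we own; data we already own is
// realloc()ed (dropping references to any objects that fall beyond the new
// size); and a parcel with no data yet gets its first allocation.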
2184status_t Parcel::continueWrite(size_t desired)
2185{
2186    if (desired > INT32_MAX) {
2187        // don't accept size_t values which may have come from an
2188        // inadvertent conversion from a negative int.
2189        return BAD_VALUE;
2190    }
2191
2192    // If shrinking, first adjust for any objects that appear
2193    // after the new data size.
2194    size_t objectsSize = mObjectsSize;
2195    if (desired < mDataSize) {
2196        if (desired == 0) {
2197            objectsSize = 0;
2198        } else {
2199            while (objectsSize > 0) {
2200                if (mObjects[objectsSize-1] < desired)
2201                    break;
2202                objectsSize--;
2203            }
2204        }
2205    }
2206
2207    if (mOwner) {
2208        // If the size is going to zero, just release the owner's data.
2209        if (desired == 0) {
2210            freeData();
2211            return NO_ERROR;
2212        }
2213
2214        // If there is a different owner, we need to take
2215        // possession.
2216        uint8_t* data = (uint8_t*)malloc(desired);
2217        if (!data) {
2218            mError = NO_MEMORY;
2219            return NO_MEMORY;
2220        }
2221        binder_size_t* objects = NULL;
2222
2223        if (objectsSize) {
2224            objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
2225            if (!objects) {
2226                free(data);
2227
2228                mError = NO_MEMORY;
2229                return NO_MEMORY;
2230            }
2231
2232            // Little hack to only acquire references on objects
2233            // we will be keeping.
2234            size_t oldObjectsSize = mObjectsSize;
2235            mObjectsSize = objectsSize;
2236            acquireObjects();
2237            mObjectsSize = oldObjectsSize;
2238        }
2239
2240        if (mData) {
2241            memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
2242        }
2243        if (objects && mObjects) {
2244            memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
2245        }
2246        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
2247        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
2248        mOwner = NULL;
2249
2250        LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
2251        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2252        gParcelGlobalAllocSize += desired;
2253        gParcelGlobalAllocCount++;
2254        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2255
2256        mData = data;
2257        mObjects = objects;
2258        mDataSize = (mDataSize < desired) ? mDataSize : desired;
2259        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2260        mDataCapacity = desired;
2261        mObjectsSize = mObjectsCapacity = objectsSize;
2262        mNextObjectHint = 0;
2263
2264    } else if (mData) {
2265        if (objectsSize < mObjectsSize) {
2266            // Need to release refs on any objects we are dropping.
2267            const sp<ProcessState> proc(ProcessState::self());
2268            for (size_t i=objectsSize; i<mObjectsSize; i++) {
2269                const flat_binder_object* flat
2270                    = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
2271                if (flat->type == BINDER_TYPE_FD) {
2272                    // will need to rescan because we may have lopped off the only FDs
2273                    mFdsKnown = false;
2274                }
2275                release_object(proc, *flat, this, &mOpenAshmemSize);
2276            }
2277            binder_size_t* objects =
2278                (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
2279            if (objects) {
2280                mObjects = objects;
2281            }
2282            mObjectsSize = objectsSize;
2283            mNextObjectHint = 0;
2284        }
2285
2286        // We own the data, so we can just do a realloc().
2287        if (desired > mDataCapacity) {
2288            uint8_t* data = (uint8_t*)realloc(mData, desired);
2289            if (data) {
2290                LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
2291                        desired);
2292                pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2293                gParcelGlobalAllocSize += desired;
2294                gParcelGlobalAllocSize -= mDataCapacity;
2295                pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2296                mData = data;
2297                mDataCapacity = desired;
2298            } else {
2299                mError = NO_MEMORY;
2300                return NO_MEMORY;
2301            }
2302        } else {
2303            if (mDataSize > desired) {
2304                mDataSize = desired;
2305                ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2306            }
2307            if (mDataPos > desired) {
2308                mDataPos = desired;
2309                ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
2310            }
2311        }
2312
2313    } else {
2314        // This is the first data.  Easy!
2315        uint8_t* data = (uint8_t*)malloc(desired);
2316        if (!data) {
2317            mError = NO_MEMORY;
2318            return NO_MEMORY;
2319        }
2320
2321        if (!(mDataCapacity == 0 && mObjects == NULL
2322             && mObjectsCapacity == 0)) {
2323            ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
2324        }
2325
2326        LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
2327        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2328        gParcelGlobalAllocSize += desired;
2329        gParcelGlobalAllocCount++;
2330        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2331
2332        mData = data;
2333        mDataSize = mDataPos = 0;
2334        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2335        ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
2336        mDataCapacity = desired;
2337    }
2338
2339    return NO_ERROR;
2340}
2341
2342void Parcel::initState()
2343{
2344    LOG_ALLOC("Parcel %p: initState", this);
2345    mError = NO_ERROR;
2346    mData = 0;
2347    mDataSize = 0;
2348    mDataCapacity = 0;
2349    mDataPos = 0;
2350    ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
2351    ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
2352    mObjects = NULL;
2353    mObjectsSize = 0;
2354    mObjectsCapacity = 0;
2355    mNextObjectHint = 0;
2356    mHasFds = false;
2357    mFdsKnown = true;
2358    mAllowFds = true;
2359    mOwner = NULL;
2360    mOpenAshmemSize = 0;
2361}
2362
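// Recomputes mHasFds by walking the object list and marks the result as
// known via mFdsKnown.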
2363void Parcel::scanForFds() const
2364{
2365    bool hasFds = false;
2366    for (size_t i=0; i<mObjectsSize; i++) {
2367        const flat_binder_object* flat
2368            = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
2369        if (flat->type == BINDER_TYPE_FD) {
2370            hasFds = true;
2371            break;
2372        }
2373    }
2374    mHasFds = hasFds;
2375    mFdsKnown = true;
2376}
2377
2378size_t Parcel::getBlobAshmemSize() const
2379{
2380    // This used to return the total size of all blobs written to ashmem; now it returns
2381    // the size of the ashmem regions currently referenced by this Parcel, which should be equivalent.
2382    // TODO: Remove method once ABI can be changed.
2383    return mOpenAshmemSize;
2384}
2385
2386size_t Parcel::getOpenAshmemSize() const
2387{
2388    return mOpenAshmemSize;
2389}
2390
2391// --- Parcel::Blob ---
2392
2393Parcel::Blob::Blob() :
2394        mFd(-1), mData(NULL), mSize(0), mMutable(false) {
2395}
2396
2397Parcel::Blob::~Blob() {
2398    release();
2399}
2400
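// Unmaps the blob's mapping, if any, and clears the fields. The file
// descriptor itself is not closed here; its lifetime is managed by whoever
// produced the blob.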
2401void Parcel::Blob::release() {
2402    if (mFd != -1 && mData) {
2403        ::munmap(mData, mSize);
2404    }
2405    clear();
2406}
2407
2408void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
2409    mFd = fd;
2410    mData = data;
2411    mSize = size;
2412    mMutable = isMutable;
2413}
2414
2415void Parcel::Blob::clear() {
2416    mFd = -1;
2417    mData = NULL;
2418    mSize = 0;
2419    mMutable = false;
2420}
2421
2422} // namespace android
2423