1/*
2 * Copyright (C) 2005 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "Parcel"
18//#define LOG_NDEBUG 0
19
20#include <errno.h>
21#include <fcntl.h>
22#include <inttypes.h>
23#include <pthread.h>
24#include <stdint.h>
25#include <stdio.h>
26#include <stdlib.h>
27#include <sys/mman.h>
28#include <sys/stat.h>
29#include <sys/types.h>
30#include <sys/resource.h>
31#include <unistd.h>
32
33#include <binder/Binder.h>
34#include <binder/BpBinder.h>
35#include <binder/IPCThreadState.h>
36#include <binder/Parcel.h>
37#include <binder/ProcessState.h>
38#include <binder/Status.h>
39#include <binder/TextOutput.h>
40#include <binder/Value.h>
41
42#include <cutils/ashmem.h>
43#include <utils/Debug.h>
44#include <utils/Flattenable.h>
45#include <utils/Log.h>
46#include <utils/misc.h>
47#include <utils/String8.h>
48#include <utils/String16.h>
49
50#include <private/binder/binder_module.h>
51#include <private/binder/Static.h>
52
53#ifndef INT32_MAX
54#define INT32_MAX ((int32_t)(2147483647))
55#endif
56
57#define LOG_REFS(...)
58//#define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
59#define LOG_ALLOC(...)
60//#define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
61
62// ---------------------------------------------------------------------------
63
// This macro should never be used at runtime, because an overly large value
// of s could cause an integer overflow. Instead, always use the wrapper
// function pad_size().
67#define PAD_SIZE_UNSAFE(s) (((s)+3)&~3)
68
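// pad_size() rounds a length up to the next multiple of 4 and aborts on
// overflow; for example pad_size(3) == 4 and pad_size(8) == 8.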
69static size_t pad_size(size_t s) {
70    if (s > (SIZE_T_MAX - 3)) {
71        abort();
72    }
73    return PAD_SIZE_UNSAFE(s);
74}
75
76// Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
77#define STRICT_MODE_PENALTY_GATHER (0x40 << 16)
78
79// XXX This can be made public if we want to provide
80// support for typed data.
81struct small_flat_data
82{
83    uint32_t type;
84    uint32_t data;
85};
86
87namespace android {
88
89static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
90static size_t gParcelGlobalAllocSize = 0;
91static size_t gParcelGlobalAllocCount = 0;
92
93static size_t gMaxFds = 0;
94
95// Maximum size of a blob to transfer in-place.
96static const size_t BLOB_INPLACE_LIMIT = 16 * 1024;
97
98enum {
99    BLOB_INPLACE = 0,
100    BLOB_ASHMEM_IMMUTABLE = 1,
101    BLOB_ASHMEM_MUTABLE = 2,
102};
103
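// Takes the appropriate reference on an object flattened into a Parcel:
// strong/weak references for local binders and proxies, plus ashmem size
// bookkeeping for owned file descriptors.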
104void acquire_object(const sp<ProcessState>& proc,
105    const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
106{
107    switch (obj.hdr.type) {
108        case BINDER_TYPE_BINDER:
109            if (obj.binder) {
110                LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie);
111                reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
112            }
113            return;
114        case BINDER_TYPE_WEAK_BINDER:
115            if (obj.binder)
116                reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who);
117            return;
118        case BINDER_TYPE_HANDLE: {
119            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
120            if (b != NULL) {
121                LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
122                b->incStrong(who);
123            }
124            return;
125        }
126        case BINDER_TYPE_WEAK_HANDLE: {
127            const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
128            if (b != NULL) b.get_refs()->incWeak(who);
129            return;
130        }
131        case BINDER_TYPE_FD: {
132            if ((obj.cookie != 0) && (outAshmemSize != NULL) && ashmem_valid(obj.handle)) {
133                // If we own an ashmem fd, keep track of how much memory it refers to.
134                int size = ashmem_get_size_region(obj.handle);
135                if (size > 0) {
136                    *outAshmemSize += size;
137                }
138            }
139            return;
140        }
141    }
142
143    ALOGD("Invalid object type 0x%08x", obj.hdr.type);
144}
145
146void acquire_object(const sp<ProcessState>& proc,
147    const flat_binder_object& obj, const void* who)
148{
149    acquire_object(proc, obj, who, NULL);
150}
151
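// Inverse of acquire_object(): drops the references taken above, closes
// owned file descriptors, and updates the ashmem size bookkeeping.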
152static void release_object(const sp<ProcessState>& proc,
153    const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
154{
155    switch (obj.hdr.type) {
156        case BINDER_TYPE_BINDER:
157            if (obj.binder) {
158                LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie);
159                reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
160            }
161            return;
162        case BINDER_TYPE_WEAK_BINDER:
163            if (obj.binder)
164                reinterpret_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who);
165            return;
166        case BINDER_TYPE_HANDLE: {
167            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
168            if (b != NULL) {
169                LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
170                b->decStrong(who);
171            }
172            return;
173        }
174        case BINDER_TYPE_WEAK_HANDLE: {
175            const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
176            if (b != NULL) b.get_refs()->decWeak(who);
177            return;
178        }
179        case BINDER_TYPE_FD: {
180            if (obj.cookie != 0) { // owned
181                if ((outAshmemSize != NULL) && ashmem_valid(obj.handle)) {
182                    int size = ashmem_get_size_region(obj.handle);
183                    if (size > 0) {
184                        *outAshmemSize -= size;
185                    }
186                }
187
188                close(obj.handle);
189            }
190            return;
191        }
192    }
193
194    ALOGE("Invalid object type 0x%08x", obj.hdr.type);
195}
196
197void release_object(const sp<ProcessState>& proc,
198    const flat_binder_object& obj, const void* who)
199{
200    release_object(proc, obj, who, NULL);
201}
202
203inline static status_t finish_flatten_binder(
204    const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
205{
206    return out->writeObject(flat, false);
207}
208
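// Flattens a strong IBinder into a flat_binder_object: local binders are
// written as BINDER_TYPE_BINDER carrying raw pointers, remote proxies as
// BINDER_TYPE_HANDLE carrying the driver handle.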
209status_t flatten_binder(const sp<ProcessState>& /*proc*/,
210    const sp<IBinder>& binder, Parcel* out)
211{
212    flat_binder_object obj;
213
214    if (IPCThreadState::self()->backgroundSchedulingDisabled()) {
215        /* minimum priority for all nodes is nice 0 */
216        obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
217    } else {
218        /* minimum priority for all nodes is MAX_NICE(19) */
219        obj.flags = 0x13 | FLAT_BINDER_FLAG_ACCEPTS_FDS;
220    }
221
222    if (binder != NULL) {
223        IBinder *local = binder->localBinder();
224        if (!local) {
225            BpBinder *proxy = binder->remoteBinder();
226            if (proxy == NULL) {
227                ALOGE("null proxy");
228            }
229            const int32_t handle = proxy ? proxy->handle() : 0;
230            obj.hdr.type = BINDER_TYPE_HANDLE;
231            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
232            obj.handle = handle;
233            obj.cookie = 0;
234        } else {
235            obj.hdr.type = BINDER_TYPE_BINDER;
236            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
237            obj.cookie = reinterpret_cast<uintptr_t>(local);
238        }
239    } else {
240        obj.hdr.type = BINDER_TYPE_BINDER;
241        obj.binder = 0;
242        obj.cookie = 0;
243    }
244
245    return finish_flatten_binder(binder, obj, out);
246}
247
248status_t flatten_binder(const sp<ProcessState>& /*proc*/,
249    const wp<IBinder>& binder, Parcel* out)
250{
251    flat_binder_object obj;
252
253    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
254    if (binder != NULL) {
255        sp<IBinder> real = binder.promote();
256        if (real != NULL) {
257            IBinder *local = real->localBinder();
258            if (!local) {
259                BpBinder *proxy = real->remoteBinder();
260                if (proxy == NULL) {
261                    ALOGE("null proxy");
262                }
263                const int32_t handle = proxy ? proxy->handle() : 0;
264                obj.hdr.type = BINDER_TYPE_WEAK_HANDLE;
265                obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
266                obj.handle = handle;
267                obj.cookie = 0;
268            } else {
269                obj.hdr.type = BINDER_TYPE_WEAK_BINDER;
270                obj.binder = reinterpret_cast<uintptr_t>(binder.get_refs());
271                obj.cookie = reinterpret_cast<uintptr_t>(binder.unsafe_get());
272            }
273            return finish_flatten_binder(real, obj, out);
274        }
275
276        // XXX How to deal?  In order to flatten the given binder,
277        // we need to probe it for information, which requires a primary
278        // reference...  but we don't have one.
279        //
280        // The OpenBinder implementation uses a dynamic_cast<> here,
281        // but we can't do that with the different reference counting
282        // implementation we are using.
        ALOGE("Unable to flatten Binder weak reference!");
284        obj.hdr.type = BINDER_TYPE_BINDER;
285        obj.binder = 0;
286        obj.cookie = 0;
287        return finish_flatten_binder(NULL, obj, out);
288
289    } else {
290        obj.hdr.type = BINDER_TYPE_BINDER;
291        obj.binder = 0;
292        obj.cookie = 0;
293        return finish_flatten_binder(NULL, obj, out);
294    }
295}
296
297inline static status_t finish_unflatten_binder(
298    BpBinder* /*proxy*/, const flat_binder_object& /*flat*/,
299    const Parcel& /*in*/)
300{
301    return NO_ERROR;
302}
303
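// Reads a flat_binder_object from the Parcel and reconstructs a strong
// IBinder: the local pointer for BINDER_TYPE_BINDER, or a proxy looked up
// from the handle for BINDER_TYPE_HANDLE.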
304status_t unflatten_binder(const sp<ProcessState>& proc,
305    const Parcel& in, sp<IBinder>* out)
306{
307    const flat_binder_object* flat = in.readObject(false);
308
309    if (flat) {
310        switch (flat->hdr.type) {
311            case BINDER_TYPE_BINDER:
312                *out = reinterpret_cast<IBinder*>(flat->cookie);
313                return finish_unflatten_binder(NULL, *flat, in);
314            case BINDER_TYPE_HANDLE:
315                *out = proc->getStrongProxyForHandle(flat->handle);
316                return finish_unflatten_binder(
317                    static_cast<BpBinder*>(out->get()), *flat, in);
318        }
319    }
320    return BAD_TYPE;
321}
322
323status_t unflatten_binder(const sp<ProcessState>& proc,
324    const Parcel& in, wp<IBinder>* out)
325{
326    const flat_binder_object* flat = in.readObject(false);
327
328    if (flat) {
329        switch (flat->hdr.type) {
330            case BINDER_TYPE_BINDER:
331                *out = reinterpret_cast<IBinder*>(flat->cookie);
332                return finish_unflatten_binder(NULL, *flat, in);
333            case BINDER_TYPE_WEAK_BINDER:
334                if (flat->binder != 0) {
335                    out->set_object_and_refs(
336                        reinterpret_cast<IBinder*>(flat->cookie),
337                        reinterpret_cast<RefBase::weakref_type*>(flat->binder));
338                } else {
339                    *out = NULL;
340                }
341                return finish_unflatten_binder(NULL, *flat, in);
342            case BINDER_TYPE_HANDLE:
343            case BINDER_TYPE_WEAK_HANDLE:
344                *out = proc->getWeakProxyForHandle(flat->handle);
345                return finish_unflatten_binder(
346                    static_cast<BpBinder*>(out->unsafe_get()), *flat, in);
347        }
348    }
349    return BAD_TYPE;
350}
351
352// ---------------------------------------------------------------------------
353
354Parcel::Parcel()
355{
356    LOG_ALLOC("Parcel %p: constructing", this);
357    initState();
358}
359
360Parcel::~Parcel()
361{
362    freeDataNoInit();
363    LOG_ALLOC("Parcel %p: destroyed", this);
364}
365
366size_t Parcel::getGlobalAllocSize() {
367    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
368    size_t size = gParcelGlobalAllocSize;
369    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
370    return size;
371}
372
373size_t Parcel::getGlobalAllocCount() {
374    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
375    size_t count = gParcelGlobalAllocCount;
376    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
377    return count;
378}
379
380const uint8_t* Parcel::data() const
381{
382    return mData;
383}
384
385size_t Parcel::dataSize() const
386{
387    return (mDataSize > mDataPos ? mDataSize : mDataPos);
388}
389
390size_t Parcel::dataAvail() const
391{
392    size_t result = dataSize() - dataPosition();
393    if (result > INT32_MAX) {
394        abort();
395    }
396    return result;
397}
398
399size_t Parcel::dataPosition() const
400{
401    return mDataPos;
402}
403
404size_t Parcel::dataCapacity() const
405{
406    return mDataCapacity;
407}
408
409status_t Parcel::setDataSize(size_t size)
410{
411    if (size > INT32_MAX) {
412        // don't accept size_t values which may have come from an
413        // inadvertent conversion from a negative int.
414        return BAD_VALUE;
415    }
416
417    status_t err;
418    err = continueWrite(size);
419    if (err == NO_ERROR) {
420        mDataSize = size;
421        ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
422    }
423    return err;
424}
425
426void Parcel::setDataPosition(size_t pos) const
427{
428    if (pos > INT32_MAX) {
429        // don't accept size_t values which may have come from an
430        // inadvertent conversion from a negative int.
431        abort();
432    }
433
434    mDataPos = pos;
435    mNextObjectHint = 0;
436    mObjectsSorted = false;
437}
438
439status_t Parcel::setDataCapacity(size_t size)
440{
441    if (size > INT32_MAX) {
442        // don't accept size_t values which may have come from an
443        // inadvertent conversion from a negative int.
444        return BAD_VALUE;
445    }
446
447    if (size > mDataCapacity) return continueWrite(size);
448    return NO_ERROR;
449}
450
451status_t Parcel::setData(const uint8_t* buffer, size_t len)
452{
453    if (len > INT32_MAX) {
454        // don't accept size_t values which may have come from an
455        // inadvertent conversion from a negative int.
456        return BAD_VALUE;
457    }
458
459    status_t err = restartWrite(len);
460    if (err == NO_ERROR) {
461        memcpy(const_cast<uint8_t*>(data()), buffer, len);
462        mDataSize = len;
463        mFdsKnown = false;
464    }
465    return err;
466}
467
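// Appends a range of another Parcel's data to this one, rebasing and
// acquiring any binder objects that fall entirely within that range; file
// descriptors are dup'd so this Parcel owns its own copies.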
468status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len)
469{
470    const sp<ProcessState> proc(ProcessState::self());
471    status_t err;
472    const uint8_t *data = parcel->mData;
473    const binder_size_t *objects = parcel->mObjects;
474    size_t size = parcel->mObjectsSize;
475    int startPos = mDataPos;
476    int firstIndex = -1, lastIndex = -2;
477
478    if (len == 0) {
479        return NO_ERROR;
480    }
481
482    if (len > INT32_MAX) {
483        // don't accept size_t values which may have come from an
484        // inadvertent conversion from a negative int.
485        return BAD_VALUE;
486    }
487
488    // range checks against the source parcel size
489    if ((offset > parcel->mDataSize)
490            || (len > parcel->mDataSize)
491            || (offset + len > parcel->mDataSize)) {
492        return BAD_VALUE;
493    }
494
495    // Count objects in range
496    for (int i = 0; i < (int) size; i++) {
497        size_t off = objects[i];
498        if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) {
499            if (firstIndex == -1) {
500                firstIndex = i;
501            }
502            lastIndex = i;
503        }
504    }
505    int numObjects = lastIndex - firstIndex + 1;
506
507    if ((mDataSize+len) > mDataCapacity) {
508        // grow data
509        err = growData(len);
510        if (err != NO_ERROR) {
511            return err;
512        }
513    }
514
515    // append data
516    memcpy(mData + mDataPos, data + offset, len);
517    mDataPos += len;
518    mDataSize += len;
519
520    err = NO_ERROR;
521
522    if (numObjects > 0) {
523        // grow objects
524        if (mObjectsCapacity < mObjectsSize + numObjects) {
525            size_t newSize = ((mObjectsSize + numObjects)*3)/2;
526            if (newSize*sizeof(binder_size_t) < mObjectsSize) return NO_MEMORY;   // overflow
527            binder_size_t *objects =
528                (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
529            if (objects == (binder_size_t*)0) {
530                return NO_MEMORY;
531            }
532            mObjects = objects;
533            mObjectsCapacity = newSize;
534        }
535
536        // append and acquire objects
537        int idx = mObjectsSize;
538        for (int i = firstIndex; i <= lastIndex; i++) {
539            size_t off = objects[i] - offset + startPos;
540            mObjects[idx++] = off;
541            mObjectsSize++;
542
543            flat_binder_object* flat
544                = reinterpret_cast<flat_binder_object*>(mData + off);
545            acquire_object(proc, *flat, this, &mOpenAshmemSize);
546
547            if (flat->hdr.type == BINDER_TYPE_FD) {
548                // If this is a file descriptor, we need to dup it so the
549                // new Parcel now owns its own fd, and can declare that we
550                // officially know we have fds.
551                flat->handle = fcntl(flat->handle, F_DUPFD_CLOEXEC, 0);
552                flat->cookie = 1;
553                mHasFds = mFdsKnown = true;
554                if (!mAllowFds) {
555                    err = FDS_NOT_ALLOWED;
556                }
557            }
558        }
559    }
560
561    return err;
562}
563
564int Parcel::compareData(const Parcel& other) {
565    size_t size = dataSize();
566    if (size != other.dataSize()) {
567        return size < other.dataSize() ? -1 : 1;
568    }
569    return memcmp(data(), other.data(), size);
570}
571
572bool Parcel::allowFds() const
573{
574    return mAllowFds;
575}
576
577bool Parcel::pushAllowFds(bool allowFds)
578{
579    const bool origValue = mAllowFds;
580    if (!allowFds) {
581        mAllowFds = false;
582    }
583    return origValue;
584}
585
586void Parcel::restoreAllowFds(bool lastValue)
587{
588    mAllowFds = lastValue;
589}
590
591bool Parcel::hasFileDescriptors() const
592{
593    if (!mFdsKnown) {
594        scanForFds();
595    }
596    return mHasFds;
597}
598
599// Write RPC headers.  (previously just the interface token)
600status_t Parcel::writeInterfaceToken(const String16& interface)
601{
602    writeInt32(IPCThreadState::self()->getStrictModePolicy() |
603               STRICT_MODE_PENALTY_GATHER);
604    // currently the interface identification token is just its name as a string
605    return writeString16(interface);
606}
607
608bool Parcel::checkInterface(IBinder* binder) const
609{
610    return enforceInterface(binder->getInterfaceDescriptor());
611}
612
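// Reads the RPC header written by writeInterfaceToken(): applies the strict
// mode policy to the calling thread (cleared for one-way calls) and checks
// that the interface name matches.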
613bool Parcel::enforceInterface(const String16& interface,
614                              IPCThreadState* threadState) const
615{
616    int32_t strictPolicy = readInt32();
617    if (threadState == NULL) {
618        threadState = IPCThreadState::self();
619    }
620    if ((threadState->getLastTransactionBinderFlags() &
621         IBinder::FLAG_ONEWAY) != 0) {
622      // For one-way calls, the callee is running entirely
623      // disconnected from the caller, so disable StrictMode entirely.
624      // Not only does disk/network usage not impact the caller, but
      // there's no way to communicate back any violations anyway.
626      threadState->setStrictModePolicy(0);
627    } else {
628      threadState->setStrictModePolicy(strictPolicy);
629    }
630    const String16 str(readString16());
631    if (str == interface) {
632        return true;
633    } else {
634        ALOGW("**** enforceInterface() expected '%s' but read '%s'",
635                String8(interface).string(), String8(str).string());
636        return false;
637    }
638}
639
640const binder_size_t* Parcel::objects() const
641{
642    return mObjects;
643}
644
645size_t Parcel::objectsCount() const
646{
647    return mObjectsSize;
648}
649
650status_t Parcel::errorCheck() const
651{
652    return mError;
653}
654
655void Parcel::setError(status_t err)
656{
657    mError = err;
658}
659
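// Advances the write position after len bytes have been written in place,
// growing the recorded data size if the position moved past it.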
660status_t Parcel::finishWrite(size_t len)
661{
662    if (len > INT32_MAX) {
663        // don't accept size_t values which may have come from an
664        // inadvertent conversion from a negative int.
665        return BAD_VALUE;
666    }
667
668    //printf("Finish write of %d\n", len);
669    mDataPos += len;
670    ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
671    if (mDataPos > mDataSize) {
672        mDataSize = mDataPos;
673        ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
674    }
675    //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
676    return NO_ERROR;
677}
678
679status_t Parcel::writeUnpadded(const void* data, size_t len)
680{
681    if (len > INT32_MAX) {
682        // don't accept size_t values which may have come from an
683        // inadvertent conversion from a negative int.
684        return BAD_VALUE;
685    }
686
687    size_t end = mDataPos + len;
688    if (end < mDataPos) {
689        // integer overflow
690        return BAD_VALUE;
691    }
692
693    if (end <= mDataCapacity) {
694restart_write:
695        memcpy(mData+mDataPos, data, len);
696        return finishWrite(len);
697    }
698
699    status_t err = growData(len);
700    if (err == NO_ERROR) goto restart_write;
701    return err;
702}
703
704status_t Parcel::write(const void* data, size_t len)
705{
706    if (len > INT32_MAX) {
707        // don't accept size_t values which may have come from an
708        // inadvertent conversion from a negative int.
709        return BAD_VALUE;
710    }
711
712    void* const d = writeInplace(len);
713    if (d) {
714        memcpy(d, data, len);
715        return NO_ERROR;
716    }
717    return mError;
718}
719
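// Reserves pad_size(len) bytes in the buffer and returns a pointer for the
// caller to fill; the trailing padding bytes are zeroed so uninitialized
// data is never transmitted.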
720void* Parcel::writeInplace(size_t len)
721{
722    if (len > INT32_MAX) {
723        // don't accept size_t values which may have come from an
724        // inadvertent conversion from a negative int.
725        return NULL;
726    }
727
728    const size_t padded = pad_size(len);
729
730    // sanity check for integer overflow
731    if (mDataPos+padded < mDataPos) {
732        return NULL;
733    }
734
735    if ((mDataPos+padded) <= mDataCapacity) {
736restart_write:
737        //printf("Writing %ld bytes, padded to %ld\n", len, padded);
738        uint8_t* const data = mData+mDataPos;
739
740        // Need to pad at end?
741        if (padded != len) {
742#if BYTE_ORDER == BIG_ENDIAN
743            static const uint32_t mask[4] = {
744                0x00000000, 0xffffff00, 0xffff0000, 0xff000000
745            };
746#endif
747#if BYTE_ORDER == LITTLE_ENDIAN
748            static const uint32_t mask[4] = {
749                0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
750            };
751#endif
752            //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len],
753            //    *reinterpret_cast<void**>(data+padded-4));
754            *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
755        }
756
757        finishWrite(padded);
758        return data;
759    }
760
761    status_t err = growData(padded);
762    if (err == NO_ERROR) goto restart_write;
763    return NULL;
764}
765
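// Writes a UTF-8 std::string as a length-prefixed, NUL-terminated UTF-16
// string, matching the wire format produced by writeString16().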
766status_t Parcel::writeUtf8AsUtf16(const std::string& str) {
767    const uint8_t* strData = (uint8_t*)str.data();
768    const size_t strLen= str.length();
769    const ssize_t utf16Len = utf8_to_utf16_length(strData, strLen);
770    if (utf16Len < 0 || utf16Len > std::numeric_limits<int32_t>::max()) {
771        return BAD_VALUE;
772    }
773
774    status_t err = writeInt32(utf16Len);
775    if (err) {
776        return err;
777    }
778
779    // Allocate enough bytes to hold our converted string and its terminating NULL.
780    void* dst = writeInplace((utf16Len + 1) * sizeof(char16_t));
781    if (!dst) {
782        return NO_MEMORY;
783    }
784
785    utf8_to_utf16(strData, strLen, (char16_t*)dst, (size_t) utf16Len + 1);
786
787    return NO_ERROR;
788}
789
790status_t Parcel::writeUtf8AsUtf16(const std::unique_ptr<std::string>& str) {
791  if (!str) {
792    return writeInt32(-1);
793  }
794  return writeUtf8AsUtf16(*str);
795}
796
797namespace {
798
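// Shared helper for the writeByteVector() overloads: writes an int32 length
// prefix followed by the raw bytes.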
799template<typename T>
800status_t writeByteVectorInternal(Parcel* parcel, const std::vector<T>& val)
801{
802    status_t status;
803    if (val.size() > std::numeric_limits<int32_t>::max()) {
804        status = BAD_VALUE;
805        return status;
806    }
807
808    status = parcel->writeInt32(val.size());
809    if (status != OK) {
810        return status;
811    }
812
813    void* data = parcel->writeInplace(val.size());
814    if (!data) {
815        status = BAD_VALUE;
816        return status;
817    }
818
819    memcpy(data, val.data(), val.size());
820    return status;
821}
822
823template<typename T>
824status_t writeByteVectorInternalPtr(Parcel* parcel,
825                                    const std::unique_ptr<std::vector<T>>& val)
826{
827    if (!val) {
828        return parcel->writeInt32(-1);
829    }
830
831    return writeByteVectorInternal(parcel, *val);
832}
833
834}  // namespace
835
836status_t Parcel::writeByteVector(const std::vector<int8_t>& val) {
837    return writeByteVectorInternal(this, val);
838}
839
840status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<int8_t>>& val)
841{
842    return writeByteVectorInternalPtr(this, val);
843}
844
845status_t Parcel::writeByteVector(const std::vector<uint8_t>& val) {
846    return writeByteVectorInternal(this, val);
847}
848
849status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<uint8_t>>& val)
850{
851    return writeByteVectorInternalPtr(this, val);
852}
853
854status_t Parcel::writeInt32Vector(const std::vector<int32_t>& val)
855{
856    return writeTypedVector(val, &Parcel::writeInt32);
857}
858
859status_t Parcel::writeInt32Vector(const std::unique_ptr<std::vector<int32_t>>& val)
860{
861    return writeNullableTypedVector(val, &Parcel::writeInt32);
862}
863
864status_t Parcel::writeInt64Vector(const std::vector<int64_t>& val)
865{
866    return writeTypedVector(val, &Parcel::writeInt64);
867}
868
869status_t Parcel::writeInt64Vector(const std::unique_ptr<std::vector<int64_t>>& val)
870{
871    return writeNullableTypedVector(val, &Parcel::writeInt64);
872}
873
874status_t Parcel::writeFloatVector(const std::vector<float>& val)
875{
876    return writeTypedVector(val, &Parcel::writeFloat);
877}
878
879status_t Parcel::writeFloatVector(const std::unique_ptr<std::vector<float>>& val)
880{
881    return writeNullableTypedVector(val, &Parcel::writeFloat);
882}
883
884status_t Parcel::writeDoubleVector(const std::vector<double>& val)
885{
886    return writeTypedVector(val, &Parcel::writeDouble);
887}
888
889status_t Parcel::writeDoubleVector(const std::unique_ptr<std::vector<double>>& val)
890{
891    return writeNullableTypedVector(val, &Parcel::writeDouble);
892}
893
894status_t Parcel::writeBoolVector(const std::vector<bool>& val)
895{
896    return writeTypedVector(val, &Parcel::writeBool);
897}
898
899status_t Parcel::writeBoolVector(const std::unique_ptr<std::vector<bool>>& val)
900{
901    return writeNullableTypedVector(val, &Parcel::writeBool);
902}
903
904status_t Parcel::writeCharVector(const std::vector<char16_t>& val)
905{
906    return writeTypedVector(val, &Parcel::writeChar);
907}
908
909status_t Parcel::writeCharVector(const std::unique_ptr<std::vector<char16_t>>& val)
910{
911    return writeNullableTypedVector(val, &Parcel::writeChar);
912}
913
914status_t Parcel::writeString16Vector(const std::vector<String16>& val)
915{
916    return writeTypedVector(val, &Parcel::writeString16);
917}
918
919status_t Parcel::writeString16Vector(
920        const std::unique_ptr<std::vector<std::unique_ptr<String16>>>& val)
921{
922    return writeNullableTypedVector(val, &Parcel::writeString16);
923}
924
925status_t Parcel::writeUtf8VectorAsUtf16Vector(
926                        const std::unique_ptr<std::vector<std::unique_ptr<std::string>>>& val) {
927    return writeNullableTypedVector(val, &Parcel::writeUtf8AsUtf16);
928}
929
930status_t Parcel::writeUtf8VectorAsUtf16Vector(const std::vector<std::string>& val) {
931    return writeTypedVector(val, &Parcel::writeUtf8AsUtf16);
932}
933
934status_t Parcel::writeInt32(int32_t val)
935{
936    return writeAligned(val);
937}
938
939status_t Parcel::writeUint32(uint32_t val)
940{
941    return writeAligned(val);
942}
943
944status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
945    if (len > INT32_MAX) {
946        // don't accept size_t values which may have come from an
947        // inadvertent conversion from a negative int.
948        return BAD_VALUE;
949    }
950
951    if (!val) {
952        return writeInt32(-1);
953    }
954    status_t ret = writeInt32(static_cast<uint32_t>(len));
955    if (ret == NO_ERROR) {
956        ret = write(val, len * sizeof(*val));
957    }
958    return ret;
959}
960status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
961    if (len > INT32_MAX) {
962        // don't accept size_t values which may have come from an
963        // inadvertent conversion from a negative int.
964        return BAD_VALUE;
965    }
966
967    if (!val) {
968        return writeInt32(-1);
969    }
970    status_t ret = writeInt32(static_cast<uint32_t>(len));
971    if (ret == NO_ERROR) {
972        ret = write(val, len * sizeof(*val));
973    }
974    return ret;
975}
976
977status_t Parcel::writeBool(bool val)
978{
979    return writeInt32(int32_t(val));
980}
981
982status_t Parcel::writeChar(char16_t val)
983{
984    return writeInt32(int32_t(val));
985}
986
987status_t Parcel::writeByte(int8_t val)
988{
989    return writeInt32(int32_t(val));
990}
991
992status_t Parcel::writeInt64(int64_t val)
993{
994    return writeAligned(val);
995}
996
997status_t Parcel::writeUint64(uint64_t val)
998{
999    return writeAligned(val);
1000}
1001
1002status_t Parcel::writePointer(uintptr_t val)
1003{
1004    return writeAligned<binder_uintptr_t>(val);
1005}
1006
1007status_t Parcel::writeFloat(float val)
1008{
1009    return writeAligned(val);
1010}
1011
1012#if defined(__mips__) && defined(__mips_hard_float)
1013
1014status_t Parcel::writeDouble(double val)
1015{
1016    union {
1017        double d;
1018        unsigned long long ll;
1019    } u;
1020    u.d = val;
1021    return writeAligned(u.ll);
1022}
1023
1024#else
1025
1026status_t Parcel::writeDouble(double val)
1027{
1028    return writeAligned(val);
1029}
1030
1031#endif
1032
1033status_t Parcel::writeCString(const char* str)
1034{
1035    return write(str, strlen(str)+1);
1036}
1037
1038status_t Parcel::writeString8(const String8& str)
1039{
1040    status_t err = writeInt32(str.bytes());
1041    // only write string if its length is more than zero characters,
1042    // as readString8 will only read if the length field is non-zero.
1043    // this is slightly different from how writeString16 works.
1044    if (str.bytes() > 0 && err == NO_ERROR) {
1045        err = write(str.string(), str.bytes()+1);
1046    }
1047    return err;
1048}
1049
1050status_t Parcel::writeString16(const std::unique_ptr<String16>& str)
1051{
1052    if (!str) {
1053        return writeInt32(-1);
1054    }
1055
1056    return writeString16(*str);
1057}
1058
1059status_t Parcel::writeString16(const String16& str)
1060{
1061    return writeString16(str.string(), str.size());
1062}
1063
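// Wire format: int32 length in char16_t units, then the UTF-16 data and a
// terminating NUL, padded out to a 4-byte boundary.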
1064status_t Parcel::writeString16(const char16_t* str, size_t len)
1065{
1066    if (str == NULL) return writeInt32(-1);
1067
1068    status_t err = writeInt32(len);
1069    if (err == NO_ERROR) {
1070        len *= sizeof(char16_t);
1071        uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
1072        if (data) {
1073            memcpy(data, str, len);
1074            *reinterpret_cast<char16_t*>(data+len) = 0;
1075            return NO_ERROR;
1076        }
1077        err = mError;
1078    }
1079    return err;
1080}
1081
1082status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
1083{
1084    return flatten_binder(ProcessState::self(), val, this);
1085}
1086
1087status_t Parcel::writeStrongBinderVector(const std::vector<sp<IBinder>>& val)
1088{
1089    return writeTypedVector(val, &Parcel::writeStrongBinder);
1090}
1091
1092status_t Parcel::writeStrongBinderVector(const std::unique_ptr<std::vector<sp<IBinder>>>& val)
1093{
1094    return writeNullableTypedVector(val, &Parcel::writeStrongBinder);
1095}
1096
1097status_t Parcel::readStrongBinderVector(std::unique_ptr<std::vector<sp<IBinder>>>* val) const {
1098    return readNullableTypedVector(val, &Parcel::readNullableStrongBinder);
1099}
1100
1101status_t Parcel::readStrongBinderVector(std::vector<sp<IBinder>>* val) const {
1102    return readTypedVector(val, &Parcel::readStrongBinder);
1103}
1104
1105status_t Parcel::writeWeakBinder(const wp<IBinder>& val)
1106{
1107    return flatten_binder(ProcessState::self(), val, this);
1108}
1109
1110status_t Parcel::writeRawNullableParcelable(const Parcelable* parcelable) {
1111    if (!parcelable) {
1112        return writeInt32(0);
1113    }
1114
1115    return writeParcelable(*parcelable);
1116}
1117
1118status_t Parcel::writeParcelable(const Parcelable& parcelable) {
1119    status_t status = writeInt32(1);  // parcelable is not null.
1120    if (status != OK) {
1121        return status;
1122    }
1123    return parcelable.writeToParcel(this);
1124}
1125
1126status_t Parcel::writeValue(const binder::Value& value) {
1127    return value.writeToParcel(this);
1128}
1129
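// Wire format: numFds, numInts, each fd written with writeDupFileDescriptor(),
// then the raw ints.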
1130status_t Parcel::writeNativeHandle(const native_handle* handle)
1131{
1132    if (!handle || handle->version != sizeof(native_handle))
1133        return BAD_TYPE;
1134
1135    status_t err;
1136    err = writeInt32(handle->numFds);
1137    if (err != NO_ERROR) return err;
1138
1139    err = writeInt32(handle->numInts);
1140    if (err != NO_ERROR) return err;
1141
1142    for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
1143        err = writeDupFileDescriptor(handle->data[i]);
1144
1145    if (err != NO_ERROR) {
1146        ALOGD("write native handle, write dup fd failed");
1147        return err;
1148    }
1149    err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
1150    return err;
1151}
1152
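// Writes a file descriptor as a BINDER_TYPE_FD object; the cookie records
// whether this Parcel owns the fd and must close it when the data is freed.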
1153status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership)
1154{
1155    flat_binder_object obj;
1156    obj.hdr.type = BINDER_TYPE_FD;
1157    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
1158    obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
1159    obj.handle = fd;
1160    obj.cookie = takeOwnership ? 1 : 0;
1161    return writeObject(obj, true);
1162}
1163
1164status_t Parcel::writeDupFileDescriptor(int fd)
1165{
1166    int dupFd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
1167    if (dupFd < 0) {
1168        return -errno;
1169    }
1170    status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
1171    if (err != OK) {
1172        close(dupFd);
1173    }
1174    return err;
1175}
1176
1177status_t Parcel::writeParcelFileDescriptor(int fd, bool takeOwnership)
1178{
1179    writeInt32(0);
1180    return writeFileDescriptor(fd, takeOwnership);
1181}
1182
1183status_t Parcel::writeUniqueFileDescriptor(const base::unique_fd& fd) {
1184    return writeDupFileDescriptor(fd.get());
1185}
1186
1187status_t Parcel::writeUniqueFileDescriptorVector(const std::vector<base::unique_fd>& val) {
1188    return writeTypedVector(val, &Parcel::writeUniqueFileDescriptor);
1189}
1190
1191status_t Parcel::writeUniqueFileDescriptorVector(const std::unique_ptr<std::vector<base::unique_fd>>& val) {
1192    return writeNullableTypedVector(val, &Parcel::writeUniqueFileDescriptor);
1193}
1194
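// Small blobs (or any blob when fds are not allowed) are written in place in
// the Parcel; larger ones are copied into an ashmem region and only the
// region's fd is written, keeping big payloads out of the transaction buffer.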
1195status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob)
1196{
1197    if (len > INT32_MAX) {
1198        // don't accept size_t values which may have come from an
1199        // inadvertent conversion from a negative int.
1200        return BAD_VALUE;
1201    }
1202
1203    status_t status;
1204    if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) {
1205        ALOGV("writeBlob: write in place");
1206        status = writeInt32(BLOB_INPLACE);
1207        if (status) return status;
1208
1209        void* ptr = writeInplace(len);
1210        if (!ptr) return NO_MEMORY;
1211
1212        outBlob->init(-1, ptr, len, false);
1213        return NO_ERROR;
1214    }
1215
1216    ALOGV("writeBlob: write to ashmem");
1217    int fd = ashmem_create_region("Parcel Blob", len);
1218    if (fd < 0) return NO_MEMORY;
1219
1220    int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
1221    if (result < 0) {
1222        status = result;
1223    } else {
1224        void* ptr = ::mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1225        if (ptr == MAP_FAILED) {
1226            status = -errno;
1227        } else {
1228            if (!mutableCopy) {
1229                result = ashmem_set_prot_region(fd, PROT_READ);
1230            }
1231            if (result < 0) {
1232                status = result;
1233            } else {
1234                status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE);
1235                if (!status) {
1236                    status = writeFileDescriptor(fd, true /*takeOwnership*/);
1237                    if (!status) {
1238                        outBlob->init(fd, ptr, len, mutableCopy);
1239                        return NO_ERROR;
1240                    }
1241                }
1242            }
1243        }
1244        ::munmap(ptr, len);
1245    }
1246    ::close(fd);
1247    return status;
1248}
1249
1250status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd)
1251{
1252    // Must match up with what's done in writeBlob.
1253    if (!mAllowFds) return FDS_NOT_ALLOWED;
1254    status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE);
1255    if (status) return status;
1256    return writeDupFileDescriptor(fd);
1257}
1258
1259status_t Parcel::write(const FlattenableHelperInterface& val)
1260{
1261    status_t err;
1262
    // sizes needed for the flattened payload and its file descriptors
1264    const size_t len = val.getFlattenedSize();
1265    const size_t fd_count = val.getFdCount();
1266
1267    if ((len > INT32_MAX) || (fd_count >= gMaxFds)) {
1268        // don't accept size_t values which may have come from an
1269        // inadvertent conversion from a negative int.
1270        return BAD_VALUE;
1271    }
1272
1273    err = this->writeInt32(len);
1274    if (err) return err;
1275
1276    err = this->writeInt32(fd_count);
1277    if (err) return err;
1278
1279    // payload
1280    void* const buf = this->writeInplace(len);
1281    if (buf == NULL)
1282        return BAD_VALUE;
1283
1284    int* fds = NULL;
1285    if (fd_count) {
1286        fds = new (std::nothrow) int[fd_count];
1287        if (fds == nullptr) {
1288            ALOGE("write: failed to allocate requested %zu fds", fd_count);
1289            return BAD_VALUE;
1290        }
1291    }
1292
1293    err = val.flatten(buf, len, fds, fd_count);
1294    for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
1295        err = this->writeDupFileDescriptor( fds[i] );
1296    }
1297
1298    if (fd_count) {
1299        delete [] fds;
1300    }
1301
1302    return err;
1303}
1304
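// Copies a flat_binder_object into the data buffer and, for non-null objects
// (or when nullMetaData is set), records its offset in mObjects so the driver
// can translate it during the transaction.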
1305status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
1306{
1307    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
1308    const bool enoughObjects = mObjectsSize < mObjectsCapacity;
1309    if (enoughData && enoughObjects) {
1310restart_write:
1311        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;
1312
1313        // remember if it's a file descriptor
1314        if (val.hdr.type == BINDER_TYPE_FD) {
1315            if (!mAllowFds) {
1316                // fail before modifying our object index
1317                return FDS_NOT_ALLOWED;
1318            }
1319            mHasFds = mFdsKnown = true;
1320        }
1321
1322        // Need to write meta-data?
1323        if (nullMetaData || val.binder != 0) {
1324            mObjects[mObjectsSize] = mDataPos;
1325            acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize);
1326            mObjectsSize++;
1327        }
1328
1329        return finishWrite(sizeof(flat_binder_object));
1330    }
1331
1332    if (!enoughData) {
1333        const status_t err = growData(sizeof(val));
1334        if (err != NO_ERROR) return err;
1335    }
1336    if (!enoughObjects) {
1337        size_t newSize = ((mObjectsSize+2)*3)/2;
1338        if (newSize*sizeof(binder_size_t) < mObjectsSize) return NO_MEMORY;   // overflow
1339        binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
1340        if (objects == NULL) return NO_MEMORY;
1341        mObjects = objects;
1342        mObjectsCapacity = newSize;
1343    }
1344
1345    goto restart_write;
1346}
1347
1348status_t Parcel::writeNoException()
1349{
1350    binder::Status status;
1351    return status.writeToParcel(this);
1352}
1353
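// Writes a binder::Map as an int32 entry count followed by alternating key
// (wrapped in a Value) and value entries.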
1354status_t Parcel::writeMap(const ::android::binder::Map& map_in)
1355{
1356    using ::std::map;
1357    using ::android::binder::Value;
1358    using ::android::binder::Map;
1359
1360    Map::const_iterator iter;
1361    status_t ret;
1362
1363    ret = writeInt32(map_in.size());
1364
1365    if (ret != NO_ERROR) {
1366        return ret;
1367    }
1368
1369    for (iter = map_in.begin(); iter != map_in.end(); ++iter) {
1370        ret = writeValue(Value(iter->first));
1371        if (ret != NO_ERROR) {
1372            return ret;
1373        }
1374
1375        ret = writeValue(iter->second);
1376        if (ret != NO_ERROR) {
1377            return ret;
1378        }
1379    }
1380
1381    return ret;
1382}
1383
1384status_t Parcel::writeNullableMap(const std::unique_ptr<binder::Map>& map)
1385{
1386    if (map == NULL) {
1387        return writeInt32(-1);
1388    }
1389
1390    return writeMap(*map.get());
1391}
1392
1393status_t Parcel::readMap(::android::binder::Map* map_out)const
1394{
1395    using ::std::map;
1396    using ::android::String16;
1397    using ::android::String8;
1398    using ::android::binder::Value;
1399    using ::android::binder::Map;
1400
1401    status_t ret = NO_ERROR;
1402    int32_t count;
1403
1404    ret = readInt32(&count);
1405    if (ret != NO_ERROR) {
1406        return ret;
1407    }
1408
1409    if (count < 0) {
1410        ALOGE("readMap: Unexpected count: %d", count);
1411        return (count == -1)
1412            ? UNEXPECTED_NULL
1413            : BAD_VALUE;
1414    }
1415
1416    map_out->clear();
1417
1418    while (count--) {
1419        Map::key_type key;
1420        Value value;
1421
1422        ret = readValue(&value);
1423        if (ret != NO_ERROR) {
1424            return ret;
1425        }
1426
1427        if (!value.getString(&key)) {
1428            ALOGE("readMap: Key type not a string (parcelType = %d)", value.parcelType());
1429            return BAD_VALUE;
1430        }
1431
1432        ret = readValue(&value);
1433        if (ret != NO_ERROR) {
1434            return ret;
1435        }
1436
1437        (*map_out)[key] = value;
1438    }
1439
1440    return ret;
1441}
1442
1443status_t Parcel::readNullableMap(std::unique_ptr<binder::Map>* map) const
1444{
1445    const size_t start = dataPosition();
1446    int32_t count;
1447    status_t status = readInt32(&count);
1448    map->reset();
1449
1450    if (status != OK || count == -1) {
1451        return status;
1452    }
1453
1454    setDataPosition(start);
1455    map->reset(new binder::Map());
1456
1457    status = readMap(map->get());
1458
1459    if (status != OK) {
1460        map->reset();
1461    }
1462
1463    return status;
1464}
1465
1466
1467
1468void Parcel::remove(size_t /*start*/, size_t /*amt*/)
1469{
1470    LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
1471}
1472
1473status_t Parcel::validateReadData(size_t upperBound) const
1474{
1475    // Don't allow non-object reads on object data
1476    if (mObjectsSorted || mObjectsSize <= 1) {
1477data_sorted:
1478        // Expect to check only against the next object
1479        if (mNextObjectHint < mObjectsSize && upperBound > mObjects[mNextObjectHint]) {
            // The requested read extends past the start of the next expected
            // object. Walk forward over every object that begins before the
            // read's upper bound, checking that the read overlaps none of them.
1482            size_t nextObject = mNextObjectHint;
1483            do {
1484                if (mDataPos < mObjects[nextObject] + sizeof(flat_binder_object)) {
1485                    // Requested info overlaps with an object
1486                    ALOGE("Attempt to read from protected data in Parcel %p", this);
1487                    return PERMISSION_DENIED;
1488                }
1489                nextObject++;
1490            } while (nextObject < mObjectsSize && upperBound > mObjects[nextObject]);
1491            mNextObjectHint = nextObject;
1492        }
1493        return NO_ERROR;
1494    }
1495    // Quickly determine if mObjects is sorted.
1496    binder_size_t* currObj = mObjects + mObjectsSize - 1;
1497    binder_size_t* prevObj = currObj;
1498    while (currObj > mObjects) {
1499        prevObj--;
1500        if(*prevObj > *currObj) {
1501            goto data_unsorted;
1502        }
1503        currObj--;
1504    }
1505    mObjectsSorted = true;
1506    goto data_sorted;
1507
1508data_unsorted:
1509    // Insertion Sort mObjects
1510    // Great for mostly sorted lists. If randomly sorted or reverse ordered mObjects become common,
1511    // switch to std::sort(mObjects, mObjects + mObjectsSize);
1512    for (binder_size_t* iter0 = mObjects + 1; iter0 < mObjects + mObjectsSize; iter0++) {
1513        binder_size_t temp = *iter0;
1514        binder_size_t* iter1 = iter0 - 1;
1515        while (iter1 >= mObjects && *iter1 > temp) {
1516            *(iter1 + 1) = *iter1;
1517            iter1--;
1518        }
1519        *(iter1 + 1) = temp;
1520    }
1521    mNextObjectHint = 0;
1522    mObjectsSorted = true;
1523    goto data_sorted;
1524}
1525
1526status_t Parcel::read(void* outData, size_t len) const
1527{
1528    if (len > INT32_MAX) {
1529        // don't accept size_t values which may have come from an
1530        // inadvertent conversion from a negative int.
1531        return BAD_VALUE;
1532    }
1533
1534    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1535            && len <= pad_size(len)) {
1536        if (mObjectsSize > 0) {
1537            status_t err = validateReadData(mDataPos + pad_size(len));
1538            if(err != NO_ERROR) {
1539                // Still increment the data position by the expected length
1540                mDataPos += pad_size(len);
1541                ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
1542                return err;
1543            }
1544        }
1545        memcpy(outData, mData+mDataPos, len);
1546        mDataPos += pad_size(len);
1547        ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
1548        return NO_ERROR;
1549    }
1550    return NOT_ENOUGH_DATA;
1551}
1552
1553const void* Parcel::readInplace(size_t len) const
1554{
1555    if (len > INT32_MAX) {
1556        // don't accept size_t values which may have come from an
1557        // inadvertent conversion from a negative int.
1558        return NULL;
1559    }
1560
1561    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1562            && len <= pad_size(len)) {
1563        if (mObjectsSize > 0) {
1564            status_t err = validateReadData(mDataPos + pad_size(len));
1565            if(err != NO_ERROR) {
1566                // Still increment the data position by the expected length
1567                mDataPos += pad_size(len);
1568                ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
1569                return NULL;
1570            }
1571        }
1572
1573        const void* data = mData+mDataPos;
1574        mDataPos += pad_size(len);
1575        ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
1576        return data;
1577    }
1578    return NULL;
1579}
1580
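// Reads a naturally aligned value of type T, first validating that the read
// does not overlap any flattened binder object in the buffer.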
1581template<class T>
1582status_t Parcel::readAligned(T *pArg) const {
1583    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1584
1585    if ((mDataPos+sizeof(T)) <= mDataSize) {
1586        if (mObjectsSize > 0) {
1587            status_t err = validateReadData(mDataPos + sizeof(T));
1588            if(err != NO_ERROR) {
1589                // Still increment the data position by the expected length
1590                mDataPos += sizeof(T);
1591                return err;
1592            }
1593        }
1594
1595        const void* data = mData+mDataPos;
1596        mDataPos += sizeof(T);
1597        *pArg =  *reinterpret_cast<const T*>(data);
1598        return NO_ERROR;
1599    } else {
1600        return NOT_ENOUGH_DATA;
1601    }
1602}
1603
1604template<class T>
1605T Parcel::readAligned() const {
1606    T result;
1607    if (readAligned(&result) != NO_ERROR) {
1608        result = 0;
1609    }
1610
1611    return result;
1612}
1613
1614template<class T>
1615status_t Parcel::writeAligned(T val) {
1616    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1617
1618    if ((mDataPos+sizeof(val)) <= mDataCapacity) {
1619restart_write:
1620        *reinterpret_cast<T*>(mData+mDataPos) = val;
1621        return finishWrite(sizeof(val));
1622    }
1623
1624    status_t err = growData(sizeof(val));
1625    if (err == NO_ERROR) goto restart_write;
1626    return err;
1627}
1628
1629namespace {
1630
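// Shared helper for the readByteVector() overloads: reads an int32 length
// prefix and copies that many bytes out of the Parcel's buffer.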
1631template<typename T>
1632status_t readByteVectorInternal(const Parcel* parcel,
1633                                std::vector<T>* val) {
1634    val->clear();
1635
1636    int32_t size;
1637    status_t status = parcel->readInt32(&size);
1638
1639    if (status != OK) {
1640        return status;
1641    }
1642
1643    if (size < 0) {
1644        status = UNEXPECTED_NULL;
1645        return status;
1646    }
1647    if (size_t(size) > parcel->dataAvail()) {
1648        status = BAD_VALUE;
1649        return status;
1650    }
1651
1652    T* data = const_cast<T*>(reinterpret_cast<const T*>(parcel->readInplace(size)));
1653    if (!data) {
1654        status = BAD_VALUE;
1655        return status;
1656    }
1657    val->reserve(size);
1658    val->insert(val->end(), data, data + size);
1659
1660    return status;
1661}
1662
1663template<typename T>
1664status_t readByteVectorInternalPtr(
1665        const Parcel* parcel,
1666        std::unique_ptr<std::vector<T>>* val) {
1667    const int32_t start = parcel->dataPosition();
1668    int32_t size;
1669    status_t status = parcel->readInt32(&size);
1670    val->reset();
1671
1672    if (status != OK || size < 0) {
1673        return status;
1674    }
1675
1676    parcel->setDataPosition(start);
1677    val->reset(new (std::nothrow) std::vector<T>());
1678
1679    status = readByteVectorInternal(parcel, val->get());
1680
1681    if (status != OK) {
1682        val->reset();
1683    }
1684
1685    return status;
1686}
1687
1688}  // namespace
1689
1690status_t Parcel::readByteVector(std::vector<int8_t>* val) const {
1691    return readByteVectorInternal(this, val);
1692}
1693
1694status_t Parcel::readByteVector(std::vector<uint8_t>* val) const {
1695    return readByteVectorInternal(this, val);
1696}
1697
1698status_t Parcel::readByteVector(std::unique_ptr<std::vector<int8_t>>* val) const {
1699    return readByteVectorInternalPtr(this, val);
1700}
1701
1702status_t Parcel::readByteVector(std::unique_ptr<std::vector<uint8_t>>* val) const {
1703    return readByteVectorInternalPtr(this, val);
1704}
1705
1706status_t Parcel::readInt32Vector(std::unique_ptr<std::vector<int32_t>>* val) const {
1707    return readNullableTypedVector(val, &Parcel::readInt32);
1708}
1709
1710status_t Parcel::readInt32Vector(std::vector<int32_t>* val) const {
1711    return readTypedVector(val, &Parcel::readInt32);
1712}
1713
1714status_t Parcel::readInt64Vector(std::unique_ptr<std::vector<int64_t>>* val) const {
1715    return readNullableTypedVector(val, &Parcel::readInt64);
1716}
1717
1718status_t Parcel::readInt64Vector(std::vector<int64_t>* val) const {
1719    return readTypedVector(val, &Parcel::readInt64);
1720}
1721
1722status_t Parcel::readFloatVector(std::unique_ptr<std::vector<float>>* val) const {
1723    return readNullableTypedVector(val, &Parcel::readFloat);
1724}
1725
1726status_t Parcel::readFloatVector(std::vector<float>* val) const {
1727    return readTypedVector(val, &Parcel::readFloat);
1728}
1729
1730status_t Parcel::readDoubleVector(std::unique_ptr<std::vector<double>>* val) const {
1731    return readNullableTypedVector(val, &Parcel::readDouble);
1732}
1733
1734status_t Parcel::readDoubleVector(std::vector<double>* val) const {
1735    return readTypedVector(val, &Parcel::readDouble);
1736}
1737
1738status_t Parcel::readBoolVector(std::unique_ptr<std::vector<bool>>* val) const {
1739    const int32_t start = dataPosition();
1740    int32_t size;
1741    status_t status = readInt32(&size);
1742    val->reset();
1743
1744    if (status != OK || size < 0) {
1745        return status;
1746    }
1747
1748    setDataPosition(start);
1749    val->reset(new (std::nothrow) std::vector<bool>());
1750
1751    status = readBoolVector(val->get());
1752
1753    if (status != OK) {
1754        val->reset();
1755    }
1756
1757    return status;
1758}
1759
1760status_t Parcel::readBoolVector(std::vector<bool>* val) const {
1761    int32_t size;
1762    status_t status = readInt32(&size);
1763
1764    if (status != OK) {
1765        return status;
1766    }
1767
1768    if (size < 0) {
1769        return UNEXPECTED_NULL;
1770    }
1771
1772    val->resize(size);
1773
1774    /* C++ bool handling means a vector of bools isn't necessarily addressable
1775     * (we might use individual bits)
1776     */
1777    bool data;
1778    for (int32_t i = 0; i < size; ++i) {
1779        status = readBool(&data);
1780        (*val)[i] = data;
1781
1782        if (status != OK) {
1783            return status;
1784        }
1785    }
1786
1787    return OK;
1788}
1789
1790status_t Parcel::readCharVector(std::unique_ptr<std::vector<char16_t>>* val) const {
1791    return readNullableTypedVector(val, &Parcel::readChar);
1792}
1793
1794status_t Parcel::readCharVector(std::vector<char16_t>* val) const {
1795    return readTypedVector(val, &Parcel::readChar);
1796}
1797
1798status_t Parcel::readString16Vector(
1799        std::unique_ptr<std::vector<std::unique_ptr<String16>>>* val) const {
1800    return readNullableTypedVector(val, &Parcel::readString16);
1801}
1802
1803status_t Parcel::readString16Vector(std::vector<String16>* val) const {
1804    return readTypedVector(val, &Parcel::readString16);
1805}
1806
1807status_t Parcel::readUtf8VectorFromUtf16Vector(
1808        std::unique_ptr<std::vector<std::unique_ptr<std::string>>>* val) const {
1809    return readNullableTypedVector(val, &Parcel::readUtf8FromUtf16);
1810}
1811
1812status_t Parcel::readUtf8VectorFromUtf16Vector(std::vector<std::string>* val) const {
1813    return readTypedVector(val, &Parcel::readUtf8FromUtf16);
1814}
1815
1816status_t Parcel::readInt32(int32_t *pArg) const
1817{
1818    return readAligned(pArg);
1819}
1820
1821int32_t Parcel::readInt32() const
1822{
1823    return readAligned<int32_t>();
1824}
1825
1826status_t Parcel::readUint32(uint32_t *pArg) const
1827{
1828    return readAligned(pArg);
1829}
1830
1831uint32_t Parcel::readUint32() const
1832{
1833    return readAligned<uint32_t>();
1834}
1835
1836status_t Parcel::readInt64(int64_t *pArg) const
1837{
1838    return readAligned(pArg);
1839}
1840
1841
1842int64_t Parcel::readInt64() const
1843{
1844    return readAligned<int64_t>();
1845}
1846
1847status_t Parcel::readUint64(uint64_t *pArg) const
1848{
1849    return readAligned(pArg);
1850}
1851
1852uint64_t Parcel::readUint64() const
1853{
1854    return readAligned<uint64_t>();
1855}
1856
1857status_t Parcel::readPointer(uintptr_t *pArg) const
1858{
1859    status_t ret;
1860    binder_uintptr_t ptr;
1861    ret = readAligned(&ptr);
1862    if (ret == NO_ERROR)
1863        *pArg = ptr;
1864    return ret;
1865}
1866
1867uintptr_t Parcel::readPointer() const
1868{
1869    return readAligned<binder_uintptr_t>();
1870}
1871
1872
1873status_t Parcel::readFloat(float *pArg) const
1874{
1875    return readAligned(pArg);
1876}
1877
1878
1879float Parcel::readFloat() const
1880{
1881    return readAligned<float>();
1882}
1883
1884#if defined(__mips__) && defined(__mips_hard_float)
1885
1886status_t Parcel::readDouble(double *pArg) const
1887{
1888    union {
1889      double d;
1890      unsigned long long ll;
1891    } u;
1892    u.d = 0;
1893    status_t status;
1894    status = readAligned(&u.ll);
1895    *pArg = u.d;
1896    return status;
1897}
1898
1899double Parcel::readDouble() const
1900{
1901    union {
1902      double d;
1903      unsigned long long ll;
1904    } u;
1905    u.ll = readAligned<unsigned long long>();
1906    return u.d;
1907}
1908
1909#else
1910
1911status_t Parcel::readDouble(double *pArg) const
1912{
1913    return readAligned(pArg);
1914}
1915
1916double Parcel::readDouble() const
1917{
1918    return readAligned<double>();
1919}
1920
1921#endif
1922
1923status_t Parcel::readIntPtr(intptr_t *pArg) const
1924{
1925    return readAligned(pArg);
1926}
1927
1928
1929intptr_t Parcel::readIntPtr() const
1930{
1931    return readAligned<intptr_t>();
1932}
1933
1934status_t Parcel::readBool(bool *pArg) const
1935{
1936    int32_t tmp = 0;
1937    status_t ret = readInt32(&tmp);
1938    *pArg = (tmp != 0);
1939    return ret;
1940}
1941
1942bool Parcel::readBool() const
1943{
1944    return readInt32() != 0;
1945}
1946
1947status_t Parcel::readChar(char16_t *pArg) const
1948{
1949    int32_t tmp = 0;
1950    status_t ret = readInt32(&tmp);
1951    *pArg = char16_t(tmp);
1952    return ret;
1953}
1954
1955char16_t Parcel::readChar() const
1956{
1957    return char16_t(readInt32());
1958}
1959
1960status_t Parcel::readByte(int8_t *pArg) const
1961{
1962    int32_t tmp = 0;
1963    status_t ret = readInt32(&tmp);
1964    *pArg = int8_t(tmp);
1965    return ret;
1966}
1967
1968int8_t Parcel::readByte() const
1969{
1970    return int8_t(readInt32());
1971}
1972
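// Strings travel over binder as length-prefixed UTF-16; this reads the UTF-16 payload
// in place and converts it to UTF-8 into *str. Returns UNEXPECTED_NULL if the string
// is null or the data is malformed.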
1973status_t Parcel::readUtf8FromUtf16(std::string* str) const {
1974    size_t utf16Size = 0;
1975    const char16_t* src = readString16Inplace(&utf16Size);
1976    if (!src) {
1977        return UNEXPECTED_NULL;
1978    }
1979
1980    // Save ourselves the trouble, we're done.
1981    if (utf16Size == 0u) {
1982        str->clear();
1983        return NO_ERROR;
1984    }
1985
1986    // Allow for closing '\0'
1987    ssize_t utf8Size = utf16_to_utf8_length(src, utf16Size) + 1;
1988    if (utf8Size < 1) {
1989        return BAD_VALUE;
1990    }
1991    // Resize to include room for the trailing NUL so utf16_to_utf8 can write its
1992    // terminator, then shrink back to the real length below.
1993    str->resize(utf8Size);
1994    utf16_to_utf8(src, utf16Size, &((*str)[0]), utf8Size);
1995    str->resize(utf8Size - 1);
1996    return NO_ERROR;
1997}
1998
1999status_t Parcel::readUtf8FromUtf16(std::unique_ptr<std::string>* str) const {
2000    const int32_t start = dataPosition();
2001    int32_t size;
2002    status_t status = readInt32(&size);
2003    str->reset();
2004
2005    if (status != OK || size < 0) {
2006        return status;
2007    }
2008
2009    setDataPosition(start);
2010    str->reset(new (std::nothrow) std::string());
2011    return readUtf8FromUtf16(str->get());
2012}
2013
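// Returns a pointer to a NUL-terminated string stored inline in the parcel's own buffer
// (valid only as long as the parcel's data is), advancing the read position past the
// padded string. Returns NULL if no terminator is found within the remaining bounds.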
2014const char* Parcel::readCString() const
2015{
2016    const size_t avail = mDataSize-mDataPos;
2017    if (avail > 0) {
2018        const char* str = reinterpret_cast<const char*>(mData+mDataPos);
2019        // is the string's trailing NUL within the parcel's valid bounds?
2020        const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
2021        if (eos) {
2022            const size_t len = eos - str;
2023            mDataPos += pad_size(len+1);
2024            ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
2025            return str;
2026        }
2027    }
2028    return NULL;
2029}
2030
2031String8 Parcel::readString8() const
2032{
2033    String8 retString;
2034    status_t status = readString8(&retString);
2035    if (status != OK) {
2036        // We don't care about errors here, so just return an empty string.
2037        return String8();
2038    }
2039    return retString;
2040}
2041
2042status_t Parcel::readString8(String8* pArg) const
2043{
2044    int32_t size;
2045    status_t status = readInt32(&size);
2046    if (status != OK) {
2047        return status;
2048    }
2049    // watch for potential int overflow from size+1
2050    if (size < 0 || size >= INT32_MAX) {
2051        return BAD_VALUE;
2052    }
2053    // |writeString8| writes nothing for empty string.
2054    if (size == 0) {
2055        *pArg = String8();
2056        return OK;
2057    }
2058    const char* str = (const char*)readInplace(size + 1);
2059    if (str == NULL) {
2060        return BAD_VALUE;
2061    }
2062    pArg->setTo(str, size);
2063    return OK;
2064}
2065
2066String16 Parcel::readString16() const
2067{
2068    size_t len;
2069    const char16_t* str = readString16Inplace(&len);
2070    if (str) return String16(str, len);
2071    ALOGE("Reading a NULL string not supported here.");
2072    return String16();
2073}
2074
2075status_t Parcel::readString16(std::unique_ptr<String16>* pArg) const
2076{
2077    const int32_t start = dataPosition();
2078    int32_t size;
2079    status_t status = readInt32(&size);
2080    pArg->reset();
2081
2082    if (status != OK || size < 0) {
2083        return status;
2084    }
2085
2086    setDataPosition(start);
2087    pArg->reset(new (std::nothrow) String16());
2088
2089    status = readString16(pArg->get());
2090
2091    if (status != OK) {
2092        pArg->reset();
2093    }
2094
2095    return status;
2096}
2097
2098status_t Parcel::readString16(String16* pArg) const
2099{
2100    size_t len;
2101    const char16_t* str = readString16Inplace(&len);
2102    if (str) {
2103        pArg->setTo(str, len);
2104        return OK;
2105    } else {
2106        *pArg = String16();
2107        return UNEXPECTED_NULL;
2108    }
2109}
2110
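// Reads a length-prefixed UTF-16 string and returns a pointer into the parcel's buffer
// (the writer appends a terminating NUL, hence the size+1 below). On any error, *outLen
// is set to 0 and NULL is returned.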
2111const char16_t* Parcel::readString16Inplace(size_t* outLen) const
2112{
2113    int32_t size = readInt32();
2114    // watch for potential int overflow from size+1
2115    if (size >= 0 && size < INT32_MAX) {
2116        *outLen = size;
2117        const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
2118        if (str != NULL) {
2119            return str;
2120        }
2121    }
2122    *outLen = 0;
2123    return NULL;
2124}
2125
2126status_t Parcel::readStrongBinder(sp<IBinder>* val) const
2127{
2128    status_t status = readNullableStrongBinder(val);
2129    if (status == OK && !val->get()) {
2130        status = UNEXPECTED_NULL;
2131    }
2132    return status;
2133}
2134
2135status_t Parcel::readNullableStrongBinder(sp<IBinder>* val) const
2136{
2137    return unflatten_binder(ProcessState::self(), *this, val);
2138}
2139
2140sp<IBinder> Parcel::readStrongBinder() const
2141{
2142    sp<IBinder> val;
2143    // Note that a lot of code in Android reads binders by hand with this
2144    // method, and that code has historically been ok with getting nullptr
2145    // back (while ignoring error codes).
2146    readNullableStrongBinder(&val);
2147    return val;
2148}
2149
2150wp<IBinder> Parcel::readWeakBinder() const
2151{
2152    wp<IBinder> val;
2153    unflatten_binder(ProcessState::self(), *this, &val);
2154    return val;
2155}
2156
2157status_t Parcel::readParcelable(Parcelable* parcelable) const {
2158    int32_t have_parcelable = 0;
2159    status_t status = readInt32(&have_parcelable);
2160    if (status != OK) {
2161        return status;
2162    }
2163    if (!have_parcelable) {
2164        return UNEXPECTED_NULL;
2165    }
2166    return parcelable->readFromParcel(this);
2167}
2168
2169status_t Parcel::readValue(binder::Value* value) const {
2170    return value->readFromParcel(this);
2171}
2172
2173int32_t Parcel::readExceptionCode() const
2174{
2175    binder::Status status;
2176    status.readFromParcel(*this);
2177    return status.exceptionCode();
2178}
2179
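// Reconstructs a native_handle written by writeNativeHandle(). Every fd carried in the
// parcel is dup'ed (with CLOEXEC) into the new handle, so the caller owns the result and
// is expected to free it with native_handle_close()/native_handle_delete().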
2180native_handle* Parcel::readNativeHandle() const
2181{
2182    int numFds, numInts;
2183    status_t err;
2184    err = readInt32(&numFds);
2185    if (err != NO_ERROR) return 0;
2186    err = readInt32(&numInts);
2187    if (err != NO_ERROR) return 0;
2188
2189    native_handle* h = native_handle_create(numFds, numInts);
2190    if (!h) {
2191        return 0;
2192    }
2193
2194    for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
2195        h->data[i] = fcntl(readFileDescriptor(), F_DUPFD_CLOEXEC, 0);
2196        if (h->data[i] < 0) {
2197            for (int j = 0; j < i; j++) {
2198                close(h->data[j]);
2199            }
2200            native_handle_delete(h);
2201            return 0;
2202        }
2203    }
2204    err = read(h->data + numFds, sizeof(int)*numInts);
2205    if (err != NO_ERROR) {
2206        native_handle_close(h);
2207        native_handle_delete(h);
2208        h = 0;
2209    }
2210    return h;
2211}
2212
2213int Parcel::readFileDescriptor() const
2214{
2215    const flat_binder_object* flat = readObject(true);
2216
2217    if (flat && flat->hdr.type == BINDER_TYPE_FD) {
2218        return flat->handle;
2219    }
2220
2221    return BAD_TYPE;
2222}
2223
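// Reads the wire form of a ParcelFileDescriptor: an int32 flag saying whether a
// communication fd accompanies the data fd. The data fd is returned (or BAD_TYPE on
// failure); the optional comm fd is read and discarded.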
2224int Parcel::readParcelFileDescriptor() const
2225{
2226    int32_t hasComm = readInt32();
2227    int fd = readFileDescriptor();
2228    if (hasComm != 0) {
2229        // skip
2230        readFileDescriptor();
2231    }
2232    return fd;
2233}
2234
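// Duplicates the next fd in the parcel (with CLOEXEC) and hands ownership of the
// duplicate to *val; the fd embedded in the parcel remains owned by the parcel.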
2235status_t Parcel::readUniqueFileDescriptor(base::unique_fd* val) const
2236{
2237    int got = readFileDescriptor();
2238
2239    if (got == BAD_TYPE) {
2240        return BAD_TYPE;
2241    }
2242
2243    val->reset(fcntl(got, F_DUPFD_CLOEXEC, 0));
2244
2245    if (val->get() < 0) {
2246        return BAD_VALUE;
2247    }
2248
2249    return OK;
2250}
2251
2252
2253status_t Parcel::readUniqueFileDescriptorVector(std::unique_ptr<std::vector<base::unique_fd>>* val) const {
2254    return readNullableTypedVector(val, &Parcel::readUniqueFileDescriptor);
2255}
2256
2257status_t Parcel::readUniqueFileDescriptorVector(std::vector<base::unique_fd>* val) const {
2258    return readTypedVector(val, &Parcel::readUniqueFileDescriptor);
2259}
2260
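// Blobs are either stored inline in the parcel (BLOB_INPLACE) or passed as an ashmem fd
// that is mmap'ed here; outBlob->release() unmaps it again. A minimal usage sketch,
// assuming the ReadableBlob::data() accessor declared in Parcel.h:
//
//     Parcel::ReadableBlob blob;
//     if (parcel.readBlob(size, &blob) == NO_ERROR) {
//         memcpy(dst, blob.data(), size);
//         blob.release();
//     }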
2261status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
2262{
2263    int32_t blobType;
2264    status_t status = readInt32(&blobType);
2265    if (status) return status;
2266
2267    if (blobType == BLOB_INPLACE) {
2268        ALOGV("readBlob: read in place");
2269        const void* ptr = readInplace(len);
2270        if (!ptr) return BAD_VALUE;
2271
2272        outBlob->init(-1, const_cast<void*>(ptr), len, false);
2273        return NO_ERROR;
2274    }
2275
2276    ALOGV("readBlob: read from ashmem");
2277    bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE);
2278    int fd = readFileDescriptor();
2279    if (fd == int(BAD_TYPE)) return BAD_VALUE;
2280
2281    void* ptr = ::mmap(NULL, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ,
2282            MAP_SHARED, fd, 0);
2283    if (ptr == MAP_FAILED) return NO_MEMORY;
2284
2285    outBlob->init(fd, ptr, len, isMutable);
2286    return NO_ERROR;
2287}
2288
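// Unflattens the form produced by the corresponding write(): a length, an fd count, the
// padded payload read in place, and then the fds, each dup'ed (with CLOEXEC) before
// being handed to unflatten(). The fd count is bounded by the process RLIMIT_NOFILE
// (gMaxFds) to reject corrupt parcels.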
2289status_t Parcel::read(FlattenableHelperInterface& val) const
2290{
2291    // size
2292    const size_t len = this->readInt32();
2293    const size_t fd_count = this->readInt32();
2294
2295    if ((len > INT32_MAX) || (fd_count >= gMaxFds)) {
2296        // don't accept size_t values which may have come from an
2297        // inadvertent conversion from a negative int.
2298        return BAD_VALUE;
2299    }
2300
2301    // payload
2302    void const* const buf = this->readInplace(pad_size(len));
2303    if (buf == NULL)
2304        return BAD_VALUE;
2305
2306    int* fds = NULL;
2307    if (fd_count) {
2308        fds = new (std::nothrow) int[fd_count];
2309        if (fds == nullptr) {
2310            ALOGE("read: failed to allocate requested %zu fds", fd_count);
2311            return BAD_VALUE;
2312        }
2313    }
2314
2315    status_t err = NO_ERROR;
2316    for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
2317        int fd = this->readFileDescriptor();
2318        if (fd < 0 || ((fds[i] = fcntl(fd, F_DUPFD_CLOEXEC, 0)) < 0)) {
2319            err = BAD_VALUE;
2320            ALOGE("fcntl(F_DUPFD_CLOEXEC) failed in Parcel::read, i is %zu, fd is %d, fd_count is %zu, error: %s",
2321                  i, fd, fd_count, strerror(fd < 0 ? -fd : errno));
2322            // Close all the file descriptors that were dup-ed.
2323            for (size_t j=0; j<i ;j++) {
2324                close(fds[j]);
2325            }
2326        }
2327    }
2328
2329    if (err == NO_ERROR) {
2330        err = val.unflatten(buf, len, fds, fd_count);
2331    }
2332
2333    if (fd_count) {
2334        delete [] fds;
2335    }
2336
2337    return err;
2338}
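
// Returns the flat_binder_object at the current read position after checking that the
// position is recorded in the parcel's object table. Null objects are never written to
// the table, so when nullMetaData is false a null object is returned without that check.
// mNextObjectHint caches the last table index, since objects are normally read in order.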
2339const flat_binder_object* Parcel::readObject(bool nullMetaData) const
2340{
2341    const size_t DPOS = mDataPos;
2342    if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
2343        const flat_binder_object* obj
2344                = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
2345        mDataPos = DPOS + sizeof(flat_binder_object);
2346        if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
2347            // When transferring a NULL object, we don't write it into
2348            // the object list, so we don't want to check for it when
2349            // reading.
2350            ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2351            return obj;
2352        }
2353
2354        // Ensure that this object is valid...
2355        binder_size_t* const OBJS = mObjects;
2356        const size_t N = mObjectsSize;
2357        size_t opos = mNextObjectHint;
2358
2359        if (N > 0) {
2360            ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
2361                 this, DPOS, opos);
2362
2363            // Start at the current hint position, looking for an object at
2364            // the current data position.
2365            if (opos < N) {
2366                while (opos < (N-1) && OBJS[opos] < DPOS) {
2367                    opos++;
2368                }
2369            } else {
2370                opos = N-1;
2371            }
2372            if (OBJS[opos] == DPOS) {
2373                // Found it!
2374                ALOGV("Parcel %p found obj %zu at index %zu with forward search",
2375                     this, DPOS, opos);
2376                mNextObjectHint = opos+1;
2377                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2378                return obj;
2379            }
2380
2381            // Look backwards for it...
2382            while (opos > 0 && OBJS[opos] > DPOS) {
2383                opos--;
2384            }
2385            if (OBJS[opos] == DPOS) {
2386                // Found it!
2387                ALOGV("Parcel %p found obj %zu at index %zu with backward search",
2388                     this, DPOS, opos);
2389                mNextObjectHint = opos+1;
2390                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2391                return obj;
2392            }
2393        }
2394        ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
2395             this, DPOS);
2396    }
2397    return NULL;
2398}
2399
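// Closes every file descriptor referenced by the parcel's object table.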
2400void Parcel::closeFileDescriptors()
2401{
2402    size_t i = mObjectsSize;
2403    if (i > 0) {
2404        //ALOGI("Closing file descriptors for %zu objects...", i);
2405    }
2406    while (i > 0) {
2407        i--;
2408        const flat_binder_object* flat
2409            = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
2410        if (flat->hdr.type == BINDER_TYPE_FD) {
2411            //ALOGI("Closing fd: %ld", flat->handle);
2412            close(flat->handle);
2413        }
2414    }
2415}
2416
2417uintptr_t Parcel::ipcData() const
2418{
2419    return reinterpret_cast<uintptr_t>(mData);
2420}
2421
2422size_t Parcel::ipcDataSize() const
2423{
2424    return (mDataSize > mDataPos ? mDataSize : mDataPos);
2425}
2426
2427uintptr_t Parcel::ipcObjects() const
2428{
2429    return reinterpret_cast<uintptr_t>(mObjects);
2430}
2431
2432size_t Parcel::ipcObjectsCount() const
2433{
2434    return mObjectsSize;
2435}
2436
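// Adopts an externally owned buffer (typically one handed to us by the binder driver)
// instead of copying it; relFunc is invoked later to give it back. Object offsets must
// be ascending and non-overlapping; if not, the whole object table is dropped as
// untrusted.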
2437void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
2438    const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
2439{
2440    binder_size_t minOffset = 0;
2441    freeDataNoInit();
2442    mError = NO_ERROR;
2443    mData = const_cast<uint8_t*>(data);
2444    mDataSize = mDataCapacity = dataSize;
2445    //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
2446    mDataPos = 0;
2447    ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
2448    mObjects = const_cast<binder_size_t*>(objects);
2449    mObjectsSize = mObjectsCapacity = objectsCount;
2450    mNextObjectHint = 0;
2451    mObjectsSorted = false;
2452    mOwner = relFunc;
2453    mOwnerCookie = relCookie;
2454    for (size_t i = 0; i < mObjectsSize; i++) {
2455        binder_size_t offset = mObjects[i];
2456        if (offset < minOffset) {
2457            ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
2458                  __func__, (uint64_t)offset, (uint64_t)minOffset);
2459            mObjectsSize = 0;
2460            break;
2461        }
2462        minOffset = offset + sizeof(flat_binder_object);
2463    }
2464    scanForFds();
2465}
2466
2467void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
2468{
2469    to << "Parcel(";
2470
2471    if (errorCheck() != NO_ERROR) {
2472        const status_t err = errorCheck();
2473        to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
2474    } else if (dataSize() > 0) {
2475        const uint8_t* DATA = data();
2476        to << indent << HexDump(DATA, dataSize()) << dedent;
2477        const binder_size_t* OBJS = objects();
2478        const size_t N = objectsCount();
2479        for (size_t i=0; i<N; i++) {
2480            const flat_binder_object* flat
2481                = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
2482            to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
2483                << TypeCode(flat->hdr.type & 0x7f7f7f00)
2484                << " = " << flat->binder;
2485        }
2486    } else {
2487        to << "NULL";
2488    }
2489
2490    to << ")";
2491}
2492
2493void Parcel::releaseObjects()
2494{
2495    const sp<ProcessState> proc(ProcessState::self());
2496    size_t i = mObjectsSize;
2497    uint8_t* const data = mData;
2498    binder_size_t* const objects = mObjects;
2499    while (i > 0) {
2500        i--;
2501        const flat_binder_object* flat
2502            = reinterpret_cast<flat_binder_object*>(data+objects[i]);
2503        release_object(proc, *flat, this, &mOpenAshmemSize);
2504    }
2505}
2506
2507void Parcel::acquireObjects()
2508{
2509    const sp<ProcessState> proc(ProcessState::self());
2510    size_t i = mObjectsSize;
2511    uint8_t* const data = mData;
2512    binder_size_t* const objects = mObjects;
2513    while (i > 0) {
2514        i--;
2515        const flat_binder_object* flat
2516            = reinterpret_cast<flat_binder_object*>(data+objects[i]);
2517        acquire_object(proc, *flat, this, &mOpenAshmemSize);
2518    }
2519}
2520
2521void Parcel::freeData()
2522{
2523    freeDataNoInit();
2524    initState();
2525}
2526
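// Releases the payload without resetting the bookkeeping fields. A foreign-owned buffer
// is returned through the release callback; otherwise the references held on flattened
// objects are dropped and the allocation is freed, updating the global Parcel
// allocation accounting.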
2527void Parcel::freeDataNoInit()
2528{
2529    if (mOwner) {
2530        LOG_ALLOC("Parcel %p: freeing other owner data", this);
2531        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
2532        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
2533    } else {
2534        LOG_ALLOC("Parcel %p: freeing allocated data", this);
2535        releaseObjects();
2536        if (mData) {
2537            LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
2538            pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2539            if (mDataCapacity <= gParcelGlobalAllocSize) {
2540              gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity;
2541            } else {
2542              gParcelGlobalAllocSize = 0;
2543            }
2544            if (gParcelGlobalAllocCount > 0) {
2545              gParcelGlobalAllocCount--;
2546            }
2547            pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2548            free(mData);
2549        }
2550        if (mObjects) free(mObjects);
2551    }
2552}
2553
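// Grows the backing buffer to roughly 1.5x the space needed for the current payload
// plus len more bytes; the check below rejects sizes that wrapped around.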
2554status_t Parcel::growData(size_t len)
2555{
2556    if (len > INT32_MAX) {
2557        // don't accept size_t values which may have come from an
2558        // inadvertent conversion from a negative int.
2559        return BAD_VALUE;
2560    }
2561
2562    size_t newSize = ((mDataSize+len)*3)/2;
2563    return (newSize <= mDataSize)
2564            ? (status_t) NO_MEMORY
2565            : continueWrite(newSize);
2566}
2567
2568status_t Parcel::restartWrite(size_t desired)
2569{
2570    if (desired > INT32_MAX) {
2571        // don't accept size_t values which may have come from an
2572        // inadvertent conversion from a negative int.
2573        return BAD_VALUE;
2574    }
2575
2576    if (mOwner) {
2577        freeData();
2578        return continueWrite(desired);
2579    }
2580
2581    uint8_t* data = (uint8_t*)realloc(mData, desired);
2582    if (!data && desired > mDataCapacity) {
2583        mError = NO_MEMORY;
2584        return NO_MEMORY;
2585    }
2586
2587    releaseObjects();
2588
2589    if (data) {
2590        LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
2591        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2592        gParcelGlobalAllocSize += desired;
2593        gParcelGlobalAllocSize -= mDataCapacity;
2594        if (!mData) {
2595            gParcelGlobalAllocCount++;
2596        }
2597        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2598        mData = data;
2599        mDataCapacity = desired;
2600    }
2601
2602    mDataSize = mDataPos = 0;
2603    ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
2604    ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);
2605
2606    free(mObjects);
2607    mObjects = NULL;
2608    mObjectsSize = mObjectsCapacity = 0;
2609    mNextObjectHint = 0;
2610    mObjectsSorted = false;
2611    mHasFds = false;
2612    mFdsKnown = true;
2613    mAllowFds = true;
2614
2615    return NO_ERROR;
2616}
2617
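// Resizes the backing store to `desired`, handling three cases: a foreign-owned buffer
// (copy what fits into a fresh allocation and return the original to its owner), a
// buffer we already own (realloc to grow, or just trim the bookkeeping to shrink), and
// no buffer yet (plain malloc). When shrinking, references on any flattened objects
// that fall past the new size are released first.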
2618status_t Parcel::continueWrite(size_t desired)
2619{
2620    if (desired > INT32_MAX) {
2621        // don't accept size_t values which may have come from an
2622        // inadvertent conversion from a negative int.
2623        return BAD_VALUE;
2624    }
2625
2626    // If shrinking, first adjust for any objects that appear
2627    // after the new data size.
2628    size_t objectsSize = mObjectsSize;
2629    if (desired < mDataSize) {
2630        if (desired == 0) {
2631            objectsSize = 0;
2632        } else {
2633            while (objectsSize > 0) {
2634                if (mObjects[objectsSize-1] < desired)
2635                    break;
2636                objectsSize--;
2637            }
2638        }
2639    }
2640
2641    if (mOwner) {
2642        // If the size is going to zero, just release the owner's data.
2643        if (desired == 0) {
2644            freeData();
2645            return NO_ERROR;
2646        }
2647
2648        // If there is a different owner, we need to take
2649        // possession.
2650        uint8_t* data = (uint8_t*)malloc(desired);
2651        if (!data) {
2652            mError = NO_MEMORY;
2653            return NO_MEMORY;
2654        }
2655        binder_size_t* objects = NULL;
2656
2657        if (objectsSize) {
2658            objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
2659            if (!objects) {
2660                free(data);
2661
2662                mError = NO_MEMORY;
2663                return NO_MEMORY;
2664            }
2665
2666            // Little hack to only acquire references on objects
2667            // we will be keeping.
2668            size_t oldObjectsSize = mObjectsSize;
2669            mObjectsSize = objectsSize;
2670            acquireObjects();
2671            mObjectsSize = oldObjectsSize;
2672        }
2673
2674        if (mData) {
2675            memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
2676        }
2677        if (objects && mObjects) {
2678            memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
2679        }
2680        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
2681        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
2682        mOwner = NULL;
2683
2684        LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
2685        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2686        gParcelGlobalAllocSize += desired;
2687        gParcelGlobalAllocCount++;
2688        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2689
2690        mData = data;
2691        mObjects = objects;
2692        mDataSize = (mDataSize < desired) ? mDataSize : desired;
2693        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2694        mDataCapacity = desired;
2695        mObjectsSize = mObjectsCapacity = objectsSize;
2696        mNextObjectHint = 0;
2697        mObjectsSorted = false;
2698
2699    } else if (mData) {
2700        if (objectsSize < mObjectsSize) {
2701            // Need to release refs on any objects we are dropping.
2702            const sp<ProcessState> proc(ProcessState::self());
2703            for (size_t i=objectsSize; i<mObjectsSize; i++) {
2704                const flat_binder_object* flat
2705                    = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
2706                if (flat->hdr.type == BINDER_TYPE_FD) {
2707                    // will need to rescan because we may have lopped off the only FDs
2708                    mFdsKnown = false;
2709                }
2710                release_object(proc, *flat, this, &mOpenAshmemSize);
2711            }
2712            binder_size_t* objects =
2713                (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
2714            if (objects) {
2715                mObjects = objects;
2716            }
2717            mObjectsSize = objectsSize;
2718            mNextObjectHint = 0;
2719            mObjectsSorted = false;
2720        }
2721
2722        // We own the data, so we can just do a realloc().
2723        if (desired > mDataCapacity) {
2724            uint8_t* data = (uint8_t*)realloc(mData, desired);
2725            if (data) {
2726                LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
2727                        desired);
2728                pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2729                gParcelGlobalAllocSize += desired;
2730                gParcelGlobalAllocSize -= mDataCapacity;
2731                pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2732                mData = data;
2733                mDataCapacity = desired;
2734            } else {
2735                mError = NO_MEMORY;
2736                return NO_MEMORY;
2737            }
2738        } else {
2739            if (mDataSize > desired) {
2740                mDataSize = desired;
2741                ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2742            }
2743            if (mDataPos > desired) {
2744                mDataPos = desired;
2745                ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
2746            }
2747        }
2748
2749    } else {
2750        // This is the first data.  Easy!
2751        uint8_t* data = (uint8_t*)malloc(desired);
2752        if (!data) {
2753            mError = NO_MEMORY;
2754            return NO_MEMORY;
2755        }
2756
2757        if (!(mDataCapacity == 0 && mObjects == NULL
2758             && mObjectsCapacity == 0)) {
2759            ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
2760        }
2761
2762        LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
2763        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2764        gParcelGlobalAllocSize += desired;
2765        gParcelGlobalAllocCount++;
2766        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2767
2768        mData = data;
2769        mDataSize = mDataPos = 0;
2770        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2771        ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
2772        mDataCapacity = desired;
2773    }
2774
2775    return NO_ERROR;
2776}
2777
2778void Parcel::initState()
2779{
2780    LOG_ALLOC("Parcel %p: initState", this);
2781    mError = NO_ERROR;
2782    mData = 0;
2783    mDataSize = 0;
2784    mDataCapacity = 0;
2785    mDataPos = 0;
2786    ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
2787    ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
2788    mObjects = NULL;
2789    mObjectsSize = 0;
2790    mObjectsCapacity = 0;
2791    mNextObjectHint = 0;
2792    mObjectsSorted = false;
2793    mHasFds = false;
2794    mFdsKnown = true;
2795    mAllowFds = true;
2796    mOwner = NULL;
2797    mOpenAshmemSize = 0;
2798
2799    // Racing initializations are harmless here: each thread would write the same value.
2800    if (gMaxFds == 0) {
2801        struct rlimit result;
2802        if (!getrlimit(RLIMIT_NOFILE, &result)) {
2803            gMaxFds = (size_t)result.rlim_cur;
2804            //ALOGI("parcel fd limit set to %zu", gMaxFds);
2805        } else {
2806            ALOGW("Unable to getrlimit: %s", strerror(errno));
2807            gMaxFds = 1024;
2808        }
2809    }
2810}
2811
2812void Parcel::scanForFds() const
2813{
2814    bool hasFds = false;
2815    for (size_t i=0; i<mObjectsSize; i++) {
2816        const flat_binder_object* flat
2817            = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
2818        if (flat->hdr.type == BINDER_TYPE_FD) {
2819            hasFds = true;
2820            break;
2821        }
2822    }
2823    mHasFds = hasFds;
2824    mFdsKnown = true;
2825}
2826
2827size_t Parcel::getBlobAshmemSize() const
2828{
2829    // This used to return the total size of all blobs that were written to ashmem.
2830    // It now returns the ashmem currently referenced by this Parcel, which should be equivalent.
2831    // TODO: Remove this method once the ABI can be changed.
2832    return mOpenAshmemSize;
2833}
2834
2835size_t Parcel::getOpenAshmemSize() const
2836{
2837    return mOpenAshmemSize;
2838}
2839
2840// --- Parcel::Blob ---
2841
2842Parcel::Blob::Blob() :
2843        mFd(-1), mData(NULL), mSize(0), mMutable(false) {
2844}
2845
2846Parcel::Blob::~Blob() {
2847    release();
2848}
2849
2850void Parcel::Blob::release() {
2851    if (mFd != -1 && mData) {
2852        ::munmap(mData, mSize);
2853    }
2854    clear();
2855}
2856
2857void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
2858    mFd = fd;
2859    mData = data;
2860    mSize = size;
2861    mMutable = isMutable;
2862}
2863
2864void Parcel::Blob::clear() {
2865    mFd = -1;
2866    mData = NULL;
2867    mSize = 0;
2868    mMutable = false;
2869}
2870
2871}; // namespace android
2872