// Parcel.cpp — revision 0dd1a5d2f4e69c5df233fa9468191c03dc9b639f
1/* 2 * Copyright (C) 2005 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#define LOG_TAG "Parcel" 18//#define LOG_NDEBUG 0 19 20#include <binder/Parcel.h> 21 22#include <binder/IPCThreadState.h> 23#include <binder/Binder.h> 24#include <binder/BpBinder.h> 25#include <binder/ProcessState.h> 26#include <binder/Status.h> 27#include <binder/TextOutput.h> 28 29#include <errno.h> 30#include <utils/Debug.h> 31#include <utils/Log.h> 32#include <utils/String8.h> 33#include <utils/String16.h> 34#include <utils/misc.h> 35#include <utils/Flattenable.h> 36#include <cutils/ashmem.h> 37 38#include <private/binder/binder_module.h> 39#include <private/binder/Static.h> 40 41#include <inttypes.h> 42#include <stdio.h> 43#include <stdlib.h> 44#include <stdint.h> 45#include <sys/mman.h> 46 47#ifndef INT32_MAX 48#define INT32_MAX ((int32_t)(2147483647)) 49#endif 50 51#define LOG_REFS(...) 52//#define LOG_REFS(...) ALOG(LOG_DEBUG, "Parcel", __VA_ARGS__) 53#define LOG_ALLOC(...) 54//#define LOG_ALLOC(...) ALOG(LOG_DEBUG, "Parcel", __VA_ARGS__) 55 56// --------------------------------------------------------------------------- 57 58// This macro should never be used at runtime, as a too large value 59// of s could cause an integer overflow. 
Instead, you should always 60// use the wrapper function pad_size() 61#define PAD_SIZE_UNSAFE(s) (((s)+3)&~3) 62 63static size_t pad_size(size_t s) { 64 if (s > (SIZE_T_MAX - 3)) { 65 abort(); 66 } 67 return PAD_SIZE_UNSAFE(s); 68} 69 70// Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER 71#define STRICT_MODE_PENALTY_GATHER (0x40 << 16) 72 73// XXX This can be made public if we want to provide 74// support for typed data. 75struct small_flat_data 76{ 77 uint32_t type; 78 uint32_t data; 79}; 80 81namespace android { 82 83using android::base::unique_fd; 84 85static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER; 86static size_t gParcelGlobalAllocSize = 0; 87static size_t gParcelGlobalAllocCount = 0; 88 89// Maximum size of a blob to transfer in-place. 90static const size_t BLOB_INPLACE_LIMIT = 16 * 1024; 91 92enum { 93 BLOB_INPLACE = 0, 94 BLOB_ASHMEM_IMMUTABLE = 1, 95 BLOB_ASHMEM_MUTABLE = 2, 96}; 97 98void acquire_object(const sp<ProcessState>& proc, 99 const flat_binder_object& obj, const void* who, size_t* outAshmemSize) 100{ 101 switch (obj.type) { 102 case BINDER_TYPE_BINDER: 103 if (obj.binder) { 104 LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie); 105 reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who); 106 } 107 return; 108 case BINDER_TYPE_WEAK_BINDER: 109 if (obj.binder) 110 reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who); 111 return; 112 case BINDER_TYPE_HANDLE: { 113 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle); 114 if (b != NULL) { 115 LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get()); 116 b->incStrong(who); 117 } 118 return; 119 } 120 case BINDER_TYPE_WEAK_HANDLE: { 121 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle); 122 if (b != NULL) b.get_refs()->incWeak(who); 123 return; 124 } 125 case BINDER_TYPE_FD: { 126 if (obj.cookie != 0) { 127 if (outAshmemSize != NULL) { 128 // If we own an ashmem fd, keep 
track of how much memory it refers to. 129 int size = ashmem_get_size_region(obj.handle); 130 if (size > 0) { 131 *outAshmemSize += size; 132 } 133 } 134 } 135 return; 136 } 137 } 138 139 ALOGD("Invalid object type 0x%08x", obj.type); 140} 141 142void acquire_object(const sp<ProcessState>& proc, 143 const flat_binder_object& obj, const void* who) 144{ 145 acquire_object(proc, obj, who, NULL); 146} 147 148static void release_object(const sp<ProcessState>& proc, 149 const flat_binder_object& obj, const void* who, size_t* outAshmemSize) 150{ 151 switch (obj.type) { 152 case BINDER_TYPE_BINDER: 153 if (obj.binder) { 154 LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie); 155 reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who); 156 } 157 return; 158 case BINDER_TYPE_WEAK_BINDER: 159 if (obj.binder) 160 reinterpret_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who); 161 return; 162 case BINDER_TYPE_HANDLE: { 163 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle); 164 if (b != NULL) { 165 LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get()); 166 b->decStrong(who); 167 } 168 return; 169 } 170 case BINDER_TYPE_WEAK_HANDLE: { 171 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle); 172 if (b != NULL) b.get_refs()->decWeak(who); 173 return; 174 } 175 case BINDER_TYPE_FD: { 176 if (outAshmemSize != NULL) { 177 if (obj.cookie != 0) { 178 int size = ashmem_get_size_region(obj.handle); 179 if (size > 0) { 180 *outAshmemSize -= size; 181 } 182 183 close(obj.handle); 184 } 185 } 186 return; 187 } 188 } 189 190 ALOGE("Invalid object type 0x%08x", obj.type); 191} 192 193void release_object(const sp<ProcessState>& proc, 194 const flat_binder_object& obj, const void* who) 195{ 196 release_object(proc, obj, who, NULL); 197} 198 199inline static status_t finish_flatten_binder( 200 const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out) 201{ 202 return out->writeObject(flat, false); 203} 204 205status_t 
// Flatten a strong IBinder into 'out'.  A local binder is written as
// BINDER_TYPE_BINDER carrying raw pointers (valid only in-process as far as
// this code shows); a proxy is written as BINDER_TYPE_HANDLE carrying its
// driver handle.
flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    // NOTE(review): 0x7f presumably encodes the max scheduling priority the
    // callee may inherit — confirm against binder_module.h.
    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        IBinder *local = binder->localBinder();
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                ALOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        // NULL binder travels as a zeroed BINDER_TYPE_BINDER.
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
    }

    return finish_flatten_binder(binder, obj, out);
}

// Flatten a weak IBinder reference.  The weak ref is promoted only to probe
// whether it is local or remote; the promoted reference is released on return.
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const wp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        sp<IBinder> real = binder.promote();
        if (real != NULL) {
            IBinder *local = real->localBinder();
            if (!local) {
                BpBinder *proxy = real->remoteBinder();
                if (proxy == NULL) {
                    ALOGE("null proxy");
                }
                const int32_t handle = proxy ? proxy->handle() : 0;
                obj.type = BINDER_TYPE_WEAK_HANDLE;
                obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
                obj.handle = handle;
                obj.cookie = 0;
            } else {
                obj.type = BINDER_TYPE_WEAK_BINDER;
                obj.binder = reinterpret_cast<uintptr_t>(binder.get_refs());
                obj.cookie = reinterpret_cast<uintptr_t>(binder.unsafe_get());
            }
            return finish_flatten_binder(real, obj, out);
        }

        // XXX How to deal?  In order to flatten the given binder,
        // we need to probe it for information, which requires a primary
        // reference...  but we don't have one.
        //
        // The OpenBinder implementation uses a dynamic_cast<> here,
        // but we can't do that with the different reference counting
        // implementation we are using.
        ALOGE("Unable to unflatten Binder weak reference!");
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
        return finish_flatten_binder(NULL, obj, out);

    } else {
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
        return finish_flatten_binder(NULL, obj, out);
    }
}

// Counterpart of finish_flatten_binder(); currently a no-op hook.
inline static status_t finish_unflatten_binder(
    BpBinder* /*proxy*/, const flat_binder_object& /*flat*/,
    const Parcel& /*in*/)
{
    return NO_ERROR;
}

// Read a strong binder from 'in'.  Returns BAD_TYPE when the next object is
// absent or not a strong binder/handle.
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);

    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                // Local object: cookie holds the IBinder* written by
                // flatten_binder above.
                *out = reinterpret_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
                *out = proc->getStrongProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}

// Read a weak binder from 'in'.  Accepts strong and weak encodings.
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, wp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);

    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                *out = reinterpret_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_WEAK_BINDER:
                if (flat->binder != 0) {
                    // binder carries the weakref_type*, cookie the IBinder*.
                    out->set_object_and_refs(
                        reinterpret_cast<IBinder*>(flat->cookie),
                        reinterpret_cast<RefBase::weakref_type*>(flat->binder));
                } else {
                    *out = NULL;
                }
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
            case BINDER_TYPE_WEAK_HANDLE:
                *out =
                    proc->getWeakProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->unsafe_get()), *flat, in);
        }
    }
    return BAD_TYPE;
}

namespace {

// Read an int32 element count, then that many T values via 'read_func'.
// A negative count maps to UNEXPECTED_NULL (the null-vector encoding).
template<typename T>
status_t readTypedVector(std::vector<T>* val, const Parcel* p,
                         status_t(Parcel::*read_func)(T*) const) {
    val->clear();

    int32_t size;
    status_t status = p->readInt32(&size);

    if (status != OK) {
        return status;
    }

    if (size < 0) {
        return UNEXPECTED_NULL;
    }

    // NOTE(review): 'size' comes off the wire and is resized before any
    // element read is validated, so a malformed parcel can force a large
    // allocation here — TODO confirm callers/transport bound the payload.
    val->resize(size);

    for (auto& v: *val) {
        status = (p->*read_func)(&v);

        if (status != OK) {
            return status;
        }
    }

    return OK;
}

} // namespace

// ---------------------------------------------------------------------------

Parcel::Parcel()
{
    LOG_ALLOC("Parcel %p: constructing", this);
    initState();
}

Parcel::~Parcel()
{
    freeDataNoInit();
    LOG_ALLOC("Parcel %p: destroyed", this);
}

// Sum of all live Parcel buffer allocations, guarded by the global lock.
size_t Parcel::getGlobalAllocSize() {
    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
    size_t size = gParcelGlobalAllocSize;
    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
    return size;
}

// Number of live Parcel buffer allocations, guarded by the same lock.
size_t Parcel::getGlobalAllocCount() {
    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
    size_t count = gParcelGlobalAllocCount;
    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
    return count;
}

const uint8_t* Parcel::data() const
{
    return mData;
}

// Logical size: whichever of write-high-water (mDataSize) or current
// position is larger.
size_t Parcel::dataSize() const
{
    return (mDataSize > mDataPos ? mDataSize : mDataPos);
}

size_t Parcel::dataAvail() const
{
    // TODO: decide what to do about the possibility that this can
    // report an available-data size that exceeds a Java int's max
    // positive value, causing havoc.  Fortunately this will only
    // happen if someone constructs a Parcel containing more than two
    // gigabytes of data, which on typical phone hardware is simply
    // not possible.
    return dataSize() - dataPosition();
}

size_t Parcel::dataPosition() const
{
    return mDataPos;
}

size_t Parcel::dataCapacity() const
{
    return mDataCapacity;
}

// Set the logical size, growing the buffer if needed via continueWrite().
status_t Parcel::setDataSize(size_t size)
{
    if (size > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t err;
    err = continueWrite(size);
    if (err == NO_ERROR) {
        mDataSize = size;
        ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
    }
    return err;
}

void Parcel::setDataPosition(size_t pos) const
{
    if (pos > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        abort();
    }

    mDataPos = pos;
    // Position moved arbitrarily: invalidate the cached object-scan hint.
    mNextObjectHint = 0;
}

status_t Parcel::setDataCapacity(size_t size)
{
    if (size > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // Capacity can only grow here; shrinking is ignored.
    if (size > mDataCapacity) return continueWrite(size);
    return NO_ERROR;
}

// Replace this Parcel's contents with a copy of buffer[0..len).
status_t Parcel::setData(const uint8_t* buffer, size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
479 return BAD_VALUE; 480 } 481 482 status_t err = restartWrite(len); 483 if (err == NO_ERROR) { 484 memcpy(const_cast<uint8_t*>(data()), buffer, len); 485 mDataSize = len; 486 mFdsKnown = false; 487 } 488 return err; 489} 490 491status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len) 492{ 493 const sp<ProcessState> proc(ProcessState::self()); 494 status_t err; 495 const uint8_t *data = parcel->mData; 496 const binder_size_t *objects = parcel->mObjects; 497 size_t size = parcel->mObjectsSize; 498 int startPos = mDataPos; 499 int firstIndex = -1, lastIndex = -2; 500 501 if (len == 0) { 502 return NO_ERROR; 503 } 504 505 if (len > INT32_MAX) { 506 // don't accept size_t values which may have come from an 507 // inadvertent conversion from a negative int. 508 return BAD_VALUE; 509 } 510 511 // range checks against the source parcel size 512 if ((offset > parcel->mDataSize) 513 || (len > parcel->mDataSize) 514 || (offset + len > parcel->mDataSize)) { 515 return BAD_VALUE; 516 } 517 518 // Count objects in range 519 for (int i = 0; i < (int) size; i++) { 520 size_t off = objects[i]; 521 if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) { 522 if (firstIndex == -1) { 523 firstIndex = i; 524 } 525 lastIndex = i; 526 } 527 } 528 int numObjects = lastIndex - firstIndex + 1; 529 530 if ((mDataSize+len) > mDataCapacity) { 531 // grow data 532 err = growData(len); 533 if (err != NO_ERROR) { 534 return err; 535 } 536 } 537 538 // append data 539 memcpy(mData + mDataPos, data + offset, len); 540 mDataPos += len; 541 mDataSize += len; 542 543 err = NO_ERROR; 544 545 if (numObjects > 0) { 546 // grow objects 547 if (mObjectsCapacity < mObjectsSize + numObjects) { 548 size_t newSize = ((mObjectsSize + numObjects)*3)/2; 549 if (newSize < mObjectsSize) return NO_MEMORY; // overflow 550 binder_size_t *objects = 551 (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t)); 552 if (objects == (binder_size_t*)0) { 553 return NO_MEMORY; 
554 } 555 mObjects = objects; 556 mObjectsCapacity = newSize; 557 } 558 559 // append and acquire objects 560 int idx = mObjectsSize; 561 for (int i = firstIndex; i <= lastIndex; i++) { 562 size_t off = objects[i] - offset + startPos; 563 mObjects[idx++] = off; 564 mObjectsSize++; 565 566 flat_binder_object* flat 567 = reinterpret_cast<flat_binder_object*>(mData + off); 568 acquire_object(proc, *flat, this, &mOpenAshmemSize); 569 570 if (flat->type == BINDER_TYPE_FD) { 571 // If this is a file descriptor, we need to dup it so the 572 // new Parcel now owns its own fd, and can declare that we 573 // officially know we have fds. 574 flat->handle = dup(flat->handle); 575 flat->cookie = 1; 576 mHasFds = mFdsKnown = true; 577 if (!mAllowFds) { 578 err = FDS_NOT_ALLOWED; 579 } 580 } 581 } 582 } 583 584 return err; 585} 586 587bool Parcel::allowFds() const 588{ 589 return mAllowFds; 590} 591 592bool Parcel::pushAllowFds(bool allowFds) 593{ 594 const bool origValue = mAllowFds; 595 if (!allowFds) { 596 mAllowFds = false; 597 } 598 return origValue; 599} 600 601void Parcel::restoreAllowFds(bool lastValue) 602{ 603 mAllowFds = lastValue; 604} 605 606bool Parcel::hasFileDescriptors() const 607{ 608 if (!mFdsKnown) { 609 scanForFds(); 610 } 611 return mHasFds; 612} 613 614// Write RPC headers. 
// (previously just the interface token)
status_t Parcel::writeInterfaceToken(const String16& interface)
{
    // Strict-mode policy travels ahead of the token so the callee can
    // gather violations back to the caller.
    writeInt32(IPCThreadState::self()->getStrictModePolicy() |
               STRICT_MODE_PENALTY_GATHER);
    // currently the interface identification token is just its name as a string
    return writeString16(interface);
}

bool Parcel::checkInterface(IBinder* binder) const
{
    return enforceInterface(binder->getInterfaceDescriptor());
}

// Consume and verify the header written by writeInterfaceToken().
bool Parcel::enforceInterface(const String16& interface,
                              IPCThreadState* threadState) const
{
    int32_t strictPolicy = readInt32();
    if (threadState == NULL) {
        threadState = IPCThreadState::self();
    }
    if ((threadState->getLastTransactionBinderFlags() &
         IBinder::FLAG_ONEWAY) != 0) {
        // For one-way calls, the callee is running entirely
        // disconnected from the caller, so disable StrictMode entirely.
        // Not only does disk/network usage not impact the caller, but
        // there's no way to communicate back any violations anyway.
        threadState->setStrictModePolicy(0);
    } else {
        threadState->setStrictModePolicy(strictPolicy);
    }
    const String16 str(readString16());
    if (str == interface) {
        return true;
    } else {
        ALOGW("**** enforceInterface() expected '%s' but read '%s'",
              String8(interface).string(), String8(str).string());
        return false;
    }
}

const binder_size_t* Parcel::objects() const
{
    return mObjects;
}

size_t Parcel::objectsCount() const
{
    return mObjectsSize;
}

status_t Parcel::errorCheck() const
{
    return mError;
}

void Parcel::setError(status_t err)
{
    mError = err;
}

// Advance the write position by len, extending mDataSize if we passed it.
status_t Parcel::finishWrite(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    //printf("Finish write of %d\n", len);
    mDataPos += len;
    ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
    if (mDataPos > mDataSize) {
        mDataSize = mDataPos;
        ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
    }
    //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
    return NO_ERROR;
}

// Raw write without 4-byte padding — callers must manage alignment.
status_t Parcel::writeUnpadded(const void* data, size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    size_t end = mDataPos + len;
    if (end < mDataPos) {
        // integer overflow
        return BAD_VALUE;
    }

    if (end <= mDataCapacity) {
restart_write:
        memcpy(mData+mDataPos, data, len);
        return finishWrite(len);
    }

    status_t err = growData(len);
    if (err == NO_ERROR) goto restart_write;
    return err;
}

// Padded write of an arbitrary byte buffer.
status_t Parcel::write(const void* data, size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    void* const d = writeInplace(len);
    if (d) {
        memcpy(d, data, len);
        return NO_ERROR;
    }
    return mError;
}

// Reserve a padded region at the current position and return a pointer to
// it, or NULL on overflow/failure.  Pad bytes are zeroed below.
void* Parcel::writeInplace(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return NULL;
    }

    const size_t padded = pad_size(len);

    // sanity check for integer overflow
    if (mDataPos+padded < mDataPos) {
        return NULL;
    }

    if ((mDataPos+padded) <= mDataCapacity) {
restart_write:
        //printf("Writing %ld bytes, padded to %ld\n", len, padded);
        uint8_t* const data = mData+mDataPos;

        // Need to pad at end?
        // Zero the pad bytes in the trailing word so no uninitialized
        // stack/heap data leaks to the remote process.
        if (padded != len) {
#if BYTE_ORDER == BIG_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0xffffff00, 0xffff0000, 0xff000000
            };
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
            };
#endif
            //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len],
            //    *reinterpret_cast<void**>(data+padded-4));
            *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
        }

        finishWrite(padded);
        return data;
    }

    status_t err = growData(padded);
    if (err == NO_ERROR) goto restart_write;
    return NULL;
}

namespace {

// Write an int32 element count followed by each element via 'write_func'.
template<typename T, typename U>
status_t unsafeWriteTypedVector(const std::vector<T>& val, Parcel* p,
                                status_t(Parcel::*write_func)(U)) {
    if (val.size() > std::numeric_limits<int32_t>::max()) {
        return BAD_VALUE;
    }

    status_t status = p->writeInt32(val.size());

    if (status != OK) {
        return status;
    }

    for (const auto& item : val) {
        status = (p->*write_func)(item);

        if (status != OK) {
            return status;
        }
    }

    return OK;
}

// Overload for by-const-reference element writers.
template<typename T>
status_t writeTypedVector(const std::vector<T>& val, Parcel* p,
                          status_t(Parcel::*write_func)(const T&)) {
    return unsafeWriteTypedVector(val, p, write_func);
}

// Overload for by-value element writers.
template<typename T>
status_t writeTypedVector(const std::vector<T>& val, Parcel* p,
                          status_t(Parcel::*write_func)(T)) {
    return unsafeWriteTypedVector(val, p, write_func);
}

} // namespace

// Byte vectors are written as one contiguous (padded) byte blob, unlike the
// element-by-element typed vectors above.
status_t Parcel::writeByteVector(const std::vector<int8_t>& val)
{
    status_t status;
    if (val.size() > std::numeric_limits<int32_t>::max()) {
        status = BAD_VALUE;
        return status;
    }

    status = writeInt32(val.size());
    if (status != OK) {
        return status;
    }

    void* data = writeInplace(val.size());
    if (!data) {
        status = BAD_VALUE;
        return status;
    }

    memcpy(data, val.data(), val.size());
    return status;
}

status_t Parcel::writeInt32Vector(const std::vector<int32_t>& val)
{
    return writeTypedVector(val, this, &Parcel::writeInt32);
}

status_t Parcel::writeInt64Vector(const std::vector<int64_t>& val)
{
    return writeTypedVector(val, this, &Parcel::writeInt64);
}

status_t Parcel::writeFloatVector(const std::vector<float>& val)
{
    return writeTypedVector(val, this, &Parcel::writeFloat);
}

status_t Parcel::writeDoubleVector(const std::vector<double>& val)
{
    return writeTypedVector(val, this, &Parcel::writeDouble);
}

status_t Parcel::writeBoolVector(const std::vector<bool>& val)
{
    return writeTypedVector(val, this, &Parcel::writeBool);
}

status_t Parcel::writeCharVector(const std::vector<char16_t>& val)
{
    return writeTypedVector(val, this, &Parcel::writeChar);
}

status_t Parcel::writeString16Vector(const std::vector<String16>& val)
{
    return writeTypedVector(val, this, &Parcel::writeString16);
}

status_t Parcel::writeInt32(int32_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeUint32(uint32_t val)
{
    return writeAligned(val);
}

// Length-prefixed int32 array; NULL is encoded as count -1.
status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (!val) {
        return writeInt32(-1);
    }
    status_t ret = writeInt32(static_cast<uint32_t>(len));
    if (ret == NO_ERROR) {
        ret = write(val, len * sizeof(*val));
    }
    return ret;
}
// Length-prefixed byte array; NULL is encoded as count -1.
status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (!val) {
        return writeInt32(-1);
    }
    status_t ret = writeInt32(static_cast<uint32_t>(len));
    if (ret == NO_ERROR) {
        ret = write(val, len * sizeof(*val));
    }
    return ret;
}

// bool/char/byte all occupy a full padded int32 slot on the wire.
status_t Parcel::writeBool(bool val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeChar(char16_t val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeByte(int8_t val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeInt64(int64_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeUint64(uint64_t val)
{
    return writeAligned(val);
}

// Pointers are widened to binder_uintptr_t so 32/64-bit processes agree.
status_t Parcel::writePointer(uintptr_t val)
{
    return writeAligned<binder_uintptr_t>(val);
}

status_t Parcel::writeFloat(float val)
{
    return writeAligned(val);
}

#if defined(__mips__) && defined(__mips_hard_float)

// MIPS hard-float: route the double through an integer union so the value
// is written via integer registers/alignment rules.
status_t Parcel::writeDouble(double val)
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = val;
    return writeAligned(u.ll);
}

#else

status_t Parcel::writeDouble(double val)
{
    return writeAligned(val);
}

#endif

// NOTE(review): passing str == NULL crashes in strlen(); callers must not
// pass NULL — confirm whether a guard is wanted here.
status_t Parcel::writeCString(const char* str)
{
    return write(str, strlen(str)+1);
}

status_t Parcel::writeString8(const String8& str)
{
    status_t err = writeInt32(str.bytes());
    // only write string if its length is more than zero characters,
    // as readString8 will only read if the length field is non-zero.
    // this is slightly different from how writeString16 works.
    if (str.bytes() > 0 && err == NO_ERROR) {
        // +1 includes the NUL terminator that String8 guarantees.
        err = write(str.string(), str.bytes()+1);
    }
    return err;
}

status_t Parcel::writeString16(const String16& str)
{
    return writeString16(str.string(), str.size());
}

// UTF-16 string: int32 char count, payload, then an explicit char16 NUL.
// NULL is encoded as length -1.
status_t Parcel::writeString16(const char16_t* str, size_t len)
{
    if (str == NULL) return writeInt32(-1);

    status_t err = writeInt32(len);
    if (err == NO_ERROR) {
        len *= sizeof(char16_t);
        uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
        if (data) {
            memcpy(data, str, len);
            *reinterpret_cast<char16_t*>(data+len) = 0;
            return NO_ERROR;
        }
        err = mError;
    }
    return err;
}

status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

status_t Parcel::writeStrongBinderVector(const std::vector<sp<IBinder>>& val)
{
    return writeTypedVector(val, this, &Parcel::writeStrongBinder);
}

status_t Parcel::readStrongBinderVector(std::vector<sp<IBinder>>* val) const {
    return readTypedVector(val, this, &Parcel::readStrongBinder);
}

status_t Parcel::writeWeakBinder(const wp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

// Serialize a native_handle: fd count, int count, the dup()ed fds as binder
// objects, then the raw ints.
status_t Parcel::writeNativeHandle(const native_handle* handle)
{
    if (!handle || handle->version != sizeof(native_handle))
        return BAD_TYPE;

    status_t err;
    err = writeInt32(handle->numFds);
    if (err != NO_ERROR) return err;

    err = writeInt32(handle->numInts);
    if (err != NO_ERROR) return err;

    for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
        err = writeDupFileDescriptor(handle->data[i]);

    if (err != NO_ERROR) {
        ALOGD("write native handle, write dup fd failed");
        return err;
    }
    err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
    return err;
}

// Write a file descriptor as a binder object.  cookie records ownership:
// 1 means this Parcel owns the fd and release_object() will close it.
status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership)
{
    flat_binder_object obj;
    obj.type = BINDER_TYPE_FD;
    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
    obj.handle = fd;
    obj.cookie = takeOwnership ? 1 : 0;
    return writeObject(obj, true);
}

// dup() the fd and write the copy with ownership; the duplicate is closed
// if the write fails, so no fd leaks on error.
status_t Parcel::writeDupFileDescriptor(int fd)
{
    int dupFd = dup(fd);
    if (dupFd < 0) {
        return -errno;
    }
    status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
    if (err != OK) {
        close(dupFd);
    }
    return err;
}

status_t Parcel::writeUniqueFileDescriptor(const unique_fd& fd) {
    return writeDupFileDescriptor(fd.get());
}

status_t Parcel::writeUniqueFileDescriptorVector(const std::vector<unique_fd>& val) {
    return writeTypedVector(val, this, &Parcel::writeUniqueFileDescriptor);
}

// Reserve a writable blob.  Small (or fd-restricted) blobs are stored
// inline; larger ones go into an ashmem region whose fd is written into
// the parcel.  On success outBlob describes the mapped region.
status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t status;
    if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) {
        ALOGV("writeBlob: write in place");
        status = writeInt32(BLOB_INPLACE);
        if (status) return status;

        void* ptr = writeInplace(len);
        if (!ptr) return NO_MEMORY;

        outBlob->init(-1, ptr, len, false);
        return NO_ERROR;
    }

    ALOGV("writeBlob: write to ashmem");
    int fd = ashmem_create_region("Parcel Blob", len);
    if (fd < 0) return NO_MEMORY;

    int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
    if (result < 0) {
        status = result;
    } else {
        void* ptr = ::mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (ptr == MAP_FAILED) {
            status = -errno;
        } else {
            if (!mutableCopy) {
                // Seal the region read-only for the receiver; our existing
                // writable mapping stays usable.
                result = ashmem_set_prot_region(fd, PROT_READ);
            }
            if (result < 0) {
                status = result;
            } else {
                status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE);
                if (!status) {
                    status = writeFileDescriptor(fd, true /*takeOwnership*/);
                    if (!status) {
                        outBlob->init(fd, ptr, len, mutableCopy);
                        return NO_ERROR;
                    }
                }
            }
        }
        // error path: undo the mapping; the fd is closed below.
        ::munmap(ptr, len);
    }
    ::close(fd);
    return status;
}

status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd)
{
    // Must match up with what's done in writeBlob.
    if (!mAllowFds) return FDS_NOT_ALLOWED;
    status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE);
    if (status) return status;
    return writeDupFileDescriptor(fd);
}

// Flatten a Flattenable: int32 payload length, int32 fd count, padded
// payload bytes, then each fd dup()ed into the parcel.
status_t Parcel::write(const FlattenableHelperInterface& val)
{
    status_t err;

    // size if needed
    const size_t len = val.getFlattenedSize();
    const size_t fd_count = val.getFdCount();

    if ((len > INT32_MAX) || (fd_count > INT32_MAX)) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
1167 return BAD_VALUE; 1168 } 1169 1170 err = this->writeInt32(len); 1171 if (err) return err; 1172 1173 err = this->writeInt32(fd_count); 1174 if (err) return err; 1175 1176 // payload 1177 void* const buf = this->writeInplace(pad_size(len)); 1178 if (buf == NULL) 1179 return BAD_VALUE; 1180 1181 int* fds = NULL; 1182 if (fd_count) { 1183 fds = new int[fd_count]; 1184 } 1185 1186 err = val.flatten(buf, len, fds, fd_count); 1187 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) { 1188 err = this->writeDupFileDescriptor( fds[i] ); 1189 } 1190 1191 if (fd_count) { 1192 delete [] fds; 1193 } 1194 1195 return err; 1196} 1197 1198status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData) 1199{ 1200 const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity; 1201 const bool enoughObjects = mObjectsSize < mObjectsCapacity; 1202 if (enoughData && enoughObjects) { 1203restart_write: 1204 *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val; 1205 1206 // remember if it's a file descriptor 1207 if (val.type == BINDER_TYPE_FD) { 1208 if (!mAllowFds) { 1209 // fail before modifying our object index 1210 return FDS_NOT_ALLOWED; 1211 } 1212 mHasFds = mFdsKnown = true; 1213 } 1214 1215 // Need to write meta-data? 
        if (nullMetaData || val.binder != 0) {
            // Record this object's offset and take a reference on it so the
            // parcel keeps it alive until freed.
            mObjects[mObjectsSize] = mDataPos;
            acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize);
            mObjectsSize++;
        }

        return finishWrite(sizeof(flat_binder_object));
    }

    // Not enough room: grow whichever resource was short, then retry the
    // write above via the restart_write label.
    if (!enoughData) {
        const status_t err = growData(sizeof(val));
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) {
        // Grow the object index by ~1.5x (same policy as growData).
        size_t newSize = ((mObjectsSize+2)*3)/2;
        if (newSize < mObjectsSize) return NO_MEMORY;   // overflow
        binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
        if (objects == NULL) return NO_MEMORY;
        mObjects = objects;
        mObjectsCapacity = newSize;
    }

    goto restart_write;
}

// Writes an "everything is fine" header (no exception) for reply parcels.
status_t Parcel::writeNoException()
{
    binder::Status status;
    return status.writeToParcel(this);
}

void Parcel::remove(size_t /*start*/, size_t /*amt*/)
{
    LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
}

// Copies `len` bytes out of the parcel at the current read position and
// advances the position to the next 4-byte boundary.
status_t Parcel::read(void* outData, size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // The three-part check guards against overflow of mDataPos+pad_size(len),
    // reading past the end of the buffer, and overflow inside pad_size itself.
    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        memcpy(outData, mData+mDataPos, len);
        mDataPos += pad_size(len);
        ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
        return NO_ERROR;
    }
    return NOT_ENOUGH_DATA;
}

// Returns a pointer directly into the parcel's buffer for `len` bytes
// (caller must not hold it past parcel mutation), advancing the position
// by the padded length.  NULL on bounds/overflow failure.
const void* Parcel::readInplace(size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return NULL;
    }

    // Same overflow/bounds triple-check as Parcel::read() above.
    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        const void* data = mData+mDataPos;
        mDataPos += pad_size(len);
        ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
        return data;
    }
    return NULL;
}

// Reads a T whose size is already 4-byte aligned (enforced at compile time),
// so no padding arithmetic is needed.
template<class T>
status_t Parcel::readAligned(T *pArg) const {
    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));

    if ((mDataPos+sizeof(T)) <= mDataSize) {
        const void* data = mData+mDataPos;
        mDataPos += sizeof(T);
        *pArg = *reinterpret_cast<const T*>(data);
        return NO_ERROR;
    } else {
        return NOT_ENOUGH_DATA;
    }
}

// Convenience overload: returns the value, or 0 (zero-initialized T) if the
// parcel ran out of data.
template<class T>
T Parcel::readAligned() const {
    T result;
    if (readAligned(&result) != NO_ERROR) {
        result = 0;
    }

    return result;
}

// Writes a T whose size is already 4-byte aligned (enforced at compile
// time), growing the buffer and retrying once if needed.
template<class T>
status_t Parcel::writeAligned(T val) {
    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));

    if ((mDataPos+sizeof(val)) <= mDataCapacity) {
restart_write:
        *reinterpret_cast<T*>(mData+mDataPos) = val;
        return finishWrite(sizeof(val));
    }

    status_t err = growData(sizeof(val));
    if (err == NO_ERROR) goto restart_write;
    return err;
}

// Reads a length-prefixed byte vector.  A negative length encodes "null"
// (UNEXPECTED_NULL); the length is bounded by the bytes actually available
// so a malformed parcel cannot force a huge allocation.
status_t Parcel::readByteVector(std::vector<int8_t>* val) const {
    val->clear();

    int32_t size;
    status_t status = readInt32(&size);

    if (status != OK) {
        return status;
    }

    if (size < 0) {
        status = UNEXPECTED_NULL;
        return status;
    }
    if (size_t(size) > dataAvail()) {
        status = BAD_VALUE;
        return status;
    }

    const void* data = readInplace(size);
    if (!data) {
        status = BAD_VALUE;
        return status;
    }
    val->resize(size);
    memcpy(val->data(), data, size);

    return status;
}

status_t
Parcel::readInt32Vector(std::vector<int32_t>* val) const {
    return readTypedVector(val, this, &Parcel::readInt32);
}

status_t Parcel::readInt64Vector(std::vector<int64_t>* val) const {
    return readTypedVector(val, this, &Parcel::readInt64);
}

status_t Parcel::readFloatVector(std::vector<float>* val) const {
    return readTypedVector(val, this, &Parcel::readFloat);
}

status_t Parcel::readDoubleVector(std::vector<double>* val) const {
    return readTypedVector(val, this, &Parcel::readDouble);
}

// Bool vectors need a hand-rolled loop because vector<bool> elements are
// bit proxies and cannot be memcpy'd or passed by address.
status_t Parcel::readBoolVector(std::vector<bool>* val) const {
    val->clear();

    int32_t size;
    status_t status = readInt32(&size);

    if (status != OK) {
        return status;
    }

    if (size < 0) {
        return UNEXPECTED_NULL;
    }

    // NOTE(review): unlike readByteVector, `size` is not bounded by
    // dataAvail() before the resize, so a malformed parcel can request a
    // large allocation up-front (reads then fail element-by-element) —
    // consider adding the same dataAvail() bound.
    val->resize(size);

    /* C++ bool handling means a vector of bools isn't necessarily addressable
     * (we might use individual bits)
     */
    bool data;
    for (int32_t i = 0; i < size; ++i) {
        status = readBool(&data);
        (*val)[i] = data;

        if (status != OK) {
            return status;
        }
    }

    return OK;
}

status_t Parcel::readCharVector(std::vector<char16_t>* val) const {
    return readTypedVector(val, this, &Parcel::readChar);
}

status_t Parcel::readString16Vector(std::vector<String16>* val) const {
    return readTypedVector(val, this, &Parcel::readString16);
}

// --- Primitive readers: thin wrappers over readAligned<T>(). ---

status_t Parcel::readInt32(int32_t *pArg) const
{
    return readAligned(pArg);
}

int32_t Parcel::readInt32() const
{
    return readAligned<int32_t>();
}

status_t Parcel::readUint32(uint32_t *pArg) const
{
    return readAligned(pArg);
}

uint32_t Parcel::readUint32() const
{
    return readAligned<uint32_t>();
}

status_t Parcel::readInt64(int64_t *pArg) const
{
    return readAligned(pArg);
}


int64_t Parcel::readInt64() const
{
    return readAligned<int64_t>();
}

status_t Parcel::readUint64(uint64_t *pArg) const
{
    return readAligned(pArg);
}

uint64_t Parcel::readUint64() const
{
    return readAligned<uint64_t>();
}

// Pointers are always transported as binder_uintptr_t (64-bit on 64-bit
// kernels) so 32- and 64-bit processes agree on the wire format.
status_t Parcel::readPointer(uintptr_t *pArg) const
{
    status_t ret;
    binder_uintptr_t ptr;
    ret = readAligned(&ptr);
    if (!ret)
        *pArg = ptr;
    return ret;
}

uintptr_t Parcel::readPointer() const
{
    return readAligned<binder_uintptr_t>();
}


status_t Parcel::readFloat(float *pArg) const
{
    return readAligned(pArg);
}


float Parcel::readFloat() const
{
    return readAligned<float>();
}

#if defined(__mips__) && defined(__mips_hard_float)

// On hard-float MIPS, doubles are read through an integer union to avoid
// alignment/ABI issues with the FPU registers.
status_t Parcel::readDouble(double *pArg) const
{
    union {
      double d;
      unsigned long long ll;
    } u;
    u.d = 0;
    status_t status;
    status = readAligned(&u.ll);
    *pArg = u.d;
    return status;
}

double Parcel::readDouble() const
{
    union {
      double d;
      unsigned long long ll;
    } u;
    u.ll = readAligned<unsigned long long>();
    return u.d;
}

#else

status_t Parcel::readDouble(double *pArg) const
{
    return readAligned(pArg);
}

double Parcel::readDouble() const
{
    return readAligned<double>();
}

#endif

status_t Parcel::readIntPtr(intptr_t *pArg) const
{
    return readAligned(pArg);
}


intptr_t Parcel::readIntPtr() const
{
    return readAligned<intptr_t>();
}

// Bools are transported as a full int32 (any nonzero value reads as true).
status_t Parcel::readBool(bool *pArg) const
{
    int32_t tmp;
    status_t ret = readInt32(&tmp);
    *pArg = (tmp != 0);
    return ret;
}

bool Parcel::readBool() const
{
    return readInt32() != 0;
}

// Chars are transported widened to int32 and truncated back on read.
status_t Parcel::readChar(char16_t *pArg) const
{
    int32_t
        tmp;
    status_t ret = readInt32(&tmp);
    *pArg = char16_t(tmp);
    return ret;
}

char16_t Parcel::readChar() const
{
    return char16_t(readInt32());
}

// Bytes are transported widened to int32 and truncated back on read.
status_t Parcel::readByte(int8_t *pArg) const
{
    int32_t tmp;
    status_t ret = readInt32(&tmp);
    *pArg = int8_t(tmp);
    return ret;
}

int8_t Parcel::readByte() const
{
    return int8_t(readInt32());
}

// Returns a pointer directly into the parcel's buffer for a NUL-terminated
// string, or NULL if no terminator exists within the remaining data.
const char* Parcel::readCString() const
{
    const size_t avail = mDataSize-mDataPos;
    if (avail > 0) {
        const char* str = reinterpret_cast<const char*>(mData+mDataPos);
        // is the string's trailing NUL within the parcel's valid bounds?
        const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
        if (eos) {
            const size_t len = eos - str;
            // Skip the NUL and round up to the next 4-byte boundary.
            mDataPos += pad_size(len+1);
            ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
            return str;
        }
    }
    return NULL;
}

// Reads a length-prefixed 8-bit string; returns an empty String8 on any
// malformed input (negative or overflowing length, short data).
String8 Parcel::readString8() const
{
    int32_t size = readInt32();
    // watch for potential int overflow adding 1 for trailing NUL
    if (size > 0 && size < INT32_MAX) {
        const char* str = (const char*)readInplace(size+1);
        if (str) return String8(str, size);
    }
    return String8();
}

String16 Parcel::readString16() const
{
    size_t len;
    const char16_t* str = readString16Inplace(&len);
    if (str) return String16(str, len);
    ALOGE("Reading a NULL string not supported here.");
    return String16();
}

status_t Parcel::readString16(String16* pArg) const
{
    size_t len;
    const char16_t* str = readString16Inplace(&len);
    if (str) {
        pArg->setTo(str, len);
        return 0;
    } else {
        *pArg = String16();
        return UNEXPECTED_NULL;
    }
}

// Returns a pointer directly into the parcel for a length-prefixed UTF-16
// string (length in code units, excluding the trailing NUL that is also
// stored on the wire).  NULL and *outLen == 0 on malformed input.
const char16_t* Parcel::readString16Inplace(size_t* outLen) const
{
    int32_t size = readInt32();
    // watch for potential int overflow from size+1
    if (size >= 0 && size < INT32_MAX) {
        *outLen = size;
        const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
        if (str != NULL) {
            return str;
        }
    }
    *outLen = 0;
    return NULL;
}

status_t Parcel::readStrongBinder(sp<IBinder>* val) const
{
    return unflatten_binder(ProcessState::self(), *this, val);
}

sp<IBinder> Parcel::readStrongBinder() const
{
    sp<IBinder> val;
    readStrongBinder(&val);
    return val;
}

wp<IBinder> Parcel::readWeakBinder() const
{
    wp<IBinder> val;
    unflatten_binder(ProcessState::self(), *this, &val);
    return val;
}

// Reads the exception header written by writeNoException()/Status and
// returns its code (0 means no exception).
int32_t Parcel::readExceptionCode() const
{
    binder::Status status;
    status.readFromParcel(*this);
    return status.exceptionCode();
}

// Reconstructs a native_handle from the parcel.  The returned handle (and
// its dup'd fds) is owned by the caller.  Returns NULL (0) on any error.
native_handle* Parcel::readNativeHandle() const
{
    int numFds, numInts;
    status_t err;
    err = readInt32(&numFds);
    if (err != NO_ERROR) return 0;
    err = readInt32(&numInts);
    if (err != NO_ERROR) return 0;

    // NOTE(review): numFds/numInts come off the wire unvalidated here; this
    // relies on native_handle_create() rejecting bad counts — confirm the
    // libcutils version in use does so.
    native_handle* h = native_handle_create(numFds, numInts);
    if (!h) {
        return 0;
    }

    for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
        // readFileDescriptor() returns a parcel-owned fd (or negative
        // BAD_TYPE); dup() fails on a negative fd, flagging the error.
        h->data[i] = dup(readFileDescriptor());
        if (h->data[i] < 0) err = BAD_VALUE;
    }
    err = read(h->data + numFds, sizeof(int)*numInts);
    if (err != NO_ERROR) {
        native_handle_close(h);
        native_handle_delete(h);
        h = 0;
    }
    return h;
}


// Returns the fd stored at the current read position WITHOUT duplicating
// it — the fd remains owned by the parcel.  BAD_TYPE if the next object
// is not an fd.
int Parcel::readFileDescriptor() const
{
    const flat_binder_object* flat = readObject(true);

    if (flat && flat->type == BINDER_TYPE_FD) {
        return flat->handle;
    }

    return BAD_TYPE;
}

// Reads an fd and transfers a dup of it to the caller via unique_fd.
status_t Parcel::readUniqueFileDescriptor(unique_fd* val) const
{
    int got = readFileDescriptor();

    if (got == BAD_TYPE) {
        return BAD_TYPE;
    }

    val->reset(dup(got));

    if
(val->get() < 0) { 1712 return BAD_VALUE; 1713 } 1714 1715 return OK; 1716} 1717 1718 1719status_t Parcel::readUniqueFileDescriptorVector(std::vector<unique_fd>* val) const { 1720 return readTypedVector(val, this, &Parcel::readUniqueFileDescriptor); 1721} 1722 1723status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const 1724{ 1725 int32_t blobType; 1726 status_t status = readInt32(&blobType); 1727 if (status) return status; 1728 1729 if (blobType == BLOB_INPLACE) { 1730 ALOGV("readBlob: read in place"); 1731 const void* ptr = readInplace(len); 1732 if (!ptr) return BAD_VALUE; 1733 1734 outBlob->init(-1, const_cast<void*>(ptr), len, false); 1735 return NO_ERROR; 1736 } 1737 1738 ALOGV("readBlob: read from ashmem"); 1739 bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE); 1740 int fd = readFileDescriptor(); 1741 if (fd == int(BAD_TYPE)) return BAD_VALUE; 1742 1743 void* ptr = ::mmap(NULL, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ, 1744 MAP_SHARED, fd, 0); 1745 if (ptr == MAP_FAILED) return NO_MEMORY; 1746 1747 outBlob->init(fd, ptr, len, isMutable); 1748 return NO_ERROR; 1749} 1750 1751status_t Parcel::read(FlattenableHelperInterface& val) const 1752{ 1753 // size 1754 const size_t len = this->readInt32(); 1755 const size_t fd_count = this->readInt32(); 1756 1757 if (len > INT32_MAX) { 1758 // don't accept size_t values which may have come from an 1759 // inadvertent conversion from a negative int. 
1760 return BAD_VALUE; 1761 } 1762 1763 // payload 1764 void const* const buf = this->readInplace(pad_size(len)); 1765 if (buf == NULL) 1766 return BAD_VALUE; 1767 1768 int* fds = NULL; 1769 if (fd_count) { 1770 fds = new int[fd_count]; 1771 } 1772 1773 status_t err = NO_ERROR; 1774 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) { 1775 fds[i] = dup(this->readFileDescriptor()); 1776 if (fds[i] < 0) { 1777 err = BAD_VALUE; 1778 ALOGE("dup() failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s", 1779 i, fds[i], fd_count, strerror(errno)); 1780 } 1781 } 1782 1783 if (err == NO_ERROR) { 1784 err = val.unflatten(buf, len, fds, fd_count); 1785 } 1786 1787 if (fd_count) { 1788 delete [] fds; 1789 } 1790 1791 return err; 1792} 1793const flat_binder_object* Parcel::readObject(bool nullMetaData) const 1794{ 1795 const size_t DPOS = mDataPos; 1796 if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) { 1797 const flat_binder_object* obj 1798 = reinterpret_cast<const flat_binder_object*>(mData+DPOS); 1799 mDataPos = DPOS + sizeof(flat_binder_object); 1800 if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) { 1801 // When transferring a NULL object, we don't write it into 1802 // the object list, so we don't want to check for it when 1803 // reading. 1804 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos); 1805 return obj; 1806 } 1807 1808 // Ensure that this object is valid... 1809 binder_size_t* const OBJS = mObjects; 1810 const size_t N = mObjectsSize; 1811 size_t opos = mNextObjectHint; 1812 1813 if (N > 0) { 1814 ALOGV("Parcel %p looking for obj at %zu, hint=%zu", 1815 this, DPOS, opos); 1816 1817 // Start at the current hint position, looking for an object at 1818 // the current data position. 1819 if (opos < N) { 1820 while (opos < (N-1) && OBJS[opos] < DPOS) { 1821 opos++; 1822 } 1823 } else { 1824 opos = N-1; 1825 } 1826 if (OBJS[opos] == DPOS) { 1827 // Found it! 
                ALOGV("Parcel %p found obj %zu at index %zu with forward search",
                     this, DPOS, opos);
                mNextObjectHint = opos+1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }

            // Look backwards for it...
            while (opos > 0 && OBJS[opos] > DPOS) {
                opos--;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with backward search",
                     this, DPOS, opos);
                mNextObjectHint = opos+1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }
        }
        // The data at this offset is not a registered object: reject it
        // rather than hand out an unvalidated flat_binder_object.
        ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
             this, DPOS);
    }
    return NULL;
}

// Closes every fd stored in this parcel (called when the parcel's data is
// being discarded without transferring ownership elsewhere).
void Parcel::closeFileDescriptors()
{
    size_t i = mObjectsSize;
    if (i > 0) {
        //ALOGI("Closing file descriptors for %zu objects...", i);
    }
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
        if (flat->type == BINDER_TYPE_FD) {
            //ALOGI("Closing fd: %ld", flat->handle);
            close(flat->handle);
        }
    }
}

// --- Raw accessors used by IPCThreadState when handing the buffers to the
// --- binder driver. ---

uintptr_t Parcel::ipcData() const
{
    return reinterpret_cast<uintptr_t>(mData);
}

size_t Parcel::ipcDataSize() const
{
    // The effective size is wherever writing has reached, even if the
    // nominal data size lags behind the write position.
    return (mDataSize > mDataPos ? mDataSize : mDataPos);
}

uintptr_t Parcel::ipcObjects() const
{
    return reinterpret_cast<uintptr_t>(mObjects);
}

size_t Parcel::ipcObjectsCount() const
{
    return mObjectsSize;
}

// Adopts externally-owned buffers (typically a kernel-delivered transaction):
// the parcel references them directly and calls relFunc to release them.
// Object offsets are validated to be in-bounds-ordered and non-overlapping;
// on a bad offset the whole object list is dropped (mObjectsSize = 0).
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
    const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
{
    binder_size_t minOffset = 0;
    freeDataNoInit();
    mError = NO_ERROR;
    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
    mDataPos = 0;
    ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
    mObjects = const_cast<binder_size_t*>(objects);
    mObjectsSize = mObjectsCapacity = objectsCount;
    mNextObjectHint = 0;
    mOwner = relFunc;
    mOwnerCookie = relCookie;
    for (size_t i = 0; i < mObjectsSize; i++) {
        binder_size_t offset = mObjects[i];
        if (offset < minOffset) {
            ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
                  __func__, (uint64_t)offset, (uint64_t)minOffset);
            mObjectsSize = 0;
            break;
        }
        minOffset = offset + sizeof(flat_binder_object);
    }
    scanForFds();
}

// Dumps the parcel (hex data plus its object table) for debugging.
void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
{
    to << "Parcel(";

    if (errorCheck() != NO_ERROR) {
        const status_t err = errorCheck();
        to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
    } else if (dataSize() > 0) {
        const uint8_t* DATA = data();
        to << indent << HexDump(DATA, dataSize()) << dedent;
        const binder_size_t* OBJS = objects();
        const size_t N = objectsCount();
        for (size_t i=0; i<N; i++) {
            const flat_binder_object* flat
                = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
            to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
                << TypeCode(flat->type & 0x7f7f7f00)
                << " = " << flat->binder;
        }
    } else {
        to << "NULL";
    }

    to << ")";
}

// Drops the reference this parcel holds on every object in its index.
void Parcel::releaseObjects()
{
    const sp<ProcessState> proc(ProcessState::self());
    size_t i = mObjectsSize;
    uint8_t* const data = mData;
    binder_size_t* const objects = mObjects;
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(data+objects[i]);
        release_object(proc, *flat, this, &mOpenAshmemSize);
    }
}

// Takes a reference on every object in the index (mirror of releaseObjects).
void Parcel::acquireObjects()
{
    const sp<ProcessState> proc(ProcessState::self());
    size_t i = mObjectsSize;
    uint8_t* const data = mData;
    binder_size_t* const objects = mObjects;
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(data+objects[i]);
        acquire_object(proc, *flat, this, &mOpenAshmemSize);
    }
}

void Parcel::freeData()
{
    freeDataNoInit();
    initState();
}

// Releases the parcel's buffers without resetting member state (callers
// either reinitialize via initState() or immediately overwrite the fields,
// as ipcSetDataReference does).
void Parcel::freeDataNoInit()
{
    if (mOwner) {
        // Externally-owned buffers: hand everything back to the owner.
        LOG_ALLOC("Parcel %p: freeing other owner data", this);
        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
    } else {
        LOG_ALLOC("Parcel %p: freeing allocated data", this);
        releaseObjects();
        if (mData) {
            LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
            pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
            // Clamp rather than underflow if the global accounting has
            // drifted out of sync.
            if (mDataCapacity <= gParcelGlobalAllocSize) {
              gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity;
            } else {
              gParcelGlobalAllocSize = 0;
            }
            if (gParcelGlobalAllocCount > 0) {
              gParcelGlobalAllocCount--;
            }
            pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
            free(mData);
        }
        if (mObjects) free(mObjects);
    }
}

// Grows the data buffer by ~1.5x of (current size + len).
status_t Parcel::growData(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    size_t newSize = ((mDataSize+len)*3)/2;
    // newSize <= mDataSize means the multiplication overflowed.
    return (newSize <= mDataSize)
            ?   (status_t) NO_MEMORY
            :   continueWrite(newSize);
}

// Discards all content and resizes the buffer to `desired`, reacquiring
// ownership from an external owner if necessary.
status_t Parcel::restartWrite(size_t desired)
{
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (mOwner) {
        freeData();
        return continueWrite(desired);
    }

    uint8_t* data = (uint8_t*)realloc(mData, desired);
    if (!data && desired > mDataCapacity) {
        mError = NO_MEMORY;
        return NO_MEMORY;
    }

    releaseObjects();

    if (data) {
        LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocSize -= mDataCapacity;
        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
        mData = data;
        mDataCapacity = desired;
    }
    // If realloc failed but we were shrinking, keep the old (larger) buffer.

    mDataSize = mDataPos = 0;
    ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
    ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);

    free(mObjects);
    mObjects = NULL;
    mObjectsSize = mObjectsCapacity = 0;
    mNextObjectHint = 0;
    mHasFds = false;
    mFdsKnown = true;
    mAllowFds = true;

    return NO_ERROR;
}

// Resizes the data buffer to `desired` while preserving existing content
// (up to the new size) and the object index entries that still fit.
status_t Parcel::continueWrite(size_t desired)
{
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // If shrinking, first adjust for any objects that appear
    // after the new data size.
2077 size_t objectsSize = mObjectsSize; 2078 if (desired < mDataSize) { 2079 if (desired == 0) { 2080 objectsSize = 0; 2081 } else { 2082 while (objectsSize > 0) { 2083 if (mObjects[objectsSize-1] < desired) 2084 break; 2085 objectsSize--; 2086 } 2087 } 2088 } 2089 2090 if (mOwner) { 2091 // If the size is going to zero, just release the owner's data. 2092 if (desired == 0) { 2093 freeData(); 2094 return NO_ERROR; 2095 } 2096 2097 // If there is a different owner, we need to take 2098 // posession. 2099 uint8_t* data = (uint8_t*)malloc(desired); 2100 if (!data) { 2101 mError = NO_MEMORY; 2102 return NO_MEMORY; 2103 } 2104 binder_size_t* objects = NULL; 2105 2106 if (objectsSize) { 2107 objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t)); 2108 if (!objects) { 2109 free(data); 2110 2111 mError = NO_MEMORY; 2112 return NO_MEMORY; 2113 } 2114 2115 // Little hack to only acquire references on objects 2116 // we will be keeping. 2117 size_t oldObjectsSize = mObjectsSize; 2118 mObjectsSize = objectsSize; 2119 acquireObjects(); 2120 mObjectsSize = oldObjectsSize; 2121 } 2122 2123 if (mData) { 2124 memcpy(data, mData, mDataSize < desired ? mDataSize : desired); 2125 } 2126 if (objects && mObjects) { 2127 memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t)); 2128 } 2129 //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid()); 2130 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie); 2131 mOwner = NULL; 2132 2133 LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired); 2134 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 2135 gParcelGlobalAllocSize += desired; 2136 gParcelGlobalAllocCount++; 2137 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 2138 2139 mData = data; 2140 mObjects = objects; 2141 mDataSize = (mDataSize < desired) ? 
mDataSize : desired; 2142 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize); 2143 mDataCapacity = desired; 2144 mObjectsSize = mObjectsCapacity = objectsSize; 2145 mNextObjectHint = 0; 2146 2147 } else if (mData) { 2148 if (objectsSize < mObjectsSize) { 2149 // Need to release refs on any objects we are dropping. 2150 const sp<ProcessState> proc(ProcessState::self()); 2151 for (size_t i=objectsSize; i<mObjectsSize; i++) { 2152 const flat_binder_object* flat 2153 = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]); 2154 if (flat->type == BINDER_TYPE_FD) { 2155 // will need to rescan because we may have lopped off the only FDs 2156 mFdsKnown = false; 2157 } 2158 release_object(proc, *flat, this, &mOpenAshmemSize); 2159 } 2160 binder_size_t* objects = 2161 (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t)); 2162 if (objects) { 2163 mObjects = objects; 2164 } 2165 mObjectsSize = objectsSize; 2166 mNextObjectHint = 0; 2167 } 2168 2169 // We own the data, so we can just do a realloc(). 2170 if (desired > mDataCapacity) { 2171 uint8_t* data = (uint8_t*)realloc(mData, desired); 2172 if (data) { 2173 LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity, 2174 desired); 2175 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 2176 gParcelGlobalAllocSize += desired; 2177 gParcelGlobalAllocSize -= mDataCapacity; 2178 gParcelGlobalAllocCount++; 2179 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 2180 mData = data; 2181 mDataCapacity = desired; 2182 } else if (desired > mDataCapacity) { 2183 mError = NO_MEMORY; 2184 return NO_MEMORY; 2185 } 2186 } else { 2187 if (mDataSize > desired) { 2188 mDataSize = desired; 2189 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize); 2190 } 2191 if (mDataPos > desired) { 2192 mDataPos = desired; 2193 ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos); 2194 } 2195 } 2196 2197 } else { 2198 // This is the first data. Easy! 
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }

        // With no data buffer the object index should also be empty; log if
        // the invariant is broken (but continue anyway).
        if(!(mDataCapacity == 0 && mObjects == NULL
             && mObjectsCapacity == 0)) {
            ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
        }

        LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;
        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);

        mData = data;
        mDataSize = mDataPos = 0;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
        mDataCapacity = desired;
    }

    return NO_ERROR;
}

// Resets every member to its empty/default value (no buffers owned).
void Parcel::initState()
{
    LOG_ALLOC("Parcel %p: initState", this);
    mError = NO_ERROR;
    mData = 0;
    mDataSize = 0;
    mDataCapacity = 0;
    mDataPos = 0;
    ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
    ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
    mObjects = NULL;
    mObjectsSize = 0;
    mObjectsCapacity = 0;
    mNextObjectHint = 0;
    mHasFds = false;
    mFdsKnown = true;
    mAllowFds = true;
    mOwner = NULL;
    mOpenAshmemSize = 0;
}

// Recomputes mHasFds by scanning the object index; sets mFdsKnown so the
// cached answer can be trusted until the index changes again.
void Parcel::scanForFds() const
{
    bool hasFds = false;
    for (size_t i=0; i<mObjectsSize; i++) {
        const flat_binder_object* flat
            = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
        if (flat->type == BINDER_TYPE_FD) {
            hasFds = true;
            break;
        }
    }
    mHasFds = hasFds;
    mFdsKnown = true;
}

size_t Parcel::getBlobAshmemSize() const
{
    // This used to return the size of all blobs that were written to ashmem, now we're returning
    // the ashmem currently referenced by this Parcel, which should be equivalent.
    // TODO: Remove method once ABI can be changed.
    return mOpenAshmemSize;
}

size_t Parcel::getOpenAshmemSize() const
{
    return mOpenAshmemSize;
}

// --- Parcel::Blob ---

Parcel::Blob::Blob() :
        mFd(-1), mData(NULL), mSize(0), mMutable(false) {
}

Parcel::Blob::~Blob() {
    release();
}

// Unmaps the blob's memory if it was backed by an ashmem fd (the fd itself
// is owned by the parcel, not the Blob) and resets to the empty state.
void Parcel::Blob::release() {
    if (mFd != -1 && mData) {
        ::munmap(mData, mSize);
    }
    clear();
}

void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
    mFd = fd;
    mData = data;
    mSize = size;
    mMutable = isMutable;
}

void Parcel::Blob::clear() {
    mFd = -1;
    mData = NULL;
    mSize = 0;
    mMutable = false;
}

}; // namespace android