Parcel.cpp revision f6ec7d5e3a55ee7eca4203dc386b6d0837b8e660
/*
 * Copyright (C) 2005 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Parcel"
//#define LOG_NDEBUG 0

#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#include <limits>
#include <memory>
#include <vector>

#include <binder/Binder.h>
#include <binder/BpBinder.h>
#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
#include <binder/ProcessState.h>
#include <binder/Status.h>
#include <binder/TextOutput.h>

#include <cutils/ashmem.h>
#include <utils/Debug.h>
#include <utils/Flattenable.h>
#include <utils/Log.h>
#include <utils/misc.h>
#include <utils/String8.h>
#include <utils/String16.h>

#include <private/binder/binder_module.h>
#include <private/binder/Static.h>

#ifndef INT32_MAX
#define INT32_MAX ((int32_t)(2147483647))
#endif

#define LOG_REFS(...)
//#define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
#define LOG_ALLOC(...)
//#define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)

// ---------------------------------------------------------------------------

// This macro should never be used at runtime, as a too large value
// of s could cause an integer overflow.
// Instead, you should always use the wrapper function pad_size().
#define PAD_SIZE_UNSAFE(s) (((s)+3)&~3)

// Rounds s up to the next multiple of 4 (binder data is 4-byte aligned).
// Aborts instead of silently wrapping when s is within 3 of SIZE_T_MAX.
static size_t pad_size(size_t s) {
    if (s > (SIZE_T_MAX - 3)) {
        abort();
    }
    return PAD_SIZE_UNSAFE(s);
}

// Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
#define STRICT_MODE_PENALTY_GATHER (0x40 << 16)

// XXX This can be made public if we want to provide
// support for typed data.
struct small_flat_data
{
    uint32_t type;
    uint32_t data;
};

namespace android {

// Guards the two global Parcel allocation counters below.
static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
static size_t gParcelGlobalAllocSize = 0;
static size_t gParcelGlobalAllocCount = 0;

// Maximum size of a blob to transfer in-place; larger blobs are moved
// through an ashmem region instead (see writeBlob).
static const size_t BLOB_INPLACE_LIMIT = 16 * 1024;

// Wire tags written ahead of a blob payload.
enum {
    BLOB_INPLACE = 0,
    BLOB_ASHMEM_IMMUTABLE = 1,
    BLOB_ASHMEM_MUTABLE = 2,
};

// Takes a reference, on behalf of `who`, on the object contained in the
// flattened representation `obj`. For file descriptors that the Parcel owns
// (cookie != 0), the ashmem region size (if any) is added to *outAshmemSize
// so the Parcel can account for the memory it keeps alive.
void acquire_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
{
    switch (obj.type) {
        case BINDER_TYPE_BINDER:
            // Local strong binder: the IBinder lives in this process,
            // stashed in the cookie field.
            if (obj.binder) {
                LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
            }
            return;
        case BINDER_TYPE_WEAK_BINDER:
            // Local weak binder: obj.binder holds the weakref_type.
            if (obj.binder)
                reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who);
            return;
        case BINDER_TYPE_HANDLE: {
            // Remote strong binder: resolve the handle to a proxy.
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != NULL) {
                LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
                b->incStrong(who);
            }
            return;
        }
        case BINDER_TYPE_WEAK_HANDLE: {
            const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
            if (b != NULL) b.get_refs()->incWeak(who);
            return;
        }
        case BINDER_TYPE_FD: {
            // cookie != 0 means this Parcel owns the fd (see
            // writeFileDescriptor); only owned fds are accounted.
            if (obj.cookie != 0) {
                if (outAshmemSize != NULL) {
                    // If we own an ashmem fd, keep track of how much memory it refers to.
                    int size = ashmem_get_size_region(obj.handle);
                    if (size > 0) {
                        *outAshmemSize += size;
                    }
                }
            }
            return;
        }
    }

    ALOGD("Invalid object type 0x%08x", obj.type);
}

// Backwards-compatible overload without ashmem accounting.
void acquire_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who)
{
    acquire_object(proc, obj, who, NULL);
}

// Exact inverse of acquire_object(): drops the reference taken on behalf of
// `who`, closes owned file descriptors, and subtracts any owned ashmem
// region size from *outAshmemSize (when non-NULL).
static void release_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
{
    switch (obj.type) {
        case BINDER_TYPE_BINDER:
            if (obj.binder) {
                LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
            }
            return;
        case BINDER_TYPE_WEAK_BINDER:
            if (obj.binder)
                reinterpret_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who);
            return;
        case BINDER_TYPE_HANDLE: {
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != NULL) {
                LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
                b->decStrong(who);
            }
            return;
        }
        case BINDER_TYPE_WEAK_HANDLE: {
            const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
            if (b != NULL) b.get_refs()->decWeak(who);
            return;
        }
        case BINDER_TYPE_FD: {
            if (obj.cookie != 0) { // owned
                if (outAshmemSize != NULL) {
                    int size = ashmem_get_size_region(obj.handle);
                    if (size > 0) {
                        *outAshmemSize -= size;
                    }
                }

                close(obj.handle);
            }
            return;
        }
    }

    ALOGE("Invalid object type 0x%08x", obj.type);
}

// Backwards-compatible overload without ashmem accounting.
void release_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who)
{
    release_object(proc, obj, who, NULL);
}

// Records the flattened object in the Parcel's object table; no null
// metadata is written for plain binder objects.
inline static status_t finish_flatten_binder(
    const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
{
    return out->writeObject(flat, false);
}

status_t flatten_binder(const
sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    // Default flags: minimum scheduling priority 0x7f, and this process
    // accepts file descriptors in transactions.
    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        IBinder *local = binder->localBinder();
        if (!local) {
            // Remote binder: flatten as a handle taken from the proxy.
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                ALOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            // Local binder: pass the weakref pointer and the IBinder itself.
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        // NULL binders are flattened as an all-zero local binder object.
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
    }

    return finish_flatten_binder(binder, obj, out);
}

// Weak-pointer variant: the binder must be promoted to probe whether it is
// local or remote; an unpromotable weak reference is flattened as NULL.
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const wp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        sp<IBinder> real = binder.promote();
        if (real != NULL) {
            IBinder *local = real->localBinder();
            if (!local) {
                BpBinder *proxy = real->remoteBinder();
                if (proxy == NULL) {
                    ALOGE("null proxy");
                }
                const int32_t handle = proxy ? proxy->handle() : 0;
                obj.type = BINDER_TYPE_WEAK_HANDLE;
                obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
                obj.handle = handle;
                obj.cookie = 0;
            } else {
                obj.type = BINDER_TYPE_WEAK_BINDER;
                obj.binder = reinterpret_cast<uintptr_t>(binder.get_refs());
                obj.cookie = reinterpret_cast<uintptr_t>(binder.unsafe_get());
            }
            return finish_flatten_binder(real, obj, out);
        }

        // XXX How to deal?  In order to flatten the given binder,
        // we need to probe it for information, which requires a primary
        // reference...  but we don't have one.
        //
        // The OpenBinder implementation uses a dynamic_cast<> here,
        // but we can't do that with the different reference counting
        // implementation we are using.
        ALOGE("Unable to unflatten Binder weak reference!");
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
        return finish_flatten_binder(NULL, obj, out);

    } else {
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
        return finish_flatten_binder(NULL, obj, out);
    }
}

// No extra work is needed after unflattening; kept as a hook to mirror
// finish_flatten_binder.
inline static status_t finish_unflatten_binder(
    BpBinder* /*proxy*/, const flat_binder_object& /*flat*/,
    const Parcel& /*in*/)
{
    return NO_ERROR;
}

// Reads a strong binder from `in`. Local objects come back as the raw
// IBinder stored in the cookie; remote ones are resolved to a proxy.
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);

    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                *out = reinterpret_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
                *out = proc->getStrongProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}

// Weak-pointer variant; accepts both strong and weak wire types.
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, wp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);

    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                *out = reinterpret_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_WEAK_BINDER:
                if (flat->binder != 0) {
                    // Rebuild the weak pointer from the object/weakref pair
                    // that flatten_binder() stored.
                    out->set_object_and_refs(
                        reinterpret_cast<IBinder*>(flat->cookie),
                        reinterpret_cast<RefBase::weakref_type*>(flat->binder));
                } else {
                    *out = NULL;
                }
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
            case BINDER_TYPE_WEAK_HANDLE:
                *out = proc->getWeakProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->unsafe_get()), *flat, in);
        }
    }
    return BAD_TYPE;
}

// ---------------------------------------------------------------------------

Parcel::Parcel()
{
    LOG_ALLOC("Parcel %p: constructing", this);
    initState();
}

Parcel::~Parcel()
{
    freeDataNoInit();
    LOG_ALLOC("Parcel %p: destroyed", this);
}

// Total bytes currently allocated by all Parcels in this process.
size_t Parcel::getGlobalAllocSize() {
    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
    size_t size = gParcelGlobalAllocSize;
    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
    return size;
}

// Number of live Parcel allocations in this process.
size_t Parcel::getGlobalAllocCount() {
    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
    size_t count = gParcelGlobalAllocCount;
    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
    return count;
}

const uint8_t* Parcel::data() const
{
    return mData;
}

// Logical size: whichever of write-size or current position is larger.
size_t Parcel::dataSize() const
{
    return (mDataSize > mDataPos ? mDataSize : mDataPos);
}

// Bytes remaining between the read position and the end of data.
size_t Parcel::dataAvail() const
{
    size_t result = dataSize() - dataPosition();
    if (result > INT32_MAX) {
        abort();
    }
    return result;
}

size_t Parcel::dataPosition() const
{
    return mDataPos;
}

size_t Parcel::dataCapacity() const
{
    return mDataCapacity;
}

status_t Parcel::setDataSize(size_t size)
{
    if (size > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t err;
    err = continueWrite(size);
    if (err == NO_ERROR) {
        mDataSize = size;
        ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
    }
    return err;
}

void Parcel::setDataPosition(size_t pos) const
{
    if (pos > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        abort();
    }

    mDataPos = pos;
    // Invalidate the cached object-table hint used by readObject().
    mNextObjectHint = 0;
}

status_t Parcel::setDataCapacity(size_t size)
{
    if (size > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // Capacity can only grow here; shrinking is a no-op.
    if (size > mDataCapacity) return continueWrite(size);
    return NO_ERROR;
}

status_t Parcel::setData(const uint8_t* buffer, size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t err = restartWrite(len);
    if (err == NO_ERROR) {
        memcpy(const_cast<uint8_t*>(data()), buffer, len);
        mDataSize = len;
        // Raw bytes may encode fd objects we don't know about.
        mFdsKnown = false;
    }
    return err;
}

// Appends [offset, offset+len) of `parcel` to this Parcel, including any
// binder objects that fall entirely inside the range. References are
// re-acquired on behalf of this Parcel, and fd objects are dup()ed so each
// Parcel owns its own descriptor.
status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len)
{
    const sp<ProcessState> proc(ProcessState::self());
    status_t err;
    const uint8_t *data = parcel->mData;
    const binder_size_t *objects = parcel->mObjects;
    size_t size = parcel->mObjectsSize;
    int startPos = mDataPos;
    // firstIndex/lastIndex bracket the source objects inside the copied
    // range; -1/-2 yields numObjects == 0 when none are found.
    int firstIndex = -1, lastIndex = -2;

    if (len == 0) {
        return NO_ERROR;
    }

    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // range checks against the source parcel size
    if ((offset > parcel->mDataSize)
            || (len > parcel->mDataSize)
            || (offset + len > parcel->mDataSize)) {
        return BAD_VALUE;
    }

    // Count objects in range
    for (int i = 0; i < (int) size; i++) {
        size_t off = objects[i];
        if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) {
            if (firstIndex == -1) {
                firstIndex = i;
            }
            lastIndex = i;
        }
    }
    int numObjects = lastIndex - firstIndex + 1;

    if ((mDataSize+len) > mDataCapacity) {
        // grow data
        err = growData(len);
        if (err != NO_ERROR) {
            return err;
        }
    }

    // append data
    memcpy(mData + mDataPos, data + offset, len);
    mDataPos += len;
    mDataSize += len;

    err = NO_ERROR;

    if (numObjects > 0) {
        // grow objects
        if (mObjectsCapacity < mObjectsSize + numObjects) {
            size_t newSize = ((mObjectsSize + numObjects)*3)/2;
            if (newSize < mObjectsSize) return NO_MEMORY;   // overflow
            // NOTE: this inner `objects` intentionally shadows the source
            // parcel's table only within this scope.
            binder_size_t *objects =
                (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
            if (objects == (binder_size_t*)0) {
                return NO_MEMORY;
            }
            mObjects = objects;
            mObjectsCapacity = newSize;
        }

        // append and acquire objects
        int idx = mObjectsSize;
        for (int i = firstIndex; i <= lastIndex; i++) {
            // Translate the source offset into this Parcel's coordinates.
            size_t off = objects[i] - offset + startPos;
            mObjects[idx++] = off;
            mObjectsSize++;

            flat_binder_object* flat
                = reinterpret_cast<flat_binder_object*>(mData + off);
            acquire_object(proc, *flat, this, &mOpenAshmemSize);

            if (flat->type == BINDER_TYPE_FD) {
                // If this is a file descriptor, we need to dup it so the
                // new Parcel now owns its own fd, and can declare that we
                // officially know we have fds.
                flat->handle = dup(flat->handle);
                // cookie = 1 marks the fd as owned by this Parcel.
                flat->cookie = 1;
                mHasFds = mFdsKnown = true;
                if (!mAllowFds) {
                    err = FDS_NOT_ALLOWED;
                }
            }
        }
    }

    return err;
}

bool Parcel::allowFds() const
{
    return mAllowFds;
}

// Temporarily disallow fds (if requested); returns the previous value so
// the caller can restore it via restoreAllowFds().
bool Parcel::pushAllowFds(bool allowFds)
{
    const bool origValue = mAllowFds;
    if (!allowFds) {
        mAllowFds = false;
    }
    return origValue;
}

void Parcel::restoreAllowFds(bool lastValue)
{
    mAllowFds = lastValue;
}

bool Parcel::hasFileDescriptors() const
{
    if (!mFdsKnown) {
        // Lazily rescan the object table for fd objects.
        scanForFds();
    }
    return mHasFds;
}

// Write RPC headers.  (previously just the interface token)
status_t Parcel::writeInterfaceToken(const String16& interface)
{
    writeInt32(IPCThreadState::self()->getStrictModePolicy() |
               STRICT_MODE_PENALTY_GATHER);
    // currently the interface identification token is just its name as a string
    return writeString16(interface);
}

bool Parcel::checkInterface(IBinder* binder) const
{
    return enforceInterface(binder->getInterfaceDescriptor());
}

// Consumes the RPC header written by writeInterfaceToken() and verifies the
// interface name matches. Also applies the caller's StrictMode policy to
// this thread (disabled for one-way calls).
bool Parcel::enforceInterface(const String16& interface,
                              IPCThreadState* threadState) const
{
    int32_t strictPolicy = readInt32();
    if (threadState == NULL) {
        threadState = IPCThreadState::self();
    }
    if ((threadState->getLastTransactionBinderFlags() &
         IBinder::FLAG_ONEWAY) != 0) {
        // For one-way calls, the callee is running entirely
        // disconnected from the caller, so disable StrictMode entirely.
        // Not only does disk/network usage not impact the caller, but
        // there's no way to communicate back any violations anyway.
        threadState->setStrictModePolicy(0);
    } else {
        threadState->setStrictModePolicy(strictPolicy);
    }
    const String16 str(readString16());
    if (str == interface) {
        return true;
    } else {
        ALOGW("**** enforceInterface() expected '%s' but read '%s'",
              String8(interface).string(), String8(str).string());
        return false;
    }
}

const binder_size_t* Parcel::objects() const
{
    return mObjects;
}

size_t Parcel::objectsCount() const
{
    return mObjectsSize;
}

status_t Parcel::errorCheck() const
{
    return mError;
}

void Parcel::setError(status_t err)
{
    mError = err;
}

// Advances the write position by len, extending the logical data size if
// the position moved past it.
status_t Parcel::finishWrite(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    //printf("Finish write of %d\n", len);
    mDataPos += len;
    ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
    if (mDataPos > mDataSize) {
        mDataSize = mDataPos;
        ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
    }
    //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
    return NO_ERROR;
}

// Writes len raw bytes WITHOUT rounding the write position up to a 4-byte
// boundary (unlike write()/writeInplace()).
status_t Parcel::writeUnpadded(const void* data, size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
661 return BAD_VALUE; 662 } 663 664 size_t end = mDataPos + len; 665 if (end < mDataPos) { 666 // integer overflow 667 return BAD_VALUE; 668 } 669 670 if (end <= mDataCapacity) { 671restart_write: 672 memcpy(mData+mDataPos, data, len); 673 return finishWrite(len); 674 } 675 676 status_t err = growData(len); 677 if (err == NO_ERROR) goto restart_write; 678 return err; 679} 680 681status_t Parcel::write(const void* data, size_t len) 682{ 683 if (len > INT32_MAX) { 684 // don't accept size_t values which may have come from an 685 // inadvertent conversion from a negative int. 686 return BAD_VALUE; 687 } 688 689 void* const d = writeInplace(len); 690 if (d) { 691 memcpy(d, data, len); 692 return NO_ERROR; 693 } 694 return mError; 695} 696 697void* Parcel::writeInplace(size_t len) 698{ 699 if (len > INT32_MAX) { 700 // don't accept size_t values which may have come from an 701 // inadvertent conversion from a negative int. 702 return NULL; 703 } 704 705 const size_t padded = pad_size(len); 706 707 // sanity check for integer overflow 708 if (mDataPos+padded < mDataPos) { 709 return NULL; 710 } 711 712 if ((mDataPos+padded) <= mDataCapacity) { 713restart_write: 714 //printf("Writing %ld bytes, padded to %ld\n", len, padded); 715 uint8_t* const data = mData+mDataPos; 716 717 // Need to pad at end? 
718 if (padded != len) { 719#if BYTE_ORDER == BIG_ENDIAN 720 static const uint32_t mask[4] = { 721 0x00000000, 0xffffff00, 0xffff0000, 0xff000000 722 }; 723#endif 724#if BYTE_ORDER == LITTLE_ENDIAN 725 static const uint32_t mask[4] = { 726 0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff 727 }; 728#endif 729 //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len], 730 // *reinterpret_cast<void**>(data+padded-4)); 731 *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len]; 732 } 733 734 finishWrite(padded); 735 return data; 736 } 737 738 status_t err = growData(padded); 739 if (err == NO_ERROR) goto restart_write; 740 return NULL; 741} 742 743status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<int8_t>>& val) 744{ 745 if (!val) { 746 return writeInt32(-1); 747 } 748 749 return writeByteVector(*val); 750} 751 752status_t Parcel::writeByteVector(const std::vector<int8_t>& val) 753{ 754 status_t status; 755 if (val.size() > std::numeric_limits<int32_t>::max()) { 756 status = BAD_VALUE; 757 return status; 758 } 759 760 status = writeInt32(val.size()); 761 if (status != OK) { 762 return status; 763 } 764 765 void* data = writeInplace(val.size()); 766 if (!data) { 767 status = BAD_VALUE; 768 return status; 769 } 770 771 memcpy(data, val.data(), val.size()); 772 return status; 773} 774 775status_t Parcel::writeInt32Vector(const std::vector<int32_t>& val) 776{ 777 return writeTypedVector(val, &Parcel::writeInt32); 778} 779 780status_t Parcel::writeInt32Vector(const std::unique_ptr<std::vector<int32_t>>& val) 781{ 782 return writeNullableTypedVector(val, &Parcel::writeInt32); 783} 784 785status_t Parcel::writeInt64Vector(const std::vector<int64_t>& val) 786{ 787 return writeTypedVector(val, &Parcel::writeInt64); 788} 789 790status_t Parcel::writeInt64Vector(const std::unique_ptr<std::vector<int64_t>>& val) 791{ 792 return writeNullableTypedVector(val, &Parcel::writeInt64); 793} 794 795status_t Parcel::writeFloatVector(const std::vector<float>& 
val) 796{ 797 return writeTypedVector(val, &Parcel::writeFloat); 798} 799 800status_t Parcel::writeFloatVector(const std::unique_ptr<std::vector<float>>& val) 801{ 802 return writeNullableTypedVector(val, &Parcel::writeFloat); 803} 804 805status_t Parcel::writeDoubleVector(const std::vector<double>& val) 806{ 807 return writeTypedVector(val, &Parcel::writeDouble); 808} 809 810status_t Parcel::writeDoubleVector(const std::unique_ptr<std::vector<double>>& val) 811{ 812 return writeNullableTypedVector(val, &Parcel::writeDouble); 813} 814 815status_t Parcel::writeBoolVector(const std::vector<bool>& val) 816{ 817 return writeTypedVector(val, &Parcel::writeBool); 818} 819 820status_t Parcel::writeBoolVector(const std::unique_ptr<std::vector<bool>>& val) 821{ 822 return writeNullableTypedVector(val, &Parcel::writeBool); 823} 824 825status_t Parcel::writeCharVector(const std::vector<char16_t>& val) 826{ 827 return writeTypedVector(val, &Parcel::writeChar); 828} 829 830status_t Parcel::writeCharVector(const std::unique_ptr<std::vector<char16_t>>& val) 831{ 832 return writeNullableTypedVector(val, &Parcel::writeChar); 833} 834 835status_t Parcel::writeString16Vector(const std::vector<String16>& val) 836{ 837 return writeTypedVector(val, &Parcel::writeString16); 838} 839 840status_t Parcel::writeString16Vector( 841 const std::unique_ptr<std::vector<std::unique_ptr<String16>>>& val) 842{ 843 return writeNullableTypedVector(val, &Parcel::writeString16); 844} 845 846status_t Parcel::writeInt32(int32_t val) 847{ 848 return writeAligned(val); 849} 850 851status_t Parcel::writeUint32(uint32_t val) 852{ 853 return writeAligned(val); 854} 855 856status_t Parcel::writeInt32Array(size_t len, const int32_t *val) { 857 if (len > INT32_MAX) { 858 // don't accept size_t values which may have come from an 859 // inadvertent conversion from a negative int. 
860 return BAD_VALUE; 861 } 862 863 if (!val) { 864 return writeInt32(-1); 865 } 866 status_t ret = writeInt32(static_cast<uint32_t>(len)); 867 if (ret == NO_ERROR) { 868 ret = write(val, len * sizeof(*val)); 869 } 870 return ret; 871} 872status_t Parcel::writeByteArray(size_t len, const uint8_t *val) { 873 if (len > INT32_MAX) { 874 // don't accept size_t values which may have come from an 875 // inadvertent conversion from a negative int. 876 return BAD_VALUE; 877 } 878 879 if (!val) { 880 return writeInt32(-1); 881 } 882 status_t ret = writeInt32(static_cast<uint32_t>(len)); 883 if (ret == NO_ERROR) { 884 ret = write(val, len * sizeof(*val)); 885 } 886 return ret; 887} 888 889status_t Parcel::writeBool(bool val) 890{ 891 return writeInt32(int32_t(val)); 892} 893 894status_t Parcel::writeChar(char16_t val) 895{ 896 return writeInt32(int32_t(val)); 897} 898 899status_t Parcel::writeByte(int8_t val) 900{ 901 return writeInt32(int32_t(val)); 902} 903 904status_t Parcel::writeInt64(int64_t val) 905{ 906 return writeAligned(val); 907} 908 909status_t Parcel::writeUint64(uint64_t val) 910{ 911 return writeAligned(val); 912} 913 914status_t Parcel::writePointer(uintptr_t val) 915{ 916 return writeAligned<binder_uintptr_t>(val); 917} 918 919status_t Parcel::writeFloat(float val) 920{ 921 return writeAligned(val); 922} 923 924#if defined(__mips__) && defined(__mips_hard_float) 925 926status_t Parcel::writeDouble(double val) 927{ 928 union { 929 double d; 930 unsigned long long ll; 931 } u; 932 u.d = val; 933 return writeAligned(u.ll); 934} 935 936#else 937 938status_t Parcel::writeDouble(double val) 939{ 940 return writeAligned(val); 941} 942 943#endif 944 945status_t Parcel::writeCString(const char* str) 946{ 947 return write(str, strlen(str)+1); 948} 949 950status_t Parcel::writeString8(const String8& str) 951{ 952 status_t err = writeInt32(str.bytes()); 953 // only write string if its length is more than zero characters, 954 // as readString8 will only read if the 
    // length field is non-zero.
    // this is slightly different from how writeString16 works.
    if (str.bytes() > 0 && err == NO_ERROR) {
        // +1 to include the NUL terminator in the payload.
        err = write(str.string(), str.bytes()+1);
    }
    return err;
}

// Nullable variant: a null string is encoded as length -1.
status_t Parcel::writeString16(const std::unique_ptr<String16>& str)
{
    if (!str) {
        return writeInt32(-1);
    }

    return writeString16(*str);
}

status_t Parcel::writeString16(const String16& str)
{
    return writeString16(str.string(), str.size());
}

// Writes a length prefix (in char16_t units) followed by the characters and
// an explicit NUL terminator.
status_t Parcel::writeString16(const char16_t* str, size_t len)
{
    if (str == NULL) return writeInt32(-1);

    status_t err = writeInt32(len);
    if (err == NO_ERROR) {
        len *= sizeof(char16_t);
        uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
        if (data) {
            memcpy(data, str, len);
            *reinterpret_cast<char16_t*>(data+len) = 0;
            return NO_ERROR;
        }
        err = mError;
    }
    return err;
}

status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

status_t Parcel::writeStrongBinderVector(const std::vector<sp<IBinder>>& val)
{
    return writeTypedVector(val, &Parcel::writeStrongBinder);
}

status_t Parcel::writeStrongBinderVector(const std::unique_ptr<std::vector<sp<IBinder>>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeStrongBinder);
}

status_t Parcel::readStrongBinderVector(std::unique_ptr<std::vector<sp<IBinder>>>* val) const {
    return readNullableTypedVector(val, &Parcel::readStrongBinder);
}

status_t Parcel::readStrongBinderVector(std::vector<sp<IBinder>>* val) const {
    return readTypedVector(val, &Parcel::readStrongBinder);
}

status_t Parcel::writeWeakBinder(const wp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

// A NULL parcelable is encoded as a 0 "present" flag; otherwise a 1 flag
// followed by the parcelable's own serialization.
status_t Parcel::writeRawNullableParcelable(const Parcelable* parcelable) {
    if (!parcelable) {
        return writeInt32(0);
    }

    return writeParcelable(*parcelable);
}

status_t Parcel::writeParcelable(const Parcelable& parcelable) {
    status_t status = writeInt32(1);    // parcelable is not null.
    if (status != OK) {
        return status;
    }
    return parcelable.writeToParcel(this);
}

// Writes a native_handle as: numFds, numInts, each fd (dup()ed so the
// Parcel owns its copy), then the raw int payload.
status_t Parcel::writeNativeHandle(const native_handle* handle)
{
    if (!handle || handle->version != sizeof(native_handle))
        return BAD_TYPE;

    status_t err;
    err = writeInt32(handle->numFds);
    if (err != NO_ERROR) return err;

    err = writeInt32(handle->numInts);
    if (err != NO_ERROR) return err;

    for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
        err = writeDupFileDescriptor(handle->data[i]);

    if (err != NO_ERROR) {
        ALOGD("write native handle, write dup fd failed");
        return err;
    }
    err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
    return err;
}

// Writes fd as a BINDER_TYPE_FD object. When takeOwnership is true the
// cookie is set to 1, marking the fd as owned (it will be closed when the
// Parcel releases its objects).
status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership)
{
    flat_binder_object obj;
    obj.type = BINDER_TYPE_FD;
    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
    obj.handle = fd;
    obj.cookie = takeOwnership ?
1 : 0;
    return writeObject(obj, true);
}

// dup()s fd and writes the duplicate with ownership transferred to the
// Parcel; the duplicate is closed on failure so no descriptor leaks.
status_t Parcel::writeDupFileDescriptor(int fd)
{
    int dupFd = dup(fd);
    if (dupFd < 0) {
        return -errno;
    }
    status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
    if (err != OK) {
        close(dupFd);
    }
    return err;
}

status_t Parcel::writeUniqueFileDescriptor(const ScopedFd& fd) {
    return writeDupFileDescriptor(fd.get());
}

status_t Parcel::writeUniqueFileDescriptorVector(const std::vector<ScopedFd>& val) {
    return writeTypedVector(val, &Parcel::writeUniqueFileDescriptor);
}

status_t Parcel::writeUniqueFileDescriptorVector(const std::unique_ptr<std::vector<ScopedFd>>& val) {
    return writeNullableTypedVector(val, &Parcel::writeUniqueFileDescriptor);
}

// Reserves space for a blob of `len` bytes and returns a mapping in
// *outBlob. Small blobs (or when fds are not allowed) are stored in-place;
// larger ones are backed by an ashmem region whose fd is written into the
// Parcel. The error paths below unwind the mmap/fd in reverse order.
status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t status;
    if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) {
        ALOGV("writeBlob: write in place");
        status = writeInt32(BLOB_INPLACE);
        if (status) return status;

        void* ptr = writeInplace(len);
        if (!ptr) return NO_MEMORY;

        outBlob->init(-1, ptr, len, false);
        return NO_ERROR;
    }

    ALOGV("writeBlob: write to ashmem");
    int fd = ashmem_create_region("Parcel Blob", len);
    if (fd < 0) return NO_MEMORY;

    int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
    if (result < 0) {
        status = result;
    } else {
        void* ptr = ::mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (ptr == MAP_FAILED) {
            status = -errno;
        } else {
            // For immutable blobs, drop write access from the region before
            // sharing it; our own mapping above stays writable.
            if (!mutableCopy) {
                result = ashmem_set_prot_region(fd, PROT_READ);
            }
            if (result < 0) {
                status = result;
            } else {
                status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE);
                if (!status) {
                    status = writeFileDescriptor(fd, true /*takeOwnership*/);
                    if (!status) {
                        outBlob->init(fd, ptr, len, mutableCopy);
                        return NO_ERROR;
                    }
                }
            }
        }
        ::munmap(ptr, len);
    }
    ::close(fd);
    return status;
}

status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd)
{
    // Must match up with what's done in writeBlob.
    if (!mAllowFds) return FDS_NOT_ALLOWED;
    status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE);
    if (status) return status;
    return writeDupFileDescriptor(fd);
}

// Flattens `val` as: length, fd count, padded payload, then each fd
// (dup()ed into the Parcel).
status_t Parcel::write(const FlattenableHelperInterface& val)
{
    status_t err;

    // size if needed
    const size_t len = val.getFlattenedSize();
    const size_t fd_count = val.getFdCount();

    if ((len > INT32_MAX) || (fd_count > INT32_MAX)) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
1172 return BAD_VALUE; 1173 } 1174 1175 err = this->writeInt32(len); 1176 if (err) return err; 1177 1178 err = this->writeInt32(fd_count); 1179 if (err) return err; 1180 1181 // payload 1182 void* const buf = this->writeInplace(pad_size(len)); 1183 if (buf == NULL) 1184 return BAD_VALUE; 1185 1186 int* fds = NULL; 1187 if (fd_count) { 1188 fds = new int[fd_count]; 1189 } 1190 1191 err = val.flatten(buf, len, fds, fd_count); 1192 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) { 1193 err = this->writeDupFileDescriptor( fds[i] ); 1194 } 1195 1196 if (fd_count) { 1197 delete [] fds; 1198 } 1199 1200 return err; 1201} 1202 1203status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData) 1204{ 1205 const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity; 1206 const bool enoughObjects = mObjectsSize < mObjectsCapacity; 1207 if (enoughData && enoughObjects) { 1208restart_write: 1209 *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val; 1210 1211 // remember if it's a file descriptor 1212 if (val.type == BINDER_TYPE_FD) { 1213 if (!mAllowFds) { 1214 // fail before modifying our object index 1215 return FDS_NOT_ALLOWED; 1216 } 1217 mHasFds = mFdsKnown = true; 1218 } 1219 1220 // Need to write meta-data? 
        if (nullMetaData || val.binder != 0) {
            mObjects[mObjectsSize] = mDataPos;
            // Hold a reference on the object for as long as it lives in
            // this parcel.
            acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize);
            mObjectsSize++;
        }

        return finishWrite(sizeof(flat_binder_object));
    }

    if (!enoughData) {
        const status_t err = growData(sizeof(val));
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) {
        // Grow the object offset array by roughly 1.5x.
        size_t newSize = ((mObjectsSize+2)*3)/2;
        if (newSize < mObjectsSize) return NO_MEMORY;   // overflow
        binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
        if (objects == NULL) return NO_MEMORY;
        mObjects = objects;
        mObjectsCapacity = newSize;
    }

    goto restart_write;
}

// Writes the "no exception" header that readExceptionCode() expects.
status_t Parcel::writeNoException()
{
    binder::Status status;
    return status.writeToParcel(this);
}

void Parcel::remove(size_t /*start*/, size_t /*amt*/)
{
    LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
}

// Copies 'len' bytes out of the parcel, advancing the read position by
// the 4-byte-padded length.
status_t Parcel::read(void* outData, size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // The first and last comparisons guard against mDataPos+pad_size(len)
    // wrapping around.
    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        memcpy(outData, mData+mDataPos, len);
        mDataPos += pad_size(len);
        ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
        return NO_ERROR;
    }
    return NOT_ENOUGH_DATA;
}

// Returns a pointer directly into the parcel's buffer for 'len' bytes and
// advances the read position by the padded length; NULL on overflow or if
// not enough data remains.
const void* Parcel::readInplace(size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return NULL;
    }

    // The first and last comparisons guard against mDataPos+pad_size(len)
    // wrapping around.
    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        const void* data = mData+mDataPos;
        mDataPos += pad_size(len);
        ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
        return data;
    }
    return NULL;
}

// Reads a T whose size is already 4-byte aligned (checked at compile time).
template<class T>
status_t Parcel::readAligned(T *pArg) const {
    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));

    if ((mDataPos+sizeof(T)) <= mDataSize) {
        const void* data = mData+mDataPos;
        mDataPos += sizeof(T);
        *pArg = *reinterpret_cast<const T*>(data);
        return NO_ERROR;
    } else {
        return NOT_ENOUGH_DATA;
    }
}

// Value-returning variant: yields 0 when not enough data remains.
template<class T>
T Parcel::readAligned() const {
    T result;
    if (readAligned(&result) != NO_ERROR) {
        result = 0;
    }

    return result;
}

// Writes a T whose size is already 4-byte aligned, growing the data
// buffer and retrying if needed.
template<class T>
status_t Parcel::writeAligned(T val) {
    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));

    if ((mDataPos+sizeof(val)) <= mDataCapacity) {
restart_write:
        *reinterpret_cast<T*>(mData+mDataPos) = val;
        return finishWrite(sizeof(val));
    }

    status_t err = growData(sizeof(val));
    if (err == NO_ERROR) goto restart_write;
    return err;
}

// Reads a length-prefixed byte vector; a negative length means "null" and
// fails with UNEXPECTED_NULL, and the length is bounded by the remaining
// parcel data before allocating.
status_t Parcel::readByteVector(std::vector<int8_t>* val) const {
    val->clear();

    int32_t size;
    status_t status = readInt32(&size);

    if (status != OK) {
        return status;
    }

    if (size < 0) {
        status = UNEXPECTED_NULL;
        return status;
    }
    if (size_t(size) > dataAvail()) {
        status = BAD_VALUE;
        return status;
    }

    const void* data = readInplace(size);
    if (!data) {
        status = BAD_VALUE;
        return status;
    }
    val->resize(size);
    memcpy(val->data(), data, size);

    return status;
}

// Nullable variant: a negative length leaves *val reset (null).
status_t
Parcel::readByteVector(std::unique_ptr<std::vector<int8_t>>* val) const {
    const int32_t start = dataPosition();
    int32_t size;
    status_t status = readInt32(&size);
    val->reset();

    if (status != OK || size < 0) {
        return status;
    }

    // Rewind over the size we peeked and delegate to the non-nullable
    // overload.
    setDataPosition(start);
    val->reset(new std::vector<int8_t>());

    status = readByteVector(val->get());

    if (status != OK) {
        val->reset();
    }

    return status;
}

status_t Parcel::readInt32Vector(std::unique_ptr<std::vector<int32_t>>* val) const {
    return readNullableTypedVector(val, &Parcel::readInt32);
}

status_t Parcel::readInt32Vector(std::vector<int32_t>* val) const {
    return readTypedVector(val, &Parcel::readInt32);
}

status_t Parcel::readInt64Vector(std::unique_ptr<std::vector<int64_t>>* val) const {
    return readNullableTypedVector(val, &Parcel::readInt64);
}

status_t Parcel::readInt64Vector(std::vector<int64_t>* val) const {
    return readTypedVector(val, &Parcel::readInt64);
}

status_t Parcel::readFloatVector(std::unique_ptr<std::vector<float>>* val) const {
    return readNullableTypedVector(val, &Parcel::readFloat);
}

status_t Parcel::readFloatVector(std::vector<float>* val) const {
    return readTypedVector(val, &Parcel::readFloat);
}

status_t Parcel::readDoubleVector(std::unique_ptr<std::vector<double>>* val) const {
    return readNullableTypedVector(val, &Parcel::readDouble);
}

status_t Parcel::readDoubleVector(std::vector<double>* val) const {
    return readTypedVector(val, &Parcel::readDouble);
}

// Nullable bool vector: a negative length leaves *val reset (null).
status_t Parcel::readBoolVector(std::unique_ptr<std::vector<bool>>* val) const {
    const int32_t start = dataPosition();
    int32_t size;
    status_t status = readInt32(&size);
    val->reset();

    if (status != OK || size < 0) {
        return status;
    }

    setDataPosition(start);
    val->reset(new std::vector<bool>());

    status = readBoolVector(val->get());

    if (status != OK) {
        val->reset();
    }

    return status;
}

// Reads a length-prefixed bool vector (each bool stored as an int32).
status_t Parcel::readBoolVector(std::vector<bool>* val) const {
    int32_t size;
    status_t status = readInt32(&size);

    if (status != OK) {
        return status;
    }

    if (size < 0) {
        return UNEXPECTED_NULL;
    }

    val->resize(size);

    /* C++ bool handling means a vector of bools isn't necessarily addressable
     * (we might use individual bits)
     */
    bool data;
    for (int32_t i = 0; i < size; ++i) {
        status = readBool(&data);
        (*val)[i] = data;

        if (status != OK) {
            return status;
        }
    }

    return OK;
}

status_t Parcel::readCharVector(std::unique_ptr<std::vector<char16_t>>* val) const {
    return readNullableTypedVector(val, &Parcel::readChar);
}

status_t Parcel::readCharVector(std::vector<char16_t>* val) const {
    return readTypedVector(val, &Parcel::readChar);
}

status_t Parcel::readString16Vector(
        std::unique_ptr<std::vector<std::unique_ptr<String16>>>* val) const {
    return readNullableTypedVector(val, &Parcel::readString16);
}

status_t Parcel::readString16Vector(std::vector<String16>* val) const {
    return readTypedVector(val, &Parcel::readString16);
}


status_t Parcel::readInt32(int32_t *pArg) const
{
    return readAligned(pArg);
}

int32_t Parcel::readInt32() const
{
    return readAligned<int32_t>();
}

status_t Parcel::readUint32(uint32_t *pArg) const
{
    return readAligned(pArg);
}

uint32_t Parcel::readUint32() const
{
    return readAligned<uint32_t>();
}

status_t Parcel::readInt64(int64_t *pArg) const
{
    return readAligned(pArg);
}


int64_t Parcel::readInt64() const
{
    return
readAligned<int64_t>();
}

status_t Parcel::readUint64(uint64_t *pArg) const
{
    return readAligned(pArg);
}

uint64_t Parcel::readUint64() const
{
    return readAligned<uint64_t>();
}

// Pointers travel as binder_uintptr_t so 32/64-bit processes agree on the
// wire size.
status_t Parcel::readPointer(uintptr_t *pArg) const
{
    status_t ret;
    binder_uintptr_t ptr;
    ret = readAligned(&ptr);
    if (!ret)
        *pArg = ptr;
    return ret;
}

uintptr_t Parcel::readPointer() const
{
    return readAligned<binder_uintptr_t>();
}


status_t Parcel::readFloat(float *pArg) const
{
    return readAligned(pArg);
}


float Parcel::readFloat() const
{
    return readAligned<float>();
}

#if defined(__mips__) && defined(__mips_hard_float)

// On MIPS hard-float, doubles are read through an integer union to avoid
// unaligned FPU loads from the parcel buffer.
status_t Parcel::readDouble(double *pArg) const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = 0;
    status_t status;
    status = readAligned(&u.ll);
    *pArg = u.d;
    return status;
}

double Parcel::readDouble() const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.ll = readAligned<unsigned long long>();
    return u.d;
}

#else

status_t Parcel::readDouble(double *pArg) const
{
    return readAligned(pArg);
}

double Parcel::readDouble() const
{
    return readAligned<double>();
}

#endif

status_t Parcel::readIntPtr(intptr_t *pArg) const
{
    return readAligned(pArg);
}


intptr_t Parcel::readIntPtr() const
{
    return readAligned<intptr_t>();
}

// Bools are stored as int32 on the wire.
status_t Parcel::readBool(bool *pArg) const
{
    int32_t tmp;
    status_t ret = readInt32(&tmp);
    *pArg = (tmp != 0);
    return ret;
}

bool Parcel::readBool() const
{
    return readInt32() != 0;
}

// char16_t values are stored as int32 on the wire.
status_t Parcel::readChar(char16_t *pArg) const
{
    int32_t tmp;
    status_t ret = readInt32(&tmp);
    *pArg = char16_t(tmp);
    return ret;
}

char16_t Parcel::readChar() const
{
    return char16_t(readInt32());
}

// Bytes are stored as int32 on the wire.
status_t Parcel::readByte(int8_t *pArg) const
{
    int32_t tmp;
    status_t ret = readInt32(&tmp);
    *pArg = int8_t(tmp);
    return ret;
}

int8_t Parcel::readByte() const
{
    return int8_t(readInt32());
}

// Returns a pointer to a NUL-terminated C string inside the parcel's
// buffer, or NULL if no terminator lies within the remaining data.
const char* Parcel::readCString() const
{
    const size_t avail = mDataSize-mDataPos;
    if (avail > 0) {
        const char* str = reinterpret_cast<const char*>(mData+mDataPos);
        // is the string's trailing NUL within the parcel's valid bounds?
        const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
        if (eos) {
            const size_t len = eos - str;
            mDataPos += pad_size(len+1);
            ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
            return str;
        }
    }
    return NULL;
}

// Reads a length-prefixed 8-bit string; empty on error or null.
String8 Parcel::readString8() const
{
    int32_t size = readInt32();
    // watch for potential int overflow adding 1 for trailing NUL
    if (size > 0 && size < INT32_MAX) {
        const char* str = (const char*)readInplace(size+1);
        if (str) return String8(str, size);
    }
    return String8();
}

String16 Parcel::readString16() const
{
    size_t len;
    const char16_t* str = readString16Inplace(&len);
    if (str) return String16(str, len);
    ALOGE("Reading a NULL string not supported here.");
    return String16();
}

// Nullable variant: a negative length leaves *pArg reset (null).
status_t Parcel::readString16(std::unique_ptr<String16>* pArg) const
{
    const int32_t start = dataPosition();
    int32_t size;
    status_t status = readInt32(&size);
    pArg->reset();

    if (status != OK || size < 0) {
        return status;
    }

    setDataPosition(start);
    pArg->reset(new String16());

    status = readString16(pArg->get());

    if (status != OK) {
        pArg->reset();
    }

    return status;
}

status_t Parcel::readString16(String16* pArg) const
{
    size_t len;
    const char16_t* str = readString16Inplace(&len);
    if (str) {
        pArg->setTo(str, len);
        return 0;
    } else {
        *pArg = String16();
        return UNEXPECTED_NULL;
    }
}

// Returns a pointer to the string's characters inside the parcel buffer;
// the wire format carries a trailing NUL char16_t after 'size' chars.
const char16_t* Parcel::readString16Inplace(size_t* outLen) const
{
    int32_t size = readInt32();
    // watch for potential int overflow from size+1
    if (size >= 0 && size < INT32_MAX) {
        *outLen = size;
        const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
        if (str != NULL) {
            return str;
        }
    }
    *outLen = 0;
    return NULL;
}

status_t Parcel::readStrongBinder(sp<IBinder>* val) const
{
    return unflatten_binder(ProcessState::self(), *this, val);
}

sp<IBinder> Parcel::readStrongBinder() const
{
    sp<IBinder> val;
    readStrongBinder(&val);
    return val;
}

wp<IBinder> Parcel::readWeakBinder() const
{
    wp<IBinder> val;
    unflatten_binder(ProcessState::self(), *this, &val);
    return val;
}

// Reads the non-null marker written for a parcelable, then lets the
// parcelable deserialize itself from this parcel.
status_t Parcel::readParcelable(Parcelable* parcelable) const {
    int32_t have_parcelable = 0;
    status_t status = readInt32(&have_parcelable);
    if (status != OK) {
        return status;
    }
    if (!have_parcelable) {
        return UNEXPECTED_NULL;
    }
    return parcelable->readFromParcel(this);
}

int32_t Parcel::readExceptionCode() const
{
    binder::Status status;
    status.readFromParcel(*this);
    return status.exceptionCode();
}

// Reconstructs a native_handle from the parcel; the returned handle and
// its dup'ed fds belong to the caller. Returns 0 on any failure.
// NOTE(review): numFds/numInts come straight off the wire and are only
// validated by native_handle_create() -- confirm it enforces sane bounds.
native_handle* Parcel::readNativeHandle() const
{
    int numFds, numInts;
    status_t err;
    err = readInt32(&numFds);
    if (err != NO_ERROR) return 0;
    err = readInt32(&numInts);
    if (err != NO_ERROR) return 0;

    native_handle* h = native_handle_create(numFds, numInts);
    if (!h) {
        return 0;
    }

    for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
        h->data[i] = dup(readFileDescriptor());
        if (h->data[i] < 0) err = BAD_VALUE;
    }
    err = read(h->data + numFds, sizeof(int)*numInts);
    if (err != NO_ERROR) {
        native_handle_close(h);
        native_handle_delete(h);
        h = 0;
    }
    return h;
}


// Returns the fd stored at the current read position. The descriptor is
// still owned by the parcel; callers must dup() it to keep it.
int Parcel::readFileDescriptor() const
{
    const flat_binder_object* flat = readObject(true);

    if (flat && flat->type == BINDER_TYPE_FD) {
        return flat->handle;
    }

    return BAD_TYPE;
}

// Reads an fd into 'val' as an owned (dup'ed) descriptor.
status_t Parcel::readUniqueFileDescriptor(ScopedFd* val) const
{
    int got = readFileDescriptor();

    if (got == BAD_TYPE) {
        return BAD_TYPE;
    }

    val->reset(dup(got));

    if (val->get() < 0) {
        return BAD_VALUE;
    }

    return OK;
}


status_t Parcel::readUniqueFileDescriptorVector(std::unique_ptr<std::vector<ScopedFd>>* val) const {
    return readNullableTypedVector(val, &Parcel::readUniqueFileDescriptor);
}

status_t Parcel::readUniqueFileDescriptorVector(std::vector<ScopedFd>* val) const {
    return readTypedVector(val, &Parcel::readUniqueFileDescriptor);
}

// Reads a blob written by writeBlob(): either points at the in-place copy
// or maps the transported ashmem region.
status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
{
    int32_t blobType;
    status_t status = readInt32(&blobType);
    if (status) return status;

    if (blobType == BLOB_INPLACE) {
        ALOGV("readBlob: read in place");
        const void* ptr = readInplace(len);
        if (!ptr) return BAD_VALUE;

        outBlob->init(-1, const_cast<void*>(ptr), len, false);
        return NO_ERROR;
    }

    ALOGV("readBlob: read from ashmem");
    bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE);
    int fd = readFileDescriptor();
    if (fd == int(BAD_TYPE)) return BAD_VALUE;

    void* ptr = ::mmap(NULL, len, isMutable ?
PROT_READ | PROT_WRITE : PROT_READ, 1855 MAP_SHARED, fd, 0); 1856 if (ptr == MAP_FAILED) return NO_MEMORY; 1857 1858 outBlob->init(fd, ptr, len, isMutable); 1859 return NO_ERROR; 1860} 1861 1862status_t Parcel::read(FlattenableHelperInterface& val) const 1863{ 1864 // size 1865 const size_t len = this->readInt32(); 1866 const size_t fd_count = this->readInt32(); 1867 1868 if (len > INT32_MAX) { 1869 // don't accept size_t values which may have come from an 1870 // inadvertent conversion from a negative int. 1871 return BAD_VALUE; 1872 } 1873 1874 // payload 1875 void const* const buf = this->readInplace(pad_size(len)); 1876 if (buf == NULL) 1877 return BAD_VALUE; 1878 1879 int* fds = NULL; 1880 if (fd_count) { 1881 fds = new int[fd_count]; 1882 } 1883 1884 status_t err = NO_ERROR; 1885 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) { 1886 fds[i] = dup(this->readFileDescriptor()); 1887 if (fds[i] < 0) { 1888 err = BAD_VALUE; 1889 ALOGE("dup() failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s", 1890 i, fds[i], fd_count, strerror(errno)); 1891 } 1892 } 1893 1894 if (err == NO_ERROR) { 1895 err = val.unflatten(buf, len, fds, fd_count); 1896 } 1897 1898 if (fd_count) { 1899 delete [] fds; 1900 } 1901 1902 return err; 1903} 1904const flat_binder_object* Parcel::readObject(bool nullMetaData) const 1905{ 1906 const size_t DPOS = mDataPos; 1907 if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) { 1908 const flat_binder_object* obj 1909 = reinterpret_cast<const flat_binder_object*>(mData+DPOS); 1910 mDataPos = DPOS + sizeof(flat_binder_object); 1911 if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) { 1912 // When transferring a NULL object, we don't write it into 1913 // the object list, so we don't want to check for it when 1914 // reading. 1915 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos); 1916 return obj; 1917 } 1918 1919 // Ensure that this object is valid... 
        binder_size_t* const OBJS = mObjects;
        const size_t N = mObjectsSize;
        size_t opos = mNextObjectHint;

        if (N > 0) {
            ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
                 this, DPOS, opos);

            // Start at the current hint position, looking for an object at
            // the current data position.
            if (opos < N) {
                while (opos < (N-1) && OBJS[opos] < DPOS) {
                    opos++;
                }
            } else {
                opos = N-1;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with forward search",
                     this, DPOS, opos);
                mNextObjectHint = opos+1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }

            // Look backwards for it...
            while (opos > 0 && OBJS[opos] > DPOS) {
                opos--;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with backward search",
                     this, DPOS, opos);
                mNextObjectHint = opos+1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }
        }
        ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
             this, DPOS);
    }
    return NULL;
}

// Closes every file descriptor currently stored in the parcel's object
// list (used when discarding the parcel's contents).
void Parcel::closeFileDescriptors()
{
    size_t i = mObjectsSize;
    if (i > 0) {
        //ALOGI("Closing file descriptors for %zu objects...", i);
    }
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
        if (flat->type == BINDER_TYPE_FD) {
            //ALOGI("Closing fd: %ld", flat->handle);
            close(flat->handle);
        }
    }
}

// Raw buffer accessors used when handing the parcel to the binder driver.
uintptr_t Parcel::ipcData() const
{
    return reinterpret_cast<uintptr_t>(mData);
}

// The transaction covers everything written so far, whichever of
// size/position is larger.
size_t Parcel::ipcDataSize() const
{
    return (mDataSize > mDataPos ?
mDataSize : mDataPos);
}

uintptr_t Parcel::ipcObjects() const
{
    return reinterpret_cast<uintptr_t>(mObjects);
}

size_t Parcel::ipcObjectsCount() const
{
    return mObjectsSize;
}

// Adopts externally-owned transaction data (e.g. a received binder
// buffer). relFunc/relCookie are stored and later invoked to release the
// buffers; object offsets are checked to be ascending and non-overlapping,
// and the whole object list is dropped if they are not.
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
    const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
{
    binder_size_t minOffset = 0;
    freeDataNoInit();
    mError = NO_ERROR;
    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
    mDataPos = 0;
    ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
    mObjects = const_cast<binder_size_t*>(objects);
    mObjectsSize = mObjectsCapacity = objectsCount;
    mNextObjectHint = 0;
    mOwner = relFunc;
    mOwnerCookie = relCookie;
    for (size_t i = 0; i < mObjectsSize; i++) {
        binder_size_t offset = mObjects[i];
        if (offset < minOffset) {
            // Reject out-of-order or overlapping object offsets supplied
            // by the sender.
            ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
                  __func__, (uint64_t)offset, (uint64_t)minOffset);
            mObjectsSize = 0;
            break;
        }
        minOffset = offset + sizeof(flat_binder_object);
    }
    scanForFds();
}

// Debug dump: hex data followed by one line per flattened object.
void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
{
    to << "Parcel(";

    if (errorCheck() != NO_ERROR) {
        const status_t err = errorCheck();
        to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
    } else if (dataSize() > 0) {
        const uint8_t* DATA = data();
        to << indent << HexDump(DATA, dataSize()) << dedent;
        const binder_size_t* OBJS = objects();
        const size_t N = objectsCount();
        for (size_t i=0; i<N; i++) {
            const flat_binder_object* flat
                = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
            to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
                << TypeCode(flat->type & 0x7f7f7f00)
                << " = " << flat->binder;
        }
    } else {
        to << "NULL";
    }

    to << ")";
}

// Drops the references taken via acquire_object() for every object
// currently stored in the parcel.
void Parcel::releaseObjects()
{
    const sp<ProcessState> proc(ProcessState::self());
    size_t i = mObjectsSize;
    uint8_t* const data = mData;
    binder_size_t* const objects = mObjects;
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(data+objects[i]);
        release_object(proc, *flat, this, &mOpenAshmemSize);
    }
}

// Takes a reference on every object currently stored in the parcel.
void Parcel::acquireObjects()
{
    const sp<ProcessState> proc(ProcessState::self());
    size_t i = mObjectsSize;
    uint8_t* const data = mData;
    binder_size_t* const objects = mObjects;
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(data+objects[i]);
        acquire_object(proc, *flat, this, &mOpenAshmemSize);
    }
}

void Parcel::freeData()
{
    freeDataNoInit();
    initState();
}

// Releases the parcel's buffers without resetting member state: either
// hands them back to their external owner via mOwner, or releases object
// references and frees our own allocations, updating the process-wide
// allocation accounting.
void Parcel::freeDataNoInit()
{
    if (mOwner) {
        LOG_ALLOC("Parcel %p: freeing other owner data", this);
        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
    } else {
        LOG_ALLOC("Parcel %p: freeing allocated data", this);
        releaseObjects();
        if (mData) {
            LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
            pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
            // Clamp rather than underflow the global accounting.
            if (mDataCapacity <= gParcelGlobalAllocSize) {
                gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity;
            } else {
                gParcelGlobalAllocSize = 0;
            }
            if (gParcelGlobalAllocCount > 0) {
                gParcelGlobalAllocCount--;
            }
            pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
            free(mData);
        }
        if (mObjects) free(mObjects);
    }
}

// Grows the data buffer to roughly 1.5x of (current size + len).
status_t Parcel::growData(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    size_t newSize = ((mDataSize+len)*3)/2;
    return (newSize <= mDataSize)
            ? (status_t) NO_MEMORY       // overflow
            : continueWrite(newSize);
}

// Discards the current contents and reallocates the buffer to 'desired'
// bytes; all object/fd bookkeeping is reset.
status_t Parcel::restartWrite(size_t desired)
{
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (mOwner) {
        // We don't own the buffers: give them back, then allocate fresh.
        freeData();
        return continueWrite(desired);
    }

    uint8_t* data = (uint8_t*)realloc(mData, desired);
    if (!data && desired > mDataCapacity) {
        mError = NO_MEMORY;
        return NO_MEMORY;
    }

    releaseObjects();

    if (data) {
        LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocSize -= mDataCapacity;
        if (!mData) {
            gParcelGlobalAllocCount++;
        }
        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
        mData = data;
        mDataCapacity = desired;
    }

    mDataSize = mDataPos = 0;
    ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
    ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);

    free(mObjects);
    mObjects = NULL;
    mObjectsSize = mObjectsCapacity = 0;
    mNextObjectHint = 0;
    mHasFds = false;
    mFdsKnown = true;
    mAllowFds = true;

    return NO_ERROR;
}

// Resizes the buffer to 'desired' bytes while preserving existing
// contents (up to the new size) and object bookkeeping; takes ownership
// from an external owner if necessary.
status_t Parcel::continueWrite(size_t desired)
{
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // If shrinking, first adjust for any objects that appear
    // after the new data size.
    size_t objectsSize = mObjectsSize;
    if (desired < mDataSize) {
        if (desired == 0) {
            objectsSize = 0;
        } else {
            while (objectsSize > 0) {
                if (mObjects[objectsSize-1] < desired)
                    break;
                objectsSize--;
            }
        }
    }

    if (mOwner) {
        // If the size is going to zero, just release the owner's data.
        if (desired == 0) {
            freeData();
            return NO_ERROR;
        }

        // If there is a different owner, we need to take
        // posession.
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }
        binder_size_t* objects = NULL;

        if (objectsSize) {
            objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
            if (!objects) {
                free(data);

                mError = NO_MEMORY;
                return NO_MEMORY;
            }

            // Little hack to only acquire references on objects
            // we will be keeping.
            size_t oldObjectsSize = mObjectsSize;
            mObjectsSize = objectsSize;
            acquireObjects();
            mObjectsSize = oldObjectsSize;
        }

        if (mData) {
            memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
        }
        if (objects && mObjects) {
            memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
        }
        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        // Hand the old buffers back to their owner now that we have our
        // own copies.
        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
        mOwner = NULL;

        LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;
        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);

        mData = data;
        mObjects = objects;
        mDataSize = (mDataSize < desired) ? mDataSize : desired;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        mDataCapacity = desired;
        mObjectsSize = mObjectsCapacity = objectsSize;
        mNextObjectHint = 0;

    } else if (mData) {
        if (objectsSize < mObjectsSize) {
            // Need to release refs on any objects we are dropping.
            const sp<ProcessState> proc(ProcessState::self());
            for (size_t i=objectsSize; i<mObjectsSize; i++) {
                const flat_binder_object* flat
                    = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
                if (flat->type == BINDER_TYPE_FD) {
                    // will need to rescan because we may have lopped off the only FDs
                    mFdsKnown = false;
                }
                release_object(proc, *flat, this, &mOpenAshmemSize);
            }
            binder_size_t* objects =
                (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
            if (objects) {
                mObjects = objects;
            }
            mObjectsSize = objectsSize;
            mNextObjectHint = 0;
        }

        // We own the data, so we can just do a realloc().
        if (desired > mDataCapacity) {
            uint8_t* data = (uint8_t*)realloc(mData, desired);
            if (data) {
                LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
                        desired);
                pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
                gParcelGlobalAllocSize += desired;
                gParcelGlobalAllocSize -= mDataCapacity;
                pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
                mData = data;
                mDataCapacity = desired;
            } else if (desired > mDataCapacity) {
                mError = NO_MEMORY;
                return NO_MEMORY;
            }
        } else {
            // Shrinking within our capacity: just clamp size/position.
            if (mDataSize > desired) {
                mDataSize = desired;
                ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
            }
            if (mDataPos > desired) {
                mDataPos = desired;
                ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
            }
        }

    } else {
        // This is the first data.  Easy!
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }

        if(!(mDataCapacity == 0 && mObjects == NULL
             && mObjectsCapacity == 0)) {
            ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
        }

        LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;
        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);

        mData = data;
        mDataSize = mDataPos = 0;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
        mDataCapacity = desired;
    }

    return NO_ERROR;
}

// Resets all member state to that of a freshly-constructed, empty parcel.
void Parcel::initState()
{
    LOG_ALLOC("Parcel %p: initState", this);
    mError = NO_ERROR;
    mData = 0;
    mDataSize = 0;
    mDataCapacity = 0;
    mDataPos = 0;
    ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
    ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
    mObjects = NULL;
    mObjectsSize = 0;
    mObjectsCapacity = 0;
    mNextObjectHint = 0;
    mHasFds = false;
    mFdsKnown = true;
    mAllowFds = true;
    mOwner = NULL;
    mOpenAshmemSize = 0;
}

// Recomputes mHasFds by scanning the object list for fd-type objects.
void Parcel::scanForFds() const
{
    bool hasFds = false;
    for (size_t i=0; i<mObjectsSize; i++) {
        const flat_binder_object* flat
            = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
        if (flat->type == BINDER_TYPE_FD) {
            hasFds = true;
            break;
        }
    }
    mHasFds = hasFds;
    mFdsKnown = true;
}

size_t Parcel::getBlobAshmemSize() const
{
    // This used to return the size of all blobs that were written to ashmem, now we're returning
    // the ashmem currently referenced by this Parcel, which should be equivalent.
    // TODO: Remove method once ABI can be changed.
    return mOpenAshmemSize;
}

size_t Parcel::getOpenAshmemSize() const
{
    return mOpenAshmemSize;
}

// --- Parcel::Blob ---

Parcel::Blob::Blob() :
        mFd(-1), mData(NULL), mSize(0), mMutable(false) {
}

Parcel::Blob::~Blob() {
    release();
}

// Unmaps the blob's memory (for ashmem-backed blobs) and clears state.
// The fd itself is not closed here; presumably the parcel or caller owns
// it -- verify against readBlob()/writeBlob() ownership.
void Parcel::Blob::release() {
    if (mFd != -1 && mData) {
        ::munmap(mData, mSize);
    }
    clear();
}

void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
    mFd = fd;
    mData = data;
    mSize = size;
    mMutable = isMutable;
}

void Parcel::Blob::clear() {
    mFd = -1;
    mData = NULL;
    mSize = 0;
    mMutable = false;
}

}; // namespace android