Parcel.cpp revision 93bf31f54d56617baf0192a451f2269ad78e6c49
1/* 2 * Copyright (C) 2005 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#define LOG_TAG "Parcel" 18//#define LOG_NDEBUG 0 19 20#include <errno.h> 21#include <inttypes.h> 22#include <stdint.h> 23#include <stdio.h> 24#include <stdlib.h> 25#include <sys/mman.h> 26#include <sys/stat.h> 27#include <sys/types.h> 28#include <unistd.h> 29 30#include <binder/Binder.h> 31#include <binder/BpBinder.h> 32#include <binder/IPCThreadState.h> 33#include <binder/Parcel.h> 34#include <binder/ProcessState.h> 35#include <binder/TextOutput.h> 36 37#include <cutils/ashmem.h> 38#include <utils/Debug.h> 39#include <utils/Flattenable.h> 40#include <utils/Log.h> 41#include <utils/misc.h> 42#include <utils/String8.h> 43#include <utils/String16.h> 44 45#include <private/binder/binder_module.h> 46#include <private/binder/Static.h> 47 48#ifndef INT32_MAX 49#define INT32_MAX ((int32_t)(2147483647)) 50#endif 51 52#define LOG_REFS(...) 53//#define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__) 54#define LOG_ALLOC(...) 55//#define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__) 56 57// --------------------------------------------------------------------------- 58 59// This macro should never be used at runtime, as a too large value 60// of s could cause an integer overflow. 
Instead, you should always 61// use the wrapper function pad_size() 62#define PAD_SIZE_UNSAFE(s) (((s)+3)&~3) 63 64static size_t pad_size(size_t s) { 65 if (s > (SIZE_T_MAX - 3)) { 66 abort(); 67 } 68 return PAD_SIZE_UNSAFE(s); 69} 70 71// Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER 72#define STRICT_MODE_PENALTY_GATHER (0x40 << 16) 73 74// Note: must be kept in sync with android/os/Parcel.java's EX_HAS_REPLY_HEADER 75#define EX_HAS_REPLY_HEADER -128 76 77// XXX This can be made public if we want to provide 78// support for typed data. 79struct small_flat_data 80{ 81 uint32_t type; 82 uint32_t data; 83}; 84 85namespace android { 86 87static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER; 88static size_t gParcelGlobalAllocSize = 0; 89static size_t gParcelGlobalAllocCount = 0; 90 91// Maximum size of a blob to transfer in-place. 92static const size_t BLOB_INPLACE_LIMIT = 16 * 1024; 93 94enum { 95 BLOB_INPLACE = 0, 96 BLOB_ASHMEM_IMMUTABLE = 1, 97 BLOB_ASHMEM_MUTABLE = 2, 98}; 99 100void acquire_object(const sp<ProcessState>& proc, 101 const flat_binder_object& obj, const void* who, size_t* outAshmemSize) 102{ 103 switch (obj.type) { 104 case BINDER_TYPE_BINDER: 105 if (obj.binder) { 106 LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie); 107 reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who); 108 } 109 return; 110 case BINDER_TYPE_WEAK_BINDER: 111 if (obj.binder) 112 reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who); 113 return; 114 case BINDER_TYPE_HANDLE: { 115 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle); 116 if (b != NULL) { 117 LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get()); 118 b->incStrong(who); 119 } 120 return; 121 } 122 case BINDER_TYPE_WEAK_HANDLE: { 123 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle); 124 if (b != NULL) b.get_refs()->incWeak(who); 125 return; 126 } 127 case BINDER_TYPE_FD: { 128 if 
((obj.cookie != 0) && (outAshmemSize != NULL)) { 129 struct stat st; 130 int ret = fstat(obj.handle, &st); 131 if (!ret && S_ISCHR(st.st_mode)) { 132 // If we own an ashmem fd, keep track of how much memory it refers to. 133 int size = ashmem_get_size_region(obj.handle); 134 if (size > 0) { 135 *outAshmemSize += size; 136 } 137 } 138 } 139 return; 140 } 141 } 142 143 ALOGD("Invalid object type 0x%08x", obj.type); 144} 145 146void acquire_object(const sp<ProcessState>& proc, 147 const flat_binder_object& obj, const void* who) 148{ 149 acquire_object(proc, obj, who, NULL); 150} 151 152static void release_object(const sp<ProcessState>& proc, 153 const flat_binder_object& obj, const void* who, size_t* outAshmemSize) 154{ 155 switch (obj.type) { 156 case BINDER_TYPE_BINDER: 157 if (obj.binder) { 158 LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie); 159 reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who); 160 } 161 return; 162 case BINDER_TYPE_WEAK_BINDER: 163 if (obj.binder) 164 reinterpret_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who); 165 return; 166 case BINDER_TYPE_HANDLE: { 167 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle); 168 if (b != NULL) { 169 LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get()); 170 b->decStrong(who); 171 } 172 return; 173 } 174 case BINDER_TYPE_WEAK_HANDLE: { 175 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle); 176 if (b != NULL) b.get_refs()->decWeak(who); 177 return; 178 } 179 case BINDER_TYPE_FD: { 180 if (obj.cookie != 0) { // owned 181 if (outAshmemSize != NULL) { 182 struct stat st; 183 int ret = fstat(obj.handle, &st); 184 if (!ret && S_ISCHR(st.st_mode)) { 185 int size = ashmem_get_size_region(obj.handle); 186 if (size > 0) { 187 *outAshmemSize -= size; 188 } 189 } 190 } 191 192 close(obj.handle); 193 } 194 return; 195 } 196 } 197 198 ALOGE("Invalid object type 0x%08x", obj.type); 199} 200 201void release_object(const sp<ProcessState>& proc, 202 const 
flat_binder_object& obj, const void* who) 203{ 204 release_object(proc, obj, who, NULL); 205} 206 207inline static status_t finish_flatten_binder( 208 const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out) 209{ 210 return out->writeObject(flat, false); 211} 212 213status_t flatten_binder(const sp<ProcessState>& /*proc*/, 214 const sp<IBinder>& binder, Parcel* out) 215{ 216 flat_binder_object obj; 217 218 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS; 219 if (binder != NULL) { 220 IBinder *local = binder->localBinder(); 221 if (!local) { 222 BpBinder *proxy = binder->remoteBinder(); 223 if (proxy == NULL) { 224 ALOGE("null proxy"); 225 } 226 const int32_t handle = proxy ? proxy->handle() : 0; 227 obj.type = BINDER_TYPE_HANDLE; 228 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */ 229 obj.handle = handle; 230 obj.cookie = 0; 231 } else { 232 obj.type = BINDER_TYPE_BINDER; 233 obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs()); 234 obj.cookie = reinterpret_cast<uintptr_t>(local); 235 } 236 } else { 237 obj.type = BINDER_TYPE_BINDER; 238 obj.binder = 0; 239 obj.cookie = 0; 240 } 241 242 return finish_flatten_binder(binder, obj, out); 243} 244 245status_t flatten_binder(const sp<ProcessState>& /*proc*/, 246 const wp<IBinder>& binder, Parcel* out) 247{ 248 flat_binder_object obj; 249 250 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS; 251 if (binder != NULL) { 252 sp<IBinder> real = binder.promote(); 253 if (real != NULL) { 254 IBinder *local = real->localBinder(); 255 if (!local) { 256 BpBinder *proxy = real->remoteBinder(); 257 if (proxy == NULL) { 258 ALOGE("null proxy"); 259 } 260 const int32_t handle = proxy ? 
proxy->handle() : 0; 261 obj.type = BINDER_TYPE_WEAK_HANDLE; 262 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */ 263 obj.handle = handle; 264 obj.cookie = 0; 265 } else { 266 obj.type = BINDER_TYPE_WEAK_BINDER; 267 obj.binder = reinterpret_cast<uintptr_t>(binder.get_refs()); 268 obj.cookie = reinterpret_cast<uintptr_t>(binder.unsafe_get()); 269 } 270 return finish_flatten_binder(real, obj, out); 271 } 272 273 // XXX How to deal? In order to flatten the given binder, 274 // we need to probe it for information, which requires a primary 275 // reference... but we don't have one. 276 // 277 // The OpenBinder implementation uses a dynamic_cast<> here, 278 // but we can't do that with the different reference counting 279 // implementation we are using. 280 ALOGE("Unable to unflatten Binder weak reference!"); 281 obj.type = BINDER_TYPE_BINDER; 282 obj.binder = 0; 283 obj.cookie = 0; 284 return finish_flatten_binder(NULL, obj, out); 285 286 } else { 287 obj.type = BINDER_TYPE_BINDER; 288 obj.binder = 0; 289 obj.cookie = 0; 290 return finish_flatten_binder(NULL, obj, out); 291 } 292} 293 294inline static status_t finish_unflatten_binder( 295 BpBinder* /*proxy*/, const flat_binder_object& /*flat*/, 296 const Parcel& /*in*/) 297{ 298 return NO_ERROR; 299} 300 301status_t unflatten_binder(const sp<ProcessState>& proc, 302 const Parcel& in, sp<IBinder>* out) 303{ 304 const flat_binder_object* flat = in.readObject(false); 305 306 if (flat) { 307 switch (flat->type) { 308 case BINDER_TYPE_BINDER: 309 *out = reinterpret_cast<IBinder*>(flat->cookie); 310 return finish_unflatten_binder(NULL, *flat, in); 311 case BINDER_TYPE_HANDLE: 312 *out = proc->getStrongProxyForHandle(flat->handle); 313 return finish_unflatten_binder( 314 static_cast<BpBinder*>(out->get()), *flat, in); 315 } 316 } 317 return BAD_TYPE; 318} 319 320status_t unflatten_binder(const sp<ProcessState>& proc, 321 const Parcel& in, wp<IBinder>* out) 322{ 323 const flat_binder_object* flat = 
in.readObject(false); 324 325 if (flat) { 326 switch (flat->type) { 327 case BINDER_TYPE_BINDER: 328 *out = reinterpret_cast<IBinder*>(flat->cookie); 329 return finish_unflatten_binder(NULL, *flat, in); 330 case BINDER_TYPE_WEAK_BINDER: 331 if (flat->binder != 0) { 332 out->set_object_and_refs( 333 reinterpret_cast<IBinder*>(flat->cookie), 334 reinterpret_cast<RefBase::weakref_type*>(flat->binder)); 335 } else { 336 *out = NULL; 337 } 338 return finish_unflatten_binder(NULL, *flat, in); 339 case BINDER_TYPE_HANDLE: 340 case BINDER_TYPE_WEAK_HANDLE: 341 *out = proc->getWeakProxyForHandle(flat->handle); 342 return finish_unflatten_binder( 343 static_cast<BpBinder*>(out->unsafe_get()), *flat, in); 344 } 345 } 346 return BAD_TYPE; 347} 348 349// --------------------------------------------------------------------------- 350 351Parcel::Parcel() 352{ 353 LOG_ALLOC("Parcel %p: constructing", this); 354 initState(); 355} 356 357Parcel::~Parcel() 358{ 359 freeDataNoInit(); 360 LOG_ALLOC("Parcel %p: destroyed", this); 361} 362 363size_t Parcel::getGlobalAllocSize() { 364 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 365 size_t size = gParcelGlobalAllocSize; 366 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 367 return size; 368} 369 370size_t Parcel::getGlobalAllocCount() { 371 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 372 size_t count = gParcelGlobalAllocCount; 373 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 374 return count; 375} 376 377const uint8_t* Parcel::data() const 378{ 379 return mData; 380} 381 382size_t Parcel::dataSize() const 383{ 384 return (mDataSize > mDataPos ? mDataSize : mDataPos); 385} 386 387size_t Parcel::dataAvail() const 388{ 389 // TODO: decide what to do about the possibility that this can 390 // report an available-data size that exceeds a Java int's max 391 // positive value, causing havoc. 
Fortunately this will only 392 // happen if someone constructs a Parcel containing more than two 393 // gigabytes of data, which on typical phone hardware is simply 394 // not possible. 395 return dataSize() - dataPosition(); 396} 397 398size_t Parcel::dataPosition() const 399{ 400 return mDataPos; 401} 402 403size_t Parcel::dataCapacity() const 404{ 405 return mDataCapacity; 406} 407 408status_t Parcel::setDataSize(size_t size) 409{ 410 if (size > INT32_MAX) { 411 // don't accept size_t values which may have come from an 412 // inadvertent conversion from a negative int. 413 return BAD_VALUE; 414 } 415 416 status_t err; 417 err = continueWrite(size); 418 if (err == NO_ERROR) { 419 mDataSize = size; 420 ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize); 421 } 422 return err; 423} 424 425void Parcel::setDataPosition(size_t pos) const 426{ 427 if (pos > INT32_MAX) { 428 // don't accept size_t values which may have come from an 429 // inadvertent conversion from a negative int. 430 abort(); 431 } 432 433 mDataPos = pos; 434 mNextObjectHint = 0; 435} 436 437status_t Parcel::setDataCapacity(size_t size) 438{ 439 if (size > INT32_MAX) { 440 // don't accept size_t values which may have come from an 441 // inadvertent conversion from a negative int. 442 return BAD_VALUE; 443 } 444 445 if (size > mDataCapacity) return continueWrite(size); 446 return NO_ERROR; 447} 448 449status_t Parcel::setData(const uint8_t* buffer, size_t len) 450{ 451 if (len > INT32_MAX) { 452 // don't accept size_t values which may have come from an 453 // inadvertent conversion from a negative int. 
454 return BAD_VALUE; 455 } 456 457 status_t err = restartWrite(len); 458 if (err == NO_ERROR) { 459 memcpy(const_cast<uint8_t*>(data()), buffer, len); 460 mDataSize = len; 461 mFdsKnown = false; 462 } 463 return err; 464} 465 466status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len) 467{ 468 const sp<ProcessState> proc(ProcessState::self()); 469 status_t err; 470 const uint8_t *data = parcel->mData; 471 const binder_size_t *objects = parcel->mObjects; 472 size_t size = parcel->mObjectsSize; 473 int startPos = mDataPos; 474 int firstIndex = -1, lastIndex = -2; 475 476 if (len == 0) { 477 return NO_ERROR; 478 } 479 480 if (len > INT32_MAX) { 481 // don't accept size_t values which may have come from an 482 // inadvertent conversion from a negative int. 483 return BAD_VALUE; 484 } 485 486 // range checks against the source parcel size 487 if ((offset > parcel->mDataSize) 488 || (len > parcel->mDataSize) 489 || (offset + len > parcel->mDataSize)) { 490 return BAD_VALUE; 491 } 492 493 // Count objects in range 494 for (int i = 0; i < (int) size; i++) { 495 size_t off = objects[i]; 496 if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) { 497 if (firstIndex == -1) { 498 firstIndex = i; 499 } 500 lastIndex = i; 501 } 502 } 503 int numObjects = lastIndex - firstIndex + 1; 504 505 if ((mDataSize+len) > mDataCapacity) { 506 // grow data 507 err = growData(len); 508 if (err != NO_ERROR) { 509 return err; 510 } 511 } 512 513 // append data 514 memcpy(mData + mDataPos, data + offset, len); 515 mDataPos += len; 516 mDataSize += len; 517 518 err = NO_ERROR; 519 520 if (numObjects > 0) { 521 // grow objects 522 if (mObjectsCapacity < mObjectsSize + numObjects) { 523 size_t newSize = ((mObjectsSize + numObjects)*3)/2; 524 if (newSize < mObjectsSize) return NO_MEMORY; // overflow 525 binder_size_t *objects = 526 (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t)); 527 if (objects == (binder_size_t*)0) { 528 return NO_MEMORY; 
529 } 530 mObjects = objects; 531 mObjectsCapacity = newSize; 532 } 533 534 // append and acquire objects 535 int idx = mObjectsSize; 536 for (int i = firstIndex; i <= lastIndex; i++) { 537 size_t off = objects[i] - offset + startPos; 538 mObjects[idx++] = off; 539 mObjectsSize++; 540 541 flat_binder_object* flat 542 = reinterpret_cast<flat_binder_object*>(mData + off); 543 acquire_object(proc, *flat, this, &mOpenAshmemSize); 544 545 if (flat->type == BINDER_TYPE_FD) { 546 // If this is a file descriptor, we need to dup it so the 547 // new Parcel now owns its own fd, and can declare that we 548 // officially know we have fds. 549 flat->handle = dup(flat->handle); 550 flat->cookie = 1; 551 mHasFds = mFdsKnown = true; 552 if (!mAllowFds) { 553 err = FDS_NOT_ALLOWED; 554 } 555 } 556 } 557 } 558 559 return err; 560} 561 562bool Parcel::allowFds() const 563{ 564 return mAllowFds; 565} 566 567bool Parcel::pushAllowFds(bool allowFds) 568{ 569 const bool origValue = mAllowFds; 570 if (!allowFds) { 571 mAllowFds = false; 572 } 573 return origValue; 574} 575 576void Parcel::restoreAllowFds(bool lastValue) 577{ 578 mAllowFds = lastValue; 579} 580 581bool Parcel::hasFileDescriptors() const 582{ 583 if (!mFdsKnown) { 584 scanForFds(); 585 } 586 return mHasFds; 587} 588 589// Write RPC headers. 
(previously just the interface token) 590status_t Parcel::writeInterfaceToken(const String16& interface) 591{ 592 writeInt32(IPCThreadState::self()->getStrictModePolicy() | 593 STRICT_MODE_PENALTY_GATHER); 594 // currently the interface identification token is just its name as a string 595 return writeString16(interface); 596} 597 598bool Parcel::checkInterface(IBinder* binder) const 599{ 600 return enforceInterface(binder->getInterfaceDescriptor()); 601} 602 603bool Parcel::enforceInterface(const String16& interface, 604 IPCThreadState* threadState) const 605{ 606 int32_t strictPolicy = readInt32(); 607 if (threadState == NULL) { 608 threadState = IPCThreadState::self(); 609 } 610 if ((threadState->getLastTransactionBinderFlags() & 611 IBinder::FLAG_ONEWAY) != 0) { 612 // For one-way calls, the callee is running entirely 613 // disconnected from the caller, so disable StrictMode entirely. 614 // Not only does disk/network usage not impact the caller, but 615 // there's no way to commuicate back any violations anyway. 616 threadState->setStrictModePolicy(0); 617 } else { 618 threadState->setStrictModePolicy(strictPolicy); 619 } 620 const String16 str(readString16()); 621 if (str == interface) { 622 return true; 623 } else { 624 ALOGW("**** enforceInterface() expected '%s' but read '%s'", 625 String8(interface).string(), String8(str).string()); 626 return false; 627 } 628} 629 630const binder_size_t* Parcel::objects() const 631{ 632 return mObjects; 633} 634 635size_t Parcel::objectsCount() const 636{ 637 return mObjectsSize; 638} 639 640status_t Parcel::errorCheck() const 641{ 642 return mError; 643} 644 645void Parcel::setError(status_t err) 646{ 647 mError = err; 648} 649 650status_t Parcel::finishWrite(size_t len) 651{ 652 if (len > INT32_MAX) { 653 // don't accept size_t values which may have come from an 654 // inadvertent conversion from a negative int. 
655 return BAD_VALUE; 656 } 657 658 //printf("Finish write of %d\n", len); 659 mDataPos += len; 660 ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos); 661 if (mDataPos > mDataSize) { 662 mDataSize = mDataPos; 663 ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize); 664 } 665 //printf("New pos=%d, size=%d\n", mDataPos, mDataSize); 666 return NO_ERROR; 667} 668 669status_t Parcel::writeUnpadded(const void* data, size_t len) 670{ 671 if (len > INT32_MAX) { 672 // don't accept size_t values which may have come from an 673 // inadvertent conversion from a negative int. 674 return BAD_VALUE; 675 } 676 677 size_t end = mDataPos + len; 678 if (end < mDataPos) { 679 // integer overflow 680 return BAD_VALUE; 681 } 682 683 if (end <= mDataCapacity) { 684restart_write: 685 memcpy(mData+mDataPos, data, len); 686 return finishWrite(len); 687 } 688 689 status_t err = growData(len); 690 if (err == NO_ERROR) goto restart_write; 691 return err; 692} 693 694status_t Parcel::write(const void* data, size_t len) 695{ 696 if (len > INT32_MAX) { 697 // don't accept size_t values which may have come from an 698 // inadvertent conversion from a negative int. 699 return BAD_VALUE; 700 } 701 702 void* const d = writeInplace(len); 703 if (d) { 704 memcpy(d, data, len); 705 return NO_ERROR; 706 } 707 return mError; 708} 709 710void* Parcel::writeInplace(size_t len) 711{ 712 if (len > INT32_MAX) { 713 // don't accept size_t values which may have come from an 714 // inadvertent conversion from a negative int. 715 return NULL; 716 } 717 718 const size_t padded = pad_size(len); 719 720 // sanity check for integer overflow 721 if (mDataPos+padded < mDataPos) { 722 return NULL; 723 } 724 725 if ((mDataPos+padded) <= mDataCapacity) { 726restart_write: 727 //printf("Writing %ld bytes, padded to %ld\n", len, padded); 728 uint8_t* const data = mData+mDataPos; 729 730 // Need to pad at end? 
731 if (padded != len) { 732#if BYTE_ORDER == BIG_ENDIAN 733 static const uint32_t mask[4] = { 734 0x00000000, 0xffffff00, 0xffff0000, 0xff000000 735 }; 736#endif 737#if BYTE_ORDER == LITTLE_ENDIAN 738 static const uint32_t mask[4] = { 739 0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff 740 }; 741#endif 742 //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len], 743 // *reinterpret_cast<void**>(data+padded-4)); 744 *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len]; 745 } 746 747 finishWrite(padded); 748 return data; 749 } 750 751 status_t err = growData(padded); 752 if (err == NO_ERROR) goto restart_write; 753 return NULL; 754} 755 756status_t Parcel::writeInt32(int32_t val) 757{ 758 return writeAligned(val); 759} 760 761status_t Parcel::writeUint32(uint32_t val) 762{ 763 return writeAligned(val); 764} 765 766status_t Parcel::writeInt32Array(size_t len, const int32_t *val) { 767 if (len > INT32_MAX) { 768 // don't accept size_t values which may have come from an 769 // inadvertent conversion from a negative int. 770 return BAD_VALUE; 771 } 772 773 if (!val) { 774 return writeInt32(-1); 775 } 776 status_t ret = writeInt32(static_cast<uint32_t>(len)); 777 if (ret == NO_ERROR) { 778 ret = write(val, len * sizeof(*val)); 779 } 780 return ret; 781} 782status_t Parcel::writeByteArray(size_t len, const uint8_t *val) { 783 if (len > INT32_MAX) { 784 // don't accept size_t values which may have come from an 785 // inadvertent conversion from a negative int. 
786 return BAD_VALUE; 787 } 788 789 if (!val) { 790 return writeInt32(-1); 791 } 792 status_t ret = writeInt32(static_cast<uint32_t>(len)); 793 if (ret == NO_ERROR) { 794 ret = write(val, len * sizeof(*val)); 795 } 796 return ret; 797} 798 799status_t Parcel::writeInt64(int64_t val) 800{ 801 return writeAligned(val); 802} 803 804status_t Parcel::writeUint64(uint64_t val) 805{ 806 return writeAligned(val); 807} 808 809status_t Parcel::writePointer(uintptr_t val) 810{ 811 return writeAligned<binder_uintptr_t>(val); 812} 813 814status_t Parcel::writeFloat(float val) 815{ 816 return writeAligned(val); 817} 818 819#if defined(__mips__) && defined(__mips_hard_float) 820 821status_t Parcel::writeDouble(double val) 822{ 823 union { 824 double d; 825 unsigned long long ll; 826 } u; 827 u.d = val; 828 return writeAligned(u.ll); 829} 830 831#else 832 833status_t Parcel::writeDouble(double val) 834{ 835 return writeAligned(val); 836} 837 838#endif 839 840status_t Parcel::writeCString(const char* str) 841{ 842 return write(str, strlen(str)+1); 843} 844 845status_t Parcel::writeString8(const String8& str) 846{ 847 status_t err = writeInt32(str.bytes()); 848 // only write string if its length is more than zero characters, 849 // as readString8 will only read if the length field is non-zero. 850 // this is slightly different from how writeString16 works. 
851 if (str.bytes() > 0 && err == NO_ERROR) { 852 err = write(str.string(), str.bytes()+1); 853 } 854 return err; 855} 856 857status_t Parcel::writeString16(const String16& str) 858{ 859 return writeString16(str.string(), str.size()); 860} 861 862status_t Parcel::writeString16(const char16_t* str, size_t len) 863{ 864 if (str == NULL) return writeInt32(-1); 865 866 status_t err = writeInt32(len); 867 if (err == NO_ERROR) { 868 len *= sizeof(char16_t); 869 uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t)); 870 if (data) { 871 memcpy(data, str, len); 872 *reinterpret_cast<char16_t*>(data+len) = 0; 873 return NO_ERROR; 874 } 875 err = mError; 876 } 877 return err; 878} 879 880status_t Parcel::writeStrongBinder(const sp<IBinder>& val) 881{ 882 return flatten_binder(ProcessState::self(), val, this); 883} 884 885status_t Parcel::writeWeakBinder(const wp<IBinder>& val) 886{ 887 return flatten_binder(ProcessState::self(), val, this); 888} 889 890status_t Parcel::writeNativeHandle(const native_handle* handle) 891{ 892 if (!handle || handle->version != sizeof(native_handle)) 893 return BAD_TYPE; 894 895 status_t err; 896 err = writeInt32(handle->numFds); 897 if (err != NO_ERROR) return err; 898 899 err = writeInt32(handle->numInts); 900 if (err != NO_ERROR) return err; 901 902 for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++) 903 err = writeDupFileDescriptor(handle->data[i]); 904 905 if (err != NO_ERROR) { 906 ALOGD("write native handle, write dup fd failed"); 907 return err; 908 } 909 err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts); 910 return err; 911} 912 913status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership) 914{ 915 flat_binder_object obj; 916 obj.type = BINDER_TYPE_FD; 917 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS; 918 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */ 919 obj.handle = fd; 920 obj.cookie = takeOwnership ? 
1 : 0; 921 return writeObject(obj, true); 922} 923 924status_t Parcel::writeDupFileDescriptor(int fd) 925{ 926 int dupFd = dup(fd); 927 if (dupFd < 0) { 928 return -errno; 929 } 930 status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/); 931 if (err) { 932 close(dupFd); 933 } 934 return err; 935} 936 937status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob) 938{ 939 if (len > INT32_MAX) { 940 // don't accept size_t values which may have come from an 941 // inadvertent conversion from a negative int. 942 return BAD_VALUE; 943 } 944 945 status_t status; 946 if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) { 947 ALOGV("writeBlob: write in place"); 948 status = writeInt32(BLOB_INPLACE); 949 if (status) return status; 950 951 void* ptr = writeInplace(len); 952 if (!ptr) return NO_MEMORY; 953 954 outBlob->init(-1, ptr, len, false); 955 return NO_ERROR; 956 } 957 958 ALOGV("writeBlob: write to ashmem"); 959 int fd = ashmem_create_region("Parcel Blob", len); 960 if (fd < 0) return NO_MEMORY; 961 962 int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE); 963 if (result < 0) { 964 status = result; 965 } else { 966 void* ptr = ::mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); 967 if (ptr == MAP_FAILED) { 968 status = -errno; 969 } else { 970 if (!mutableCopy) { 971 result = ashmem_set_prot_region(fd, PROT_READ); 972 } 973 if (result < 0) { 974 status = result; 975 } else { 976 status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE); 977 if (!status) { 978 status = writeFileDescriptor(fd, true /*takeOwnership*/); 979 if (!status) { 980 outBlob->init(fd, ptr, len, mutableCopy); 981 return NO_ERROR; 982 } 983 } 984 } 985 } 986 ::munmap(ptr, len); 987 } 988 ::close(fd); 989 return status; 990} 991 992status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd) 993{ 994 // Must match up with what's done in writeBlob. 
995 if (!mAllowFds) return FDS_NOT_ALLOWED; 996 status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE); 997 if (status) return status; 998 return writeDupFileDescriptor(fd); 999} 1000 1001status_t Parcel::write(const FlattenableHelperInterface& val) 1002{ 1003 status_t err; 1004 1005 // size if needed 1006 const size_t len = val.getFlattenedSize(); 1007 const size_t fd_count = val.getFdCount(); 1008 1009 if ((len > INT32_MAX) || (fd_count > INT32_MAX)) { 1010 // don't accept size_t values which may have come from an 1011 // inadvertent conversion from a negative int. 1012 return BAD_VALUE; 1013 } 1014 1015 err = this->writeInt32(len); 1016 if (err) return err; 1017 1018 err = this->writeInt32(fd_count); 1019 if (err) return err; 1020 1021 // payload 1022 void* const buf = this->writeInplace(pad_size(len)); 1023 if (buf == NULL) 1024 return BAD_VALUE; 1025 1026 int* fds = NULL; 1027 if (fd_count) { 1028 fds = new int[fd_count]; 1029 } 1030 1031 err = val.flatten(buf, len, fds, fd_count); 1032 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) { 1033 err = this->writeDupFileDescriptor( fds[i] ); 1034 } 1035 1036 if (fd_count) { 1037 delete [] fds; 1038 } 1039 1040 return err; 1041} 1042 1043status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData) 1044{ 1045 const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity; 1046 const bool enoughObjects = mObjectsSize < mObjectsCapacity; 1047 if (enoughData && enoughObjects) { 1048restart_write: 1049 *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val; 1050 1051 // remember if it's a file descriptor 1052 if (val.type == BINDER_TYPE_FD) { 1053 if (!mAllowFds) { 1054 // fail before modifying our object index 1055 return FDS_NOT_ALLOWED; 1056 } 1057 mHasFds = mFdsKnown = true; 1058 } 1059 1060 // Need to write meta-data? 
// --- Tail of Parcel::writeObject(const flat_binder_object& val, bool nullMetaData) ---
// (The head of this function precedes this chunk.)  This path runs when the
// object has been copied into the data buffer at mDataPos: it records the
// object's offset and acquires a reference, then falls through to the grow-
// and-retry logic when buffer or offset-table space was insufficient.
        if (nullMetaData || val.binder != 0) {
            // Record the offset of this flat_binder_object and take a
            // reference on whatever it wraps (binder/handle/fd bookkeeping).
            mObjects[mObjectsSize] = mDataPos;
            acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize);
            mObjectsSize++;
        }

        return finishWrite(sizeof(flat_binder_object));
    }

    if (!enoughData) {
        const status_t err = growData(sizeof(val));
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) {
        // Grow the offset table by ~1.5x (plus slack for small sizes).
        size_t newSize = ((mObjectsSize+2)*3)/2;
        if (newSize < mObjectsSize) return NO_MEMORY;   // overflow
        binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
        if (objects == NULL) return NO_MEMORY;
        mObjects = objects;
        mObjectsCapacity = newSize;
    }

    // Space is now available; retry the write from the top.
    goto restart_write;
}

// Writes the int32 "no exception" (0) header used by the Java-layer binder
// exception protocol; the peer decodes it via readExceptionCode().
status_t Parcel::writeNoException()
{
    return writeInt32(0);
}

// Unimplemented: removing a range from the middle of a parcel is not
// supported; aborts if ever called.
void Parcel::remove(size_t /*start*/, size_t /*amt*/)
{
    LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
}

// Copies 'len' bytes from the current read position into outData and
// advances mDataPos by the 4-byte padded length.  Returns NOT_ENOUGH_DATA
// if the (padded) read would run past mDataSize.
status_t Parcel::read(void* outData, size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // The three-part check rejects integer overflow of mDataPos+pad_size(len),
    // reads past the end of the buffer, and overflow inside pad_size itself.
    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        memcpy(outData, mData+mDataPos, len);
        mDataPos += pad_size(len);
        ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
        return NO_ERROR;
    }
    return NOT_ENOUGH_DATA;
}

// Zero-copy variant of read(): returns a pointer directly into the parcel's
// buffer (valid only while the Parcel's data is alive) and advances mDataPos
// by the padded length.  Returns NULL on bounds/overflow failure.
const void* Parcel::readInplace(size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return NULL;
    }

    // Same overflow-safe bounds check as read() above.
    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        const void* data = mData+mDataPos;
        mDataPos += pad_size(len);
        ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
        return data;
    }
    return NULL;
}

// Reads a primitive of type T from the current position into *pArg.
// T must already be 4-byte-aligned in size (compile-time checked), so no
// padding is needed.
template<class T>
status_t Parcel::readAligned(T *pArg) const {
    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));

    if ((mDataPos+sizeof(T)) <= mDataSize) {
        const void* data = mData+mDataPos;
        mDataPos += sizeof(T);
        *pArg = *reinterpret_cast<const T*>(data);
        return NO_ERROR;
    } else {
        return NOT_ENOUGH_DATA;
    }
}

// Value-returning wrapper around readAligned(T*); returns 0 on failure,
// so callers cannot distinguish a stored zero from "not enough data".
template<class T>
T Parcel::readAligned() const {
    T result;
    if (readAligned(&result) != NO_ERROR) {
        result = 0;
    }

    return result;
}

// Writes a primitive of type T at the current position, growing the buffer
// and retrying (via the restart_write label) if capacity is insufficient.
template<class T>
status_t Parcel::writeAligned(T val) {
    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));

    if ((mDataPos+sizeof(val)) <= mDataCapacity) {
restart_write:
        *reinterpret_cast<T*>(mData+mDataPos) = val;
        return finishWrite(sizeof(val));
    }

    status_t err = growData(sizeof(val));
    if (err == NO_ERROR) goto restart_write;
    return err;
}

// --- Typed primitive readers: thin wrappers over readAligned<>() ---

status_t Parcel::readInt32(int32_t *pArg) const
{
    return readAligned(pArg);
}

int32_t Parcel::readInt32() const
{
    return readAligned<int32_t>();
}

status_t Parcel::readUint32(uint32_t *pArg) const
{
    return readAligned(pArg);
}

uint32_t Parcel::readUint32() const
{
    return readAligned<uint32_t>();
}

status_t Parcel::readInt64(int64_t *pArg) const
{
    return readAligned(pArg);
}


int64_t Parcel::readInt64() const
{
    return readAligned<int64_t>();
}

status_t Parcel::readUint64(uint64_t *pArg) const
{
    return readAligned(pArg);
}

uint64_t Parcel::readUint64() const
{
    return readAligned<uint64_t>();
}

// Pointers are transported as binder_uintptr_t so that 32- and 64-bit
// processes agree on the wire size; narrow back to uintptr_t on success.
status_t Parcel::readPointer(uintptr_t *pArg) const
{
    status_t ret;
    binder_uintptr_t ptr;
    ret = readAligned(&ptr);
    if (!ret)
        *pArg = ptr;
    return ret;
}

uintptr_t Parcel::readPointer() const
{
    return readAligned<binder_uintptr_t>();
}


status_t Parcel::readFloat(float *pArg) const
{
    return readAligned(pArg);
}


float Parcel::readFloat() const
{
    return readAligned<float>();
}

#if defined(__mips__) && defined(__mips_hard_float)

// On MIPS hard-float, doubles are read through an integer union to avoid
// FP-register alignment/aliasing issues with the unaligned buffer.
status_t Parcel::readDouble(double *pArg) const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = 0;
    status_t status;
    status = readAligned(&u.ll);
    *pArg = u.d;
    return status;
}

double Parcel::readDouble() const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.ll = readAligned<unsigned long long>();
    return u.d;
}

#else

status_t Parcel::readDouble(double *pArg) const
{
    return readAligned(pArg);
}

double Parcel::readDouble() const
{
    return readAligned<double>();
}

#endif

status_t Parcel::readIntPtr(intptr_t *pArg) const
{
    return readAligned(pArg);
}


intptr_t Parcel::readIntPtr() const
{
    return readAligned<intptr_t>();
}


// Returns a pointer to a NUL-terminated string inside the parcel buffer
// (no copy), or NULL if no terminator exists within the remaining bytes.
// Advances mDataPos past the string plus padding.
const char* Parcel::readCString() const
{
    const size_t avail = mDataSize-mDataPos;
    if (avail > 0) {
        const char* str = reinterpret_cast<const char*>(mData+mDataPos);
        // is the string's trailing NUL within the parcel's valid bounds?
        const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
        if (eos) {
            const size_t len = eos - str;
            mDataPos += pad_size(len+1);
            ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
            return str;
        }
    }
    return NULL;
}

// Reads a length-prefixed 8-bit string; returns an empty String8 when the
// stored length is non-positive or the payload is unreadable.
String8 Parcel::readString8() const
{
    int32_t size = readInt32();
    // watch for potential int overflow adding 1 for trailing NUL
    if (size > 0 && size < INT32_MAX) {
        const char* str = (const char*)readInplace(size+1);
        if (str) return String8(str, size);
    }
    return String8();
}

// Reads a length-prefixed UTF-16 string; NULL strings are not representable
// here and produce an empty String16 plus an error log.
String16 Parcel::readString16() const
{
    size_t len;
    const char16_t* str = readString16Inplace(&len);
    if (str) return String16(str, len);
    ALOGE("Reading a NULL string not supported here.");
    return String16();
}

// Zero-copy UTF-16 string read: returns a pointer into the parcel buffer and
// stores the character count (excluding the trailing NUL) in *outLen, or
// returns NULL with *outLen = 0 on failure.
const char16_t* Parcel::readString16Inplace(size_t* outLen) const
{
    int32_t size = readInt32();
    // watch for potential int overflow from size+1
    if (size >= 0 && size < INT32_MAX) {
        *outLen = size;
        const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
        if (str != NULL) {
            return str;
        }
    }
    *outLen = 0;
    return NULL;
}

// Reads a flattened binder object and converts it into a strong IBinder
// reference; returns NULL on unflatten failure (error is not propagated).
sp<IBinder> Parcel::readStrongBinder() const
{
    sp<IBinder> val;
    unflatten_binder(ProcessState::self(), *this, &val);
    return val;
}

// Weak-reference counterpart of readStrongBinder().
wp<IBinder> Parcel::readWeakBinder() const
{
    wp<IBinder> val;
    unflatten_binder(ProcessState::self(), *this, &val);
    return val;
}

// Reads the exception header written by the remote side.  Returns 0 for
// "no exception" or the exception code otherwise; a "fat" reply header
// (EX_HAS_REPLY_HEADER) is skipped entirely since native code ignores it.
int32_t Parcel::readExceptionCode() const
{
    int32_t exception_code = readAligned<int32_t>();
    if (exception_code == EX_HAS_REPLY_HEADER) {
        int32_t header_start = dataPosition();
        int32_t header_size = readAligned<int32_t>();
        // Skip over fat responses headers.  Not used (or propagated) in
        // native code
        setDataPosition(header_start + header_size);
        // And fat response headers are currently only used when there are no
        // exceptions, so return no error:
        return 0;
    }
    return exception_code;
}

// Reads a native_handle (fd count, int count, then the fds and ints).
// The returned handle's fds are dup()'d, so it is owned by the caller;
// returns 0 (NULL) on any failure, closing any fds already duped.
native_handle* Parcel::readNativeHandle() const
{
    int numFds, numInts;
    status_t err;
    err = readInt32(&numFds);
    if (err != NO_ERROR) return 0;
    err = readInt32(&numInts);
    if (err != NO_ERROR) return 0;

    native_handle* h = native_handle_create(numFds, numInts);
    if (!h) {
        return 0;
    }

    for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
        h->data[i] = dup(readFileDescriptor());
        if (h->data[i] < 0) {
            // dup failed (or readFileDescriptor returned BAD_TYPE): unwind
            // the fds duped so far and discard the handle.
            for (int j = 0; j < i; j++) {
                close(h->data[j]);
            }
            native_handle_delete(h);
            return 0;
        }
    }
    err = read(h->data + numFds, sizeof(int)*numInts);
    if (err != NO_ERROR) {
        native_handle_close(h);
        native_handle_delete(h);
        h = 0;
    }
    return h;
}


// Reads a BINDER_TYPE_FD object and returns its file descriptor, or
// BAD_TYPE.  NOTE(review): the fd appears to remain owned by the Parcel
// (callers here dup() it when they need to keep it) — confirm against the
// write side before relying on this.
int Parcel::readFileDescriptor() const
{
    const flat_binder_object* flat = readObject(true);
    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_FD:
                //ALOGI("Returning file descriptor %ld from parcel %p", flat->handle, this);
                return flat->handle;
        }
    }
    return BAD_TYPE;
}

// Reads a blob previously written as either in-place bytes or an ashmem
// region (see BLOB_* constants).  In-place blobs alias the parcel buffer;
// ashmem blobs are mmap'd (read-only unless BLOB_ASHMEM_MUTABLE).
status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
{
    int32_t blobType;
    status_t status = readInt32(&blobType);
    if (status) return status;

    if (blobType == BLOB_INPLACE) {
        ALOGV("readBlob: read in place");
        const void* ptr = readInplace(len);
        if (!ptr) return BAD_VALUE;

        outBlob->init(-1, const_cast<void*>(ptr), len, false);
        return NO_ERROR;
    }

    ALOGV("readBlob: read from ashmem");
    bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE);
    int fd = readFileDescriptor();
    if (fd == int(BAD_TYPE)) return BAD_VALUE;

    void* ptr = ::mmap(NULL, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ,
            MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED) return NO_MEMORY;

    outBlob->init(fd, ptr, len, isMutable);
    return NO_ERROR;
}

// Unflattens a Flattenable: reads its byte length and fd count, hands the
// payload plus dup()'d fds to val.unflatten().  The duped fds' ownership
// passes to the Flattenable implementation; only the index array is freed
// here.
status_t Parcel::read(FlattenableHelperInterface& val) const
{
    // size
    const size_t len = this->readInt32();
    const size_t fd_count = this->readInt32();

    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // payload
    void const* const buf = this->readInplace(pad_size(len));
    if (buf == NULL)
        return BAD_VALUE;

    int* fds = NULL;
    if (fd_count) {
        fds = new int[fd_count];
    }

    status_t err = NO_ERROR;
    for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
        fds[i] = dup(this->readFileDescriptor());
        if (fds[i] < 0) {
            err = BAD_VALUE;
            ALOGE("dup() failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s",
                i, fds[i], fd_count, strerror(errno));
        }
    }

    if (err == NO_ERROR) {
        err = val.unflatten(buf, len, fds, fd_count);
    }

    if (fd_count) {
        delete [] fds;
    }

    return err;
}

// Returns the flat_binder_object at the current read position, advancing
// past it.  Unless nullMetaData is set, the object's offset must appear in
// mObjects (searched forward from mNextObjectHint, then backward) — this is
// the guard that keeps callers from reading plain data as an object.
const flat_binder_object* Parcel::readObject(bool nullMetaData) const
{
    const size_t DPOS = mDataPos;
    if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
        const flat_binder_object* obj
                = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
        mDataPos = DPOS + sizeof(flat_binder_object);
        if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
            // When transferring a NULL object, we don't write it into
            // the object list, so we don't want to check for it when
            // reading.
            ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
            return obj;
        }

        // Ensure that this object is valid...
        binder_size_t* const OBJS = mObjects;
        const size_t N = mObjectsSize;
        size_t opos = mNextObjectHint;

        if (N > 0) {
            ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
                 this, DPOS, opos);

            // Start at the current hint position, looking for an object at
            // the current data position.
            if (opos < N) {
                while (opos < (N-1) && OBJS[opos] < DPOS) {
                    opos++;
                }
            } else {
                opos = N-1;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with forward search",
                     this, DPOS, opos);
                mNextObjectHint = opos+1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }

            // Look backwards for it...
            while (opos > 0 && OBJS[opos] > DPOS) {
                opos--;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with backward search",
                     this, DPOS, opos);
                mNextObjectHint = opos+1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }
        }
        ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
             this, DPOS);
    }
    return NULL;
}

// Closes every file descriptor recorded in the object table.  Used when the
// parcel's data is being discarded and the fds must not leak.
void Parcel::closeFileDescriptors()
{
    size_t i = mObjectsSize;
    if (i > 0) {
        //ALOGI("Closing file descriptors for %zu objects...", i);
    }
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
        if (flat->type == BINDER_TYPE_FD) {
            //ALOGI("Closing fd: %ld", flat->handle);
            close(flat->handle);
        }
    }
}

// --- Raw accessors used by the IPC layer when handing the buffers to the
// binder driver ---

uintptr_t Parcel::ipcData() const
{
    return reinterpret_cast<uintptr_t>(mData);
}

size_t Parcel::ipcDataSize() const
{
    // Whichever is larger: data written so far, or data consumed.
    return (mDataSize > mDataPos ? mDataSize : mDataPos);
}

uintptr_t Parcel::ipcObjects() const
{
    return reinterpret_cast<uintptr_t>(mObjects);
}

size_t Parcel::ipcObjectsCount() const
{
    return mObjectsSize;
}

// Adopts externally owned data/objects buffers (typically driver-owned reply
// data).  relFunc is invoked later to release them.  Object offsets are
// validated to be strictly increasing and non-overlapping; on a bad offset
// the whole object table is discarded (mObjectsSize = 0).
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
    const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
{
    binder_size_t minOffset = 0;
    freeDataNoInit();
    mError = NO_ERROR;
    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
    mDataPos = 0;
    ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
    mObjects = const_cast<binder_size_t*>(objects);
    mObjectsSize = mObjectsCapacity = objectsCount;
    mNextObjectHint = 0;
    mOwner = relFunc;
    mOwnerCookie = relCookie;
    for (size_t i = 0; i < mObjectsSize; i++) {
        binder_size_t offset = mObjects[i];
        if (offset < minOffset) {
            ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
                  __func__, (uint64_t)offset, (uint64_t)minOffset);
            mObjectsSize = 0;
            break;
        }
        minOffset = offset + sizeof(flat_binder_object);
    }
    scanForFds();
}

// Debug dump: hex of the data buffer plus a line per flattened object.
void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
{
    to << "Parcel(";

    if (errorCheck() != NO_ERROR) {
        const status_t err = errorCheck();
        to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
    } else if (dataSize() > 0) {
        const uint8_t* DATA = data();
        to << indent << HexDump(DATA, dataSize()) << dedent;
        const binder_size_t* OBJS = objects();
        const size_t N = objectsCount();
        for (size_t i=0; i<N; i++) {
            const flat_binder_object* flat
                = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
            to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
                << TypeCode(flat->type & 0x7f7f7f00)
                << " = " << flat->binder;
        }
    } else {
        to << "NULL";
    }

    to << ")";
}

// Drops one reference on every flattened object in the parcel (reverse
// order).  Must mirror acquireObjects() exactly.
void Parcel::releaseObjects()
{
    const sp<ProcessState> proc(ProcessState::self());
    size_t i = mObjectsSize;
    uint8_t* const data = mData;
    binder_size_t* const objects = mObjects;
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(data+objects[i]);
        release_object(proc, *flat, this, &mOpenAshmemSize);
    }
}

// Takes one reference on every flattened object in the parcel (reverse
// order).  Counterpart of releaseObjects().
void Parcel::acquireObjects()
{
    const sp<ProcessState> proc(ProcessState::self());
    size_t i = mObjectsSize;
    uint8_t* const data = mData;
    binder_size_t* const objects = mObjects;
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(data+objects[i]);
        acquire_object(proc, *flat, this, &mOpenAshmemSize);
    }
}

// Releases all data and resets the parcel to its freshly-constructed state.
void Parcel::freeData()
{
    freeDataNoInit();
    initState();
}

// Releases data/objects without re-initializing members.  If the buffers
// are externally owned (mOwner set, e.g. via ipcSetDataReference) they are
// handed back to the owner; otherwise references are released, the global
// allocation accounting is updated, and the heap buffers are freed.
void Parcel::freeDataNoInit()
{
    if (mOwner) {
        LOG_ALLOC("Parcel %p: freeing other owner data", this);
        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
    } else {
        LOG_ALLOC("Parcel %p: freeing allocated data", this);
        releaseObjects();
        if (mData) {
            LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
            pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
            gParcelGlobalAllocSize -= mDataCapacity;
            gParcelGlobalAllocCount--;
            pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
            free(mData);
        }
        if (mObjects) free(mObjects);
    }
}

// Grows the data buffer to ~1.5x of (current size + len) via continueWrite.
// Rejects suspicious lengths and multiplication overflow.
status_t Parcel::growData(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    size_t newSize = ((mDataSize+len)*3)/2;
    return (newSize <= mDataSize)
            ? (status_t) NO_MEMORY      // overflow in the *3/2 computation
            : continueWrite(newSize);
}

// Discards all content and restarts writing with at least 'desired' bytes of
// capacity.  Externally owned data is released first; otherwise the buffer
// is realloc'd (a shrink failure is tolerated — old capacity is kept).
status_t Parcel::restartWrite(size_t desired)
{
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (mOwner) {
        freeData();
        return continueWrite(desired);
    }

    uint8_t* data = (uint8_t*)realloc(mData, desired);
    if (!data && desired > mDataCapacity) {
        mError = NO_MEMORY;
        return NO_MEMORY;
    }

    releaseObjects();

    if (data) {
        LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocSize -= mDataCapacity;
        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
        mData = data;
        mDataCapacity = desired;
    }

    mDataSize = mDataPos = 0;
    ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
    ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);

    free(mObjects);
    mObjects = NULL;
    mObjectsSize = mObjectsCapacity = 0;
    mNextObjectHint = 0;
    mHasFds = false;
    mFdsKnown = true;
    mAllowFds = true;

    return NO_ERROR;
}

// Resizes the data buffer to 'desired' bytes while preserving existing
// content.  Handles three cases: (1) externally owned data — copy into a
// newly owned buffer and return the old one to its owner; (2) owned data —
// realloc, dropping (and de-referencing) any objects past the new size when
// shrinking; (3) no data yet — first allocation.
status_t Parcel::continueWrite(size_t desired)
{
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // If shrinking, first adjust for any objects that appear
    // after the new data size.
    size_t objectsSize = mObjectsSize;
    if (desired < mDataSize) {
        if (desired == 0) {
            objectsSize = 0;
        } else {
            while (objectsSize > 0) {
                if (mObjects[objectsSize-1] < desired)
                    break;
                objectsSize--;
            }
        }
    }

    if (mOwner) {
        // If the size is going to zero, just release the owner's data.
        if (desired == 0) {
            freeData();
            return NO_ERROR;
        }

        // If there is a different owner, we need to take
        // posession.
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }
        binder_size_t* objects = NULL;

        if (objectsSize) {
            objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
            if (!objects) {
                free(data);

                mError = NO_MEMORY;
                return NO_MEMORY;
            }

            // Little hack to only acquire references on objects
            // we will be keeping.
            size_t oldObjectsSize = mObjectsSize;
            mObjectsSize = objectsSize;
            acquireObjects();
            mObjectsSize = oldObjectsSize;
        }

        if (mData) {
            memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
        }
        if (objects && mObjects) {
            memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
        }
        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
        mOwner = NULL;

        LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;
        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);

        mData = data;
        mObjects = objects;
        mDataSize = (mDataSize < desired) ? mDataSize : desired;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        mDataCapacity = desired;
        mObjectsSize = mObjectsCapacity = objectsSize;
        mNextObjectHint = 0;

    } else if (mData) {
        if (objectsSize < mObjectsSize) {
            // Need to release refs on any objects we are dropping.
            const sp<ProcessState> proc(ProcessState::self());
            for (size_t i=objectsSize; i<mObjectsSize; i++) {
                const flat_binder_object* flat
                    = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
                if (flat->type == BINDER_TYPE_FD) {
                    // will need to rescan because we may have lopped off the only FDs
                    mFdsKnown = false;
                }
                release_object(proc, *flat, this, &mOpenAshmemSize);
            }
            binder_size_t* objects =
                (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
            if (objects) {
                mObjects = objects;
            }
            mObjectsSize = objectsSize;
            mNextObjectHint = 0;
        }

        // We own the data, so we can just do a realloc().
        if (desired > mDataCapacity) {
            uint8_t* data = (uint8_t*)realloc(mData, desired);
            if (data) {
                LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
                        desired);
                pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
                gParcelGlobalAllocSize += desired;
                gParcelGlobalAllocSize -= mDataCapacity;
                pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
                mData = data;
                mDataCapacity = desired;
            } else if (desired > mDataCapacity) {
                mError = NO_MEMORY;
                return NO_MEMORY;
            }
        } else {
            // Shrinking within capacity: just clamp size and position.
            if (mDataSize > desired) {
                mDataSize = desired;
                ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
            }
            if (mDataPos > desired) {
                mDataPos = desired;
                ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
            }
        }

    } else {
        // This is the first data.  Easy!
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }

        if(!(mDataCapacity == 0 && mObjects == NULL
             && mObjectsCapacity == 0)) {
            ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
        }

        LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;
        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);

        mData = data;
        mDataSize = mDataPos = 0;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
        mDataCapacity = desired;
    }

    return NO_ERROR;
}

// Resets every member to the empty-parcel state.  Does NOT free anything;
// callers must have released buffers first (see freeData()).
void Parcel::initState()
{
    LOG_ALLOC("Parcel %p: initState", this);
    mError = NO_ERROR;
    mData = 0;
    mDataSize = 0;
    mDataCapacity = 0;
    mDataPos = 0;
    ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
    ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
    mObjects = NULL;
    mObjectsSize = 0;
    mObjectsCapacity = 0;
    mNextObjectHint = 0;
    mHasFds = false;
    mFdsKnown = true;
    mAllowFds = true;
    mOwner = NULL;
    mOpenAshmemSize = 0;
}

// Scans the object table for any BINDER_TYPE_FD entries, updating the
// cached mHasFds / mFdsKnown flags.
void Parcel::scanForFds() const
{
    bool hasFds = false;
    for (size_t i=0; i<mObjectsSize; i++) {
        const flat_binder_object* flat
            = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
        if (flat->type == BINDER_TYPE_FD) {
            hasFds = true;
            break;
        }
    }
    mHasFds = hasFds;
    mFdsKnown = true;
}

size_t Parcel::getBlobAshmemSize() const
{
    // This used to return the size of all blobs that were written to ashmem, now we're returning
    // the ashmem currently referenced by this Parcel, which should be equivalent.
    // TODO: Remove method once ABI can be changed.
    return mOpenAshmemSize;
}

size_t Parcel::getOpenAshmemSize() const
{
    return mOpenAshmemSize;
}

// --- Parcel::Blob ---

Parcel::Blob::Blob() :
        mFd(-1), mData(NULL), mSize(0), mMutable(false) {
}

Parcel::Blob::~Blob() {
    release();
}

// Unmaps an ashmem-backed mapping, then clears all fields.
// NOTE(review): mFd itself is not closed here — for blobs read via
// readBlob() the fd comes from readFileDescriptor() and so appears to stay
// owned by the Parcel; verify no ownership path expects Blob to close it.
void Parcel::Blob::release() {
    if (mFd != -1 && mData) {
        ::munmap(mData, mSize);
    }
    clear();
}

void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
    mFd = fd;
    mData = data;
    mSize = size;
    mMutable = isMutable;
}

void Parcel::Blob::clear() {
    mFd = -1;
    mData = NULL;
    mSize = 0;
    mMutable = false;
}

}; // namespace android