// Parcel.cpp — AOSP revision aa5c2346c7291465aaca53f59878582dccbe4835
/*
 * Copyright (C) 2005 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Parcel"
//#define LOG_NDEBUG 0

#include <binder/Parcel.h>

#include <binder/IPCThreadState.h>
#include <binder/Binder.h>
#include <binder/BpBinder.h>
#include <binder/ProcessState.h>
#include <binder/TextOutput.h>

#include <errno.h>
#include <utils/Debug.h>
#include <utils/Log.h>
#include <utils/String8.h>
#include <utils/String16.h>
#include <utils/misc.h>
#include <utils/Flattenable.h>
#include <cutils/ashmem.h>

#include <private/binder/binder_module.h>
#include <private/binder/Static.h>

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/mman.h>

#ifndef INT32_MAX
#define INT32_MAX ((int32_t)(2147483647))
#endif

#define LOG_REFS(...)
//#define LOG_REFS(...) ALOG(LOG_DEBUG, "Parcel", __VA_ARGS__)
#define LOG_ALLOC(...)
//#define LOG_ALLOC(...) ALOG(LOG_DEBUG, "Parcel", __VA_ARGS__)

// ---------------------------------------------------------------------------

// This macro should never be used at runtime, as a too large value
// of s could cause an integer overflow. Instead, you should always
// use the wrapper function pad_size()
#define PAD_SIZE_UNSAFE(s) (((s)+3)&~3)

// Round s up to the next multiple of 4 (parcel data is 4-byte aligned),
// aborting instead of wrapping around when s is within 3 of SIZE_T_MAX.
static size_t pad_size(size_t s) {
    if (s > (SIZE_T_MAX - 3)) {
        abort();
    }
    return PAD_SIZE_UNSAFE(s);
}

// Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
#define STRICT_MODE_PENALTY_GATHER (0x40 << 16)

// Note: must be kept in sync with android/os/Parcel.java's EX_HAS_REPLY_HEADER
#define EX_HAS_REPLY_HEADER -128

// Maximum size of a blob to transfer in-place; larger blobs go through ashmem
// (see writeBlob()).
static const size_t IN_PLACE_BLOB_LIMIT = 40 * 1024;

// XXX This can be made public if we want to provide
// support for typed data.
struct small_flat_data
{
    uint32_t type;
    uint32_t data;
};

namespace android {

// Process-wide accounting of Parcel heap usage, guarded by
// gParcelGlobalAllocSizeLock and exposed via getGlobalAllocSize()/Count().
static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
static size_t gParcelGlobalAllocSize = 0;
static size_t gParcelGlobalAllocCount = 0;

// Take the reference that a flattened binder object embedded in a parcel
// represents: strong/weak ref for (proxy) binders, nothing for fds.
// 'who' is only used as the ref-count owner cookie for debugging.
void acquire_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who)
{
    switch (obj.type) {
        case BINDER_TYPE_BINDER:
            if (obj.binder) {
                LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie);
                // for local binders, cookie holds the IBinder* (see flatten_binder)
                reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
            }
            return;
        case BINDER_TYPE_WEAK_BINDER:
            if (obj.binder)
                // binder holds the weakref_type* for local weak binders
                reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who);
            return;
        case BINDER_TYPE_HANDLE: {
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != NULL) {
                LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
                b->incStrong(who);
            }
            return;
        }
        case BINDER_TYPE_WEAK_HANDLE: {
            const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
            if (b != NULL) b.get_refs()->incWeak(who);
            return;
        }
        case BINDER_TYPE_FD: {
            // intentionally blank -- nothing to do to acquire this, but we do
            // recognize it as a legitimate object type.
            return;
        }
    }

    ALOGD("Invalid object type 0x%08x", obj.type);
}

// Drop the reference taken by acquire_object() for the same flattened object.
// For fds, cookie != 0 means this parcel owns the descriptor and must close it.
void release_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who)
{
    switch (obj.type) {
        case BINDER_TYPE_BINDER:
            if (obj.binder) {
                LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
            }
            return;
        case BINDER_TYPE_WEAK_BINDER:
            if (obj.binder)
                reinterpret_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who);
            return;
        case BINDER_TYPE_HANDLE: {
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != NULL) {
                LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
                b->decStrong(who);
            }
            return;
        }
        case BINDER_TYPE_WEAK_HANDLE: {
            const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
            if (b != NULL) b.get_refs()->decWeak(who);
            return;
        }
        case BINDER_TYPE_FD: {
            // cookie == 1 marks an fd this parcel owns (set on dup) -- close it.
            if (obj.cookie != 0) close(obj.handle);
            return;
        }
    }

    ALOGE("Invalid object type 0x%08x", obj.type);
}

// Append the fully-populated flat_binder_object to the parcel, registering it
// in the parcel's object table (nullMetaData == false).
inline static status_t finish_flatten_binder(
    const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
{
    return out->writeObject(flat, false);
}

// Serialize a strong IBinder reference into 'out'. Local binders are written
// as BINDER_TYPE_BINDER (weakrefs ptr + IBinder* cookie); proxies as
// BINDER_TYPE_HANDLE with the driver handle; NULL as a zeroed BINDER.
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        IBinder *local = binder->localBinder();
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                ALOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
    }

    return finish_flatten_binder(binder, obj, out);
}

// Serialize a weak IBinder reference into 'out'; mirrors the strong overload
// but emits BINDER_TYPE_WEAK_BINDER / BINDER_TYPE_WEAK_HANDLE.
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const wp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        sp<IBinder> real = binder.promote();
        if (real != NULL) {
            IBinder *local = real->localBinder();
            if (!local) {
                BpBinder *proxy = real->remoteBinder();
                if (proxy == NULL) {
                    ALOGE("null proxy");
                }
                const int32_t handle = proxy ? proxy->handle() : 0;
                obj.type = BINDER_TYPE_WEAK_HANDLE;
                obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
                obj.handle = handle;
                obj.cookie = 0;
            } else {
                obj.type = BINDER_TYPE_WEAK_BINDER;
                obj.binder = reinterpret_cast<uintptr_t>(binder.get_refs());
                obj.cookie = reinterpret_cast<uintptr_t>(binder.unsafe_get());
            }
            return finish_flatten_binder(real, obj, out);
        }

        // XXX How to deal? In order to flatten the given binder,
        // we need to probe it for information, which requires a primary
        // reference... but we don't have one.
        //
        // The OpenBinder implementation uses a dynamic_cast<> here,
        // but we can't do that with the different reference counting
        // implementation we are using.
        ALOGE("Unable to unflatten Binder weak reference!");
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
        return finish_flatten_binder(NULL, obj, out);

    } else {
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
        return finish_flatten_binder(NULL, obj, out);
    }
}

// Hook kept for symmetry with finish_flatten_binder(); currently a no-op.
inline static status_t finish_unflatten_binder(
    BpBinder* /*proxy*/, const flat_binder_object& /*flat*/,
    const Parcel& /*in*/)
{
    return NO_ERROR;
}

// Read a strong binder from 'in': local objects come back as the IBinder* in
// cookie, remote ones as a proxy looked up by handle. BAD_TYPE otherwise.
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);

    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                *out = reinterpret_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
                *out = proc->getStrongProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}

// Weak counterpart of the above; also accepts strong types, demoting them.
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, wp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);

    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                *out = reinterpret_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_WEAK_BINDER:
                if (flat->binder != 0) {
                    out->set_object_and_refs(
                        reinterpret_cast<IBinder*>(flat->cookie),
                        reinterpret_cast<RefBase::weakref_type*>(flat->binder));
                } else {
                    *out = NULL;
                }
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
            case BINDER_TYPE_WEAK_HANDLE:
                *out = proc->getWeakProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->unsafe_get()), *flat, in);
        }
    }
    return BAD_TYPE;
}
// ---------------------------------------------------------------------------

Parcel::Parcel()
{
    LOG_ALLOC("Parcel %p: constructing", this);
    initState();
}

Parcel::~Parcel()
{
    freeDataNoInit();
    LOG_ALLOC("Parcel %p: destroyed", this);
}

// Total bytes currently allocated by all Parcels in this process.
size_t Parcel::getGlobalAllocSize() {
    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
    size_t size = gParcelGlobalAllocSize;
    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
    return size;
}

// Number of live Parcel allocations in this process.
size_t Parcel::getGlobalAllocCount() {
    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
    size_t count = gParcelGlobalAllocCount;
    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
    return count;
}

const uint8_t* Parcel::data() const
{
    return mData;
}

// Logical size of the parcel: the high-water mark of write position vs. size.
size_t Parcel::dataSize() const
{
    return (mDataSize > mDataPos ? mDataSize : mDataPos);
}

size_t Parcel::dataAvail() const
{
    // TODO: decide what to do about the possibility that this can
    // report an available-data size that exceeds a Java int's max
    // positive value, causing havoc.  Fortunately this will only
    // happen if someone constructs a Parcel containing more than two
    // gigabytes of data, which on typical phone hardware is simply
    // not possible.
    return dataSize() - dataPosition();
}

size_t Parcel::dataPosition() const
{
    return mDataPos;
}

size_t Parcel::dataCapacity() const
{
    return mDataCapacity;
}

// Resize the parcel's logical data size, growing the buffer if needed.
status_t Parcel::setDataSize(size_t size)
{
    if (size > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
371 return BAD_VALUE; 372 } 373 374 status_t err; 375 err = continueWrite(size); 376 if (err == NO_ERROR) { 377 mDataSize = size; 378 ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize); 379 } 380 return err; 381} 382 383void Parcel::setDataPosition(size_t pos) const 384{ 385 if (pos > INT32_MAX) { 386 // don't accept size_t values which may have come from an 387 // inadvertent conversion from a negative int. 388 abort(); 389 } 390 391 mDataPos = pos; 392 mNextObjectHint = 0; 393} 394 395status_t Parcel::setDataCapacity(size_t size) 396{ 397 if (size > INT32_MAX) { 398 // don't accept size_t values which may have come from an 399 // inadvertent conversion from a negative int. 400 return BAD_VALUE; 401 } 402 403 if (size > mDataCapacity) return continueWrite(size); 404 return NO_ERROR; 405} 406 407status_t Parcel::setData(const uint8_t* buffer, size_t len) 408{ 409 if (len > INT32_MAX) { 410 // don't accept size_t values which may have come from an 411 // inadvertent conversion from a negative int. 412 return BAD_VALUE; 413 } 414 415 status_t err = restartWrite(len); 416 if (err == NO_ERROR) { 417 memcpy(const_cast<uint8_t*>(data()), buffer, len); 418 mDataSize = len; 419 mFdsKnown = false; 420 } 421 return err; 422} 423 424status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len) 425{ 426 const sp<ProcessState> proc(ProcessState::self()); 427 status_t err; 428 const uint8_t *data = parcel->mData; 429 const binder_size_t *objects = parcel->mObjects; 430 size_t size = parcel->mObjectsSize; 431 int startPos = mDataPos; 432 int firstIndex = -1, lastIndex = -2; 433 434 if (len == 0) { 435 return NO_ERROR; 436 } 437 438 if (len > INT32_MAX) { 439 // don't accept size_t values which may have come from an 440 // inadvertent conversion from a negative int. 
441 return BAD_VALUE; 442 } 443 444 // range checks against the source parcel size 445 if ((offset > parcel->mDataSize) 446 || (len > parcel->mDataSize) 447 || (offset + len > parcel->mDataSize)) { 448 return BAD_VALUE; 449 } 450 451 // Count objects in range 452 for (int i = 0; i < (int) size; i++) { 453 size_t off = objects[i]; 454 if ((off >= offset) && (off < offset + len)) { 455 if (firstIndex == -1) { 456 firstIndex = i; 457 } 458 lastIndex = i; 459 } 460 } 461 int numObjects = lastIndex - firstIndex + 1; 462 463 if ((mDataSize+len) > mDataCapacity) { 464 // grow data 465 err = growData(len); 466 if (err != NO_ERROR) { 467 return err; 468 } 469 } 470 471 // append data 472 memcpy(mData + mDataPos, data + offset, len); 473 mDataPos += len; 474 mDataSize += len; 475 476 err = NO_ERROR; 477 478 if (numObjects > 0) { 479 // grow objects 480 if (mObjectsCapacity < mObjectsSize + numObjects) { 481 int newSize = ((mObjectsSize + numObjects)*3)/2; 482 binder_size_t *objects = 483 (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t)); 484 if (objects == (binder_size_t*)0) { 485 return NO_MEMORY; 486 } 487 mObjects = objects; 488 mObjectsCapacity = newSize; 489 } 490 491 // append and acquire objects 492 int idx = mObjectsSize; 493 for (int i = firstIndex; i <= lastIndex; i++) { 494 size_t off = objects[i] - offset + startPos; 495 mObjects[idx++] = off; 496 mObjectsSize++; 497 498 flat_binder_object* flat 499 = reinterpret_cast<flat_binder_object*>(mData + off); 500 acquire_object(proc, *flat, this); 501 502 if (flat->type == BINDER_TYPE_FD) { 503 // If this is a file descriptor, we need to dup it so the 504 // new Parcel now owns its own fd, and can declare that we 505 // officially know we have fds. 
506 flat->handle = dup(flat->handle); 507 flat->cookie = 1; 508 mHasFds = mFdsKnown = true; 509 if (!mAllowFds) { 510 err = FDS_NOT_ALLOWED; 511 } 512 } 513 } 514 } 515 516 return err; 517} 518 519bool Parcel::pushAllowFds(bool allowFds) 520{ 521 const bool origValue = mAllowFds; 522 if (!allowFds) { 523 mAllowFds = false; 524 } 525 return origValue; 526} 527 528void Parcel::restoreAllowFds(bool lastValue) 529{ 530 mAllowFds = lastValue; 531} 532 533bool Parcel::hasFileDescriptors() const 534{ 535 if (!mFdsKnown) { 536 scanForFds(); 537 } 538 return mHasFds; 539} 540 541// Write RPC headers. (previously just the interface token) 542status_t Parcel::writeInterfaceToken(const String16& interface) 543{ 544 writeInt32(IPCThreadState::self()->getStrictModePolicy() | 545 STRICT_MODE_PENALTY_GATHER); 546 // currently the interface identification token is just its name as a string 547 return writeString16(interface); 548} 549 550bool Parcel::checkInterface(IBinder* binder) const 551{ 552 return enforceInterface(binder->getInterfaceDescriptor()); 553} 554 555bool Parcel::enforceInterface(const String16& interface, 556 IPCThreadState* threadState) const 557{ 558 int32_t strictPolicy = readInt32(); 559 if (threadState == NULL) { 560 threadState = IPCThreadState::self(); 561 } 562 if ((threadState->getLastTransactionBinderFlags() & 563 IBinder::FLAG_ONEWAY) != 0) { 564 // For one-way calls, the callee is running entirely 565 // disconnected from the caller, so disable StrictMode entirely. 566 // Not only does disk/network usage not impact the caller, but 567 // there's no way to commuicate back any violations anyway. 
        threadState->setStrictModePolicy(0);
    } else {
        threadState->setStrictModePolicy(strictPolicy);
    }
    const String16 str(readString16());
    if (str == interface) {
        return true;
    } else {
        ALOGW("**** enforceInterface() expected '%s' but read '%s'",
              String8(interface).string(), String8(str).string());
        return false;
    }
}

const binder_size_t* Parcel::objects() const
{
    return mObjects;
}

size_t Parcel::objectsCount() const
{
    return mObjectsSize;
}

status_t Parcel::errorCheck() const
{
    return mError;
}

void Parcel::setError(status_t err)
{
    mError = err;
}

// Advance the write cursor by 'len' bytes that have already been written,
// bumping the logical size if the cursor passed it.
status_t Parcel::finishWrite(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    //printf("Finish write of %d\n", len);
    mDataPos += len;
    ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
    if (mDataPos > mDataSize) {
        mDataSize = mDataPos;
        ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
    }
    //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
    return NO_ERROR;
}

// Write raw bytes without 4-byte padding (most writers should use write()).
status_t Parcel::writeUnpadded(const void* data, size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    size_t end = mDataPos + len;
    if (end < mDataPos) {
        // integer overflow
        return BAD_VALUE;
    }

    if (end <= mDataCapacity) {
restart_write:
        memcpy(mData+mDataPos, data, len);
        return finishWrite(len);
    }

    status_t err = growData(len);
    if (err == NO_ERROR) goto restart_write;
    return err;
}

// Write 'len' bytes, padded to a 4-byte boundary.
status_t Parcel::write(const void* data, size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    void* const d = writeInplace(len);
    if (d) {
        memcpy(d, data, len);
        return NO_ERROR;
    }
    return mError;
}

// Reserve pad_size(len) bytes at the write cursor and return a pointer for
// the caller to fill; the pad bytes are zeroed via the endian-specific mask.
// Returns NULL on overflow or allocation failure.
void* Parcel::writeInplace(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return NULL;
    }

    const size_t padded = pad_size(len);

    // sanity check for integer overflow
    if (mDataPos+padded < mDataPos) {
        return NULL;
    }

    if ((mDataPos+padded) <= mDataCapacity) {
restart_write:
        //printf("Writing %ld bytes, padded to %ld\n", len, padded);
        uint8_t* const data = mData+mDataPos;

        // Need to pad at end?
        if (padded != len) {
#if BYTE_ORDER == BIG_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0xffffff00, 0xffff0000, 0xff000000
            };
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
            };
#endif
            //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len],
            //    *reinterpret_cast<void**>(data+padded-4));
            *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
        }

        finishWrite(padded);
        return data;
    }

    status_t err = growData(padded);
    if (err == NO_ERROR) goto restart_write;
    return NULL;
}

status_t Parcel::writeInt32(int32_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeUint32(uint32_t val)
{
    return writeAligned(val);
}

// Write length-prefixed int32 array; NULL is encoded as length -1.
status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (!val) {
        return writeAligned(-1);
    }
    status_t ret = writeAligned(len);
    if (ret == NO_ERROR) {
        ret = write(val, len * sizeof(*val));
    }
    return ret;
}

// Write length-prefixed byte array; NULL is encoded as length -1.
status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (!val) {
        return writeAligned(-1);
    }
    status_t ret = writeAligned(len);
    if (ret == NO_ERROR) {
        ret = write(val, len * sizeof(*val));
    }
    return ret;
}

status_t Parcel::writeInt64(int64_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeUint64(uint64_t val)
{
    return writeAligned(val);
}

// Pointers are widened to binder_uintptr_t so 32- and 64-bit processes agree
// on the wire size.
status_t Parcel::writePointer(uintptr_t val)
{
    return writeAligned<binder_uintptr_t>(val);
}

status_t Parcel::writeFloat(float val)
{
    return writeAligned(val);
}

#if defined(__mips__) && defined(__mips_hard_float)

// MIPS hard-float: route the double through an integer union to avoid
// alignment faults on the FPU store.
status_t Parcel::writeDouble(double val)
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = val;
    return writeAligned(u.ll);
}

#else

status_t Parcel::writeDouble(double val)
{
    return writeAligned(val);
}

#endif

// Write a NUL-terminated C string (including the terminator).
status_t Parcel::writeCString(const char* str)
{
    return write(str, strlen(str)+1);
}

status_t Parcel::writeString8(const String8& str)
{
    status_t err = writeInt32(str.bytes());
    // only write string if its length is more than zero characters,
    // as readString8 will only read if the length field is non-zero.
    // this is slightly different from how writeString16 works.
    if (str.bytes() > 0 && err == NO_ERROR) {
        err = write(str.string(), str.bytes()+1);
    }
    return err;
}

status_t Parcel::writeString16(const String16& str)
{
    return writeString16(str.string(), str.size());
}

// Write a length-prefixed UTF-16 string plus a NUL terminator; NULL str is
// encoded as length -1 (matching readString16Inplace()).
status_t Parcel::writeString16(const char16_t* str, size_t len)
{
    if (str == NULL) return writeInt32(-1);

    status_t err = writeInt32(len);
    if (err == NO_ERROR) {
        len *= sizeof(char16_t);
        uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
        if (data) {
            memcpy(data, str, len);
            *reinterpret_cast<char16_t*>(data+len) = 0;
            return NO_ERROR;
        }
        err = mError;
    }
    return err;
}

status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

status_t Parcel::writeWeakBinder(const wp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

// Serialize a native_handle: fd count, int count, dup()ed fds, then the ints.
// Counterpart of readNativeHandle().
status_t Parcel::writeNativeHandle(const native_handle* handle)
{
    if (!handle || handle->version != sizeof(native_handle))
        return BAD_TYPE;

    status_t err;
    err = writeInt32(handle->numFds);
    if (err != NO_ERROR) return err;

    err = writeInt32(handle->numInts);
    if (err != NO_ERROR) return err;

    for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
        err = writeDupFileDescriptor(handle->data[i]);

    if (err != NO_ERROR) {
        ALOGD("write native handle, write dup fd failed");
        return err;
    }
    err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
    return err;
}

// Write an fd object. takeOwnership is recorded in cookie so release_object()
// knows whether to close the fd when the parcel is destroyed.
status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership)
{
    flat_binder_object obj;
    obj.type = BINDER_TYPE_FD;
    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
    obj.handle = fd;
    obj.cookie = takeOwnership ? 1 : 0;
    return writeObject(obj, true);
}

// Duplicate 'fd' and write the copy, which the parcel then owns (and closes
// again if the write itself fails).
status_t Parcel::writeDupFileDescriptor(int fd)
{
    int dupFd = dup(fd);
    if (dupFd < 0) {
        return -errno;
    }
    status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
    if (err) {
        close(dupFd);
    }
    return err;
}

// WARNING: This method must stay in sync with
// Parcelable.Creator<ParcelFileDescriptor> CREATOR
// in frameworks/base/core/java/android/os/ParcelFileDescriptor.java
status_t Parcel::writeParcelFileDescriptor(int fd, int commChannel) {
    status_t status;

    if (fd < 0) {
        status = writeInt32(0); // ParcelFileDescriptor is null
        if (status) return status;
    } else {
        status = writeInt32(1); // ParcelFileDescriptor is not null
        if (status) return status;
        status = writeDupFileDescriptor(fd);
        if (status) return status;
        if (commChannel < 0) {
            status = writeInt32(0); // commChannel is null
            if (status) return status;
        } else {
            status = writeInt32(1); // commChannel is not null
            if (status) return status;
            status = writeDupFileDescriptor(commChannel);
        }
    }
    return status;
}

// Reserve a blob of 'len' bytes for the caller to fill via outBlob.  Small
// blobs (or when fds are forbidden) go in-place; large ones through an
// ashmem region whose fd is written into the parcel.
status_t Parcel::writeBlob(size_t len, WritableBlob* outBlob)
{
    status_t status;

    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (!mAllowFds || len <= IN_PLACE_BLOB_LIMIT) {
        ALOGV("writeBlob: write in place");
        status = writeInt32(0);   // 0 == in-place marker, see readBlob()
        if (status) return status;

        void* ptr = writeInplace(len);
        if (!ptr) return NO_MEMORY;

        outBlob->init(false /*mapped*/, ptr, len);
        return NO_ERROR;
    }

    ALOGV("writeBlob: write to ashmem");
    int fd = ashmem_create_region("Parcel Blob", len);
    if (fd < 0) return NO_MEMORY;

    mBlobAshmemSize += len;

    int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
    if (result < 0) {
        status = result;
    } else {
        void* ptr = ::mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (ptr == MAP_FAILED) {
            status = -errno;
        } else {
            // seal the region read-only for the receiver before handing it over
            result = ashmem_set_prot_region(fd, PROT_READ);
            if (result < 0) {
                status = result;
            } else {
                status = writeInt32(1);   // 1 == ashmem marker
                if (!status) {
                    status = writeFileDescriptor(fd, true /*takeOwnership*/);
                    if (!status) {
                        outBlob->init(true /*mapped*/, ptr, len);
                        return NO_ERROR;
                    }
                }
            }
        }
        // error path: undo the mapping; the fd is closed below
        ::munmap(ptr, len);
    }
    ::close(fd);
    return status;
}

// Flatten a Flattenable: int32 payload length, int32 fd count, padded
// payload, then each fd dup()ed into the parcel.
status_t Parcel::write(const FlattenableHelperInterface& val)
{
    status_t err;

    // size if needed
    const size_t len = val.getFlattenedSize();
    const size_t fd_count = val.getFdCount();

    if ((len > INT32_MAX) || (fd_count > INT32_MAX)) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    err = this->writeInt32(len);
    if (err) return err;

    err = this->writeInt32(fd_count);
    if (err) return err;

    // payload
    void* const buf = this->writeInplace(pad_size(len));
    if (buf == NULL)
        return BAD_VALUE;

    int* fds = NULL;
    if (fd_count) {
        fds = new int[fd_count];
    }

    err = val.flatten(buf, len, fds, fd_count);
    for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
        err = this->writeDupFileDescriptor( fds[i] );
    }

    if (fd_count) {
        delete [] fds;
    }

    return err;
}

// Write a flat_binder_object at the cursor and (unless it is an untracked
// NULL binder with nullMetaData == false) record its offset in the object
// table, acquiring the reference it represents.
status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
{
    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
    const bool enoughObjects = mObjectsSize < mObjectsCapacity;
    if (enoughData && enoughObjects) {
restart_write:
        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;

        // Need to write meta-data?
        if (nullMetaData || val.binder != 0) {
            mObjects[mObjectsSize] = mDataPos;
            acquire_object(ProcessState::self(), val, this);
            mObjectsSize++;
        }

        // remember if it's a file descriptor
        if (val.type == BINDER_TYPE_FD) {
            if (!mAllowFds) {
                return FDS_NOT_ALLOWED;
            }
            mHasFds = mFdsKnown = true;
        }

        return finishWrite(sizeof(flat_binder_object));
    }

    if (!enoughData) {
        const status_t err = growData(sizeof(val));
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) {
        size_t newSize = ((mObjectsSize+2)*3)/2;
        binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
        if (objects == NULL) return NO_MEMORY;
        mObjects = objects;
        mObjectsCapacity = newSize;
    }

    goto restart_write;
}

status_t Parcel::writeNoException()
{
    return writeInt32(0);
}

void Parcel::remove(size_t /*start*/, size_t /*amt*/)
{
    LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
}

// Copy 'len' bytes out of the parcel at the read cursor, advancing the
// cursor by the padded length; the extra checks reject pad_size overflow.
status_t Parcel::read(void* outData, size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        memcpy(outData, mData+mDataPos, len);
        mDataPos += pad_size(len);
        ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
        return NO_ERROR;
    }
    return NOT_ENOUGH_DATA;
}

// Like read(), but returns a pointer into the parcel's own buffer instead of
// copying.  The pointer is only valid until the parcel's data is reallocated.
const void* Parcel::readInplace(size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
1087 return NULL; 1088 } 1089 1090 if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize 1091 && len <= pad_size(len)) { 1092 const void* data = mData+mDataPos; 1093 mDataPos += pad_size(len); 1094 ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos); 1095 return data; 1096 } 1097 return NULL; 1098} 1099 1100template<class T> 1101status_t Parcel::readAligned(T *pArg) const { 1102 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T)); 1103 1104 if ((mDataPos+sizeof(T)) <= mDataSize) { 1105 const void* data = mData+mDataPos; 1106 mDataPos += sizeof(T); 1107 *pArg = *reinterpret_cast<const T*>(data); 1108 return NO_ERROR; 1109 } else { 1110 return NOT_ENOUGH_DATA; 1111 } 1112} 1113 1114template<class T> 1115T Parcel::readAligned() const { 1116 T result; 1117 if (readAligned(&result) != NO_ERROR) { 1118 result = 0; 1119 } 1120 1121 return result; 1122} 1123 1124template<class T> 1125status_t Parcel::writeAligned(T val) { 1126 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T)); 1127 1128 if ((mDataPos+sizeof(val)) <= mDataCapacity) { 1129restart_write: 1130 *reinterpret_cast<T*>(mData+mDataPos) = val; 1131 return finishWrite(sizeof(val)); 1132 } 1133 1134 status_t err = growData(sizeof(val)); 1135 if (err == NO_ERROR) goto restart_write; 1136 return err; 1137} 1138 1139status_t Parcel::readInt32(int32_t *pArg) const 1140{ 1141 return readAligned(pArg); 1142} 1143 1144int32_t Parcel::readInt32() const 1145{ 1146 return readAligned<int32_t>(); 1147} 1148 1149status_t Parcel::readUint32(uint32_t *pArg) const 1150{ 1151 return readAligned(pArg); 1152} 1153 1154uint32_t Parcel::readUint32() const 1155{ 1156 return readAligned<uint32_t>(); 1157} 1158 1159status_t Parcel::readInt64(int64_t *pArg) const 1160{ 1161 return readAligned(pArg); 1162} 1163 1164 1165int64_t Parcel::readInt64() const 1166{ 1167 return readAligned<int64_t>(); 1168} 1169 1170status_t Parcel::readUint64(uint64_t 
*pArg) const 1171{ 1172 return readAligned(pArg); 1173} 1174 1175uint64_t Parcel::readUint64() const 1176{ 1177 return readAligned<uint64_t>(); 1178} 1179 1180status_t Parcel::readPointer(uintptr_t *pArg) const 1181{ 1182 status_t ret; 1183 binder_uintptr_t ptr; 1184 ret = readAligned(&ptr); 1185 if (!ret) 1186 *pArg = ptr; 1187 return ret; 1188} 1189 1190uintptr_t Parcel::readPointer() const 1191{ 1192 return readAligned<binder_uintptr_t>(); 1193} 1194 1195 1196status_t Parcel::readFloat(float *pArg) const 1197{ 1198 return readAligned(pArg); 1199} 1200 1201 1202float Parcel::readFloat() const 1203{ 1204 return readAligned<float>(); 1205} 1206 1207#if defined(__mips__) && defined(__mips_hard_float) 1208 1209status_t Parcel::readDouble(double *pArg) const 1210{ 1211 union { 1212 double d; 1213 unsigned long long ll; 1214 } u; 1215 u.d = 0; 1216 status_t status; 1217 status = readAligned(&u.ll); 1218 *pArg = u.d; 1219 return status; 1220} 1221 1222double Parcel::readDouble() const 1223{ 1224 union { 1225 double d; 1226 unsigned long long ll; 1227 } u; 1228 u.ll = readAligned<unsigned long long>(); 1229 return u.d; 1230} 1231 1232#else 1233 1234status_t Parcel::readDouble(double *pArg) const 1235{ 1236 return readAligned(pArg); 1237} 1238 1239double Parcel::readDouble() const 1240{ 1241 return readAligned<double>(); 1242} 1243 1244#endif 1245 1246status_t Parcel::readIntPtr(intptr_t *pArg) const 1247{ 1248 return readAligned(pArg); 1249} 1250 1251 1252intptr_t Parcel::readIntPtr() const 1253{ 1254 return readAligned<intptr_t>(); 1255} 1256 1257 1258const char* Parcel::readCString() const 1259{ 1260 const size_t avail = mDataSize-mDataPos; 1261 if (avail > 0) { 1262 const char* str = reinterpret_cast<const char*>(mData+mDataPos); 1263 // is the string's trailing NUL within the parcel's valid bounds? 
        const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
        if (eos) {
            const size_t len = eos - str;
            // Advance past the string and its NUL, padded to 4 bytes.
            mDataPos += pad_size(len+1);
            ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
            return str;
        }
    }
    return NULL;
}

String8 Parcel::readString8() const
{
    int32_t size = readInt32();
    // watch for potential int overflow adding 1 for trailing NUL
    if (size > 0 && size < INT32_MAX) {
        const char* str = (const char*)readInplace(size+1);
        if (str) return String8(str, size);
    }
    return String8();
}

String16 Parcel::readString16() const
{
    size_t len;
    const char16_t* str = readString16Inplace(&len);
    if (str) return String16(str, len);
    ALOGE("Reading a NULL string not supported here.");
    return String16();
}

// Returns a pointer into the parcel buffer and the character count via
// outLen; NULL (with *outLen = 0) on a negative size or short buffer.
// NOTE(review): on 32-bit size_t, (size+1)*sizeof(char16_t) can wrap for
// sizes approaching INT32_MAX; readInplace's own bounds check is the only
// backstop — verify against current upstream hardening.
const char16_t* Parcel::readString16Inplace(size_t* outLen) const
{
    int32_t size = readInt32();
    // watch for potential int overflow from size+1
    if (size >= 0 && size < INT32_MAX) {
        *outLen = size;
        const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
        if (str != NULL) {
            return str;
        }
    }
    *outLen = 0;
    return NULL;
}

sp<IBinder> Parcel::readStrongBinder() const
{
    sp<IBinder> val;
    unflatten_binder(ProcessState::self(), *this, &val);
    return val;
}

wp<IBinder> Parcel::readWeakBinder() const
{
    wp<IBinder> val;
    unflatten_binder(ProcessState::self(), *this, &val);
    return val;
}

// Reads the exception code written by the Java-side Parcel; the special
// value EX_HAS_REPLY_HEADER means a StrictMode "fat header" follows.
int32_t Parcel::readExceptionCode() const
{
    int32_t exception_code = readAligned<int32_t>();
    if (exception_code == EX_HAS_REPLY_HEADER) {
        int32_t header_start = dataPosition();
        int32_t header_size = readAligned<int32_t>();
        // Skip over fat responses headers.
Not used (or propagated) in 1331 // native code 1332 setDataPosition(header_start + header_size); 1333 // And fat response headers are currently only used when there are no 1334 // exceptions, so return no error: 1335 return 0; 1336 } 1337 return exception_code; 1338} 1339 1340native_handle* Parcel::readNativeHandle() const 1341{ 1342 int numFds, numInts; 1343 status_t err; 1344 err = readInt32(&numFds); 1345 if (err != NO_ERROR) return 0; 1346 err = readInt32(&numInts); 1347 if (err != NO_ERROR) return 0; 1348 1349 native_handle* h = native_handle_create(numFds, numInts); 1350 for (int i=0 ; err==NO_ERROR && i<numFds ; i++) { 1351 h->data[i] = dup(readFileDescriptor()); 1352 if (h->data[i] < 0) err = BAD_VALUE; 1353 } 1354 err = read(h->data + numFds, sizeof(int)*numInts); 1355 if (err != NO_ERROR) { 1356 native_handle_close(h); 1357 native_handle_delete(h); 1358 h = 0; 1359 } 1360 return h; 1361} 1362 1363 1364int Parcel::readFileDescriptor() const 1365{ 1366 const flat_binder_object* flat = readObject(true); 1367 if (flat) { 1368 switch (flat->type) { 1369 case BINDER_TYPE_FD: 1370 //ALOGI("Returning file descriptor %ld from parcel %p", flat->handle, this); 1371 return flat->handle; 1372 } 1373 } 1374 return BAD_TYPE; 1375} 1376 1377// WARNING: This method must stay in sync with writeToParcel() 1378// in frameworks/base/core/java/android/os/ParcelFileDescriptor.java 1379int Parcel::readParcelFileDescriptor(int& outCommChannel) const { 1380 int fd; 1381 outCommChannel = -1; 1382 1383 if (readInt32() == 0) { 1384 fd = -1; 1385 } else { 1386 fd = readFileDescriptor(); 1387 if (fd >= 0 && readInt32() != 0) { 1388 outCommChannel = readFileDescriptor(); 1389 } 1390 } 1391 return fd; 1392} 1393 1394status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const 1395{ 1396 int32_t useAshmem; 1397 status_t status = readInt32(&useAshmem); 1398 if (status) return status; 1399 1400 if (!useAshmem) { 1401 ALOGV("readBlob: read in place"); 1402 const void* ptr = 
                readInplace(len);
        if (!ptr) return BAD_VALUE;

        outBlob->init(false /*mapped*/, const_cast<void*>(ptr), len);
        return NO_ERROR;
    }

    ALOGV("readBlob: read from ashmem");
    int fd = readFileDescriptor();
    if (fd == int(BAD_TYPE)) return BAD_VALUE;

    void* ptr = ::mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED) return NO_MEMORY;

    outBlob->init(true /*mapped*/, ptr, len);
    return NO_ERROR;
}

// Unflatten a Flattenable: payload length, fd count, padded payload bytes,
// then each fd (dup()ed before handing to unflatten()).
status_t Parcel::read(FlattenableHelperInterface& val) const
{
    // size
    const size_t len = this->readInt32();
    const size_t fd_count = this->readInt32();

    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // payload
    void const* const buf = this->readInplace(pad_size(len));
    if (buf == NULL)
        return BAD_VALUE;

    int* fds = NULL;
    if (fd_count) {
        fds = new int[fd_count];
    }

    status_t err = NO_ERROR;
    for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
        fds[i] = dup(this->readFileDescriptor());
        if (fds[i] < 0) {
            err = BAD_VALUE;
            ALOGE("dup() failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s",
                  i, fds[i], fd_count, strerror(errno));
        }
    }

    if (err == NO_ERROR) {
        // NOTE(review): unflatten presumably takes ownership of the dup()ed
        // fds; on the error path above, already-dup()ed fds are NOT closed
        // here — verify against FlattenableHelperInterface implementations.
        err = val.unflatten(buf, len, fds, fd_count);
    }

    if (fd_count) {
        delete [] fds;
    }

    return err;
}
// Return the flat_binder_object at the current position, validating (via
// the mObjects offset list) that it is a real object. nullMetaData allows
// reading NULL objects, which are never entered in the offset list.
const flat_binder_object* Parcel::readObject(bool nullMetaData) const
{
    const size_t DPOS = mDataPos;
    if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
        const flat_binder_object* obj
                = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
        mDataPos = DPOS + sizeof(flat_binder_object);
        if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
            // When transferring a NULL object, we
            // don't write it into
            // the object list, so we don't want to check for it when
            // reading.
            ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
            return obj;
        }

        // Ensure that this object is valid...
        binder_size_t* const OBJS = mObjects;
        const size_t N = mObjectsSize;
        size_t opos = mNextObjectHint;

        if (N > 0) {
            ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
                 this, DPOS, opos);

            // Start at the current hint position, looking for an object at
            // the current data position.
            if (opos < N) {
                while (opos < (N-1) && OBJS[opos] < DPOS) {
                    opos++;
                }
            } else {
                // Hint is past the end of the (sorted) offset list; clamp
                // to the last entry before searching.
                opos = N-1;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with forward search",
                     this, DPOS, opos);
                mNextObjectHint = opos+1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }

            // Look backwards for it...
            while (opos > 0 && OBJS[opos] > DPOS) {
                opos--;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with backward search",
                     this, DPOS, opos);
                mNextObjectHint = opos+1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }
        }
        ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
             this, DPOS);
    }
    return NULL;
}

// Close every file descriptor carried in the object list (used when the
// data is discarded without handing the fds to anyone else).
void Parcel::closeFileDescriptors()
{
    size_t i = mObjectsSize;
    if (i > 0) {
        //ALOGI("Closing file descriptors for %zu objects...", i);
    }
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
        if (flat->type == BINDER_TYPE_FD) {
            //ALOGI("Closing fd: %ld", flat->handle);
            close(flat->handle);
        }
    }
}

// Raw buffer accessors used when handing the parcel to the binder driver.
uintptr_t Parcel::ipcData() const
{
    return reinterpret_cast<uintptr_t>(mData);
}

size_t Parcel::ipcDataSize() const
{
    // The write cursor can be beyond mDataSize; report whichever is larger.
    return (mDataSize > mDataPos ? mDataSize : mDataPos);
}

uintptr_t Parcel::ipcObjects() const
{
    return reinterpret_cast<uintptr_t>(mObjects);
}

size_t Parcel::ipcObjectsCount() const
{
    return mObjectsSize;
}

// Adopt a buffer owned elsewhere (relFunc releases it later). Object
// offsets are validated below to be increasing and non-overlapping before
// the parcel is scanned for fds.
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
    const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
{
    binder_size_t minOffset = 0;
    freeDataNoInit();
    mError = NO_ERROR;
    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
    mDataPos = 0;
    ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
    mObjects = const_cast<binder_size_t*>(objects);
    mObjectsSize = mObjectsCapacity = objectsCount;
    mNextObjectHint = 0;
    mOwner = relFunc;
    mOwnerCookie = relCookie;
    for (size_t i = 0; i < mObjectsSize;
            i++) {
        binder_size_t offset = mObjects[i];
        if (offset < minOffset) {
            // Malformed offsets: drop all objects rather than risk reading
            // overlapping flat_binder_objects later.
            ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
                  __func__, (uint64_t)offset, (uint64_t)minOffset);
            mObjectsSize = 0;
            break;
        }
        minOffset = offset + sizeof(flat_binder_object);
    }
    scanForFds();
}

// Dump the parcel (hex payload plus one line per flattened object) to 'to'.
void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
{
    to << "Parcel(";

    if (errorCheck() != NO_ERROR) {
        const status_t err = errorCheck();
        to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
    } else if (dataSize() > 0) {
        const uint8_t* DATA = data();
        to << indent << HexDump(DATA, dataSize()) << dedent;
        const binder_size_t* OBJS = objects();
        const size_t N = objectsCount();
        for (size_t i=0; i<N; i++) {
            const flat_binder_object* flat
                = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
            to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
                << TypeCode(flat->type & 0x7f7f7f00)
                << " = " << flat->binder;
        }
    } else {
        to << "NULL";
    }

    to << ")";
}

// Drop the reference taken on every binder object in the parcel.
void Parcel::releaseObjects()
{
    const sp<ProcessState> proc(ProcessState::self());
    size_t i = mObjectsSize;
    uint8_t* const data = mData;
    binder_size_t* const objects = mObjects;
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(data+objects[i]);
        release_object(proc, *flat, this);
    }
}

// Mirror of releaseObjects(): take a reference on every binder object.
void Parcel::acquireObjects()
{
    const sp<ProcessState> proc(ProcessState::self());
    size_t i = mObjectsSize;
    uint8_t* const data = mData;
    binder_size_t* const objects = mObjects;
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(data+objects[i]);
        acquire_object(proc, *flat, this);
    }
}

void Parcel::freeData()
{
    freeDataNoInit();
1646 initState(); 1647} 1648 1649void Parcel::freeDataNoInit() 1650{ 1651 if (mOwner) { 1652 LOG_ALLOC("Parcel %p: freeing other owner data", this); 1653 //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid()); 1654 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie); 1655 } else { 1656 LOG_ALLOC("Parcel %p: freeing allocated data", this); 1657 releaseObjects(); 1658 if (mData) { 1659 LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity); 1660 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 1661 gParcelGlobalAllocSize -= mDataCapacity; 1662 gParcelGlobalAllocCount--; 1663 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 1664 free(mData); 1665 } 1666 if (mObjects) free(mObjects); 1667 } 1668} 1669 1670status_t Parcel::growData(size_t len) 1671{ 1672 if (len > INT32_MAX) { 1673 // don't accept size_t values which may have come from an 1674 // inadvertent conversion from a negative int. 1675 return BAD_VALUE; 1676 } 1677 1678 size_t newSize = ((mDataSize+len)*3)/2; 1679 return (newSize <= mDataSize) 1680 ? (status_t) NO_MEMORY 1681 : continueWrite(newSize); 1682} 1683 1684status_t Parcel::restartWrite(size_t desired) 1685{ 1686 if (desired > INT32_MAX) { 1687 // don't accept size_t values which may have come from an 1688 // inadvertent conversion from a negative int. 
        return BAD_VALUE;
    }

    if (mOwner) {
        // We don't own the buffer; release it and allocate a fresh one.
        freeData();
        return continueWrite(desired);
    }

    uint8_t* data = (uint8_t*)realloc(mData, desired);
    if (!data && desired > mDataCapacity) {
        mError = NO_MEMORY;
        return NO_MEMORY;
    }

    releaseObjects();

    if (data) {
        LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocSize -= mDataCapacity;
        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
        mData = data;
        mDataCapacity = desired;
    }

    mDataSize = mDataPos = 0;
    ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
    ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);

    free(mObjects);
    mObjects = NULL;
    mObjectsSize = mObjectsCapacity = 0;
    mNextObjectHint = 0;
    mHasFds = false;
    mFdsKnown = true;
    mAllowFds = true;

    return NO_ERROR;
}

// Resize the buffer to 'desired' total capacity, preserving existing data
// and object references that still fit.
status_t Parcel::continueWrite(size_t desired)
{
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // If shrinking, first adjust for any objects that appear
    // after the new data size.
    size_t objectsSize = mObjectsSize;
    if (desired < mDataSize) {
        if (desired == 0) {
            objectsSize = 0;
        } else {
            while (objectsSize > 0) {
                if (mObjects[objectsSize-1] < desired)
                    break;
                objectsSize--;
            }
        }
    }

    if (mOwner) {
        // If the size is going to zero, just release the owner's data.
        if (desired == 0) {
            freeData();
            return NO_ERROR;
        }

        // If there is a different owner, we need to take
        // possession.
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }
        binder_size_t* objects = NULL;

        if (objectsSize) {
            objects = (binder_size_t*)malloc(objectsSize*sizeof(binder_size_t));
            if (!objects) {
                free(data);

                mError = NO_MEMORY;
                return NO_MEMORY;
            }

            // Little hack to only acquire references on objects
            // we will be keeping.
            size_t oldObjectsSize = mObjectsSize;
            mObjectsSize = objectsSize;
            acquireObjects();
            mObjectsSize = oldObjectsSize;
        }

        // Copy what fits, then let the previous owner release its buffer.
        if (mData) {
            memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
        }
        if (objects && mObjects) {
            memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
        }
        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
        mOwner = NULL;

        LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;
        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);

        mData = data;
        mObjects = objects;
        mDataSize = (mDataSize < desired) ? mDataSize : desired;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        mDataCapacity = desired;
        mObjectsSize = mObjectsCapacity = objectsSize;
        mNextObjectHint = 0;

    } else if (mData) {
        if (objectsSize < mObjectsSize) {
            // Need to release refs on any objects we are dropping.
            const sp<ProcessState> proc(ProcessState::self());
            for (size_t i=objectsSize; i<mObjectsSize; i++) {
                const flat_binder_object* flat
                    = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
                if (flat->type == BINDER_TYPE_FD) {
                    // will need to rescan because we may have lopped off the only FDs
                    mFdsKnown = false;
                }
                release_object(proc, *flat, this);
            }
            // Shrink-realloc of the offset array; failure is fine, the old
            // (larger) array is kept and only mObjectsSize changes.
            binder_size_t* objects =
                (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
            if (objects) {
                mObjects = objects;
            }
            mObjectsSize = objectsSize;
            mNextObjectHint = 0;
        }

        // We own the data, so we can just do a realloc().
        if (desired > mDataCapacity) {
            uint8_t* data = (uint8_t*)realloc(mData, desired);
            if (data) {
                LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
                        desired);
                pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
                gParcelGlobalAllocSize += desired;
                gParcelGlobalAllocSize -= mDataCapacity;
                pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
                mData = data;
                mDataCapacity = desired;
            } else if (desired > mDataCapacity) {
                // NOTE(review): this condition is always true here (we are
                // already inside the desired > mDataCapacity branch), so
                // this is effectively a plain 'else'.
                mError = NO_MEMORY;
                return NO_MEMORY;
            }
        } else {
            if (mDataSize > desired) {
                mDataSize = desired;
                ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
            }
            if (mDataPos > desired) {
                mDataPos = desired;
                ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
            }
        }

    } else {
        // This is the first data.  Easy!
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }

        // With no data buffer, capacity and object bookkeeping should all
        // be zero/NULL; log loudly if that invariant is broken.
        if(!(mDataCapacity == 0 && mObjects == NULL
             && mObjectsCapacity == 0)) {
            ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
        }

        LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;
        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);

        mData = data;
        mDataSize = mDataPos = 0;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
        mDataCapacity = desired;
    }

    return NO_ERROR;
}

// Reset every member to the empty-parcel state. Frees nothing — pair with
// freeDataNoInit() (see freeData()).
void Parcel::initState()
{
    LOG_ALLOC("Parcel %p: initState", this);
    mError = NO_ERROR;
    mData = 0;
    mDataSize = 0;
    mDataCapacity = 0;
    mDataPos = 0;
    ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
    ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
    mObjects = NULL;
    mObjectsSize = 0;
    mObjectsCapacity = 0;
    mNextObjectHint = 0;
    mHasFds = false;
    mFdsKnown = true;
    mAllowFds = true;
    mOwner = NULL;
    mBlobAshmemSize = 0;
}

// Recompute mHasFds by scanning the object list for BINDER_TYPE_FD entries.
void Parcel::scanForFds() const
{
    bool hasFds = false;
    for (size_t i=0; i<mObjectsSize; i++) {
        const flat_binder_object* flat
            = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
        if (flat->type == BINDER_TYPE_FD) {
            hasFds = true;
            break;
        }
    }
    mHasFds = hasFds;
    mFdsKnown = true;
}

size_t Parcel::getBlobAshmemSize() const
{
    return mBlobAshmemSize;
}

// --- Parcel::Blob ---

Parcel::Blob::Blob() :
        mMapped(false), mData(NULL), mSize(0) {
}
1935Parcel::Blob::~Blob() { 1936 release(); 1937} 1938 1939void Parcel::Blob::release() { 1940 if (mMapped && mData) { 1941 ::munmap(mData, mSize); 1942 } 1943 clear(); 1944} 1945 1946void Parcel::Blob::init(bool mapped, void* data, size_t size) { 1947 mMapped = mapped; 1948 mData = data; 1949 mSize = size; 1950} 1951 1952void Parcel::Blob::clear() { 1953 mMapped = false; 1954 mData = NULL; 1955 mSize = 0; 1956} 1957 1958}; // namespace android 1959