Parcel.cpp revision 37b44969c0ca1d00e213da685dfbb2807f2bab30
1/* 2 * Copyright (C) 2005 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#define LOG_TAG "Parcel" 18//#define LOG_NDEBUG 0 19 20#include <binder/Parcel.h> 21 22#include <binder/IPCThreadState.h> 23#include <binder/Binder.h> 24#include <binder/BpBinder.h> 25#include <utils/Debug.h> 26#include <binder/ProcessState.h> 27#include <utils/Log.h> 28#include <utils/String8.h> 29#include <utils/String16.h> 30#include <utils/TextOutput.h> 31#include <utils/misc.h> 32#include <utils/Flattenable.h> 33#include <cutils/ashmem.h> 34 35#include <private/binder/binder_module.h> 36 37#include <stdio.h> 38#include <stdlib.h> 39#include <stdint.h> 40#include <sys/mman.h> 41 42#ifndef INT32_MAX 43#define INT32_MAX ((int32_t)(2147483647)) 44#endif 45 46#define LOG_REFS(...) 47//#define LOG_REFS(...) ALOG(LOG_DEBUG, "Parcel", __VA_ARGS__) 48 49// --------------------------------------------------------------------------- 50 51#define PAD_SIZE(s) (((s)+3)&~3) 52 53// Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER 54#define STRICT_MODE_PENALTY_GATHER 0x100 55 56// Note: must be kept in sync with android/os/Parcel.java's EX_HAS_REPLY_HEADER 57#define EX_HAS_REPLY_HEADER -128 58 59// Maximum size of a blob to transfer in-place. 60static const size_t IN_PLACE_BLOB_LIMIT = 40 * 1024; 61 62// XXX This can be made public if we want to provide 63// support for typed data. 
// Flat representation of a small piece of typed data.
// XXX This can be made public if we want to provide
// support for typed data.
struct small_flat_data
{
    uint32_t type;
    uint32_t data;
};

namespace android {

// Takes a reference (strong, weak, or none, depending on the flattened
// object's type) on behalf of 'who'.  Called whenever a Parcel assumes
// ownership of the objects it carries.
void acquire_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who)
{
    switch (obj.type) {
        case BINDER_TYPE_BINDER:
            // Local strong binder: the IBinder lives in this process,
            // its address is carried in 'cookie'.
            if (obj.binder) {
                LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie);
                static_cast<IBinder*>(obj.cookie)->incStrong(who);
            }
            return;
        case BINDER_TYPE_WEAK_BINDER:
            // Local weak binder: 'binder' carries the weakref table.
            if (obj.binder)
                static_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who);
            return;
        case BINDER_TYPE_HANDLE: {
            // Remote strong binder: resolve the handle to a proxy first.
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != NULL) {
                LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
                b->incStrong(who);
            }
            return;
        }
        case BINDER_TYPE_WEAK_HANDLE: {
            const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
            if (b != NULL) b.get_refs()->incWeak(who);
            return;
        }
        case BINDER_TYPE_FD: {
            // intentionally blank -- nothing to do to acquire this, but we do
            // recognize it as a legitimate object type.
            return;
        }
    }

    ALOGD("Invalid object type 0x%08lx", obj.type);
}

// Inverse of acquire_object(): drops the reference taken on behalf of
// 'who', and closes owned file descriptors.
void release_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who)
{
    switch (obj.type) {
        case BINDER_TYPE_BINDER:
            if (obj.binder) {
                LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie);
                static_cast<IBinder*>(obj.cookie)->decStrong(who);
            }
            return;
        case BINDER_TYPE_WEAK_BINDER:
            if (obj.binder)
                static_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who);
            return;
        case BINDER_TYPE_HANDLE: {
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != NULL) {
                LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
                b->decStrong(who);
            }
            return;
        }
        case BINDER_TYPE_WEAK_HANDLE: {
            const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
            if (b != NULL) b.get_refs()->decWeak(who);
            return;
        }
        case BINDER_TYPE_FD: {
            // A non-zero cookie means this Parcel owns the descriptor
            // (it was dup()ed on the way in), so it must be closed here.
            if (obj.cookie != (void*)0) close(obj.handle);
            return;
        }
    }

    ALOGE("Invalid object type 0x%08lx", obj.type);
}

// Writes the fully populated flat_binder_object into 'out'
// (no null meta-data entry).
inline static status_t finish_flatten_binder(
    const sp<IBinder>& binder, const flat_binder_object& flat, Parcel* out)
{
    return out->writeObject(flat, false);
}

// Flattens a strong binder reference into 'out': local binders are
// written as BINDER_TYPE_BINDER records, remote proxies as
// BINDER_TYPE_HANDLE records carrying the driver handle.
status_t flatten_binder(const sp<ProcessState>& proc,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        IBinder *local = binder->localBinder();
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                ALOGE("null proxy");
            }
            const int32_t handle = proxy ?
proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.handle = handle;
            obj.cookie = NULL;
        } else {
            // Local object: record both its weakref table and its address.
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = local->getWeakRefs();
            obj.cookie = local;
        }
    } else {
        // A NULL binder is written as an all-null BINDER_TYPE_BINDER record.
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = NULL;
        obj.cookie = NULL;
    }

    return finish_flatten_binder(binder, obj, out);
}

// Flattens a weak binder reference into 'out'.  The weak reference is
// promoted in order to probe the underlying object; if promotion fails,
// the object cannot be inspected and a null record is written instead.
status_t flatten_binder(const sp<ProcessState>& proc,
    const wp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        sp<IBinder> real = binder.promote();
        if (real != NULL) {
            IBinder *local = real->localBinder();
            if (!local) {
                BpBinder *proxy = real->remoteBinder();
                if (proxy == NULL) {
                    ALOGE("null proxy");
                }
                const int32_t handle = proxy ? proxy->handle() : 0;
                obj.type = BINDER_TYPE_WEAK_HANDLE;
                obj.handle = handle;
                obj.cookie = NULL;
            } else {
                obj.type = BINDER_TYPE_WEAK_BINDER;
                obj.binder = binder.get_refs();
                obj.cookie = binder.unsafe_get();
            }
            return finish_flatten_binder(real, obj, out);
        }

        // XXX How to deal?  In order to flatten the given binder,
        // we need to probe it for information, which requires a primary
        // reference...  but we don't have one.
        //
        // The OpenBinder implementation uses a dynamic_cast<> here,
        // but we can't do that with the different reference counting
        // implementation we are using.
        ALOGE("Unable to unflatten Binder weak reference!");
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = NULL;
        obj.cookie = NULL;
        return finish_flatten_binder(NULL, obj, out);

    } else {
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = NULL;
        obj.cookie = NULL;
        return finish_flatten_binder(NULL, obj, out);
    }
}

// Hook for post-read processing of an unflattened binder; currently a no-op.
inline static status_t finish_unflatten_binder(
    BpBinder* proxy, const flat_binder_object& flat, const Parcel& in)
{
    return NO_ERROR;
}

// Reads a strong binder out of 'in'.  Local objects come back as the
// original IBinder pointer; remote ones are resolved to a proxy through
// ProcessState.  Returns BAD_TYPE for anything else.
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);

    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                *out = static_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
                *out = proc->getStrongProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}

// Reads a weak binder out of 'in'.  Strong records are accepted too and
// are demoted to weak references.
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, wp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);

    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                *out = static_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_WEAK_BINDER:
                if (flat->binder != NULL) {
                    // Rebuild the wp<> from the object and its weakref table.
                    out->set_object_and_refs(
                        static_cast<IBinder*>(flat->cookie),
                        static_cast<RefBase::weakref_type*>(flat->binder));
                } else {
                    *out = NULL;
                }
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
            case BINDER_TYPE_WEAK_HANDLE:
                *out = proc->getWeakProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->unsafe_get()), *flat, in);
        }
    }
    return BAD_TYPE;
}
// ---------------------------------------------------------------------------

Parcel::Parcel()
{
    initState();
}

Parcel::~Parcel()
{
    freeDataNoInit();
}

// Raw pointer to the parcel's data buffer.
const uint8_t* Parcel::data() const
{
    return mData;
}

// Logical size: the high-water mark of writes or the current position,
// whichever is larger.
size_t Parcel::dataSize() const
{
    return (mDataSize > mDataPos ? mDataSize : mDataPos);
}

// Bytes remaining to be read from the current position.
size_t Parcel::dataAvail() const
{
    // TODO: decide what to do about the possibility that this can
    // report an available-data size that exceeds a Java int's max
    // positive value, causing havoc.  Fortunately this will only
    // happen if someone constructs a Parcel containing more than two
    // gigabytes of data, which on typical phone hardware is simply
    // not possible.
    return dataSize() - dataPosition();
}

size_t Parcel::dataPosition() const
{
    return mDataPos;
}

size_t Parcel::dataCapacity() const
{
    return mDataCapacity;
}

// Resizes the parcel's data, growing the buffer if needed.
status_t Parcel::setDataSize(size_t size)
{
    status_t err;
    err = continueWrite(size);
    if (err == NO_ERROR) {
        mDataSize = size;
        ALOGV("setDataSize Setting data size of %p to %d\n", this, mDataSize);
    }
    return err;
}

// Moves the read/write cursor; also resets the object-lookup hint used
// by readObject().
void Parcel::setDataPosition(size_t pos) const
{
    mDataPos = pos;
    mNextObjectHint = 0;
}

status_t Parcel::setDataCapacity(size_t size)
{
    // Capacity only ever grows here; shrinking is a no-op.
    if (size > mDataCapacity) return continueWrite(size);
    return NO_ERROR;
}

// Replaces the parcel's contents with a copy of 'buffer'.
status_t Parcel::setData(const uint8_t* buffer, size_t len)
{
    status_t err = restartWrite(len);
    if (err == NO_ERROR) {
        memcpy(const_cast<uint8_t*>(data()), buffer, len);
        mDataSize = len;
        // Raw bytes may contain fd records we don't know about yet.
        mFdsKnown = false;
    }
    return err;
}

// Appends [offset, offset+len) of 'parcel' to this parcel, duplicating
// any flattened objects (and dup()ing file descriptors) in that range.
status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len)
{
    const sp<ProcessState> proc(ProcessState::self());
    status_t err;
    const uint8_t *data = parcel->mData;
    const size_t *objects = parcel->mObjects;
    size_t size = parcel->mObjectsSize;
    int startPos = mDataPos;
    int firstIndex = -1, lastIndex = -2;

    if (len == 0) {
        return NO_ERROR;
    }

    // range checks against the source parcel size
    // NOTE(review): offset + len can wrap around for huge size_t values,
    // bypassing this check -- presumably callers pass sane sizes; verify.
    if ((offset > parcel->mDataSize)
            || (len > parcel->mDataSize)
            || (offset + len > parcel->mDataSize)) {
        return BAD_VALUE;
    }

    // Count objects in range
    for (int i = 0; i < (int) size; i++) {
        size_t off = objects[i];
        if ((off >= offset) && (off < offset + len)) {
            if (firstIndex == -1) {
                firstIndex = i;
            }
            lastIndex = i;
        }
    }
    int numObjects = lastIndex - firstIndex + 1;

    if ((mDataSize+len) > mDataCapacity) {
        // grow data
        err = growData(len);
        if (err != NO_ERROR) {
            return err;
        }
    }

    // append data
    memcpy(mData + mDataPos, data + offset, len);
    mDataPos += len;
    mDataSize += len;

    err = NO_ERROR;

    if (numObjects > 0) {
        // grow objects
        if (mObjectsCapacity < mObjectsSize + numObjects) {
            int newSize = ((mObjectsSize + numObjects)*3)/2;
            size_t *objects =
                (size_t*)realloc(mObjects, newSize*sizeof(size_t));
            if (objects == (size_t*)0) {
                return NO_MEMORY;
            }
            mObjects = objects;
            mObjectsCapacity = newSize;
        }

        // append and acquire objects
        int idx = mObjectsSize;
        for (int i = firstIndex; i <= lastIndex; i++) {
            // Re-base each object offset from the source range onto our
            // own buffer.
            size_t off = objects[i] - offset + startPos;
            mObjects[idx++] = off;
            mObjectsSize++;

            flat_binder_object* flat
                = reinterpret_cast<flat_binder_object*>(mData + off);
            acquire_object(proc, *flat, this);

            if (flat->type == BINDER_TYPE_FD) {
                // If this is a file descriptor, we need to dup it so the
                // new Parcel now owns its own fd, and can declare that we
                // officially know we have fds.
437 flat->handle = dup(flat->handle); 438 flat->cookie = (void*)1; 439 mHasFds = mFdsKnown = true; 440 if (!mAllowFds) { 441 err = FDS_NOT_ALLOWED; 442 } 443 } 444 } 445 } 446 447 return err; 448} 449 450bool Parcel::pushAllowFds(bool allowFds) 451{ 452 const bool origValue = mAllowFds; 453 if (!allowFds) { 454 mAllowFds = false; 455 } 456 return origValue; 457} 458 459void Parcel::restoreAllowFds(bool lastValue) 460{ 461 mAllowFds = lastValue; 462} 463 464bool Parcel::hasFileDescriptors() const 465{ 466 if (!mFdsKnown) { 467 scanForFds(); 468 } 469 return mHasFds; 470} 471 472// Write RPC headers. (previously just the interface token) 473status_t Parcel::writeInterfaceToken(const String16& interface) 474{ 475 writeInt32(IPCThreadState::self()->getStrictModePolicy() | 476 STRICT_MODE_PENALTY_GATHER); 477 // currently the interface identification token is just its name as a string 478 return writeString16(interface); 479} 480 481bool Parcel::checkInterface(IBinder* binder) const 482{ 483 return enforceInterface(binder->getInterfaceDescriptor()); 484} 485 486bool Parcel::enforceInterface(const String16& interface, 487 IPCThreadState* threadState) const 488{ 489 int32_t strictPolicy = readInt32(); 490 if (threadState == NULL) { 491 threadState = IPCThreadState::self(); 492 } 493 if ((threadState->getLastTransactionBinderFlags() & 494 IBinder::FLAG_ONEWAY) != 0) { 495 // For one-way calls, the callee is running entirely 496 // disconnected from the caller, so disable StrictMode entirely. 497 // Not only does disk/network usage not impact the caller, but 498 // there's no way to commuicate back any violations anyway. 
499 threadState->setStrictModePolicy(0); 500 } else { 501 threadState->setStrictModePolicy(strictPolicy); 502 } 503 const String16 str(readString16()); 504 if (str == interface) { 505 return true; 506 } else { 507 ALOGW("**** enforceInterface() expected '%s' but read '%s'\n", 508 String8(interface).string(), String8(str).string()); 509 return false; 510 } 511} 512 513const size_t* Parcel::objects() const 514{ 515 return mObjects; 516} 517 518size_t Parcel::objectsCount() const 519{ 520 return mObjectsSize; 521} 522 523status_t Parcel::errorCheck() const 524{ 525 return mError; 526} 527 528void Parcel::setError(status_t err) 529{ 530 mError = err; 531} 532 533status_t Parcel::finishWrite(size_t len) 534{ 535 //printf("Finish write of %d\n", len); 536 mDataPos += len; 537 ALOGV("finishWrite Setting data pos of %p to %d\n", this, mDataPos); 538 if (mDataPos > mDataSize) { 539 mDataSize = mDataPos; 540 ALOGV("finishWrite Setting data size of %p to %d\n", this, mDataSize); 541 } 542 //printf("New pos=%d, size=%d\n", mDataPos, mDataSize); 543 return NO_ERROR; 544} 545 546status_t Parcel::writeUnpadded(const void* data, size_t len) 547{ 548 size_t end = mDataPos + len; 549 if (end < mDataPos) { 550 // integer overflow 551 return BAD_VALUE; 552 } 553 554 if (end <= mDataCapacity) { 555restart_write: 556 memcpy(mData+mDataPos, data, len); 557 return finishWrite(len); 558 } 559 560 status_t err = growData(len); 561 if (err == NO_ERROR) goto restart_write; 562 return err; 563} 564 565status_t Parcel::write(const void* data, size_t len) 566{ 567 void* const d = writeInplace(len); 568 if (d) { 569 memcpy(d, data, len); 570 return NO_ERROR; 571 } 572 return mError; 573} 574 575void* Parcel::writeInplace(size_t len) 576{ 577 const size_t padded = PAD_SIZE(len); 578 579 // sanity check for integer overflow 580 if (mDataPos+padded < mDataPos) { 581 return NULL; 582 } 583 584 if ((mDataPos+padded) <= mDataCapacity) { 585restart_write: 586 //printf("Writing %ld bytes, padded to 
%ld\n", len, padded); 587 uint8_t* const data = mData+mDataPos; 588 589 // Need to pad at end? 590 if (padded != len) { 591#if BYTE_ORDER == BIG_ENDIAN 592 static const uint32_t mask[4] = { 593 0x00000000, 0xffffff00, 0xffff0000, 0xff000000 594 }; 595#endif 596#if BYTE_ORDER == LITTLE_ENDIAN 597 static const uint32_t mask[4] = { 598 0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff 599 }; 600#endif 601 //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len], 602 // *reinterpret_cast<void**>(data+padded-4)); 603 *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len]; 604 } 605 606 finishWrite(padded); 607 return data; 608 } 609 610 status_t err = growData(padded); 611 if (err == NO_ERROR) goto restart_write; 612 return NULL; 613} 614 615status_t Parcel::writeInt32(int32_t val) 616{ 617 return writeAligned(val); 618} 619 620status_t Parcel::writeByteArray(size_t len, const uint8_t *val) { 621 if (!val) { 622 return writeAligned(-1); 623 } 624 status_t ret = writeAligned(len); 625 if (ret == NO_ERROR) { 626 ret = write(val, len * sizeof(*val)); 627 } 628 return ret; 629} 630 631status_t Parcel::writeInt64(int64_t val) 632{ 633 return writeAligned(val); 634} 635 636status_t Parcel::writeFloat(float val) 637{ 638 return writeAligned(val); 639} 640 641status_t Parcel::writeDouble(double val) 642{ 643 return writeAligned(val); 644} 645 646status_t Parcel::writeIntPtr(intptr_t val) 647{ 648 return writeAligned(val); 649} 650 651status_t Parcel::writeCString(const char* str) 652{ 653 return write(str, strlen(str)+1); 654} 655 656status_t Parcel::writeString8(const String8& str) 657{ 658 status_t err = writeInt32(str.bytes()); 659 // only write string if its length is more than zero characters, 660 // as readString8 will only read if the length field is non-zero. 661 // this is slightly different from how writeString16 works. 
662 if (str.bytes() > 0 && err == NO_ERROR) { 663 err = write(str.string(), str.bytes()+1); 664 } 665 return err; 666} 667 668status_t Parcel::writeString16(const String16& str) 669{ 670 return writeString16(str.string(), str.size()); 671} 672 673status_t Parcel::writeString16(const char16_t* str, size_t len) 674{ 675 if (str == NULL) return writeInt32(-1); 676 677 status_t err = writeInt32(len); 678 if (err == NO_ERROR) { 679 len *= sizeof(char16_t); 680 uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t)); 681 if (data) { 682 memcpy(data, str, len); 683 *reinterpret_cast<char16_t*>(data+len) = 0; 684 return NO_ERROR; 685 } 686 err = mError; 687 } 688 return err; 689} 690 691status_t Parcel::writeStrongBinder(const sp<IBinder>& val) 692{ 693 return flatten_binder(ProcessState::self(), val, this); 694} 695 696status_t Parcel::writeWeakBinder(const wp<IBinder>& val) 697{ 698 return flatten_binder(ProcessState::self(), val, this); 699} 700 701status_t Parcel::writeNativeHandle(const native_handle* handle) 702{ 703 if (!handle || handle->version != sizeof(native_handle)) 704 return BAD_TYPE; 705 706 status_t err; 707 err = writeInt32(handle->numFds); 708 if (err != NO_ERROR) return err; 709 710 err = writeInt32(handle->numInts); 711 if (err != NO_ERROR) return err; 712 713 for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++) 714 err = writeDupFileDescriptor(handle->data[i]); 715 716 if (err != NO_ERROR) { 717 ALOGD("write native handle, write dup fd failed"); 718 return err; 719 } 720 err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts); 721 return err; 722} 723 724status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership) 725{ 726 flat_binder_object obj; 727 obj.type = BINDER_TYPE_FD; 728 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS; 729 obj.handle = fd; 730 obj.cookie = (void*) (takeOwnership ? 
1 : 0); 731 return writeObject(obj, true); 732} 733 734status_t Parcel::writeDupFileDescriptor(int fd) 735{ 736 int dupFd = dup(fd); 737 if (dupFd < 0) { 738 return -errno; 739 } 740 status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/); 741 if (err) { 742 close(dupFd); 743 } 744 return err; 745} 746 747status_t Parcel::writeBlob(size_t len, WritableBlob* outBlob) 748{ 749 status_t status; 750 751 if (!mAllowFds || len <= IN_PLACE_BLOB_LIMIT) { 752 ALOGV("writeBlob: write in place"); 753 status = writeInt32(0); 754 if (status) return status; 755 756 void* ptr = writeInplace(len); 757 if (!ptr) return NO_MEMORY; 758 759 outBlob->init(false /*mapped*/, ptr, len); 760 return NO_ERROR; 761 } 762 763 ALOGV("writeBlob: write to ashmem"); 764 int fd = ashmem_create_region("Parcel Blob", len); 765 if (fd < 0) return NO_MEMORY; 766 767 int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE); 768 if (result < 0) { 769 status = result; 770 } else { 771 void* ptr = ::mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); 772 if (ptr == MAP_FAILED) { 773 status = -errno; 774 } else { 775 result = ashmem_set_prot_region(fd, PROT_READ); 776 if (result < 0) { 777 status = result; 778 } else { 779 status = writeInt32(1); 780 if (!status) { 781 status = writeFileDescriptor(fd, true /*takeOwnership*/); 782 if (!status) { 783 outBlob->init(true /*mapped*/, ptr, len); 784 return NO_ERROR; 785 } 786 } 787 } 788 } 789 ::munmap(ptr, len); 790 } 791 ::close(fd); 792 return status; 793} 794 795status_t Parcel::write(const Flattenable& val) 796{ 797 status_t err; 798 799 // size if needed 800 size_t len = val.getFlattenedSize(); 801 size_t fd_count = val.getFdCount(); 802 803 err = this->writeInt32(len); 804 if (err) return err; 805 806 err = this->writeInt32(fd_count); 807 if (err) return err; 808 809 // payload 810 void* buf = this->writeInplace(PAD_SIZE(len)); 811 if (buf == NULL) 812 return BAD_VALUE; 813 814 int* fds = NULL; 815 if (fd_count) { 816 fds = new 
int[fd_count]; 817 } 818 819 err = val.flatten(buf, len, fds, fd_count); 820 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) { 821 err = this->writeDupFileDescriptor( fds[i] ); 822 } 823 824 if (fd_count) { 825 delete [] fds; 826 } 827 828 return err; 829} 830 831status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData) 832{ 833 const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity; 834 const bool enoughObjects = mObjectsSize < mObjectsCapacity; 835 if (enoughData && enoughObjects) { 836restart_write: 837 *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val; 838 839 // Need to write meta-data? 840 if (nullMetaData || val.binder != NULL) { 841 mObjects[mObjectsSize] = mDataPos; 842 acquire_object(ProcessState::self(), val, this); 843 mObjectsSize++; 844 } 845 846 // remember if it's a file descriptor 847 if (val.type == BINDER_TYPE_FD) { 848 if (!mAllowFds) { 849 return FDS_NOT_ALLOWED; 850 } 851 mHasFds = mFdsKnown = true; 852 } 853 854 return finishWrite(sizeof(flat_binder_object)); 855 } 856 857 if (!enoughData) { 858 const status_t err = growData(sizeof(val)); 859 if (err != NO_ERROR) return err; 860 } 861 if (!enoughObjects) { 862 size_t newSize = ((mObjectsSize+2)*3)/2; 863 size_t* objects = (size_t*)realloc(mObjects, newSize*sizeof(size_t)); 864 if (objects == NULL) return NO_MEMORY; 865 mObjects = objects; 866 mObjectsCapacity = newSize; 867 } 868 869 goto restart_write; 870} 871 872status_t Parcel::writeNoException() 873{ 874 return writeInt32(0); 875} 876 877void Parcel::remove(size_t start, size_t amt) 878{ 879 LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!"); 880} 881 882status_t Parcel::read(void* outData, size_t len) const 883{ 884 if ((mDataPos+PAD_SIZE(len)) >= mDataPos && (mDataPos+PAD_SIZE(len)) <= mDataSize) { 885 memcpy(outData, mData+mDataPos, len); 886 mDataPos += PAD_SIZE(len); 887 ALOGV("read Setting data pos of %p to %d\n", this, mDataPos); 888 return NO_ERROR; 889 } 890 return 
NOT_ENOUGH_DATA; 891} 892 893const void* Parcel::readInplace(size_t len) const 894{ 895 if ((mDataPos+PAD_SIZE(len)) >= mDataPos && (mDataPos+PAD_SIZE(len)) <= mDataSize) { 896 const void* data = mData+mDataPos; 897 mDataPos += PAD_SIZE(len); 898 ALOGV("readInplace Setting data pos of %p to %d\n", this, mDataPos); 899 return data; 900 } 901 return NULL; 902} 903 904template<class T> 905status_t Parcel::readAligned(T *pArg) const { 906 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE(sizeof(T)) == sizeof(T)); 907 908 if ((mDataPos+sizeof(T)) <= mDataSize) { 909 const void* data = mData+mDataPos; 910 mDataPos += sizeof(T); 911 *pArg = *reinterpret_cast<const T*>(data); 912 return NO_ERROR; 913 } else { 914 return NOT_ENOUGH_DATA; 915 } 916} 917 918template<class T> 919T Parcel::readAligned() const { 920 T result; 921 if (readAligned(&result) != NO_ERROR) { 922 result = 0; 923 } 924 925 return result; 926} 927 928template<class T> 929status_t Parcel::writeAligned(T val) { 930 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE(sizeof(T)) == sizeof(T)); 931 932 if ((mDataPos+sizeof(val)) <= mDataCapacity) { 933restart_write: 934 *reinterpret_cast<T*>(mData+mDataPos) = val; 935 return finishWrite(sizeof(val)); 936 } 937 938 status_t err = growData(sizeof(val)); 939 if (err == NO_ERROR) goto restart_write; 940 return err; 941} 942 943status_t Parcel::readInt32(int32_t *pArg) const 944{ 945 return readAligned(pArg); 946} 947 948int32_t Parcel::readInt32() const 949{ 950 return readAligned<int32_t>(); 951} 952 953 954status_t Parcel::readInt64(int64_t *pArg) const 955{ 956 return readAligned(pArg); 957} 958 959 960int64_t Parcel::readInt64() const 961{ 962 return readAligned<int64_t>(); 963} 964 965status_t Parcel::readFloat(float *pArg) const 966{ 967 return readAligned(pArg); 968} 969 970 971float Parcel::readFloat() const 972{ 973 return readAligned<float>(); 974} 975 976status_t Parcel::readDouble(double *pArg) const 977{ 978 return readAligned(pArg); 979} 980 981 982double 
Parcel::readDouble() const 983{ 984 return readAligned<double>(); 985} 986 987status_t Parcel::readIntPtr(intptr_t *pArg) const 988{ 989 return readAligned(pArg); 990} 991 992 993intptr_t Parcel::readIntPtr() const 994{ 995 return readAligned<intptr_t>(); 996} 997 998 999const char* Parcel::readCString() const 1000{ 1001 const size_t avail = mDataSize-mDataPos; 1002 if (avail > 0) { 1003 const char* str = reinterpret_cast<const char*>(mData+mDataPos); 1004 // is the string's trailing NUL within the parcel's valid bounds? 1005 const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail)); 1006 if (eos) { 1007 const size_t len = eos - str; 1008 mDataPos += PAD_SIZE(len+1); 1009 ALOGV("readCString Setting data pos of %p to %d\n", this, mDataPos); 1010 return str; 1011 } 1012 } 1013 return NULL; 1014} 1015 1016String8 Parcel::readString8() const 1017{ 1018 int32_t size = readInt32(); 1019 // watch for potential int overflow adding 1 for trailing NUL 1020 if (size > 0 && size < INT32_MAX) { 1021 const char* str = (const char*)readInplace(size+1); 1022 if (str) return String8(str, size); 1023 } 1024 return String8(); 1025} 1026 1027String16 Parcel::readString16() const 1028{ 1029 size_t len; 1030 const char16_t* str = readString16Inplace(&len); 1031 if (str) return String16(str, len); 1032 ALOGE("Reading a NULL string not supported here."); 1033 return String16(); 1034} 1035 1036const char16_t* Parcel::readString16Inplace(size_t* outLen) const 1037{ 1038 int32_t size = readInt32(); 1039 // watch for potential int overflow from size+1 1040 if (size >= 0 && size < INT32_MAX) { 1041 *outLen = size; 1042 const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t)); 1043 if (str != NULL) { 1044 return str; 1045 } 1046 } 1047 *outLen = 0; 1048 return NULL; 1049} 1050 1051sp<IBinder> Parcel::readStrongBinder() const 1052{ 1053 sp<IBinder> val; 1054 unflatten_binder(ProcessState::self(), *this, &val); 1055 return val; 1056} 1057 1058wp<IBinder> 
Parcel::readWeakBinder() const 1059{ 1060 wp<IBinder> val; 1061 unflatten_binder(ProcessState::self(), *this, &val); 1062 return val; 1063} 1064 1065int32_t Parcel::readExceptionCode() const 1066{ 1067 int32_t exception_code = readAligned<int32_t>(); 1068 if (exception_code == EX_HAS_REPLY_HEADER) { 1069 int32_t header_size = readAligned<int32_t>(); 1070 // Skip over fat responses headers. Not used (or propagated) in 1071 // native code 1072 setDataPosition(dataPosition() + header_size); 1073 // And fat response headers are currently only used when there are no 1074 // exceptions, so return no error: 1075 return 0; 1076 } 1077 return exception_code; 1078} 1079 1080native_handle* Parcel::readNativeHandle() const 1081{ 1082 int numFds, numInts; 1083 status_t err; 1084 err = readInt32(&numFds); 1085 if (err != NO_ERROR) return 0; 1086 err = readInt32(&numInts); 1087 if (err != NO_ERROR) return 0; 1088 1089 native_handle* h = native_handle_create(numFds, numInts); 1090 for (int i=0 ; err==NO_ERROR && i<numFds ; i++) { 1091 h->data[i] = dup(readFileDescriptor()); 1092 if (h->data[i] < 0) err = BAD_VALUE; 1093 } 1094 err = read(h->data + numFds, sizeof(int)*numInts); 1095 if (err != NO_ERROR) { 1096 native_handle_close(h); 1097 native_handle_delete(h); 1098 h = 0; 1099 } 1100 return h; 1101} 1102 1103 1104int Parcel::readFileDescriptor() const 1105{ 1106 const flat_binder_object* flat = readObject(true); 1107 if (flat) { 1108 switch (flat->type) { 1109 case BINDER_TYPE_FD: 1110 //ALOGI("Returning file descriptor %ld from parcel %p\n", flat->handle, this); 1111 return flat->handle; 1112 } 1113 } 1114 return BAD_TYPE; 1115} 1116 1117status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const 1118{ 1119 int32_t useAshmem; 1120 status_t status = readInt32(&useAshmem); 1121 if (status) return status; 1122 1123 if (!useAshmem) { 1124 ALOGV("readBlob: read in place"); 1125 const void* ptr = readInplace(len); 1126 if (!ptr) return BAD_VALUE; 1127 1128 outBlob->init(false 
/*mapped*/, const_cast<void*>(ptr), len); 1129 return NO_ERROR; 1130 } 1131 1132 ALOGV("readBlob: read from ashmem"); 1133 int fd = readFileDescriptor(); 1134 if (fd == int(BAD_TYPE)) return BAD_VALUE; 1135 1136 void* ptr = ::mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0); 1137 if (!ptr) return NO_MEMORY; 1138 1139 outBlob->init(true /*mapped*/, ptr, len); 1140 return NO_ERROR; 1141} 1142 1143status_t Parcel::read(Flattenable& val) const 1144{ 1145 // size 1146 const size_t len = this->readInt32(); 1147 const size_t fd_count = this->readInt32(); 1148 1149 // payload 1150 void const* buf = this->readInplace(PAD_SIZE(len)); 1151 if (buf == NULL) 1152 return BAD_VALUE; 1153 1154 int* fds = NULL; 1155 if (fd_count) { 1156 fds = new int[fd_count]; 1157 } 1158 1159 status_t err = NO_ERROR; 1160 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) { 1161 fds[i] = dup(this->readFileDescriptor()); 1162 if (fds[i] < 0) err = BAD_VALUE; 1163 } 1164 1165 if (err == NO_ERROR) { 1166 err = val.unflatten(buf, len, fds, fd_count); 1167 } 1168 1169 if (fd_count) { 1170 delete [] fds; 1171 } 1172 1173 return err; 1174} 1175const flat_binder_object* Parcel::readObject(bool nullMetaData) const 1176{ 1177 const size_t DPOS = mDataPos; 1178 if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) { 1179 const flat_binder_object* obj 1180 = reinterpret_cast<const flat_binder_object*>(mData+DPOS); 1181 mDataPos = DPOS + sizeof(flat_binder_object); 1182 if (!nullMetaData && (obj->cookie == NULL && obj->binder == NULL)) { 1183 // When transferring a NULL object, we don't write it into 1184 // the object list, so we don't want to check for it when 1185 // reading. 1186 ALOGV("readObject Setting data pos of %p to %d\n", this, mDataPos); 1187 return obj; 1188 } 1189 1190 // Ensure that this object is valid... 
1191 size_t* const OBJS = mObjects; 1192 const size_t N = mObjectsSize; 1193 size_t opos = mNextObjectHint; 1194 1195 if (N > 0) { 1196 ALOGV("Parcel %p looking for obj at %d, hint=%d\n", 1197 this, DPOS, opos); 1198 1199 // Start at the current hint position, looking for an object at 1200 // the current data position. 1201 if (opos < N) { 1202 while (opos < (N-1) && OBJS[opos] < DPOS) { 1203 opos++; 1204 } 1205 } else { 1206 opos = N-1; 1207 } 1208 if (OBJS[opos] == DPOS) { 1209 // Found it! 1210 ALOGV("Parcel found obj %d at index %d with forward search", 1211 this, DPOS, opos); 1212 mNextObjectHint = opos+1; 1213 ALOGV("readObject Setting data pos of %p to %d\n", this, mDataPos); 1214 return obj; 1215 } 1216 1217 // Look backwards for it... 1218 while (opos > 0 && OBJS[opos] > DPOS) { 1219 opos--; 1220 } 1221 if (OBJS[opos] == DPOS) { 1222 // Found it! 1223 ALOGV("Parcel found obj %d at index %d with backward search", 1224 this, DPOS, opos); 1225 mNextObjectHint = opos+1; 1226 ALOGV("readObject Setting data pos of %p to %d\n", this, mDataPos); 1227 return obj; 1228 } 1229 } 1230 ALOGW("Attempt to read object from Parcel %p at offset %d that is not in the object list", 1231 this, DPOS); 1232 } 1233 return NULL; 1234} 1235 1236void Parcel::closeFileDescriptors() 1237{ 1238 size_t i = mObjectsSize; 1239 if (i > 0) { 1240 //ALOGI("Closing file descriptors for %d objects...", mObjectsSize); 1241 } 1242 while (i > 0) { 1243 i--; 1244 const flat_binder_object* flat 1245 = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]); 1246 if (flat->type == BINDER_TYPE_FD) { 1247 //ALOGI("Closing fd: %ld\n", flat->handle); 1248 close(flat->handle); 1249 } 1250 } 1251} 1252 1253const uint8_t* Parcel::ipcData() const 1254{ 1255 return mData; 1256} 1257 1258size_t Parcel::ipcDataSize() const 1259{ 1260 return (mDataSize > mDataPos ? 
mDataSize : mDataPos); 1261} 1262 1263const size_t* Parcel::ipcObjects() const 1264{ 1265 return mObjects; 1266} 1267 1268size_t Parcel::ipcObjectsCount() const 1269{ 1270 return mObjectsSize; 1271} 1272 1273void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize, 1274 const size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie) 1275{ 1276 freeDataNoInit(); 1277 mError = NO_ERROR; 1278 mData = const_cast<uint8_t*>(data); 1279 mDataSize = mDataCapacity = dataSize; 1280 //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)\n", this, mDataSize, getpid()); 1281 mDataPos = 0; 1282 ALOGV("setDataReference Setting data pos of %p to %d\n", this, mDataPos); 1283 mObjects = const_cast<size_t*>(objects); 1284 mObjectsSize = mObjectsCapacity = objectsCount; 1285 mNextObjectHint = 0; 1286 mOwner = relFunc; 1287 mOwnerCookie = relCookie; 1288 scanForFds(); 1289} 1290 1291void Parcel::print(TextOutput& to, uint32_t flags) const 1292{ 1293 to << "Parcel("; 1294 1295 if (errorCheck() != NO_ERROR) { 1296 const status_t err = errorCheck(); 1297 to << "Error: " << (void*)err << " \"" << strerror(-err) << "\""; 1298 } else if (dataSize() > 0) { 1299 const uint8_t* DATA = data(); 1300 to << indent << HexDump(DATA, dataSize()) << dedent; 1301 const size_t* OBJS = objects(); 1302 const size_t N = objectsCount(); 1303 for (size_t i=0; i<N; i++) { 1304 const flat_binder_object* flat 1305 = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]); 1306 to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": " 1307 << TypeCode(flat->type & 0x7f7f7f00) 1308 << " = " << flat->binder; 1309 } 1310 } else { 1311 to << "NULL"; 1312 } 1313 1314 to << ")"; 1315} 1316 1317void Parcel::releaseObjects() 1318{ 1319 const sp<ProcessState> proc(ProcessState::self()); 1320 size_t i = mObjectsSize; 1321 uint8_t* const data = mData; 1322 size_t* const objects = mObjects; 1323 while (i > 0) { 1324 i--; 1325 const flat_binder_object* flat 1326 = 
reinterpret_cast<flat_binder_object*>(data+objects[i]); 1327 release_object(proc, *flat, this); 1328 } 1329} 1330 1331void Parcel::acquireObjects() 1332{ 1333 const sp<ProcessState> proc(ProcessState::self()); 1334 size_t i = mObjectsSize; 1335 uint8_t* const data = mData; 1336 size_t* const objects = mObjects; 1337 while (i > 0) { 1338 i--; 1339 const flat_binder_object* flat 1340 = reinterpret_cast<flat_binder_object*>(data+objects[i]); 1341 acquire_object(proc, *flat, this); 1342 } 1343} 1344 1345void Parcel::freeData() 1346{ 1347 freeDataNoInit(); 1348 initState(); 1349} 1350 1351void Parcel::freeDataNoInit() 1352{ 1353 if (mOwner) { 1354 //ALOGI("Freeing data ref of %p (pid=%d)\n", this, getpid()); 1355 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie); 1356 } else { 1357 releaseObjects(); 1358 if (mData) free(mData); 1359 if (mObjects) free(mObjects); 1360 } 1361} 1362 1363status_t Parcel::growData(size_t len) 1364{ 1365 size_t newSize = ((mDataSize+len)*3)/2; 1366 return (newSize <= mDataSize) 1367 ? 
(status_t) NO_MEMORY 1368 : continueWrite(newSize); 1369} 1370 1371status_t Parcel::restartWrite(size_t desired) 1372{ 1373 if (mOwner) { 1374 freeData(); 1375 return continueWrite(desired); 1376 } 1377 1378 uint8_t* data = (uint8_t*)realloc(mData, desired); 1379 if (!data && desired > mDataCapacity) { 1380 mError = NO_MEMORY; 1381 return NO_MEMORY; 1382 } 1383 1384 releaseObjects(); 1385 1386 if (data) { 1387 mData = data; 1388 mDataCapacity = desired; 1389 } 1390 1391 mDataSize = mDataPos = 0; 1392 ALOGV("restartWrite Setting data size of %p to %d\n", this, mDataSize); 1393 ALOGV("restartWrite Setting data pos of %p to %d\n", this, mDataPos); 1394 1395 free(mObjects); 1396 mObjects = NULL; 1397 mObjectsSize = mObjectsCapacity = 0; 1398 mNextObjectHint = 0; 1399 mHasFds = false; 1400 mFdsKnown = true; 1401 mAllowFds = true; 1402 1403 return NO_ERROR; 1404} 1405 1406status_t Parcel::continueWrite(size_t desired) 1407{ 1408 // If shrinking, first adjust for any objects that appear 1409 // after the new data size. 1410 size_t objectsSize = mObjectsSize; 1411 if (desired < mDataSize) { 1412 if (desired == 0) { 1413 objectsSize = 0; 1414 } else { 1415 while (objectsSize > 0) { 1416 if (mObjects[objectsSize-1] < desired) 1417 break; 1418 objectsSize--; 1419 } 1420 } 1421 } 1422 1423 if (mOwner) { 1424 // If the size is going to zero, just release the owner's data. 1425 if (desired == 0) { 1426 freeData(); 1427 return NO_ERROR; 1428 } 1429 1430 // If there is a different owner, we need to take 1431 // posession. 1432 uint8_t* data = (uint8_t*)malloc(desired); 1433 if (!data) { 1434 mError = NO_MEMORY; 1435 return NO_MEMORY; 1436 } 1437 size_t* objects = NULL; 1438 1439 if (objectsSize) { 1440 objects = (size_t*)malloc(objectsSize*sizeof(size_t)); 1441 if (!objects) { 1442 mError = NO_MEMORY; 1443 return NO_MEMORY; 1444 } 1445 1446 // Little hack to only acquire references on objects 1447 // we will be keeping. 
1448 size_t oldObjectsSize = mObjectsSize; 1449 mObjectsSize = objectsSize; 1450 acquireObjects(); 1451 mObjectsSize = oldObjectsSize; 1452 } 1453 1454 if (mData) { 1455 memcpy(data, mData, mDataSize < desired ? mDataSize : desired); 1456 } 1457 if (objects && mObjects) { 1458 memcpy(objects, mObjects, objectsSize*sizeof(size_t)); 1459 } 1460 //ALOGI("Freeing data ref of %p (pid=%d)\n", this, getpid()); 1461 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie); 1462 mOwner = NULL; 1463 1464 mData = data; 1465 mObjects = objects; 1466 mDataSize = (mDataSize < desired) ? mDataSize : desired; 1467 ALOGV("continueWrite Setting data size of %p to %d\n", this, mDataSize); 1468 mDataCapacity = desired; 1469 mObjectsSize = mObjectsCapacity = objectsSize; 1470 mNextObjectHint = 0; 1471 1472 } else if (mData) { 1473 if (objectsSize < mObjectsSize) { 1474 // Need to release refs on any objects we are dropping. 1475 const sp<ProcessState> proc(ProcessState::self()); 1476 for (size_t i=objectsSize; i<mObjectsSize; i++) { 1477 const flat_binder_object* flat 1478 = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]); 1479 if (flat->type == BINDER_TYPE_FD) { 1480 // will need to rescan because we may have lopped off the only FDs 1481 mFdsKnown = false; 1482 } 1483 release_object(proc, *flat, this); 1484 } 1485 size_t* objects = 1486 (size_t*)realloc(mObjects, objectsSize*sizeof(size_t)); 1487 if (objects) { 1488 mObjects = objects; 1489 } 1490 mObjectsSize = objectsSize; 1491 mNextObjectHint = 0; 1492 } 1493 1494 // We own the data, so we can just do a realloc(). 
1495 if (desired > mDataCapacity) { 1496 uint8_t* data = (uint8_t*)realloc(mData, desired); 1497 if (data) { 1498 mData = data; 1499 mDataCapacity = desired; 1500 } else if (desired > mDataCapacity) { 1501 mError = NO_MEMORY; 1502 return NO_MEMORY; 1503 } 1504 } else { 1505 if (mDataSize > desired) { 1506 mDataSize = desired; 1507 ALOGV("continueWrite Setting data size of %p to %d\n", this, mDataSize); 1508 } 1509 if (mDataPos > desired) { 1510 mDataPos = desired; 1511 ALOGV("continueWrite Setting data pos of %p to %d\n", this, mDataPos); 1512 } 1513 } 1514 1515 } else { 1516 // This is the first data. Easy! 1517 uint8_t* data = (uint8_t*)malloc(desired); 1518 if (!data) { 1519 mError = NO_MEMORY; 1520 return NO_MEMORY; 1521 } 1522 1523 if(!(mDataCapacity == 0 && mObjects == NULL 1524 && mObjectsCapacity == 0)) { 1525 ALOGE("continueWrite: %d/%p/%d/%d", mDataCapacity, mObjects, mObjectsCapacity, desired); 1526 } 1527 1528 mData = data; 1529 mDataSize = mDataPos = 0; 1530 ALOGV("continueWrite Setting data size of %p to %d\n", this, mDataSize); 1531 ALOGV("continueWrite Setting data pos of %p to %d\n", this, mDataPos); 1532 mDataCapacity = desired; 1533 } 1534 1535 return NO_ERROR; 1536} 1537 1538void Parcel::initState() 1539{ 1540 mError = NO_ERROR; 1541 mData = 0; 1542 mDataSize = 0; 1543 mDataCapacity = 0; 1544 mDataPos = 0; 1545 ALOGV("initState Setting data size of %p to %d\n", this, mDataSize); 1546 ALOGV("initState Setting data pos of %p to %d\n", this, mDataPos); 1547 mObjects = NULL; 1548 mObjectsSize = 0; 1549 mObjectsCapacity = 0; 1550 mNextObjectHint = 0; 1551 mHasFds = false; 1552 mFdsKnown = true; 1553 mAllowFds = true; 1554 mOwner = NULL; 1555} 1556 1557void Parcel::scanForFds() const 1558{ 1559 bool hasFds = false; 1560 for (size_t i=0; i<mObjectsSize; i++) { 1561 const flat_binder_object* flat 1562 = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]); 1563 if (flat->type == BINDER_TYPE_FD) { 1564 hasFds = true; 1565 break; 1566 } 1567 } 
1568 mHasFds = hasFds; 1569 mFdsKnown = true; 1570} 1571 1572// --- Parcel::Blob --- 1573 1574Parcel::Blob::Blob() : 1575 mMapped(false), mData(NULL), mSize(0) { 1576} 1577 1578Parcel::Blob::~Blob() { 1579 release(); 1580} 1581 1582void Parcel::Blob::release() { 1583 if (mMapped && mData) { 1584 ::munmap(mData, mSize); 1585 } 1586 clear(); 1587} 1588 1589void Parcel::Blob::init(bool mapped, void* data, size_t size) { 1590 mMapped = mapped; 1591 mData = data; 1592 mSize = size; 1593} 1594 1595void Parcel::Blob::clear() { 1596 mMapped = false; 1597 mData = NULL; 1598 mSize = 0; 1599} 1600 1601}; // namespace android 1602