// system_properties.cpp revision 926ebe109424baa407b2cd938ba053b5c0b8ce7c
/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
27 */ 28#include <new> 29#include <stdatomic.h> 30#include <stdio.h> 31#include <stdint.h> 32#include <stdlib.h> 33#include <unistd.h> 34#include <stddef.h> 35#include <errno.h> 36#include <poll.h> 37#include <fcntl.h> 38#include <stdbool.h> 39#include <string.h> 40 41#include <sys/mman.h> 42 43#include <sys/socket.h> 44#include <sys/un.h> 45#include <sys/select.h> 46#include <sys/stat.h> 47#include <sys/types.h> 48#include <netinet/in.h> 49 50#define _REALLY_INCLUDE_SYS__SYSTEM_PROPERTIES_H_ 51#include <sys/_system_properties.h> 52#include <sys/system_properties.h> 53 54#include "private/bionic_futex.h" 55#include "private/bionic_macros.h" 56 57static const char property_service_socket[] = "/dev/socket/" PROP_SERVICE_NAME; 58 59 60/* 61 * Properties are stored in a hybrid trie/binary tree structure. 62 * Each property's name is delimited at '.' characters, and the tokens are put 63 * into a trie structure. Siblings at each level of the trie are stored in a 64 * binary tree. For instance, "ro.secure"="1" could be stored as follows: 65 * 66 * +-----+ children +----+ children +--------+ 67 * | |-------------->| ro |-------------->| secure | 68 * +-----+ +----+ +--------+ 69 * / \ / | 70 * left / \ right left / | prop +===========+ 71 * v v v +-------->| ro.secure | 72 * +-----+ +-----+ +-----+ +-----------+ 73 * | net | | sys | | com | | 1 | 74 * +-----+ +-----+ +-----+ +===========+ 75 */ 76 77// Represents a node in the trie. 78struct prop_bt { 79 uint8_t namelen; 80 uint8_t reserved[3]; 81 82 // The property trie is updated only by the init process (single threaded) which provides 83 // property service. And it can be read by multiple threads at the same time. 84 // As the property trie is not protected by locks, we use atomic_uint_least32_t types for the 85 // left, right, children "pointers" in the trie node. 
To make sure readers who see the 86 // change of "pointers" can also notice the change of prop_bt structure contents pointed by 87 // the "pointers", we always use release-consume ordering pair when accessing these "pointers". 88 89 // prop "points" to prop_info structure if there is a propery associated with the trie node. 90 // Its situation is similar to the left, right, children "pointers". So we use 91 // atomic_uint_least32_t and release-consume ordering to protect it as well. 92 93 // We should also avoid rereading these fields redundantly, since not 94 // all processor implementations ensure that multiple loads from the 95 // same field are carried out in the right order. 96 atomic_uint_least32_t prop; 97 98 atomic_uint_least32_t left; 99 atomic_uint_least32_t right; 100 101 atomic_uint_least32_t children; 102 103 char name[0]; 104 105 prop_bt(const char *name, const uint8_t name_length) { 106 this->namelen = name_length; 107 memcpy(this->name, name, name_length); 108 this->name[name_length] = '\0'; 109 } 110 111private: 112 DISALLOW_COPY_AND_ASSIGN(prop_bt); 113}; 114 115class prop_area { 116public: 117 118 prop_area(const uint32_t magic, const uint32_t version) : 119 magic_(magic), version_(version) { 120 atomic_init(&serial_, 0); 121 memset(reserved_, 0, sizeof(reserved_)); 122 // Allocate enough space for the root node. 
123 bytes_used_ = sizeof(prop_bt); 124 } 125 126 const prop_info *find(const char *name); 127 bool add(const char *name, unsigned int namelen, 128 const char *value, unsigned int valuelen); 129 130 bool foreach(void (*propfn)(const prop_info *pi, void *cookie), void *cookie); 131 132 atomic_uint_least32_t *serial() { return &serial_; } 133 uint32_t magic() const { return magic_; } 134 uint32_t version() const { return version_; } 135 136private: 137 void *allocate_obj(const size_t size, uint_least32_t *const off); 138 prop_bt *new_prop_bt(const char *name, uint8_t namelen, uint_least32_t *const off); 139 prop_info *new_prop_info(const char *name, uint8_t namelen, 140 const char *value, uint8_t valuelen, 141 uint_least32_t *const off); 142 void *to_prop_obj(uint_least32_t off); 143 prop_bt *to_prop_bt(atomic_uint_least32_t *off_p); 144 prop_info *to_prop_info(atomic_uint_least32_t *off_p); 145 146 prop_bt *root_node(); 147 148 prop_bt *find_prop_bt(prop_bt *const bt, const char *name, 149 uint8_t namelen, bool alloc_if_needed); 150 151 const prop_info *find_property(prop_bt *const trie, const char *name, 152 uint8_t namelen, const char *value, 153 uint8_t valuelen, bool alloc_if_needed); 154 155 bool foreach_property(prop_bt *const trie, 156 void (*propfn)(const prop_info *pi, void *cookie), 157 void *cookie); 158 159 uint32_t bytes_used_; 160 atomic_uint_least32_t serial_; 161 uint32_t magic_; 162 uint32_t version_; 163 uint32_t reserved_[28]; 164 char data_[0]; 165 166 DISALLOW_COPY_AND_ASSIGN(prop_area); 167}; 168 169struct prop_info { 170 atomic_uint_least32_t serial; 171 char value[PROP_VALUE_MAX]; 172 char name[0]; 173 174 prop_info(const char *name, const uint8_t namelen, const char *value, 175 const uint8_t valuelen) { 176 memcpy(this->name, name, namelen); 177 this->name[namelen] = '\0'; 178 atomic_init(&this->serial, valuelen << 24); 179 memcpy(this->value, value, valuelen); 180 this->value[valuelen] = '\0'; 181 } 182private: 183 
DISALLOW_COPY_AND_ASSIGN(prop_info); 184}; 185 186struct find_nth_cookie { 187 uint32_t count; 188 const uint32_t n; 189 const prop_info *pi; 190 191 find_nth_cookie(uint32_t n) : count(0), n(n), pi(NULL) { 192 } 193}; 194 195static char property_filename[PATH_MAX] = PROP_FILENAME; 196static bool compat_mode = false; 197static size_t pa_data_size; 198static size_t pa_size; 199 200// NOTE: This isn't static because system_properties_compat.c 201// requires it. 202prop_area *__system_property_area__ = NULL; 203 204static int get_fd_from_env(void) 205{ 206 // This environment variable consistes of two decimal integer 207 // values separated by a ",". The first value is a file descriptor 208 // and the second is the size of the system properties area. The 209 // size is currently unused. 210 char *env = getenv("ANDROID_PROPERTY_WORKSPACE"); 211 212 if (!env) { 213 return -1; 214 } 215 216 return atoi(env); 217} 218 219static int map_prop_area_rw() 220{ 221 /* dev is a tmpfs that we can use to carve a shared workspace 222 * out of, so let's do that... 
223 */ 224 const int fd = open(property_filename, 225 O_RDWR | O_CREAT | O_NOFOLLOW | O_CLOEXEC | O_EXCL, 0444); 226 227 if (fd < 0) { 228 if (errno == EACCES) { 229 /* for consistency with the case where the process has already 230 * mapped the page in and segfaults when trying to write to it 231 */ 232 abort(); 233 } 234 return -1; 235 } 236 237 if (ftruncate(fd, PA_SIZE) < 0) { 238 close(fd); 239 return -1; 240 } 241 242 pa_size = PA_SIZE; 243 pa_data_size = pa_size - sizeof(prop_area); 244 compat_mode = false; 245 246 void *const memory_area = mmap(NULL, pa_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); 247 if (memory_area == MAP_FAILED) { 248 close(fd); 249 return -1; 250 } 251 252 prop_area *pa = new(memory_area) prop_area(PROP_AREA_MAGIC, PROP_AREA_VERSION); 253 254 /* plug into the lib property services */ 255 __system_property_area__ = pa; 256 257 close(fd); 258 return 0; 259} 260 261static int map_fd_ro(const int fd) { 262 struct stat fd_stat; 263 if (fstat(fd, &fd_stat) < 0) { 264 return -1; 265 } 266 267 if ((fd_stat.st_uid != 0) 268 || (fd_stat.st_gid != 0) 269 || ((fd_stat.st_mode & (S_IWGRP | S_IWOTH)) != 0) 270 || (fd_stat.st_size < static_cast<off_t>(sizeof(prop_area))) ) { 271 return -1; 272 } 273 274 pa_size = fd_stat.st_size; 275 pa_data_size = pa_size - sizeof(prop_area); 276 277 void* const map_result = mmap(NULL, pa_size, PROT_READ, MAP_SHARED, fd, 0); 278 if (map_result == MAP_FAILED) { 279 return -1; 280 } 281 282 prop_area* pa = reinterpret_cast<prop_area*>(map_result); 283 if ((pa->magic() != PROP_AREA_MAGIC) || 284 (pa->version() != PROP_AREA_VERSION && 285 pa->version() != PROP_AREA_VERSION_COMPAT)) { 286 munmap(pa, pa_size); 287 return -1; 288 } 289 290 if (pa->version() == PROP_AREA_VERSION_COMPAT) { 291 compat_mode = true; 292 } 293 294 __system_property_area__ = pa; 295 return 0; 296} 297 298static int map_prop_area() 299{ 300 int fd = open(property_filename, O_CLOEXEC | O_NOFOLLOW | O_RDONLY); 301 bool close_fd = true; 302 if 
(fd == -1 && errno == ENOENT) { 303 /* 304 * For backwards compatibility, if the file doesn't 305 * exist, we use the environment to get the file descriptor. 306 * For security reasons, we only use this backup if the kernel 307 * returns ENOENT. We don't want to use the backup if the kernel 308 * returns other errors such as ENOMEM or ENFILE, since it 309 * might be possible for an external program to trigger this 310 * condition. 311 */ 312 fd = get_fd_from_env(); 313 close_fd = false; 314 } 315 316 if (fd < 0) { 317 return -1; 318 } 319 320 const int map_result = map_fd_ro(fd); 321 if (close_fd) { 322 close(fd); 323 } 324 325 return map_result; 326} 327 328void *prop_area::allocate_obj(const size_t size, uint_least32_t *const off) 329{ 330 const size_t aligned = BIONIC_ALIGN(size, sizeof(uint_least32_t)); 331 if (bytes_used_ + aligned > pa_data_size) { 332 return NULL; 333 } 334 335 *off = bytes_used_; 336 bytes_used_ += aligned; 337 return data_ + *off; 338} 339 340prop_bt *prop_area::new_prop_bt(const char *name, uint8_t namelen, uint_least32_t *const off) 341{ 342 uint_least32_t new_offset; 343 void *const p = allocate_obj(sizeof(prop_bt) + namelen + 1, &new_offset); 344 if (p != NULL) { 345 prop_bt* bt = new(p) prop_bt(name, namelen); 346 *off = new_offset; 347 return bt; 348 } 349 350 return NULL; 351} 352 353prop_info *prop_area::new_prop_info(const char *name, uint8_t namelen, 354 const char *value, uint8_t valuelen, uint_least32_t *const off) 355{ 356 uint_least32_t new_offset; 357 void* const p = allocate_obj(sizeof(prop_info) + namelen + 1, &new_offset); 358 if (p != NULL) { 359 prop_info* info = new(p) prop_info(name, namelen, value, valuelen); 360 *off = new_offset; 361 return info; 362 } 363 364 return NULL; 365} 366 367void *prop_area::to_prop_obj(uint_least32_t off) 368{ 369 if (off > pa_data_size) 370 return NULL; 371 372 return (data_ + off); 373} 374 375inline prop_bt *prop_area::to_prop_bt(atomic_uint_least32_t* off_p) { 376 uint_least32_t off 
= atomic_load_explicit(off_p, memory_order_consume); 377 return reinterpret_cast<prop_bt*>(to_prop_obj(off)); 378} 379 380inline prop_info *prop_area::to_prop_info(atomic_uint_least32_t* off_p) { 381 uint_least32_t off = atomic_load_explicit(off_p, memory_order_consume); 382 return reinterpret_cast<prop_info*>(to_prop_obj(off)); 383} 384 385inline prop_bt *prop_area::root_node() 386{ 387 return reinterpret_cast<prop_bt*>(to_prop_obj(0)); 388} 389 390static int cmp_prop_name(const char *one, uint8_t one_len, const char *two, 391 uint8_t two_len) 392{ 393 if (one_len < two_len) 394 return -1; 395 else if (one_len > two_len) 396 return 1; 397 else 398 return strncmp(one, two, one_len); 399} 400 401prop_bt *prop_area::find_prop_bt(prop_bt *const bt, const char *name, 402 uint8_t namelen, bool alloc_if_needed) 403{ 404 405 prop_bt* current = bt; 406 while (true) { 407 if (!current) { 408 return NULL; 409 } 410 411 const int ret = cmp_prop_name(name, namelen, current->name, current->namelen); 412 if (ret == 0) { 413 return current; 414 } 415 416 if (ret < 0) { 417 uint_least32_t left_offset = atomic_load_explicit(¤t->left, memory_order_relaxed); 418 if (left_offset != 0) { 419 current = to_prop_bt(¤t->left); 420 } else { 421 if (!alloc_if_needed) { 422 return NULL; 423 } 424 425 uint_least32_t new_offset; 426 prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset); 427 if (new_bt) { 428 atomic_store_explicit(¤t->left, new_offset, memory_order_release); 429 } 430 return new_bt; 431 } 432 } else { 433 uint_least32_t right_offset = atomic_load_explicit(¤t->right, memory_order_relaxed); 434 if (right_offset != 0) { 435 current = to_prop_bt(¤t->right); 436 } else { 437 if (!alloc_if_needed) { 438 return NULL; 439 } 440 441 uint_least32_t new_offset; 442 prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset); 443 if (new_bt) { 444 atomic_store_explicit(¤t->right, new_offset, memory_order_release); 445 } 446 return new_bt; 447 } 448 } 449 } 450} 451 452const prop_info 
*prop_area::find_property(prop_bt *const trie, const char *name, 453 uint8_t namelen, const char *value, uint8_t valuelen, 454 bool alloc_if_needed) 455{ 456 if (!trie) return NULL; 457 458 const char *remaining_name = name; 459 prop_bt* current = trie; 460 while (true) { 461 const char *sep = strchr(remaining_name, '.'); 462 const bool want_subtree = (sep != NULL); 463 const uint8_t substr_size = (want_subtree) ? 464 sep - remaining_name : strlen(remaining_name); 465 466 if (!substr_size) { 467 return NULL; 468 } 469 470 prop_bt* root = NULL; 471 uint_least32_t children_offset = atomic_load_explicit(¤t->children, memory_order_relaxed); 472 if (children_offset != 0) { 473 root = to_prop_bt(¤t->children); 474 } else if (alloc_if_needed) { 475 uint_least32_t new_offset; 476 root = new_prop_bt(remaining_name, substr_size, &new_offset); 477 if (root) { 478 atomic_store_explicit(¤t->children, new_offset, memory_order_release); 479 } 480 } 481 482 if (!root) { 483 return NULL; 484 } 485 486 current = find_prop_bt(root, remaining_name, substr_size, alloc_if_needed); 487 if (!current) { 488 return NULL; 489 } 490 491 if (!want_subtree) 492 break; 493 494 remaining_name = sep + 1; 495 } 496 497 uint_least32_t prop_offset = atomic_load_explicit(¤t->prop, memory_order_relaxed); 498 if (prop_offset != 0) { 499 return to_prop_info(¤t->prop); 500 } else if (alloc_if_needed) { 501 uint_least32_t new_offset; 502 prop_info* new_info = new_prop_info(name, namelen, value, valuelen, &new_offset); 503 if (new_info) { 504 atomic_store_explicit(¤t->prop, new_offset, memory_order_release); 505 } 506 507 return new_info; 508 } else { 509 return NULL; 510 } 511} 512 513static int send_prop_msg(const prop_msg *msg) 514{ 515 const int fd = socket(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0); 516 if (fd == -1) { 517 return -1; 518 } 519 520 const size_t namelen = strlen(property_service_socket); 521 522 sockaddr_un addr; 523 memset(&addr, 0, sizeof(addr)); 524 strlcpy(addr.sun_path, 
property_service_socket, sizeof(addr.sun_path)); 525 addr.sun_family = AF_LOCAL; 526 socklen_t alen = namelen + offsetof(sockaddr_un, sun_path) + 1; 527 if (TEMP_FAILURE_RETRY(connect(fd, reinterpret_cast<sockaddr*>(&addr), alen)) < 0) { 528 close(fd); 529 return -1; 530 } 531 532 const int num_bytes = TEMP_FAILURE_RETRY(send(fd, msg, sizeof(prop_msg), 0)); 533 534 int result = -1; 535 if (num_bytes == sizeof(prop_msg)) { 536 // We successfully wrote to the property server but now we 537 // wait for the property server to finish its work. It 538 // acknowledges its completion by closing the socket so we 539 // poll here (on nothing), waiting for the socket to close. 540 // If you 'adb shell setprop foo bar' you'll see the POLLHUP 541 // once the socket closes. Out of paranoia we cap our poll 542 // at 250 ms. 543 pollfd pollfds[1]; 544 pollfds[0].fd = fd; 545 pollfds[0].events = 0; 546 const int poll_result = TEMP_FAILURE_RETRY(poll(pollfds, 1, 250 /* ms */)); 547 if (poll_result == 1 && (pollfds[0].revents & POLLHUP) != 0) { 548 result = 0; 549 } else { 550 // Ignore the timeout and treat it like a success anyway. 551 // The init process is single-threaded and its property 552 // service is sometimes slow to respond (perhaps it's off 553 // starting a child process or something) and thus this 554 // times out and the caller thinks it failed, even though 555 // it's still getting around to it. So we fake it here, 556 // mostly for ctl.* properties, but we do try and wait 250 557 // ms so callers who do read-after-write can reliably see 558 // what they've written. Most of the time. 559 // TODO: fix the system properties design. 
560 result = 0; 561 } 562 } 563 564 close(fd); 565 return result; 566} 567 568static void find_nth_fn(const prop_info *pi, void *ptr) 569{ 570 find_nth_cookie *cookie = reinterpret_cast<find_nth_cookie*>(ptr); 571 572 if (cookie->n == cookie->count) 573 cookie->pi = pi; 574 575 cookie->count++; 576} 577 578bool prop_area::foreach_property(prop_bt *const trie, 579 void (*propfn)(const prop_info *pi, void *cookie), void *cookie) 580{ 581 if (!trie) 582 return false; 583 584 uint_least32_t left_offset = atomic_load_explicit(&trie->left, memory_order_relaxed); 585 if (left_offset != 0) { 586 const int err = foreach_property(to_prop_bt(&trie->left), propfn, cookie); 587 if (err < 0) 588 return false; 589 } 590 uint_least32_t prop_offset = atomic_load_explicit(&trie->prop, memory_order_relaxed); 591 if (prop_offset != 0) { 592 prop_info *info = to_prop_info(&trie->prop); 593 if (!info) 594 return false; 595 propfn(info, cookie); 596 } 597 uint_least32_t children_offset = atomic_load_explicit(&trie->children, memory_order_relaxed); 598 if (children_offset != 0) { 599 const int err = foreach_property(to_prop_bt(&trie->children), propfn, cookie); 600 if (err < 0) 601 return false; 602 } 603 uint_least32_t right_offset = atomic_load_explicit(&trie->right, memory_order_relaxed); 604 if (right_offset != 0) { 605 const int err = foreach_property(to_prop_bt(&trie->right), propfn, cookie); 606 if (err < 0) 607 return false; 608 } 609 610 return true; 611} 612 613const prop_info *prop_area::find(const char *name) { 614 return find_property(root_node(), name, strlen(name), nullptr, 0, false); 615} 616 617bool prop_area::add(const char *name, unsigned int namelen, 618 const char *value, unsigned int valuelen) { 619 return find_property(root_node(), name, namelen, value, valuelen, true); 620} 621 622bool prop_area::foreach(void (*propfn)(const prop_info* pi, void* cookie), void* cookie) { 623 return foreach_property(root_node(), propfn, cookie); 624} 625 626int 
__system_properties_init() 627{ 628 return map_prop_area(); 629} 630 631int __system_property_set_filename(const char *filename) 632{ 633 size_t len = strlen(filename); 634 if (len >= sizeof(property_filename)) 635 return -1; 636 637 strcpy(property_filename, filename); 638 return 0; 639} 640 641int __system_property_area_init() 642{ 643 return map_prop_area_rw(); 644} 645 646unsigned int __system_property_area_serial() 647{ 648 prop_area *pa = __system_property_area__; 649 if (!pa) { 650 return -1; 651 } 652 // Make sure this read fulfilled before __system_property_serial 653 return atomic_load_explicit(pa->serial(), memory_order_acquire); 654} 655 656const prop_info *__system_property_find(const char *name) 657{ 658 if (__predict_false(compat_mode)) { 659 return __system_property_find_compat(name); 660 } 661 662 if (!__system_property_area__) { 663 return nullptr; 664 } 665 666 return __system_property_area__->find(name); 667} 668 669// The C11 standard doesn't allow atomic loads from const fields, 670// though C++11 does. Fudge it until standards get straightened out. 671static inline uint_least32_t load_const_atomic(const atomic_uint_least32_t* s, 672 memory_order mo) { 673 atomic_uint_least32_t* non_const_s = const_cast<atomic_uint_least32_t*>(s); 674 return atomic_load_explicit(non_const_s, mo); 675} 676 677int __system_property_read(const prop_info *pi, char *name, char *value) 678{ 679 if (__predict_false(compat_mode)) { 680 return __system_property_read_compat(pi, name, value); 681 } 682 683 while (true) { 684 uint32_t serial = __system_property_serial(pi); // acquire semantics 685 size_t len = SERIAL_VALUE_LEN(serial); 686 memcpy(value, pi->value, len + 1); 687 // TODO: Fix the synchronization scheme here. 688 // There is no fully supported way to implement this kind 689 // of synchronization in C++11, since the memcpy races with 690 // updates to pi, and the data being accessed is not atomic. 
691 // The following fence is unintuitive, but would be the 692 // correct one if memcpy used memory_order_relaxed atomic accesses. 693 // In practice it seems unlikely that the generated code would 694 // would be any different, so this should be OK. 695 atomic_thread_fence(memory_order_acquire); 696 if (serial == 697 load_const_atomic(&(pi->serial), memory_order_relaxed)) { 698 if (name != 0) { 699 strcpy(name, pi->name); 700 } 701 return len; 702 } 703 } 704} 705 706int __system_property_get(const char *name, char *value) 707{ 708 const prop_info *pi = __system_property_find(name); 709 710 if (pi != 0) { 711 return __system_property_read(pi, 0, value); 712 } else { 713 value[0] = 0; 714 return 0; 715 } 716} 717 718int __system_property_set(const char *key, const char *value) 719{ 720 if (key == 0) return -1; 721 if (value == 0) value = ""; 722 if (strlen(key) >= PROP_NAME_MAX) return -1; 723 if (strlen(value) >= PROP_VALUE_MAX) return -1; 724 725 prop_msg msg; 726 memset(&msg, 0, sizeof msg); 727 msg.cmd = PROP_MSG_SETPROP; 728 strlcpy(msg.name, key, sizeof msg.name); 729 strlcpy(msg.value, value, sizeof msg.value); 730 731 const int err = send_prop_msg(&msg); 732 if (err < 0) { 733 return err; 734 } 735 736 return 0; 737} 738 739int __system_property_update(prop_info *pi, const char *value, unsigned int len) 740{ 741 prop_area *pa = __system_property_area__; 742 743 if (len >= PROP_VALUE_MAX) 744 return -1; 745 746 uint32_t serial = atomic_load_explicit(&pi->serial, memory_order_relaxed); 747 serial |= 1; 748 atomic_store_explicit(&pi->serial, serial, memory_order_relaxed); 749 // The memcpy call here also races. Again pretend it 750 // used memory_order_relaxed atomics, and use the analogous 751 // counterintuitive fence. 
752 atomic_thread_fence(memory_order_release); 753 memcpy(pi->value, value, len + 1); 754 atomic_store_explicit( 755 &pi->serial, 756 (len << 24) | ((serial + 1) & 0xffffff), 757 memory_order_release); 758 __futex_wake(&pi->serial, INT32_MAX); 759 760 atomic_store_explicit( 761 pa->serial(), 762 atomic_load_explicit(pa->serial(), memory_order_relaxed) + 1, 763 memory_order_release); 764 __futex_wake(pa->serial(), INT32_MAX); 765 766 return 0; 767} 768 769int __system_property_add(const char *name, unsigned int namelen, 770 const char *value, unsigned int valuelen) 771{ 772 prop_area *pa = __system_property_area__; 773 774 if (namelen >= PROP_NAME_MAX) 775 return -1; 776 if (valuelen >= PROP_VALUE_MAX) 777 return -1; 778 if (namelen < 1) 779 return -1; 780 781 if (!__system_property_area__) { 782 return -1; 783 } 784 785 bool ret = __system_property_area__->add(name, namelen, value, valuelen); 786 if (!ret) 787 return -1; 788 789 // There is only a single mutator, but we want to make sure that 790 // updates are visible to a reader waiting for the update. 791 atomic_store_explicit( 792 pa->serial(), 793 atomic_load_explicit(pa->serial(), memory_order_relaxed) + 1, 794 memory_order_release); 795 __futex_wake(pa->serial(), INT32_MAX); 796 return 0; 797} 798 799// Wait for non-locked serial, and retrieve it with acquire semantics. 
800unsigned int __system_property_serial(const prop_info *pi) 801{ 802 uint32_t serial = load_const_atomic(&pi->serial, memory_order_acquire); 803 while (SERIAL_DIRTY(serial)) { 804 __futex_wait(const_cast<volatile void *>( 805 reinterpret_cast<const void *>(&pi->serial)), 806 serial, NULL); 807 serial = load_const_atomic(&pi->serial, memory_order_acquire); 808 } 809 return serial; 810} 811 812unsigned int __system_property_wait_any(unsigned int serial) 813{ 814 prop_area *pa = __system_property_area__; 815 uint32_t my_serial; 816 817 do { 818 __futex_wait(pa->serial(), serial, NULL); 819 my_serial = atomic_load_explicit(pa->serial(), memory_order_acquire); 820 } while (my_serial == serial); 821 822 return my_serial; 823} 824 825const prop_info *__system_property_find_nth(unsigned n) 826{ 827 find_nth_cookie cookie(n); 828 829 const int err = __system_property_foreach(find_nth_fn, &cookie); 830 if (err < 0) { 831 return NULL; 832 } 833 834 return cookie.pi; 835} 836 837int __system_property_foreach(void (*propfn)(const prop_info *pi, void *cookie), 838 void *cookie) 839{ 840 if (__predict_false(compat_mode)) { 841 return __system_property_foreach_compat(propfn, cookie); 842 } 843 844 if (!__system_property_area__) { 845 return -1; 846 } 847 848 return __system_property_area__->foreach(propfn, cookie) ? 0 : -1; 849} 850