// system_properties.cpp revision 30214b901e8dbec9ec11230187a8e71fc8a04014
/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
27 */ 28#include <new> 29#include <stdatomic.h> 30#include <stdio.h> 31#include <stdint.h> 32#include <stdlib.h> 33#include <unistd.h> 34#include <stddef.h> 35#include <errno.h> 36#include <poll.h> 37#include <fcntl.h> 38#include <stdbool.h> 39#include <string.h> 40 41#include <sys/mman.h> 42 43#include <sys/socket.h> 44#include <sys/un.h> 45#include <sys/select.h> 46#include <sys/stat.h> 47#include <sys/types.h> 48#include <netinet/in.h> 49 50#define _REALLY_INCLUDE_SYS__SYSTEM_PROPERTIES_H_ 51#include <sys/_system_properties.h> 52#include <sys/system_properties.h> 53 54#include "private/bionic_atomic_inline.h" 55#include "private/bionic_futex.h" 56#include "private/bionic_macros.h" 57 58static const char property_service_socket[] = "/dev/socket/" PROP_SERVICE_NAME; 59 60 61/* 62 * Properties are stored in a hybrid trie/binary tree structure. 63 * Each property's name is delimited at '.' characters, and the tokens are put 64 * into a trie structure. Siblings at each level of the trie are stored in a 65 * binary tree. For instance, "ro.secure"="1" could be stored as follows: 66 * 67 * +-----+ children +----+ children +--------+ 68 * | |-------------->| ro |-------------->| secure | 69 * +-----+ +----+ +--------+ 70 * / \ / | 71 * left / \ right left / | prop +===========+ 72 * v v v +-------->| ro.secure | 73 * +-----+ +-----+ +-----+ +-----------+ 74 * | net | | sys | | com | | 1 | 75 * +-----+ +-----+ +-----+ +===========+ 76 */ 77 78// Represents a node in the trie. 79struct prop_bt { 80 uint8_t namelen; 81 uint8_t reserved[3]; 82 83 // TODO: The following fields should be declared as atomic_uint32_t. 84 // They should be assigned to with release semantics, instead of using 85 // explicit fences. Unfortunately, the read accesses are generally 86 // followed by more dependent read accesses, and the dependence 87 // is assumed to enforce memory ordering. Which it does on supported 88 // hardware. 
This technically should use memory_order_consume, if 89 // that worked as intended. 90 // We should also avoid rereading these fields redundantly, since not 91 // all processor implementations ensure that multiple loads from the 92 // same field are carried out in the right order. 93 volatile uint32_t prop; 94 95 volatile uint32_t left; 96 volatile uint32_t right; 97 98 volatile uint32_t children; 99 100 char name[0]; 101 102 prop_bt(const char *name, const uint8_t name_length) { 103 this->namelen = name_length; 104 memcpy(this->name, name, name_length); 105 this->name[name_length] = '\0'; 106 ANDROID_MEMBAR_FULL(); // TODO: Instead use a release store 107 // for subsequent pointer assignment. 108 } 109 110private: 111 DISALLOW_COPY_AND_ASSIGN(prop_bt); 112}; 113 114struct prop_area { 115 uint32_t bytes_used; 116 atomic_uint_least32_t serial; 117 uint32_t magic; 118 uint32_t version; 119 uint32_t reserved[28]; 120 char data[0]; 121 122 prop_area(const uint32_t magic, const uint32_t version) : 123 magic(magic), version(version) { 124 atomic_init(&serial, 0); 125 memset(reserved, 0, sizeof(reserved)); 126 // Allocate enough space for the root node. 127 bytes_used = sizeof(prop_bt); 128 } 129 130private: 131 DISALLOW_COPY_AND_ASSIGN(prop_area); 132}; 133 134struct prop_info { 135 atomic_uint_least32_t serial; 136 char value[PROP_VALUE_MAX]; 137 char name[0]; 138 139 prop_info(const char *name, const uint8_t namelen, const char *value, 140 const uint8_t valuelen) { 141 memcpy(this->name, name, namelen); 142 this->name[namelen] = '\0'; 143 atomic_init(&this->serial, valuelen << 24); 144 memcpy(this->value, value, valuelen); 145 this->value[valuelen] = '\0'; 146 ANDROID_MEMBAR_FULL(); // TODO: Instead use a release store 147 // for subsequent point assignment. 
148 } 149private: 150 DISALLOW_COPY_AND_ASSIGN(prop_info); 151}; 152 153struct find_nth_cookie { 154 uint32_t count; 155 const uint32_t n; 156 const prop_info *pi; 157 158 find_nth_cookie(uint32_t n) : count(0), n(n), pi(NULL) { 159 } 160}; 161 162static char property_filename[PATH_MAX] = PROP_FILENAME; 163static bool compat_mode = false; 164static size_t pa_data_size; 165static size_t pa_size; 166 167// NOTE: This isn't static because system_properties_compat.c 168// requires it. 169prop_area *__system_property_area__ = NULL; 170 171static int get_fd_from_env(void) 172{ 173 // This environment variable consistes of two decimal integer 174 // values separated by a ",". The first value is a file descriptor 175 // and the second is the size of the system properties area. The 176 // size is currently unused. 177 char *env = getenv("ANDROID_PROPERTY_WORKSPACE"); 178 179 if (!env) { 180 return -1; 181 } 182 183 return atoi(env); 184} 185 186static int map_prop_area_rw() 187{ 188 /* dev is a tmpfs that we can use to carve a shared workspace 189 * out of, so let's do that... 190 */ 191 const int fd = open(property_filename, 192 O_RDWR | O_CREAT | O_NOFOLLOW | O_CLOEXEC | O_EXCL, 0444); 193 194 if (fd < 0) { 195 if (errno == EACCES) { 196 /* for consistency with the case where the process has already 197 * mapped the page in and segfaults when trying to write to it 198 */ 199 abort(); 200 } 201 return -1; 202 } 203 204 // TODO: Is this really required ? Does android run on any kernels that 205 // don't support O_CLOEXEC ? 
206 const int ret = fcntl(fd, F_SETFD, FD_CLOEXEC); 207 if (ret < 0) { 208 close(fd); 209 return -1; 210 } 211 212 if (ftruncate(fd, PA_SIZE) < 0) { 213 close(fd); 214 return -1; 215 } 216 217 pa_size = PA_SIZE; 218 pa_data_size = pa_size - sizeof(prop_area); 219 compat_mode = false; 220 221 void *const memory_area = mmap(NULL, pa_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); 222 if (memory_area == MAP_FAILED) { 223 close(fd); 224 return -1; 225 } 226 227 prop_area *pa = new(memory_area) prop_area(PROP_AREA_MAGIC, PROP_AREA_VERSION); 228 229 /* plug into the lib property services */ 230 __system_property_area__ = pa; 231 232 close(fd); 233 return 0; 234} 235 236static int map_fd_ro(const int fd) { 237 struct stat fd_stat; 238 if (fstat(fd, &fd_stat) < 0) { 239 return -1; 240 } 241 242 if ((fd_stat.st_uid != 0) 243 || (fd_stat.st_gid != 0) 244 || ((fd_stat.st_mode & (S_IWGRP | S_IWOTH)) != 0) 245 || (fd_stat.st_size < static_cast<off_t>(sizeof(prop_area))) ) { 246 return -1; 247 } 248 249 pa_size = fd_stat.st_size; 250 pa_data_size = pa_size - sizeof(prop_area); 251 252 void* const map_result = mmap(NULL, pa_size, PROT_READ, MAP_SHARED, fd, 0); 253 if (map_result == MAP_FAILED) { 254 return -1; 255 } 256 257 prop_area* pa = reinterpret_cast<prop_area*>(map_result); 258 if ((pa->magic != PROP_AREA_MAGIC) || (pa->version != PROP_AREA_VERSION && 259 pa->version != PROP_AREA_VERSION_COMPAT)) { 260 munmap(pa, pa_size); 261 return -1; 262 } 263 264 if (pa->version == PROP_AREA_VERSION_COMPAT) { 265 compat_mode = true; 266 } 267 268 __system_property_area__ = pa; 269 return 0; 270} 271 272static int map_prop_area() 273{ 274 int fd(open(property_filename, O_RDONLY | O_NOFOLLOW | O_CLOEXEC)); 275 if (fd >= 0) { 276 /* For old kernels that don't support O_CLOEXEC */ 277 const int ret = fcntl(fd, F_SETFD, FD_CLOEXEC); 278 if (ret < 0) { 279 close(fd); 280 return -1; 281 } 282 } 283 284 bool close_fd = true; 285 if ((fd < 0) && (errno == ENOENT)) { 286 /* 287 * For 
backwards compatibility, if the file doesn't 288 * exist, we use the environment to get the file descriptor. 289 * For security reasons, we only use this backup if the kernel 290 * returns ENOENT. We don't want to use the backup if the kernel 291 * returns other errors such as ENOMEM or ENFILE, since it 292 * might be possible for an external program to trigger this 293 * condition. 294 */ 295 fd = get_fd_from_env(); 296 close_fd = false; 297 } 298 299 if (fd < 0) { 300 return -1; 301 } 302 303 const int map_result = map_fd_ro(fd); 304 if (close_fd) { 305 close(fd); 306 } 307 308 return map_result; 309} 310 311static void *allocate_obj(const size_t size, uint32_t *const off) 312{ 313 prop_area *pa = __system_property_area__; 314 const size_t aligned = BIONIC_ALIGN(size, sizeof(uint32_t)); 315 if (pa->bytes_used + aligned > pa_data_size) { 316 return NULL; 317 } 318 319 *off = pa->bytes_used; 320 pa->bytes_used += aligned; 321 return pa->data + *off; 322} 323 324static prop_bt *new_prop_bt(const char *name, uint8_t namelen, uint32_t *const off) 325{ 326 uint32_t new_offset; 327 void *const offset = allocate_obj(sizeof(prop_bt) + namelen + 1, &new_offset); 328 if (offset) { 329 prop_bt* bt = new(offset) prop_bt(name, namelen); 330 *off = new_offset; 331 return bt; 332 } 333 334 return NULL; 335} 336 337static prop_info *new_prop_info(const char *name, uint8_t namelen, 338 const char *value, uint8_t valuelen, uint32_t *const off) 339{ 340 uint32_t off_tmp; 341 void* const offset = allocate_obj(sizeof(prop_info) + namelen + 1, &off_tmp); 342 if (offset) { 343 prop_info* info = new(offset) prop_info(name, namelen, value, valuelen); 344 *off = off_tmp; 345 return info; 346 } 347 348 return NULL; 349} 350 351static void *to_prop_obj(const uint32_t off) 352{ 353 if (off > pa_data_size) 354 return NULL; 355 if (!__system_property_area__) 356 return NULL; 357 358 return (__system_property_area__->data + off); 359} 360 361static prop_bt *root_node() 362{ 363 return 
reinterpret_cast<prop_bt*>(to_prop_obj(0)); 364} 365 366static int cmp_prop_name(const char *one, uint8_t one_len, const char *two, 367 uint8_t two_len) 368{ 369 if (one_len < two_len) 370 return -1; 371 else if (one_len > two_len) 372 return 1; 373 else 374 return strncmp(one, two, one_len); 375} 376 377static prop_bt *find_prop_bt(prop_bt *const bt, const char *name, 378 uint8_t namelen, bool alloc_if_needed) 379{ 380 381 prop_bt* current = bt; 382 while (true) { 383 if (!current) { 384 return NULL; 385 } 386 387 const int ret = cmp_prop_name(name, namelen, current->name, current->namelen); 388 if (ret == 0) { 389 return current; 390 } 391 392 if (ret < 0) { 393 if (current->left) { 394 current = reinterpret_cast<prop_bt*>(to_prop_obj(current->left)); 395 } else { 396 if (!alloc_if_needed) { 397 return NULL; 398 } 399 400 // Note that there isn't a race condition here. "clients" never 401 // reach this code-path since It's only the (single threaded) server 402 // that allocates new nodes. Though "bt->left" is volatile, it can't 403 // have changed since the last value was last read. 
404 uint32_t new_offset = 0; 405 prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset); 406 if (new_bt) { 407 current->left = new_offset; 408 } 409 return new_bt; 410 } 411 } else { 412 if (current->right) { 413 current = reinterpret_cast<prop_bt*>(to_prop_obj(current->right)); 414 } else { 415 if (!alloc_if_needed) { 416 return NULL; 417 } 418 419 uint32_t new_offset; 420 prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset); 421 if (new_bt) { 422 current->right = new_offset; 423 } 424 return new_bt; 425 } 426 } 427 } 428} 429 430static const prop_info *find_property(prop_bt *const trie, const char *name, 431 uint8_t namelen, const char *value, uint8_t valuelen, 432 bool alloc_if_needed) 433{ 434 if (!trie) return NULL; 435 436 const char *remaining_name = name; 437 prop_bt* current = trie; 438 while (true) { 439 const char *sep = strchr(remaining_name, '.'); 440 const bool want_subtree = (sep != NULL); 441 const uint8_t substr_size = (want_subtree) ? 442 sep - remaining_name : strlen(remaining_name); 443 444 if (!substr_size) { 445 return NULL; 446 } 447 448 prop_bt* root = NULL; 449 if (current->children) { 450 root = reinterpret_cast<prop_bt*>(to_prop_obj(current->children)); 451 } else if (alloc_if_needed) { 452 uint32_t new_bt_offset; 453 root = new_prop_bt(remaining_name, substr_size, &new_bt_offset); 454 if (root) { 455 current->children = new_bt_offset; 456 } 457 } 458 459 if (!root) { 460 return NULL; 461 } 462 463 current = find_prop_bt(root, remaining_name, substr_size, alloc_if_needed); 464 if (!current) { 465 return NULL; 466 } 467 468 if (!want_subtree) 469 break; 470 471 remaining_name = sep + 1; 472 } 473 474 if (current->prop) { 475 return reinterpret_cast<prop_info*>(to_prop_obj(current->prop)); 476 } else if (alloc_if_needed) { 477 uint32_t new_info_offset; 478 prop_info* new_info = new_prop_info(name, namelen, value, valuelen, &new_info_offset); 479 if (new_info) { 480 current->prop = new_info_offset; 481 } 482 483 return new_info; 484 
} else { 485 return NULL; 486 } 487} 488 489static int send_prop_msg(const prop_msg *msg) 490{ 491 const int fd = socket(AF_LOCAL, SOCK_STREAM, 0); 492 if (fd < 0) { 493 return -1; 494 } 495 496 const size_t namelen = strlen(property_service_socket); 497 498 sockaddr_un addr; 499 memset(&addr, 0, sizeof(addr)); 500 strlcpy(addr.sun_path, property_service_socket, sizeof(addr.sun_path)); 501 addr.sun_family = AF_LOCAL; 502 socklen_t alen = namelen + offsetof(sockaddr_un, sun_path) + 1; 503 if (TEMP_FAILURE_RETRY(connect(fd, reinterpret_cast<sockaddr*>(&addr), alen)) < 0) { 504 close(fd); 505 return -1; 506 } 507 508 const int num_bytes = TEMP_FAILURE_RETRY(send(fd, msg, sizeof(prop_msg), 0)); 509 510 int result = -1; 511 if (num_bytes == sizeof(prop_msg)) { 512 // We successfully wrote to the property server but now we 513 // wait for the property server to finish its work. It 514 // acknowledges its completion by closing the socket so we 515 // poll here (on nothing), waiting for the socket to close. 516 // If you 'adb shell setprop foo bar' you'll see the POLLHUP 517 // once the socket closes. Out of paranoia we cap our poll 518 // at 250 ms. 519 pollfd pollfds[1]; 520 pollfds[0].fd = fd; 521 pollfds[0].events = 0; 522 const int poll_result = TEMP_FAILURE_RETRY(poll(pollfds, 1, 250 /* ms */)); 523 if (poll_result == 1 && (pollfds[0].revents & POLLHUP) != 0) { 524 result = 0; 525 } else { 526 // Ignore the timeout and treat it like a success anyway. 527 // The init process is single-threaded and its property 528 // service is sometimes slow to respond (perhaps it's off 529 // starting a child process or something) and thus this 530 // times out and the caller thinks it failed, even though 531 // it's still getting around to it. So we fake it here, 532 // mostly for ctl.* properties, but we do try and wait 250 533 // ms so callers who do read-after-write can reliably see 534 // what they've written. Most of the time. 535 // TODO: fix the system properties design. 
536 result = 0; 537 } 538 } 539 540 close(fd); 541 return result; 542} 543 544static void find_nth_fn(const prop_info *pi, void *ptr) 545{ 546 find_nth_cookie *cookie = reinterpret_cast<find_nth_cookie*>(ptr); 547 548 if (cookie->n == cookie->count) 549 cookie->pi = pi; 550 551 cookie->count++; 552} 553 554static int foreach_property(const uint32_t off, 555 void (*propfn)(const prop_info *pi, void *cookie), void *cookie) 556{ 557 prop_bt *trie = reinterpret_cast<prop_bt*>(to_prop_obj(off)); 558 if (!trie) 559 return -1; 560 561 if (trie->left) { 562 const int err = foreach_property(trie->left, propfn, cookie); 563 if (err < 0) 564 return -1; 565 } 566 if (trie->prop) { 567 prop_info *info = reinterpret_cast<prop_info*>(to_prop_obj(trie->prop)); 568 if (!info) 569 return -1; 570 propfn(info, cookie); 571 } 572 if (trie->children) { 573 const int err = foreach_property(trie->children, propfn, cookie); 574 if (err < 0) 575 return -1; 576 } 577 if (trie->right) { 578 const int err = foreach_property(trie->right, propfn, cookie); 579 if (err < 0) 580 return -1; 581 } 582 583 return 0; 584} 585 586int __system_properties_init() 587{ 588 return map_prop_area(); 589} 590 591int __system_property_set_filename(const char *filename) 592{ 593 size_t len = strlen(filename); 594 if (len >= sizeof(property_filename)) 595 return -1; 596 597 strcpy(property_filename, filename); 598 return 0; 599} 600 601int __system_property_area_init() 602{ 603 return map_prop_area_rw(); 604} 605 606const prop_info *__system_property_find(const char *name) 607{ 608 if (__predict_false(compat_mode)) { 609 return __system_property_find_compat(name); 610 } 611 return find_property(root_node(), name, strlen(name), NULL, 0, false); 612} 613 614int __system_property_read(const prop_info *pi, char *name, char *value) 615{ 616 if (__predict_false(compat_mode)) { 617 return __system_property_read_compat(pi, name, value); 618 } 619 620 while (true) { 621 uint32_t serial = __system_property_serial(pi); // 
acquire semantics 622 size_t len = SERIAL_VALUE_LEN(serial); 623 memcpy(value, pi->value, len + 1); 624 // TODO: Fix the synchronization scheme here. 625 // There is no fully supported way to implement this kind 626 // of synchronization in C++11, since the memcpy races with 627 // updates to pi, and the data being accessed is not atomic. 628 // The following fence is unintuitive, but would be the 629 // correct one if memcpy used memory_order_relaxed atomic accesses. 630 // In practice it seems unlikely that the generated code would 631 // would be any different, so this should be OK. 632 atomic_thread_fence(memory_order_acquire); 633 if (serial == 634 atomic_load_explicit(&(pi->serial), memory_order_relaxed)) { 635 if (name != 0) { 636 strcpy(name, pi->name); 637 } 638 return len; 639 } 640 } 641} 642 643int __system_property_get(const char *name, char *value) 644{ 645 const prop_info *pi = __system_property_find(name); 646 647 if (pi != 0) { 648 return __system_property_read(pi, 0, value); 649 } else { 650 value[0] = 0; 651 return 0; 652 } 653} 654 655int __system_property_set(const char *key, const char *value) 656{ 657 if (key == 0) return -1; 658 if (value == 0) value = ""; 659 if (strlen(key) >= PROP_NAME_MAX) return -1; 660 if (strlen(value) >= PROP_VALUE_MAX) return -1; 661 662 prop_msg msg; 663 memset(&msg, 0, sizeof msg); 664 msg.cmd = PROP_MSG_SETPROP; 665 strlcpy(msg.name, key, sizeof msg.name); 666 strlcpy(msg.value, value, sizeof msg.value); 667 668 const int err = send_prop_msg(&msg); 669 if (err < 0) { 670 return err; 671 } 672 673 return 0; 674} 675 676int __system_property_update(prop_info *pi, const char *value, unsigned int len) 677{ 678 prop_area *pa = __system_property_area__; 679 680 if (len >= PROP_VALUE_MAX) 681 return -1; 682 683 uint32_t serial = atomic_load_explicit(&pi->serial, memory_order_relaxed); 684 serial |= 1; 685 atomic_store_explicit(&pi->serial, serial, memory_order_relaxed); 686 // The memcpy call here also races. 
Again pretend it 687 // used memory_order_relaxed atomics, and use the analogous 688 // counterintuitive fence. 689 atomic_thread_fence(memory_order_release); 690 memcpy(pi->value, value, len + 1); 691 atomic_store_explicit( 692 &pi->serial, 693 (len << 24) | ((serial + 1) & 0xffffff), 694 memory_order_release); 695 __futex_wake(&pi->serial, INT32_MAX); 696 697 atomic_store_explicit( 698 &pa->serial, 699 atomic_load_explicit(&pa->serial, memory_order_relaxed) + 1, 700 memory_order_release); 701 __futex_wake(&pa->serial, INT32_MAX); 702 703 return 0; 704} 705 706int __system_property_add(const char *name, unsigned int namelen, 707 const char *value, unsigned int valuelen) 708{ 709 prop_area *pa = __system_property_area__; 710 const prop_info *pi; 711 712 if (namelen >= PROP_NAME_MAX) 713 return -1; 714 if (valuelen >= PROP_VALUE_MAX) 715 return -1; 716 if (namelen < 1) 717 return -1; 718 719 pi = find_property(root_node(), name, namelen, value, valuelen, true); 720 if (!pi) 721 return -1; 722 723 // There is only a single mutator, but we want to make sure that 724 // updates are visible to a reader waiting for the update. 725 atomic_store_explicit( 726 &pa->serial, 727 atomic_load_explicit(&pa->serial, memory_order_relaxed) + 1, 728 memory_order_release); 729 __futex_wake(&pa->serial, INT32_MAX); 730 return 0; 731} 732 733// Wait for non-locked serial, and retrieve it with acquire semantics. 
734unsigned int __system_property_serial(const prop_info *pi) 735{ 736 uint32_t serial = atomic_load_explicit(&pi->serial, memory_order_acquire); 737 while (SERIAL_DIRTY(serial)) { 738 __futex_wait(const_cast<volatile void *>( 739 reinterpret_cast<const void *>(&pi->serial)), 740 serial, NULL); 741 serial = atomic_load_explicit(&pi->serial, memory_order_acquire); 742 } 743 return serial; 744} 745 746unsigned int __system_property_wait_any(unsigned int serial) 747{ 748 prop_area *pa = __system_property_area__; 749 uint32_t my_serial; 750 751 do { 752 __futex_wait(&pa->serial, serial, NULL); 753 my_serial = atomic_load_explicit(&pa->serial, memory_order_acquire); 754 } while (my_serial == serial); 755 756 return my_serial; 757} 758 759const prop_info *__system_property_find_nth(unsigned n) 760{ 761 find_nth_cookie cookie(n); 762 763 const int err = __system_property_foreach(find_nth_fn, &cookie); 764 if (err < 0) { 765 return NULL; 766 } 767 768 return cookie.pi; 769} 770 771int __system_property_foreach(void (*propfn)(const prop_info *pi, void *cookie), 772 void *cookie) 773{ 774 if (__predict_false(compat_mode)) { 775 return __system_property_foreach_compat(propfn, cookie); 776 } 777 778 return foreach_property(0, propfn, cookie); 779} 780