/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdarg.h>  // va_list handling in read_spec_entries()
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <new>

#include <linux/xattr.h>
#include <netinet/in.h>
#include <sys/mman.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/un.h>
#include <sys/xattr.h>

#define _REALLY_INCLUDE_SYS__SYSTEM_PROPERTIES_H_
#include <sys/_system_properties.h>
#include <sys/system_properties.h>

#include "private/bionic_futex.h"
#include "private/bionic_lock.h"
#include "private/bionic_macros.h"
#include "private/libc_logging.h"

static const char property_service_socket[] = "/dev/socket/" PROP_SERVICE_NAME;


/*
 * Properties are stored in a hybrid trie/binary tree structure.
 * Each property's name is delimited at '.' characters, and the tokens are put
 * into a trie structure.  Siblings at each level of the trie are stored in a
 * binary tree.  For instance, "ro.secure"="1" could be stored as follows:
 *
 * +-----+   children    +----+   children    +--------+
 * |     |-------------->| ro |-------------->| secure |
 * +-----+               +----+               +--------+
 *                       /    \                /   |
 *                 left /      \ right   left /    |  prop   +===========+
 *                     v        v            v     +-------->| ro.secure |
 *                  +-----+   +-----+     +-----+            +-----------+
 *                  | net |   | sys |     | com |            |     1     |
 *                  +-----+   +-----+     +-----+            +===========+
 */

// Represents a node in the trie.
struct prop_bt {
    uint8_t namelen;
    uint8_t reserved[3];

    // The property trie is updated only by the init process (single threaded), which provides
    // the property service, and it can be read by multiple threads at the same time.
    // As the property trie is not protected by locks, we use atomic_uint_least32_t types for the
    // left, right, children "pointers" in the trie node. To make sure readers who see the
    // change of "pointers" can also notice the change of prop_bt structure contents pointed by
    // the "pointers", we always use a release-consume ordering pair when accessing these "pointers".

    // prop "points" to the prop_info structure if there is a property associated with the trie node.
    // Its situation is similar to the left, right, children "pointers". So we use
    // atomic_uint_least32_t and release-consume ordering to protect it as well.

    // We should also avoid rereading these fields redundantly, since not
    // all processor implementations ensure that multiple loads from the
    // same field are carried out in the right order.
    atomic_uint_least32_t prop;

    atomic_uint_least32_t left;
    atomic_uint_least32_t right;

    atomic_uint_least32_t children;

    char name[0];

    prop_bt(const char *name, const uint8_t name_length) {
        this->namelen = name_length;
        memcpy(this->name, name, name_length);
        this->name[name_length] = '\0';
    }

private:
    DISALLOW_COPY_AND_ASSIGN(prop_bt);
};

class prop_area {
public:

    prop_area(const uint32_t magic, const uint32_t version) :
        magic_(magic), version_(version) {
        atomic_init(&serial_, 0);
        memset(reserved_, 0, sizeof(reserved_));
        // Allocate enough space for the root node.
        bytes_used_ = sizeof(prop_bt);
    }

    const prop_info *find(const char *name);
    bool add(const char *name, unsigned int namelen,
             const char *value, unsigned int valuelen);

    bool foreach(void (*propfn)(const prop_info *pi, void *cookie), void *cookie);

    atomic_uint_least32_t *serial() { return &serial_; }
    uint32_t magic() const { return magic_; }
    uint32_t version() const { return version_; }

private:
    void *allocate_obj(const size_t size, uint_least32_t *const off);
    prop_bt *new_prop_bt(const char *name, uint8_t namelen, uint_least32_t *const off);
    prop_info *new_prop_info(const char *name, uint8_t namelen,
                             const char *value, uint8_t valuelen,
                             uint_least32_t *const off);
    void *to_prop_obj(uint_least32_t off);
    prop_bt *to_prop_bt(atomic_uint_least32_t *off_p);
    prop_info *to_prop_info(atomic_uint_least32_t *off_p);

    prop_bt *root_node();

    prop_bt *find_prop_bt(prop_bt *const bt, const char *name,
                          uint8_t namelen, bool alloc_if_needed);

    const prop_info *find_property(prop_bt *const trie, const char *name,
                                   uint8_t namelen, const char *value,
                                   uint8_t valuelen, bool alloc_if_needed);

    bool foreach_property(prop_bt *const trie,
                          void (*propfn)(const prop_info *pi, void *cookie),
                          void *cookie);

    uint32_t bytes_used_;
    atomic_uint_least32_t serial_;
    uint32_t magic_;
    uint32_t version_;
    uint32_t reserved_[28];
    char data_[0];

    DISALLOW_COPY_AND_ASSIGN(prop_area);
};

struct prop_info {
    atomic_uint_least32_t serial;
    char value[PROP_VALUE_MAX];
    char name[0];

    prop_info(const char *name, const uint8_t namelen, const char *value,
              const uint8_t valuelen) {
        memcpy(this->name, name, namelen);
        this->name[namelen] = '\0';
        atomic_init(&this->serial, valuelen << 24);
        memcpy(this->value, value, valuelen);
        this->value[valuelen] = '\0';
    }
private:
    DISALLOW_COPY_AND_ASSIGN(prop_info);
};

struct find_nth_cookie {
    uint32_t count;
    const uint32_t n;
    const prop_info *pi;

    find_nth_cookie(uint32_t n) : count(0), n(n), pi(NULL) {
    }
};

static char property_filename[PROP_FILENAME_MAX] = PROP_FILENAME;
static bool compat_mode = false;
static size_t pa_data_size;
static size_t pa_size;
static bool initialized = false;

// NOTE: This isn't static because system_properties_compat.c
// requires it.
prop_area *__system_property_area__ = NULL;

static int get_fd_from_env(void)
{
    // This environment variable consists of two decimal integer
    // values separated by a ",". The first value is a file descriptor
    // and the second is the size of the system properties area. The
    // size is currently unused.
    char *env = getenv("ANDROID_PROPERTY_WORKSPACE");

    if (!env) {
        return -1;
    }

    return atoi(env);
}

static prop_area* map_prop_area_rw(const char* filename, const char* context,
                                   bool* fsetxattr_failed) {
    /* dev is a tmpfs that we can use to carve a shared workspace
     * out of, so let's do that...
     */
    const int fd = open(filename, O_RDWR | O_CREAT | O_NOFOLLOW | O_CLOEXEC | O_EXCL, 0444);

    if (fd < 0) {
        if (errno == EACCES) {
            /* for consistency with the case where the process has already
             * mapped the page in and segfaults when trying to write to it
             */
            abort();
        }
        return nullptr;
    }

    if (context) {
        if (fsetxattr(fd, XATTR_NAME_SELINUX, context, strlen(context) + 1, 0) != 0) {
            __libc_format_log(ANDROID_LOG_ERROR, "libc",
                              "fsetxattr failed to set context (%s) for \"%s\"", context, filename);
            /*
             * fsetxattr() will fail during system properties tests due to selinux policy.
             * We do not want to create a custom policy for the tester, so we will continue in
             * this function but set a flag that an error has occurred.
             * Init, which is the only daemon that should ever call this function, will abort
             * when this error occurs.
             * Otherwise, the tester will ignore it and continue, albeit without any selinux
             * property separation.
             */
            if (fsetxattr_failed) {
                *fsetxattr_failed = true;
            }
        }
    }

    if (ftruncate(fd, PA_SIZE) < 0) {
        close(fd);
        return nullptr;
    }

    pa_size = PA_SIZE;
    pa_data_size = pa_size - sizeof(prop_area);
    compat_mode = false;

    void *const memory_area = mmap(NULL, pa_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (memory_area == MAP_FAILED) {
        close(fd);
        return nullptr;
    }

    prop_area *pa = new(memory_area) prop_area(PROP_AREA_MAGIC, PROP_AREA_VERSION);

    close(fd);
    return pa;
}

static prop_area* map_fd_ro(const int fd) {
    struct stat fd_stat;
    if (fstat(fd, &fd_stat) < 0) {
        return nullptr;
    }

    if ((fd_stat.st_uid != 0)
            || (fd_stat.st_gid != 0)
            || ((fd_stat.st_mode & (S_IWGRP | S_IWOTH)) != 0)
            || (fd_stat.st_size < static_cast<off_t>(sizeof(prop_area))) ) {
        return nullptr;
    }

    pa_size = fd_stat.st_size;
    pa_data_size = pa_size - sizeof(prop_area);

    void* const map_result = mmap(NULL, pa_size, PROT_READ, MAP_SHARED, fd, 0);
    if (map_result == MAP_FAILED) {
        return nullptr;
    }

    prop_area* pa = reinterpret_cast<prop_area*>(map_result);
    if ((pa->magic() != PROP_AREA_MAGIC) ||
        (pa->version() != PROP_AREA_VERSION &&
         pa->version() != PROP_AREA_VERSION_COMPAT)) {
        munmap(pa, pa_size);
        return nullptr;
    }

    if (pa->version() == PROP_AREA_VERSION_COMPAT) {
        compat_mode = true;
    }

    return pa;
}

static prop_area* map_prop_area(const char* filename, bool is_legacy) {
    int fd = open(filename, O_CLOEXEC | O_NOFOLLOW | O_RDONLY);
    bool close_fd = true;
    if (fd == -1 && errno == ENOENT && is_legacy) {
        /*
         * For backwards compatibility, if the file doesn't
         * exist, we use the environment to get the file descriptor.
         * For security reasons, we only use this backup if the kernel
         * returns ENOENT. We don't want to use the backup if the kernel
         * returns other errors such as ENOMEM or ENFILE, since it
         * might be possible for an external program to trigger this
         * condition.
         * Only do this for the legacy prop file; secured prop files
         * do not have a backup.
         */
        fd = get_fd_from_env();
        close_fd = false;
    }

    if (fd < 0) {
        return nullptr;
    }

    prop_area* map_result = map_fd_ro(fd);
    if (close_fd) {
        close(fd);
    }

    return map_result;
}

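// Bump allocator over the shared data_ region: rounds |size| up to
// uint_least32_t alignment, reserves it, and reports the new object's offset
// through |off| (returns NULL when the area is exhausted).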
void *prop_area::allocate_obj(const size_t size, uint_least32_t *const off)
{
    const size_t aligned = BIONIC_ALIGN(size, sizeof(uint_least32_t));
    if (bytes_used_ + aligned > pa_data_size) {
        return NULL;
    }

    *off = bytes_used_;
    bytes_used_ += aligned;
    return data_ + *off;
}

prop_bt *prop_area::new_prop_bt(const char *name, uint8_t namelen, uint_least32_t *const off)
{
    uint_least32_t new_offset;
    void *const p = allocate_obj(sizeof(prop_bt) + namelen + 1, &new_offset);
    if (p != NULL) {
        prop_bt* bt = new(p) prop_bt(name, namelen);
        *off = new_offset;
        return bt;
    }

    return NULL;
}

prop_info *prop_area::new_prop_info(const char *name, uint8_t namelen,
        const char *value, uint8_t valuelen, uint_least32_t *const off)
{
    uint_least32_t new_offset;
    void* const p = allocate_obj(sizeof(prop_info) + namelen + 1, &new_offset);
    if (p != NULL) {
        prop_info* info = new(p) prop_info(name, namelen, value, valuelen);
        *off = new_offset;
        return info;
    }

    return NULL;
}

void *prop_area::to_prop_obj(uint_least32_t off)
{
    if (off > pa_data_size)
        return NULL;

    return (data_ + off);
}

inline prop_bt *prop_area::to_prop_bt(atomic_uint_least32_t* off_p) {
  uint_least32_t off = atomic_load_explicit(off_p, memory_order_consume);
  return reinterpret_cast<prop_bt*>(to_prop_obj(off));
}

inline prop_info *prop_area::to_prop_info(atomic_uint_least32_t* off_p) {
  uint_least32_t off = atomic_load_explicit(off_p, memory_order_consume);
  return reinterpret_cast<prop_info*>(to_prop_obj(off));
}

inline prop_bt *prop_area::root_node()
{
    return reinterpret_cast<prop_bt*>(to_prop_obj(0));
}

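// Defines the ordering of siblings in the binary tree: compare by name length
// first, then lexicographically for names of equal length.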
static int cmp_prop_name(const char *one, uint8_t one_len, const char *two,
        uint8_t two_len)
{
    if (one_len < two_len)
        return -1;
    else if (one_len > two_len)
        return 1;
    else
        return strncmp(one, two, one_len);
}

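// Searches the binary tree of siblings rooted at |bt| for |name|; when
// alloc_if_needed is set, a missing node is allocated and linked in with a
// release store so readers see a fully constructed prop_bt.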
prop_bt *prop_area::find_prop_bt(prop_bt *const bt, const char *name,
                                 uint8_t namelen, bool alloc_if_needed)
{

    prop_bt* current = bt;
    while (true) {
        if (!current) {
            return NULL;
        }

        const int ret = cmp_prop_name(name, namelen, current->name, current->namelen);
        if (ret == 0) {
            return current;
        }

        if (ret < 0) {
            uint_least32_t left_offset = atomic_load_explicit(&current->left, memory_order_relaxed);
            if (left_offset != 0) {
                current = to_prop_bt(&current->left);
            } else {
                if (!alloc_if_needed) {
                   return NULL;
                }

                uint_least32_t new_offset;
                prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset);
                if (new_bt) {
                    atomic_store_explicit(&current->left, new_offset, memory_order_release);
                }
                return new_bt;
            }
        } else {
            uint_least32_t right_offset = atomic_load_explicit(&current->right, memory_order_relaxed);
            if (right_offset != 0) {
                current = to_prop_bt(&current->right);
            } else {
                if (!alloc_if_needed) {
                   return NULL;
                }

                uint_least32_t new_offset;
                prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset);
                if (new_bt) {
                    atomic_store_explicit(&current->right, new_offset, memory_order_release);
                }
                return new_bt;
            }
        }
    }
}

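// Walks the trie one '.'-delimited token at a time, descending through the
// children/sibling links, and returns (or, if alloc_if_needed, creates) the
// prop_info attached to the final node.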
const prop_info *prop_area::find_property(prop_bt *const trie, const char *name,
        uint8_t namelen, const char *value, uint8_t valuelen,
        bool alloc_if_needed)
{
    if (!trie) return NULL;

    const char *remaining_name = name;
    prop_bt* current = trie;
    while (true) {
        const char *sep = strchr(remaining_name, '.');
        const bool want_subtree = (sep != NULL);
        const uint8_t substr_size = (want_subtree) ?
            sep - remaining_name : strlen(remaining_name);

        if (!substr_size) {
            return NULL;
        }

        prop_bt* root = NULL;
        uint_least32_t children_offset = atomic_load_explicit(&current->children, memory_order_relaxed);
        if (children_offset != 0) {
            root = to_prop_bt(&current->children);
        } else if (alloc_if_needed) {
            uint_least32_t new_offset;
            root = new_prop_bt(remaining_name, substr_size, &new_offset);
            if (root) {
                atomic_store_explicit(&current->children, new_offset, memory_order_release);
            }
        }

        if (!root) {
            return NULL;
        }

        current = find_prop_bt(root, remaining_name, substr_size, alloc_if_needed);
        if (!current) {
            return NULL;
        }

        if (!want_subtree)
            break;

        remaining_name = sep + 1;
    }

    uint_least32_t prop_offset = atomic_load_explicit(&current->prop, memory_order_relaxed);
    if (prop_offset != 0) {
        return to_prop_info(&current->prop);
    } else if (alloc_if_needed) {
        uint_least32_t new_offset;
        prop_info* new_info = new_prop_info(name, namelen, value, valuelen, &new_offset);
        if (new_info) {
            atomic_store_explicit(&current->prop, new_offset, memory_order_release);
        }

        return new_info;
    } else {
        return NULL;
    }
}

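// Sends a property request to the property service (init) over its UNIX domain
// socket and waits up to 250ms for init to acknowledge by closing the socket.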
static int send_prop_msg(const prop_msg *msg)
{
    const int fd = socket(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0);
    if (fd == -1) {
        return -1;
    }

    const size_t namelen = strlen(property_service_socket);

    sockaddr_un addr;
    memset(&addr, 0, sizeof(addr));
    strlcpy(addr.sun_path, property_service_socket, sizeof(addr.sun_path));
    addr.sun_family = AF_LOCAL;
    socklen_t alen = namelen + offsetof(sockaddr_un, sun_path) + 1;
    if (TEMP_FAILURE_RETRY(connect(fd, reinterpret_cast<sockaddr*>(&addr), alen)) < 0) {
        close(fd);
        return -1;
    }

    const int num_bytes = TEMP_FAILURE_RETRY(send(fd, msg, sizeof(prop_msg), 0));

    int result = -1;
    if (num_bytes == sizeof(prop_msg)) {
        // We successfully wrote to the property server but now we
        // wait for the property server to finish its work.  It
        // acknowledges its completion by closing the socket so we
        // poll here (on nothing), waiting for the socket to close.
        // If you 'adb shell setprop foo bar' you'll see the POLLHUP
        // once the socket closes.  Out of paranoia we cap our poll
        // at 250 ms.
        pollfd pollfds[1];
        pollfds[0].fd = fd;
        pollfds[0].events = 0;
        const int poll_result = TEMP_FAILURE_RETRY(poll(pollfds, 1, 250 /* ms */));
        if (poll_result == 1 && (pollfds[0].revents & POLLHUP) != 0) {
            result = 0;
        } else {
            // Ignore the timeout and treat it like a success anyway.
            // The init process is single-threaded and its property
            // service is sometimes slow to respond (perhaps it's off
            // starting a child process or something) and thus this
            // times out and the caller thinks it failed, even though
            // it's still getting around to it.  So we fake it here,
            // mostly for ctl.* properties, but we do try and wait 250
            // ms so callers who do read-after-write can reliably see
            // what they've written.  Most of the time.
            // TODO: fix the system properties design.
            result = 0;
        }
    }

    close(fd);
    return result;
}

static void find_nth_fn(const prop_info *pi, void *ptr)
{
    find_nth_cookie *cookie = reinterpret_cast<find_nth_cookie*>(ptr);

    if (cookie->n == cookie->count)
        cookie->pi = pi;

    cookie->count++;
}

bool prop_area::foreach_property(prop_bt *const trie,
        void (*propfn)(const prop_info *pi, void *cookie), void *cookie)
{
    if (!trie)
        return false;

    uint_least32_t left_offset = atomic_load_explicit(&trie->left, memory_order_relaxed);
    if (left_offset != 0) {
        const bool ok = foreach_property(to_prop_bt(&trie->left), propfn, cookie);
        if (!ok)
            return false;
    }
    uint_least32_t prop_offset = atomic_load_explicit(&trie->prop, memory_order_relaxed);
    if (prop_offset != 0) {
        prop_info *info = to_prop_info(&trie->prop);
        if (!info)
            return false;
        propfn(info, cookie);
    }
    uint_least32_t children_offset = atomic_load_explicit(&trie->children, memory_order_relaxed);
    if (children_offset != 0) {
        const bool ok = foreach_property(to_prop_bt(&trie->children), propfn, cookie);
        if (!ok)
            return false;
    }
    uint_least32_t right_offset = atomic_load_explicit(&trie->right, memory_order_relaxed);
    if (right_offset != 0) {
        const bool ok = foreach_property(to_prop_bt(&trie->right), propfn, cookie);
        if (!ok)
            return false;
    }

    return true;
}

const prop_info *prop_area::find(const char *name) {
    return find_property(root_node(), name, strlen(name), nullptr, 0, false);
}

bool prop_area::add(const char *name, unsigned int namelen,
                    const char *value, unsigned int valuelen) {
    return find_property(root_node(), name, namelen, value, valuelen, true);
}

bool prop_area::foreach(void (*propfn)(const prop_info* pi, void* cookie), void* cookie) {
    return foreach_property(root_node(), propfn, cookie);
}

class context_node {
public:
    context_node(context_node* next, const char* context, prop_area* pa)
        : next(next), context_(strdup(context)), pa_(pa), no_access_(false) {
        lock_.init(false);
    }
    ~context_node() {
        unmap();
        free(context_);
    }
    bool open(bool access_rw, bool* fsetxattr_failed);
    bool check_access_and_open();
    void reset_access();

    const char* context() const { return context_; }
    prop_area* pa() { return pa_; }

    context_node* next;

private:
    bool check_access();
    void unmap();

    Lock lock_;
    char* context_;
    prop_area* pa_;
    bool no_access_;
};

struct prefix_node {
    prefix_node(struct prefix_node* next, const char* prefix, context_node* context)
        : prefix(strdup(prefix)), prefix_len(strlen(prefix)), context(context), next(next) {
    }
    ~prefix_node() {
        free(prefix);
    }
    char* prefix;
    const size_t prefix_len;
    context_node* context;
    struct prefix_node* next;
};

template <typename List, typename... Args>
static inline void list_add(List** list, Args... args) {
    *list = new List(*list, args...);
}

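// Inserts |prefix| keeping the list ordered by decreasing prefix length, with
// any "*" wildcard entry last, so that lookups below match the longest prefix
// first.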
static void list_add_after_len(prefix_node** list, const char* prefix, context_node* context) {
    size_t prefix_len = strlen(prefix);

    auto next_list = list;

    while (*next_list) {
        if ((*next_list)->prefix_len < prefix_len || (*next_list)->prefix[0] == '*') {
            list_add(next_list, prefix, context);
            return;
        }
        next_list = &(*next_list)->next;
    }
    list_add(next_list, prefix, context);
}

template <typename List, typename Func>
static void list_foreach(List* list, Func func) {
    while (list) {
        func(list);
        list = list->next;
    }
}

template <typename List, typename Func>
static List* list_find(List* list, Func func) {
    while (list) {
        if (func(list)) {
            return list;
        }
        list = list->next;
    }
    return nullptr;
}

template <typename List>
static void list_free(List** list) {
    while (*list) {
        auto old_list = *list;
        *list = old_list->next;
        delete old_list;
    }
}

static prefix_node* prefixes = nullptr;
static context_node* contexts = nullptr;

/*
 * pthread_mutex_lock() calls into system_properties in the case of contention.
 * This creates a risk of deadlock if any system_properties functions
 * use pthread locks after system_property initialization.
 *
 * For this reason, the three functions below use a bionic Lock and static
 * allocation of memory for each filename.
 */

bool context_node::open(bool access_rw, bool* fsetxattr_failed) {
    lock_.lock();
    if (pa_) {
        lock_.unlock();
        return true;
    }

    char filename[PROP_FILENAME_MAX];
    int len = __libc_format_buffer(filename, sizeof(filename), "%s/%s",
                                   property_filename, context_);
    if (len < 0 || len > PROP_FILENAME_MAX) {
        lock_.unlock();
        return false;
    }

    if (access_rw) {
        pa_ = map_prop_area_rw(filename, context_, fsetxattr_failed);
    } else {
        pa_ = map_prop_area(filename, false);
    }
    lock_.unlock();
    return pa_;
}

bool context_node::check_access_and_open() {
    if (!pa_ && !no_access_) {
        if (!check_access() || !open(false, nullptr)) {
            no_access_ = true;
        }
    }
    return pa_;
}

void context_node::reset_access() {
    if (!check_access()) {
        unmap();
        no_access_ = true;
    } else {
        no_access_ = false;
    }
}

bool context_node::check_access() {
    char filename[PROP_FILENAME_MAX];
    int len = __libc_format_buffer(filename, sizeof(filename), "%s/%s",
                                   property_filename, context_);
    if (len < 0 || len > PROP_FILENAME_MAX) {
        return false;
    }

    return access(filename, R_OK) == 0;
}

void context_node::unmap() {
    if (!pa_) {
        return;
    }

    munmap(pa_, pa_size);
    if (pa_ == __system_property_area__) {
        __system_property_area__ = nullptr;
    }
    pa_ = nullptr;
}

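// Maps the "properties_serial" area into __system_property_area__; its serial
// counter is bumped by writers and waited on by __system_property_wait_any().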
static bool map_system_property_area(bool access_rw, bool* fsetxattr_failed) {
    char filename[PROP_FILENAME_MAX];
    int len = __libc_format_buffer(filename, sizeof(filename),
                                   "%s/properties_serial", property_filename);
    if (len < 0 || len > PROP_FILENAME_MAX) {
        __system_property_area__ = nullptr;
        return false;
    }

    if (access_rw) {
        __system_property_area__ =
            map_prop_area_rw(filename, "u:object_r:properties_serial:s0", fsetxattr_failed);
    } else {
        __system_property_area__ = map_prop_area(filename, false);
    }
    return __system_property_area__;
}

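// Returns the prop_area that serves |name|, chosen by longest-prefix match
// against the parsed property_contexts entries; the area is mapped read-only
// on first use.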
static prop_area* get_prop_area_for_name(const char* name) {
    auto entry = list_find(prefixes, [name](prefix_node* l) {
        return l->prefix[0] == '*' || !strncmp(l->prefix, name, l->prefix_len);
    });
    if (!entry) {
        return nullptr;
    }

    auto cnode = entry->context;
    if (!cnode->pa()) {
        /*
         * We explicitly do not check no_access_ in this case because unlike the
         * case of foreach(), we want to generate an SELinux audit for each
         * non-permitted property access in this function.
         */
        cnode->open(false, nullptr);
    }
    return cnode->pa();
}

/*
 * The two functions below are duplicated from label_support.c in libselinux.
 * TODO: Find a location suitable for these functions such that both libc and
 * libselinux can share a common source file.
 */

/*
 * The read_spec_entries and read_spec_entry functions may be used to
 * replace sscanf to read entries from spec files. The file and
 * property services now use these.
 */

/* Read an entry from a spec file (e.g. file_contexts) */
static inline int read_spec_entry(char **entry, char **ptr, int *len)
{
    *entry = NULL;
    char *tmp_buf = NULL;

    while (isspace(**ptr) && **ptr != '\0')
        (*ptr)++;

    tmp_buf = *ptr;
    *len = 0;

    while (!isspace(**ptr) && **ptr != '\0') {
        (*ptr)++;
        (*len)++;
    }

    if (*len) {
        *entry = strndup(tmp_buf, *len);
        if (!*entry)
            return -1;
    }

    return 0;
}

/*
 * line_buf - Buffer containing the spec entries.
 * num_args - The number of spec parameter entries to process.
 * ...      - A 'char **spec_entry' for each parameter.
 * returns  - The number of items processed.
 *
 * This function calls read_spec_entry() to do the actual string processing.
 */
static int read_spec_entries(char *line_buf, int num_args, ...)
{
    char **spec_entry, *buf_p;
    int len, rc, items, entry_len = 0;
    va_list ap;

    len = strlen(line_buf);
    if (line_buf[len - 1] == '\n')
        line_buf[len - 1] = '\0';
    else
        /* Handle case if line not \n terminated by bumping
         * the len for the check below (as the line is NUL
         * terminated by getline(3)) */
        len++;

    buf_p = line_buf;
    while (isspace(*buf_p))
        buf_p++;

    /* Skip comment lines and empty lines. */
    if (*buf_p == '#' || *buf_p == '\0')
        return 0;

    /* Process the spec file entries */
    va_start(ap, num_args);

    items = 0;
    while (items < num_args) {
        spec_entry = va_arg(ap, char **);

        if (len - 1 == buf_p - line_buf) {
            va_end(ap);
            return items;
        }

        rc = read_spec_entry(spec_entry, &buf_p, &entry_len);
        if (rc < 0) {
            va_end(ap);
            return rc;
        }
        if (entry_len)
            items++;
    }
    va_end(ap);
    return items;
}

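// Parses /property_contexts and builds the prefix and context lists that route
// each property name to its backing prop_area.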
static bool initialize_properties() {
    FILE* file = fopen("/property_contexts", "re");

    if (!file) {
        return false;
    }

    char* buffer = nullptr;
    size_t line_len;
    char* prop_prefix = nullptr;
    char* context = nullptr;

    while (getline(&buffer, &line_len, file) > 0) {
        int items = read_spec_entries(buffer, 2, &prop_prefix, &context);
        if (items <= 0) {
            continue;
        }
        if (items == 1) {
            free(prop_prefix);
            continue;
        }
        /*
         * init uses ctl.* properties as an IPC mechanism and does not write them
         * to a property file, therefore we do not need to create property files
         * to store them.
         */
        if (!strncmp(prop_prefix, "ctl.", 4)) {
            free(prop_prefix);
            free(context);
            continue;
        }

        auto old_context = list_find(
            contexts, [context](context_node* l) { return !strcmp(l->context(), context); });
        if (old_context) {
            list_add_after_len(&prefixes, prop_prefix, old_context);
        } else {
            list_add(&contexts, context, nullptr);
            list_add_after_len(&prefixes, prop_prefix, contexts);
        }
        free(prop_prefix);
        free(context);
    }

    free(buffer);
    fclose(file);
    return true;
}

static bool is_dir(const char* pathname) {
    struct stat info;
    if (stat(pathname, &info) == -1) {
        return false;
    }
    return S_ISDIR(info.st_mode);
}

static void free_and_unmap_contexts() {
    list_free(&prefixes);
    list_free(&contexts);
    if (__system_property_area__) {
        munmap(__system_property_area__, pa_size);
        __system_property_area__ = nullptr;
    }
}

int __system_properties_init()
{
    if (initialized) {
        list_foreach(contexts, [](context_node* l) { l->reset_access(); });
        return 0;
    }
    if (is_dir(property_filename)) {
        if (!initialize_properties()) {
            return -1;
        }
        if (!map_system_property_area(false, nullptr)) {
            free_and_unmap_contexts();
            return -1;
        }
    } else {
        __system_property_area__ = map_prop_area(property_filename, true);
        if (!__system_property_area__) {
            return -1;
        }
        list_add(&contexts, "legacy_system_prop_area", __system_property_area__);
        list_add_after_len(&prefixes, "*", contexts);
    }
    initialized = true;
    return 0;
}

int __system_property_set_filename(const char *filename)
{
    size_t len = strlen(filename);
    if (len >= sizeof(property_filename))
        return -1;

    strcpy(property_filename, filename);
    return 0;
}

int __system_property_area_init()
{
    free_and_unmap_contexts();
    mkdir(property_filename, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
    if (!initialize_properties()) {
        return -1;
    }
    bool open_failed = false;
    bool fsetxattr_failed = false;
    list_foreach(contexts, [&fsetxattr_failed, &open_failed](context_node* l) {
        if (!l->open(true, &fsetxattr_failed)) {
            open_failed = true;
        }
    });
    if (open_failed || !map_system_property_area(true, &fsetxattr_failed)) {
        free_and_unmap_contexts();
        return -1;
    }
    initialized = true;
    return fsetxattr_failed ? -2 : 0;
}

unsigned int __system_property_area_serial()
{
    prop_area *pa = __system_property_area__;
    if (!pa) {
        return -1;
    }
    // Make sure this read is ordered before any later calls to __system_property_serial().
    return atomic_load_explicit(pa->serial(), memory_order_acquire);
}

const prop_info *__system_property_find(const char *name)
{
    if (!__system_property_area__) {
        return nullptr;
    }

    if (__predict_false(compat_mode)) {
        return __system_property_find_compat(name);
    }

    prop_area* pa = get_prop_area_for_name(name);
    if (!pa) {
        __libc_format_log(ANDROID_LOG_ERROR, "libc", "Access denied finding property \"%s\"", name);
        return nullptr;
    }

    return pa->find(name);
}

// The C11 standard doesn't allow atomic loads from const fields,
// though C++11 does.  Fudge it until standards get straightened out.
static inline uint_least32_t load_const_atomic(const atomic_uint_least32_t* s,
                                               memory_order mo) {
    atomic_uint_least32_t* non_const_s = const_cast<atomic_uint_least32_t*>(s);
    return atomic_load_explicit(non_const_s, mo);
}

int __system_property_read(const prop_info *pi, char *name, char *value)
{
    if (__predict_false(compat_mode)) {
        return __system_property_read_compat(pi, name, value);
    }

    while (true) {
        uint32_t serial = __system_property_serial(pi); // acquire semantics
        size_t len = SERIAL_VALUE_LEN(serial);
        memcpy(value, pi->value, len + 1);
        // TODO: Fix the synchronization scheme here.
        // There is no fully supported way to implement this kind
        // of synchronization in C++11, since the memcpy races with
        // updates to pi, and the data being accessed is not atomic.
        // The following fence is unintuitive, but would be the
        // correct one if memcpy used memory_order_relaxed atomic accesses.
        // In practice it seems unlikely that the generated code would
        // be any different, so this should be OK.
        atomic_thread_fence(memory_order_acquire);
        if (serial ==
                load_const_atomic(&(pi->serial), memory_order_relaxed)) {
            if (name != 0) {
                strcpy(name, pi->name);
            }
            return len;
        }
    }
}

int __system_property_get(const char *name, char *value)
{
    const prop_info *pi = __system_property_find(name);

    if (pi != 0) {
        return __system_property_read(pi, 0, value);
    } else {
        value[0] = 0;
        return 0;
    }
}

int __system_property_set(const char *key, const char *value)
{
    if (key == 0) return -1;
    if (value == 0) value = "";
    if (strlen(key) >= PROP_NAME_MAX) return -1;
    if (strlen(value) >= PROP_VALUE_MAX) return -1;

    prop_msg msg;
    memset(&msg, 0, sizeof msg);
    msg.cmd = PROP_MSG_SETPROP;
    strlcpy(msg.name, key, sizeof msg.name);
    strlcpy(msg.value, value, sizeof msg.value);

    const int err = send_prop_msg(&msg);
    if (err < 0) {
        return err;
    }

    return 0;
}

int __system_property_update(prop_info *pi, const char *value, unsigned int len)
{
    if (len >= PROP_VALUE_MAX)
        return -1;

    prop_area* pa = __system_property_area__;

    if (!pa) {
        return -1;
    }

    uint32_t serial = atomic_load_explicit(&pi->serial, memory_order_relaxed);
    serial |= 1;
    atomic_store_explicit(&pi->serial, serial, memory_order_relaxed);
    // The memcpy call here also races.  Again pretend it
    // used memory_order_relaxed atomics, and use the analogous
    // counterintuitive fence.
    atomic_thread_fence(memory_order_release);
    memcpy(pi->value, value, len + 1);
    atomic_store_explicit(
        &pi->serial,
        (len << 24) | ((serial + 1) & 0xffffff),
        memory_order_release);
    __futex_wake(&pi->serial, INT32_MAX);

    atomic_store_explicit(
        pa->serial(),
        atomic_load_explicit(pa->serial(), memory_order_relaxed) + 1,
        memory_order_release);
    __futex_wake(pa->serial(), INT32_MAX);

    return 0;
}

int __system_property_add(const char *name, unsigned int namelen,
            const char *value, unsigned int valuelen)
{
    if (namelen >= PROP_NAME_MAX)
        return -1;
    if (valuelen >= PROP_VALUE_MAX)
        return -1;
    if (namelen < 1)
        return -1;

    if (!__system_property_area__) {
        return -1;
    }

    prop_area* pa = get_prop_area_for_name(name);

    if (!pa) {
        __libc_format_log(ANDROID_LOG_ERROR, "libc", "Access denied adding property \"%s\"", name);
        return -1;
    }

    bool ret = pa->add(name, namelen, value, valuelen);
    if (!ret)
        return -1;

    // There is only a single mutator, but we want to make sure that
    // updates are visible to a reader waiting for the update.
    atomic_store_explicit(
        __system_property_area__->serial(),
        atomic_load_explicit(__system_property_area__->serial(), memory_order_relaxed) + 1,
        memory_order_release);
    __futex_wake(__system_property_area__->serial(), INT32_MAX);
    return 0;
}

// Wait for non-locked serial, and retrieve it with acquire semantics.
unsigned int __system_property_serial(const prop_info *pi)
{
    uint32_t serial = load_const_atomic(&pi->serial, memory_order_acquire);
    while (SERIAL_DIRTY(serial)) {
        __futex_wait(const_cast<volatile void *>(
                        reinterpret_cast<const void *>(&pi->serial)),
                     serial, NULL);
        serial = load_const_atomic(&pi->serial, memory_order_acquire);
    }
    return serial;
}

unsigned int __system_property_wait_any(unsigned int serial)
{
    prop_area *pa = __system_property_area__;
    uint32_t my_serial;

    if (!pa) {
        return 0;
    }

    do {
        __futex_wait(pa->serial(), serial, NULL);
        my_serial = atomic_load_explicit(pa->serial(), memory_order_acquire);
    } while (my_serial == serial);

    return my_serial;
}

const prop_info *__system_property_find_nth(unsigned n)
{
    find_nth_cookie cookie(n);

    const int err = __system_property_foreach(find_nth_fn, &cookie);
    if (err < 0) {
        return NULL;
    }

    return cookie.pi;
}

int __system_property_foreach(void (*propfn)(const prop_info *pi, void *cookie),
        void *cookie)
{
    if (!__system_property_area__) {
        return -1;
    }

    if (__predict_false(compat_mode)) {
        return __system_property_foreach_compat(propfn, cookie);
    }

    list_foreach(contexts, [propfn, cookie](context_node* l) {
        if (l->check_access_and_open()) {
            l->pa()->foreach(propfn, cookie);
        }
    });
    return 0;
}