/* properties.c revision 774e04f77945f2e01608c6336b48375da8701b64 */
1/* 2** Copyright 2014, The Android Open Source Project 3** 4** Licensed under the Apache License, Version 2.0 (the "License"); 5** you may not use this file except in compliance with the License. 6** You may obtain a copy of the License at 7** 8** http://www.apache.org/licenses/LICENSE-2.0 9** 10** Unless required by applicable law or agreed to in writing, software 11** distributed under the License is distributed on an "AS IS" BASIS, 12** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13** See the License for the specific language governing permissions and 14** limitations under the License. 15*/ 16 17#include <ctype.h> 18#include <pthread.h> 19#include <stdbool.h> 20#include <stdlib.h> 21#include <string.h> 22#define _REALLY_INCLUDE_SYS__SYSTEM_PROPERTIES_H_ 23#include <sys/_system_properties.h> 24#include <unistd.h> 25 26#include <private/android_logger.h> 27 28#include "log_portability.h" 29 30static pthread_mutex_t lock_loggable = PTHREAD_MUTEX_INITIALIZER; 31 32static int lock() 33{ 34 /* 35 * If we trigger a signal handler in the middle of locked activity and the 36 * signal handler logs a message, we could get into a deadlock state. 37 */ 38 /* 39 * Any contention, and we can turn around and use the non-cached method 40 * in less time than the system call associated with a mutex to deal with 41 * the contention. 
42 */ 43 return pthread_mutex_trylock(&lock_loggable); 44} 45 46static void unlock() 47{ 48 pthread_mutex_unlock(&lock_loggable); 49} 50 51struct cache { 52 const prop_info* pinfo; 53 uint32_t serial; 54}; 55 56struct cache_char { 57 struct cache cache; 58 unsigned char c; 59}; 60 61static int check_cache(struct cache* cache) 62{ 63 return cache->pinfo 64 && __system_property_serial(cache->pinfo) != cache->serial; 65} 66 67#define BOOLEAN_TRUE 0xFF 68#define BOOLEAN_FALSE 0xFE 69 70static void refresh_cache(struct cache_char* cache, const char* key) 71{ 72 char buf[PROP_VALUE_MAX]; 73 74 if (!cache->cache.pinfo) { 75 cache->cache.pinfo = __system_property_find(key); 76 if (!cache->cache.pinfo) { 77 return; 78 } 79 } 80 cache->cache.serial = __system_property_serial(cache->cache.pinfo); 81 __system_property_read(cache->cache.pinfo, 0, buf); 82 switch(buf[0]) { 83 case 't': case 'T': 84 cache->c = strcasecmp(buf + 1, "rue") ? buf[0] : BOOLEAN_TRUE; 85 break; 86 case 'f': case 'F': 87 cache->c = strcasecmp(buf + 1, "alse") ? buf[0] : BOOLEAN_FALSE; 88 break; 89 default: 90 cache->c = buf[0]; 91 } 92} 93 94static int __android_log_level(const char* tag, size_t len, int default_prio) 95{ 96 /* sizeof() is used on this array below */ 97 static const char log_namespace[] = "persist.log.tag."; 98 static const size_t base_offset = 8; /* skip "persist." */ 99 /* calculate the size of our key temporary buffer */ 100 const size_t taglen = tag ? len : 0; 101 /* sizeof(log_namespace) = strlen(log_namespace) + 1 */ 102 char key[sizeof(log_namespace) + taglen]; /* may be > PROP_NAME_MAX */ 103 char* kp; 104 size_t i; 105 char c = 0; 106 /* 107 * Single layer cache of four properties. Priorities are: 108 * log.tag.<tag> 109 * persist.log.tag.<tag> 110 * log.tag 111 * persist.log.tag 112 * Where the missing tag matches all tags and becomes the 113 * system global default. We do not support ro.log.tag* . 
114 */ 115 static char last_tag[PROP_NAME_MAX]; 116 static uint32_t global_serial; 117 /* some compilers erroneously see uninitialized use. !not_locked */ 118 uint32_t current_global_serial = 0; 119 static struct cache_char tag_cache[2]; 120 static struct cache_char global_cache[2]; 121 int change_detected; 122 int global_change_detected; 123 int not_locked; 124 125 strcpy(key, log_namespace); 126 127 global_change_detected = change_detected = not_locked = lock(); 128 129 if (!not_locked) { 130 /* 131 * check all known serial numbers to changes. 132 */ 133 for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) { 134 if (check_cache(&tag_cache[i].cache)) { 135 change_detected = 1; 136 } 137 } 138 for (i = 0; i < (sizeof(global_cache) / sizeof(global_cache[0])); ++i) { 139 if (check_cache(&global_cache[i].cache)) { 140 global_change_detected = 1; 141 } 142 } 143 144 current_global_serial = __system_property_area_serial(); 145 if (current_global_serial != global_serial) { 146 change_detected = 1; 147 global_change_detected = 1; 148 } 149 } 150 151 if (taglen) { 152 int local_change_detected = change_detected; 153 if (!not_locked) { 154 if (!last_tag[0] 155 || (last_tag[0] != tag[0]) 156 || strncmp(last_tag + 1, tag + 1, 157 (len < sizeof(last_tag)) ? 
158 (len - 1) : 159 (sizeof(last_tag) - 1)) 160 || ((len < sizeof(last_tag)) && last_tag[len])) { 161 /* invalidate log.tag.<tag> cache */ 162 for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) { 163 tag_cache[i].cache.pinfo = NULL; 164 tag_cache[i].c = '\0'; 165 } 166 last_tag[0] = '\0'; 167 local_change_detected = 1; 168 } 169 if (!last_tag[0]) { 170 if (len < sizeof(last_tag)) { 171 strncpy(last_tag, tag, len); 172 last_tag[len] = '\0'; 173 } else { 174 strncpy(last_tag, tag, sizeof(last_tag)); 175 } 176 } 177 } 178 strncpy(key + sizeof(log_namespace) - 1, tag, len); 179 key[sizeof(log_namespace) - 1 + len] = '\0'; 180 181 kp = key; 182 for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) { 183 struct cache_char* cache = &tag_cache[i]; 184 struct cache_char temp_cache; 185 186 if (not_locked) { 187 temp_cache.cache.pinfo = NULL; 188 temp_cache.c = '\0'; 189 cache = &temp_cache; 190 } 191 if (local_change_detected) { 192 refresh_cache(cache, kp); 193 } 194 195 if (cache->c) { 196 c = cache->c; 197 break; 198 } 199 200 kp = key + base_offset; 201 } 202 } 203 204 switch (toupper(c)) { /* if invalid, resort to global */ 205 case 'V': 206 case 'D': 207 case 'I': 208 case 'W': 209 case 'E': 210 case 'F': /* Not officially supported */ 211 case 'A': 212 case 'S': 213 case BOOLEAN_FALSE: /* Not officially supported */ 214 break; 215 default: 216 /* clear '.' 
after log.tag */ 217 key[sizeof(log_namespace) - 2] = '\0'; 218 219 kp = key; 220 for (i = 0; i < (sizeof(global_cache) / sizeof(global_cache[0])); ++i) { 221 struct cache_char* cache = &global_cache[i]; 222 struct cache_char temp_cache; 223 224 if (not_locked) { 225 temp_cache = *cache; 226 if (temp_cache.cache.pinfo != cache->cache.pinfo) { /* check atomic */ 227 temp_cache.cache.pinfo = NULL; 228 temp_cache.c = '\0'; 229 } 230 cache = &temp_cache; 231 } 232 if (global_change_detected) { 233 refresh_cache(cache, kp); 234 } 235 236 if (cache->c) { 237 c = cache->c; 238 break; 239 } 240 241 kp = key + base_offset; 242 } 243 break; 244 } 245 246 if (!not_locked) { 247 global_serial = current_global_serial; 248 unlock(); 249 } 250 251 switch (toupper(c)) { 252 case 'V': return ANDROID_LOG_VERBOSE; 253 case 'D': return ANDROID_LOG_DEBUG; 254 case 'I': return ANDROID_LOG_INFO; 255 case 'W': return ANDROID_LOG_WARN; 256 case 'E': return ANDROID_LOG_ERROR; 257 case 'F': /* FALLTHRU */ /* Not officially supported */ 258 case 'A': return ANDROID_LOG_FATAL; 259 case BOOLEAN_FALSE: /* FALLTHRU */ /* Not Officially supported */ 260 case 'S': return -1; /* ANDROID_LOG_SUPPRESS */ 261 } 262 return default_prio; 263} 264 265LIBLOG_ABI_PUBLIC int __android_log_is_loggable_len(int prio, 266 const char* tag, size_t len, 267 int default_prio) 268{ 269 int logLevel = __android_log_level(tag, len, default_prio); 270 return logLevel >= 0 && prio >= logLevel; 271} 272 273LIBLOG_ABI_PUBLIC int __android_log_is_loggable(int prio, 274 const char* tag, 275 int default_prio) 276{ 277 int logLevel = __android_log_level(tag, 278 (tag && *tag) ? 
strlen(tag) : 0, 279 default_prio); 280 return logLevel >= 0 && prio >= logLevel; 281} 282 283LIBLOG_ABI_PRIVATE int __android_log_is_debuggable() 284{ 285 static uint32_t serial; 286 static struct cache_char tag_cache; 287 static const char key[] = "ro.debuggable"; 288 int ret; 289 290 if (tag_cache.c) { /* ro property does not change after set */ 291 ret = tag_cache.c == '1'; 292 } else if (lock()) { 293 struct cache_char temp_cache = { { NULL, -1 }, '\0' }; 294 refresh_cache(&temp_cache, key); 295 ret = temp_cache.c == '1'; 296 } else { 297 int change_detected = check_cache(&tag_cache.cache); 298 uint32_t current_serial = __system_property_area_serial(); 299 if (current_serial != serial) { 300 change_detected = 1; 301 } 302 if (change_detected) { 303 refresh_cache(&tag_cache, key); 304 serial = current_serial; 305 } 306 ret = tag_cache.c == '1'; 307 308 unlock(); 309 } 310 311 return ret; 312} 313 314/* 315 * For properties that are read often, but generally remain constant. 316 * Since a change is rare, we will accept a trylock failure gracefully. 317 * Use a separate lock from is_loggable to keep contention down b/25563384. 
318 */ 319struct cache2_char { 320 pthread_mutex_t lock; 321 uint32_t serial; 322 const char* key_persist; 323 struct cache_char cache_persist; 324 const char* key_ro; 325 struct cache_char cache_ro; 326 unsigned char (*const evaluate)(const struct cache2_char *self); 327}; 328 329static inline unsigned char do_cache2_char(struct cache2_char *self) 330{ 331 uint32_t current_serial; 332 int change_detected; 333 unsigned char c; 334 335 if (pthread_mutex_trylock(&self->lock)) { 336 /* We are willing to accept some race in this context */ 337 return self->evaluate(self); 338 } 339 340 change_detected = check_cache(&self->cache_persist.cache) 341 || check_cache(&self->cache_ro.cache); 342 current_serial = __system_property_area_serial(); 343 if (current_serial != self->serial) { 344 change_detected = 1; 345 } 346 if (change_detected) { 347 refresh_cache(&self->cache_persist, self->key_persist); 348 refresh_cache(&self->cache_ro, self->key_ro); 349 self->serial = current_serial; 350 } 351 c = self->evaluate(self); 352 353 pthread_mutex_unlock(&self->lock); 354 355 return c; 356} 357 358static unsigned char evaluate_persist_ro(const struct cache2_char *self) 359{ 360 unsigned char c = self->cache_persist.c; 361 362 if (c) { 363 return c; 364 } 365 366 return self->cache_ro.c; 367} 368 369/* 370 * Timestamp state generally remains constant, but can change at any time 371 * to handle developer requirements. 372 */ 373LIBLOG_ABI_PUBLIC clockid_t android_log_clockid() 374{ 375 static struct cache2_char clockid = { 376 PTHREAD_MUTEX_INITIALIZER, 377 0, 378 "persist.logd.timestamp", 379 { { NULL, -1 }, '\0' }, 380 "ro.logd.timestamp", 381 { { NULL, -1 }, '\0' }, 382 evaluate_persist_ro 383 }; 384 385 return (tolower(do_cache2_char(&clockid)) == 'm') 386 ? CLOCK_MONOTONIC 387 : CLOCK_REALTIME; 388} 389 390/* 391 * Security state generally remains constant, but the DO must be able 392 * to turn off logging should it become spammy after an attack is detected. 
393 */ 394static unsigned char evaluate_security(const struct cache2_char *self) 395{ 396 unsigned char c = self->cache_ro.c; 397 398 return (c != BOOLEAN_FALSE) && c && (self->cache_persist.c == BOOLEAN_TRUE); 399} 400 401LIBLOG_ABI_PUBLIC int __android_log_security() 402{ 403 static struct cache2_char security = { 404 PTHREAD_MUTEX_INITIALIZER, 405 0, 406 "persist.logd.security", 407 { { NULL, -1 }, BOOLEAN_FALSE }, 408 "ro.device_owner", 409 { { NULL, -1 }, BOOLEAN_FALSE }, 410 evaluate_security 411 }; 412 413 return do_cache2_char(&security); 414} 415 416/* 417 * Interface that represents the logd buffer size determination so that others 418 * need not guess our intentions. 419 */ 420 421/* Property helper */ 422static bool check_flag(const char* prop, const char* flag) { 423 const char* cp = strcasestr(prop, flag); 424 if (!cp) { 425 return false; 426 } 427 /* We only will document comma (,) */ 428 static const char sep[] = ",:;|+ \t\f"; 429 if ((cp != prop) && !strchr(sep, cp[-1])) { 430 return false; 431 } 432 cp += strlen(flag); 433 return !*cp || !!strchr(sep, *cp); 434} 435 436/* cache structure */ 437struct cache_property { 438 struct cache cache; 439 char property[PROP_VALUE_MAX]; 440}; 441 442static void refresh_cache_property(struct cache_property* cache, const char* key) 443{ 444 if (!cache->cache.pinfo) { 445 cache->cache.pinfo = __system_property_find(key); 446 if (!cache->cache.pinfo) { 447 return; 448 } 449 } 450 cache->cache.serial = __system_property_serial(cache->cache.pinfo); 451 __system_property_read(cache->cache.pinfo, 0, cache->property); 452} 453 454/* get boolean with the logger twist that supports eng adjustments */ 455LIBLOG_ABI_PRIVATE bool __android_logger_property_get_bool(const char* key, 456 int flag) 457{ 458 struct cache_property property = { { NULL, -1 }, { 0 } }; 459 if (flag & BOOL_DEFAULT_FLAG_PERSIST) { 460 char newkey[PROP_NAME_MAX]; 461 snprintf(newkey, sizeof(newkey), "ro.%s", key); 462 refresh_cache_property(&property, 
newkey); 463 property.cache.pinfo = NULL; 464 property.cache.serial = -1; 465 snprintf(newkey, sizeof(newkey), "persist.%s", key); 466 refresh_cache_property(&property, newkey); 467 property.cache.pinfo = NULL; 468 property.cache.serial = -1; 469 } 470 471 refresh_cache_property(&property, key); 472 473 if (check_flag(property.property, "true")) { 474 return true; 475 } 476 if (check_flag(property.property, "false")) { 477 return false; 478 } 479 if (check_flag(property.property, "eng")) { 480 flag |= BOOL_DEFAULT_FLAG_ENG; 481 } 482 /* this is really a "not" flag */ 483 if (check_flag(property.property, "svelte")) { 484 flag |= BOOL_DEFAULT_FLAG_SVELTE; 485 } 486 487 /* Sanity Check */ 488 if (flag & (BOOL_DEFAULT_FLAG_SVELTE | BOOL_DEFAULT_FLAG_ENG)) { 489 flag &= ~BOOL_DEFAULT_FLAG_TRUE_FALSE; 490 flag |= BOOL_DEFAULT_TRUE; 491 } 492 493 if ((flag & BOOL_DEFAULT_FLAG_SVELTE) 494 && __android_logger_property_get_bool("ro.config.low_ram", 495 BOOL_DEFAULT_FALSE)) { 496 return false; 497 } 498 if ((flag & BOOL_DEFAULT_FLAG_ENG) && !__android_log_is_debuggable()) { 499 return false; 500 } 501 502 return (flag & BOOL_DEFAULT_FLAG_TRUE_FALSE) != BOOL_DEFAULT_FALSE; 503} 504 505LIBLOG_ABI_PRIVATE bool __android_logger_valid_buffer_size(unsigned long value) 506{ 507 static long pages, pagesize; 508 unsigned long maximum; 509 510 if ((value < LOG_BUFFER_MIN_SIZE) || (LOG_BUFFER_MAX_SIZE < value)) { 511 return false; 512 } 513 514 if (!pages) { 515 pages = sysconf(_SC_PHYS_PAGES); 516 } 517 if (pages < 1) { 518 return true; 519 } 520 521 if (!pagesize) { 522 pagesize = sysconf(_SC_PAGESIZE); 523 if (pagesize <= 1) { 524 pagesize = PAGE_SIZE; 525 } 526 } 527 528 /* maximum memory impact a somewhat arbitrary ~3% */ 529 pages = (pages + 31) / 32; 530 maximum = pages * pagesize; 531 532 if ((maximum < LOG_BUFFER_MIN_SIZE) || (LOG_BUFFER_MAX_SIZE < maximum)) { 533 return true; 534 } 535 536 return value <= maximum; 537} 538 539struct cache2_property_size { 540 pthread_mutex_t 
lock; 541 uint32_t serial; 542 const char* key_persist; 543 struct cache_property cache_persist; 544 const char* key_ro; 545 struct cache_property cache_ro; 546 unsigned long (*const evaluate)(const struct cache2_property_size* self); 547}; 548 549static inline unsigned long do_cache2_property_size(struct cache2_property_size* self) 550{ 551 uint32_t current_serial; 552 int change_detected; 553 unsigned long v; 554 555 if (pthread_mutex_trylock(&self->lock)) { 556 /* We are willing to accept some race in this context */ 557 return self->evaluate(self); 558 } 559 560 change_detected = check_cache(&self->cache_persist.cache) 561 || check_cache(&self->cache_ro.cache); 562 current_serial = __system_property_area_serial(); 563 if (current_serial != self->serial) { 564 change_detected = 1; 565 } 566 if (change_detected) { 567 refresh_cache_property(&self->cache_persist, self->key_persist); 568 refresh_cache_property(&self->cache_ro, self->key_ro); 569 self->serial = current_serial; 570 } 571 v = self->evaluate(self); 572 573 pthread_mutex_unlock(&self->lock); 574 575 return v; 576} 577 578static unsigned long property_get_size_from_cache(const struct cache_property* cache) 579{ 580 char* cp; 581 unsigned long value = strtoul(cache->property, &cp, 10); 582 583 switch(*cp) { 584 case 'm': 585 case 'M': 586 value *= 1024; 587 /* FALLTHRU */ 588 case 'k': 589 case 'K': 590 value *= 1024; 591 /* FALLTHRU */ 592 case '\0': 593 break; 594 595 default: 596 value = 0; 597 } 598 599 if (!__android_logger_valid_buffer_size(value)) { 600 value = 0; 601 } 602 603 return value; 604} 605 606static unsigned long evaluate_property_get_size(const struct cache2_property_size* self) 607{ 608 unsigned long size = property_get_size_from_cache(&self->cache_persist); 609 if (size) { 610 return size; 611 } 612 return property_get_size_from_cache(&self->cache_ro); 613} 614 615LIBLOG_ABI_PRIVATE unsigned long __android_logger_get_buffer_size(log_id_t logId) 616{ 617 static const char 
global_tunable[] = "persist.logd.size"; /* Settings App */ 618 static const char global_default[] = "ro.logd.size"; /* BoardConfig.mk */ 619 static struct cache2_property_size global = { 620 PTHREAD_MUTEX_INITIALIZER, 621 0, 622 global_tunable, 623 { { NULL, -1 }, {} }, 624 global_default, 625 { { NULL, -1 }, {} }, 626 evaluate_property_get_size 627 }; 628 char key_persist[PROP_NAME_MAX]; 629 char key_ro[PROP_NAME_MAX]; 630 struct cache2_property_size local = { 631 PTHREAD_MUTEX_INITIALIZER, 632 0, 633 key_persist, 634 { { NULL, -1 }, {} }, 635 key_ro, 636 { { NULL, -1 }, {} }, 637 evaluate_property_get_size 638 }; 639 unsigned long property_size, default_size; 640 641 default_size = do_cache2_property_size(&global); 642 if (!default_size) { 643 default_size = __android_logger_property_get_bool("ro.config.low_ram", 644 BOOL_DEFAULT_FALSE) 645 ? LOG_BUFFER_MIN_SIZE /* 64K */ 646 : LOG_BUFFER_SIZE; /* 256K */ 647 } 648 649 snprintf(key_persist, sizeof(key_persist), "%s.%s", 650 global_tunable, android_log_id_to_name(logId)); 651 snprintf(key_ro, sizeof(key_ro), "%s.%s", 652 global_default, android_log_id_to_name(logId)); 653 property_size = do_cache2_property_size(&local); 654 655 if (!property_size) { 656 property_size = default_size; 657 } 658 659 if (!property_size) { 660 property_size = LOG_BUFFER_SIZE; 661 } 662 663 return property_size; 664} 665