vboot_kernel.c revision c90e7e8cd99a70f207249d8d4a78a80cc0f44c3f
/* Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 *
 * Functions for loading a kernel from disk.
 * (Firmware portion)
 */

#include "vboot_kernel.h"

#include "boot_device.h"
#include "cgptlib.h"
#include "cgptlib_internal.h"
#include "gbb_header.h"
#include "load_kernel_fw.h"
#include "rollback_index.h"
#include "utility.h"
#include "vboot_common.h"

#define KBUF_SIZE 65536  /* Bytes to read at start of kernel partition */
#define LOWEST_TPM_VERSION 0xffffffff

typedef enum BootMode {
  kBootRecovery = 0,  /* Recovery firmware, regardless of dev switch position */
  kBootNormal = 1,    /* Normal firmware */
  kBootDev = 2        /* Dev firmware AND dev switch is on */
} BootMode;


/* Allocates and reads GPT data from the drive. The sector_bytes and
 * drive_sectors fields should be filled on input. The primary and
 * secondary header and entries are filled on output.
 *
 * Returns 0 if successful, 1 if error. */
int AllocAndReadGptData(GptData* gptdata) {

  uint64_t entries_sectors = TOTAL_ENTRIES_SIZE / gptdata->sector_bytes;

  /* No data to be written yet */
  gptdata->modified = 0;

  /* Allocate all buffers */
  gptdata->primary_header = (uint8_t*)Malloc(gptdata->sector_bytes);
  gptdata->secondary_header = (uint8_t*)Malloc(gptdata->sector_bytes);
  gptdata->primary_entries = (uint8_t*)Malloc(TOTAL_ENTRIES_SIZE);
  gptdata->secondary_entries = (uint8_t*)Malloc(TOTAL_ENTRIES_SIZE);

  if (gptdata->primary_header == NULL || gptdata->secondary_header == NULL ||
      gptdata->primary_entries == NULL || gptdata->secondary_entries == NULL)
    return 1;

  /* Read data from the drive, skipping the protective MBR */
  if (0 != BootDeviceReadLBA(1, 1, gptdata->primary_header))
    return 1;
  if (0 != BootDeviceReadLBA(2, entries_sectors, gptdata->primary_entries))
    return 1;
  if (0 != BootDeviceReadLBA(gptdata->drive_sectors - entries_sectors - 1,
                             entries_sectors, gptdata->secondary_entries))
    return 1;
  if (0 != BootDeviceReadLBA(gptdata->drive_sectors - 1,
                             1, gptdata->secondary_header))
    return 1;

  return 0;
}

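/* Illustrative sketch (not part of this revision): AllocAndReadGptData() above
 * and WriteAndFreeGptData() below are intended to bracket any GPT change. A
 * hypothetical caller fills in the drive geometry, reads the GPT, lets cgptlib
 * flag what changed via gptdata->modified, and writes back only the flagged
 * regions. ExampleGptRoundTrip and its arguments are assumptions for
 * illustration; error-path cleanup is elided. */
#if 0
static int ExampleGptRoundTrip(uint32_t sector_bytes, uint64_t drive_sectors) {
  GptData gpt;
  gpt.sector_bytes = sector_bytes;
  gpt.drive_sectors = drive_sectors;
  if (0 != AllocAndReadGptData(&gpt))   /* allocate buffers and read GPT */
    return 1;
  if (GPT_SUCCESS != GptInit(&gpt))     /* parse and sanity-check the headers */
    return 1;
  /* ...update entries here; cgptlib sets gpt.modified for changed regions... */
  return WriteAndFreeGptData(&gpt);     /* write back only modified regions */
}
#endif
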
/* Writes any changes for the GPT data back to the drive, then frees
 * the buffers.
 *
 * Returns 0 if successful, 1 if error. */
int WriteAndFreeGptData(GptData* gptdata) {

  uint64_t entries_sectors = TOTAL_ENTRIES_SIZE / gptdata->sector_bytes;

  if (gptdata->primary_header) {
    if (gptdata->modified & GPT_MODIFIED_HEADER1) {
      VBDEBUG(("Updating GPT header 1\n"));
      if (0 != BootDeviceWriteLBA(1, 1, gptdata->primary_header))
        return 1;
    }
    Free(gptdata->primary_header);
  }

  if (gptdata->primary_entries) {
    if (gptdata->modified & GPT_MODIFIED_ENTRIES1) {
      VBDEBUG(("Updating GPT entries 1\n"));
      if (0 != BootDeviceWriteLBA(2, entries_sectors,
                                  gptdata->primary_entries))
        return 1;
    }
    Free(gptdata->primary_entries);
  }

  if (gptdata->secondary_entries) {
    if (gptdata->modified & GPT_MODIFIED_ENTRIES2) {
      VBDEBUG(("Updating GPT entries 2\n"));
      if (0 != BootDeviceWriteLBA(gptdata->drive_sectors - entries_sectors - 1,
                                  entries_sectors, gptdata->secondary_entries))
        return 1;
    }
    Free(gptdata->secondary_entries);
  }

  if (gptdata->secondary_header) {
    if (gptdata->modified & GPT_MODIFIED_HEADER2) {
      VBDEBUG(("Updating GPT header 2\n"));
      if (0 != BootDeviceWriteLBA(gptdata->drive_sectors - 1, 1,
                                  gptdata->secondary_header))
        return 1;
    }
    Free(gptdata->secondary_header);
  }

  /* Success */
  return 0;
}

/* disable MSVC warning on const logical expression (as in } while(0);) */
__pragma(warning(disable: 4127))

int LoadKernel(LoadKernelParams* params) {
  VbSharedDataHeader* shared = (VbSharedDataHeader*)params->shared_data_blob;
  VbSharedDataKernelCall* shcall = NULL;
  VbNvContext* vnc = params->nv_context;
  GoogleBinaryBlockHeader* gbb = (GoogleBinaryBlockHeader*)params->gbb_data;
  VbPublicKey* kernel_subkey;
  GptData gpt;
  uint64_t part_start, part_size;
  uint64_t blba;
  uint64_t kbuf_sectors;
  uint8_t* kbuf = NULL;
  int found_partitions = 0;
  int good_partition = -1;
  int good_partition_key_block_valid = 0;
  uint32_t tpm_version = 0;
  uint64_t lowest_version = LOWEST_TPM_VERSION;
  int rec_switch, dev_switch;
  BootMode boot_mode;
  uint32_t test_err = 0;
  uint32_t status;

  int retval = LOAD_KERNEL_RECOVERY;
  int recovery = VBNV_RECOVERY_RO_UNSPECIFIED;
  uint64_t timer_enter = VbGetTimer();

  /* Setup NV storage */
  VbNvSetup(vnc);

  /* Sanity Checks */
  if (!params ||
      !params->bytes_per_lba ||
      !params->ending_lba ||
      !params->kernel_buffer ||
      !params->kernel_buffer_size) {
    VBDEBUG(("LoadKernel() called with invalid params\n"));
    goto LoadKernelExit;
  }

  /* Clear output params in case we fail */
  params->partition_number = 0;
  params->bootloader_address = 0;
  params->bootloader_size = 0;

  /* Calculate switch positions and boot mode */
  rec_switch = (BOOT_FLAG_RECOVERY & params->boot_flags ? 1 : 0);
  dev_switch = (BOOT_FLAG_DEVELOPER & params->boot_flags ? 1 : 0);
  if (rec_switch)
    boot_mode = kBootRecovery;
  else if (BOOT_FLAG_DEV_FIRMWARE & params->boot_flags)
    boot_mode = kBootDev;
  else {
    /* Normal firmware */
    boot_mode = kBootNormal;
    dev_switch = 0;  /* Always do a fully verified boot */
  }

  if (kBootRecovery == boot_mode) {
    /* Initialize the shared data structure, since LoadFirmware() didn't do it
     * for us. */
    if (0 != VbSharedDataInit(shared, params->shared_data_size)) {
      /* Error initializing the shared data, but we can keep going. We just
       * can't use the shared data. */
      VBDEBUG(("Shared data init error\n"));
      params->shared_data_size = 0;
      shared = NULL;
    }
  }

  if (shared) {
    /* Set up tracking for this call. This wraps around if called many times,
     * so we need to initialize the call entry each time. */
    shcall = shared->lk_calls + (shared->lk_call_count
                                 & (VBSD_MAX_KERNEL_CALLS - 1));
    Memset(shcall, 0, sizeof(VbSharedDataKernelCall));
    shcall->boot_flags = (uint32_t)params->boot_flags;
    shcall->boot_mode = boot_mode;
    shcall->sector_size = (uint32_t)params->bytes_per_lba;
    shcall->sector_count = params->ending_lba + 1;
    shared->lk_call_count++;
  }

  /* Handle test errors */
  VbNvGet(vnc, VBNV_TEST_ERROR_FUNC, &test_err);
  if (VBNV_TEST_ERROR_LOAD_KERNEL == test_err) {
    /* Get error code */
    VbNvGet(vnc, VBNV_TEST_ERROR_NUM, &test_err);
    if (shcall)
      shcall->test_error_num = (uint8_t)test_err;
    /* Clear test params so we don't repeat the error */
    VbNvSet(vnc, VBNV_TEST_ERROR_FUNC, 0);
    VbNvSet(vnc, VBNV_TEST_ERROR_NUM, 0);
    /* Handle error codes */
    switch (test_err) {
      case LOAD_KERNEL_RECOVERY:
        recovery = VBNV_RECOVERY_RW_TEST_LK;
        goto LoadKernelExit;
      case LOAD_KERNEL_NOT_FOUND:
      case LOAD_KERNEL_INVALID:
      case LOAD_KERNEL_REBOOT:
        retval = test_err;
        goto LoadKernelExit;
      default:
        break;
    }
  }

  /* Initialization */
  blba = params->bytes_per_lba;
  kbuf_sectors = KBUF_SIZE / blba;
  if (0 == kbuf_sectors) {
    VBDEBUG(("LoadKernel() called with sector size > KBUF_SIZE\n"));
    goto LoadKernelExit;
  }

  if (kBootDev == boot_mode && !dev_switch) {
    /* Dev firmware should be signed such that it never boots with the dev
     * switch off; so something is terribly wrong. */
    VBDEBUG(("LoadKernel() called with dev firmware but dev switch off\n"));
    if (shcall)
      shcall->check_result = VBSD_LKC_CHECK_DEV_SWITCH_MISMATCH;
    recovery = VBNV_RECOVERY_RW_DEV_MISMATCH;
    goto LoadKernelExit;
  }

  if (kBootRecovery == boot_mode) {
    /* Use the recovery key to verify the kernel */
    kernel_subkey = (VbPublicKey*)((uint8_t*)gbb + gbb->recovery_key_offset);

    /* Let the TPM know if we're in recovery mode */
    if (0 != RollbackKernelRecovery(dev_switch)) {
      VBDEBUG(("Error setting up TPM for recovery kernel\n"));
      if (shcall)
        shcall->flags |= VBSD_LK_FLAG_REC_TPM_INIT_ERROR;
      /* Ignore return code, since we need to boot recovery mode to
       * fix the TPM. */
    }

    /* Read the key indices from the TPM; ignore any errors */
    if (shared) {
      RollbackFirmwareRead(&shared->fw_version_tpm);
      RollbackKernelRead(&shared->kernel_version_tpm);
    }
  } else {
    /* Use the kernel subkey passed from LoadFirmware(). */
    kernel_subkey = &shared->kernel_subkey;

    /* Read current kernel key index from TPM. Assumes TPM is already
     * initialized. */
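    /* Note: the TPM kernel version packs two fields: the data key version in
     * the upper 16 bits and the kernel version in the lower 16 bits, matching
     * the combined_version computed for each partition below. */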
    status = RollbackKernelRead(&tpm_version);
    if (0 != status) {
      VBDEBUG(("Unable to get kernel versions from TPM\n"));
      if (status == TPM_E_MUST_REBOOT)
        retval = LOAD_KERNEL_REBOOT;
      else
        recovery = VBNV_RECOVERY_RW_TPM_ERROR;
      goto LoadKernelExit;
    }
    if (shared)
      shared->kernel_version_tpm = tpm_version;
  }

  do {
    /* Read GPT data */
    gpt.sector_bytes = (uint32_t)blba;
    gpt.drive_sectors = params->ending_lba + 1;
    if (0 != AllocAndReadGptData(&gpt)) {
      VBDEBUG(("Unable to read GPT data\n"));
      if (shcall)
        shcall->check_result = VBSD_LKC_CHECK_GPT_READ_ERROR;
      break;
    }

    /* Initialize GPT library */
    if (GPT_SUCCESS != GptInit(&gpt)) {
      VBDEBUG(("Error parsing GPT\n"));
      if (shcall)
        shcall->check_result = VBSD_LKC_CHECK_GPT_PARSE_ERROR;
      break;
    }

    /* Allocate kernel header buffers */
    kbuf = (uint8_t*)Malloc(KBUF_SIZE);
    if (!kbuf)
      break;

    /* Loop over candidate kernel partitions */
    while (GPT_SUCCESS == GptNextKernelEntry(&gpt, &part_start, &part_size)) {
      VbSharedDataKernelPart* shpart = NULL;
      VbKeyBlockHeader* key_block;
      VbKernelPreambleHeader* preamble;
      RSAPublicKey* data_key = NULL;
      uint64_t key_version;
      uint64_t combined_version;
      uint64_t body_offset;
      uint64_t body_offset_sectors;
      uint64_t body_sectors;
      int key_block_valid = 1;

      VBDEBUG(("Found kernel entry at %" PRIu64 " size %" PRIu64 "\n",
               part_start, part_size));

      if (shcall) {
        /* Set up tracking for this partition. This wraps around if called
         * many times, so initialize the partition entry each time. */
        shpart = shcall->parts + (shcall->kernel_parts_found
                                  & (VBSD_MAX_KERNEL_PARTS - 1));
        Memset(shpart, 0, sizeof(VbSharedDataKernelPart));
        shpart->sector_start = part_start;
        shpart->sector_count = part_size;
        /* TODO: GPT partitions start at 1, but cgptlib starts them at 0.
         * Adjust here, until cgptlib is fixed. */
        shpart->gpt_index = (uint8_t)(gpt.current_kernel + 1);
        shcall->kernel_parts_found++;
      }

      /* Found at least one kernel partition. */
      found_partitions++;

      /* Read the first part of the kernel partition. */
      if (part_size < kbuf_sectors) {
        VBDEBUG(("Partition too small to hold kernel.\n"));
        if (shpart)
          shpart->check_result = VBSD_LKP_CHECK_TOO_SMALL;
        goto bad_kernel;
      }

      if (0 != BootDeviceReadLBA(part_start, kbuf_sectors, kbuf)) {
        VBDEBUG(("Unable to read start of partition.\n"));
        if (shpart)
          shpart->check_result = VBSD_LKP_CHECK_READ_START;
        goto bad_kernel;
      }

      /* Verify the key block. */
      key_block = (VbKeyBlockHeader*)kbuf;
      if (0 != KeyBlockVerify(key_block, KBUF_SIZE, kernel_subkey, 0)) {
        VBDEBUG(("Verifying key block signature failed.\n"));
        if (shpart)
          shpart->check_result = VBSD_LKP_CHECK_KEY_BLOCK_SIG;

        key_block_valid = 0;

        /* If we're not in developer mode, this kernel is bad. */
        if (kBootDev != boot_mode)
          goto bad_kernel;

        /* In developer mode, we can continue if the SHA-512 hash of the key
         * block is valid. */
        if (0 != KeyBlockVerify(key_block, KBUF_SIZE, kernel_subkey, 1)) {
          VBDEBUG(("Verifying key block hash failed.\n"));
          if (shpart)
            shpart->check_result = VBSD_LKP_CHECK_KEY_BLOCK_HASH;
          goto bad_kernel;
        }
      }

      /* Check the key block flags against the current boot mode. */
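      /* key_block_flags is a bitmask: the DEVELOPER_0/_1 bits declare which
       * dev switch positions this key block may boot with, and the
       * RECOVERY_0/_1 bits do the same for the recovery switch. The bit
       * matching the current switch position must be set. */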
      if (!(key_block->key_block_flags &
            (dev_switch ? KEY_BLOCK_FLAG_DEVELOPER_1 :
             KEY_BLOCK_FLAG_DEVELOPER_0))) {
        VBDEBUG(("Key block developer flag mismatch.\n"));
        if (shpart)
          shpart->check_result = VBSD_LKP_CHECK_DEV_MISMATCH;
        key_block_valid = 0;
      }
      if (!(key_block->key_block_flags &
            (rec_switch ? KEY_BLOCK_FLAG_RECOVERY_1 :
             KEY_BLOCK_FLAG_RECOVERY_0))) {
        VBDEBUG(("Key block recovery flag mismatch.\n"));
        if (shpart)
          shpart->check_result = VBSD_LKP_CHECK_REC_MISMATCH;
        key_block_valid = 0;
      }

      /* Check for rollback of key version except in recovery mode. */
      key_version = key_block->data_key.key_version;
      if (kBootRecovery != boot_mode) {
        if (key_version < (tpm_version >> 16)) {
          VBDEBUG(("Key version too old.\n"));
          if (shpart)
            shpart->check_result = VBSD_LKP_CHECK_KEY_ROLLBACK;
          key_block_valid = 0;
        }
      }

      /* If we're not in developer mode, require the key block to be valid. */
      if (kBootDev != boot_mode && !key_block_valid) {
        VBDEBUG(("Key block is invalid.\n"));
        goto bad_kernel;
      }

      /* Get the key for preamble/data verification from the key block. */
      data_key = PublicKeyToRSA(&key_block->data_key);
      if (!data_key) {
        VBDEBUG(("Data key bad.\n"));
        if (shpart)
          shpart->check_result = VBSD_LKP_CHECK_DATA_KEY_PARSE;
        goto bad_kernel;
      }

      /* Verify the preamble, which follows the key block */
      preamble = (VbKernelPreambleHeader*)(kbuf + key_block->key_block_size);
      if ((0 != VerifyKernelPreamble(preamble,
                                     KBUF_SIZE - key_block->key_block_size,
                                     data_key))) {
        VBDEBUG(("Preamble verification failed.\n"));
        if (shpart)
          shpart->check_result = VBSD_LKP_CHECK_VERIFY_PREAMBLE;
        goto bad_kernel;
      }

      /* If the key block is valid and we're not in recovery mode, check for
       * rollback of the kernel version. */
      combined_version = ((key_version << 16) |
                          (preamble->kernel_version & 0xFFFF));
      if (shpart)
        shpart->combined_version = (uint32_t)combined_version;
      if (key_block_valid && kBootRecovery != boot_mode) {
        if (combined_version < tpm_version) {
          VBDEBUG(("Kernel version too low.\n"));
          if (shpart)
            shpart->check_result = VBSD_LKP_CHECK_KERNEL_ROLLBACK;
          /* If we're not in developer mode, kernel version must be valid. */
          if (kBootDev != boot_mode)
            goto bad_kernel;
        }
      }

      VBDEBUG(("Kernel preamble is good.\n"));
      if (shpart)
        shpart->check_result = VBSD_LKP_CHECK_PREAMBLE_VALID;

      /* Check for lowest version from a valid header. */
      if (key_block_valid && lowest_version > combined_version)
        lowest_version = combined_version;
      else {
        VBDEBUG(("Key block valid: %d\n", key_block_valid));
        VBDEBUG(("Combined version: %" PRIu64 "\n", combined_version));
      }

      /* If we already have a good kernel, no need to read another
       * one; we only needed to look at the versions to check for
       * rollback. So skip to the next kernel preamble. */
      if (-1 != good_partition)
        continue;

      /* Verify body load address matches what we expect */
      if ((preamble->body_load_address != (size_t)params->kernel_buffer) &&
          !(params->boot_flags & BOOT_FLAG_SKIP_ADDR_CHECK)) {
        VBDEBUG(("Wrong body load address.\n"));
        if (shpart)
          shpart->check_result = VBSD_LKP_CHECK_BODY_ADDRESS;
        goto bad_kernel;
      }

      /* Verify kernel body starts at a multiple of the sector size. */
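      /* The kernel body follows the key block and preamble directly, so its
       * byte offset within the partition is the sum of their sizes. */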
      body_offset = key_block->key_block_size + preamble->preamble_size;
      if (0 != body_offset % blba) {
        VBDEBUG(("Kernel body not at multiple of sector size.\n"));
        if (shpart)
          shpart->check_result = VBSD_LKP_CHECK_BODY_OFFSET;
        goto bad_kernel;
      }
      body_offset_sectors = body_offset / blba;

      /* Verify kernel body fits in the buffer */
      body_sectors = (preamble->body_signature.data_size + blba - 1) / blba;
      if (body_sectors * blba > params->kernel_buffer_size) {
        VBDEBUG(("Kernel body doesn't fit in memory.\n"));
        if (shpart)
          shpart->check_result = VBSD_LKP_CHECK_BODY_EXCEEDS_MEM;
        goto bad_kernel;
      }

      /* Verify kernel body fits in the partition */
      if (body_offset_sectors + body_sectors > part_size) {
        VBDEBUG(("Kernel body doesn't fit in partition.\n"));
        if (shpart)
          shpart->check_result = VBSD_LKP_CHECK_BODY_EXCEEDS_PART;
        goto bad_kernel;
      }

      /* Read the kernel data */
      VBPERFSTART("VB_RKD");
      if (0 != BootDeviceReadLBA(part_start + body_offset_sectors,
                                 body_sectors,
                                 params->kernel_buffer)) {
        VBDEBUG(("Unable to read kernel data.\n"));
        VBPERFEND("VB_RKD");
        if (shpart)
          shpart->check_result = VBSD_LKP_CHECK_READ_DATA;
        goto bad_kernel;
      }
      VBPERFEND("VB_RKD");

      /* Verify kernel data */
      if (0 != VerifyData((const uint8_t*)params->kernel_buffer,
                          params->kernel_buffer_size,
                          &preamble->body_signature, data_key)) {
        VBDEBUG(("Kernel data verification failed.\n"));
        if (shpart)
          shpart->check_result = VBSD_LKP_CHECK_VERIFY_DATA;
        goto bad_kernel;
      }

      /* Done with the kernel signing key, so can free it now */
      RSAPublicKeyFree(data_key);
      data_key = NULL;

      /* If we're still here, the kernel is valid. */
      /* Save the first good partition we find; that's the one we'll boot */
      VBDEBUG(("Partition is good.\n"));
      if (shpart) {
        shpart->check_result = VBSD_LKP_CHECK_KERNEL_GOOD;
        if (key_block_valid)
          shpart->flags |= VBSD_LKP_FLAG_KEY_BLOCK_VALID;
      }

      good_partition_key_block_valid = key_block_valid;
      /* TODO: GPT partitions start at 1, but cgptlib starts them at 0.
       * Adjust here, until cgptlib is fixed. */
      good_partition = gpt.current_kernel + 1;
      params->partition_number = gpt.current_kernel + 1;
      GetCurrentKernelUniqueGuid(&gpt, &params->partition_guid);
      /* TODO: GetCurrentKernelUniqueGuid() should take a destination size, or
       * the dest should be a struct, so we know it's big enough. */
      params->bootloader_address = preamble->bootloader_address;
      params->bootloader_size = preamble->bootloader_size;

      /* Update GPT to note this is the kernel we're trying */
      GptUpdateKernelEntry(&gpt, GPT_UPDATE_ENTRY_TRY);

      /* If we're in recovery mode or we're about to boot a dev-signed kernel,
       * there's no rollback protection, so we can stop at the first valid
       * kernel. */
      if (kBootRecovery == boot_mode || !key_block_valid) {
        VBDEBUG(("In recovery mode or dev-signed kernel\n"));
        break;
      }

      /* Otherwise, we do care about the key index in the TPM. If the good
       * partition's key version is the same as the tpm, then the TPM doesn't
       * need updating; we can stop now. Otherwise, we'll check all the other
       * headers to see if they contain a newer key. */
      if (combined_version == tpm_version) {
        VBDEBUG(("Same kernel version\n"));
        break;
      }

      /* Continue, so that we skip the error handling code below */
      continue;

   bad_kernel:
      /* Handle errors parsing this kernel */
      if (NULL != data_key)
        RSAPublicKeyFree(data_key);

      VBDEBUG(("Marking kernel as invalid.\n"));
      GptUpdateKernelEntry(&gpt, GPT_UPDATE_ENTRY_BAD);

    } /* while(GptNextKernelEntry) */
  } while(0);

  /* Free kernel buffer */
  if (kbuf)
    Free(kbuf);

  /* Write and free GPT data */
  WriteAndFreeGptData(&gpt);

  /* Handle finding a good partition */
  if (good_partition >= 0) {
    VBDEBUG(("Good_partition >= 0\n"));
    if (shcall)
      shcall->check_result = VBSD_LKC_CHECK_GOOD_PARTITION;

    /* See if we need to update the TPM */
    if ((kBootNormal == boot_mode) &&
        !((1 == shared->firmware_index) && (shared->flags & VBSD_FWB_TRIED))) {
      /* We only update the TPM in normal mode. We don't advance the
       * TPM if we're trying a new firmware B, because that firmware
       * may have a key change and roll forward the TPM too soon. */
      VBDEBUG(("Checking if TPM kernel version needs advancing\n"));

      if ((lowest_version > tpm_version) &&
          (lowest_version != LOWEST_TPM_VERSION)) {
        status = RollbackKernelWrite((uint32_t)lowest_version);
        if (0 != status) {
          VBDEBUG(("Error writing kernel versions to TPM.\n"));
          if (status == TPM_E_MUST_REBOOT)
            retval = LOAD_KERNEL_REBOOT;
          else
            recovery = VBNV_RECOVERY_RW_TPM_ERROR;
          goto LoadKernelExit;
        }
        if (shared)
          shared->kernel_version_tpm = (uint32_t)lowest_version;
      }
    }

    /* Lock the kernel versions */
    status = RollbackKernelLock();
    if (0 != status) {
      VBDEBUG(("Error locking kernel versions.\n"));
      /* Don't reboot to recovery mode if we're already there */
      if (kBootRecovery != boot_mode) {
        if (status == TPM_E_MUST_REBOOT)
          retval = LOAD_KERNEL_REBOOT;
        else
          recovery = VBNV_RECOVERY_RW_TPM_ERROR;
        goto LoadKernelExit;
      }
    }

    /* Success! */
    retval = LOAD_KERNEL_SUCCESS;
  } else {
    if (shcall)
      shcall->check_result = (found_partitions > 0
                              ? VBSD_LKC_CHECK_INVALID_PARTITIONS
                              : VBSD_LKC_CHECK_NO_PARTITIONS);

    /* TODO: differentiate between finding an invalid kernel
     * (found_partitions>0) and not finding one at all. Right now we
     * treat them the same, and return LOAD_KERNEL_INVALID for both. */
    retval = LOAD_KERNEL_INVALID;
  }

LoadKernelExit:

  /* Store recovery request, if any, then tear down non-volatile storage */
  VbNvSet(vnc, VBNV_RECOVERY_REQUEST, LOAD_KERNEL_RECOVERY == retval ?
          recovery : VBNV_RECOVERY_NOT_REQUESTED);
  VbNvTeardown(vnc);

  if (shared) {
    if (shcall)
      shcall->return_code = (uint8_t)retval;

    /* Save whether the good partition's key block was fully verified */
    if (good_partition_key_block_valid)
      shared->flags |= VBSD_KERNEL_KEY_VERIFIED;

    /* Save timer values */
    shared->timer_load_kernel_enter = timer_enter;
    shared->timer_load_kernel_exit = VbGetTimer();
    /* Store how much shared data we used, if any */
    params->shared_data_size = shared->data_used;
  }

  return retval;
}
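
/* Illustrative sketch (not part of this revision): a minimal hypothetical
 * caller of LoadKernel(), showing only the LoadKernelParams fields that the
 * function reads or fills in above. ExampleBootAttempt is an assumption for
 * illustration; device setup and buffer allocation are the caller's job and
 * are not shown. */
#if 0
static int ExampleBootAttempt(LoadKernelParams* p) {
  /* The caller is expected to have filled in:
   *   p->bytes_per_lba, p->ending_lba          - boot device geometry
   *   p->kernel_buffer, p->kernel_buffer_size  - destination for the body
   *   p->gbb_data, p->nv_context               - GBB and non-volatile context
   *   p->shared_data_blob, p->shared_data_size - VbSharedDataHeader region
   *   p->boot_flags                            - BOOT_FLAG_DEVELOPER, etc.
   */
  int rv = LoadKernel(p);
  if (LOAD_KERNEL_SUCCESS == rv) {
    /* p->partition_number, p->partition_guid, p->bootloader_address and
     * p->bootloader_size now describe the kernel that was verified. */
    return 0;
  }
  /* LOAD_KERNEL_REBOOT, LOAD_KERNEL_RECOVERY, etc. are left to the caller's
   * boot policy. */
  return 1;
}
#endif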