/* Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 *
 * Functions for loading a kernel from disk.
 * (Firmware portion)
 */

#include "sysincludes.h"

#include "cgptlib.h"
#include "cgptlib_internal.h"
#include "region.h"
#include "gbb_access.h"
#include "gbb_header.h"
#include "gpt_misc.h"
#include "load_kernel_fw.h"
#include "utility.h"
#include "vboot_api.h"
#include "vboot_common.h"
#include "vboot_kernel.h"

#define KBUF_SIZE 65536  /* Bytes to read at start of kernel partition */
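/* Initial "no valid version found yet" value; above any real combined version */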
#define LOWEST_TPM_VERSION 0xffffffff

typedef enum BootMode {
	kBootRecovery = 0,  /* Recovery firmware, any dev switch position */
	kBootNormal = 1,    /* Normal boot - kernel must be verified */
	kBootDev = 2        /* Developer boot - self-signed kernel ok */
} BootMode;

VbError_t LoadKernel(LoadKernelParams *params, VbCommonParams *cparams)
{
	VbSharedDataHeader *shared =
		(VbSharedDataHeader *)params->shared_data_blob;
	VbSharedDataKernelCall *shcall = NULL;
	VbNvContext* vnc = params->nv_context;
	VbPublicKey* kernel_subkey = NULL;
	int free_kernel_subkey = 0;
	GptData gpt;
	uint64_t part_start, part_size;
	uint64_t blba;
	uint64_t kbuf_sectors;
	uint8_t* kbuf = NULL;
	int found_partitions = 0;
	int good_partition = -1;
	int good_partition_key_block_valid = 0;
	uint32_t lowest_version = LOWEST_TPM_VERSION;
	int rec_switch, dev_switch;
	BootMode boot_mode;
	uint32_t require_official_os = 0;
	uint32_t body_toread;
	uint8_t *body_readptr;

	VbError_t retval = VBERROR_UNKNOWN;
	int recovery = VBNV_RECOVERY_LK_UNSPECIFIED;

	/* Sanity Checks */
	if (!params->bytes_per_lba ||
	    !params->streaming_lba_count) {
		VBDEBUG(("LoadKernel() called with invalid params\n"));
		retval = VBERROR_INVALID_PARAMETER;
		goto LoadKernelExit;
	}

	/* Clear output params in case we fail */
	params->partition_number = 0;
	params->bootloader_address = 0;
	params->bootloader_size = 0;
	params->flags = 0;

	/* Calculate switch positions and boot mode */
	rec_switch = (BOOT_FLAG_RECOVERY & params->boot_flags ? 1 : 0);
	dev_switch = (BOOT_FLAG_DEVELOPER & params->boot_flags ? 1 : 0);
	if (rec_switch) {
		boot_mode = kBootRecovery;
	} else if (dev_switch) {
		boot_mode = kBootDev;
		VbNvGet(vnc, VBNV_DEV_BOOT_SIGNED_ONLY, &require_official_os);
	} else {
		boot_mode = kBootNormal;
	}

	/*
	 * Set up tracking for this call.  This wraps around if called many
	 * times, so we need to initialize the call entry each time.
	 */
	shcall = shared->lk_calls + (shared->lk_call_count
				     & (VBSD_MAX_KERNEL_CALLS - 1));
	Memset(shcall, 0, sizeof(VbSharedDataKernelCall));
	shcall->boot_flags = (uint32_t)params->boot_flags;
	shcall->boot_mode = boot_mode;
	shcall->sector_size = (uint32_t)params->bytes_per_lba;
	shcall->sector_count = params->streaming_lba_count;
	shared->lk_call_count++;

	/* Initialization */
	blba = params->bytes_per_lba;
	kbuf_sectors = KBUF_SIZE / blba;
	if (0 == kbuf_sectors) {
		VBDEBUG(("LoadKernel() called with sector size > KBUF_SIZE\n"));
		retval = VBERROR_INVALID_PARAMETER;
		goto LoadKernelExit;
	}

	if (kBootRecovery == boot_mode) {
		/* Use the recovery key to verify the kernel */
		retval = VbGbbReadRecoveryKey(cparams, &kernel_subkey);
		if (VBERROR_SUCCESS != retval)
			goto LoadKernelExit;
		free_kernel_subkey = 1;
	} else {
		/* Use the kernel subkey passed from LoadFirmware(). */
		kernel_subkey = &shared->kernel_subkey;
	}

	/* Read GPT data */
	gpt.sector_bytes = (uint32_t)blba;
	gpt.streaming_drive_sectors = params->streaming_lba_count;
	gpt.gpt_drive_sectors = params->gpt_lba_count;
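	/* The GPT may live on a separate device from the kernel data. */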
	gpt.flags = params->boot_flags & BOOT_FLAG_EXTERNAL_GPT
			? GPT_FLAG_EXTERNAL : 0;
	if (0 != AllocAndReadGptData(params->disk_handle, &gpt)) {
		VBDEBUG(("Unable to read GPT data\n"));
		shcall->check_result = VBSD_LKC_CHECK_GPT_READ_ERROR;
		goto bad_gpt;
	}

	/* Initialize GPT library */
	if (GPT_SUCCESS != GptInit(&gpt)) {
		VBDEBUG(("Error parsing GPT\n"));
		shcall->check_result = VBSD_LKC_CHECK_GPT_PARSE_ERROR;
		goto bad_gpt;
	}

	/* Allocate kernel header buffers */
	kbuf = (uint8_t*)VbExMalloc(KBUF_SIZE);
	if (!kbuf)
		goto bad_gpt;

	/* Loop over candidate kernel partitions */
	while (GPT_SUCCESS ==
	       GptNextKernelEntry(&gpt, &part_start, &part_size)) {
		VbSharedDataKernelPart *shpart = NULL;
		VbKeyBlockHeader *key_block;
		VbKernelPreambleHeader *preamble;
		RSAPublicKey *data_key = NULL;
		VbExStream_t stream = NULL;
		uint64_t key_version;
		uint32_t combined_version;
		uint64_t body_offset;
		int key_block_valid = 1;

		VBDEBUG(("Found kernel entry at %" PRIu64 " size %" PRIu64 "\n",
			 part_start, part_size));

		/*
		 * Set up tracking for this partition.  This wraps around if
		 * called many times, so initialize the partition entry each
		 * time.
		 */
		shpart = shcall->parts + (shcall->kernel_parts_found
					  & (VBSD_MAX_KERNEL_PARTS - 1));
		Memset(shpart, 0, sizeof(VbSharedDataKernelPart));
		shpart->sector_start = part_start;
		shpart->sector_count = part_size;
		/*
		 * TODO: GPT partitions start at 1, but cgptlib starts them at
		 * 0.  Adjust here, until cgptlib is fixed.
		 */
		shpart->gpt_index = (uint8_t)(gpt.current_kernel + 1);
		shcall->kernel_parts_found++;

		/* Found at least one kernel partition. */
		found_partitions++;

		/* Set up the stream */
		if (VbExStreamOpen(params->disk_handle,
				   part_start, part_size, &stream)) {
			VBDEBUG(("Partition error getting stream.\n"));
			shpart->check_result = VBSD_LKP_CHECK_TOO_SMALL;
			goto bad_kernel;
		}

		if (0 != VbExStreamRead(stream, KBUF_SIZE, kbuf)) {
			VBDEBUG(("Unable to read start of partition.\n"));
			shpart->check_result = VBSD_LKP_CHECK_READ_START;
			goto bad_kernel;
		}

		/* Verify the key block. */
		key_block = (VbKeyBlockHeader*)kbuf;
		if (0 != KeyBlockVerify(key_block, KBUF_SIZE,
					kernel_subkey, 0)) {
			VBDEBUG(("Verifying key block signature failed.\n"));
			shpart->check_result = VBSD_LKP_CHECK_KEY_BLOCK_SIG;
			key_block_valid = 0;

			/* If not in developer mode, this kernel is bad. */
			if (kBootDev != boot_mode)
				goto bad_kernel;

			/*
			 * In developer mode, we can explicitly disallow
			 * self-signed kernels
			 */
			if (require_official_os) {
				VBDEBUG(("Self-signed kernels not enabled.\n"));
				shpart->check_result =
					VBSD_LKP_CHECK_SELF_SIGNED;
				goto bad_kernel;
			}

			/*
			 * Allow the kernel if the SHA-512 hash of the key
			 * block is valid.
			 */
			if (0 != KeyBlockVerify(key_block, KBUF_SIZE,
						kernel_subkey, 1)) {
				VBDEBUG(("Verifying key block hash failed.\n"));
				shpart->check_result =
					VBSD_LKP_CHECK_KEY_BLOCK_HASH;
				goto bad_kernel;
			}
		}

		/* Check the key block flags against the current boot mode. */
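		/* Each flag pair (_0/_1) marks validity with the switch off/on. */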
		if (!(key_block->key_block_flags &
		      (dev_switch ? KEY_BLOCK_FLAG_DEVELOPER_1 :
		       KEY_BLOCK_FLAG_DEVELOPER_0))) {
			VBDEBUG(("Key block developer flag mismatch.\n"));
			shpart->check_result = VBSD_LKP_CHECK_DEV_MISMATCH;
			key_block_valid = 0;
		}
		if (!(key_block->key_block_flags &
		      (rec_switch ? KEY_BLOCK_FLAG_RECOVERY_1 :
		       KEY_BLOCK_FLAG_RECOVERY_0))) {
			VBDEBUG(("Key block recovery flag mismatch.\n"));
			shpart->check_result = VBSD_LKP_CHECK_REC_MISMATCH;
			key_block_valid = 0;
		}

		/* Check for rollback of key version except in recovery mode. */
		key_version = key_block->data_key.key_version;
		if (kBootRecovery != boot_mode) {
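			/* kernel_version_tpm packs the key version in its high 16 bits. */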
			if (key_version < (shared->kernel_version_tpm >> 16)) {
				VBDEBUG(("Key version too old.\n"));
				shpart->check_result =
					VBSD_LKP_CHECK_KEY_ROLLBACK;
				key_block_valid = 0;
			}
			if (key_version > 0xFFFF) {
				/*
				 * Key version is stored in 16 bits in the TPM,
				 * so key versions greater than 0xFFFF can't be
				 * stored properly.
				 */
				VBDEBUG(("Key version > 0xFFFF.\n"));
				shpart->check_result =
					VBSD_LKP_CHECK_KEY_ROLLBACK;
				key_block_valid = 0;
			}
		}

		/* If not in developer mode, the key block must be valid. */
		if (kBootDev != boot_mode && !key_block_valid) {
			VBDEBUG(("Key block is invalid.\n"));
			goto bad_kernel;
		}

		/* Get key for preamble/data verification from the key block. */
		data_key = PublicKeyToRSA(&key_block->data_key);
		if (!data_key) {
			VBDEBUG(("Data key bad.\n"));
			shpart->check_result = VBSD_LKP_CHECK_DATA_KEY_PARSE;
			goto bad_kernel;
		}

		/* Verify the preamble, which follows the key block */
		preamble = (VbKernelPreambleHeader *)
			(kbuf + key_block->key_block_size);
		if ((0 != VerifyKernelPreamble(
					preamble,
					KBUF_SIZE - key_block->key_block_size,
					data_key))) {
			VBDEBUG(("Preamble verification failed.\n"));
			shpart->check_result = VBSD_LKP_CHECK_VERIFY_PREAMBLE;
			goto bad_kernel;
		}

		/*
		 * If the key block is valid and we're not in recovery mode,
		 * check for rollback of the kernel version.
		 */
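		/* High 16 bits: key version; low 16 bits: kernel version. */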
		combined_version = (uint32_t)(
				(key_version << 16) |
				(preamble->kernel_version & 0xFFFF));
		shpart->combined_version = combined_version;
		if (key_block_valid && kBootRecovery != boot_mode) {
			if (combined_version < shared->kernel_version_tpm) {
				VBDEBUG(("Kernel version too low.\n"));
				shpart->check_result =
					VBSD_LKP_CHECK_KERNEL_ROLLBACK;
				/*
				 * If not in developer mode, kernel version
				 * must be valid.
				 */
				if (kBootDev != boot_mode)
					goto bad_kernel;
			}
		}

		VBDEBUG(("Kernel preamble is good.\n"));
		shpart->check_result = VBSD_LKP_CHECK_PREAMBLE_VALID;

		/* Check for lowest version from a valid header. */
		if (key_block_valid && lowest_version > combined_version)
			lowest_version = combined_version;
		else {
			VBDEBUG(("Key block valid: %d\n", key_block_valid));
			VBDEBUG(("Combined version: %u\n",
				 (unsigned) combined_version));
		}

		/*
		 * If we already have a good kernel, no need to read another
		 * one; we only needed to look at the versions to check for
		 * rollback.  So skip to the next kernel preamble.
		 */
		if (-1 != good_partition) {
			VbExStreamClose(stream);
			stream = NULL;
			continue;
		}

		body_offset = key_block->key_block_size +
			preamble->preamble_size;

		/*
		 * Make sure the kernel starts at or before what we already
		 * read into kbuf.
		 *
		 * We could deal with a larger offset by reading and discarding
		 * the data in between the vblock and the kernel data.
		 */
		if (body_offset > KBUF_SIZE) {
			shpart->check_result = VBSD_LKP_CHECK_BODY_OFFSET;
			VBDEBUG(("Kernel body offset is %d > 64KB.\n",
				 (int)body_offset));
			goto bad_kernel;
		}

		if (!params->kernel_buffer) {
			/* Get kernel load address and size from the header. */
			params->kernel_buffer =
				(void *)((long)preamble->body_load_address);
			params->kernel_buffer_size =
				preamble->body_signature.data_size;
		} else if (preamble->body_signature.data_size >
			   params->kernel_buffer_size) {
			VBDEBUG(("Kernel body doesn't fit in memory.\n"));
			shpart->check_result = VBSD_LKP_CHECK_BODY_EXCEEDS_MEM;
			goto bad_kernel;
		}

		/*
		 * Body signature data size is 64 bit and toread is 32 bit so
		 * this could technically cause us to read less data.  That's
		 * fine, because a 4 GB kernel is implausible, and if we did
		 * have one that big, we'd simply read too little data and fail
		 * to verify it.
		 */
		body_toread = preamble->body_signature.data_size;
		body_readptr = params->kernel_buffer;

		/*
		 * If we've already read part of the kernel, copy that to the
		 * beginning of the kernel buffer.
		 */
		if (body_offset < KBUF_SIZE) {
			uint32_t body_copied = KBUF_SIZE - body_offset;

			/* If the kernel is tiny, don't over-copy */
			if (body_copied > body_toread)
				body_copied = body_toread;

			Memcpy(body_readptr, kbuf + body_offset, body_copied);
			body_toread -= body_copied;
			body_readptr += body_copied;
		}

		/* Read the kernel data */
		if (body_toread &&
		    0 != VbExStreamRead(stream, body_toread, body_readptr)) {
			VBDEBUG(("Unable to read kernel data.\n"));
			shpart->check_result = VBSD_LKP_CHECK_READ_DATA;
			goto bad_kernel;
		}

		/* Close the stream; we're done with it */
		VbExStreamClose(stream);
		stream = NULL;

		/* Verify kernel data */
		if (0 != VerifyData((const uint8_t *)params->kernel_buffer,
				    params->kernel_buffer_size,
				    &preamble->body_signature, data_key)) {
			VBDEBUG(("Kernel data verification failed.\n"));
			shpart->check_result = VBSD_LKP_CHECK_VERIFY_DATA;
			goto bad_kernel;
		}

		/* Done with the kernel signing key, so can free it now */
		RSAPublicKeyFree(data_key);
		data_key = NULL;

		/*
		 * If we're still here, the kernel is valid.  Save the first
		 * good partition we find; that's the one we'll boot.
		 */
		VBDEBUG(("Partition is good.\n"));
		shpart->check_result = VBSD_LKP_CHECK_KERNEL_GOOD;
		if (key_block_valid)
			shpart->flags |= VBSD_LKP_FLAG_KEY_BLOCK_VALID;

		good_partition_key_block_valid = key_block_valid;
		/*
		 * TODO: GPT partitions start at 1, but cgptlib starts them at
		 * 0.  Adjust here, until cgptlib is fixed.
		 */
		good_partition = gpt.current_kernel + 1;
		params->partition_number = gpt.current_kernel + 1;
		GetCurrentKernelUniqueGuid(&gpt, &params->partition_guid);
		/*
		 * TODO: GetCurrentKernelUniqueGuid() should take a destination
		 * size, or the dest should be a struct, so we know it's big
		 * enough.
		 */
		params->bootloader_address = preamble->bootloader_address;
		params->bootloader_size = preamble->bootloader_size;
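		/* Copy preamble flags if this preamble version has them;
		 * otherwise leave the cleared default. */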
		if (VbKernelHasFlags(preamble) == VBOOT_SUCCESS)
			params->flags = preamble->flags;

		/* Update GPT to note this is the kernel we're trying */
		GptUpdateKernelEntry(&gpt, GPT_UPDATE_ENTRY_TRY);

		/*
		 * If we're in recovery mode or we're about to boot a
		 * dev-signed kernel, there's no rollback protection, so we can
		 * stop at the first valid kernel.
		 */
		if (kBootRecovery == boot_mode || !key_block_valid) {
			VBDEBUG(("In recovery mode or dev-signed kernel\n"));
			break;
		}

		/*
		 * Otherwise, we do care about the key index in the TPM.  If
		 * the good partition's key version is the same as the tpm,
		 * then the TPM doesn't need updating; we can stop now.
		 * Otherwise, we'll check all the other headers to see if they
		 * contain a newer key.
		 */
		if (combined_version == shared->kernel_version_tpm) {
			VBDEBUG(("Same kernel version\n"));
			break;
		}

		/* Continue, so that we skip the error handling code below */
		continue;

	bad_kernel:
		/* Handle errors parsing this kernel */
		if (NULL != stream)
			VbExStreamClose(stream);
		if (NULL != data_key)
			RSAPublicKeyFree(data_key);

		VBDEBUG(("Marking kernel as invalid.\n"));
		GptUpdateKernelEntry(&gpt, GPT_UPDATE_ENTRY_BAD);

	} /* while(GptNextKernelEntry) */

 bad_gpt:

	/* Free kernel buffer */
	if (kbuf)
		VbExFree(kbuf);

	/* Write and free GPT data */
	WriteAndFreeGptData(params->disk_handle, &gpt);

	/* Handle finding a good partition */
	if (good_partition >= 0) {
		VBDEBUG(("Good_partition >= 0\n"));
		shcall->check_result = VBSD_LKC_CHECK_GOOD_PARTITION;
		shared->kernel_version_lowest = lowest_version;
		/*
		 * Sanity check - only store a new TPM version if we found one.
		 * If lowest_version is still at its initial value, we didn't
		 * find one; for example, we're in developer mode and just
		 * didn't look.
		 */
		if (lowest_version != LOWEST_TPM_VERSION &&
		    lowest_version > shared->kernel_version_tpm)
			shared->kernel_version_tpm = lowest_version;

		/* Success! */
		retval = VBERROR_SUCCESS;
	} else if (found_partitions > 0) {
		shcall->check_result = VBSD_LKC_CHECK_INVALID_PARTITIONS;
		recovery = VBNV_RECOVERY_RW_INVALID_OS;
		retval = VBERROR_INVALID_KERNEL_FOUND;
	} else {
		shcall->check_result = VBSD_LKC_CHECK_NO_PARTITIONS;
		recovery = VBNV_RECOVERY_RW_NO_OS;
		retval = VBERROR_NO_KERNEL_FOUND;
	}

 LoadKernelExit:

	/* Store recovery request, if any */
	VbNvSet(vnc, VBNV_RECOVERY_REQUEST, VBERROR_SUCCESS != retval ?
		recovery : VBNV_RECOVERY_NOT_REQUESTED);

	/*
	 * If LoadKernel() was called with bad parameters, shcall may not be
	 * initialized.
	 */
	if (shcall)
		shcall->return_code = (uint8_t)retval;

	/* Save whether the good partition's key block was fully verified */
	if (good_partition_key_block_valid)
		shared->flags |= VBSD_KERNEL_KEY_VERIFIED;

	/* Store how much shared data we used, if any */
	params->shared_data_size = shared->data_used;

	if (free_kernel_subkey)
		VbExFree(kernel_subkey);

	return retval;
}