squashed update

aosp: mkdtboimg,mkbootimg
test: Issue #59
parser: support boot-debug and vendor_boot-debug
pull/66/head
cfig 4 years ago
parent 577b970811
commit 852ee928c7

.gitmodules

@@ -1,3 +1,6 @@
[submodule "src/integrationTest/resources"]
path = src/integrationTest/resources
url = https://github.com/cfig/android_image_res
[submodule "src/integrationTest/resources_2"]
path = src/integrationTest/resources_2
url = https://gitee.com/cfig/android_image_res2.git

@@ -165,7 +165,7 @@ Then flash vbmeta.img.signed to your device.
## boot.img layout
Read [layout](doc/layout.md) of Android boot.img and vendor\_boot.img.
## References and Acknowledgement
<details>
<summary>more ...</summary>
@@ -192,4 +192,11 @@ https://android.googlesource.com/platform/system/core/+/refs/heads/master/libspa
Android Nexus/Pixel factory images<br/>
https://developers.google.cn/android/images<br/>
This project is developed with JetBrains products.
<a href="https://jb.gg/OpenSource">
<img src="https://user-images.githubusercontent.com/1133314/116802621-c076be80-ab46-11eb-8a14-9454a933de7d.png" alt="drawing" width="80">
</a>
</details>

@@ -18,12 +18,13 @@ from __future__ import print_function
"""Tool for packing multiple DTB/DTBO files into a single image"""
import argparse
import fnmatch
import os
import struct
import zlib
from array import array
from collections import namedtuple
from sys import stdout
class CompressionFormat(object):
"""Enum representing DT compression format for a DT entry.
@@ -36,14 +37,18 @@ class DtEntry(object):
"""Provides individual DT image file arguments to be added to a DTBO.
Attributes:
REQUIRED_KEYS_V0: 'keys' needed to be present in the dictionary passed to instantiate
an object of this class when a DTBO header of version 0 is used.
REQUIRED_KEYS_V1: 'keys' needed to be present in the dictionary passed to instantiate
an object of this class when a DTBO header of version 1 is used.
COMPRESSION_FORMAT_MASK: Mask to retrieve compression info for DT entry from flags field
when a DTBO header of version 1 is used.
"""
COMPRESSION_FORMAT_MASK = 0x0f
REQUIRED_KEYS_V0 = ('dt_file', 'dt_size', 'dt_offset', 'id', 'rev',
'custom0', 'custom1', 'custom2', 'custom3')
REQUIRED_KEYS_V1 = ('dt_file', 'dt_size', 'dt_offset', 'id', 'rev',
'flags', 'custom0', 'custom1', 'custom2')
@staticmethod
def __get_number_or_prop(arg):
@@ -82,7 +87,14 @@ class DtEntry(object):
the tuple (_REQUIRED_KEYS)
"""
self.__version = kwargs['version']
required_keys = None
if self.__version == 0:
required_keys = self.REQUIRED_KEYS_V0
elif self.__version == 1:
required_keys = self.REQUIRED_KEYS_V1
missing_keys = set(required_keys) - set(kwargs)
if missing_keys:
raise ValueError('Missing keys in DtEntry constructor: %r' %
sorted(missing_keys))
@@ -92,10 +104,13 @@
self.__dt_size = kwargs['dt_size']
self.__id = self.__get_number_or_prop(kwargs['id'])
self.__rev = self.__get_number_or_prop(kwargs['rev'])
if self.__version == 1:
self.__flags = self.__get_number_or_prop(kwargs['flags'])
self.__custom0 = self.__get_number_or_prop(kwargs['custom0'])
self.__custom1 = self.__get_number_or_prop(kwargs['custom1'])
self.__custom2 = self.__get_number_or_prop(kwargs['custom2'])
if self.__version == 0:
self.__custom3 = self.__get_number_or_prop(kwargs['custom3'])
def __str__(self):
sb = []
@@ -107,26 +122,30 @@
value=self.__id))
sb.append('{key:>20} = {value:08x}'.format(key='rev',
value=self.__rev))
if self.__version == 1:
sb.append('{key:>20} = {value:08x}'.format(key='flags',
value=self.__flags))
sb.append('{key:>20} = {value:08x}'.format(key='custom[0]',
value=self.__custom0))
sb.append('{key:>20} = {value:08x}'.format(key='custom[1]',
value=self.__custom1))
sb.append('{key:>20} = {value:08x}'.format(key='custom[2]',
value=self.__custom2))
if self.__version == 0:
sb.append('{key:>20} = {value:08x}'.format(key='custom[3]',
value=self.__custom3))
return '\n'.join(sb)
def compression_info(self):
"""CompressionFormat: compression format for DT image file.
Args:
version: Version of DTBO header, compression is only
supported from version 1.
"""
if self.__version == 0:
return CompressionFormat.NO_COMPRESSION
return self.flags & self.COMPRESSION_FORMAT_MASK
@property
def dt_file(self):
@@ -181,6 +200,10 @@ class DtEntry(object):
"""int: DT entry custom2 for this DT image."""
return self.__custom2
@property
def custom3(self):
"""int: DT entry custom3 for this DT image."""
return self.__custom3
class Dtbo(object):
"""
@@ -232,10 +255,17 @@ class Dtbo(object):
dtbo_offset: Offset where the DT image file for this dt_entry can
be found in the resulting DTBO image.
"""
if self.version == 0:
struct.pack_into('>8I', self.__metadata, metadata_offset, dt_entry.size,
dt_entry.dt_offset, dt_entry.image_id, dt_entry.rev,
dt_entry.custom0, dt_entry.custom1, dt_entry.custom2,
dt_entry.custom3)
elif self.version == 1:
struct.pack_into('>8I', self.__metadata, metadata_offset, dt_entry.size,
dt_entry.dt_offset, dt_entry.image_id, dt_entry.rev,
dt_entry.flags, dt_entry.custom0, dt_entry.custom1,
dt_entry.custom2)
def _update_metadata(self):
"""Updates the DTBO metadata.
@@ -244,7 +274,7 @@
Tree table entries and update the DTBO header.
"""
self.__metadata = array('b', b' ' * self.__metadata_size)
metadata_offset = self.header_size
for dt_entry in self.__dt_entries:
self._update_dt_entry_header(dt_entry, metadata_offset)
@@ -290,15 +320,21 @@
if self.__dt_entries:
raise ValueError('DTBO DT entries can be added only once')
offset = self.dt_entries_offset // 4
params = {}
params['version'] = self.version
params['dt_file'] = None
for i in range(0, self.dt_entry_count):
dt_table_entry = self.__metadata[offset:offset + self._DT_ENTRY_HEADER_INTS]
params['dt_size'] = dt_table_entry[0]
params['dt_offset'] = dt_table_entry[1]
for j in range(2, self._DT_ENTRY_HEADER_INTS):
required_keys = None
if self.version == 0:
required_keys = DtEntry.REQUIRED_KEYS_V0
elif self.version == 1:
required_keys = DtEntry.REQUIRED_KEYS_V1
params[required_keys[j + 1]] = str(dt_table_entry[j])
dt_entry = DtEntry(**params)
self.__dt_entries.append(dt_entry)
offset += self._DT_ENTRY_HEADER_INTS
@@ -465,14 +501,13 @@
dt_offset = (self.header_size +
dt_entry_count * self.dt_entry_size)
dt_entry_buf = b""
for dt_entry in dt_entries:
if not isinstance(dt_entry, DtEntry):
raise ValueError('Adding invalid DT entry object to DTBO')
entry = self._find_dt_entry_with_same_file(dt_entry)
dt_entry_compression_info = dt_entry.compression_info()
if entry and (entry.compression_info() == dt_entry_compression_info):
dt_entry.dt_offset = entry.dt_offset
dt_entry.size = entry.size
else:
@@ -510,7 +545,7 @@
offset = self.dt_entries[idx].dt_offset
self.__file.seek(offset, 0)
fout.seek(0)
compression_format = self.dt_entries[idx].compression_info()
if decompress and compression_format:
if (compression_format == CompressionFormat.ZLIB_COMPRESSION or
compression_format == CompressionFormat.GZIP_COMPRESSION):
@@ -580,6 +615,9 @@ def parse_dt_entry(global_args, arglist):
parser.add_argument('--custom2', type=str, dest='custom2',
action='store',
default=global_args.global_custom2)
parser.add_argument('--custom3', type=str, dest='custom3',
action='store',
default=global_args.global_custom3)
return parser.parse_args(arglist)
@@ -612,7 +650,7 @@ def parse_dt_entries(global_args, arg_list):
raise ValueError('Input DT images must be provided')
total_images = len(img_file_idx)
for idx in range(total_images):
start_idx = img_file_idx[idx]
if idx == total_images - 1:
argv = arg_list[start_idx:]
@@ -621,6 +659,7 @@
argv = arg_list[start_idx:end_idx]
args = parse_dt_entry(global_args, argv)
params = vars(args)
params['version'] = global_args.version
params['dt_offset'] = 0
params['dt_size'] = os.fstat(params['dt_file'].fileno()).st_size
dt_entries.append(DtEntry(**params))
@@ -753,6 +792,8 @@ def parse_create_args(arg_list):
action='store', default='0')
parser.add_argument('--custom2', type=str, dest='global_custom2',
action='store', default='0')
parser.add_argument('--custom3', type=str, dest='global_custom3',
action='store', default='0')
args = parser.parse_args(argv)
return args, remainder
@@ -769,7 +810,7 @@ def parse_dump_cmd_args(arglist):
parser = argparse.ArgumentParser(prog='dump')
parser.add_argument('--output', '-o', nargs='?',
type=argparse.FileType('w'),
dest='outfile',
default=stdout)
parser.add_argument('--dtb', '-b', nargs='?', type=str,
@@ -789,7 +830,7 @@ def parse_config_create_cmd_args(arglist):
"""
parser = argparse.ArgumentParser(prog='cfg_create')
parser.add_argument('conf_file', nargs='?',
type=argparse.FileType('r'),
default=None)
cwd = os.getcwd()
parser.add_argument('--dtb-dir', '-d', nargs='?', type=str,
@@ -845,15 +886,22 @@ def create_dtbo_image_from_config(fout, argv):
if not args.conf_file:
raise ValueError('Configuration file must be provided')
_DT_KEYS = ('id', 'rev', 'flags', 'custom0', 'custom1', 'custom2', 'custom3')
_GLOBAL_KEY_TYPES = {'dt_type': str, 'page_size': int, 'version': int}
global_args, dt_args = parse_config_file(args.conf_file,
_DT_KEYS, _GLOBAL_KEY_TYPES)
version = global_args['version']
params = {}
params['version'] = version
dt_entries = []
for dt_arg in dt_args:
filepath = dt_arg['filename']
if not os.path.isabs(filepath):
for root, dirnames, filenames in os.walk(args.dtbdir):
for filename in fnmatch.filter(filenames, os.path.basename(filepath)):
filepath = os.path.join(root, filename)
params['dt_file'] = open(filepath, 'rb')
params['dt_offset'] = 0
params['dt_size'] = os.fstat(params['dt_file'].fileno()).st_size
@@ -865,7 +913,7 @@ def create_dtbo_image_from_config(fout, argv):
dt_entries.append(DtEntry(**params))
# Create and write DTBO file
dtbo = Dtbo(fout, global_args['dt_type'], global_args['page_size'], version)
dt_entry_buf = dtbo.add_dt_entries(dt_entries)
dtbo.commit(dt_entry_buf)
fout.close()
@@ -916,6 +964,7 @@ def print_create_usage(progname):
sb.append(' --custom0=<number>')
sb.append(' --custom1=<number>')
sb.append(' --custom2=<number>\n')
sb.append(' --custom3=<number>\n')
sb.append(' The value could be a number or a DT node path.')
sb.append(' <number> could be a 32-bits digit or hex value, ex. 68000, 0x6800.')
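To make the v0/v1 split above concrete, here is a minimal sketch (not part of the patch) of the two dt_table_entry layouts mkdtboimg now distinguishes: both are eight big-endian 32-bit words, v0 ending in custom[0..3] and v1 carrying flags plus custom[0..2]; the sample values are made up.

import struct

def pack_dt_entry_v0(dt_size, dt_offset, image_id, rev, custom):
    # v0 entry: dt_size, dt_offset, id, rev, custom[0..3] (no flags word)
    return struct.pack('>8I', dt_size, dt_offset, image_id, rev, *custom)

def pack_dt_entry_v1(dt_size, dt_offset, image_id, rev, flags, custom):
    # v1 entry: dt_size, dt_offset, id, rev, flags, custom[0..2]
    return struct.pack('>8I', dt_size, dt_offset, image_id, rev, flags, *custom)

# both variants stay 8 * 4 = 32 bytes, only the meaning of the last words changes
assert len(pack_dt_entry_v0(0x1000, 0x200, 1, 0, [0, 0, 0, 0])) == 32
assert len(pack_dt_entry_v1(0x1000, 0x200, 1, 0, 0x05, [0, 0, 0])) == 32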

@@ -1,4 +1,5 @@
#!/usr/bin/env python3
#
# Copyright 2015, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,16 +14,55 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates the boot image."""
from argparse import (ArgumentParser, ArgumentTypeError,
FileType, RawDescriptionHelpFormatter)
from hashlib import sha1
from os import fstat
from struct import pack
import array
import collections
import os
import re
import subprocess
import tempfile
# Constant and structure definition is in
# system/tools/mkbootimg/include/bootimg/bootimg.h
BOOT_MAGIC = 'ANDROID!'
BOOT_MAGIC_SIZE = 8
BOOT_NAME_SIZE = 16
BOOT_ARGS_SIZE = 512
BOOT_EXTRA_ARGS_SIZE = 1024
BOOT_IMAGE_HEADER_V1_SIZE = 1648
BOOT_IMAGE_HEADER_V2_SIZE = 1660
BOOT_IMAGE_HEADER_V3_SIZE = 1580
BOOT_IMAGE_HEADER_V3_PAGESIZE = 4096
BOOT_IMAGE_HEADER_V4_SIZE = 1584
BOOT_IMAGE_V4_SIGNATURE_SIZE = 4096
VENDOR_BOOT_MAGIC = 'VNDRBOOT'
VENDOR_BOOT_MAGIC_SIZE = 8
VENDOR_BOOT_NAME_SIZE = BOOT_NAME_SIZE
VENDOR_BOOT_ARGS_SIZE = 2048
VENDOR_BOOT_IMAGE_HEADER_V3_SIZE = 2112
VENDOR_BOOT_IMAGE_HEADER_V4_SIZE = 2128
VENDOR_RAMDISK_TYPE_NONE = 0
VENDOR_RAMDISK_TYPE_PLATFORM = 1
VENDOR_RAMDISK_TYPE_RECOVERY = 2
VENDOR_RAMDISK_TYPE_DLKM = 3
VENDOR_RAMDISK_NAME_SIZE = 32
VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE = 16
VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE = 108
# Names with special meaning, mustn't be specified in --ramdisk_name.
VENDOR_RAMDISK_NAME_BLOCKLIST = {b'default'}
PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT = '--vendor_ramdisk_fragment'
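# Size cross-check (illustrative, not part of the patch): the header sizes
# above follow from the field layouts written further down, e.g.
#   BOOT_IMAGE_HEADER_V3_SIZE          = 8 + 4*4 + 16 + 4 + (512 + 1024) = 1580
#   BOOT_IMAGE_HEADER_V4_SIZE          = 1580 + 4 (boot signature size)  = 1584
#   VENDOR_BOOT_IMAGE_HEADER_V4_SIZE   = 2112 + 4*4 (v4 table fields)    = 2128
#   VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE = 4 + 4 + 4 + 32 + 4*16           = 108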
def filesize(f):
if f is None:
@@ -49,87 +89,135 @@ def pad_file(f, padding):
def get_number_of_pages(image_size, page_size):
"""calculates the number of pages required for the image"""
return (image_size + page_size - 1) // page_size
def get_recovery_dtbo_offset(args):
"""calculates the offset of recovery_dtbo image in the boot image"""
num_header_pages = 1 # header occupies a page
num_kernel_pages = get_number_of_pages(filesize(args.kernel), args.pagesize)
num_ramdisk_pages = get_number_of_pages(filesize(args.ramdisk),
args.pagesize)
num_second_pages = get_number_of_pages(filesize(args.second), args.pagesize)
dtbo_offset = args.pagesize * (num_header_pages + num_kernel_pages +
num_ramdisk_pages + num_second_pages)
return dtbo_offset
def write_header_v3_and_above(args):
if args.header_version > 3:
boot_header_size = BOOT_IMAGE_HEADER_V4_SIZE
else:
boot_header_size = BOOT_IMAGE_HEADER_V3_SIZE
args.output.write(pack(f'{BOOT_MAGIC_SIZE}s', BOOT_MAGIC.encode()))
# kernel size in bytes
args.output.write(pack('I', filesize(args.kernel)))
# ramdisk size in bytes
args.output.write(pack('I', filesize(args.ramdisk)))
# os version and patch level
args.output.write(pack('I', (args.os_version << 11) | args.os_patch_level))
args.output.write(pack('I', boot_header_size))
# reserved
args.output.write(pack('4I', 0, 0, 0, 0))
# version of boot image header
args.output.write(pack('I', args.header_version))
args.output.write(pack(f'{BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE}s',
args.cmdline))
if args.header_version >= 4:
# The signature used to verify boot image v4.
args.output.write(pack('I', BOOT_IMAGE_V4_SIGNATURE_SIZE))
pad_file(args.output, BOOT_IMAGE_HEADER_V3_PAGESIZE)
def write_vendor_boot_header(args):
if filesize(args.dtb) == 0:
raise ValueError('DTB image must not be empty.')
if args.header_version > 3:
vendor_ramdisk_size = args.vendor_ramdisk_total_size
vendor_boot_header_size = VENDOR_BOOT_IMAGE_HEADER_V4_SIZE
else:
vendor_ramdisk_size = filesize(args.vendor_ramdisk)
vendor_boot_header_size = VENDOR_BOOT_IMAGE_HEADER_V3_SIZE
args.vendor_boot.write(pack(f'{VENDOR_BOOT_MAGIC_SIZE}s',
VENDOR_BOOT_MAGIC.encode()))
# version of boot image header
args.vendor_boot.write(pack('I', args.header_version))
# flash page size
args.vendor_boot.write(pack('I', args.pagesize))
# kernel physical load address
args.vendor_boot.write(pack('I', args.base + args.kernel_offset))
# ramdisk physical load address
args.vendor_boot.write(pack('I', args.base + args.ramdisk_offset))
# ramdisk size in bytes
args.vendor_boot.write(pack('I', vendor_ramdisk_size))
args.vendor_boot.write(pack(f'{VENDOR_BOOT_ARGS_SIZE}s',
args.vendor_cmdline))
# kernel tags physical load address
args.vendor_boot.write(pack('I', args.base + args.tags_offset))
# asciiz product name
args.vendor_boot.write(pack(f'{VENDOR_BOOT_NAME_SIZE}s', args.board))
# header size in bytes
args.vendor_boot.write(pack('I', vendor_boot_header_size))
# dtb size in bytes
args.vendor_boot.write(pack('I', filesize(args.dtb)))
# dtb physical load address
args.vendor_boot.write(pack('Q', args.base + args.dtb_offset))
if args.header_version > 3:
vendor_ramdisk_table_size = (args.vendor_ramdisk_table_entry_num *
VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE)
# vendor ramdisk table size in bytes
args.vendor_boot.write(pack('I', vendor_ramdisk_table_size))
# number of vendor ramdisk table entries
args.vendor_boot.write(pack('I', args.vendor_ramdisk_table_entry_num))
# vendor ramdisk table entry size in bytes
args.vendor_boot.write(pack('I', VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE))
# bootconfig section size in bytes
args.vendor_boot.write(pack('I', filesize(args.vendor_bootconfig)))
pad_file(args.vendor_boot, args.pagesize)
def write_header(args):
if args.header_version > 4:
raise ValueError(
f'Boot header version {args.header_version} not supported')
if args.header_version in {3, 4}:
return write_header_v3_and_above(args)
ramdisk_load_address = ((args.base + args.ramdisk_offset)
if filesize(args.ramdisk) > 0 else 0)
second_load_address = ((args.base + args.second_offset)
if filesize(args.second) > 0 else 0)
args.output.write(pack(f'{BOOT_MAGIC_SIZE}s', BOOT_MAGIC.encode()))
# kernel size in bytes
args.output.write(pack('I', filesize(args.kernel)))
# kernel physical load address
args.output.write(pack('I', args.base + args.kernel_offset))
# ramdisk size in bytes
args.output.write(pack('I', filesize(args.ramdisk)))
# ramdisk physical load address
args.output.write(pack('I', ramdisk_load_address))
# second bootloader size in bytes
args.output.write(pack('I', filesize(args.second)))
# second bootloader physical load address
args.output.write(pack('I', second_load_address))
# kernel tags physical load address
args.output.write(pack('I', args.base + args.tags_offset))
# flash page size
args.output.write(pack('I', args.pagesize))
# version of boot image header
args.output.write(pack('I', args.header_version))
# os version and patch level
args.output.write(pack('I', (args.os_version << 11) | args.os_patch_level))
# asciiz product name
args.output.write(pack(f'{BOOT_NAME_SIZE}s', args.board))
args.output.write(pack(f'{BOOT_ARGS_SIZE}s', args.cmdline))
sha = sha1()
update_sha(sha, args.kernel)
@@ -144,14 +232,18 @@ def write_header(args):
img_id = pack('32s', sha.digest())
args.output.write(img_id)
args.output.write(pack(f'{BOOT_EXTRA_ARGS_SIZE}s', args.extra_cmdline))
if args.header_version > 0:
if args.recovery_dtbo:
# recovery dtbo size in bytes
args.output.write(pack('I', filesize(args.recovery_dtbo)))
# recovery dtbo offset in the boot image
args.output.write(pack('Q', get_recovery_dtbo_offset(args)))
else:
# Set to zero if no recovery dtbo
args.output.write(pack('I', 0))
args.output.write(pack('Q', 0))
# Populate boot image header size for header versions 1 and 2.
if args.header_version == 1:
@@ -160,29 +252,101 @@ def write_header(args):
args.output.write(pack('I', BOOT_IMAGE_HEADER_V2_SIZE))
if args.header_version > 1:
if filesize(args.dtb) == 0:
raise ValueError('DTB image must not be empty.')
# dtb size in bytes
args.output.write(pack('I', filesize(args.dtb)))
# dtb physical load address
args.output.write(pack('Q', args.base + args.dtb_offset))
pad_file(args.output, args.pagesize)
return img_id
class AsciizBytes:
"""Parses a string and encodes it as an asciiz bytes object.
>>> AsciizBytes(bufsize=4)('foo')
b'foo\\x00'
>>> AsciizBytes(bufsize=4)('foob')
Traceback (most recent call last):
...
argparse.ArgumentTypeError: Encoded asciiz length exceeded: max 4, got 5
"""
def __init__(self, bufsize):
self.bufsize = bufsize
def __call__(self, arg):
arg_bytes = arg.encode() + b'\x00'
if len(arg_bytes) > self.bufsize:
raise ArgumentTypeError(
'Encoded asciiz length exceeded: '
f'max {self.bufsize}, got {len(arg_bytes)}')
return arg_bytes
class VendorRamdiskTableBuilder:
"""Vendor ramdisk table builder.
Attributes:
entries: A list of VendorRamdiskTableEntry namedtuple.
ramdisk_total_size: Total size in bytes of all ramdisks in the table.
"""
VendorRamdiskTableEntry = collections.namedtuple( # pylint: disable=invalid-name
'VendorRamdiskTableEntry',
['ramdisk_path', 'ramdisk_size', 'ramdisk_offset', 'ramdisk_type',
'ramdisk_name', 'board_id'])
def __init__(self):
self.entries = []
self.ramdisk_total_size = 0
self.ramdisk_names = set()
def add_entry(self, ramdisk_path, ramdisk_type, ramdisk_name, board_id):
# Strip any trailing null for simple comparison.
stripped_ramdisk_name = ramdisk_name.rstrip(b'\x00')
if stripped_ramdisk_name in VENDOR_RAMDISK_NAME_BLOCKLIST:
raise ValueError(
f'Banned vendor ramdisk name: {stripped_ramdisk_name}')
if stripped_ramdisk_name in self.ramdisk_names:
raise ValueError(
f'Duplicated vendor ramdisk name: {stripped_ramdisk_name}')
self.ramdisk_names.add(stripped_ramdisk_name)
if board_id is None:
board_id = array.array(
'I', [0] * VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE)
else:
board_id = array.array('I', board_id)
if len(board_id) != VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE:
raise ValueError('board_id size must be '
f'{VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE}')
with open(ramdisk_path, 'rb') as f:
ramdisk_size = filesize(f)
self.entries.append(self.VendorRamdiskTableEntry(
ramdisk_path, ramdisk_size, self.ramdisk_total_size, ramdisk_type,
ramdisk_name, board_id))
self.ramdisk_total_size += ramdisk_size
def write_ramdisks_padded(self, fout, alignment):
for entry in self.entries:
with open(entry.ramdisk_path, 'rb') as f:
fout.write(f.read())
pad_file(fout, alignment)
def write_entries_padded(self, fout, alignment):
for entry in self.entries:
fout.write(pack('I', entry.ramdisk_size))
fout.write(pack('I', entry.ramdisk_offset))
fout.write(pack('I', entry.ramdisk_type))
fout.write(pack(f'{VENDOR_RAMDISK_NAME_SIZE}s',
entry.ramdisk_name))
fout.write(entry.board_id)
pad_file(fout, alignment)
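# Illustrative usage sketch (not part of the patch), assuming this script is
# importable as `mkbootimg` and that a file named dlkm.cpio exists: each
# add_entry() records one ramdisk fragment, and the two write_*_padded() calls
# emit the fragment blobs and their v4 table entries with page alignment.
#
#   import mkbootimg
#   builder = mkbootimg.VendorRamdiskTableBuilder()
#   builder.add_entry('dlkm.cpio', mkbootimg.VENDOR_RAMDISK_TYPE_DLKM, b'dlkm', None)
#   with open('vendor_ramdisk_section.bin', 'wb') as out:
#       builder.write_ramdisks_padded(out, 4096)
#       builder.write_entries_padded(out, 4096)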
def write_padded_file(f_out, f_in, padding):
@@ -225,49 +389,236 @@ def parse_os_patch_level(x):
return 0
def parse_vendor_ramdisk_type(x):
type_dict = {
'none': VENDOR_RAMDISK_TYPE_NONE,
'platform': VENDOR_RAMDISK_TYPE_PLATFORM,
'recovery': VENDOR_RAMDISK_TYPE_RECOVERY,
'dlkm': VENDOR_RAMDISK_TYPE_DLKM,
}
if x.lower() in type_dict:
return type_dict[x.lower()]
return parse_int(x)
def get_vendor_boot_v4_usage():
return """vendor boot version 4 arguments:
--ramdisk_type {none,platform,recovery,dlkm}
specify the type of the ramdisk
--ramdisk_name NAME
specify the name of the ramdisk
--board_id{0..15} NUMBER
specify the value of the board_id vector, defaults to 0
--vendor_ramdisk_fragment VENDOR_RAMDISK_FILE
path to the vendor ramdisk file
These options can be specified multiple times, where each vendor ramdisk
option group ends with a --vendor_ramdisk_fragment option.
Each option group appends an additional ramdisk to the vendor boot image.
"""
def parse_vendor_ramdisk_args(args, args_list):
"""Parses vendor ramdisk specific arguments.
Args:
args: An argparse.Namespace object. Parsed results are stored into this
object.
args_list: A list of argument strings to be parsed.
Returns:
A list argument strings that are not parsed by this method.
"""
parser = ArgumentParser(add_help=False)
parser.add_argument('--ramdisk_type', type=parse_vendor_ramdisk_type,
default=VENDOR_RAMDISK_TYPE_NONE)
parser.add_argument('--ramdisk_name',
type=AsciizBytes(bufsize=VENDOR_RAMDISK_NAME_SIZE),
required=True)
for i in range(VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE):
parser.add_argument(f'--board_id{i}', type=parse_int, default=0)
parser.add_argument(PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT, required=True)
unknown_args = []
vendor_ramdisk_table_builder = VendorRamdiskTableBuilder()
if args.vendor_ramdisk is not None:
vendor_ramdisk_table_builder.add_entry(
args.vendor_ramdisk.name, VENDOR_RAMDISK_TYPE_PLATFORM, b'', None)
while PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT in args_list:
idx = args_list.index(PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT) + 2
vendor_ramdisk_args = args_list[:idx]
args_list = args_list[idx:]
ramdisk_args, extra_args = parser.parse_known_args(vendor_ramdisk_args)
ramdisk_args_dict = vars(ramdisk_args)
unknown_args.extend(extra_args)
ramdisk_path = ramdisk_args.vendor_ramdisk_fragment
ramdisk_type = ramdisk_args.ramdisk_type
ramdisk_name = ramdisk_args.ramdisk_name
board_id = [ramdisk_args_dict[f'board_id{i}']
for i in range(VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE)]
vendor_ramdisk_table_builder.add_entry(ramdisk_path, ramdisk_type,
ramdisk_name, board_id)
if len(args_list) > 0:
unknown_args.extend(args_list)
args.vendor_ramdisk_total_size = (vendor_ramdisk_table_builder
.ramdisk_total_size)
args.vendor_ramdisk_table_entry_num = len(vendor_ramdisk_table_builder
.entries)
args.vendor_ramdisk_table_builder = vendor_ramdisk_table_builder
return unknown_args
def parse_cmdline():
version_parser = ArgumentParser(add_help=False)
version_parser.add_argument('--header_version', type=parse_int, default=0)
if version_parser.parse_known_args()[0].header_version < 3:
# For boot header v0 to v2, the kernel commandline field is split into
# two fields, cmdline and extra_cmdline. Both fields are asciiz strings,
# so we minus one here to ensure the encoded string plus the
# null-terminator can fit in the buffer size.
cmdline_size = BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE - 1
else:
cmdline_size = BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
epilog=get_vendor_boot_v4_usage())
parser.add_argument('--kernel', type=FileType('rb'),
help='path to the kernel')
parser.add_argument('--ramdisk', type=FileType('rb'),
help='path to the ramdisk')
parser.add_argument('--second', type=FileType('rb'),
help='path to the second bootloader')
parser.add_argument('--dtb', type=FileType('rb'), help='path to the dtb')
dtbo_group = parser.add_mutually_exclusive_group()
dtbo_group.add_argument('--recovery_dtbo', type=FileType('rb'),
help='path to the recovery DTBO')
dtbo_group.add_argument('--recovery_acpio', type=FileType('rb'),
metavar='RECOVERY_ACPIO', dest='recovery_dtbo',
help='path to the recovery ACPIO')
parser.add_argument('--cmdline', type=AsciizBytes(bufsize=cmdline_size),
default='', help='kernel command line arguments')
parser.add_argument('--vendor_cmdline',
type=AsciizBytes(bufsize=VENDOR_BOOT_ARGS_SIZE),
default='',
help='vendor boot kernel command line arguments')
parser.add_argument('--base', type=parse_int, default=0x10000000,
help='base address')
parser.add_argument('--kernel_offset', type=parse_int, default=0x00008000,
help='kernel offset')
parser.add_argument('--ramdisk_offset', type=parse_int, default=0x01000000,
help='ramdisk offset')
parser.add_argument('--second_offset', type=parse_int, default=0x00f00000,
help='second bootloader offset')
parser.add_argument('--dtb_offset', type=parse_int, default=0x01f00000,
help='dtb offset')
parser.add_argument('--os_version', type=parse_os_version, default=0,
help='operating system version')
parser.add_argument('--os_patch_level', type=parse_os_patch_level,
default=0, help='operating system patch level')
parser.add_argument('--tags_offset', type=parse_int, default=0x00000100,
help='tags offset')
parser.add_argument('--board', type=AsciizBytes(bufsize=BOOT_NAME_SIZE),
default='', help='board name')
parser.add_argument('--pagesize', type=parse_int,
choices=[2**i for i in range(11, 15)], default=2048,
help='page size')
parser.add_argument('--id', action='store_true',
help='print the image ID on standard output')
parser.add_argument('--header_version', type=parse_int, default=0,
help='boot image header version')
parser.add_argument('-o', '--output', type=FileType('wb'),
help='output file name')
parser.add_argument('--gki_signing_algorithm',
help='GKI signing algorithm to use')
parser.add_argument('--gki_signing_key',
help='path to RSA private key file')
parser.add_argument('--gki_signing_signature_args',
help='other hash arguments passed to avbtool')
parser.add_argument('--gki_signing_avbtool_path',
help='path to avbtool for boot signature generation')
parser.add_argument('--vendor_boot', type=FileType('wb'),
help='vendor boot output file name')
parser.add_argument('--vendor_ramdisk', type=FileType('rb'),
help='path to the vendor ramdisk')
parser.add_argument('--vendor_bootconfig', type=FileType('rb'),
help='path to the vendor bootconfig file')
args, extra_args = parser.parse_known_args()
if args.vendor_boot is not None and args.header_version > 3:
extra_args = parse_vendor_ramdisk_args(args, extra_args)
if len(extra_args) > 0:
raise ValueError(f'Unrecognized arguments: {extra_args}')
if args.header_version < 3:
args.extra_cmdline = args.cmdline[BOOT_ARGS_SIZE-1:]
args.cmdline = args.cmdline[:BOOT_ARGS_SIZE-1] + b'\x00'
assert len(args.cmdline) <= BOOT_ARGS_SIZE
assert len(args.extra_cmdline) <= BOOT_EXTRA_ARGS_SIZE
return args
def add_boot_image_signature(args, pagesize):
"""Adds the boot image signature.
Note that the signature will only be verified in VTS to ensure a
generic boot.img is used. It will not be used by the device
bootloader at boot time. The bootloader should only verify
the boot vbmeta at the end of the boot partition (or in the top-level
vbmeta partition) via the Android Verified Boot process, when the
device boots.
"""
args.output.flush() # Flush the buffer for signature calculation.
# Appends zeros if the signing key is not specified.
if not args.gki_signing_key or not args.gki_signing_algorithm:
zeros = b'\x00' * BOOT_IMAGE_V4_SIGNATURE_SIZE
args.output.write(zeros)
pad_file(args.output, pagesize)
return
avbtool = 'avbtool' # Used from otatools.zip or Android build env.
# We need to specify the path of avbtool in build/core/Makefile.
# Because avbtool is not guaranteed to be in $PATH there.
if args.gki_signing_avbtool_path:
avbtool = args.gki_signing_avbtool_path
# Need to specify a value of --partition_size for avbtool to work.
# We use 64 MB below, but avbtool will not resize the boot image to
# this size because --do_not_append_vbmeta_image is also specified.
avbtool_cmd = [
avbtool, 'add_hash_footer',
'--partition_name', 'boot',
'--partition_size', str(64 * 1024 * 1024),
'--image', args.output.name,
'--algorithm', args.gki_signing_algorithm,
'--key', args.gki_signing_key,
'--salt', 'd00df00d'] # TODO: use a hash of kernel/ramdisk as the salt.
# Additional arguments passed to avbtool.
if args.gki_signing_signature_args:
avbtool_cmd += args.gki_signing_signature_args.split()
# Outputs the signed vbmeta to a separate file, then append to boot.img
# as the boot signature.
with tempfile.TemporaryDirectory() as temp_out_dir:
boot_signature_output = os.path.join(temp_out_dir, 'boot_signature')
avbtool_cmd += ['--do_not_append_vbmeta_image',
'--output_vbmeta_image', boot_signature_output]
subprocess.check_call(avbtool_cmd)
with open(boot_signature_output, 'rb') as boot_signature:
if filesize(boot_signature) > BOOT_IMAGE_V4_SIGNATURE_SIZE:
raise ValueError(
f'boot signature size is > {BOOT_IMAGE_V4_SIGNATURE_SIZE}')
write_padded_file(args.output, boot_signature, pagesize)
def write_data(args, pagesize):
@@ -279,37 +630,44 @@ def write_data(args, pagesize):
write_padded_file(args.output, args.recovery_dtbo, pagesize)
if args.header_version == 2:
write_padded_file(args.output, args.dtb, pagesize)
if args.header_version >= 4:
add_boot_image_signature(args, pagesize)
def write_vendor_boot_data(args):
if args.header_version > 3:
builder = args.vendor_ramdisk_table_builder
builder.write_ramdisks_padded(args.vendor_boot, args.pagesize)
write_padded_file(args.vendor_boot, args.dtb, args.pagesize)
builder.write_entries_padded(args.vendor_boot, args.pagesize)
write_padded_file(args.vendor_boot, args.vendor_bootconfig,
args.pagesize)
else:
write_padded_file(args.vendor_boot, args.vendor_ramdisk, args.pagesize)
write_padded_file(args.vendor_boot, args.dtb, args.pagesize)
def main():
args = parse_cmdline()
if args.vendor_boot is not None:
if args.header_version not in {3, 4}:
raise ValueError(
'--vendor_boot not compatible with given header version')
if args.header_version == 3 and args.vendor_ramdisk is None:
raise ValueError('--vendor_ramdisk missing or invalid')
write_vendor_boot_header(args)
write_vendor_boot_data(args)
if args.output is not None:
if args.second is not None and args.header_version > 2:
raise ValueError(
'--second not compatible with given header version')
img_id = write_header(args)
if args.header_version > 2:
write_data(args, BOOT_IMAGE_HEADER_V3_PAGESIZE)
else:
write_data(args, args.pagesize)
if args.id and img_id is not None:
print('0x' + ''.join(f'{octet:02x}' for octet in img_id))
if __name__ == '__main__':
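For reference, a small reader for the v3/v4 header that write_header_v3_and_above() emits (a sketch, not part of the patch, covering only the fixed header fields; the layout is little-endian):

import struct

def read_boot_header_v3(path):
    # Layout written by write_header_v3_and_above(): magic, kernel_size,
    # ramdisk_size, os_version, header_size, four reserved words,
    # header_version, the 1536-byte cmdline; a v4 header appends signature_size.
    with open(path, 'rb') as f:
        if f.read(8) != b'ANDROID!':
            raise ValueError('not a boot image')
        kernel_size, ramdisk_size, os_version, header_size = struct.unpack('<4I', f.read(16))
        f.read(16)  # reserved
        header_version, = struct.unpack('<I', f.read(4))
        cmdline = f.read(1536).rstrip(b'\x00')
        signature_size = 0
        if header_version >= 4:
            signature_size, = struct.unpack('<I', f.read(4))
        return {'kernel_size': kernel_size, 'ramdisk_size': ramdisk_size,
                'os_version': os_version, 'header_size': header_size,
                'header_version': header_version, 'cmdline': cmdline,
                'signature_size': signature_size}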

@@ -132,11 +132,6 @@ data class BootV3(
}
fun sign(fileName: String): BootV3 {
val tab = AsciiTable().let {
it.addRule()
it.addRow("")
it
}
if (File(Avb.getJsonFileName(info.output)).exists()) {
Signer.signAVB(fileName, this.info.imageSize, String.format(Helper.prop("avbtool"), "v1.2"))
} else {
@@ -174,7 +169,11 @@ data class BootV3(
info.output, this.bootSignature.file,
this.bootSignature.position.toLong(), this.bootSignature.size
)
try {
Avb().parseVbMeta(this.bootSignature.file)
} catch (e: IllegalArgumentException) {
log.warn("boot signature is invalid")
}
//dump info again
@@ -225,7 +224,9 @@ data class BootV3(
if (this.info.signatureSize > 0) {
it.addRow("boot signature", this.bootSignature.file)
Avb.getJsonFileName(this.bootSignature.file).let { jsFile ->
it.addRow("\\-- decoded boot signature", if (File(jsFile).exists()) jsFile else "N/A")
}
it.addRule()
}
Avb.getJsonFileName(info.output).let { jsonFile ->

@@ -19,32 +19,27 @@ class BootImgParser() : IPackable {
private val workDir = Helper.prop("workDir")
override fun capabilities(): List<String> {
return listOf("^boot(-debug)?\\.img$", "^recovery\\.img$", "^recovery-two-step\\.img$")
}
override fun unpack(fileName: String) {
cleanUp()
val hv = probeHeaderVersion(fileName)
log.info("header version $hv")
if (hv in 0..2) {
val b2 = BootV2
.parse(fileName)
.extractImages()
.extractVBMeta()
.printSummary()
log.debug(b2.toString())
} else {
val b3 = BootV3
.parse(fileName)
.extractImages()
.extractVBMeta()
.printSummary()
log.debug(b3.toString())
}
}
@@ -61,24 +56,27 @@ class BootImgParser() : IPackable {
log.info("\n{}", tab.render())
return
}
when (val hv = probeHeaderVersion(fileName)) {
0, 1, 2 ->
ObjectMapper().readValue(File(cfgFile), BootV2::class.java)
.pack()
.sign()
3, 4 ->
ObjectMapper().readValue(File(cfgFile), BootV3::class.java)
.pack()
.sign(fileName)
.let {
val tab = AsciiTable().let { tab ->
tab.addRule()
val outFileSuffix =
if (File(Avb.getJsonFileName(it.info.output)).exists()) ".signed" else ".clear"
tab.addRow("${it.info.output}${outFileSuffix} is ready")
tab.addRule()
tab
}
log.info("\n{}", tab.render())
}
else -> throw IllegalArgumentException("do not support header version $hv")
}
Avb.updateVbmeta(fileName)
}

@@ -12,7 +12,7 @@ class VendorBootParser : IPackable {
private val log = LoggerFactory.getLogger(VendorBootParser::class.java)
private val workDir = Helper.prop("workDir")
override fun capabilities(): List<String> {
return listOf("^vendor_boot(-debug)?\\.img$")
}
override fun unpack(fileName: String) {
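The widened patterns here and in BootImgParser above are what make the -debug variants unpackable; a quick sanity check of the equivalent pattern (a sketch in Python, not part of the patch):

import re

pattern = re.compile(r"^vendor_boot(-debug)?\.img$")
assert pattern.match("vendor_boot.img")
assert pattern.match("vendor_boot-debug.img")
assert not pattern.match("vendor_boot-test.img")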

@@ -9,6 +9,7 @@ successLogo = """
+----------------------------------+
"""
resDir = "src/integrationTest/resources"
resDir2 = "src/integrationTest/resources_2"
log = logging.getLogger('TEST')
log.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler(sys.stdout)
@@ -54,7 +55,7 @@ def cleanUp():
deleteIfExists("vendor_boot.img.signed")
deleteIfExists("vendor_boot.img.signed2")
def verifySingleJson(jsonFile, func = None):
log.info(jsonFile)
imgDir = os.path.dirname(jsonFile)
verifyItems = json.load(open(jsonFile))
@@ -73,6 +74,8 @@ def verifySingleJson(jsonFile):
else:
gradleWrapper = "./gradlew"
subprocess.check_call(gradleWrapper + " unpack", shell = True)
if func:
func()
subprocess.check_call(gradleWrapper + " pack", shell = True)
for k, v in verifyItems["hash"].items():
log.info("%s : %s" % (k, v))
@@ -114,6 +117,9 @@ def seekedCopy(inFile, outFile, offset):
writer.write(content)
def main():
#########################################
# resource_1
#########################################
# from volunteers
verifySingleDir(resDir, "recovery_image_from_s-trace")
verifySingleDir(resDir, "boot_img_from_gesangtome") # android 9, no ramdisk
@@ -145,6 +151,11 @@ def main():
# 11
verifySingleDir(resDir, "11.0.0_redfin.rd1a.200810.021.a1")
#########################################
# resource_2
#########################################
verifySingleJson("%s/issue_59/recovery.json" % resDir2, func = lambda: shutil.rmtree("build/unzip_boot/root", ignore_errors = False))
log.info(successLogo)
if __name__ == "__main__":

@@ -0,0 +1 @@
Subproject commit 4957dc9c53ea905f28b82c8ee65f738b6a88297c

@@ -0,0 +1,20 @@
@startuml
state fastboot
normal -right-> recovery: 1
recovery -down-> fastbootd: 2
fastbootd -left-> fastboot: 3
fastboot -up-> normal: 4
'self
normal -[dashed]> normal: a
recovery -[dashed]> recovery: b
fastboot -[dashed]> fastboot: c
fastbootd -[dashed]> fastbootd: d
'normal
normal -> fastbootd: 5
'recovery
recovery -[#blue]> fastboot: 6
@enduml