mass update

update avbtool from upstream
update kotlin to 1.3.20
support boot image header v2
add integration test
pull/31/head
cfig 6 years ago
parent b4f5a2b5b3
commit fac31f1526
No known key found for this signature in database
GPG Key ID: B104C307F0FDABB7

@ -1,7 +1,6 @@
# Android_boot_image_editor
[![Build Status](https://travis-ci.org/cfig/Android_boot_image_editor.svg?branch=master)](https://travis-ci.org/cfig/Android_boot_image_editor)
[![License](http://img.shields.io/:license-apache-blue.svg?style=flat-square)](http://www.apache.org/licenses/LICENSE-2.0.html)
[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/Android_boot_image_editor/lobby)
This tool focuses on editing Android boot.img(also recovery.img, recovery-two-step.img and vbmeta.img).
@ -38,7 +37,9 @@ You get the flattened kernel and /root filesystem under **./build/unzip\_boot**
├── boot.img.avb.json (AVB only)
├── bootimg.json (boot image info)
├── kernel
├── second (2nd bootloader, if exists)
├── second (2nd bootloader, if exists)
├── dtb (dtb, if exists)
├── dtbo (dtbo, if exists)
└── root
Then you can edit the actual file contents, like rootfs or kernel.

549
avb/avbtool vendored

@ -65,8 +65,7 @@ class AvbError(Exception):
class Algorithm(object):
"""Contains details about an algorithm.
See the avb_vbmeta_header.h file for more details about
algorithms.
See the avb_vbmeta_image.h file for more details about algorithms.
The constant |ALGORITHMS| is a dictionary from human-readable
names (e.g 'SHA256_RSA2048') to instances of this class.
@ -545,13 +544,52 @@ def verify_vbmeta_signature(vbmeta_header, vbmeta_blob):
modulus = decode_long(modulus_blob)
exponent = 65537
# For now, just use Crypto.PublicKey.RSA to verify the signature. This
# is OK since 'avbtool verify_image' is not expected to run on the
# Android builders (see bug #36809096).
import Crypto.PublicKey.RSA
key = Crypto.PublicKey.RSA.construct((modulus, long(exponent)))
if not key.verify(decode_long(padding_and_digest),
(decode_long(sig_blob), None)):
# We used to have this:
#
# import Crypto.PublicKey.RSA
# key = Crypto.PublicKey.RSA.construct((modulus, long(exponent)))
# if not key.verify(decode_long(padding_and_digest),
# (decode_long(sig_blob), None)):
# return False
# return True
#
# but since 'avbtool verify_image' is used on the builders we don't want
# to rely on Crypto.PublicKey.RSA. Instead just use openssl(1) to verify.
asn1_str = ('asn1=SEQUENCE:pubkeyinfo\n'
'\n'
'[pubkeyinfo]\n'
'algorithm=SEQUENCE:rsa_alg\n'
'pubkey=BITWRAP,SEQUENCE:rsapubkey\n'
'\n'
'[rsa_alg]\n'
'algorithm=OID:rsaEncryption\n'
'parameter=NULL\n'
'\n'
'[rsapubkey]\n'
'n=INTEGER:%s\n'
'e=INTEGER:%s\n' % (hex(modulus).rstrip('L'), hex(exponent).rstrip('L')))
asn1_tmpfile = tempfile.NamedTemporaryFile()
asn1_tmpfile.write(asn1_str)
asn1_tmpfile.flush()
der_tmpfile = tempfile.NamedTemporaryFile()
p = subprocess.Popen(
['openssl', 'asn1parse', '-genconf', asn1_tmpfile.name, '-out', der_tmpfile.name, '-noout'])
retcode = p.wait()
if retcode != 0:
raise AvbError('Error generating DER file')
p = subprocess.Popen(
['openssl', 'rsautl', '-verify', '-pubin', '-inkey', der_tmpfile.name, '-keyform', 'DER', '-raw'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(pout, perr) = p.communicate(str(sig_blob))
retcode = p.wait()
if retcode != 0:
raise AvbError('Error verifying data: {}'.format(perr))
recovered_data = bytearray(pout)
if recovered_data != padding_and_digest:
sys.stderr.write('Signature not correct\n')
return False
return True
@ -632,6 +670,7 @@ class ImageHandler(object):
of the block size.
Attributes:
filename: Name of file.
is_sparse: Whether the file being operated on is sparse.
block_size: The block size, typically 4096.
image_size: The size of the unsparsified file.
@ -654,7 +693,7 @@ class ImageHandler(object):
Raises:
ValueError: If data in the file is invalid.
"""
self._image_filename = image_filename
self.filename = image_filename
self._read_header()
def _read_header(self):
@ -669,7 +708,7 @@ class ImageHandler(object):
self.is_sparse = False
self.block_size = 4096
self._file_pos = 0
self._image = open(self._image_filename, 'r+b')
self._image = open(self.filename, 'r+b')
self._image.seek(0, os.SEEK_END)
self.image_size = self._image.tell()
@ -721,7 +760,7 @@ class ImageHandler(object):
chunk_sz*self.block_size,
self._image.tell(),
None))
self._image.read(data_sz)
self._image.seek(data_sz, os.SEEK_CUR)
elif chunk_type == ImageChunk.TYPE_FILL:
if data_sz != 4:
@ -1081,7 +1120,8 @@ class AvbDescriptor(object):
ret = desc + self.data + padding
return bytearray(ret)
def verify(self, image_dir, image_ext, expected_chain_partitions_map):
def verify(self, image_dir, image_ext, expected_chain_partitions_map,
image_containing_descriptor):
"""Verifies contents of the descriptor - used in verify_image sub-command.
Arguments:
@ -1089,6 +1129,7 @@ class AvbDescriptor(object):
image_ext: The extension of the file being verified (e.g. '.img').
expected_chain_partitions_map: A map from partition name to the
tuple (rollback_index_location, key_blob).
image_containing_descriptor: The image the descriptor is in.
Returns:
True if the descriptor verifies, False otherwise.
@ -1165,7 +1206,8 @@ class AvbPropertyDescriptor(AvbDescriptor):
ret = desc + self.key + '\0' + self.value + '\0' + padding
return bytearray(ret)
def verify(self, image_dir, image_ext, expected_chain_partitions_map):
def verify(self, image_dir, image_ext, expected_chain_partitions_map,
image_containing_descriptor):
"""Verifies contents of the descriptor - used in verify_image sub-command.
Arguments:
@ -1173,6 +1215,7 @@ class AvbPropertyDescriptor(AvbDescriptor):
image_ext: The extension of the file being verified (e.g. '.img').
expected_chain_partitions_map: A map from partition name to the
tuple (rollback_index_location, key_blob).
image_containing_descriptor: The image the descriptor is in.
Returns:
True if the descriptor verifies, False otherwise.
@ -1325,7 +1368,8 @@ class AvbHashtreeDescriptor(AvbDescriptor):
ret = desc + encoded_name + self.salt + self.root_digest + padding
return bytearray(ret)
def verify(self, image_dir, image_ext, expected_chain_partitions_map):
def verify(self, image_dir, image_ext, expected_chain_partitions_map,
image_containing_descriptor):
"""Verifies contents of the descriptor - used in verify_image sub-command.
Arguments:
@ -1333,12 +1377,16 @@ class AvbHashtreeDescriptor(AvbDescriptor):
image_ext: The extension of the file being verified (e.g. '.img').
expected_chain_partitions_map: A map from partition name to the
tuple (rollback_index_location, key_blob).
image_containing_descriptor: The image the descriptor is in.
Returns:
True if the descriptor verifies, False otherwise.
"""
image_filename = os.path.join(image_dir, self.partition_name + image_ext)
image = ImageHandler(image_filename)
if self.partition_name == '':
image = image_containing_descriptor
else:
image_filename = os.path.join(image_dir, self.partition_name + image_ext)
image = ImageHandler(image_filename)
# Generate the hashtree and checks that it matches what's in the file.
digest_size = len(hashlib.new(name=self.hash_algorithm).digest())
digest_padding = round_to_pow2(digest_size) - digest_size
@ -1367,7 +1415,7 @@ class AvbHashtreeDescriptor(AvbDescriptor):
# takes a long time; and c) is not strictly needed for
# verification purposes as we've already verified the root hash.
print ('{}: Successfully verified {} hashtree of {} for image of {} bytes'
.format(self.partition_name, self.hash_algorithm, image_filename,
.format(self.partition_name, self.hash_algorithm, image.filename,
self.image_size))
return True
@ -1477,7 +1525,8 @@ class AvbHashDescriptor(AvbDescriptor):
ret = desc + encoded_name + self.salt + self.digest + padding
return bytearray(ret)
def verify(self, image_dir, image_ext, expected_chain_partitions_map):
def verify(self, image_dir, image_ext, expected_chain_partitions_map,
image_containing_descriptor):
"""Verifies contents of the descriptor - used in verify_image sub-command.
Arguments:
@ -1485,12 +1534,16 @@ class AvbHashDescriptor(AvbDescriptor):
image_ext: The extension of the file being verified (e.g. '.img').
expected_chain_partitions_map: A map from partition name to the
tuple (rollback_index_location, key_blob).
image_containing_descriptor: The image the descriptor is in.
Returns:
True if the descriptor verifies, False otherwise.
"""
image_filename = os.path.join(image_dir, self.partition_name + image_ext)
image = ImageHandler(image_filename)
if self.partition_name == '':
image = image_containing_descriptor
else:
image_filename = os.path.join(image_dir, self.partition_name + image_ext)
image = ImageHandler(image_filename)
data = image.read(self.image_size)
ha = hashlib.new(self.hash_algorithm)
ha.update(self.salt)
@ -1502,7 +1555,7 @@ class AvbHashDescriptor(AvbDescriptor):
format(self.hash_algorithm, image_filename))
return False
print ('{}: Successfully verified {} hash of {} for image of {} bytes'
.format(self.partition_name, self.hash_algorithm, image_filename,
.format(self.partition_name, self.hash_algorithm, image.filename,
self.image_size))
return True
@ -1582,7 +1635,8 @@ class AvbKernelCmdlineDescriptor(AvbDescriptor):
ret = desc + encoded_str + padding
return bytearray(ret)
def verify(self, image_dir, image_ext, expected_chain_partitions_map):
def verify(self, image_dir, image_ext, expected_chain_partitions_map,
image_containing_descriptor):
"""Verifies contents of the descriptor - used in verify_image sub-command.
Arguments:
@ -1590,6 +1644,7 @@ class AvbKernelCmdlineDescriptor(AvbDescriptor):
image_ext: The extension of the file being verified (e.g. '.img').
expected_chain_partitions_map: A map from partition name to the
tuple (rollback_index_location, key_blob).
image_containing_descriptor: The image the descriptor is in.
Returns:
True if the descriptor verifies, False otherwise.
@ -1683,7 +1738,8 @@ class AvbChainPartitionDescriptor(AvbDescriptor):
ret = desc + encoded_name + self.public_key + padding
return bytearray(ret)
def verify(self, image_dir, image_ext, expected_chain_partitions_map):
def verify(self, image_dir, image_ext, expected_chain_partitions_map,
image_containing_descriptor):
"""Verifies contents of the descriptor - used in verify_image sub-command.
Arguments:
@ -1691,6 +1747,7 @@ class AvbChainPartitionDescriptor(AvbDescriptor):
image_ext: The extension of the file being verified (e.g. '.img').
expected_chain_partitions_map: A map from partition name to the
tuple (rollback_index_location, key_blob).
image_containing_descriptor: The image the descriptor is in.
Returns:
True if the descriptor verifies, False otherwise.
@ -1699,7 +1756,7 @@ class AvbChainPartitionDescriptor(AvbDescriptor):
if not value:
sys.stderr.write('No expected chain partition for partition {}. Use '
'--expected_chain_partition to specify expected '
'contents.\n'.
'contents or --follow_chain_partitions.\n'.
format(self.partition_name))
return False
rollback_index_location, pk_blob = value
@ -1820,8 +1877,8 @@ class AvbVBMetaHeader(object):
"""A class for parsing and writing AVB vbmeta images.
Attributes:
The attributes correspond to the |AvbVBMetaHeader| struct
defined in avb_vbmeta_header.h.
The attributes correspond to the |AvbVBMetaImageHeader| struct defined in
avb_vbmeta_image.h.
"""
SIZE = 256
@ -1960,6 +2017,33 @@ class Avb(object):
MAX_VBMETA_SIZE = 64 * 1024
MAX_FOOTER_SIZE = 4096
def extract_vbmeta_image(self, output, image_filename, padding_size):
  """Implements the 'extract_vbmeta_image' command.

  Arguments:
    output: Write vbmeta struct to this file.
    image_filename: File to extract vbmeta data from (with a footer).
    padding_size: If not 0, pads output so size is a multiple of the number.

  Raises:
    AvbError: If there's no footer in the image.
  """
  handler = ImageHandler(image_filename)
  footer = self._parse_image(handler)[0]
  if not footer:
    raise AvbError('Given image does not have a footer.')
  # The footer records where the vbmeta struct lives inside the image.
  handler.seek(footer.vbmeta_offset)
  blob = handler.read(footer.vbmeta_size)
  output.write(blob)
  if padding_size > 0:
    # Pad the output with NUL bytes up to the next multiple of
    # |padding_size|.
    padded_size = round_to_multiple(len(blob), padding_size)
    output.write('\0' * (padded_size - len(blob)))
def erase_footer(self, image_filename, keep_hashtree):
"""Implements the 'erase_footer' command.
@ -2136,15 +2220,16 @@ class Avb(object):
if num_printed == 0:
o.write(' (none)\n')
def verify_image(self, image_filename, key_path, expected_chain_partitions):
def verify_image(self, image_filename, key_path, expected_chain_partitions, follow_chain_partitions):
"""Implements the 'verify_image' command.
Arguments:
image_filename: Image file to get information from (file object).
key_path: None or check that embedded public key matches key at given path.
expected_chain_partitions: List of chain partitions to check or None.
follow_chain_partitions: If True, will follows chain partitions even when not
specified with the --expected_chain_partition option
"""
expected_chain_partitions_map = {}
if expected_chain_partitions:
used_locations = {}
@ -2174,11 +2259,11 @@ class Avb(object):
offset = 0
if footer:
offset = footer.vbmeta_offset
size = (header.SIZE + header.authentication_data_block_size +
header.auxiliary_data_block_size)
image.seek(offset)
vbmeta_blob = image.read(size)
h = AvbVBMetaHeader(vbmeta_blob[0:AvbVBMetaHeader.SIZE])
vbmeta_blob = image.read(header.SIZE + header.authentication_data_block_size +
header.auxiliary_data_block_size)
alg_name, _ = lookup_algorithm_by_type(header.algorithm_type)
if not verify_vbmeta_signature(header, vbmeta_blob):
raise AvbError('Signature check failed for {} vbmeta struct {}'
@ -2187,22 +2272,120 @@ class Avb(object):
if key_blob:
# The embedded public key is in the auxiliary block at an offset.
key_offset = AvbVBMetaHeader.SIZE
key_offset += h.authentication_data_block_size
key_offset += h.public_key_offset
key_blob_in_vbmeta = vbmeta_blob[key_offset:key_offset + h.public_key_size]
key_offset += header.authentication_data_block_size
key_offset += header.public_key_offset
key_blob_in_vbmeta = vbmeta_blob[key_offset:key_offset + header.public_key_size]
if key_blob != key_blob_in_vbmeta:
raise AvbError('Embedded public key does not match given key.')
if footer:
print ('vbmeta: Successfully verified footer and {} vbmeta struct in {}'
.format(alg_name, image_filename))
.format(alg_name, image.filename))
else:
print ('vbmeta: Successfully verified {} vbmeta struct in {}'
.format(alg_name, image_filename))
.format(alg_name, image.filename))
for desc in descriptors:
if not desc.verify(image_dir, image_ext, expected_chain_partitions_map):
raise AvbError('Error verifying descriptor.')
if (isinstance(desc, AvbChainPartitionDescriptor) and follow_chain_partitions and
expected_chain_partitions_map.get(desc.partition_name) == None):
# In this case we're processing a chain descriptor but don't have a
# --expect_chain_partition ... however --follow_chain_partitions was
# specified so we shouldn't error out in desc.verify().
print ('{}: Chained but ROLLBACK_SLOT (which is {}) and KEY (which has sha1 {}) not specified'
.format(desc.partition_name, desc.rollback_index_location,
hashlib.sha1(desc.public_key).hexdigest()))
else:
if not desc.verify(image_dir, image_ext, expected_chain_partitions_map, image):
raise AvbError('Error verifying descriptor.')
# Honor --follow_chain_partitions - add '--' to make the output more readable.
if isinstance(desc, AvbChainPartitionDescriptor) and follow_chain_partitions:
print '--'
chained_image_filename = os.path.join(image_dir, desc.partition_name + image_ext)
self.verify_image(chained_image_filename, key_path, None, False)
def calculate_vbmeta_digest(self, image_filename, hash_algorithm, output):
  """Implements the 'calculate_vbmeta_digest' command.

  Arguments:
    image_filename: Image file to get information from (file object).
    hash_algorithm: Hash algorithm used.
    output: Output file to write human-readable information to (file object).
  """
  image_dir = os.path.dirname(image_filename)
  image_ext = os.path.splitext(image_filename)[1]

  def read_vbmeta_blob(handler):
    # The vbmeta struct lives at offset 0 unless a footer points elsewhere.
    (footer, header, descriptors, _) = self._parse_image(handler)
    offset = footer.vbmeta_offset if footer else 0
    size = (header.SIZE + header.authentication_data_block_size +
            header.auxiliary_data_block_size)
    handler.seek(offset)
    return handler.read(size), descriptors

  hasher = hashlib.new(name=hash_algorithm)
  blob, descriptors = read_vbmeta_blob(ImageHandler(image_filename))
  hasher.update(blob)

  # Chained partitions contribute their own vbmeta structs to the digest,
  # in descriptor order.
  for desc in descriptors:
    if isinstance(desc, AvbChainPartitionDescriptor):
      chained_path = os.path.join(image_dir, desc.partition_name + image_ext)
      ch_blob, _ = read_vbmeta_blob(ImageHandler(chained_path))
      hasher.update(ch_blob)

  output.write('{}\n'.format(hasher.digest().encode('hex')))
def calculate_kernel_cmdline(self, image_filename, hashtree_disabled, output):
  """Implements the 'calculate_kernel_cmdline' command.

  Arguments:
    image_filename: Image file to get information from (file object).
    hashtree_disabled: If True, returns the cmdline for hashtree disabled.
    output: Output file to write human-readable information to (file object).
  """
  image_dir = os.path.dirname(image_filename)
  image_ext = os.path.splitext(image_filename)[1]
  descriptors = self._parse_image(ImageHandler(image_filename))[2]

  # Collect cmdline descriptors, following chain-partition descriptors into
  # the chained images they point at.
  cmdline_descriptors = []
  for desc in descriptors:
    if isinstance(desc, AvbChainPartitionDescriptor):
      chained_path = os.path.join(image_dir, desc.partition_name + image_ext)
      chained_descriptors = self._parse_image(ImageHandler(chained_path))[2]
      cmdline_descriptors.extend(
          d for d in chained_descriptors
          if isinstance(d, AvbKernelCmdlineDescriptor))
    elif isinstance(desc, AvbKernelCmdlineDescriptor):
      cmdline_descriptors.append(desc)

  # Keep only snippets whose flags match the requested hashtree state.
  snippets = []
  for desc in cmdline_descriptors:
    flags = desc.flags
    if (flags & AvbKernelCmdlineDescriptor.
        FLAGS_USE_ONLY_IF_HASHTREE_NOT_DISABLED) != 0 and hashtree_disabled:
      continue
    if (flags & AvbKernelCmdlineDescriptor.
        FLAGS_USE_ONLY_IF_HASHTREE_DISABLED) != 0 and not hashtree_disabled:
      continue
    snippets.append(desc.kernel_cmdline)

  output.write(' '.join(snippets))
def _parse_image(self, image):
@ -2578,6 +2761,7 @@ class Avb(object):
# Add descriptors from other images.
if include_descriptors_from_image:
descriptors_dict = dict()
for image in include_descriptors_from_image:
image_handler = ImageHandler(image.name)
(_, image_vbmeta_header, image_descriptors, _) = self._parse_image(
@ -2586,7 +2770,18 @@ class Avb(object):
h.bump_required_libavb_version_minor(
image_vbmeta_header.required_libavb_version_minor)
for desc in image_descriptors:
encoded_descriptors.extend(desc.encode())
# The --include_descriptors_from_image option is used in some setups
# with images A and B where both A and B contain a descriptor
# for a partition with the same name. Since it's not meaningful
# to include both descriptors, only include the last seen descriptor.
# See bug 76386656 for details.
if hasattr(desc, 'partition_name'):
key = type(desc).__name__ + '_' + desc.partition_name
descriptors_dict[key] = desc.encode()
else:
encoded_descriptors.extend(desc.encode())
for key in sorted(descriptors_dict.keys()):
encoded_descriptors.extend(descriptors_dict[key])
# Load public key metadata blob, if requested.
pkmd_blob = []
@ -2878,9 +3073,10 @@ class Avb(object):
if salt:
salt = salt.decode('hex')
else:
if salt is None:
# If salt is not explicitly specified, choose a hash
# that's the same size as the hash size.
if salt is None and not use_persistent_digest:
# If salt is not explicitly specified, choose a hash that's the same
# size as the hash size. Don't populate a random salt if this
# descriptor is being created to use a persistent digest on device.
hash_size = digest_size
salt = open('/dev/urandom').read(hash_size)
else:
@ -2986,7 +3182,7 @@ class Avb(object):
Arguments:
image_filename: File to add the footer to.
partition_size: Size of partition.
partition_size: Size of partition or 0 to put it right at the end.
partition_name: Name of partition (without A/B suffix).
generate_fec: If True, generate FEC codes.
fec_num_roots: Number of roots for FEC.
@ -3037,19 +3233,22 @@ class Avb(object):
digest_size = len(hashlib.new(name=hash_algorithm).digest())
digest_padding = round_to_pow2(digest_size) - digest_size
# First, calculate the maximum image size such that an image
# this size + the hashtree + metadata (footer + vbmeta struct)
# fits in |partition_size|. We use very conservative figures for
# metadata.
(_, max_tree_size) = calc_hash_level_offsets(
partition_size, block_size, digest_size + digest_padding)
max_fec_size = 0
if generate_fec:
max_fec_size = calc_fec_data_size(partition_size, fec_num_roots)
max_metadata_size = (max_fec_size + max_tree_size +
self.MAX_VBMETA_SIZE +
self.MAX_FOOTER_SIZE)
max_image_size = partition_size - max_metadata_size
# If |partition_size| is given (e.g. not 0), calculate the maximum image
# size such that an image this size + the hashtree + metadata (footer +
# vbmeta struct) fits in |partition_size|. We use very conservative figures
# for metadata.
if partition_size > 0:
(_, max_tree_size) = calc_hash_level_offsets(
partition_size, block_size, digest_size + digest_padding)
max_fec_size = 0
if generate_fec:
max_fec_size = calc_fec_data_size(partition_size, fec_num_roots)
max_metadata_size = (max_fec_size + max_tree_size +
self.MAX_VBMETA_SIZE +
self.MAX_FOOTER_SIZE)
max_image_size = partition_size - max_metadata_size
else:
max_image_size = 0
# If we're asked to only calculate the maximum image size, we're done.
if calc_max_image_size:
@ -3058,10 +3257,16 @@ class Avb(object):
image = ImageHandler(image_filename)
if partition_size % image.block_size != 0:
raise AvbError('Partition size of {} is not a multiple of the image '
'block size {}.'.format(partition_size,
image.block_size))
if partition_size > 0:
if partition_size % image.block_size != 0:
raise AvbError('Partition size of {} is not a multiple of the image '
'block size {}.'.format(partition_size,
image.block_size))
else:
if image.image_size % image.block_size != 0:
raise AvbError('File size of {} is not a multiple of the image '
'block size {}.'.format(image.image_size,
image.block_size))
# If there's already a footer, truncate the image to its original
# size. This way 'avbtool add_hashtree_footer' is idempotent
@ -3088,18 +3293,20 @@ class Avb(object):
image.append_raw('\0' * (rounded_image_size - image.image_size))
# If image size exceeds the maximum image size, fail.
if image.image_size > max_image_size:
raise AvbError('Image size of {} exceeds maximum image '
'size of {} in order to fit in a partition '
'size of {}.'.format(image.image_size, max_image_size,
partition_size))
if partition_size > 0:
if image.image_size > max_image_size:
raise AvbError('Image size of {} exceeds maximum image '
'size of {} in order to fit in a partition '
'size of {}.'.format(image.image_size, max_image_size,
partition_size))
if salt:
salt = salt.decode('hex')
else:
if salt is None:
# If salt is not explicitly specified, choose a hash
# that's the same size as the hash size.
if salt is None and not use_persistent_root_digest:
# If salt is not explicitly specified, choose a hash that's the same
# size as the hash size. Don't populate a random salt if this
# descriptor is being created to use a persistent digest on device.
hash_size = digest_size
salt = open('/dev/urandom').read(hash_size)
else:
@ -3191,8 +3398,9 @@ class Avb(object):
# Now insert a DONT_CARE chunk with enough bytes such that the
# final Footer block is at the end of partition_size..
image.append_dont_care(partition_size - image.image_size -
1*image.block_size)
if partition_size > 0:
image.append_dont_care(partition_size - image.image_size -
1*image.block_size)
# Generate the Footer that tells where the VBMeta footer
# is. Also put enough padding in the front of the footer since
@ -3213,7 +3421,7 @@ class Avb(object):
def make_atx_certificate(self, output, authority_key_path, subject_key_path,
subject_key_version, subject,
is_intermediate_authority, signing_helper,
is_intermediate_authority, usage, signing_helper,
signing_helper_with_files):
"""Implements the 'make_atx_certificate' command.
@ -3235,6 +3443,7 @@ class Avb(object):
should be the same Product ID found in the permanent attributes.
is_intermediate_authority: True if the certificate is for an intermediate
authority.
usage: If not empty, overrides the cert usage with a hash of this value.
signing_helper: Program which signs a hash and returns the signature.
signing_helper_with_files: Same as signing_helper but uses files instead.
"""
@ -3244,9 +3453,10 @@ class Avb(object):
hasher = hashlib.sha256()
hasher.update(subject)
signed_data.extend(hasher.digest())
usage = 'com.google.android.things.vboot'
if is_intermediate_authority:
usage += '.ca'
if not usage:
usage = 'com.google.android.things.vboot'
if is_intermediate_authority:
usage += '.ca'
hasher = hashlib.sha256()
hasher.update(usage)
signed_data.extend(hasher.digest())
@ -3322,6 +3532,67 @@ class Avb(object):
output.write(intermediate_key_certificate)
output.write(product_key_certificate)
def make_atx_unlock_credential(self, output, intermediate_key_certificate,
                               unlock_key_certificate, challenge_path,
                               unlock_key_path, signing_helper,
                               signing_helper_with_files):
  """Implements the 'make_atx_unlock_credential' command.

  Android Things unlock credentials can be used to authorize the unlock of
  AVB on a device. These credentials are presented to an Android Things
  bootloader via the fastboot interface in response to a 16-byte challenge.
  This method creates all fields of the credential except the challenge
  signature field (which is the last field) and can optionally create the
  challenge signature field as well if a challenge and the unlock_key_path
  is provided.

  Arguments:
    output: The credential will be written to this file on success.
    intermediate_key_certificate: A certificate file as output by
                                  make_atx_certificate with
                                  is_intermediate_authority set to true.
    unlock_key_certificate: A certificate file as output by
                            make_atx_certificate with
                            is_intermediate_authority set to false and the
                            usage set to
                            'com.google.android.things.vboot.unlock'.
    challenge_path: [optional] A path to the challenge to sign.
    unlock_key_path: [optional] A PEM file path with the unlock private key.
    signing_helper: Program which signs a hash and returns the signature.
    signing_helper_with_files: Same as signing_helper but uses files instead.

  Raises:
    AvbError: If an argument is incorrect.
  """
  EXPECTED_CERTIFICATE_SIZE = 1620
  EXPECTED_CHALLENGE_SIZE = 16

  # Validate inputs before writing anything.
  if len(intermediate_key_certificate) != EXPECTED_CERTIFICATE_SIZE:
    raise AvbError('Invalid intermediate key certificate length.')
  if len(unlock_key_certificate) != EXPECTED_CERTIFICATE_SIZE:
    raise AvbError('Invalid product key certificate length.')

  challenge = bytearray()
  if challenge_path:
    with open(challenge_path, 'r') as f:
      challenge = f.read()
    if len(challenge) != EXPECTED_CHALLENGE_SIZE:
      raise AvbError('Invalid unlock challenge length.')

  # Credential layout: format version, intermediate certificate, unlock
  # certificate, then (optionally) the challenge signature.
  output.write(struct.pack('<I', 1))  # Format Version
  output.write(intermediate_key_certificate)
  output.write(unlock_key_certificate)

  if challenge_path and unlock_key_path:
    algorithm_name = 'SHA512_RSA4096'
    alg = ALGORITHMS[algorithm_name]
    # The signed payload is |alg.padding| followed by the SHA-512 of the
    # challenge.
    padding_and_hash = bytearray(alg.padding)
    padding_and_hash.extend(hashlib.sha512(challenge).digest())
    signature = raw_sign(signing_helper, signing_helper_with_files,
                         algorithm_name, alg.signature_num_bytes,
                         unlock_key_path, padding_and_hash)
    output.write(signature)
def calc_hash_level_offsets(image_size, block_size, digest_size):
"""Calculate the offsets of all the hash-levels in a Merkle-tree.
@ -3692,10 +3963,11 @@ class AvbTool(object):
type=argparse.FileType('rab+'))
sub_parser.add_argument('--partition_size',
help='Partition size',
default=0,
type=parse_number)
sub_parser.add_argument('--partition_name',
help='Partition name',
default=None)
default='')
sub_parser.add_argument('--hash_algorithm',
help='Hash algorithm to use (default: sha1)',
default='sha1')
@ -3755,6 +4027,23 @@ class AvbTool(object):
action='store_true')
sub_parser.set_defaults(func=self.erase_footer)
sub_parser = subparsers.add_parser('extract_vbmeta_image',
help='Extracts vbmeta from an image with a footer.')
sub_parser.add_argument('--image',
help='Image with footer',
type=argparse.FileType('rb'),
required=True)
sub_parser.add_argument('--output',
help='Output file name',
type=argparse.FileType('wb'))
sub_parser.add_argument('--padding_size',
metavar='NUMBER',
help='If non-zero, pads output with NUL bytes so '
'its size is a multiple of NUMBER (default: 0)',
type=parse_number,
default=0)
sub_parser.set_defaults(func=self.extract_vbmeta_image)
sub_parser = subparsers.add_parser('resize_image',
help='Resize image with a footer.')
sub_parser.add_argument('--image',
@ -3794,8 +4083,44 @@ class AvbTool(object):
help='Expected chain partition',
metavar='PART_NAME:ROLLBACK_SLOT:KEY_PATH',
action='append')
sub_parser.add_argument('--follow_chain_partitions',
help=('Follows chain partitions even when not '
'specified with the --expected_chain_partition option'),
action='store_true')
sub_parser.set_defaults(func=self.verify_image)
sub_parser = subparsers.add_parser(
'calculate_vbmeta_digest',
help='Calculate vbmeta digest.')
sub_parser.add_argument('--image',
help='Image to calculate digest for',
type=argparse.FileType('rb'),
required=True)
sub_parser.add_argument('--hash_algorithm',
help='Hash algorithm to use (default: sha256)',
default='sha256')
sub_parser.add_argument('--output',
help='Write hex digest to file (default: stdout)',
type=argparse.FileType('wt'),
default=sys.stdout)
sub_parser.set_defaults(func=self.calculate_vbmeta_digest)
sub_parser = subparsers.add_parser(
'calculate_kernel_cmdline',
help='Calculate kernel cmdline.')
sub_parser.add_argument('--image',
help='Image to calculate kernel cmdline for',
type=argparse.FileType('rb'),
required=True)
sub_parser.add_argument('--hashtree_disabled',
help='Return the cmdline for hashtree disabled',
action='store_true')
sub_parser.add_argument('--output',
help='Write cmdline to file (default: stdout)',
type=argparse.FileType('wt'),
default=sys.stdout)
sub_parser.set_defaults(func=self.calculate_kernel_cmdline)
sub_parser = subparsers.add_parser('set_ab_metadata',
help='Set A/B metadata.')
sub_parser.add_argument('--misc_image',
@ -3835,6 +4160,10 @@ class AvbTool(object):
help=('Generate an intermediate authority '
'certificate'),
action='store_true')
sub_parser.add_argument('--usage',
help=('Override usage with a hash of the provided '
'string'),
required=False)
sub_parser.add_argument('--authority_key',
help='Path to authority RSA private key file',
required=False)
@ -3884,6 +4213,43 @@ class AvbTool(object):
required=True)
sub_parser.set_defaults(func=self.make_atx_metadata)
sub_parser = subparsers.add_parser(
'make_atx_unlock_credential',
help='Create an Android Things eXtension (ATX) unlock credential.')
sub_parser.add_argument('--output',
help='Write credential to file',
type=argparse.FileType('wb'),
default=sys.stdout)
sub_parser.add_argument('--intermediate_key_certificate',
help='Path to intermediate key certificate file',
type=argparse.FileType('rb'),
required=True)
sub_parser.add_argument('--unlock_key_certificate',
help='Path to unlock key certificate file',
type=argparse.FileType('rb'),
required=True)
sub_parser.add_argument('--challenge',
help='Path to the challenge to sign (optional). If '
'this is not provided the challenge signature '
'field is omitted and can be concatenated '
'later.',
required=False)
sub_parser.add_argument('--unlock_key',
help='Path to unlock key (optional). Must be '
'provided if using --challenge.',
required=False)
sub_parser.add_argument('--signing_helper',
help='Path to helper used for signing',
metavar='APP',
default=None,
required=False)
sub_parser.add_argument('--signing_helper_with_files',
help='Path to helper used for signing using files',
metavar='APP',
default=None,
required=False)
sub_parser.set_defaults(func=self.make_atx_unlock_credential)
args = parser.parse_args(argv[1:])
try:
args.func(args)
@ -3982,6 +4348,11 @@ class AvbTool(object):
"""Implements the 'erase_footer' sub-command."""
self.avb.erase_footer(args.image.name, args.keep_hashtree)
def extract_vbmeta_image(self, args):
"""Implements the 'extract_vbmeta_image' sub-command."""
self.avb.extract_vbmeta_image(args.output, args.image.name,
args.padding_size)
def resize_image(self, args):
"""Implements the 'resize_image' sub-command."""
self.avb.resize_image(args.image.name, args.partition_size)
@ -3997,7 +4368,17 @@ class AvbTool(object):
def verify_image(self, args):
"""Implements the 'verify_image' sub-command."""
self.avb.verify_image(args.image.name, args.key,
args.expected_chain_partition)
args.expected_chain_partition,
args.follow_chain_partitions)
def calculate_vbmeta_digest(self, args):
"""Implements the 'calculate_vbmeta_digest' sub-command."""
self.avb.calculate_vbmeta_digest(args.image.name, args.hash_algorithm,
args.output)
def calculate_kernel_cmdline(self, args):
"""Implements the 'calculate_kernel_cmdline' sub-command."""
self.avb.calculate_kernel_cmdline(args.image.name, args.hashtree_disabled, args.output)
def make_atx_certificate(self, args):
"""Implements the 'make_atx_certificate' sub-command."""
@ -4006,6 +4387,7 @@ class AvbTool(object):
args.subject_key_version,
args.subject.read(),
args.subject_is_intermediate_authority,
args.usage,
args.signing_helper,
args.signing_helper_with_files)
@ -4021,6 +4403,17 @@ class AvbTool(object):
args.intermediate_key_certificate.read(),
args.product_key_certificate.read())
def make_atx_unlock_credential(self, args):
"""Implements the 'make_atx_unlock_credential' sub-command."""
self.avb.make_atx_unlock_credential(
args.output,
args.intermediate_key_certificate.read(),
args.unlock_key_certificate.read(),
args.challenge,
args.unlock_key,
args.signing_helper,
args.signing_helper_with_files)
if __name__ == '__main__':
tool = AvbTool()

@ -1,6 +1,6 @@
buildscript {
ext {
kotlinVersion = "1.3.10"
kotlinVersion = "1.3.20"
}
repositories {
mavenCentral()

@ -3,25 +3,29 @@ package cfig
import avb.*
import avb.alg.Algorithms
import avb.desc.*
import avb.AuxBlob
import cfig.io.Struct
import com.fasterxml.jackson.databind.ObjectMapper
import org.apache.commons.codec.binary.Hex
import org.slf4j.LoggerFactory
import java.io.*
import java.io.File
import java.io.FileInputStream
import java.io.FileOutputStream
import java.nio.file.Files
import java.nio.file.Paths
import java.nio.file.StandardOpenOption
import java.security.MessageDigest
class Avb {
val MAX_VBMETA_SIZE = 64 * 1024
val MAX_FOOTER_SIZE = 4096
val BLOCK_SIZE = 4096
private val MAX_VBMETA_SIZE = 64 * 1024
private val MAX_FOOTER_SIZE = 4096
private val BLOCK_SIZE = 4096
private var required_libavb_version_minor = 0
//migrated from: avbtool::Avb::add_hash_footer
fun add_hash_footer(image_file: String,
partition_size: Long,
partition_size: Long, //aligned by Avb::BLOCK_SIZE
use_persistent_digest: Boolean,
do_not_use_ab: Boolean,
salt: String,
@ -30,7 +34,8 @@ class Avb {
rollback_index: Long,
common_algorithm: String,
inReleaseString: String?) {
var original_image_size = 0L
log.info("add_hash_footer($image_file) ...")
var original_image_size: Long
//required libavb version
if (use_persistent_digest || do_not_use_ab) {
required_libavb_version_minor = 1
@ -46,31 +51,57 @@ class Avb {
val max_image_size = partition_size - max_metadata_size
log.info("max_image_size: $max_image_size")
if (partition_size % 4096L != 0L) {
throw IllegalArgumentException("Partition SIZE of $partition_size is not a multiple of the image block SIZE 4096")
//TODO: typical block size = 4096L, from avbtool::Avb::ImageHandler::block_size
//since boot.img is not in sparse format, we are safe to hardcode it to 4096L for now
if (partition_size % BLOCK_SIZE != 0L) {
throw IllegalArgumentException("Partition SIZE of $partition_size is not " +
"a multiple of the image block SIZE 4096")
}
//truncate AVB footer if there is. Then add_hash_footer() is idempotent
val fis = FileInputStream(image_file)
fis.skip(File(image_file).length() - 64)
val originalFileSize = File(image_file).length()
if (originalFileSize > max_image_size) {
throw IllegalArgumentException("Image size of $originalFileSize exceeds maximum image size " +
"of $max_image_size in order to fit in a partition size of $partition_size.")
}
fis.skip(originalFileSize - 64)
try {
val footer = Footer(fis)
original_image_size = footer.originalImageSize
FileOutputStream(File(image_file), true).channel.use {
log.info("truncate $image_file to its original SIZE ${footer.originalImageSize}")
log.info("original image $image_file has AVB footer, " +
"truncate it to original SIZE: ${footer.originalImageSize}")
it.truncate(footer.originalImageSize)
}
} catch (e: IllegalArgumentException) {
log.info("original image doesn't have footer")
original_image_size = File(image_file).length()
log.info("original image $image_file doesn't have AVB footer")
original_image_size = originalFileSize
}
val saltByteArray = Helper.fromHexString(salt)
//salt
var saltByteArray = Helper.fromHexString(salt)
if (salt.isBlank()) {
//If salt is not explicitly specified, choose a hash that's the same size as the hash size
val expectedDigestSize = MessageDigest.getInstance(Helper.pyAlg2java(hash_algorithm)).digest().size
FileInputStream(File("/dev/urandom")).use {
val randomSalt = ByteArray(expectedDigestSize)
it.read(randomSalt)
log.warn("salt is empty, using random salt[$expectedDigestSize]: " + Helper.toHexString(randomSalt))
saltByteArray = randomSalt
}
} else {
log.info("preset salt[${saltByteArray.size}] is valid: $salt")
}
//hash digest
val digest = MessageDigest.getInstance(Helper.pyAlg2java(hash_algorithm)).apply {
update(saltByteArray)
update(File(image_file).readBytes())
}.digest()
log.info("Digest: " + Helper.toHexString(digest))
log.info("Digest(salt + file): " + Helper.toHexString(digest))
//HashDescriptor
val hd = HashDescriptor()
hd.image_size = File(image_file).length()
hd.hash_algorithm = hash_algorithm.toByteArray()
@ -80,6 +111,8 @@ class Avb {
if (do_not_use_ab) hd.flags = hd.flags or 1
if (!use_persistent_digest) hd.digest = digest
log.info("encoded hash descriptor:" + Hex.encodeHexString(hd.encode()))
//VBmeta blob
val vbmeta_blob = generateVbMetaBlob(common_algorithm,
null,
arrayOf(hd as Descriptor),
@ -91,7 +124,10 @@ class Avb {
0,
inReleaseString)
log.debug("vbmeta_blob: " + Helper.toHexString(vbmeta_blob))
Helper.dumpToFile("hashDescriptor.vbmeta.blob", vbmeta_blob)
log.info("Padding image ...")
// image + padding
if (hd.image_size % BLOCK_SIZE != 0L) {
val padding_needed = BLOCK_SIZE - (hd.image_size % BLOCK_SIZE)
FileOutputStream(image_file, true).use { fos ->
@ -101,17 +137,25 @@ class Avb {
} else {
log.info("$image_file doesn't need padding")
}
val vbmeta_offset = hd.image_size
// + vbmeta + padding
log.info("Appending vbmeta ...")
val vbmeta_offset = File(image_file).length()
val padding_needed = Helper.round_to_multiple(vbmeta_blob.size.toLong(), BLOCK_SIZE) - vbmeta_blob.size
val vbmeta_blob_with_padding = Helper.join(vbmeta_blob, Struct("${padding_needed}x").pack(null))
FileOutputStream(image_file, true).use { fos ->
fos.write(vbmeta_blob_with_padding)
}
// + DONT_CARE chunk
log.info("Appending DONT_CARE chunk ...")
val vbmeta_end_offset = vbmeta_offset + vbmeta_blob_with_padding.size
FileOutputStream(image_file, true).use { fos ->
fos.write(Struct("${partition_size - vbmeta_end_offset - 1 * BLOCK_SIZE}x").pack(null))
}
// + AvbFooter + padding
log.info("Appending footer ...")
val footer = Footer()
footer.originalImageSize = original_image_size
footer.vbMetaOffset = vbmeta_offset
@ -124,28 +168,41 @@ class Avb {
FileOutputStream(image_file, true).use { fos ->
fos.write(footerBlobWithPadding)
}
log.info("add_hash_footer($image_file) done ...")
}
//avbtool::Avb::_generate_vbmeta_blob()
fun generateVbMetaBlob(algorithm_name: String,
public_key_metadata_path: String?,
descriptors: Array<Descriptor>,
chain_partitions: String?,
inRollbackIndex: Long,
inFlags: Long,
props: String?,
kernel_cmdlines: String?,
props: Map<String, String>?,
kernel_cmdlines: List<String>?,
required_libavb_version_minor: Int,
inReleaseString: String?): ByteArray {
//encoded descriptors
var encodedDesc: ByteArray = byteArrayOf()
descriptors.forEach { encodedDesc = Helper.join(encodedDesc, it.encode()) }
props?.let {
it.forEach { t, u ->
Helper.join(encodedDesc, PropertyDescriptor(t, u).encode())
}
}
kernel_cmdlines?.let {
it.forEach { eachCmdline ->
Helper.join(encodedDesc, KernelCmdlineDescriptor(cmdline = eachCmdline).encode())
}
}
//algorithm
val alg = Algorithms.get(algorithm_name)!!
//encoded pubkey
val encodedKey = Blob.encodePubKey(alg)
val encodedKey = AuxBlob.encodePubKey(alg)
//3 - whole aux blob
val auxBlob = Blob.getAuxDataBlob(encodedDesc, encodedKey)
val auxBlob = Blob.getAuxDataBlob(encodedDesc, encodedKey, byteArrayOf())
//1 - whole header blob
val headerBlob = Header().apply {
@ -182,7 +239,7 @@ class Avb {
}.encode()
//2 - auth blob
var authBlob = Blob.getAuthBlob(headerBlob, auxBlob, algorithm_name)
val authBlob = Blob.getAuthBlob(headerBlob, auxBlob, algorithm_name)
return Helper.join(headerBlob, authBlob, auxBlob)
}
@ -217,15 +274,15 @@ class Avb {
val ai = AVBInfo()
ai.footer = footer
ai.auxBlob = AVBInfo.AuxBlob()
ai.auxBlob = AuxBlob()
ai.header = vbMetaHeader
if (vbMetaHeader.public_key_size > 0L) {
ai.auxBlob!!.pubkey = AVBInfo.AuxBlob.PubKeyInfo()
ai.auxBlob!!.pubkey = AuxBlob.PubKeyInfo()
ai.auxBlob!!.pubkey!!.offset = vbMetaHeader.public_key_offset
ai.auxBlob!!.pubkey!!.size = vbMetaHeader.public_key_size
}
if (vbMetaHeader.public_key_metadata_size > 0L) {
ai.auxBlob!!.pubkeyMeta = AVBInfo.AuxBlob.PubKeyMetadataInfo()
ai.auxBlob!!.pubkeyMeta = AuxBlob.PubKeyMetadataInfo()
ai.auxBlob!!.pubkeyMeta!!.offset = vbMetaHeader.public_key_metadata_offset
ai.auxBlob!!.pubkeyMeta!!.size = vbMetaHeader.public_key_metadata_size
}
@ -275,7 +332,7 @@ class Avb {
fis.read(bb)
log.debug("Parsed Auth Signature (of hash): " + Hex.encodeHexString(bb))
ai.authBlob = AVBInfo.AuthBlob()
ai.authBlob = AuthBlob()
ai.authBlob!!.offset = authBlockOffset
ai.authBlob!!.size = vbMetaHeader.authentication_data_block_size
ai.authBlob!!.hash = Hex.encodeHexString(ba)
@ -321,7 +378,7 @@ class Avb {
val alg = Algorithms.get(ai.header!!.algorithm_type.toInt())!!
val encodedDesc = ai.auxBlob!!.encodeDescriptors()
//encoded pubkey
val encodedKey = Blob.encodePubKey(alg)
val encodedKey = AuxBlob.encodePubKey(alg)
//3 - whole aux blob
var auxBlob = byteArrayOf()
@ -331,7 +388,7 @@ class Avb {
} else {
log.warn("Using different key from original vbmeta")
}
auxBlob = Blob.getAuxDataBlob(encodedDesc, encodedKey)
auxBlob = Blob.getAuxDataBlob(encodedDesc, encodedKey, byteArrayOf())
} else {
log.info("No aux blob")
}
@ -381,9 +438,9 @@ class Avb {
companion object {
private val log = LoggerFactory.getLogger(Avb::class.java)
val AVB_VERSION_MAJOR = 1
val AVB_VERSION_MINOR = 1
val AVB_VERSION_SUB = 0
const val AVB_VERSION_MAJOR = 1
const val AVB_VERSION_MINOR = 1
const val AVB_VERSION_SUB = 0
fun getJsonFileName(image_file: String): String {
val fileName = File(image_file).name

@ -18,20 +18,26 @@ import java.math.RoundingMode
import java.nio.charset.StandardCharsets
import java.nio.file.Files
import java.nio.file.Paths
import java.security.KeyFactory
import java.security.PrivateKey
import java.security.spec.PKCS8EncodedKeySpec
import java.security.spec.RSAPrivateKeySpec
import java.util.zip.GZIPInputStream
import java.util.zip.GZIPOutputStream
import javax.crypto.Cipher
class Helper {
companion object {
fun joinWithNulls(vararg source: ByteArray?): ByteArray {
val baos = ByteArrayOutputStream()
for (src in source) {
src?.let {
if (src.isNotEmpty()) baos.write(src)
}
}
return baos.toByteArray()
}
fun join(vararg source: ByteArray): ByteArray {
val baos = ByteArrayOutputStream()
for (src in source) {
if (source.isNotEmpty()) baos.write(src)
if (src.isNotEmpty()) baos.write(src)
}
return baos.toByteArray()
}
@ -76,7 +82,7 @@ class Helper {
while (true) {
bytesRead = fis.read(buffer)
if (bytesRead <= 0) break
gos.write(buffer, 0, bytesRead);
gos.write(buffer, 0, bytesRead)
}
gos.finish()
log.info("gzip done: $decompressedFile -> $compressedFile")
@ -179,6 +185,7 @@ class Helper {
@return: AvbRSAPublicKeyHeader formatted bytearray
https://android.googlesource.com/platform/external/avb/+/master/libavb/avb_crypto.h#158
from avbtool::encode_rsa_key()
*/
fun encodeRSAkey(key: ByteArray): ByteArray {
val rsa = KeyUtil.parsePemPrivateKey(ByteArrayInputStream(key))
@ -253,6 +260,14 @@ class Helper {
}
}
fun dumpToFile(dumpFile: String, data: ByteArray) {
log.info("Dumping data to $dumpFile ...")
FileOutputStream(dumpFile, false).use { fos ->
fos.write(data)
}
log.info("Dumping data to $dumpFile done")
}
private val log = LoggerFactory.getLogger("Helper")
}
}

@ -1,214 +0,0 @@
package cfig
import org.apache.commons.exec.CommandLine
import java.util.*
data class ImgArgs(
//file input
var kernel: String = UnifiedConfig.workDir + "kernel",
var ramdisk: String? = UnifiedConfig.workDir + "ramdisk.img.gz",
var second: String? = UnifiedConfig.workDir + "second",
var dtbo: String? = UnifiedConfig.workDir + "dtbo",
//file output
var output: String = "boot.img",
var cfg: String = UnifiedConfig.workDir + "bootimg.json",
//load address
internal var base: Long = 0,
internal var kernelOffset: Long = 0,
var ramdiskOffset: Long = 0,
var secondOffset: Long = 0,
var tagsOffset: Long = 0,
var dtboOffset: Long = 0,
var board: String = "",
var cmdline: String = "",
var osVersion: String? = null,
var osPatchLevel: String? = null,
var headerVersion: Int = 0,
var pageSize: Int = 0,
var id: Boolean = true,
//internal
var mkbootimg: String = "../src/mkbootimg/mkbootimg",
//signature
var verifyType: VerifyType = VerifyType.VERIFY
) {
enum class VerifyType {
VERIFY,
AVB
}
fun toCommandList(): List<String> {
val ret = ArrayList<String>()
ret.add(mkbootimg)
ret.add("--header_version")
ret.add(headerVersion.toString())
ret.add("--base")
ret.add("0x" + java.lang.Long.toHexString(base))
ret.add("--kernel")
ret.add(kernel)
ret.add("--kernel_offset")
ret.add("0x" + java.lang.Long.toHexString(kernelOffset))
ramdisk?.let {
ret.add("--ramdisk")
ret.add(it)
}
ret.add("--ramdisk_offset")
ret.add("0x" + java.lang.Long.toHexString(ramdiskOffset))
second?.let {
ret.add("--second")
ret.add(it)
}
ret.add("--second_offset")
ret.add("0x" + java.lang.Long.toHexString(secondOffset))
if (!board.isBlank()) {
ret.add("--board")
ret.add(board)
}
if (headerVersion > 0) {
dtbo?.let { dtbo ->
ret.add("--recovery_dtbo")
ret.add(dtbo)
}
ret.add("--recovery_dtbo_offset")
ret.add("0x" + java.lang.Long.toHexString(dtboOffset))
}
ret.add("--pagesize")
ret.add(Integer.toString(pageSize))
ret.add("--cmdline")
ret.add(cmdline)
if (!osVersion.isNullOrBlank()) {
ret.add("--os_version")
ret.add(osVersion!!)
}
if (!osPatchLevel.isNullOrBlank()) {
ret.add("--os_patch_level")
ret.add(osPatchLevel!!)
}
ret.add("--tags_offset")
ret.add("0x" + java.lang.Long.toHexString(tagsOffset))
if (id) {
ret.add("--id")
}
ret.add("--output")
ret.add(output + ".google")
return ret
}
fun toCommandString(): String {
val ret = StringBuilder()
ret.append(mkbootimg)
ret.append(" --header_version ")
ret.append(headerVersion.toString())
ret.append(" --base ")
ret.append("0x" + java.lang.Long.toHexString(base))
ret.append(" --kernel ")
ret.append(kernel)
ret.append(" --kernel_offset ")
ret.append("0x" + java.lang.Long.toHexString(kernelOffset))
ramdisk?.let {
ret.append(" --ramdisk ")
ret.append(it)
}
ret.append(" --ramdisk_offset ")
ret.append("0x" + java.lang.Long.toHexString(ramdiskOffset))
second?.let {
ret.append(" --second ")
ret.append(it)
}
ret.append(" --second_offset ")
ret.append("0x" + java.lang.Long.toHexString(secondOffset))
if (!board.isBlank()) {
ret.append(" --board ")
ret.append(board)
}
if (headerVersion > 0) {
dtbo?.let { dtbo ->
ret.append(" --recovery_dtbo ")
ret.append(dtbo)
}
ret.append(" --recovery_dtbo_offset ")
ret.append("0x" + java.lang.Long.toHexString(dtboOffset))
}
ret.append(" --pagesize ")
ret.append(Integer.toString(pageSize))
ret.append(" --cmdline ")
ret.append("\"" + cmdline + "\"")
if (!osVersion.isNullOrBlank()) {
ret.append(" --os_version ")
ret.append(osVersion)
}
if (!osPatchLevel.isNullOrBlank()) {
ret.append(" --os_patch_level ")
ret.append(osPatchLevel)
}
ret.append(" --tags_offset ")
ret.append("0x" + java.lang.Long.toHexString(tagsOffset))
if (id) {
ret.append(" --id ")
}
ret.append(" --output ")
ret.append(output + ".google")
return ret.toString()
}
fun toCommandLine(): CommandLine {
val ret = CommandLine(mkbootimg)
ret.addArgument(" --header_version ")
ret.addArgument(headerVersion.toString())
ret.addArgument(" --base ")
ret.addArgument("0x" + java.lang.Long.toHexString(base))
ret.addArgument(" --kernel ")
ret.addArgument(kernel)
ret.addArgument(" --kernel_offset ")
ret.addArgument("0x" + java.lang.Long.toHexString(kernelOffset))
ramdisk?.let {
ret.addArgument(" --ramdisk ")
ret.addArgument(it)
}
ret.addArgument(" --ramdisk_offset ")
ret.addArgument("0x" + java.lang.Long.toHexString(ramdiskOffset))
second?.let {
ret.addArgument(" --second ")
ret.addArgument(it)
}
ret.addArgument(" --second_offset ")
ret.addArgument("0x" + java.lang.Long.toHexString(secondOffset))
if (!board.isBlank()) {
ret.addArgument(" --board ")
ret.addArgument(board)
}
if (headerVersion > 0) {
dtbo?.let { dtbo ->
ret.addArgument(" --recovery_dtbo ")
ret.addArgument(dtbo)
}
ret.addArgument(" --recovery_dtbo_offset ")
ret.addArgument("0x" + java.lang.Long.toHexString(dtboOffset))
}
ret.addArgument(" --pagesize ")
ret.addArgument(Integer.toString(pageSize))
ret.addArgument(" --cmdline ")
ret.addArgument(cmdline, false)
if (!osVersion.isNullOrBlank()) {
ret.addArgument(" --os_version ")
ret.addArgument(osVersion)
}
if (!osPatchLevel.isNullOrBlank()) {
ret.addArgument(" --os_patch_level ")
ret.addArgument(osPatchLevel)
}
ret.addArgument(" --tags_offset ")
ret.addArgument("0x" + java.lang.Long.toHexString(tagsOffset))
if (id) {
ret.addArgument(" --id ")
}
ret.addArgument(" --output ")
ret.addArgument(output + ".google")
return ret
}
}

@ -35,4 +35,4 @@ data class ImgInfo(
var verity_pk8: String = "security/verity.pk8",
var verity_pem: String = "security/verity.x509.pem",
var jarPath: String = "boot_signer/build/libs/boot_signer.jar")
}
}

@ -1,16 +1,16 @@
package cfig
import cfig.bootimg.BootImgInfo
import com.fasterxml.jackson.databind.ObjectMapper
import org.apache.commons.exec.CommandLine
import org.apache.commons.exec.DefaultExecutor
import org.apache.commons.exec.PumpStreamHandler
import org.junit.Assert.assertTrue
import org.slf4j.LoggerFactory
import java.io.*
import java.nio.ByteBuffer
import java.nio.ByteOrder
import java.security.MessageDigest
import java.util.regex.Pattern
import org.junit.Assert.*
class Packer {
private val log = LoggerFactory.getLogger("Packer")
@ -69,148 +69,34 @@ class Packer {
inBF.put(ByteArray(pad))
}
private fun writeData(inArgs: ImgArgs) {
private fun writeData(info2: BootImgInfo, outputFile: String) {
log.info("Writing data ...")
val param = ParamConfig()
val bf = ByteBuffer.allocate(1024 * 1024 * 64)//assume total SIZE small than 64MB
bf.order(ByteOrder.LITTLE_ENDIAN)
writePaddedFile(bf, inArgs.kernel, inArgs.pageSize)
inArgs.ramdisk?.let { ramdisk ->
writePaddedFile(bf, ramdisk, inArgs.pageSize)
}
inArgs.second?.let { second ->
writePaddedFile(bf, second, inArgs.pageSize)
}
inArgs.dtbo?.let { dtbo ->
writePaddedFile(bf, dtbo, inArgs.pageSize)
}
//write
FileOutputStream(inArgs.output + ".clear", true).use { fos ->
fos.write(bf.array(), 0, bf.position())
writePaddedFile(bf, param.kernel, info2.pageSize)
if (info2.ramdiskLength > 0) {
writePaddedFile(bf, param.ramdisk!!, info2.pageSize)
}
}
@Throws(IllegalArgumentException::class)
private fun packOsVersion(x: String?): Int {
if (x.isNullOrBlank()) return 0
val pattern = Pattern.compile("^(\\d{1,3})(?:\\.(\\d{1,3})(?:\\.(\\d{1,3}))?)?")
val m = pattern.matcher(x)
if (m.find()) {
val a = Integer.decode(m.group(1))
var b = 0
var c = 0
if (m.groupCount() >= 2) {
b = Integer.decode(m.group(2))
}
if (m.groupCount() == 3) {
c = Integer.decode(m.group(3))
}
assertTrue(a < 128)
assertTrue(b < 128)
assertTrue(c < 128)
return (a shl 14) or (b shl 7) or c
} else {
throw IllegalArgumentException("invalid os_version")
if (info2.secondBootloaderLength > 0) {
writePaddedFile(bf, param.second!!, info2.pageSize)
}
}
private fun parseOsPatchLevel(x: String?): Int {
if (x.isNullOrBlank()) return 0
val ret: Int
val pattern = Pattern.compile("^(\\d{4})-(\\d{2})-(\\d{2})")
val matcher = pattern.matcher(x)
if (matcher.find()) {
val y = Integer.parseInt(matcher.group(1), 10) - 2000
val m = Integer.parseInt(matcher.group(2), 10)
// 7 bits allocated for the year, 4 bits for the month
assertTrue(y in 0..127)
assertTrue(m in 1..12)
ret = (y shl 4) or m
} else {
throw IllegalArgumentException("invalid os_patch_level")
if (info2.recoveryDtboLength > 0) {
writePaddedFile(bf, param.dtbo!!, info2.pageSize)
}
return ret
}
private fun writeHeader(inArgs: ImgArgs): ByteArray {
log.info("Writing header ...")
val bf = ByteBuffer.allocate(1024 * 32)
bf.order(ByteOrder.LITTLE_ENDIAN)
//header start
bf.put("ANDROID!".toByteArray())
bf.putInt(File(inArgs.kernel).length().toInt())
bf.putInt((inArgs.base + inArgs.kernelOffset).toInt())
if (null == inArgs.ramdisk) {
bf.putInt(0)
} else {
bf.putInt(File(inArgs.ramdisk).length().toInt())
if (info2.dtbLength > 0) {
writePaddedFile(bf, param.dtb!!, info2.pageSize)
}
bf.putInt((inArgs.base + inArgs.ramdiskOffset).toInt())
if (null == inArgs.second) {
bf.putInt(0)
} else {
bf.putInt(File(inArgs.second).length().toInt())
}
bf.putInt((inArgs.base + inArgs.secondOffset).toInt())
bf.putInt((inArgs.base + inArgs.tagsOffset).toInt())
bf.putInt(inArgs.pageSize)
bf.putInt(inArgs.headerVersion)
bf.putInt((packOsVersion(inArgs.osVersion) shl 11) or parseOsPatchLevel(inArgs.osPatchLevel))
if (inArgs.board.isBlank()) {
bf.put(ByteArray(16))
} else {
bf.put(inArgs.board.toByteArray())
bf.put(ByteArray(16 - inArgs.board.length))
}
bf.put(inArgs.cmdline.substring(0, minOf(512, inArgs.cmdline.length)).toByteArray())
bf.put(ByteArray(512 - minOf(512, inArgs.cmdline.length)))
//hash
val imageId = if (inArgs.headerVersion > 0) {
hashFileAndSize(inArgs.kernel, inArgs.ramdisk, inArgs.second, inArgs.dtbo)
} else {
hashFileAndSize(inArgs.kernel, inArgs.ramdisk, inArgs.second)
}
bf.put(imageId)
bf.put(ByteArray(32 - imageId.size))
if (inArgs.cmdline.length > 512) {
bf.put(inArgs.cmdline.substring(512).toByteArray())
bf.put(ByteArray(1024 + 512 - inArgs.cmdline.length))
} else {
bf.put(ByteArray(1024))
}
if (inArgs.headerVersion > 0) {
if (inArgs.dtbo == null) {
bf.putInt(0)
} else {
bf.putInt(File(inArgs.dtbo).length().toInt())
}
bf.putLong(inArgs.dtboOffset)
bf.putInt(1648)
}
//padding
padFile(bf, inArgs.pageSize)
//write
FileOutputStream(inArgs.output + ".clear", false).use { fos ->
FileOutputStream(outputFile + ".clear", true).use { fos ->
fos.write(bf.array(), 0, bf.position())
}
return imageId
}
fun packRootfs(args: ImgArgs, mkbootfs: String) {
fun packRootfs(mkbootfs: String) {
val param = ParamConfig()
log.info("Packing rootfs ${UnifiedConfig.workDir}root ...")
val outputStream = ByteArrayOutputStream()
val exec = DefaultExecutor()
@ -218,8 +104,8 @@ class Packer {
val cmdline = "$mkbootfs ${UnifiedConfig.workDir}root"
log.info(cmdline)
exec.execute(CommandLine.parse(cmdline))
Helper.gnuZipFile2(args.ramdisk!!, ByteArrayInputStream(outputStream.toByteArray()))
log.info("${args.ramdisk} is ready")
Helper.gnuZipFile2(param.ramdisk!!, ByteArrayInputStream(outputStream.toByteArray()))
log.info("${param.ramdisk} is ready")
}
private fun File.deleleIfExists() {
@ -232,39 +118,44 @@ class Packer {
}
}
fun pack(mkbootimgBin: String, mkbootfsBin: String) {
log.info("Loading config from ${workDir}bootimg.json")
val cfg = ObjectMapper().readValue(File(workDir + "bootimg.json"), UnifiedConfig::class.java)
val readBack = cfg.toArgs()
val args = readBack[0] as ImgArgs
val info = readBack[1] as ImgInfo
args.mkbootimg = mkbootimgBin
log.debug(args.toString())
log.debug(info.toString())
fun pack(mkbootfsBin: String) {
val param = ParamConfig()
log.info("Loading config from ${param.cfg}")
val cfg = ObjectMapper().readValue(File(param.cfg), UnifiedConfig::class.java)
val info2 = cfg.toBootImgInfo()
//clean
File(args.output + ".google").deleleIfExists()
File(args.output + ".clear").deleleIfExists()
File(args.output + ".signed").deleleIfExists()
File(args.output + ".signed2").deleleIfExists()
File(cfg.info.output + ".google").deleleIfExists()
File(cfg.info.output + ".clear").deleleIfExists()
File(cfg.info.output + ".signed").deleleIfExists()
File(cfg.info.output + ".signed2").deleleIfExists()
File("${UnifiedConfig.workDir}ramdisk.img").deleleIfExists()
args.ramdisk?.let {
if (File(it).exists() && !File(UnifiedConfig.workDir + "root").exists()) {
if (info2.ramdiskLength > 0) {
if (File(param.ramdisk).exists() && !File(UnifiedConfig.workDir + "root").exists()) {
//do nothing if we have ramdisk.img.gz but no /root
log.warn("Use prebuilt ramdisk file: $it")
log.warn("Use prebuilt ramdisk file: ${param.ramdisk}")
} else {
File(it).deleleIfExists()
packRootfs(args, mkbootfsBin)
File(param.ramdisk).deleleIfExists()
packRootfs(mkbootfsBin)
}
}
writeHeader(args)
writeData(args)
val encodedHeader = info2.encode()
//write
FileOutputStream(cfg.info.output + ".clear", false).use { fos ->
fos.write(encodedHeader)
fos.write(ByteArray(info2.pageSize - encodedHeader.size))
}
writeData(info2, cfg.info.output)
val googleCmd = info2.toCommandLine().apply {
addArgument(cfg.info.output + ".google")
}
DefaultExecutor().execute(googleCmd)
DefaultExecutor().execute(args.toCommandLine())
val ourHash = hashFileAndSize(args.output + ".clear")
val googleHash = hashFileAndSize(args.output + ".google")
val ourHash = hashFileAndSize(cfg.info.output + ".clear")
val googleHash = hashFileAndSize(cfg.info.output + ".google")
log.info("ours hash ${Helper.toHexString(ourHash)}, google's hash ${Helper.toHexString(googleHash)}")
if (ourHash.contentEquals(googleHash)) {
log.info("Hash verification passed: ${Helper.toHexString(ourHash)}")

@ -0,0 +1,11 @@
package cfig
data class ParamConfig(
//file input
var kernel: String = UnifiedConfig.workDir + "kernel",
var ramdisk: String? = UnifiedConfig.workDir + "ramdisk.img.gz",
var second: String? = UnifiedConfig.workDir + "second",
var dtbo: String? = UnifiedConfig.workDir + "recoveryDtbo",
var dtb: String? = UnifiedConfig.workDir + "dtb",
var cfg: String = UnifiedConfig.workDir + "bootimg.json",
val mkbootimg: String = "./src/mkbootimg/mkbootimg")

@ -1,231 +1,131 @@
package cfig
import cfig.bootimg.BootImgInfo
import com.fasterxml.jackson.databind.ObjectMapper
import org.apache.commons.exec.CommandLine
import org.apache.commons.exec.DefaultExecutor
import org.junit.Assert.assertTrue
import org.slf4j.LoggerFactory
import java.io.File
import java.io.FileInputStream
import java.io.InputStream
import java.lang.IllegalStateException
import java.nio.ByteBuffer
import java.nio.ByteOrder
import org.junit.Assert.*
import org.apache.commons.exec.PumpStreamHandler
import java.io.ByteArrayOutputStream
import java.util.regex.Pattern
class Parser {
private val workDir = UnifiedConfig.workDir
private fun parseOsVersion(x: Int): String {
val a = x shr 14
val b = x - (a shl 14) shr 7
val c = x and 0x7f
return String.format("%d.%d.%d", a, b, c)
}
private fun parseOsPatchLevel(x: Int): String {
var y = x shr 4
val m = x and 0xf
y += 2000
return String.format("%d-%02d-%02d", y, m, 0)
}
private fun getHeaderSize(pageSize: Int): Int {
val pad = (pageSize - (1648 and (pageSize - 1))) and (pageSize - 1)
return pad + 1648
}
private fun getPaddingSize(position: Int, pageSize: Int): Int {
return (pageSize - (position and pageSize - 1)) and (pageSize - 1)
}
private fun parseHeader(args: ImgArgs, info: ImgInfo) {
FileInputStream(args.output).use { iS ->
assertTrue(readBytes(iS, 8).contentEquals("ANDROID!".toByteArray()))
info.kernelLength = readInt(iS)
args.kernelOffset = readUnsignedAsLong(iS)
info.ramdiskLength = readInt(iS)
args.ramdiskOffset = readUnsignedAsLong(iS)
info.secondBootloaderLength = readInt(iS)
args.secondOffset = readUnsignedAsLong(iS)
args.tagsOffset = readUnsignedAsLong(iS)
args.pageSize = readInt(iS)
args.headerVersion = readInt(iS)
val osNPatch = readInt(iS)
if (0 != osNPatch) { //treated as 'reserved' in this boot image
args.osVersion = parseOsVersion(osNPatch shr 11)
args.osPatchLevel = parseOsPatchLevel(osNPatch and 0x7ff)
}
args.board = Helper.toCString(readBytes(iS, 16))
if (args.board.isBlank()) {
args.board = ""
}
val cmd1 = Helper.toCString(readBytes(iS, 512))
info.hash = readBytes(iS, 32) //hash
val cmd2 = Helper.toCString(readBytes(iS, 1024))
args.cmdline = cmd1 + cmd2
info.recoveryDtboLength = readInt(iS)
args.dtboOffset = readLong(iS)
info.headerSize = readInt(iS)
//calc subimg positions
info.kernelPosition = getHeaderSize(args.pageSize)
info.ramdiskPosition = info.kernelPosition + info.kernelLength + getPaddingSize(info.kernelLength, args.pageSize)
info.secondBootloaderPosition = info.ramdiskPosition + info.ramdiskLength + getPaddingSize(info.ramdiskLength, args.pageSize)
info.recoveryDtboPosition = info.secondBootloaderPosition + info.secondBootloaderLength + getPaddingSize(info.secondBootloaderLength, args.pageSize)
//adjust args
if (args.kernelOffset > Int.MAX_VALUE
&& args.ramdiskOffset > Int.MAX_VALUE
&& args.secondOffset > Int.MAX_VALUE
&& args.dtboOffset > Int.MAX_VALUE) {
args.base = Int.MAX_VALUE + 1L
args.kernelOffset -= args.base
args.ramdiskOffset -= args.base
args.secondOffset -= args.base
args.tagsOffset -= args.base
args.dtboOffset -= args.base
}
if (info.ramdiskLength == 0) args.ramdisk = null
if (info.kernelLength == 0) throw IllegalStateException("boot image has no kernel")
if (info.secondBootloaderLength == 0) args.second = null
if (info.recoveryDtboLength == 0) args.dtbo = null
}
}//resource-closable
private fun verifiedWithAVB(args: ImgArgs): Boolean {
private fun verifiedWithAVB(fileName: String): Boolean {
val expectedBf = "AVBf".toByteArray()
FileInputStream(args.output).use { fis ->
fis.skip(File(args.output).length() - 64)
FileInputStream(fileName).use { fis ->
fis.skip(File(fileName).length() - 64)
val bf = ByteArray(4)
fis.read(bf)
return bf.contentEquals(expectedBf)
}
}
private fun verifyAVBIntegrity(args: ImgArgs, avbtool: String) {
val cmdline = "$avbtool verify_image --image ${args.output}"
log.info(cmdline)
DefaultExecutor().execute(CommandLine.parse(cmdline))
}
private fun parseAVBInfo(args: ImgArgs, info: ImgInfo, avbtool: String) {
val outputStream = ByteArrayOutputStream()
val exec = DefaultExecutor()
exec.streamHandler = PumpStreamHandler(outputStream)
val cmdline = "$avbtool info_image --image ${args.output}"
log.info(cmdline)
exec.execute(CommandLine.parse(cmdline))
val lines = outputStream.toString().split("\n")
lines.forEach {
val m = Pattern.compile("^Original image size:\\s+(\\d+)\\s*bytes").matcher(it)
if (m.find()) {
(info.signature as ImgInfo.AvbSignature).originalImageSize = Integer.parseInt(m.group(1))
}
val m2 = Pattern.compile("^Image size:\\s+(\\d+)\\s*bytes").matcher(it)
if (m2.find()) {
(info.signature as ImgInfo.AvbSignature).imageSize = Integer.parseInt(m2.group(1))
}
val m3 = Pattern.compile("^\\s*Partition Name:\\s+(\\S+)$").matcher(it)
if (m3.find()) {
(info.signature as ImgInfo.AvbSignature).partName = m3.group(1)
}
val m4 = Pattern.compile("^\\s*Salt:\\s+(\\S+)$").matcher(it)
if (m4.find()) {
(info.signature as ImgInfo.AvbSignature).salt = m4.group(1)
}
val m5 = Pattern.compile("^\\s*Algorithm:\\s+(\\S+)$").matcher(it)
if (m5.find()) {
(info.signature as ImgInfo.AvbSignature).algorithm = m5.group(1)
}
val m6 = Pattern.compile("^\\s*Hash Algorithm:\\s+(\\S+)$").matcher(it)
if (m6.find()) {
(info.signature as ImgInfo.AvbSignature).hashAlgorithm = m6.group(1)
}
log.debug("[" + it + "]")
}
assertNotNull((info.signature as ImgInfo.AvbSignature).imageSize)
assertNotNull((info.signature as ImgInfo.AvbSignature).originalImageSize)
assertTrue(!(info.signature as ImgInfo.AvbSignature).partName.isNullOrBlank())
assertTrue(!(info.signature as ImgInfo.AvbSignature).salt.isNullOrBlank())
}
private fun unpackRamdisk(imgArgs: ImgArgs) {
private fun unpackRamdisk(workDir: String, ramdiskGz: String) {
val exe = DefaultExecutor()
exe.workingDirectory = File(workDir + "root")
if (exe.workingDirectory.exists()) exe.workingDirectory.deleteRecursively()
exe.workingDirectory.mkdirs()
val ramdiskFile = File(imgArgs.ramdisk!!.removeSuffix(".gz"))
val ramdiskFile = File(ramdiskGz.removeSuffix(".gz"))
exe.execute(CommandLine.parse("cpio -i -m -F " + ramdiskFile.canonicalPath))
log.info("extract ramdisk done: $ramdiskFile -> ${exe.workingDirectory.path}")
log.info(" ramdisk extracted : $ramdiskFile -> ${exe.workingDirectory.path}")
}
fun parseAndExtract(fileName: String?, avbtool: String) {
val imgArgs = ImgArgs(output = fileName ?: "boot.img")
val imgInfo = ImgInfo()
if (!fileName.isNullOrBlank()) {
imgArgs.output = fileName!!
fun parseBootImgHeader(fileName: String, avbtool: String): BootImgInfo {
val info2 = BootImgInfo(FileInputStream(fileName))
val param = ParamConfig()
if (verifiedWithAVB(fileName)) {
info2.signatureType = BootImgInfo.VerifyType.AVB
verifyAVBIntegrity(fileName, avbtool)
} else {
info2.signatureType = BootImgInfo.VerifyType.VERIFY
}
info2.imageSize = File(fileName).length()
//parse header
parseHeader(imgArgs, imgInfo)
val cfg = UnifiedConfig.fromBootImgInfo(info2).apply {
info.output = File(fileName).name
}
//parse signature
if (verifiedWithAVB(imgArgs)) {
imgArgs.verifyType = ImgArgs.VerifyType.AVB
imgInfo.signature = ImgInfo.AvbSignature()
verifyAVBIntegrity(imgArgs, avbtool)
parseAVBInfo(imgArgs, imgInfo, avbtool)
ObjectMapper().writerWithDefaultPrettyPrinter().writeValue(File(param.cfg), cfg)
log.info("image info written to ${param.cfg}")
return info2
}
fun extractBootImg(fileName: String, info2: BootImgInfo) {
val param = ParamConfig()
if (info2.kernelLength > 0) {
Helper.extractFile(fileName,
param.kernel,
info2.kernelPosition.toLong(),
info2.kernelLength.toInt())
log.info(" kernel dumped to: ${param.kernel}, size=${info2.kernelLength / 1024.0 / 1024.0}MB")
} else {
imgArgs.verifyType = ImgArgs.VerifyType.VERIFY
imgInfo.signature = ImgInfo.VeritySignature()
throw RuntimeException("bad boot image: no kernel found")
}
log.info(imgArgs.toString())
log.info(imgInfo.toString())
if (info2.ramdiskLength > 0) {
Helper.extractFile(fileName,
param.ramdisk!!,
info2.ramdiskPosition.toLong(),
info2.ramdiskLength.toInt())
log.info("ramdisk dumped to: ${param.ramdisk}")
Helper.unGnuzipFile(param.ramdisk!!, param.ramdisk!!.removeSuffix(".gz"))
unpackRamdisk(UnifiedConfig.workDir, param.ramdisk!!.removeSuffix(".gz"))
} else {
log.info("no ramdisk found")
}
Helper.extractFile(imgArgs.output, imgArgs.kernel, imgInfo.kernelPosition.toLong(), imgInfo.kernelLength)
log.info("kernel dumped to ${imgArgs.kernel}")
imgArgs.ramdisk?.let { ramdisk ->
log.info("ramdisk dumped to ${imgArgs.ramdisk}")
Helper.extractFile(imgArgs.output, ramdisk, imgInfo.ramdiskPosition.toLong(), imgInfo.ramdiskLength)
Helper.unGnuzipFile(ramdisk, workDir + "ramdisk.img")
unpackRamdisk(imgArgs)
if (info2.secondBootloaderLength > 0) {
Helper.extractFile(fileName,
param.second!!,
info2.secondBootloaderPosition.toLong(),
info2.secondBootloaderLength.toInt())
log.info("second bootloader dumped to ${param.second}")
} else {
log.info("no second bootloader found")
}
imgArgs.second?.let { second ->
Helper.extractFile(imgArgs.output, second, imgInfo.secondBootloaderPosition.toLong(), imgInfo.secondBootloaderLength)
log.info("second bootloader dumped to ${imgArgs.second}")
if (info2.recoveryDtboLength > 0) {
Helper.extractFile(fileName,
param.dtbo!!,
info2.recoveryDtboPosition.toLong(),
info2.recoveryDtboLength.toInt())
log.info("dtbo dumped to ${param.dtbo}")
} else {
if (info2.headerVersion > 0) {
log.info("no recovery dtbo found")
} else {
log.debug("no recovery dtbo for header v0")
}
}
imgArgs.dtbo?.let { dtbo ->
Helper.extractFile(imgArgs.output, dtbo, imgInfo.recoveryDtboPosition.toLong(), imgInfo.recoveryDtboLength)
log.info("dtbo dumped to ${imgArgs.dtbo}")
if (info2.dtbLength > 0) {
Helper.extractFile(fileName,
param.dtb!!,
info2.dtbPosition.toLong(),
info2.dtbLength.toInt())
log.info("dtb dumped to ${param.dtb}")
} else {
if (info2.headerVersion > 1) {
log.info("no dtb found")
} else {
log.debug("no dtb for header v0")
}
}
val cfg = UnifiedConfig.fromArgs(imgArgs, imgInfo)
log.debug(ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(cfg))
ObjectMapper().writerWithDefaultPrettyPrinter().writeValue(File(imgArgs.cfg), cfg)
log.info("image info written to ${imgArgs.cfg}")
}
companion object {
private val log = LoggerFactory.getLogger("Parser")!!
fun verifyAVBIntegrity(fileName: String, avbtool: String) {
val cmdline = "$avbtool verify_image --image $fileName"
log.info(cmdline)
DefaultExecutor().execute(CommandLine.parse(cmdline))
}
fun readShort(iS: InputStream): Short {
val bf = ByteBuffer.allocate(128)
bf.order(ByteOrder.LITTLE_ENDIAN)

@ -1,7 +1,6 @@
package cfig
import avb.AVBInfo
import com.fasterxml.jackson.databind.ObjectMapper
import cfig.bootimg.BootImgInfo
import org.slf4j.LoggerFactory
import java.io.File
@ -27,41 +26,41 @@ fun main(args: Array<String>) {
"unpack" -> {
if (File(UnifiedConfig.workDir).exists()) File(UnifiedConfig.workDir).deleteRecursively()
File(UnifiedConfig.workDir).mkdirs()
Parser().parseAndExtract(fileName = args[1], avbtool = args[3])
if (UnifiedConfig.readBack()[2] is ImgInfo.AvbSignature) {
val info = Parser().parseBootImgHeader(fileName = args[1], avbtool = args[3])
if (info.signatureType == BootImgInfo.VerifyType.AVB) {
log.info("continue to analyze vbmeta info in " + args[1])
Avb().parseVbMeta(args[1])
if (File("vbmeta.img").exists()) {
Avb().parseVbMeta("vbmeta.img")
}
}
Parser().extractBootImg(fileName = args[1], info2 = info)
}
"pack" -> {
Packer().pack(mkbootimgBin = args[2], mkbootfsBin = args[5])
Packer().pack(mkbootfsBin = args[5])
}
"sign" -> {
Signer.sign(avbtool = args[3], bootSigner = args[4])
val readBack = UnifiedConfig.readBack()
if ((readBack[0] as ImgArgs).verifyType == ImgArgs.VerifyType.AVB) {
val readBack2 = UnifiedConfig.readBack2()
if (readBack2.signatureType == BootImgInfo.VerifyType.AVB) {
if (File("vbmeta.img").exists()) {
val sig = readBack[2] as ImgInfo.AvbSignature
val newBootImgInfo = Avb().parseVbMeta(args[1] + ".signed")
val hashDesc = newBootImgInfo.auxBlob!!.hashDescriptors[0]
val origVbMeta = ObjectMapper().readValue(File(Avb.getJsonFileName("vbmeta.img")),
AVBInfo::class.java)
for (i in 0..(origVbMeta.auxBlob!!.hashDescriptors.size - 1)) {
if (origVbMeta.auxBlob!!.hashDescriptors[i].partition_name == sig.partName) {
val seq = origVbMeta.auxBlob!!.hashDescriptors[i].sequence
origVbMeta.auxBlob!!.hashDescriptors[i] = hashDesc
origVbMeta.auxBlob!!.hashDescriptors[i].sequence = seq
}
}
ObjectMapper().writerWithDefaultPrettyPrinter().writeValue(File(Avb.getJsonFileName("vbmeta.img")), origVbMeta)
log.info("vbmeta info updated")
Avb().packVbMetaWithPadding()
// val sig = readBack[2] as ImgInfo.AvbSignature
// val newBootImgInfo = Avb().parseVbMeta(args[1] + ".signed")
// val hashDesc = newBootImgInfo.auxBlob!!.hashDescriptors[0]
// val origVbMeta = ObjectMapper().readValue(File(Avb.getJsonFileName("vbmeta.img")),
// AVBInfo::class.java)
// for (i in 0..(origVbMeta.auxBlob!!.hashDescriptors.size - 1)) {
// if (origVbMeta.auxBlob!!.hashDescriptors[i].partition_name == sig.partName) {
// val seq = origVbMeta.auxBlob!!.hashDescriptors[i].sequence
// origVbMeta.auxBlob!!.hashDescriptors[i] = hashDesc
// origVbMeta.auxBlob!!.hashDescriptors[i].sequence = seq
// }
// }
// ObjectMapper().writerWithDefaultPrettyPrinter().writeValue(File(Avb.getJsonFileName("vbmeta.img")), origVbMeta)
// log.info("vbmeta.img info updated")
// Avb().packVbMetaWithPadding()
} else {
//no vbmeta provided
log.info("no vbmeta.img need to update")
}
}//end-of-avb
}//end-of-sign

@ -2,6 +2,7 @@ package cfig
import avb.AVBInfo
import avb.alg.Algorithms
import cfig.bootimg.BootImgInfo
import com.fasterxml.jackson.databind.ObjectMapper
import org.apache.commons.exec.CommandLine
import org.apache.commons.exec.DefaultExecutor
@ -11,68 +12,63 @@ import java.io.File
class Signer {
companion object {
private val log = LoggerFactory.getLogger(Signer::class.java)
private val workDir = UnifiedConfig.workDir
fun sign(avbtool: String, bootSigner: String) {
log.info("Loading config from ${workDir}bootimg.json")
val readBack = UnifiedConfig.readBack()
val args = readBack[0] as ImgArgs
log.info("Loading config from ${ParamConfig().cfg}")
val info2 = UnifiedConfig.readBack2()
val cfg = ObjectMapper().readValue(File(ParamConfig().cfg), UnifiedConfig::class.java)
when (args.verifyType) {
ImgArgs.VerifyType.VERIFY -> {
when (info2.signatureType) {
BootImgInfo.VerifyType.VERIFY -> {
log.info("Signing with verified-boot 1.0 style")
val sig = readBack[2] as ImgInfo.VeritySignature
DefaultExecutor().execute(CommandLine.parse("java -jar $bootSigner " +
"${sig.path} ${args.output}.clear ${sig.verity_pk8} ${sig.verity_pem} ${args.output}.signed"))
val sig = ImgInfo.VeritySignature()
val bootSignCmd = "java -jar $bootSigner " +
"${sig.path} ${cfg.info.output}.clear " +
"${sig.verity_pk8} ${sig.verity_pem} " +
"${cfg.info.output}.signed"
log.info(bootSignCmd)
DefaultExecutor().execute(CommandLine.parse(bootSignCmd))
}
ImgArgs.VerifyType.AVB -> {
BootImgInfo.VerifyType.AVB -> {
log.info("Adding hash_footer with verified-boot 2.0 style")
val sig = readBack[2] as ImgInfo.AvbSignature
val ai = ObjectMapper().readValue(File(Avb.getJsonFileName(args.output)), AVBInfo::class.java)
//val alg = Algorithms.get(ai.header!!.algorithm_type.toInt())
val ai = ObjectMapper().readValue(File(Avb.getJsonFileName(cfg.info.output)), AVBInfo::class.java)
val alg = Algorithms.get(ai.header!!.algorithm_type.toInt())
val bootDesc = ai.auxBlob!!.hashDescriptors[0]
//our signer
File(args.output + ".clear").copyTo(File(args.output + ".signed"))
Avb().add_hash_footer(args.output + ".signed",
sig.imageSize!!.toLong(),
File(cfg.info.output + ".clear").copyTo(File(cfg.info.output + ".signed"))
Avb().add_hash_footer(cfg.info.output + ".signed",
info2.imageSize.toLong(),
false,
false,
salt = sig.salt,
hash_algorithm = sig.hashAlgorithm!!,
partition_name = sig.partName!!,
salt = Helper.toHexString(bootDesc.salt),
hash_algorithm = bootDesc.hash_algorithm_str,
partition_name = bootDesc.partition_name,
rollback_index = ai.header!!.rollback_index,
common_algorithm = sig.algorithm!!,
common_algorithm = alg!!.name,
inReleaseString = ai.header!!.release_string)
//original signer
File(args.output + ".clear").copyTo(File(args.output + ".signed2"))
val signKey = Algorithms.get(sig.algorithm!!)
File(cfg.info.output + ".clear").copyTo(File(cfg.info.output + ".signed2"))
var cmdlineStr = "$avbtool add_hash_footer " +
"--image ${args.output}.signed2 " +
"--partition_size ${sig.imageSize} " +
"--salt ${sig.salt} " +
"--partition_name ${sig.partName} " +
"--hash_algorithm ${sig.hashAlgorithm} " +
"--algorithm ${sig.algorithm} "
if (signKey!!.defaultKey.isNotBlank()) {
cmdlineStr += "--key ${signKey.defaultKey}"
"--image ${cfg.info.output}.signed2 " +
"--partition_size ${info2.imageSize} " +
"--salt ${Helper.toHexString(bootDesc.salt)} " +
"--partition_name ${bootDesc.partition_name} " +
"--hash_algorithm ${bootDesc.hash_algorithm_str} " +
"--algorithm ${alg.name} "
if (alg.defaultKey.isNotBlank()) {
cmdlineStr += "--key ${alg.defaultKey}"
}
log.warn(cmdlineStr)
val cmdLine = CommandLine.parse(cmdlineStr)
cmdLine.addArgument("--internal_release_string")
cmdLine.addArgument(ai.header!!.release_string, false)
DefaultExecutor().execute(cmdLine)
verifyAVBIntegrity(args, avbtool)
Parser.verifyAVBIntegrity(cfg.info.output, avbtool)
}
}
}
private fun verifyAVBIntegrity(args: ImgArgs, avbtool: String) {
val tgt = args.output + ".signed"
log.info("Verifying AVB: $tgt")
DefaultExecutor().execute(CommandLine.parse("$avbtool verify_image --image $tgt"))
log.info("Verifying image passed: $tgt")
}
fun mapToJson(m: LinkedHashMap<*, *>): String {
val sb = StringBuilder()
m.forEach { k, v ->

@ -1,5 +1,6 @@
package cfig
import cfig.bootimg.BootImgInfo
import com.fasterxml.jackson.annotation.JsonInclude
import com.fasterxml.jackson.databind.ObjectMapper
import org.slf4j.LoggerFactory
@ -12,7 +13,8 @@ data class UnifiedConfig(
var ramdisk: CommArgs? = null,
var secondBootloader: CommArgs? = null,
var recoveryDtbo: CommArgs? = null,
var signature: Any? = ImgInfo.VeritySignature()
var dtb: CommArgs? = null,
var signature: Any? = null
) {
data class CommArgs(
var file: String? = null,
@ -31,132 +33,121 @@ data class UnifiedConfig(
var cmdline: String = "",
var osVersion: String? = null,
var osPatchLevel: String? = null,
var hash: String = "",
var verify: ImgArgs.VerifyType = ImgArgs.VerifyType.VERIFY)
fun toArgs(): Array<Any> {
val args = ImgArgs()
val info = ImgInfo()
args.output = this.info.output
args.kernel = this.kernel.file ?: workDir + "kernel"
args.kernelOffset = this.kernel.loadOffset.removePrefix("0x").toLong(16)
info.kernelPosition = Integer.decode(this.kernel.position)
info.kernelLength = Integer.decode(this.kernel.size)
if (this.ramdisk == null) {
args.ramdisk = null
} else {
args.ramdisk = this.ramdisk!!.file
args.ramdiskOffset = this.ramdisk!!.loadOffset.removePrefix("0x").toLong(16)
info.ramdiskPosition = Integer.decode(this.ramdisk!!.position)
info.ramdiskLength = Integer.decode(this.ramdisk!!.size)
var hash: ByteArray = byteArrayOf(),
var verify: BootImgInfo.VerifyType = BootImgInfo.VerifyType.VERIFY,
var imageSize: Long = 0)
fun toBootImgInfo(): BootImgInfo {
val ret = BootImgInfo()
ret.kernelOffset = this.kernel.loadOffset.removePrefix("0x").toLong(16)
ret.kernelLength = Integer.decode(this.kernel.size).toLong()
ret.kernelOffset = this.kernel.loadOffset.removePrefix("0x").toLong(16)
ret.kernelLength = Integer.decode(this.kernel.size).toLong()
this.ramdisk?.let {
ret.ramdiskOffset = it.loadOffset.removePrefix("0x").toLong(16)
ret.ramdiskLength = it.size.removePrefix("0x").toLong(16)
}
this.secondBootloader?.let {
args.second = it.file
args.secondOffset = it.loadOffset.removePrefix("0x").toLong(16)
info.secondBootloaderPosition = Integer.decode(it.position)
info.secondBootloaderLength = Integer.decode(it.size)
ret.secondBootloaderOffset = it.loadOffset.removePrefix("0x").toLong(16)
ret.secondBootloaderLength = it.size.removePrefix("0x").toLong(16)
}
if (this.secondBootloader == null) args.second = null
this.recoveryDtbo?.let {
args.dtbo = it.file
args.dtboOffset = it.loadOffset.removePrefix("0x").toLong(16)
info.recoveryDtboPosition = Integer.decode(it.position)
info.recoveryDtboLength = Integer.decode(it.size)
ret.recoveryDtboOffset = it.loadOffset.removePrefix("0x").toLong(16)
ret.recoveryDtboLength = it.size.removePrefix("0x").toLong(16)
}
if (this.recoveryDtbo == null) args.dtbo = null
info.headerSize = this.info.headerSize
args.headerVersion = this.info.headerVersion
args.base = this.info.loadBase.removePrefix("0x").toLong(16)
this.info.board?.let { args.board = it }
args.tagsOffset = this.info.tagsOffset.removePrefix("0x").toLong(16)
args.cmdline = this.info.cmdline
args.osVersion = this.info.osVersion
args.osPatchLevel = this.info.osPatchLevel
info.hash = Helper.fromHexString(this.info.hash)
args.pageSize = this.info.pageSize
args.verifyType = this.info.verify
info.signature = this.signature
return arrayOf(args, info)
this.dtb?.let {
ret.dtbOffset = it.loadOffset.removePrefix("0x").toLong(16)
ret.dtbLength = it.size.removePrefix("0x").toLong(16)
}
ret.headerSize = this.info.headerSize.toLong()
ret.headerVersion = this.info.headerVersion
this.info.board?.let { ret.board = it }
ret.tagsOffset = this.info.tagsOffset.removePrefix("0x").toLong(16)
ret.cmdline = this.info.cmdline
ret.osVersion = this.info.osVersion
ret.osPatchLevel = this.info.osPatchLevel
ret.hash = this.info.hash
ret.pageSize = this.info.pageSize
ret.signatureType = this.info.verify
ret.imageSize = this.info.imageSize
return ret
}
companion object {
const val workDir = "build/unzip_boot/"
private val log = LoggerFactory.getLogger(UnifiedConfig::class.java)
fun fromArgs(args: ImgArgs, info: ImgInfo): UnifiedConfig {
fun fromBootImgInfo(info: BootImgInfo): UnifiedConfig {
val ret = UnifiedConfig()
ret.kernel.file = args.kernel
ret.kernel.loadOffset = "0x${java.lang.Long.toHexString(args.kernelOffset)}"
ret.kernel.size = "0x${Integer.toHexString(info.kernelLength)}"
val param = ParamConfig()
ret.kernel.file = param.kernel
ret.kernel.loadOffset = "0x${java.lang.Long.toHexString(info.kernelOffset)}"
ret.kernel.size = "0x${Integer.toHexString(info.kernelLength.toInt())}"
ret.kernel.position = "0x${Integer.toHexString(info.kernelPosition)}"
ret.ramdisk = CommArgs()
ret.ramdisk!!.loadOffset = "0x${java.lang.Long.toHexString(args.ramdiskOffset)}"
ret.ramdisk!!.size = "0x${Integer.toHexString(info.ramdiskLength)}"
ret.ramdisk!!.loadOffset = "0x${java.lang.Long.toHexString(info.ramdiskOffset)}"
ret.ramdisk!!.size = "0x${Integer.toHexString(info.ramdiskLength.toInt())}"
ret.ramdisk!!.position = "0x${Integer.toHexString(info.ramdiskPosition)}"
args.ramdisk?.let {
ret.ramdisk!!.file = args.ramdisk
if (info.ramdiskLength > 0) {
ret.ramdisk!!.file = param.ramdisk
}
ret.secondBootloader = CommArgs()
ret.secondBootloader!!.loadOffset = "0x${java.lang.Long.toHexString(args.secondOffset)}"
ret.secondBootloader!!.size = "0x${Integer.toHexString(info.secondBootloaderLength)}"
ret.secondBootloader!!.loadOffset = "0x${java.lang.Long.toHexString(info.secondBootloaderOffset)}"
ret.secondBootloader!!.size = "0x${Integer.toHexString(info.secondBootloaderLength.toInt())}"
ret.secondBootloader!!.position = "0x${Integer.toHexString(info.secondBootloaderPosition)}"
args.second?.let {
ret.secondBootloader!!.file = args.second
if (info.secondBootloaderLength > 0) {
ret.secondBootloader!!.file = param.second
}
if (args.headerVersion > 0) {
if (info.headerVersion > 0) {
ret.recoveryDtbo = CommArgs()
args.dtbo?.let {
ret.recoveryDtbo!!.file = args.dtbo
if (info.recoveryDtboLength > 0) {
ret.recoveryDtbo!!.file = param.dtbo
}
ret.recoveryDtbo!!.loadOffset = "0x${java.lang.Long.toHexString(args.dtboOffset)}"
ret.recoveryDtbo!!.size = "0x${Integer.toHexString(info.recoveryDtboLength)}"
ret.recoveryDtbo!!.loadOffset = "0x${java.lang.Long.toHexString(info.recoveryDtboOffset)}"
ret.recoveryDtbo!!.size = "0x${Integer.toHexString(info.recoveryDtboLength.toInt())}"
ret.recoveryDtbo!!.position = "0x${Integer.toHexString(info.recoveryDtboPosition)}"
}
ret.info.output = args.output
ret.info.headerSize = info.headerSize
ret.info.headerVersion = args.headerVersion
ret.info.loadBase = "0x${java.lang.Long.toHexString(args.base)}"
ret.info.board = if (args.board.isBlank()) null else args.board
ret.info.tagsOffset = "0x${java.lang.Long.toHexString(args.tagsOffset)}"
ret.info.cmdline = args.cmdline
ret.info.osVersion = args.osVersion
ret.info.osPatchLevel = args.osPatchLevel
ret.info.hash = Helper.toHexString(info.hash)
ret.info.pageSize = args.pageSize
ret.info.verify = args.verifyType
ret.signature = info.signature
if (info.headerVersion > 1) {
ret.dtb = CommArgs()
if (info.dtbLength > 0) {
ret.dtb!!.file = param.dtb
}
ret.dtb!!.loadOffset = "0x${java.lang.Long.toHexString(info.dtbOffset)}"
ret.dtb!!.size = "0x${Integer.toHexString(info.dtbLength.toInt())}"
ret.dtb!!.position = "0x${Integer.toHexString(info.dtbPosition)}"
}
//ret.info.output = //unknown
ret.info.headerSize = info.headerSize.toInt()
ret.info.headerVersion = info.headerVersion
ret.info.loadBase = "0x${java.lang.Long.toHexString(0)}"
ret.info.board = if (info.board.isBlank()) null else info.board
ret.info.tagsOffset = "0x${java.lang.Long.toHexString(info.tagsOffset)}"
ret.info.cmdline = info.cmdline
ret.info.osVersion = info.osVersion
ret.info.osPatchLevel = info.osPatchLevel
ret.info.hash = info.hash!!
ret.info.pageSize = info.pageSize
ret.info.verify = info.signatureType!!
ret.info.imageSize = info.imageSize
return ret
}
fun readBack(): Array<Any?> {
var ret: Array<Any?> = arrayOfNulls(3)
val readBack = ObjectMapper().readValue(File(workDir + "bootimg.json"),
UnifiedConfig::class.java).toArgs()
val imgArgs = readBack[0] as ImgArgs
val info = readBack[1] as ImgInfo
if (imgArgs.verifyType == ImgArgs.VerifyType.AVB) {
val sig = ObjectMapper().readValue(
Signer.mapToJson(info.signature as LinkedHashMap<*, *>), ImgInfo.AvbSignature::class.java)
ret[2] = sig
} else {
val sig2 = ObjectMapper().readValue(
Signer.mapToJson(info.signature as LinkedHashMap<*, *>), ImgInfo.VeritySignature::class.java)
ret[2] = sig2
}
ret[0] = imgArgs
ret[1] = info
return ret
fun readBack2(): BootImgInfo {
val param = ParamConfig()
return ObjectMapper().readValue(File(param.cfg),
UnifiedConfig::class.java).toBootImgInfo()
}
}
}

@ -1,53 +1,9 @@
package avb
import avb.desc.*
import cfig.Helper
/*
a wonderfaul base64 encoder/decoder: https://cryptii.com/base64-to-hex
*/
class AVBInfo(var header: Header? = null,
var authBlob: AuthBlob? = null,
var auxBlob: AuxBlob? = null,
var footer: Footer? = null) {
data class AuthBlob(
var offset: Long = 0L,
var size: Long = 0L,
var hash: String? = null,
var signature: String? = null)
data class AuxBlob(
var pubkey: PubKeyInfo? = null,
var pubkeyMeta: PubKeyMetadataInfo? = null,
var propertyDescriptor: MutableList<PropertyDescriptor> = mutableListOf(),
var hashTreeDescriptor: MutableList<HashTreeDescriptor> = mutableListOf(),
var hashDescriptors: MutableList<HashDescriptor> = mutableListOf(),
var kernelCmdlineDescriptor: MutableList<KernelCmdlineDescriptor> = mutableListOf(),
var chainPartitionDescriptor: MutableList<ChainPartitionDescriptor> = mutableListOf(),
var unknownDescriptors: MutableList<UnknownDescriptor> = mutableListOf()
) {
data class PubKeyInfo(
var offset: Long = 0L,
var size: Long = 0L,
var pubkey: ByteArray = byteArrayOf()
)
data class PubKeyMetadataInfo(
var offset: Long = 0L,
var size: Long = 0L
)
fun encodeDescriptors(): ByteArray {
var descList: MutableList<Descriptor> = mutableListOf()
this.hashTreeDescriptor.forEach { descList.add(it) }
this.hashDescriptors.forEach { descList.add(it) }
this.kernelCmdlineDescriptor.forEach { descList.add(it) }
this.chainPartitionDescriptor.forEach { descList.add(it) }
this.unknownDescriptors.forEach { descList.add(it) }
descList.sortBy { it.sequence }
var ret = byteArrayOf()
descList.forEach { ret = Helper.join(ret, it.encode()) }
return ret
}
}
}
var footer: Footer? = null)

@ -0,0 +1,7 @@
package avb
/**
 * "Authentication" blob of a vbmeta image.
 *
 * Holds the location (offset/size) of the auth data within the image plus the
 * decoded hash and signature values; both are null when the image is unsigned.
 * NOTE(review): hash/signature appear to be stored as hex strings elsewhere in
 * this codebase — confirm against the parser that populates them.
 */
data class AuthBlob(
var offset: Long = 0L,
var size: Long = 0L,
var hash: String? = null,
var signature: String? = null)

@ -0,0 +1,82 @@
package avb
import avb.alg.Algorithm
import avb.desc.*
import cfig.Helper
import cfig.io.Struct
import org.junit.Assert
import org.slf4j.LoggerFactory
import java.nio.file.Files
import java.nio.file.Paths
/**
 * "Auxiliary" blob of a vbmeta image: all descriptors plus the optional public
 * key and public-key metadata.
 *
 * Layout when encoded: encoded_descriptors + encoded_key + pkmd_blob + padding
 * (padded up to a multiple of 64 bytes).
 */
data class AuxBlob(
        var pubkey: PubKeyInfo? = null,
        var pubkeyMeta: PubKeyMetadataInfo? = null,
        var propertyDescriptor: MutableList<PropertyDescriptor> = mutableListOf(),
        var hashTreeDescriptor: MutableList<HashTreeDescriptor> = mutableListOf(),
        var hashDescriptors: MutableList<HashDescriptor> = mutableListOf(),
        var kernelCmdlineDescriptor: MutableList<KernelCmdlineDescriptor> = mutableListOf(),
        var chainPartitionDescriptor: MutableList<ChainPartitionDescriptor> = mutableListOf(),
        var unknownDescriptors: MutableList<UnknownDescriptor> = mutableListOf()
) {
    /** Location and raw bytes of the embedded public key. */
    data class PubKeyInfo(
            var offset: Long = 0L,
            var size: Long = 0L,
            var pubkey: ByteArray = byteArrayOf()
    )

    /** Location and raw bytes of the public-key metadata blob. */
    data class PubKeyMetadataInfo(
            var offset: Long = 0L,
            var size: Long = 0L,
            var pkmd: ByteArray = byteArrayOf()
    )

    /**
     * Concatenate every descriptor's encoded form, ordered by each
     * descriptor's original sequence number so round-tripping preserves order.
     */
    fun encodeDescriptors(): ByteArray {
        val ordered = mutableListOf<Descriptor>()
        // Gather by tag: 0=property, 1=hashtree, 2=hash, 3=cmdline, 4=chain, X=unknown.
        ordered += this.propertyDescriptor
        ordered += this.hashTreeDescriptor
        ordered += this.hashDescriptors
        ordered += this.kernelCmdlineDescriptor
        ordered += this.chainPartitionDescriptor
        ordered += this.unknownDescriptors
        ordered.sortBy { it.sequence }
        var encoded = byteArrayOf()
        for (desc in ordered) {
            encoded = Helper.join(encoded, desc.encode())
        }
        return encoded
    }

    /**
     * Encode the whole aux blob:
     * encoded_descriptors + encoded_key + pkmd_blob + (padding to 64 bytes).
     */
    fun encode(): ByteArray {
        val descBlob = this.encodeDescriptors()
        val payloadSize = descBlob.size +
                (this.pubkey?.pubkey?.size ?: 0) +
                (this.pubkeyMeta?.pkmd?.size ?: 0)
        val paddedSize = Helper.round_to_multiple(payloadSize.toLong(), 64)
        return Struct("${paddedSize}b").pack(
                Helper.joinWithNulls(descBlob, this.pubkey?.pubkey, this.pubkeyMeta?.pkmd))
    }

    companion object {
        private val log = LoggerFactory.getLogger(AuxBlob::class.java)

        /**
         * RSA-encode [key] (or the algorithm's default key file when [key] is
         * null) for embedding in the aux blob; returns an empty array for
         * algorithms that carry no key.
         */
        fun encodePubKey(alg: Algorithm, key: ByteArray? = null): ByteArray {
            if (alg.public_key_num_bytes <= 0) {
                log.info("encodePubKey(): No key to encode for algorithm " + alg.name)
                return byteArrayOf()
            }
            val rawKey = key ?: Files.readAllBytes((Paths.get(alg.defaultKey)))
            val encodedKey = Helper.encodeRSAkey(rawKey)
            log.info("encodePubKey(): size = ${alg.public_key_num_bytes}, algorithm key size: ${encodedKey.size}")
            Assert.assertEquals(alg.public_key_num_bytes, encodedKey.size)
            return encodedKey
        }
    }
}

@ -1,40 +1,21 @@
package avb
import avb.alg.Algorithm
import avb.alg.Algorithms
import cfig.Helper
import cfig.io.Struct
import org.junit.Assert
import org.slf4j.LoggerFactory
import java.nio.file.Files
import java.nio.file.Paths
import java.security.MessageDigest
class Blob {
companion object {
fun encodePubKey(alg: Algorithm, key: ByteArray? = null): ByteArray {
var encodedKey = byteArrayOf()
var algKey: ByteArray? = key
if (alg.public_key_num_bytes > 0) {
if (key == null) {
algKey = Files.readAllBytes((Paths.get(alg.defaultKey)))
}
encodedKey = Helper.encodeRSAkey(algKey!!)
log.info("encodePubKey(): size = ${alg.public_key_num_bytes}, algorithm key size: ${encodedKey.size}")
Assert.assertEquals(alg.public_key_num_bytes, encodedKey.size)
} else {
log.info("encodePubKey(): No key to encode for algorithm " + alg.name)
}
return encodedKey
}
private val log = LoggerFactory.getLogger(Blob::class.java)
//TODO: support pkmd_blob
//encoded_descriptors + encoded_key + pkmd_blob + (padding)
fun getAuxDataBlob(encodedDesc: ByteArray, encodedKey: ByteArray): ByteArray {
fun getAuxDataBlob(encodedDesc: ByteArray, encodedKey: ByteArray, pkmdBlob: ByteArray): ByteArray {
val auxSize = Helper.round_to_multiple(
encodedDesc.size + encodedKey.size /* encoded key */ + 0L /* pkmd_blob */,
(encodedDesc.size + encodedKey.size + pkmdBlob.size).toLong(),
64)
return Struct("${auxSize}b").pack(Helper.join(encodedDesc, encodedKey))
return Struct("${auxSize}b").pack(Helper.join(encodedDesc, encodedKey, pkmdBlob))
}
fun getAuthBlob(header_data_blob: ByteArray,
@ -42,7 +23,7 @@ class Blob {
algorithm_name: String): ByteArray {
val alg = Algorithms.get(algorithm_name)!!
val authBlockSize = Helper.round_to_multiple((alg.hash_num_bytes + alg.signature_num_bytes).toLong(), 64)
if (authBlockSize == 0L) {
if (0L == authBlockSize) {
log.info("No auth blob")
return byteArrayOf()
}
@ -61,7 +42,5 @@ class Blob {
val authData = Helper.join(binaryHash, binarySignature)
return Helper.join(authData, Struct("${authBlockSize - authData.size}x").pack(0))
}
private val log = LoggerFactory.getLogger(Blob::class.java)
}
}
}

@ -1,16 +0,0 @@
package avb
import com.fasterxml.jackson.core.JsonGenerator
import com.fasterxml.jackson.databind.JsonSerializer
import com.fasterxml.jackson.databind.SerializerProvider
import org.apache.commons.codec.binary.Hex
/**
 * Jackson serializer that writes a ByteArray as a hex string
 * (empty string for null input).
 */
class ByteArraySerializer : JsonSerializer<ByteArray>() {
    override fun serialize(value: ByteArray?, gen: JsonGenerator?, serializers: SerializerProvider?) {
        val text = if (value == null) "" else Hex.encodeHexString(value)
        gen!!.writeString(text)
    }
}

@ -3,6 +3,23 @@ package avb
import cfig.io.Struct
import org.junit.Assert
import java.io.InputStream
/*
https://github.com/cfig/Android_boot_image_editor/blob/master/doc/layout.md#32-avb-footer-vboot-20
+---------------------------------------+-------------------------+ --> partition_size - block_size
| Padding | block_size - 64 |
+---------------------------------------+-------------------------+ --> partition_size - 64
| AVB Footer | total 64 |
| | |
| - Footer Magic "AVBf" | 4 |
| - Footer Major Version | 4 |
| - Footer Minor Version | 4 |
| - Original image size | 8 |
| - VBMeta offset | 8 |
| - VBMeta size | 8 |
| - Padding | 28 |
+---------------------------------------+-------------------------+ --> partition_size
*/
data class Footer constructor(
var versionMajor: Long = FOOTER_VERSION_MAJOR,
@ -14,7 +31,7 @@ data class Footer constructor(
companion object {
const val MAGIC = "AVBf"
const val SIZE = 64
const val RESERVED = 28
private const val RESERVED = 28
const val FOOTER_VERSION_MAJOR = 1L
const val FOOTER_VERSION_MINOR = 0L
private const val FORMAT_STRING = "!4s2L3Q${RESERVED}x"

@ -6,6 +6,7 @@ import cfig.io.Struct
import org.junit.Assert
import java.io.InputStream
//avbtool::AvbVBMetaHeader
data class Header(
var required_libavb_version_major: Int = Avb.AVB_VERSION_MAJOR,
var required_libavb_version_minor: Int = 0,
@ -25,10 +26,6 @@ data class Header(
var rollback_index: Long = 0L,
var flags: Long = 0,
var release_string: String = "avbtool ${Avb.AVB_VERSION_MAJOR}.${Avb.AVB_VERSION_MINOR}.${Avb.AVB_VERSION_SUB}") {
fun bump_required_libavb_version_minor(minor: Int) {
this.required_libavb_version_minor = maxOf(required_libavb_version_minor, minor)
}
@Throws(IllegalArgumentException::class)
constructor(iS: InputStream) : this() {
val info = Struct(FORMAT_STRING).unpack(iS)
@ -53,7 +50,7 @@ data class Header(
this.descriptors_size = info[15] as Long
this.rollback_index = info[16] as Long
this.flags = info[17] as Long
//padding
//padding: info[18]
this.release_string = Helper.toCString(info[19] as ByteArray)
}
@ -70,17 +67,21 @@ data class Header(
this.descriptors_offset, this.descriptors_size,
this.rollback_index,
this.flags,
null,
this.release_string.toByteArray(),
null,
null)
null, //${REVERSED0}x
this.release_string.toByteArray(), //47s
null, //x
null) //${REVERSED}x
}
fun bump_required_libavb_version_minor(minor: Int) {
this.required_libavb_version_minor = maxOf(required_libavb_version_minor, minor)
}
companion object {
const val magic: String = "AVB0"
const val SIZE = 256
const val REVERSED0 = 4
const val REVERSED = 80
private const val REVERSED0 = 4
private const val REVERSED = 80
const val FORMAT_STRING = ("!4s2L2QL11QL${REVERSED0}x47sx" + "${REVERSED}x")
init {

@ -0,0 +1,6 @@
package avb
/**
 * Container for one parsed vbmeta image: the top-level [Header], the
 * authentication blob and the auxiliary blob. Each part is nullable so a
 * partially-parsed image can be represented.
 */
class VBMeta(var header: Header? = null,
             var authBlob: AuthBlob? = null,
             var auxBlob: AuxBlob? = null) {
}

@ -5,8 +5,8 @@ import cfig.io.Struct
import java.io.InputStream
class PropertyDescriptor(
var key: String = "",
var value: String = "") : Descriptor(TAG, 0, 0) {
private var key: String = "",
private var value: String = "") : Descriptor(TAG, 0, 0) {
override fun encode(): ByteArray {
if (SIZE != Struct(FORMAT_STRING).calcSize()) {
throw RuntimeException()

@ -0,0 +1,295 @@
package cfig.bootimg
import cfig.Helper
import cfig.ParamConfig
import cfig.io.Struct
import com.fasterxml.jackson.databind.ObjectMapper
import org.junit.Assert
import org.slf4j.LoggerFactory
import java.io.File
import java.io.FileInputStream
import java.io.InputStream
import java.nio.ByteBuffer
import java.nio.ByteOrder
import java.security.MessageDigest
import java.util.regex.Pattern
/**
 * Android boot image header (boot.img / recovery.img), supporting header
 * versions 0, 1 (adds recovery dtbo fields) and 2 (adds dtb fields).
 *
 * The wire layout is described by [FORMAT_STRING]; see the accompanying
 * layout document for field offsets. All length/offset fields are kept as
 * Long to hold unsigned 32/64-bit values.
 */
open class BootImgHeader(
        var kernelLength: Long = 0,
        var kernelOffset: Long = 0,
        var ramdiskLength: Long = 0,
        var ramdiskOffset: Long = 0,
        var secondBootloaderLength: Long = 0,
        var secondBootloaderOffset: Long = 0,
        var recoveryDtboLength: Long = 0,
        var recoveryDtboOffset: Long = 0,
        var dtbLength: Long = 0,
        var dtbOffset: Long = 0,
        var tagsOffset: Long = 0,
        var pageSize: Int = 0,
        var headerSize: Long = 0,
        var headerVersion: Int = 0,
        var board: String = "",
        var cmdline: String = "",
        var hash: ByteArray? = null,
        var osVersion: String? = null,
        var osPatchLevel: String? = null) {

    /**
     * Parses a header from [iS]; a null stream yields a default instance.
     * @throws IllegalArgumentException when the magic "ANDROID!" is absent.
     */
    @Throws(IllegalArgumentException::class)
    constructor(iS: InputStream?) : this() {
        if (iS == null) {
            return
        }
        log.warn("BootImgHeader constructor")
        val info = Struct(FORMAT_STRING).unpack(iS)
        Assert.assertEquals(20, info.size)
        if (!(info[0] as ByteArray).contentEquals(magic.toByteArray())) {
            throw IllegalArgumentException("stream doesn't look like Android Boot Image Header")
        }
        this.kernelLength = info[1] as Long
        this.kernelOffset = info[2] as Long
        this.ramdiskLength = info[3] as Long
        this.ramdiskOffset = info[4] as Long
        this.secondBootloaderLength = info[5] as Long
        this.secondBootloaderOffset = info[6] as Long
        this.tagsOffset = info[7] as Long
        this.pageSize = (info[8] as Long).toInt()
        this.headerVersion = (info[9] as Long).toInt()
        val osNPatch = (info[10] as Long).toInt()
        if (0 != osNPatch) { //treated as 'reserved' in this boot image
            this.osVersion = parseOsVersion(osNPatch shr 11)
            this.osPatchLevel = parseOsPatchLevel(osNPatch and 0x7ff)
        }
        this.board = Helper.toCString(info[11] as ByteArray).trim()
        this.cmdline = Helper.toCString(info[12] as ByteArray) + Helper.toCString(info[14] as ByteArray)
        this.hash = info[13] as ByteArray
        if (this.headerVersion > 0) {
            this.recoveryDtboLength = info[15] as Long
            this.recoveryDtboOffset = info[16] as Long
        }
        this.headerSize = info[17] as Long
        // A v0 header ends at offset 1632; the bytes read here are page
        // padding, so only validate the field for v1/v2 headers.
        if (this.headerVersion > 0) {
            assert(this.headerSize.toInt() in intArrayOf(BOOT_IMAGE_HEADER_V2_SIZE, BOOT_IMAGE_HEADER_V1_SIZE))
        }
        if (this.headerVersion > 1) {
            this.dtbLength = info[18] as Long
            this.dtbOffset = info[19] as Long
        }
    }

    // Decodes the packed A.B.C os_version (7 bits each) into "A.B.C".
    private fun parseOsVersion(x: Int): String {
        val a = x shr 14
        val b = x - (a shl 14) shr 7
        val c = x and 0x7f
        return String.format("%d.%d.%d", a, b, c)
    }

    // Decodes the packed patch level (7-bit year since 2000, 4-bit month)
    // into "YYYY-MM-00"; the day is not stored in the image.
    private fun parseOsPatchLevel(x: Int): String {
        var y = x shr 4
        val m = x and 0xf
        y += 2000
        return String.format("%d-%02d-%02d", y, m, 0)
    }

    /**
     * Packs "A[.B[.C]]" into the 21-bit os_version field.
     * @throws IllegalArgumentException on malformed input.
     */
    @Throws(IllegalArgumentException::class)
    private fun packOsVersion(x: String?): Int {
        if (x.isNullOrBlank()) return 0
        val pattern = Pattern.compile("^(\\d{1,3})(?:\\.(\\d{1,3})(?:\\.(\\d{1,3}))?)?")
        val m = pattern.matcher(x)
        if (m.find()) {
            val a = Integer.decode(m.group(1))
            // Matcher.groupCount() is a property of the pattern (always 3
            // here), so the optional groups must be null-checked; the old
            // groupCount() comparison let Integer.decode(null) blow up on
            // inputs like "9".
            val b = if (m.group(2) != null) Integer.decode(m.group(2)) else 0
            val c = if (m.group(3) != null) Integer.decode(m.group(3)) else 0
            Assert.assertTrue(a < 128)
            Assert.assertTrue(b < 128)
            Assert.assertTrue(c < 128)
            return (a shl 14) or (b shl 7) or c
        } else {
            throw IllegalArgumentException("invalid os_version")
        }
    }

    /**
     * Packs "YYYY-MM-DD" into the 11-bit os_patch_level field (day dropped).
     * @throws IllegalArgumentException on malformed input.
     */
    private fun packOsPatchLevel(x: String?): Int {
        if (x.isNullOrBlank()) return 0
        val ret: Int
        val pattern = Pattern.compile("^(\\d{4})-(\\d{2})-(\\d{2})")
        val matcher = pattern.matcher(x)
        if (matcher.find()) {
            val y = Integer.parseInt(matcher.group(1), 10) - 2000
            val m = Integer.parseInt(matcher.group(2), 10)
            // 7 bits allocated for the year, 4 bits for the month
            Assert.assertTrue(y in 0..127)
            Assert.assertTrue(m in 1..12)
            ret = (y shl 4) or m
        } else {
            throw IllegalArgumentException("invalid os_patch_level")
        }
        return ret
    }

    /**
     * SHA1 over each input as (content, LE32 size); a null entry contributes
     * only a zero LE32 size, matching mkbootimg's image id computation.
     */
    @Throws(CloneNotSupportedException::class)
    private fun hashFileAndSize(vararg inFiles: String?): ByteArray {
        val md = MessageDigest.getInstance("SHA1")
        for (item in inFiles) {
            if (null == item) {
                md.update(ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN)
                        .putInt(0)
                        .array())
                log.debug("update null $item: " + Helper.toHexString((md.clone() as MessageDigest).digest()))
            } else {
                val currentFile = File(item)
                FileInputStream(currentFile).use { iS ->
                    var byteRead: Int
                    val dataRead = ByteArray(1024)
                    while (true) {
                        byteRead = iS.read(dataRead)
                        if (-1 == byteRead) {
                            break
                        }
                        md.update(dataRead, 0, byteRead)
                    }
                    log.debug("update file $item: " + Helper.toHexString((md.clone() as MessageDigest).digest()))
                    md.update(ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN)
                            .putInt(currentFile.length().toInt())
                            .array())
                    log.debug("update SIZE $item: " + Helper.toHexString((md.clone() as MessageDigest).digest()))
                }
            }
        }
        return md.digest()
    }

    // Re-derives all section lengths from the files on disk (ParamConfig
    // paths) and recomputes the image hash before packing.
    private fun refresh() {
        val param = ParamConfig()
        //refresh kernel size
        if (0L == this.kernelLength) {
            throw java.lang.IllegalArgumentException("kernel size can not be 0")
        } else {
            this.kernelLength = File(param.kernel).length()
        }
        //refresh ramdisk size
        if (0L == this.ramdiskLength) {
            param.ramdisk = null
        } else {
            this.ramdiskLength = File(param.ramdisk).length()
        }
        //refresh second bootloader size
        if (0L == this.secondBootloaderLength) {
            param.second = null
        } else {
            this.secondBootloaderLength = File(param.second).length()
        }
        //refresh recovery dtbo size
        if (0L == this.recoveryDtboLength) {
            param.dtbo = null
        } else {
            this.recoveryDtboLength = File(param.dtbo).length()
        }
        //refresh dtb size
        if (0L == this.dtbLength) {
            param.dtb = null
        } else {
            this.dtbLength = File(param.dtb).length()
        }
        //refresh image hash; the hashed file set grows with header version
        val imageId = when (this.headerVersion) {
            0 -> {
                hashFileAndSize(param.kernel, param.ramdisk, param.second)
            }
            1 -> {
                hashFileAndSize(param.kernel, param.ramdisk, param.second, param.dtbo)
            }
            2 -> {
                hashFileAndSize(param.kernel, param.ramdisk, param.second, param.dtbo, param.dtb)
            }
            else -> {
                throw java.lang.IllegalArgumentException("headerVersion ${this.headerVersion} illegal")
            }
        }
        this.hash = imageId
    }

    /** Serializes this header (after [refresh]) into its on-disk form. */
    fun encode(): ByteArray {
        this.refresh()
        val ret = Struct(FORMAT_STRING).pack(
                "ANDROID!".toByteArray(),
                //10I
                this.kernelLength,
                this.kernelOffset,
                this.ramdiskLength,
                this.ramdiskOffset,
                this.secondBootloaderLength,
                this.secondBootloaderOffset,
                this.tagsOffset,
                this.pageSize,
                this.headerVersion,
                (packOsVersion(this.osVersion) shl 11) or packOsPatchLevel(this.osPatchLevel),
                //16s
                this.board.toByteArray(),
                //512s
                this.cmdline.substring(0, minOf(512, this.cmdline.length)).toByteArray(),
                //32s
                this.hash!!,
                //1024s
                if (this.cmdline.length > 512) this.cmdline.substring(512).toByteArray() else byteArrayOf(0),
                //I
                this.recoveryDtboLength,
                //Q
                if (this.headerVersion > 0) this.recoveryDtboOffset else 0,
                //I: header size field; the old code built the exception
                //without 'throw', silently packing the exception object
                when (this.headerVersion) {
                    0 -> 0
                    1 -> BOOT_IMAGE_HEADER_V1_SIZE
                    2 -> BOOT_IMAGE_HEADER_V2_SIZE
                    else -> throw java.lang.IllegalArgumentException("headerVersion ${this.headerVersion} illegal")
                },
                //I
                this.dtbLength,
                //Q
                if (this.headerVersion > 1) this.dtbOffset else 0
        )
        return ret
    }

    companion object {
        // logger registered under this class (was BootImgInfo, a subclass)
        internal val log = LoggerFactory.getLogger(BootImgHeader::class.java)
        const val magic = "ANDROID!"
        const val FORMAT_STRING = "8s" + //"ANDROID!"
                "10I" +
                "16s" + //board name
                "512s" + //cmdline part 1
                "32s" + //hash digest
                "1024s" + //cmdline part 2
                "I" + //dtbo length [v1]
                "Q" + //dtbo offset [v1]
                "I" + //header size [v1]
                "I" + //dtb length [v2]
                "Q" //dtb offset [v2]
        const val BOOT_IMAGE_HEADER_V2_SIZE = 1660
        const val BOOT_IMAGE_HEADER_V1_SIZE = 1648

        init {
            Assert.assertEquals(BOOT_IMAGE_HEADER_V2_SIZE, Struct(FORMAT_STRING).calcSize())
        }
    }
}

@ -0,0 +1,120 @@
package cfig.bootimg
import cfig.ParamConfig
import org.apache.commons.exec.CommandLine
import java.io.InputStream
/**
 * Parsed boot image plus derived layout: absolute byte positions of each
 * section in the image file, each section padded to a page boundary.
 */
class BootImgInfo(iS: InputStream?) : BootImgHeader(iS) {
    constructor() : this(null)

    // kernel starts right after the page-padded header
    val kernelPosition: Int
        get() {
            return getHeaderSize(this.pageSize)
        }

    val ramdiskPosition: Int
        get() {
            return (kernelPosition + this.kernelLength +
                    getPaddingSize(this.kernelLength.toInt(), this.pageSize)).toInt()
        }

    val secondBootloaderPosition: Int
        get() {
            return (ramdiskPosition + ramdiskLength +
                    getPaddingSize(ramdiskLength.toInt(), pageSize)).toInt()
        }

    val recoveryDtboPosition: Int
        get() {
            return (secondBootloaderPosition + secondBootloaderLength +
                    getPaddingSize(secondBootloaderLength.toInt(), pageSize)).toInt()
        }

    val dtbPosition: Int
        get() {
            // fix: dtb follows the recovery-dtbo section, so advance by the
            // recovery dtbo length (the old code wrongly used dtbLength)
            return (recoveryDtboPosition + recoveryDtboLength +
                    getPaddingSize(recoveryDtboLength.toInt(), pageSize)).toInt()
        }

    var signatureType: BootImgInfo.VerifyType? = null
    var imageSize: Long = 0

    private fun getHeaderSize(pageSize: Int): Int {
        // NOTE(review): pads the v1 header size (1648) to a page boundary;
        // for a v2 header (1660) the result is identical for any
        // pageSize >= 2048 — confirm if smaller page sizes ever appear.
        val pad = (pageSize - (1648 and (pageSize - 1))) and (pageSize - 1)
        return pad + 1648
    }

    // bytes needed to pad 'position' up to the next multiple of pageSize
    private fun getPaddingSize(position: Int, pageSize: Int): Int {
        return (pageSize - (position and pageSize - 1)) and (pageSize - 1)
    }

    /** Builds the mkbootimg command line that reproduces this image. */
    fun toCommandLine(): CommandLine {
        val param = ParamConfig()
        val ret = CommandLine(param.mkbootimg)
        ret.addArgument(" --header_version ")
        ret.addArgument(headerVersion.toString())
        ret.addArgument(" --base ")
        ret.addArgument("0x" + java.lang.Long.toHexString(0))
        ret.addArgument(" --kernel ")
        ret.addArgument(param.kernel)
        ret.addArgument(" --kernel_offset ")
        ret.addArgument("0x" + java.lang.Long.toHexString(kernelOffset))
        if (this.ramdiskLength > 0) {
            ret.addArgument(" --ramdisk ")
            ret.addArgument(param.ramdisk)
        }
        ret.addArgument(" --ramdisk_offset ")
        ret.addArgument("0x" + java.lang.Long.toHexString(ramdiskOffset))
        if (this.secondBootloaderLength > 0) {
            ret.addArgument(" --second ")
            ret.addArgument(param.second)
        }
        ret.addArgument(" --second_offset ")
        ret.addArgument("0x" + java.lang.Long.toHexString(this.secondBootloaderOffset))
        if (!board.isBlank()) {
            ret.addArgument(" --board ")
            ret.addArgument(board)
        }
        if (headerVersion > 0) {
            if (this.recoveryDtboLength > 0) {
                ret.addArgument(" --recovery_dtbo ")
                ret.addArgument(param.dtbo)
            }
        }
        if (headerVersion > 1) {
            if (this.dtbLength > 0) {
                // spelled " --dtb " for consistency with every other flag
                ret.addArgument(" --dtb ")
                ret.addArgument(param.dtb)
            }
            ret.addArgument(" --dtb_offset ")
            ret.addArgument("0x" + java.lang.Long.toHexString(this.dtbOffset))
        }
        ret.addArgument(" --pagesize ")
        ret.addArgument(Integer.toString(pageSize))
        ret.addArgument(" --cmdline ")
        ret.addArgument(cmdline, false)
        if (!osVersion.isNullOrBlank()) {
            ret.addArgument(" --os_version ")
            ret.addArgument(osVersion)
        }
        if (!osPatchLevel.isNullOrBlank()) {
            ret.addArgument(" --os_patch_level ")
            ret.addArgument(osPatchLevel)
        }
        ret.addArgument(" --tags_offset ")
        ret.addArgument("0x" + java.lang.Long.toHexString(tagsOffset))
        ret.addArgument(" --id ")
        ret.addArgument(" --output ")
        //ret.addArgument("boot.img" + ".google")
        log.info("To Commandline: " + ret.toString())
        return ret
    }

    // how the image is (to be) verified
    enum class VerifyType {
        VERIFY,
        AVB
    }
}

File diff suppressed because one or more lines are too long

@ -1,7 +1,5 @@
apply plugin: 'java'
apply plugin: 'groovy'
subprojects {
tasks.withType(JavaCompile) {
//options.compilerArgs << "-Xlint:unchecked" << "-Xlint:deprecation"
@ -11,6 +9,14 @@ subprojects {
// ----------------------------------------------------------------------------
// global
// ----------------------------------------------------------------------------
// Require Gradle >= 5.0. Compare on the major version number only:
// Float.parseFloat() throws NumberFormatException on three-part version
// strings such as "5.1.1", aborting the build for perfectly valid Gradle.
if ((gradle.gradleVersion.tokenize('.')[0] as int) < 5) {
    logger.error("ERROR: Gradle Version MUST >= 5.0, current is {}", gradle.gradleVersion)
    throw new RuntimeException("ERROR: Gradle Version")
} else {
    logger.info("Gradle Version {}", gradle.gradleVersion)
}
def workdir = 'build/unzip_boot'
project.ext.rootWorkDir = new File(workdir).getAbsolutePath()
String activeImg = "boot.img"
@ -33,7 +39,7 @@ project.ext.mkbootimgBin = new File("src/mkbootimg/mkbootimg").getAbsolutePath()
project.ext.mkbootfsBin = new File("mkbootfs/build/exe/mkbootfs/mkbootfs").getAbsolutePath()
project.ext.avbtool = new File("avb/avbtool").getAbsolutePath()
project.ext.bootSigner = new File("boot_signer/build/libs/boot_signer.jar").getAbsolutePath()
println("Active image target: " + activeImg)
logger.warn("Active image target: " + activeImg)
// ----------------------------------------------------------------------------
// tasks
@ -168,12 +174,3 @@ task rr {
rebootRecovery()
}
}
// True when *file* equals one of the entries of *inArray* (String.equals).
boolean inArray(String file, String[] inArray) {
    return (inArray as List).contains(file)
}

@ -15,81 +15,93 @@
### 1. header part
item size in bytes position
+----------------------------------------------------------+ --> 0
|<MAGIC HEADER> | 8 |
|--------------------------------+-------------------------| --> 8
|<kernel length> | 4 |
|--------------------------------+-------------------------| --> 12
|<kernel offset> | 4 |
|--------------------------------+-------------------------| --> 16 (0x10)
|<ramdisk length> | 4 |
|--------------------------------+-------------------------| --> 20
|<ramdisk offset> | 4 |
|--------------------------------+-------------------------| --> 24
|<second bootloader length> | 4 |
|--------------------------------+-------------------------| --> 28
|<second bootloader offset> | 4 |
|--------------------------------+-------------------------| --> 32 (0x20)
|<tags offset> | 4 |
|--------------------------------+-------------------------| --> 36
|<page size> | 4 |
|--------------------------------+-------------------------| --> 40
|<header version> | 4 |
|--------------------------------+-------------------------| --> 44
|<os version& os patch level> | 4 |
|--------------------------------+-------------------------| --> 48 (0x30)
|<board name> | 16 |
|--------------------------------+-------------------------| --> 64 (0x40)
|<cmdline part 1> | 512 |
|--------------------------------+-------------------------| --> 576 (0x240)
|<hash digest> | 32 |
|--------------------------------+-------------------------| --> 608 (0x260)
|<cmdline part 2> | 1024 |
|--------------------------------+-------------------------| --> 1632 (0x660)
|<dtbo length> | 4 |
|--------------------------------+-------------------------| --> 1636
|<dtbo offset> | 8 |
|--------------------------------+-------------------------| --> 1644
|<header size> | 4 |
|--------------------------------+-------------------------| --> 1648 (0x670)
|<padding> | min(n * page_size - 1648)|
+----------------------------------------------------------+ --> pagesize
+-----------------------------------------------------------+ --> 0
|<MAGIC HEADER> | 8 |
|--------------------------------+--------------------------| --> 8
|<kernel length> | 4 |
|--------------------------------+--------------------------| --> 12
|<kernel offset> | 4 |
|--------------------------------+--------------------------| --> 16 (0x10)
|<ramdisk length> | 4 |
|--------------------------------+--------------------------| --> 20
|<ramdisk offset> | 4 |
|--------------------------------+--------------------------| --> 24
|<second bootloader length> | 4 |
|--------------------------------+--------------------------| --> 28
|<second bootloader offset> | 4 |
|--------------------------------+--------------------------| --> 32 (0x20)
|<tags offset> | 4 |
|--------------------------------+--------------------------| --> 36
|<page size> | 4 |
|--------------------------------+--------------------------| --> 40
|<header version> | 4 (value in [0,1,2]) |
|--------------------------------+--------------------------| --> 44
|<os version & os patch level> | 4 |
|--------------------------------+--------------------------| --> 48 (0x30)
|<board name> | 16 |
|--------------------------------+--------------------------| --> 64 (0x40)
|<cmdline part 1> | 512 |
|--------------------------------+--------------------------| --> 576 (0x240)
|<hash digest> | 32 |
|--------------------------------+--------------------------| --> 608 (0x260)
|<cmdline part 2> | 1024 |
|--------------------------------+--------------------------| --> 1632 (0x660)
|<dtbo length> [v1] | 4 |
|--------------------------------+--------------------------| --> 1636
|<dtbo offset> [v1] | 8 |
|--------------------------------+--------------------------| --> 1644
|<header size> [v1] | 4 (v1: value=1648) |
| | (v2: value=1660) |
|--------------------------------+--------------------------| --> 1648 (0x670)
|<dtb length> [v2] | 4 |
|--------------------------------+--------------------------| --> 1652
|<dtb offset> [v2] | 8 |
|--------------------------------+--------------------------| --> 1660 (0x67c)
|<padding> | min(n * page_size |
| | - header_size) |
+--------------------------------+--------------------------+ --> pagesize
### 2. data part
+----------------------------------------------------------+ --> pagesize
|<kernel> | kernel length |
|--------------------------------+-------------------------|
|<padding> | min(n * page_size - len)|
+----------------------------------------------------------+
+--------------------------------+-------------------------+
|<ramdisk> | ramdisk length |
|--------------------------------+-------------------------|
|<padding> | min(n * page_size - len)|
+----------------------------------------------------------+
+--------------------------------+-------------------------+
|<second bootloader> | second bootloader length|
|--------------------------------+-------------------------|
|<padding> | min(n * page_size - len)|
+----------------------------------------------------------+
+--------------------------------+-------------------------+
|<recovery dtbo> | recovery dtbo length |
|--------------------------------+-------------------------|
|<padding> | min(n * page_size - len)|
+----------------------------------------------------------+ --> end of data part
+-----------------------------------------------------------+ --> pagesize
|<kernel> | kernel length |
|--------------------------------+--------------------------|
|<padding> | min(n * page_size - len) |
+-----------------------------------------------------------+
+-----------------------------------------------------------+
|<ramdisk> | ramdisk length |
|--------------------------------+--------------------------|
|<padding> | min(n * page_size - len) |
+-----------------------------------------------------------+
+-----------------------------------------------------------+
|<second bootloader> | second bootloader length |
|--------------------------------+--------------------------|
|<padding> | min(n * page_size - len) |
+-----------------------------------------------------------+
+-----------------------------------------------------------+
|<recovery dtbo> [v1] | recovery dtbo length |
|--------------------------------+--------------------------|
|<padding> [v1] | min(n * page_size - len) |
+-----------------------------------------------------------+
+-----------------------------------------------------------+
|<dtb> [v2] | dtb length |
|--------------------------------+--------------------------|
|<padding> [v2] | min(n * page_size - len) |
+-----------------------------------------------------------+ --> end of data part
### 3. signature part
#### 3.1 Boot Image Signature (VBoot 1.0)
+--------------------------------+-------------------------+ --> end of data part
|<signature> | signature length |
|--------------------------------+-------------------------|
|<padding> | defined by boot_signer |
+--------------------------------+-------------------------+
+--------------------------------+--------------------------+ --> end of data part
|<signature> | signature length |
|--------------------------------+--------------------------+
|<padding> | defined by boot_signer |
+--------------------------------+--------------------------+
#### 3.2 AVB Footer (VBoot 2.0)

@ -0,0 +1,67 @@
#!/usr/bin/env python3
import shutil, os.path, json, subprocess, hashlib, glob
import unittest
def hashFile(fileName):
    """Return the hex MD5 digest of *fileName*.

    Reads the file in fixed-size chunks instead of slurping it whole, so
    multi-MB boot images don't get loaded into memory at once. Result is
    identical to hashing the full contents in one update() call.
    """
    hasher = hashlib.md5()
    with open(fileName, 'rb') as afile:
        for chunk in iter(lambda: afile.read(65536), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
def deleteIfExists(inFile):
    """Delete *inFile* when it is an existing regular file; otherwise no-op."""
    if not os.path.isfile(inFile):
        return
    os.remove(inFile)
def verifySingleJson(inResourceDir, inImageDir, jsonFile):
    """Round-trip one image described by *jsonFile* and verify output hashes.

    The JSON config has two maps: "copy" (resource file -> working name,
    staged before the gradle run) and "hash" (output file -> expected md5,
    checked after 'gradle unpack' + 'gradle pack'). Cleans up the build
    directory and intermediate images afterwards.
    """
    print(jsonFile)
    # 'with' closes the handle; json.load(open(...)) leaked it
    with open(jsonFile) as f:
        verifyItems = json.load(f)
    for k, v in verifyItems["copy"].items():
        shutil.copyfile(os.path.join(inResourceDir, inImageDir, k), v)
    subprocess.check_call("gradle unpack", shell = True)
    subprocess.check_call("gradle pack", shell = True)
    for k, v in verifyItems["hash"].items():
        print("%s : %s" % (k, v))
        unittest.TestCase().assertEqual(hashFile(k), v)
    shutil.rmtree("build")
    # one loop instead of ten copy-pasted cleanup calls
    for leftover in ("boot.img", "boot.img.clear", "boot.img.google",
                     "boot.img.signed",
                     "recovery.img", "recovery.img.clear",
                     "recovery.img.google", "recovery.img.signed",
                     "vbmeta.img", "vbmeta.img.signed"):
        deleteIfExists(leftover)
def verifySingleDir(inResourceDir, inImageDir):
    """Run verifySingleJson() for every *.json config under the image dir."""
    target = os.path.join(inResourceDir, inImageDir)
    print("enter %s ..." % target)
    for cfg in glob.glob(target + "/*.json"):
        verifySingleJson(inResourceDir, inImageDir, cfg)
# Integration-test driver: each section below round-trips a known factory
# image through 'gradle unpack' + 'gradle pack' and verifies output hashes.

# 5.0
verifySingleDir("boot_image_res", "5.0_fugu_lrx21m")
# 6.0
verifySingleDir("boot_image_res", "6.0.0_bullhead_mda89e")
# 7.0 special boot: skip the image's first 256 bytes before unpacking
# (presumably a device-specific prefix on volantis images -- TODO confirm)
subprocess.check_call("dd if=boot_image_res/7.1.1_volantis_n9f27m/boot.img of=boot.img bs=256 skip=1", shell = True)
verifySingleJson("boot_image_res", "7.1.1_volantis_n9f27m", "boot_image_res/7.1.1_volantis_n9f27m/boot.json")
# 7.0 special recovery: same 256-byte skip as the boot image above
subprocess.check_call("dd if=boot_image_res/7.1.1_volantis_n9f27m/recovery.img of=recovery.img bs=256 skip=1", shell = True)
verifySingleJson("boot_image_res", "7.1.1_volantis_n9f27m", "boot_image_res/7.1.1_volantis_n9f27m/recovery.json")
# 8.0
verifySingleDir("boot_image_res", "8.0.0_fugu_opr2.170623.027")
# 9.0 + avb: boot.img ships compressed in the test resources, extract first
subprocess.check_call("tar xf boot_image_res/9.0.0_blueline_pq1a.181105.017.a1/boot.img.tar.gz", shell = True)
verifySingleJson("boot_image_res", "9.0.0_blueline_pq1a.181105.017.a1", "boot_image_res/9.0.0_blueline_pq1a.181105.017.a1/boot.json")
verifySingleJson("boot_image_res", "9.0.0_blueline_pq1a.181105.017.a1", "boot_image_res/9.0.0_blueline_pq1a.181105.017.a1/vbmeta.json")

@ -45,8 +45,30 @@ def pad_file(f, padding):
f.write(pack(str(pad) + 'x'))
def get_number_of_pages(image_size, page_size):
    """calculates the number of pages required for the image

    Uses floor division for the ceiling computation: the original '/'
    returns a float under Python 3, which would corrupt every byte offset
    derived from the page count (e.g. the recovery dtbo offset).
    """
    return (image_size + page_size - 1) // page_size
def get_recovery_dtbo_offset(args):
    """calculates the offset of recovery_dtbo image in the boot image"""
    # the header occupies one page; kernel, ramdisk and second bootloader
    # each occupy a page-aligned number of pages before the recovery dtbo
    preceding_pages = 1
    for blob in (args.kernel, args.ramdisk, args.second):
        preceding_pages += get_number_of_pages(filesize(blob), args.pagesize)
    return args.pagesize * preceding_pages
def write_header(args):
BOOT_IMAGE_HEADER_V1_SIZE = 1648
BOOT_IMAGE_HEADER_V2_SIZE = 1660
BOOT_MAGIC = 'ANDROID!'.encode()
if (args.header_version > 2):
raise ValueError('Boot header version %d not supported' % args.header_version)
args.output.write(pack('8s', BOOT_MAGIC))
args.output.write(pack('10I',
filesize(args.kernel), # size in bytes
@ -69,6 +91,8 @@ def write_header(args):
if args.header_version > 0:
update_sha(sha, args.recovery_dtbo)
if args.header_version > 1:
update_sha(sha, args.dtb)
img_id = pack('32s', sha.digest())
@ -76,10 +100,21 @@ def write_header(args):
args.output.write(pack('1024s', args.cmdline[512:].encode()))
if args.header_version > 0:
args.output.write(pack('I', filesize(args.recovery_dtbo))) # size in bytes
args.output.write(pack('Q', args.base + args.recovery_dtbo_offset)) # physical load addr
args.output.write(pack('I', args.output.tell() + 4)) # size of boot header
args.output.write(pack('I', filesize(args.recovery_dtbo))) # size in bytes
if args.recovery_dtbo:
args.output.write(pack('Q', get_recovery_dtbo_offset(args))) # recovery dtbo offset
else:
args.output.write(pack('Q', 0)) # Will be set to 0 for devices without a recovery dtbo
# Populate boot image header size for header versions 1 and 2.
if args.header_version == 1:
args.output.write(pack('I', BOOT_IMAGE_HEADER_V1_SIZE))
elif args.header_version == 2:
args.output.write(pack('I', BOOT_IMAGE_HEADER_V2_SIZE))
if args.header_version > 1:
args.output.write(pack('I', filesize(args.dtb))) # size in bytes
args.output.write(pack('Q', args.base + args.dtb_offset)) # dtb physical load address
pad_file(args.output, args.pagesize)
return img_id
@ -142,7 +177,11 @@ def parse_cmdline():
required=True)
parser.add_argument('--ramdisk', help='path to the ramdisk', type=FileType('rb'))
parser.add_argument('--second', help='path to the 2nd bootloader', type=FileType('rb'))
parser.add_argument('--recovery_dtbo', help='path to the recovery DTBO', type=FileType('rb'))
parser.add_argument('--dtb', help='path to dtb', type=FileType('rb'))
recovery_dtbo_group = parser.add_mutually_exclusive_group()
recovery_dtbo_group.add_argument('--recovery_dtbo', help='path to the recovery DTBO', type=FileType('rb'))
recovery_dtbo_group.add_argument('--recovery_acpio', help='path to the recovery ACPIO',
type=FileType('rb'), metavar='RECOVERY_ACPIO', dest='recovery_dtbo')
parser.add_argument('--cmdline', help='extra arguments to be passed on the '
'kernel command line', default='', action=ValidateStrLenAction, maxlen=1536)
parser.add_argument('--base', help='base address', type=parse_int, default=0x10000000)
@ -150,8 +189,8 @@ def parse_cmdline():
parser.add_argument('--ramdisk_offset', help='ramdisk offset', type=parse_int, default=0x01000000)
parser.add_argument('--second_offset', help='2nd bootloader offset', type=parse_int,
default=0x00f00000)
parser.add_argument('--recovery_dtbo_offset', help='recovery dtbo offset', type=parse_int,
default=0x0f000000)
parser.add_argument('--dtb_offset', help='dtb offset', type=parse_int, default=0x01f00000)
parser.add_argument('--os_version', help='operating system version', type=parse_os_version,
default=0)
parser.add_argument('--os_patch_level', help='operating system patch level',
@ -176,6 +215,8 @@ def write_data(args):
if args.header_version > 0:
write_padded_file(args.output, args.recovery_dtbo, args.pagesize)
if args.header_version > 1:
write_padded_file(args.output, args.dtb, args.pagesize)
def main():
args = parse_cmdline()

Loading…
Cancel
Save